# File 'lib/rails_code_auditor/llm_client.rb', line 18
def self.score_with_llm(json_results, endpoint: "http://localhost:11434/api/generate", model: "llama3")
  sanitized = sanitize_results(json_results)
  puts "[*] Scoring with LLM (LLaMA3)..."

  prompt = <<~PROMPT
    Analyze the following Rails code audit summary and return a JSON object like:
    {
      "security": 75,
      "code_quality": 80,
      "test_coverage": 60,
      "dependencies": 90,
      "overall": 76
    }

    Only return the raw JSON, nothing else.

    Input:
    #{JSON.pretty_generate(sanitized)}
  PROMPT

  uri = URI(endpoint)
  body = {
    model: model,
    prompt: prompt,
    stream: false
  }

  response = Net::HTTP.post(uri, body.to_json, "Content-Type" => "application/json")
  response_body = JSON.parse(response.body)

  raw_output = response_body["response"].strip
  # Try to extract JSON from any surrounding text
  json_match = raw_output.match(/\{.*\}/m)

  raise "Response did not contain valid JSON" unless json_match

  parsed_scores = JSON.parse(json_match[0])
  parsed_scores.transform_values do |score|
    remark = case score
             when 90..100 then "Excellent"
             when 75..89 then "Good"
             when 60..74 then "Average"
             else "Needs Improvement"
             end
    { score: score, remark: remark }
  end
rescue StandardError => e
  puts "[!] LLM scoring failed: #{e.message}"
  nil
end
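A minimal usage sketch, not part of the documented source: it assumes a local Ollama server is listening on the default endpoint and that audit_results is the hash of tool output the auditor normally supplies. The RailsCodeAuditor::LlmClient constant is inferred from the file path and may differ in the actual gem.

# Hypothetical caller; audit_results and the constant name are illustrative assumptions.
audit_results = { "brakeman" => { "warnings" => [] } } # placeholder audit data

scores = RailsCodeAuditor::LlmClient.score_with_llm(audit_results)

if scores
  # On success the method returns a hash of { score:, remark: } pairs keyed by category.
  scores.each do |category, details|
    puts "#{category}: #{details[:score]} (#{details[:remark]})"
  end
else
  # The method rescues all StandardError and returns nil, e.g. when Ollama is unreachable.
  puts "[!] Scoring unavailable (is Ollama running on localhost:11434?)"
end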