Method: N2M::Llm::Ollama#analyze_code_diff

Defined in:
lib/n2b/llm/ollama.rb

#analyze_code_diff(prompt_content) ⇒ Object



84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
# File 'lib/n2b/llm/ollama.rb', line 84

# Sends a code-diff analysis prompt to the local Ollama chat API and returns
# the assistant message content verbatim. The content is expected to be JSON
# text produced by the model; parsing of that inner JSON is left to the
# caller (call_llm_for_diff_analysis in cli.rb).
#
# @param prompt_content [String] the full prompt; it must instruct the model
#   to answer with JSON, since Ollama is not forced into JSON mode here.
# @return [String] the `message.content` field of the Ollama chat response.
# @raise [N2B::LlmApiError] on timeout, connection refusal, non-200 HTTP
#   status, unparseable response body, or a response missing message.content.
def analyze_code_diff(prompt_content)
  request = Net::HTTP::Post.new(@api_uri)
  request.content_type = 'application/json'

  # The prompt_content for diff analysis should instruct the LLM to return JSON.
  # For Ollama, you can also try adding "format": "json" to the request if the model supports it.
  request_body = {
    "model" => get_model_name,
    "messages" => [
      {
        "role" => "user",
        "content" => prompt_content # This prompt must ask for JSON output
      }
    ],
    "stream" => false # request a single complete response, not chunked streaming
  }
  # Some Ollama models/versions might respect a "format": "json" parameter
  # request_body["format"] = "json" # Uncomment if you want to try this

  request.body = JSON.dump(request_body)

  begin
    response = Net::HTTP.start(@api_uri.hostname, @api_uri.port, use_ssl: @api_uri.scheme == 'https') do |http|
      http.open_timeout = 5
      http.read_timeout = 180 # analysis can take longer than simple completions
      http.request(request)
    end
  rescue Net::OpenTimeout, Net::ReadTimeout => e
    raise N2B::LlmApiError.new("Ollama API Error (analyze_code_diff): Timeout for #{@api_uri}: #{e.message}")
  rescue Errno::ECONNREFUSED => e
    raise N2B::LlmApiError.new("Ollama API Error (analyze_code_diff): Connection refused at #{@api_uri}. Is Ollama running? #{e.message}")
  end

  if response.code != '200'
    raise N2B::LlmApiError.new("Ollama API Error (analyze_code_diff): #{response.code} #{response.message} - #{response.body}")
  end

  # Parse only the HTTP envelope here; the LLM's own JSON answer stays a raw
  # string inside message.content for the caller to parse.
  begin
    parsed_body = JSON.parse(response.body)
  rescue JSON::ParserError => e
    # Wrap instead of leaking JSON::ParserError, consistent with the other failure paths.
    raise N2B::LlmApiError.new("Ollama API Error (analyze_code_diff): Invalid JSON in response body: #{e.message}")
  end

  # dig avoids a NoMethodError when Ollama returns an error payload without "message".
  content = parsed_body.dig('message', 'content')
  if content.nil?
    raise N2B::LlmApiError.new("Ollama API Error (analyze_code_diff): Unexpected response structure, missing message.content: #{response.body}")
  end

  content
end