Class: N2M::Llm::Ollama

Inherits: Object

Defined in: lib/n2b/llm/ollama.rb

Constant Summary

DEFAULT_OLLAMA_API_URI = 'http://localhost:11434/api/chat'

Default API URI for Ollama. The constructor allows overriding it via the 'ollama_api_url' config key.

Instance Method Summary

Constructor Details

#initialize(config) ⇒ Ollama

Returns a new instance of Ollama.

# File 'lib/n2b/llm/ollama.rb', line 12

def initialize(config)
  @config = config
  # Allow overriding the Ollama API URI from config if needed
  @api_uri = URI.parse(@config['ollama_api_url'] || DEFAULT_OLLAMA_API_URI)
end
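
For example, instantiation with an endpoint override might look like this (a minimal sketch: 'model' and 'ollama_api_url' are the config keys this class reads, while the model name itself is a placeholder):

# 'llama3' is a hypothetical model name; use one available in your Ollama install.
config = {
  'model'          => 'llama3',
  'ollama_api_url' => 'http://localhost:11434/api/chat' # optional; defaults to DEFAULT_OLLAMA_API_URI
}
client = N2M::Llm::Ollama.new(config)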

Instance Method Details

#analyze_code_diff(prompt_content) ⇒ Object

# File 'lib/n2b/llm/ollama.rb', line 84

def analyze_code_diff(prompt_content)
  request = Net::HTTP::Post.new(@api_uri)
  request.content_type = 'application/json'

  # The prompt_content for diff analysis should instruct the LLM to return JSON.
  # For Ollama, you can also try adding "format": "json" to the request if the model supports it.
  request_body = {
    "model" => get_model_name,
    "messages" => [
      {
        "role" => "user",
        "content" => prompt_content # This prompt must ask for JSON output
      }
    ],
    "stream" => false
  }
  # Some Ollama models/versions might respect a "format": "json" parameter
  # request_body["format"] = "json" # Uncomment if you want to try this

  request.body = JSON.dump(request_body)

  begin
    response = Net::HTTP.start(@api_uri.hostname, @api_uri.port, use_ssl: @api_uri.scheme == 'https') do |http|
      http.open_timeout = 5
      http.read_timeout = 180 # Potentially longer for analysis
      http.request(request)
    end
  rescue Net::OpenTimeout, Net::ReadTimeout => e
    raise N2B::LlmApiError.new("Ollama API Error (analyze_code_diff): Timeout for #{@api_uri}: #{e.message}")
  rescue Errno::ECONNREFUSED => e
    raise N2B::LlmApiError.new("Ollama API Error (analyze_code_diff): Connection refused at #{@api_uri}. Is Ollama running? #{e.message}")
  end

  if response.code != '200'
    raise N2B::LlmApiError.new("Ollama API Error (analyze_code_diff): #{response.code} #{response.message} - #{response.body}")
  end

  # Return the raw JSON string from the LLM's response content.
  # The calling method (call_llm_for_diff_analysis in cli.rb) will parse this.
  raw_response_body = JSON.parse(response.body)
  raw_response_body['message']['content']
end
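
A caller-side sketch (the prompt wording and the diff_text variable are illustrative; in n2b the returned string is parsed by call_llm_for_diff_analysis in cli.rb):

# diff_text is a hypothetical variable holding a unified diff.
raw = client.analyze_code_diff("Return a JSON object summarizing this diff:\n#{diff_text}")
analysis = JSON.parse(raw) # only succeeds if the prompt reliably elicits JSON output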

#get_model_name ⇒ Object

# File 'lib/n2b/llm/ollama.rb', line 18

def get_model_name
  # Resolve model name using the centralized configuration
  model_name = N2B::ModelConfig.resolve_model('ollama', @config['model'])
  if model_name.nil? || model_name.empty?
    # Fallback to default if no model specified
    model_name = N2B::ModelConfig.resolve_model('ollama', N2B::ModelConfig.default_model('ollama'))
  end
  model_name
end
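
A hedged example of the resolution order (assuming 'llama3' is an entry N2B::ModelConfig knows about for the 'ollama' provider):

client = N2M::Llm::Ollama.new({ 'model' => 'llama3' })
client.get_model_name # => the resolved name; falls back to the 'ollama' default when 'model' is unset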

#make_request(prompt_content) ⇒ Object

# File 'lib/n2b/llm/ollama.rb', line 28

def make_request(prompt_content)
  request = Net::HTTP::Post.new(@api_uri)
  request.content_type = 'application/json'

  # Ollama expects the model name directly in the request body.
  # It also expects the full message history.
  request.body = JSON.dump({
    "model" => get_model_name,
    "messages" => [
      {
        "role" => "user",
        "content" => prompt_content
      }
    ],
    "stream" => false # Ensure we get the full response, not a stream
    # "format" => "json" # For some Ollama versions/models to enforce JSON output
  })

  begin
    response = Net::HTTP.start(@api_uri.hostname, @api_uri.port, use_ssl: @api_uri.scheme == 'https') do |http|
      # Set timeouts: open_timeout for connection, read_timeout for waiting for response
      http.open_timeout = 5 # seconds
      http.read_timeout = 120 # seconds
      http.request(request)
    end
  rescue Net::OpenTimeout, Net::ReadTimeout => e
    raise N2B::LlmApiError.new("Ollama API Error: Timeout connecting or reading from Ollama at #{@api_uri}: #{e.message}")
  rescue Errno::ECONNREFUSED => e
    raise N2B::LlmApiError.new("Ollama API Error: Connection refused at #{@api_uri}. Is Ollama running? #{e.message}")
  end

  if response.code != '200'
    raise N2B::LlmApiError.new("Ollama API Error: #{response.code} #{response.message} - #{response.body}")
  end

  # Ollama's chat response structure is slightly different. The message is in `message.content`.
  raw_response_body = JSON.parse(response.body)
  answer_content = raw_response_body['message']['content']

  begin
    # Attempt to parse the answer_content as JSON
    # This is for n2b's expectation of JSON with 'commands' and 'explanation'
    parsed_answer = JSON.parse(answer_content)
    if parsed_answer.is_a?(Hash) && parsed_answer.key?('commands')
      parsed_answer
    else
      # If the content itself is valid JSON but not the expected structure, wrap it.
      { 'commands' => [answer_content], 'explanation' => 'Response from LLM (JSON content).' }
    end
  rescue JSON::ParserError
    # If answer_content is not JSON, wrap it in the n2b expected structure
    { 'commands' => [answer_content], 'explanation' => answer_content }
  end
end
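
A usage sketch showing the normalized return shape (the prompt is illustrative; the 'commands' and 'explanation' keys come from the wrapping logic above):

result = client.make_request('Suggest a shell command that counts lines across *.rb files')
result['commands']    # => Array of command strings, or [raw_answer] when the reply is not JSON
result['explanation'] # => accompanying explanation text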