Class: DSPy::LM::OllamaAdapter

Inherits:
OpenAIAdapter show all
Defined in:
lib/dspy/lm/adapters/ollama_adapter.rb

Constant Summary collapse

DEFAULT_BASE_URL =
'http://localhost:11434/v1'

Instance Attribute Summary

Attributes inherited from Adapter

#api_key, #model

Instance Method Summary collapse

Constructor Details

#initialize(model:, api_key: nil, base_url: nil, structured_outputs: true) ⇒ OllamaAdapter

Returns a new instance of OllamaAdapter.



10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
# File 'lib/dspy/lm/adapters/ollama_adapter.rb', line 10

def initialize(model:, api_key: nil, base_url: nil, structured_outputs: true)
  # Local Ollama instances accept any key, but the OpenAI client library
  # rejects an empty one; remote/protected instances may supply a real key.
  @api_key = api_key || 'ollama'
  @base_url = base_url || DEFAULT_BASE_URL
  @model = model
  @structured_outputs_enabled = structured_outputs

  # Deliberately bypass the parent's initialize: we need to construct the
  # client ourselves so it points at the Ollama base URL rather than the
  # OpenAI default endpoint.
  validate_configuration!

  @client = OpenAI::Client.new(
    api_key: @api_key,
    base_url: @base_url
  )
end

Instance Method Details

#chat(messages:, signature: nil, response_format: nil, &block) ⇒ Object



32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
# File 'lib/dspy/lm/adapters/ollama_adapter.rb', line 32

# Sends a chat request, tolerating Ollama's partial support for OpenAI's
# structured-output (response_format) spec.
#
# If the structured-output request fails, we disable the flag and retry the
# call once with enhanced prompting instead. The retry is bounded: once the
# flag is false the guard condition cannot match again, so a second failure
# re-raises. Unlike the previous implementation, the flag is restored in
# `ensure`, so a single transient failure no longer permanently disables
# structured outputs for every later call on this adapter instance.
def chat(messages:, signature: nil, response_format: nil, &block)
  saved_structured_outputs = @structured_outputs_enabled
  begin
    super
  rescue => e
    # Only fall back when the failure is plausibly caused by response_format
    # and a signature was provided (i.e., structured output was requested).
    if @structured_outputs_enabled && signature && e.message.include?('response_format')
      DSPy.logger.debug("Ollama structured output failed, falling back to enhanced prompting")
      @structured_outputs_enabled = false
      retry
    else
      raise
    end
  ensure
    # Restore per-call: the fallback should not leak into future requests.
    @structured_outputs_enabled = saved_structured_outputs
  end
end