Class: Agentic::LlmClient

Inherits:
Object
Defined in:
lib/agentic/llm_client.rb

Overview

A generic wrapper around LLM API clients (currently OpenAI::Client) that adds retry handling for transient errors, structured-output parsing, and consistent error mapping.

Instance Attribute Summary

Instance Method Summary

Constructor Details

#initialize(config, retry_config = {}) ⇒ LlmClient

Initializes a new LlmClient

Parameters:

  • config (LlmConfig)

    The configuration for the LLM

  • retry_config (RetryConfig, Hash) (defaults to: {})

    Configuration for the retry handler



# File 'lib/agentic/llm_client.rb', line 22

def initialize(config, retry_config = {})
  client_options = {access_token: Agentic.configuration.access_token}

  # Add custom base URL if configured (for Ollama, etc.)
  if Agentic.configuration.api_base_url
    client_options[:uri_base] = Agentic.configuration.api_base_url
  end

  @client = OpenAI::Client.new(client_options)
  @config = config
  @last_response = nil

  # Convert retry_config to RetryConfig if it's a hash
  @retry_handler = if retry_config.is_a?(RetryConfig)
    retry_config.to_handler
  else
    RetryHandler.new(**retry_config)
  end
end
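
A minimal construction sketch. The LlmConfig arguments and the retry option name below are assumptions for illustration; only the initialize signature above comes from this class:

config = Agentic::LlmConfig.new(model: "gpt-4o")  # hypothetical LlmConfig arguments

# With the default retry behavior:
client = Agentic::LlmClient.new(config)

# Or with explicit retry settings, passed as a RetryConfig or as a Hash of
# RetryHandler keyword arguments (option names assumed):
client = Agentic::LlmClient.new(config, {max_attempts: 3})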

Instance Attribute Details

#client ⇒ OpenAI::Client (readonly)

Returns The underlying LLM client instance.

Returns:

  • (OpenAI::Client)

    The underlying LLM client instance



# File 'lib/agentic/llm_client.rb', line 14

def client
  @client
end

#last_response ⇒ Hash? (readonly)

Returns The raw response from the most recent LLM request, or nil if no request has been made.

Returns:

  • (Hash, nil)

    The raw response from the most recent LLM request



# File 'lib/agentic/llm_client.rb', line 14

def last_response
  @last_response
end

#retry_handler ⇒ RetryHandler (readonly)

Returns The retry handler for transient errors.

Returns:

  • (RetryHandler)

    The retry handler for transient errors

# File 'lib/agentic/llm_client.rb', line 17

def retry_handler
  @retry_handler
end

Instance Method Details

#complete(messages, output_schema: nil, fail_on_error: false, use_retries: true, options: {}) ⇒ LlmResponse

Sends a completion request to the LLM

Parameters:

  • messages (Array<Hash>)

    The messages to send

  • output_schema (Agentic::StructuredOutputs::Schema, nil) (defaults to: nil)

    Optional schema for structured output

  • fail_on_error (Boolean) (defaults to: false)

    Whether to raise errors or return them as part of the response

  • use_retries (Boolean) (defaults to: true)

    Whether to retry on transient errors

  • options (Hash) (defaults to: {})

    Additional options to override the config

Returns:

  • (LlmResponse)

    The structured response from the LLM



# File 'lib/agentic/llm_client.rb', line 49

def complete(messages, output_schema: nil, fail_on_error: false, use_retries: true, options: {})
  # Start with base parameters from the config
  parameters = @config.to_api_parameters({messages: messages})

  # Add response format if schema is provided
  if output_schema
    parameters[:response_format] = {
      type: "json_schema",
      json_schema: output_schema.to_hash
    }
  end

  # Override with any additional options
  parameters.merge!(options)

  execution_method = use_retries ? method(:with_retry) : method(:without_retry)
  execution_method.call(messages, parameters, output_schema, fail_on_error)
end
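
A usage sketch for #complete. The messages follow the OpenAI chat format; the per-call override works because options is merged after the config:

messages = [
  {role: "system", content: "You are a concise assistant."},
  {role: "user", content: "Summarize the plot of Hamlet in one sentence."}
]

# Plain-text completion; errors are returned inside the LlmResponse by default
response = client.complete(messages)

# Per-call options are merged last, so they override the config
response = client.complete(messages, options: {temperature: 0.2})

# Bypass the retry handler for a latency-sensitive call
response = client.complete(messages, use_retries: false)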

#models(fail_on_error: false) ⇒ Array<Hash>?

Fetches available models from the LLM provider

Parameters:

  • fail_on_error (Boolean) (defaults to: false)

    Whether to raise errors or return nil on error

Returns:

  • (Array<Hash>, nil)

    The list of available models, or nil if an error occurred and fail_on_error is false

Raises:

  • (Errors::LlmError)

    If fail_on_error is true and the request fails

# File 'lib/agentic/llm_client.rb', line 157

def models(fail_on_error: false)
  client.models.list&.dig("data")
rescue OpenAI::Error => e
  error = map_openai_error(e)
  Agentic.logger.error("OpenAI API error when listing models: #{error.message}")
  handle_error(error, fail_on_error)
  nil
rescue => e
  error = Errors::LlmError.new("Unexpected error listing models: #{e.message}")
  Agentic.logger.error("#{error.message}\n#{e.backtrace.join("\n")}")
  handle_error(error, fail_on_error)
  nil
end
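
A usage sketch. Each entry is the raw model Hash from the provider; the "id" key assumes the OpenAI models-list response shape:

if (models = client.models)
  models.each { |m| puts m["id"] }
else
  # nil means the request failed and was logged (fail_on_error defaults to false)
end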

#query_generation_stats(generation_id, fail_on_error: false) ⇒ Hash?

Queries generation stats for a given generation ID

Parameters:

  • generation_id (String)

    The ID of the generation

  • fail_on_error (Boolean) (defaults to: false)

    Whether to raise errors or return nil on error

Returns:

  • (Hash, nil)

    The generation stats, or nil if an error occurred and fail_on_error is false

Raises:

  • (Errors::LlmError)

    If fail_on_error is true and the request fails

# File 'lib/agentic/llm_client.rb', line 176

def query_generation_stats(generation_id, fail_on_error: false)
  client.query_generation_stats(generation_id)
rescue OpenAI::Error => e
  error = map_openai_error(e)
  Agentic.logger.error("OpenAI API error when querying generation stats: #{error.message}")
  handle_error(error, fail_on_error)
  nil
rescue => e
  error = Errors::LlmError.new("Unexpected error querying generation stats: #{e.message}")
  Agentic.logger.error("#{error.message}\n#{e.backtrace.join("\n")}")
  handle_error(error, fail_on_error)
  nil
end
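
A usage sketch with a hypothetical generation ID:

# With fail_on_error: true, a failed lookup raises instead of returning nil
stats = client.query_generation_stats("gen_abc123", fail_on_error: true)

# With the default fail_on_error: false, branch on nil instead
if (stats = client.query_generation_stats("gen_abc123"))
  puts stats.inspect
end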

#with_retry(messages, parameters, output_schema, fail_on_error) ⇒ LlmResponse

Executes the API call with retries for transient errors

Parameters:

  • messages (Array<Hash>)

    The messages being sent

  • parameters (Hash)

    The request parameters

  • output_schema (Agentic::StructuredOutputs::Schema, nil)

    Optional schema for structured output

  • fail_on_error (Boolean)

    Whether to raise errors or return them as part of the response

Returns:

  • (LlmResponse)

    The structured response from the LLM



# File 'lib/agentic/llm_client.rb', line 74

def with_retry(messages, parameters, output_schema, fail_on_error)
  retry_handler.with_retry do
    without_retry(messages, parameters, output_schema, fail_on_error)
  end
rescue Errors::LlmError => e
  # If we get here, we've exhausted retries or hit a non-retryable error
  Agentic.logger.error("Failed after retries: #{e.message}")
  handle_error(e, fail_on_error)
end
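
With fail_on_error: true, an error that survives the retry handler is raised to the caller. A sketch of catching it, rescuing the Errors::LlmError base class used throughout this file:

begin
  client.complete(messages, fail_on_error: true)
rescue Agentic::Errors::LlmError => e
  # Raised only after retries are exhausted or for a non-retryable error
  warn "LLM call failed: #{e.message}"
end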

#without_retry(messages, parameters, output_schema, fail_on_error) ⇒ LlmResponse

Executes the API call without retries

Parameters:

  • messages (Array<Hash>)

    The messages being sent

  • parameters (Hash)

    The request parameters

  • output_schema (Agentic::StructuredOutputs::Schema, nil)

    Optional schema for structured output

  • fail_on_error (Boolean)

    Whether to raise errors or return them as part of the response

Returns:

  • (LlmResponse)

    The structured response from the LLM



# File 'lib/agentic/llm_client.rb', line 90

def without_retry(messages, parameters, output_schema, fail_on_error)
  @last_response = client.chat(parameters: parameters)

  # Check for API-level refusal
  if (refusal = @last_response.dig("choices", 0, "message", "refusal"))
    refusal_error = Errors::LlmRefusalError.new(
      refusal,
      response: @last_response,
      context: {input_messages: extract_message_content(messages)}
    )

    Agentic.logger.warn("LLM refused the request: #{refusal} (Category: #{refusal_error.refusal_category})")

    if fail_on_error
      raise refusal_error
    else
      return LlmResponse.refusal(@last_response, refusal, refusal_error)
    end
  end

  # Process the response based on whether we expect structured output
  if output_schema
    begin
      content_text = @last_response.dig("choices", 0, "message", "content")
      if content_text.nil? || content_text.empty?
        error = Errors::LlmParseError.new("Empty content returned from LLM", response: @last_response)
        Agentic.logger.error(error.message)
        return handle_error(error, fail_on_error)
      end

      content = JSON.parse(content_text)
      LlmResponse.success(@last_response, content)
    rescue JSON::ParserError => e
      error = Errors::LlmParseError.new(
        "Failed to parse JSON response: #{e.message}",
        parse_exception: e,
        response: @last_response
      )
      Agentic.logger.error(error.message)
      handle_error(error, fail_on_error)
    end
  else
    content = @last_response.dig("choices", 0, "message", "content")
    LlmResponse.success(@last_response, content)
  end
rescue OpenAI::Error => e
  error = map_openai_error(e)
  Agentic.logger.error("OpenAI API error: #{error.message}")
  handle_error(error, fail_on_error)
rescue Net::ReadTimeout, Net::OpenTimeout => e
  error = Errors::LlmTimeoutError.new("Request to LLM timed out: #{e.message}", context: {timeout_type: e.class.name})
  Agentic.logger.error(error.message)
  handle_error(error, fail_on_error)
rescue JSON::ParserError => e
  error = Errors::LlmParseError.new("Failed to parse LLM response: #{e.message}", parse_exception: e)
  Agentic.logger.error(error.message)
  handle_error(error, fail_on_error)
rescue => e
  error = Errors::LlmError.new("Unexpected error in LLM request: #{e.message}", context: {error_class: e.class.name})
  Agentic.logger.error("#{error.message}\n#{e.backtrace.join("\n")}")
  handle_error(error, fail_on_error)
end
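
Whatever the outcome, the raw payload from the most recent request remains available via #last_response. A sketch reading token usage, assuming the standard OpenAI "usage" key in the chat response:

response = client.complete(messages, fail_on_error: true)
# Reaching this line with fail_on_error: true means no error and no refusal

usage = client.last_response&.dig("usage")
puts usage["total_tokens"] if usage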