Module: LLM::OpenAI

Defined in:
lib/scout/llm/backends/openai.rb

Class Method Summary

  .ask(question, options = {}, &block) ⇒ Object
  .client(url = nil, key = nil, log_errors = false, request_timeout: 1200) ⇒ Object
  .embed(text, options = {}) ⇒ Object
  .process_input(messages) ⇒ Object
  .process_response(response, tools, &block) ⇒ Object

Class Method Details

.ask(question, options = {}, &block) ⇒ Object



# File 'lib/scout/llm/backends/openai.rb', line 40

def self.ask(question, options = {}, &block)
  original_options = options.dup

  messages = LLM.chat(question)
  options = options.merge LLM.options messages

  client, url, key, model, log_errors, return_messages, format, tool_choice_next, previous_response_id, tools = IndiferentHash.process_options options,
    :client, :url, :key, :model, :log_errors, :return_messages, :format, :tool_choice_next, :previous_response_id, :tools,
    log_errors: true, tool_choice_next: :none

  if client.nil?
    url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
    key ||= LLM.get_url_config(:key, url, :openai_ask, :ask, :openai, env: 'OPENAI_KEY')
    client = self.client url, key, log_errors
  end

  if model.nil?
    url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
    model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-4.1")
  end

  if format
    case format
    when Hash
      options[:response_format] = format
    when 'json', 'json_object', :json, :json_object
      options[:response_format] = {type: 'json_object'}
    else
      options[:response_format] = {type: format}
    end
  end

  parameters = options.merge(model: model)

  # Process tools

  case tools
  when Array
    tools = tools.inject({}) do |acc,definition|
      IndiferentHash.setup definition
      name = definition.dig('name') || definition.dig('function', 'name')
      acc.merge(name => definition)
    end
  when nil
    tools = {}
  end

  tools.merge!(LLM.tools messages)
  tools.merge!(LLM.associations messages)

  if tools.any?
    parameters[:tools] = LLM.tool_definitions_to_openai tools
  end

  messages = self.process_input messages

  Log.debug "Calling openai #{url}: #{Log.fingerprint(parameters.except(:tools))}"
  Log.high "Tools: #{Log.fingerprint tools.keys}" if tools.any?

  parameters[:messages] = LLM.tools_to_openai messages

  response = self.process_response client.chat(parameters: parameters), tools, &block

  res = if response.last[:role] == 'function_call_output'
          response + self.ask(messages + response, original_options.merge(tool_choice: tool_choice_next, return_messages: true, tools: tools), &block)
        else
          response
        end

  if return_messages
    res
  else
    res.last['content']
  end
end
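
A minimal usage sketch; the prompts and the model name are illustrative, and an OpenAI key is assumed to be available through Scout::Config or the OPENAI_KEY environment variable:

  require 'scout/llm/backends/openai'

  # Plain question; returns the content of the final assistant message
  answer = LLM::OpenAI.ask "What is the capital of France?", model: 'gpt-4.1'

  # Request a JSON object and keep the full message trail instead of just the content
  trail = LLM::OpenAI.ask "List three primary colors as a JSON object",
    format: :json, return_messages: true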

.client(url = nil, key = nil, log_errors = false, request_timeout: 1200) ⇒ Object



# File 'lib/scout/llm/backends/openai.rb', line 8

def self.client(url = nil, key = nil, log_errors = false, request_timeout: 1200)
  url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
  key ||= LLM.get_url_config(:key, url, :openai_ask, :ask, :openai, env: 'OPENAI_KEY')
  Object::OpenAI::Client.new(access_token: key, log_errors: log_errors, uri_base: url, request_timeout: request_timeout)
end
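
For illustration, a client pointed at the public OpenAI API base URL; when arguments are omitted they fall back to Scout::Config and the OPENAI_URL / OPENAI_KEY environment variables:

  # Explicit configuration with error logging and a shorter timeout
  client = LLM::OpenAI.client 'https://api.openai.com/v1', ENV['OPENAI_KEY'], true, request_timeout: 300

  # Rely entirely on the configured defaults
  client = LLM::OpenAI.client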

.embed(text, options = {}) ⇒ Object



# File 'lib/scout/llm/backends/openai.rb', line 114

def self.embed(text, options = {})

  client, url, key, model, log_errors = IndiferentHash.process_options options, :client, :url, :key, :model, :log_errors

  if client.nil?
    url ||= Scout::Config.get(:url, :openai_embed, :embed, :openai, env: 'OPENAI_URL')
    key ||= LLM.get_url_config(:key, url, :openai_embed, :embed, :openai, env: 'OPENAI_KEY')
    client = self.client url, key, log_errors
  end

  if model.nil?
    url ||= Scout::Config.get(:url, :openai_embed, :embed, :openai, env: 'OPENAI_URL')
    model ||= LLM.get_url_config(:model, url, :openai_embed, :embed, :openai, env: 'OPENAI_MODEL', default: "gpt-3.5-turbo")
  end

  response = client.embeddings(parameters: {input: text, model: model})
  response.dig('data', 0, 'embedding')
end
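
A short sketch of embedding a string; the model name here is illustrative, and any embeddings-capable model configured for the endpoint will do:

  vector = LLM::OpenAI.embed "Semantic search runs on embeddings", model: 'text-embedding-3-small'
  vector.length # => the embedding dimension
  vector.first  # => a Float component of the embedding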

.process_input(messages) ⇒ Object



# File 'lib/scout/llm/backends/openai.rb', line 14

def self.process_input(messages)
  messages.collect do |message|
    if message[:role] == 'image'
      Log.warn "Endpoint 'openai' does not support images, try 'responses': #{message[:content]}"
      next
    else
      message
    end
  end.flatten.compact
end
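
An example of the filtering this performs; the messages and image path are hypothetical:

  messages = [
    {role: 'user', content: 'Describe the attached picture'},
    {role: 'image', content: '/tmp/picture.png'}
  ]

  LLM::OpenAI.process_input messages
  # => [{role: 'user', content: 'Describe the attached picture'}]
  # The image message is dropped and a warning pointing at the 'responses' endpoint is logged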

.process_response(response, tools, &block) ⇒ Object

Raises:

  • (Exception)


# File 'lib/scout/llm/backends/openai.rb', line 25

def self.process_response(response, tools, &block)
  Log.debug "Response: #{Log.fingerprint response}"
  raise Exception, response["error"] if response["error"]

  message = response.dig("choices", 0, "message")
  tool_calls = response.dig("choices", 0, "tool_calls") ||
    response.dig("choices", 0, "message", "tool_calls")

  if tool_calls && tool_calls.any?
    LLM.process_calls(tools, tool_calls, &block)
  else
    [message]
  end
end
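
A sketch of the no-tool-call path through this method, using a hand-built hash that mirrors the OpenAI chat completions payload; the assistant message comes back as a one-element array:

  response = {
    'choices' => [
      {'message' => {'role' => 'assistant', 'content' => 'Hello!'}}
    ]
  }

  LLM::OpenAI.process_response response, {}
  # => [{'role' => 'assistant', 'content' => 'Hello!'}]

When the message carries tool_calls instead, they are handed to LLM.process_calls together with the tools table and the optional block.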