Class: LastLLM::Providers::OpenAI

Inherits:
LastLLM::Provider show all
Defined in:
lib/last_llm/providers/openai.rb

Overview

OpenAI provider implementation

Constant Summary collapse

BASE_ENDPOINT =

API Configuration

'https://api.openai.com'
DEFAULT_MODEL =
'gpt-4o-mini'
EMBEDDINGS_MODEL =
'text-embedding-ada-002'
DEFAULT_TEMPERATURE =

LLM Default Parameters

0.7
DEFAULT_TOP_P =
0.7
DEFAULT_MAX_TOKENS =
4096
DEFAULT_TEMPERATURE_OBJECT =
0.2
SUCCESS_STATUS =

Response Configuration

200
UNAUTHORIZED_STATUS =

Error Status Codes

401
BAD_REQUEST_STATUS =
400

Instance Attribute Summary

Attributes inherited from LastLLM::Provider

#config, #name

Class Method Summary collapse

Instance Method Summary collapse

Methods inherited from LastLLM::Provider

#parse_response

Constructor Details

#initialize(config) ⇒ OpenAI

Returns a new instance of OpenAI.



27
28
29
30
31
# File 'lib/last_llm/providers/openai.rb', line 27

# Create an OpenAI provider backed by a Faraday connection.
#
# @param config [Hash] provider configuration; honors :base_url to
#   override the default API endpoint
def initialize(config)
  super(Constants::OPENAI, config)
  # Resolve the endpoint once so the connection and the log line
  # can never disagree (the original computed the fallback twice).
  endpoint = config[:base_url] || BASE_ENDPOINT
  @conn = connection(endpoint)
  logger.debug("#{@name}: Initialized OpenAI provider with endpoint: #{endpoint}")
end

Class Method Details

.execute_tool(tool, response) ⇒ Hash?

Execute a tool from an OpenAI response

Parameters:

  • tool (LastLLM::Tool)

    The tool to execute

  • response (Hash)

    The OpenAI response containing tool call information

Returns:

  • (Hash, nil)

    The result of the function call or nil if the tool wasn’t called



114
115
116
117
118
119
120
# File 'lib/last_llm/providers/openai.rb', line 114

# Execute a tool from an OpenAI response.
#
# @param tool [LastLLM::Tool] The tool to execute
# @param response [Hash] The OpenAI response containing tool call information
# @return [Hash, nil] The result of the function call, or nil if the tool
#   wasn't called (or the tool call is malformed)
def self.execute_tool(tool, response)
  tool_call = response[:tool_calls]&.first
  # Use dig so a malformed tool call (missing :function) yields nil
  # instead of raising NoMethodError on the nested lookup.
  return nil unless tool_call && tool_call.dig(:function, :name) == tool.name

  arguments = JSON.parse(tool_call.dig(:function, :arguments), symbolize_names: true)
  tool.call(arguments)
end

.format_tool(tool) ⇒ Hash

Format a tool for OpenAI function calling

Parameters:

Returns:

  • (Hash)

    The tool in OpenAI format



99
100
101
102
103
104
105
106
107
108
# File 'lib/last_llm/providers/openai.rb', line 99

# Build the OpenAI function-calling representation of a tool.
#
# @param tool [LastLLM::Tool] the tool to convert
# @return [Hash] the tool in OpenAI format
def self.format_tool(tool)
  function_spec = {
    name: tool.name,
    description: tool.description,
    parameters: tool.parameters
  }

  { type: 'function', function: function_spec }
end

Instance Method Details

#embeddings(text, options = {}) ⇒ Array<Float>

Generate embeddings from text

Parameters:

  • text (String)

    The text to generate embeddings for

  • options (Hash) (defaults to: {})

    Options for the embedding generation

Returns:

  • (Array<Float>)

    The embedding vector as an array of floats



66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
# File 'lib/last_llm/providers/openai.rb', line 66

# Generate an embedding vector for the given text via the OpenAI API.
#
# @param text [String] the text to embed (any object is coerced with #to_s)
# @param options [Hash] embedding options (:model, :encoding_format)
# @return [Array<Float>] the embedding vector as an array of floats
# @raise [LastLLM::ApiError] when the response contains no embedding array
def embeddings(text, options = {})
  input_text = text.to_s # the API requires a string input
  embedding_model = options[:model] || EMBEDDINGS_MODEL

  logger.info("#{@name}: Generating embeddings with model: #{embedding_model}")
  logger.debug("#{@name}: Text for embeddings: #{truncate_text(input_text)}")

  payload = {
    model: embedding_model,
    input: input_text,
    encoding_format: options[:encoding_format] || 'float'
  }.compact

  response = @conn.post('/v1/embeddings') do |req|
    req.body = payload
    logger.debug("#{@name}: Embedding request body: #{req.body.inspect}")
  end

  logger.info("#{@name}: API response status: #{response.status}")
  parsed = parse_response(response)

  # The vector lives at data[0].embedding in the API response.
  vector = parsed.dig(:data, 0, :embedding)
  logger.debug("#{@name}: Generated embeddings vector of length: #{vector&.length || 0}")

  raise LastLLM::ApiError.new('Invalid embeddings response format', nil) unless vector.is_a?(Array)

  vector
rescue Faraday::Error => e
  logger.error("#{@name}: API request failed: #{e.message}")
  handle_request_error(e)
end

#generate_object(prompt, schema, options = {}) ⇒ Object



45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
# File 'lib/last_llm/providers/openai.rb', line 45

# Generate a structured object from a prompt constrained by a JSON schema.
#
# @param prompt [Object] the prompt to send to the model
# @param schema [Object] the schema the response should conform to
# @param options [Hash] generation options (e.g. :model)
# @return [Object] the parsed JSON object (symbolized keys)
def generate_object(prompt, schema, options = {})
  model = get_model(options, DEFAULT_MODEL)
  logger.info("#{@name}: Generating object with model: #{model}")
  logger.debug("#{@name}: Object prompt: #{format_prompt_for_logging(prompt)}")

  make_object_request(prompt, schema, options) do |content|
    logger.debug("#{@name}: Raw JSON response: #{content}")
    parsed = JSON.parse(content, symbolize_names: true)

    # NOTE(review): presumably the model sometimes echoes the schema wrapper
    # back; in that case only the :properties payload is returned — confirm
    # against the other providers' handling.
    schema_echo = parsed.key?(:$schema) && parsed.key?(:properties)
    schema_echo ? parsed[:properties] : parsed
  end
end

#generate_text(prompt, options = {}) ⇒ Object



33
34
35
36
37
38
39
40
41
42
43
# File 'lib/last_llm/providers/openai.rb', line 33

# Generate free-form text from a prompt.
#
# @param prompt [Object] the prompt to send to the model
# @param options [Hash] generation options (e.g. :model)
# @return [String] the generated text ("" when the response has no content)
def generate_text(prompt, options = {})
  model = get_model(options, DEFAULT_MODEL)
  logger.info("#{@name}: Generating text with model: #{model}")
  logger.debug("#{@name}: Text prompt: #{format_prompt_for_logging(prompt)}")

  make_text_request(prompt, options) do |result|
    # Pull the first choice's message content; to_s maps a missing
    # path (nil) to an empty string.
    content = result.dig(:choices, 0, :message, :content).to_s
    logger.debug("#{@name}: Generated response of #{content.length} characters")
    content
  end
end