Class: LLMs::Executors::BaseExecutor

Inherits: Object
Defined in:
lib/llms/executors/base_executor.rb

Constant Summary collapse

DEFAULT_TEMPERATURE =
0.0

Instance Attribute Summary collapse

Instance Method Summary collapse

Constructor Details

#initialize(**params) ⇒ BaseExecutor

Returns a new instance of BaseExecutor.



17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
# File 'lib/llms/executors/base_executor.rb', line 17

# Constructs an executor from keyword options.
#
# Required: :model_name — raises LLMs::ConfigurationError when absent.
# Connection options: :base_url, :api_key (takes precedence over the key read
# from :api_key_env_var), :pricing, :exclude_params.
# Execution options: :system_prompt, :temperature (validated; defaults to
# DEFAULT_TEMPERATURE), :tools, :cache_prompt, :max_tokens,
# :max_completion_tokens, :max_thinking_tokens (each validated as a positive
# Integer or nil), :thinking (defaults to false), :thinking_effort (validated).
def initialize(**params)
  raise LLMs::ConfigurationError, "model_name is required" if params[:model_name].nil?

  @model_name = params[:model_name]

  # -- Connection settings --
  @base_url        = params[:base_url]
  @api_key         = params[:api_key] ## Will take precedence over an env var if present
  @api_key_env_var = params[:api_key_env_var]
  @pricing         = params[:pricing]
  @exclude_params  = params[:exclude_params]

  # -- Execution settings --
  @system_prompt   = params[:system_prompt]
  @temperature     = validate_temperature(params[:temperature] || DEFAULT_TEMPERATURE)
  @available_tools = params[:tools]

  @cache_prompt = params[:cache_prompt] ## TODO caching is automatic for most models now

  @max_tokens            = validate_positive_integer_or_nil(params[:max_tokens], "max_tokens")
  @max_completion_tokens = validate_positive_integer_or_nil(params[:max_completion_tokens], "max_completion_tokens")
  @max_thinking_tokens   = validate_positive_integer_or_nil(params[:max_thinking_tokens], "max_thinking_tokens")

  # fetch keeps an explicitly supplied falsy :thinking value, matching the
  # original key?-based ternary exactly.
  @thinking_mode   = params.fetch(:thinking, false)
  @thinking_effort = validate_thinking_effort(params[:thinking_effort])

  ##TODO warn if max_tokens is used instead of max_completion_tokens and model is a thinking model (or thinking is enabled)

  # Per-call bookkeeping starts out empty until an execution runs.
  @last_sent_message = @last_received_message = @last_usage_data = @last_error = nil

  initialize_client
end

Instance Attribute Details

#available_toolsObject (readonly)

Returns the value of attribute available_tools.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Tools exposed to the model, as supplied via params[:tools] (may be nil).
def available_tools
  @available_tools
end

#base_urlObject (readonly)

Returns the value of attribute base_url.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Optional API base URL override, as supplied via params[:base_url].
def base_url
  @base_url
end

#clientObject (readonly)

Returns the value of attribute client.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Underlying API client — presumably built by #initialize_client at the end
# of construction; confirm against the concrete subclass.
def client
  @client
end

#last_errorObject (readonly)

Returns the value of attribute last_error.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Error from the most recent execution; reset to nil in #initialize and
# presumably populated by subclasses on failure — verify in subclass.
def last_error
  @last_error
end

#last_received_messageObject (readonly)

Returns the value of attribute last_received_message.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Most recent response message; reset to nil in #initialize, presumably
# assigned by subclasses after each execution.
def last_received_message
  @last_received_message
end

#last_received_message_idObject (readonly)

Returns the value of attribute last_received_message_id.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# ID of the most recent response message. NOTE(review): not initialized in
# #initialize — stays nil until a subclass assigns it.
def last_received_message_id
  @last_received_message_id
end

#last_sent_messageObject (readonly)

Returns the value of attribute last_sent_message.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Most recent request message sent; reset to nil in #initialize, presumably
# assigned by subclasses when executing.
def last_sent_message
  @last_sent_message
end

#last_usage_dataObject (readonly)

Returns the value of attribute last_usage_data.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Usage/token accounting from the most recent execution; reset to nil in
# #initialize, presumably populated by subclasses.
def last_usage_data
  @last_usage_data
end

#max_completion_tokensObject (readonly)

Returns the value of attribute max_completion_tokens.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Completion-token cap; validated at construction as a positive Integer or nil.
def max_completion_tokens
  @max_completion_tokens
end

#max_thinking_tokensObject (readonly)

Returns the value of attribute max_thinking_tokens.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Thinking-token cap; validated at construction as a positive Integer or nil.
def max_thinking_tokens
  @max_thinking_tokens
end

#max_tokensObject (readonly)

Returns the value of attribute max_tokens.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Total-token cap; validated at construction as a positive Integer or nil.
def max_tokens
  @max_tokens
end

#model_nameObject (readonly)

Returns the value of attribute model_name.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Name of the target model; required at construction (constructor raises
# LLMs::ConfigurationError when it is missing).
def model_name
  @model_name
end

#system_promptObject (readonly)

Returns the value of attribute system_prompt.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Default system prompt, used by #execute_prompt when none is given per call.
def system_prompt
  @system_prompt
end

#temperatureObject (readonly)

Returns the value of attribute temperature.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Sampling temperature; validated at construction, defaults to
# DEFAULT_TEMPERATURE (0.0).
def temperature
  @temperature
end

#thinking_effortObject (readonly)

Returns the value of attribute thinking_effort.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Thinking-effort setting, validated at construction from
# params[:thinking_effort].
def thinking_effort
  @thinking_effort
end

#thinking_modeObject (readonly)

Returns the value of attribute thinking_mode.



11
12
13
# File 'lib/llms/executors/base_executor.rb', line 11

# Whether extended thinking was requested (params[:thinking]; defaults to
# false when the key is absent).
def thinking_mode
  @thinking_mode
end

Instance Method Details

#execute_conversation(conversation, &block) ⇒ Object

Raises:

  • (NotImplementedError)


63
64
65
# File 'lib/llms/executors/base_executor.rb', line 63

# Abstract hook: concrete executors perform the actual model call here.
# @raise [NotImplementedError] always, in this base class
def execute_conversation(conversation, &block)
  raise NotImplementedError, "Subclasses must implement execute_conversation"
end

#execute_prompt(prompt, system_prompt: nil, &block) ⇒ Object



53
54
55
56
57
58
59
60
61
# File 'lib/llms/executors/base_executor.rb', line 53

# Convenience wrapper: wraps +prompt+ in a single-turn Conversation (applying
# the per-call system prompt, or the instance default when none is given),
# delegates to #execute_conversation, and returns the response text — or nil
# when no response message came back.
def execute_prompt(prompt, system_prompt: nil, &block)
  convo = LLMs::Conversation.new
  effective_system_prompt = system_prompt || @system_prompt
  convo.set_system_message(effective_system_prompt) if effective_system_prompt
  convo.add_user_message(prompt)
  reply = execute_conversation(convo, &block)
  reply&.text
end