Class: LLMs::Executors::BaseExecutor
- Inherits: Object
- Defined in: lib/llms/executors/base_executor.rb
Direct Known Subclasses
AnthropicExecutor, GoogleGeminiExecutor, OpenAICompatibleExecutor
Constant Summary
- DEFAULT_TEMPERATURE = 0.0
Instance Attribute Summary
- #available_tools ⇒ Object (readonly): Returns the value of attribute available_tools.
- #base_url ⇒ Object (readonly): Returns the value of attribute base_url.
- #client ⇒ Object (readonly): Returns the value of attribute client.
- #last_error ⇒ Object (readonly): Returns the value of attribute last_error.
- #last_received_message ⇒ Object (readonly): Returns the value of attribute last_received_message.
- #last_received_message_id ⇒ Object (readonly): Returns the value of attribute last_received_message_id.
- #last_sent_message ⇒ Object (readonly): Returns the value of attribute last_sent_message.
- #last_usage_data ⇒ Object (readonly): Returns the value of attribute last_usage_data.
- #max_completion_tokens ⇒ Object (readonly): Returns the value of attribute max_completion_tokens.
- #max_thinking_tokens ⇒ Object (readonly): Returns the value of attribute max_thinking_tokens.
- #max_tokens ⇒ Object (readonly): Returns the value of attribute max_tokens.
- #model_name ⇒ Object (readonly): Returns the value of attribute model_name.
- #system_prompt ⇒ Object (readonly): Returns the value of attribute system_prompt.
- #temperature ⇒ Object (readonly): Returns the value of attribute temperature.
- #thinking_effort ⇒ Object (readonly): Returns the value of attribute thinking_effort.
- #thinking_mode ⇒ Object (readonly): Returns the value of attribute thinking_mode.
Instance Method Summary
- #execute_conversation(conversation, &block) ⇒ Object
- #execute_prompt(prompt, system_prompt: nil, &block) ⇒ Object
- #initialize(**params) ⇒ BaseExecutor (constructor): A new instance of BaseExecutor.
Constructor Details
#initialize(**params) ⇒ BaseExecutor
Returns a new instance of BaseExecutor.
    # File 'lib/llms/executors/base_executor.rb', line 17

    def initialize(**params)
      raise LLMs::ConfigurationError, "model_name is required" if params[:model_name].nil?
      @model_name = params[:model_name]

      # Connection Info
      @base_url = params[:base_url]
      @api_key = params[:api_key] ## Will take precedence over an env var if present
      @api_key_env_var = params[:api_key_env_var]
      @pricing = params[:pricing]
      @exclude_params = params[:exclude_params]

      # Execution Info
      @system_prompt = params[:system_prompt]
      @temperature = validate_temperature(params[:temperature] || DEFAULT_TEMPERATURE)
      @available_tools = params[:tools]
      @cache_prompt = params[:cache_prompt] ## TODO caching is automatic for most models now
      @max_tokens = validate_positive_integer_or_nil(params[:max_tokens], "max_tokens")
      @max_completion_tokens = validate_positive_integer_or_nil(params[:max_completion_tokens], "max_completion_tokens")
      @max_thinking_tokens = validate_positive_integer_or_nil(params[:max_thinking_tokens], "max_thinking_tokens")
      @thinking_mode = params.key?(:thinking) ? params[:thinking] : false
      @thinking_effort = validate_thinking_effort(params[:thinking_effort])

      ## TODO warn if max_tokens is used instead of max_completion_tokens and model is a thinking model (or thinking is enabled)

      @last_sent_message = nil
      @last_received_message = nil
      @last_usage_data = nil
      @last_error = nil

      initialize_client
    end
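For orientation, a minimal construction sketch. BaseExecutor is abstract, so one of the subclasses listed above is instantiated; the require path, model name, and environment variable are illustrative assumptions, not values prescribed by this class:

    require 'llms'  # require path assumed

    executor = LLMs::Executors::AnthropicExecutor.new(
      model_name: 'claude-sonnet-4',          # required; LLMs::ConfigurationError if nil
      api_key_env_var: 'ANTHROPIC_API_KEY',   # consulted when :api_key is not passed directly
      temperature: 0.2,                       # defaults to DEFAULT_TEMPERATURE (0.0)
      max_completion_tokens: 1024
    )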
Instance Attribute Details
#available_tools ⇒ Object (readonly)
Returns the value of attribute available_tools.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def available_tools
      @available_tools
    end
#base_url ⇒ Object (readonly)
Returns the value of attribute base_url.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def base_url
      @base_url
    end
#client ⇒ Object (readonly)
Returns the value of attribute client.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def client
      @client
    end
#last_error ⇒ Object (readonly)
Returns the value of attribute last_error.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def last_error
      @last_error
    end
#last_received_message ⇒ Object (readonly)
Returns the value of attribute last_received_message.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def last_received_message
      @last_received_message
    end
#last_received_message_id ⇒ Object (readonly)
Returns the value of attribute last_received_message_id.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def last_received_message_id
      @last_received_message_id
    end
#last_sent_message ⇒ Object (readonly)
Returns the value of attribute last_sent_message.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def last_sent_message
      @last_sent_message
    end
#last_usage_data ⇒ Object (readonly)
Returns the value of attribute last_usage_data.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def last_usage_data
      @last_usage_data
    end
#max_completion_tokens ⇒ Object (readonly)
Returns the value of attribute max_completion_tokens.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def max_completion_tokens
      @max_completion_tokens
    end
#max_thinking_tokens ⇒ Object (readonly)
Returns the value of attribute max_thinking_tokens.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def max_thinking_tokens
      @max_thinking_tokens
    end
#max_tokens ⇒ Object (readonly)
Returns the value of attribute max_tokens.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def max_tokens
      @max_tokens
    end
#model_name ⇒ Object (readonly)
Returns the value of attribute model_name.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def model_name
      @model_name
    end
#system_prompt ⇒ Object (readonly)
Returns the value of attribute system_prompt.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def system_prompt
      @system_prompt
    end
#temperature ⇒ Object (readonly)
Returns the value of attribute temperature.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def temperature
      @temperature
    end
#thinking_effort ⇒ Object (readonly)
Returns the value of attribute thinking_effort.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def thinking_effort
      @thinking_effort
    end
#thinking_mode ⇒ Object (readonly)
Returns the value of attribute thinking_mode.
    # File 'lib/llms/executors/base_executor.rb', line 11

    def thinking_mode
      @thinking_mode
    end
Instance Method Details
#execute_conversation(conversation, &block) ⇒ Object
    # File 'lib/llms/executors/base_executor.rb', line 63

    def execute_conversation(conversation, &block)
      raise NotImplementedError, "Subclasses must implement execute_conversation"
    end
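The base implementation only raises; each provider executor supplies its own version. A shape-only sketch of a subclass override follows (the body is illustrative and not the actual implementation of any listed subclass):

    class MyExecutor < LLMs::Executors::BaseExecutor
      def execute_conversation(conversation, &block)
        # Translate `conversation` into the provider's request format,
        # send it via @client (set up by initialize_client), record
        # @last_sent_message / @last_received_message / @last_usage_data,
        # and return the received assistant message.
      end
    end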
#execute_prompt(prompt, system_prompt: nil, &block) ⇒ Object
    # File 'lib/llms/executors/base_executor.rb', line 53

    def execute_prompt(prompt, system_prompt: nil, &block)
      conversation = LLMs::Conversation.new
      if sp = system_prompt || @system_prompt
        conversation.set_system_message(sp)   # identifier missing in the source listing; name assumed
      end
      conversation.add_user_message(prompt)   # identifier missing in the source listing; name assumed
      message = self.execute_conversation(conversation, &block)
      message&.text
    end
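A minimal usage sketch, assuming `executor` is an instance of one of the concrete subclasses listed at the top of this page; what the optional block receives is defined by each subclass's execute_conversation, so it is omitted here:

    text = executor.execute_prompt(
      "Summarize the plot of Hamlet in two sentences.",
      system_prompt: "You are a terse literary assistant."
    )
    puts text

    # Tracking attributes defined by the base class (populated by concrete executors):
    p executor.last_usage_data
    p executor.last_error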