Class: Botiasloop::Loop
- Inherits: Object
- Hierarchy: Object → Botiasloop::Loop
- Defined in: lib/botiasloop/loop.rb
Constant Summary collapse
- MAX_TOOL_RETRIES =
3
Instance Method Summary collapse
-
#initialize(provider, model, registry, max_iterations: 20) ⇒ Loop
constructor
Initialize the ReAct loop.
-
#run(conversation, user_input, verbose_callback = nil) ⇒ String
Run the ReAct loop.
Constructor Details
#initialize(provider, model, registry, max_iterations: 20) ⇒ Loop

Initialize the ReAct loop.

# File 'lib/botiasloop/loop.rb', lines 15–20
def initialize(provider, model, registry, max_iterations: 20)
  @provider = provider
  @model = model
  @registry = registry
  @max_iterations = max_iterations
end
Instance Method Details
#run(conversation, user_input, verbose_callback = nil) ⇒ String

Run the ReAct loop.

# File 'lib/botiasloop/loop.rb', lines 29–72
# NOTE(review): several identifiers were stripped when this page was extracted;
# they are marked [lost] below — verify each against lib/botiasloop/loop.rb.
def run(conversation, user_input, verbose_callback = nil)
  @conversation = conversation
  @verbose_callback = verbose_callback
  conversation.add("user", user_input)
  [lost] = [lost](conversation) # presumably builds the message list from the conversation — confirm

  # Track accumulated tokens across all iterations
  total_input_tokens = 0
  total_output_tokens = 0

  @max_iterations.times do
    response = iterate([lost]) # original argument lost in extraction — likely the message list

    # Accumulate tokens from this response
    total_input_tokens += response.input_tokens || 0
    total_output_tokens += response.output_tokens || 0

    if response.tool_call?
      # Send reasoning to verbose callback if verbose mode enabled
      if @conversation.verbose && @verbose_callback && response.content && !response.content.empty?
        reasoning_content = [lost](response.content) # formatting helper name lost — confirm
        @verbose_callback.call(reasoning_content)
      end

      # Add the assistant's message as a RubyLLM::Message object
      [lost] << RubyLLM::Message.new( # receiver lost — presumably the message list
        role: :assistant,
        content: response.content || ""
      )

      response.tool_calls.each_value do |tool_call|
        observation = execute_tool(tool_call)
        [lost] << [lost](tool_call.id, observation) # appends a tool-result message — names lost, confirm
      end
    else
      conversation.add("assistant", response.content,
                       input_tokens: total_input_tokens,
                       output_tokens: total_output_tokens)
      maybe_auto_label(conversation)
      return response.content
    end
  end

  raise MaxIterationsExceeded.new(@max_iterations)
end