Class: AIA::ChatProcessorService

Inherits: Object

- Object
- AIA::ChatProcessorService

Defined in: lib/aia/chat_processor_service.rb
Instance Method Summary
- #determine_operation_type ⇒ Object
- #initialize(ui_presenter, directive_processor = nil) ⇒ ChatProcessorService (constructor)
  A new instance of ChatProcessorService.
- #maybe_change_model ⇒ Object
- #output_response(response) ⇒ Object
- #process_next_prompts(response, prompt_handler) ⇒ Object
- #process_prompt(prompt) ⇒ Object
- #send_to_client(conversation_or_conversations) ⇒ Object
  The argument is an Array of Hashes (single model) or a Hash of Arrays (per-model contexts when using multiple models); each entry is one interchange with the LLM.
- #speak(text) ⇒ Object
Constructor Details
#initialize(ui_presenter, directive_processor = nil) ⇒ ChatProcessorService
Returns a new instance of ChatProcessorService.
```ruby
# File 'lib/aia/chat_processor_service.rb', line 5

def initialize(ui_presenter, directive_processor = nil)
  @ui_presenter        = ui_presenter
  @speaker             = AIA.speak? ? AiClient.new(AIA.config.speech_model) : nil
  @directive_processor = directive_processor
end
```
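A minimal construction sketch. Only the positional arguments are confirmed by the signature above; the `AIA::UIPresenter` and `AIA::DirectiveProcessor` class names are assumptions for illustration.

```ruby
# Hypothetical usage; collaborator class names are assumed, not
# confirmed by this page.
presenter  = AIA::UIPresenter.new
directives = AIA::DirectiveProcessor.new

service = AIA::ChatProcessorService.new(presenter, directives)

# The second argument is optional; without it no directive processing occurs.
service_without_directives = AIA::ChatProcessorService.new(presenter)
```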
Instance Method Details
#determine_operation_type ⇒ Object
```ruby
# File 'lib/aia/chat_processor_service.rb', line 140

def determine_operation_type
  # With multiple models, determine operation type from the first model
  # or provide a generic description
  if AIA.config.model.is_a?(Array) && AIA.config.model.size > 1
    "MULTI-MODEL PROCESSING"
  else
    mode = AIA.config.client.model.modalities
    mode.input.join(',') + " TO " + mode.output.join(',')
  end
end
```
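A sketch of the two return shapes. The modality strings come from the client model's metadata; "text" is an assumed example value.

```ruby
# Illustrative return values only (modality names are assumptions):
#
# Single model whose modalities are text-in / text-out:
#   determine_operation_type  #=> "text TO text"
#
# Multiple models configured (AIA.config.model is an Array of size > 1):
#   determine_operation_type  #=> "MULTI-MODEL PROCESSING"
```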
#maybe_change_model ⇒ Object
```ruby
# File 'lib/aia/chat_processor_service.rb', line 87

def maybe_change_model
  # With multiple models, we don't need to change the model in the same way.
  # The RubyLLMAdapter now handles multiple models internally.
  # This method is kept for backward compatibility but may not be needed.
  return if AIA.config.model.is_a?(Array)

  client_model = AIA.client.model.id  # RubyLLM::Model instance

  unless AIA.config.model.downcase.include?(client_model.downcase)
    AIA.client = AIA.client.class.new
  end
end
```
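The comparison is a case-insensitive substring check of the live client's model id inside the configured model string. A sketch with assumed model names:

```ruby
# Model names below are placeholders for illustration.
#
# AIA.config.model    #=> "openai/gpt-4o"
# AIA.client.model.id #=> "gpt-4o"
#   "openai/gpt-4o".include?("gpt-4o")  #=> true, client is kept
#
# AIA.config.model    #=> "claude-3-haiku"
# AIA.client.model.id #=> "gpt-4o"
#   no match, so the client is rebuilt via AIA.client.class.new
```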
#output_response(response) ⇒ Object
```ruby
# File 'lib/aia/chat_processor_service.rb', line 101

def output_response(response)
  speak(response)

  # Output to STDOUT or file based on out_file configuration
  if AIA.config.out_file.nil? || 'STDOUT' == AIA.config.out_file.upcase
    print "\nAI:\n "
    puts response
  else
    mode = AIA.append? ? 'a' : 'w'
    File.open(AIA.config.out_file, mode) do |file|
      file.puts "\nAI: "
      # Handle multi-line responses by adding proper indentation
      response_lines = response.to_s.split("\n")
      response_lines.each do |line|
        file.puts " #{line}"
      end
    end
  end

  if AIA.config.log_file
    File.open(AIA.config.log_file, 'a') do |f|
      f.puts "=== #{Time.now} ==="
      f.puts "Prompt: #{AIA.config.prompt_id}"
      f.puts "Response: #{response}"
      f.puts "==="
    end
  end
end
```
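A configuration sketch showing where the response lands, based on the settings read above; the file names are placeholders.

```ruby
# Placeholder values for illustration only.
AIA.config.out_file = nil         # or 'STDOUT': response printed to the terminal
AIA.config.out_file = 'reply.md'  # written to file; AIA.append? selects 'a' vs 'w' mode
AIA.config.log_file = 'aia.log'   # optional timestamped prompt/response transcript
```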
#process_next_prompts(response, prompt_handler) ⇒ Object
```ruby
# File 'lib/aia/chat_processor_service.rb', line 131

def process_next_prompts(response, prompt_handler)
  if @directive_processor.directive?(response)
    directive_result = @directive_processor.process(response, @history_manager.history)
    response = directive_result[:result]
    @history_manager.history = directive_result[:modified_history] if directive_result[:modified_history]
  end
end
```
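From the key accesses above, the directive processor's return value can be read off as a Hash with at least these two keys. A sketch, not a documented contract:

```ruby
# Shape implied by the method body; values are placeholders.
directive_result = {
  result: "text produced by the directive",  # replaces the response
  modified_history: nil                      # or a replacement history Array
}
```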
#process_prompt(prompt) ⇒ Object
```ruby
# File 'lib/aia/chat_processor_service.rb', line 25

def process_prompt(prompt)
  result = nil
  @ui_presenter.with_spinner("Processing", determine_operation_type) do
    result = send_to_client(prompt)
  end

  # Debug output to understand what we're receiving
  puts "[DEBUG ChatProcessor] Result class: #{result.class}" if AIA.config.debug
  puts "[DEBUG ChatProcessor] Result inspect: #{result.inspect[0..500]}..." if AIA.config.debug

  # Preserve token information if available for metrics
  if result.is_a?(String)
    puts "[DEBUG ChatProcessor] Processing as String" if AIA.config.debug
    { content: result, metrics: nil }
  elsif result.respond_to?(:multi_model?) && result.multi_model?
    puts "[DEBUG ChatProcessor] Processing as multi-model response" if AIA.config.debug
    # Handle multi-model response with metrics
    {
      content: result.content,
      metrics: nil,  # Individual model metrics handled separately
      multi_metrics: result.metrics_list
    }
  elsif result.respond_to?(:content)
    puts "[DEBUG ChatProcessor] Processing as standard response with content method" if AIA.config.debug
    # Standard response object with content method
    {
      content: result.content,
      metrics: {
        input_tokens: result.respond_to?(:input_tokens) ? result.input_tokens : nil,
        output_tokens: result.respond_to?(:output_tokens) ? result.output_tokens : nil,
        model_id: result.respond_to?(:model_id) ? result.model_id : nil
      }
    }
  else
    puts "[DEBUG ChatProcessor] Processing as fallback (unexpected type)" if AIA.config.debug
    # Fallback for unexpected response types
    { content: result.to_s, metrics: nil }
  end
end
```
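Whatever the client returns, the method normalizes it into one of three Hash shapes, following the branches above. The values below are placeholders:

```ruby
# String result:
string_shape = { content: "plain text", metrics: nil }

# Multi-model result:
multi_shape = {
  content: "combined text",
  metrics: nil,
  multi_metrics: []  # one metrics entry per model
}

# Standard response object:
standard_shape = {
  content: "text",
  metrics: { input_tokens: 42, output_tokens: 128, model_id: "gpt-4o" }
}
```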
#send_to_client(conversation_or_conversations) ⇒ Object
The argument is an Array of Hashes (single model) or a Hash of Arrays (per-model contexts when using multiple models); each entry is one interchange with the LLM.
```ruby
# File 'lib/aia/chat_processor_service.rb', line 68

def send_to_client(conversation_or_conversations)
  maybe_change_model

  # Handle per-model conversations (Hash) or single conversation (Array) - ADR-002 revised
  if conversation_or_conversations.is_a?(Hash)
    # Multi-model with per-model contexts: pass Hash directly to adapter
    puts "[DEBUG ChatProcessor] Sending per-model conversations to client" if AIA.config.debug
    result = AIA.client.chat(conversation_or_conversations)
  else
    # Single conversation for single model
    puts "[DEBUG ChatProcessor] Sending conversation to client: #{conversation_or_conversations.inspect[0..500]}..." if AIA.config.debug
    result = AIA.client.chat(conversation_or_conversations)
  end

  puts "[DEBUG ChatProcessor] Client returned: #{result.class} - #{result.inspect[0..500]}..." if AIA.config.debug
  result
end
```
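A sketch of the two accepted argument shapes, given a `service` instance from the constructor example. The role/content keys and model names are assumptions for illustration; only the Array-vs-Hash dispatch is confirmed by the code.

```ruby
# Single model: an Array of interchange Hashes (keys are assumed).
single_conversation = [
  { role: 'user',      content: 'Summarize this file.' },
  { role: 'assistant', content: 'Here is the summary...' }
]

# Multiple models: a Hash keyed by model, each with its own context.
per_model_conversations = {
  'gpt-4o'          => [{ role: 'user', content: 'Summarize this file.' }],
  'claude-3-sonnet' => [{ role: 'user', content: 'Summarize this file.' }]
}

service.send_to_client(single_conversation)      # Array -> single response
service.send_to_client(per_model_conversations)  # Hash  -> multi-model response
```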
#speak(text) ⇒ Object
```ruby
# File 'lib/aia/chat_processor_service.rb', line 12

def speak(text)
  return unless AIA.speak?

  @speaker ||= AiClient.new(AIA.config.speech_model) if AIA.config.speech_model

  if @speaker
    `#{AIA.config.speak_command} #{@speaker.speak(text).path}`
  else
    puts "Warning: Unable to speak. Speech model not configured properly."
  end
end
```
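A configuration sketch for speech output, based on the two settings read above; both values are placeholders, and the actual model and player command depend on your setup.

```ruby
# Placeholder values for illustration only.
AIA.config.speech_model  = 'tts-1'   # handed to AiClient.new for synthesis
AIA.config.speak_command = 'afplay'  # shell command invoked with the audio file path
```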