Class: AIA::RubyLLMAdapter

Inherits:
Object
Defined in:
lib/aia/ruby_llm_adapter.rb

Defined Under Namespace

Classes: MultiModelResponse

Instance Attribute Summary

Instance Method Summary

Constructor Details

#initialize ⇒ RubyLLMAdapter

Returns a new instance of RubyLLMAdapter.



# File 'lib/aia/ruby_llm_adapter.rb', line 10

def initialize
  @model_specs = extract_models_config  # Full specs with role info
  @models = extract_model_names(@model_specs)  # Just model names for backward compat
  @chats = {}
  @contexts = {} # Store isolated contexts for each model

  configure_rubyllm
  refresh_local_model_registry
  setup_chats_with_tools
end
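
A minimal usage sketch, assuming AIA.config has already been populated with one or more models by the CLI:

adapter = AIA::RubyLLMAdapter.new
adapter.chats.keys    # => internal model ids, e.g. ["gpt-4o#1"] (illustrative)
adapter.model_specs   # => full model specs, including any per-model roles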

Dynamic Method Handling

This class handles dynamic methods through the method_missing method

#method_missing(method, *args, &block) ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 624

def method_missing(method, *args, &block)
  # Use the first chat instance for backward compatibility with method_missing
  first_chat = @chats.values.first
  if first_chat&.respond_to?(method)
    first_chat.public_send(method, *args, &block)
  else
    super
  end
end
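
Unknown methods are forwarded to the first chat instance for backward compatibility. A sketch, assuming the underlying RubyLLM chat exposes a messages reader:

adapter.messages   # forwarded to @chats.values.first.messages (illustrative)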

Instance Attribute Details

#chats ⇒ Object (readonly)

Returns the value of attribute chats.



# File 'lib/aia/ruby_llm_adapter.rb', line 8

def chats
  @chats
end

#model_specs ⇒ Object (readonly)

Returns the value of attribute model_specs.



# File 'lib/aia/ruby_llm_adapter.rb', line 8

def model_specs
  @model_specs
end

#tools ⇒ Object (readonly)

Returns the value of attribute tools.



# File 'lib/aia/ruby_llm_adapter.rb', line 8

def tools
  @tools
end

Instance Method Details

#build_consensus_prompt(results) ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 437

def build_consensus_prompt(results)
  prompt_parts = []
  prompt_parts << "You are tasked with creating a consensus response based on multiple AI model responses to the same query."
  prompt_parts << "Please analyze the following responses and provide a unified, comprehensive answer that:"
  prompt_parts << "- Incorporates the best insights from all models"
  prompt_parts << "- Resolves any contradictions with clear reasoning"
  prompt_parts << "- Provides additional context or clarification when helpful"
  prompt_parts << "- Maintains accuracy and avoids speculation"
  prompt_parts << ""
  prompt_parts << "Model responses:"
  prompt_parts << ""

  results.each do |model_name, result|
    # Extract content from RubyLLM::Message if needed
    content = if result.respond_to?(:content)
                result.content
              else
                result.to_s
              end
    next if content.start_with?("Error with")
    prompt_parts << "#{model_name}:"
    prompt_parts << content
    prompt_parts << ""
  end

  prompt_parts << "Please provide your consensus response:"
  prompt_parts.join("\n")
end
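
An illustrative call with plain-string results (RubyLLM::Message values are handled the same way via their content method); entries beginning with "Error with" are skipped:

results = {
  'gpt-4o#1' => 'Answer from the first model ...',
  'gemini#1' => 'Answer from the second model ...',
  'broken#1' => 'Error with broken#1: timeout'   # excluded from the consensus prompt
}
build_consensus_prompt(results)   # => one prompt string containing only the successful answers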

#chat(prompt) ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 287

def chat(prompt)
  result = if @models.size == 1
    # Single model - use the original behavior
    single_model_chat(prompt, @models.first)
  else
    # Multiple models - use concurrent processing
    multi_model_chat(prompt)
  end

  result
end
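
Usage sketch (the prompt text is illustrative):

adapter.chat('Summarize ADR-002 in one sentence')
# one configured model  => that model's response
# several models        => combined per-model output, or a consensus response with --consensus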

#clear_context ⇒ Object

Clear the chat context/history. Needed for the //clear and //restore directives. Simplified with ADR-002: each model has an isolated context, so there is no global state to manage.



# File 'lib/aia/ruby_llm_adapter.rb', line 582

def clear_context
  old_chats = @chats.dup
  new_chats = {}

  @models.each do |model_name|
    begin
      # Get the isolated context for this model
      context = @contexts[model_name]
      actual_model, provider = extract_model_and_provider(model_name)

      # Create a fresh chat instance from the same isolated context
      chat = if provider
               context.chat(model: actual_model, provider: provider, assume_model_exists: true)
             else
               context.chat(model: actual_model)
             end

      # Re-add tools if they were previously loaded
      if @tools && !@tools.empty? && chat.model&.supports_functions?
        chat.with_tools(*@tools)
      end

      new_chats[model_name] = chat
    rescue StandardError => e
      # If recreation fails, keep the old chat but clear its messages
      warn "Warning: Could not recreate chat for #{model_name}: #{e.message}. Clearing existing chat."
      chat = old_chats[model_name]
      if chat&.instance_variable_defined?(:@messages)
        chat.instance_variable_set(:@messages, [])
      end
      chat.clear_history if chat&.respond_to?(:clear_history)
      new_chats[model_name] = chat
    end
  end

  @chats = new_chats
  'Chat context successfully cleared.'
rescue StandardError => e
  "Error clearing chat context: #{e.message}"
end
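
Usage sketch:

adapter.clear_context
#=> "Chat context successfully cleared."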

#configure_rubyllm ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 22

def configure_rubyllm
  # TODO: Add some of these configuration items to AIA.config
  RubyLLM.configure do |config|
    config.anthropic_api_key  = ENV.fetch('ANTHROPIC_API_KEY', nil)
    config.deepseek_api_key   = ENV.fetch('DEEPSEEK_API_KEY', nil)
    config.gemini_api_key     = ENV.fetch('GEMINI_API_KEY', nil)
    config.gpustack_api_key   = ENV.fetch('GPUSTACK_API_KEY', nil)
    config.mistral_api_key    = ENV.fetch('MISTRAL_API_KEY', nil)
    config.openrouter_api_key = ENV.fetch('OPENROUTER_API_KEY', nil)
    config.perplexity_api_key = ENV.fetch('PERPLEXITY_API_KEY', nil)

    # These providers require a little something extra
    config.openai_api_key         = ENV.fetch('OPENAI_API_KEY', nil)
    config.openai_organization_id = ENV.fetch('OPENAI_ORGANIZATION_ID', nil)
    config.openai_project_id      = ENV.fetch('OPENAI_PROJECT_ID', nil)

    config.bedrock_api_key       = ENV.fetch('BEDROCK_ACCESS_KEY_ID', nil)
    config.bedrock_secret_key    = ENV.fetch('BEDROCK_SECRET_ACCESS_KEY', nil)
    config.bedrock_region        = ENV.fetch('BEDROCK_REGION', nil)
    config.bedrock_session_token = ENV.fetch('BEDROCK_SESSION_TOKEN', nil)

    # Ollama is based upon the OpenAI API so it needs to override a few things
    config.ollama_api_base = ENV.fetch('OLLAMA_API_BASE', nil)

    # --- Custom OpenAI Endpoint ---
    # Use this for Azure OpenAI, proxies, or self-hosted models via OpenAI-compatible APIs.
    # For osaurus: Use model name prefix "osaurus/" and set OSAURUS_API_BASE env var
    # For LM Studio: Use model name prefix "lms/" and set LMS_API_BASE env var
    config.openai_api_base = ENV.fetch('OPENAI_API_BASE', nil) # e.g., "https://your-azure.openai.azure.com"

    # --- Default Models ---
    # Used by RubyLLM.chat, RubyLLM.embed, RubyLLM.paint if no model is specified.
    # config.default_model            = 'gpt-4.1-nano'            # Default: 'gpt-4.1-nano'
    # config.default_embedding_model  = 'text-embedding-3-small'  # Default: 'text-embedding-3-small'
    # config.default_image_model      = 'dall-e-3'                # Default: 'dall-e-3'

    # --- Connection Settings ---
    config.request_timeout            = 120 # Request timeout in seconds (default: 120)
    config.max_retries                = 3   # Max retries on transient network errors (default: 3)
    config.retry_interval             = 0.1 # Initial delay in seconds (default: 0.1)
    config.retry_backoff_factor       = 2   # Multiplier for subsequent retries (default: 2)
    config.retry_interval_randomness  = 0.5 # Jitter factor (default: 0.5)

    # Connection pooling settings removed - not supported in current RubyLLM version
    # config.connection_pool_size       = 10  # Number of connections to maintain in pool
    # config.connection_pool_timeout    = 60  # Connection pool timeout in seconds
    # config.log_file   = '/logs/ruby_llm.log'
    config.log_level = :fatal # set the RUBYLLM_DEBUG env var to true to switch the log level to :debug
  end
end
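
All credentials and endpoints are read from the environment before this method runs. A sketch for pointing the OpenAI-compatible settings at a custom endpoint (values are placeholders):

ENV['OPENAI_API_KEY']  = 'your-key-here'
ENV['OPENAI_API_BASE'] = 'https://your-azure.openai.azure.com'
ENV['OLLAMA_API_BASE'] = 'http://localhost:11434'   # typical local Ollama address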

#create_isolated_context_for_model(model_name) ⇒ Object

Create an isolated RubyLLM::Context for a model to prevent cross-talk (ADR-002). Each model gets its own context with provider-specific configuration.



# File 'lib/aia/ruby_llm_adapter.rb', line 87

def create_isolated_context_for_model(model_name)
  config = RubyLLM.config.dup

  # Apply provider-specific configuration
  if model_name.start_with?('lms/')
    config.openai_api_base = ENV.fetch('LMS_API_BASE', 'http://localhost:1234/v1')
    config.openai_api_key = 'dummy' # Local servers don't need a real API key
  elsif model_name.start_with?('osaurus/')
    config.openai_api_base = ENV.fetch('OSAURUS_API_BASE', 'http://localhost:11434/v1')
    config.openai_api_key = 'dummy' # Local servers don't need a real API key
  end

  RubyLLM::Context.new(config)
end
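
Illustrative calls (model names are examples):

create_isolated_context_for_model('lms/qwen2.5')   # OpenAI-compatible context using LMS_API_BASE (default http://localhost:1234/v1)
create_isolated_context_for_model('gpt-4o')        # plain copy of the global RubyLLM configuration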

#drop_duplicate_tools ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 267

def drop_duplicate_tools
  seen_names = Set.new
  original_size = @tools.size

  @tools.select! do |tool|
    tool_name = tool.name
    if seen_names.include?(tool_name)
      warn "WARNING: Duplicate tool name detected: '#{tool_name}'. Only the first occurrence will be used."
      false
    else
      seen_names.add(tool_name)
      true
    end
  end

  removed_count = original_size - @tools.size
  warn "Removed #{removed_count} duplicate tools" if removed_count > 0
end

#extract_model_and_provider(model_name) ⇒ Object

Extract the actual model name and provider from the prefixed model_name. Returns: [actual_model, provider], where provider may be nil for auto-detection.



# File 'lib/aia/ruby_llm_adapter.rb', line 105

def extract_model_and_provider(model_name)
  if model_name.start_with?('ollama/')
    [model_name.sub('ollama/', ''), 'ollama']
  elsif model_name.start_with?('lms/') || model_name.start_with?('osaurus/')
    [model_name.sub(%r{^(lms|osaurus)/}, ''), 'openai']
  else
    [model_name, nil] # Let RubyLLM auto-detect provider
  end
end
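
Illustrative return values (model names are examples):

extract_model_and_provider('ollama/llama3.1')   #=> ['llama3.1', 'ollama']
extract_model_and_provider('lms/qwen2.5')       #=> ['qwen2.5', 'openai']
extract_model_and_provider('gpt-4o')            #=> ['gpt-4o', nil]   # provider auto-detected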

#format_individual_responses(results) ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 466

def format_individual_responses(results)
  # For metrics support, return a special structure if all results have token info
  has_metrics = results.values.all? { |r| r.respond_to?(:input_tokens) && r.respond_to?(:output_tokens) }

  if has_metrics && AIA.config.show_metrics
    # Return structured data that preserves metrics for multi-model
    format_multi_model_with_metrics(results)
  else
    # Original string formatting for non-metrics mode with role labels (ADR-005)
    output = []
    results.each do |internal_id, result|
      # Get model spec to include role in output
      spec = get_model_spec(internal_id)
      display_name = format_model_display_name(spec)

      output << "from: #{display_name}"
      # Extract content from RubyLLM::Message if needed
      content = if result.respond_to?(:content)
                  result.content
                else
                  result.to_s
                end
      output << content
      output << "" # Add blank line between results
    end
    output.join("\n")
  end
end

#format_model_display_name(spec) ⇒ Object

Format display name with instance number and role (ADR-005)



# File 'lib/aia/ruby_llm_adapter.rb', line 496

def format_model_display_name(spec)
  return spec unless spec.is_a?(Hash)

  model_name = spec[:model]
  instance = spec[:instance]
  role = spec[:role]

  # Add instance number if > 1
  display = if instance > 1
              "#{model_name} ##{instance}"
            else
              model_name
            end

  # Add role label if present
  display += " (#{role})" if role

  display
end
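
Illustrative results:

format_model_display_name({ model: 'gpt-4o', instance: 2, role: 'critic' })  #=> "gpt-4o #2 (critic)"
format_model_display_name({ model: 'gpt-4o', instance: 1, role: nil })       #=> "gpt-4o"
format_model_display_name('gpt-4o')                                          #=> "gpt-4o" (non-Hash specs pass through)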

#format_multi_model_results(results) ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 398

def format_multi_model_results(results)
  use_consensus = should_use_consensus_mode?

  if use_consensus
    # Generate consensus response using primary model
    generate_consensus_response(results)
  else
    # Show individual responses from all models
    format_individual_responses(results)
  end
end

#format_multi_model_with_metrics(results) ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 516

def format_multi_model_with_metrics(results)
  # Create a composite response that includes all model responses and metrics
  formatted_content = []
  metrics_data = []

  results.each do |model_name, result|
    formatted_content << "from: #{model_name}"
    formatted_content << result.content
    formatted_content << ""

    # Collect metrics for each model
    metrics_data << {
      model_id: model_name,
      input_tokens: result.input_tokens,
      output_tokens: result.output_tokens
    }
  end

  # Return a special MultiModelResponse that ChatProcessorService can handle
  MultiModelResponse.new(formatted_content.join("\n"), metrics_data)
end

#generate_consensus_response(results) ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 415

def generate_consensus_response(results)
  primary_model = @models.first
  primary_chat = @chats[primary_model]

  # Build the consensus prompt with all model responses
  # Note: This prompt does NOT include the model's role (ADR-005)
  # The primary model synthesizes neutrally without role bias
  consensus_prompt = build_consensus_prompt(results)

  begin
    # Have the primary model generate the consensus
    # The consensus prompt is already role-neutral
    consensus_result = primary_chat.ask(consensus_prompt).content

    # Format the consensus response - no role label for consensus
    "from: #{primary_model}\n#{consensus_result}"
  rescue StandardError => e
    # If consensus fails, fall back to individual responses
    "Error generating consensus: #{e.message}\n\n" + format_individual_responses(results)
  end
end

#load_tools ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 211

def load_tools
  @tools = []

  support_local_tools
  support_mcp
  filter_tools_by_allowed_list
  filter_tools_by_rejected_list
  drop_duplicate_tools

  if tools.empty?
    AIA.config.tool_names = ''
  else
    AIA.config.tool_names = @tools.map(&:name).join(', ')
    AIA.config.tools      = @tools
  end
end

#load_tools_lazy_mcp_support_only_when_needed ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 193

def load_tools_lazy_mcp_support_only_when_needed
  @tools = []

  support_local_tools
  support_mcp_lazy
  filter_tools_by_allowed_list
  filter_tools_by_rejected_list
  drop_duplicate_tools

  if tools.empty?
    AIA.config.tool_names = ''
  else
    AIA.config.tool_names = @tools.map(&:name).join(', ')
    AIA.config.tools      = @tools
  end
end

#multi_model_chat(prompt_or_contexts) ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 364

def multi_model_chat(prompt_or_contexts)
  results = {}

  # Check if we're receiving per-model contexts (Hash) or shared prompt (String/Array) - ADR-002 revised
  per_model_contexts = prompt_or_contexts.is_a?(Hash) &&
                       prompt_or_contexts.keys.all? { |k| @models.include?(k) }

  Async do |task|
    @models.each do |internal_id|
      task.async do
        begin
          # Use model-specific context if available, otherwise shared prompt
          prompt = if per_model_contexts
                     prompt_or_contexts[internal_id]
                   else
                     prompt_or_contexts
                   end

          # Add per-model role if specified (ADR-005)
          prompt = prepend_model_role(prompt, internal_id)

          result = single_model_chat(prompt, internal_id)
          results[internal_id] = result
        rescue StandardError => e
          results[internal_id] = "Error with #{internal_id}: #{e.message}"
        end
      end
    end
  end

  # Format and return results from all models
  format_multi_model_results(results)
end
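
A sketch of the two accepted argument shapes (internal ids are illustrative):

# Shared prompt: every model receives the same text
multi_model_chat('Compare these two designs')

# Per-model contexts: a Hash keyed by internal model id
multi_model_chat({
  'gpt-4o#1' => 'Argue in favor of the proposal',
  'gpt-4o#2' => 'Argue against the proposal'
})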

#prepend_model_role(prompt, internal_id) ⇒ Object

Prepend role content to prompt for a specific model (ADR-005)



# File 'lib/aia/ruby_llm_adapter.rb', line 323

def prepend_model_role(prompt, internal_id)
  # Get model spec to find role
  spec = get_model_spec(internal_id)
  return prompt unless spec && spec[:role]

  # Get role content using PromptHandler
  # Need to create PromptHandler instance if not already available
  prompt_handler = AIA::PromptHandler.new
  role_content = prompt_handler.load_role_for_model(spec, AIA.config.role)

  return prompt unless role_content

  # Prepend role to prompt based on prompt type
  if prompt.is_a?(String)
    # Simple string prompt
    "#{role_content}\n\n#{prompt}"
  elsif prompt.is_a?(Array)
    # Conversation array - prepend to first user message
    prepend_role_to_conversation(prompt, role_content)
  else
    prompt
  end
end

#prepend_role_to_conversation(conversation, role_content) ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 347

def prepend_role_to_conversation(conversation, role_content)
  # Find the first user message and prepend role
  modified = conversation.dup
  first_user_index = modified.find_index { |msg| msg[:role] == "user" || msg["role"] == "user" }

  if first_user_index
    msg = modified[first_user_index].dup
    role_key = msg.key?(:role) ? :role : "role"
    content_key = msg.key?(:content) ? :content : "content"

    msg[content_key] = "#{role_content}\n\n#{msg[content_key]}"
    modified[first_user_index] = msg
  end

  modified
end
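
An illustrative before/after:

conversation = [{ role: "user", content: "Review this function" }]
prepend_role_to_conversation(conversation, "You are a security auditor.")
#=> [{ role: "user", content: "You are a security auditor.\n\nReview this function" }]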

#refresh_local_model_registry ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 74

def refresh_local_model_registry
  if  AIA.config.refresh.nil?           ||
      Integer(AIA.config.refresh).zero? ||
      Date.today > (AIA.config.last_refresh + Integer(AIA.config.refresh))
    RubyLLM.models.refresh!
    AIA.config.last_refresh = Date.today
    AIA::Config.dump_config(AIA.config, AIA.config.config_file) if AIA.config.config_file
  end
end

#respond_to_missing?(method, include_private = false) ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/aia/ruby_llm_adapter.rb', line 635

def respond_to_missing?(method, include_private = false)
  # Check if any of our chat instances respond to the method
  @chats.values.any? { |chat| chat.respond_to?(method) } || super
end

#setup_chats_with_tools ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 116

def setup_chats_with_tools
  valid_chats = {}
  valid_contexts = {}
  valid_specs = []
  failed_models = []

  @model_specs.each do |spec|
    model_name = spec[:model]          # Actual model name (e.g., "gpt-4o")
    internal_id = spec[:internal_id]   # Key for storage (e.g., "gpt-4o#1", "gpt-4o#2")

    begin
      # Create isolated context for this model to prevent cross-talk (ADR-002)
      context = create_isolated_context_for_model(model_name)

      # Determine provider and actual model name
      actual_model, provider = extract_model_and_provider(model_name)

      # Validate LM Studio models
      if model_name.start_with?('lms/')
        lms_api_base = ENV.fetch('LMS_API_BASE', 'http://localhost:1234/v1')
        validate_lms_model!(actual_model, lms_api_base)
      end

      # Create chat using isolated context
      chat = if provider
               context.chat(model: actual_model, provider: provider, assume_model_exists: true)
             else
               context.chat(model: actual_model)
             end

      valid_chats[internal_id] = chat
      valid_contexts[internal_id] = context
      valid_specs << spec
    rescue StandardError => e
      failed_models << "#{internal_id}: #{e.message}"
    end
  end

  # Report failed models but continue with valid ones
  unless failed_models.empty?
    puts "\n❌ Failed to initialize the following models:"
    failed_models.each { |failure| puts "   - #{failure}" }
  end

  # If no models initialized successfully, exit
  if valid_chats.empty?
    puts "\n❌ No valid models could be initialized. Exiting."
    puts "\n💡 Available models can be listed with: bin/aia --help models"
    exit 1
  end

  @chats = valid_chats
  @contexts = valid_contexts
  @model_specs = valid_specs
  @models = valid_chats.keys

  # Update the config to reflect only the valid models (keep as specs)
  AIA.config.model = @model_specs

  # Report successful models
  if failed_models.any?
    puts "\n✅ Successfully initialized: #{@models.join(', ')}"
    puts
  end

  # Use the first chat to determine tool support (assuming all models have similar tool support)
  first_chat = @chats.values.first
  return unless first_chat&.model&.supports_functions?

  load_tools_lazy_mcp_support_only_when_needed

  @chats.each_value do |chat|
    chat.with_tools(*tools) unless tools.empty?
  end
end

#should_use_consensus_mode? ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/aia/ruby_llm_adapter.rb', line 410

def should_use_consensus_mode?
  # Only use consensus when explicitly enabled with --consensus flag
  AIA.config.consensus == true
end

#single_model_chat(prompt, internal_id) ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 299

def single_model_chat(prompt, internal_id)
  chat_instance = @chats[internal_id]
  modes = chat_instance.model.modalities

  # TODO: Need to consider how to handle multi-mode models
  result = if modes.text_to_text?
    text_to_text_single(prompt, internal_id)
  elsif modes.image_to_text?
    image_to_text_single(prompt, internal_id)
  elsif modes.text_to_image?
    text_to_image_single(prompt, internal_id)
  elsif modes.text_to_audio?
    text_to_audio_single(prompt, internal_id)
  elsif modes.audio_to_text?
    audio_to_text_single(prompt, internal_id)
  else
    # TODO: what else can be done?
    "Error: No matching modality for model #{internal_id}"
  end

  result
end

#speak(_text) ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 560

def speak(_text)
  output_file = "#{Time.now.to_i}.mp3"

  # NOTE: RubyLLM doesn't have a direct text-to-speech feature
  # This is a placeholder for a custom implementation or external service
  begin
    # Try using a TTS API if available
    # For now, we'll use a mock implementation
    File.write(output_file, 'Mock TTS audio content')
    if File.exist?(output_file) && system("which #{AIA.config.speak_command} > /dev/null 2>&1")
      system("#{AIA.config.speak_command} #{output_file}")
    end
    "Audio generated and saved to: #{output_file}"
  rescue StandardError => e
    "Error generating audio: #{e.message}"
  end
end

#support_local_tools ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 229

def support_local_tools
  @tools += ObjectSpace.each_object(Class).select do |klass|
    next false unless klass < RubyLLM::Tool

    # Filter out tools that can't be instantiated without arguments
    # RubyLLM calls tool.new without args, so we must verify each tool works
    begin
      klass.new
      true
    rescue ArgumentError, LoadError, StandardError
      # Skip tools that require arguments or have missing dependencies
      false
    end
  end
end

#support_mcp ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 259

def support_mcp
  RubyLLM::MCP.establish_connection
  @tools += RubyLLM::MCP.tools
rescue StandardError => e
  warn "Warning: Failed to connect MCP clients: #{e.message}"
end

#support_mcp_lazy ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 246

def support_mcp_lazy
  # Only load MCP tools if MCP servers are actually configured
  return if AIA.config.mcp_servers.nil? || AIA.config.mcp_servers.empty?

  begin
    RubyLLM::MCP.establish_connection
    @tools += RubyLLM::MCP.tools
  rescue StandardError => e
    warn "Warning: Failed to connect MCP clients: #{e.message}"
  end
end

#transcribe(audio_file) ⇒ Object



# File 'lib/aia/ruby_llm_adapter.rb', line 553

def transcribe(audio_file)
  # Use the first model for transcription
  first_model = @models.first
  @chats[first_model].ask('Transcribe this audio', with: audio_file).content
end
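
Usage sketch (the file path is illustrative):

adapter.transcribe('recording.mp3')   #=> transcript text produced by the first configured model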