Class: DSPy::LM
- Inherits: Object
- Extended by:
- T::Sig
- Defined in:
- lib/dspy/lm.rb,
lib/dspy/lm/errors.rb,
lib/dspy/lm/adapter.rb,
lib/dspy/lm/response.rb,
lib/dspy/lm/cache_manager.rb,
lib/dspy/lm/retry_handler.rb,
lib/dspy/lm/adapter_factory.rb,
lib/dspy/lm/strategy_selector.rb,
lib/dspy/lm/adapters/openai_adapter.rb,
lib/dspy/lm/strategies/base_strategy.rb,
lib/dspy/lm/adapters/anthropic_adapter.rb,
lib/dspy/lm/structured_output_strategy.rb,
lib/dspy/lm/adapters/openai/schema_converter.rb,
lib/dspy/lm/strategies/enhanced_prompting_strategy.rb,
lib/dspy/lm/strategies/anthropic_extraction_strategy.rb,
lib/dspy/lm/strategies/openai_structured_output_strategy.rb
Defined Under Namespace
Modules: Adapters, Strategies Classes: Adapter, AdapterError, AdapterFactory, AnthropicAdapter, CacheManager, ConfigurationError, Error, MissingAPIKeyError, OpenAIAdapter, Response, RetryHandler, StrategySelector, StructuredOutputStrategy, UnsupportedProviderError
Instance Attribute Summary collapse
-
#adapter ⇒ Object
readonly
Returns the value of attribute adapter.
-
#api_key ⇒ Object
readonly
Returns the value of attribute api_key.
-
#model ⇒ Object
readonly
Returns the value of attribute model.
-
#model_id ⇒ Object
readonly
Returns the value of attribute model_id.
-
#provider ⇒ Object
readonly
Returns the value of attribute provider.
Class Method Summary collapse
Instance Method Summary collapse
- #chat(inference_module, input_values, &block) ⇒ Object
-
#initialize(model_id, api_key: nil, **options) ⇒ LM
constructor
A new instance of LM.
Constructor Details
#initialize(model_id, api_key: nil, **options) ⇒ LM
Returns a new instance of LM.
25 26 27 28 29 30 31 32 33 34 |
# File 'lib/dspy/lm.rb', line 25

# Builds a language-model client bound to a single provider adapter.
#
# @param model_id [String] provider-qualified model id, split by parse_model_id
# @param api_key [String, nil] credential forwarded to the adapter factory
# @param options [Hash] extra adapter options, forwarded untouched
#   (rendered as a bare `**` in the extracted excerpt; the documented
#   signature above shows `**options`)
def initialize(model_id, api_key: nil, **options)
  @model_id = model_id
  @api_key = api_key

  # Derive the provider/model pair from the compound id.
  @provider, @model = parse_model_id(model_id)

  # Delegate construction so provider selection lives in one place.
  @adapter = AdapterFactory.create(model_id, api_key: api_key, **options)
end
Instance Attribute Details
#adapter ⇒ Object (readonly)
Returns the value of attribute adapter.
23 24 25 |
# File 'lib/dspy/lm.rb', line 23

# @return [Object] the provider adapter backing this LM (read-only)
def adapter
  @adapter
end
#api_key ⇒ Object (readonly)
Returns the value of attribute api_key.
23 24 25 |
# File 'lib/dspy/lm.rb', line 23

# @return [String, nil] the API key supplied at construction (read-only)
def api_key
  @api_key
end
#model ⇒ Object (readonly)
Returns the value of attribute model.
23 24 25 |
# File 'lib/dspy/lm.rb', line 23

# @return [Object] the model portion parsed out of model_id (read-only)
def model
  @model
end
#model_id ⇒ Object (readonly)
Returns the value of attribute model_id.
23 24 25 |
# File 'lib/dspy/lm.rb', line 23

# @return [String] the full provider-qualified model id (read-only)
def model_id
  @model_id
end
#provider ⇒ Object (readonly)
Returns the value of attribute provider.
23 24 25 |
# File 'lib/dspy/lm.rb', line 23

# @return [Object] the provider portion parsed out of model_id (read-only)
def provider
  @provider
end
Class Method Details
.cache_manager ⇒ Object
146 147 148 |
# File 'lib/dspy/lm/cache_manager.rb', line 146

# Lazily builds and memoizes the shared cache manager.
# Stored in a class-level instance variable, so every caller of this
# class method receives the same CacheManager instance.
def cache_manager
  @cache_manager = CacheManager.new unless @cache_manager
  @cache_manager
end
Instance Method Details
#chat(inference_module, input_values, &block) ⇒ Object
36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 |
# NOTE(review): this excerpt was flattened onto one line by the doc
# extraction, and one identifier was consistently stripped: the local
# holding the built messages (and the call that builds it) renders as
# "= (inference_module, input_values)", ".map { ... }" with no receiver,
# and "chat_with_strategy(, ...)". Consult lib/dspy/lm.rb line 36 for
# the authoritative body. Observable flow in the excerpt: build messages,
# compute input_size from joined message contents, then either emit the
# full set of 'dspy.lm.*' instrumentation events (request, tokens,
# response.parsed) when should_emit_lm_events? is true, or run the same
# chat/token-extract/parse sequence without nested instrumentation;
# parsed_result is returned either way.
# File 'lib/dspy/lm.rb', line 36 def chat(inference_module, input_values, &block) signature_class = inference_module.signature_class # Build messages from inference module = (inference_module, input_values) # Calculate input size for monitoring input_text = .map { |m| m[:content] }.join(' ') input_size = input_text.length # Use smart consolidation: emit LM events only when not in nested context response = nil token_usage = {} if should_emit_lm_events? # Emit all LM events when not in nested context response = Instrumentation.instrument('dspy.lm.request', { gen_ai_operation_name: 'chat', gen_ai_system: provider, gen_ai_request_model: model, signature_class: signature_class.name, provider: provider, adapter_class: adapter.class.name, input_size: input_size }) do chat_with_strategy(, signature_class, &block) end # Extract actual token usage from response (more accurate than estimation) token_usage = Instrumentation::TokenTracker.extract_token_usage(response, provider) # Emit token usage event if available if token_usage.any? Instrumentation.emit('dspy.lm.tokens', token_usage.merge({ gen_ai_system: provider, gen_ai_request_model: model, signature_class: signature_class.name })) end # Instrument response parsing parsed_result = Instrumentation.instrument('dspy.lm.response.parsed', { signature_class: signature_class.name, provider: provider, response_length: response.content&.length || 0 }) do parse_response(response, input_values, signature_class) end else # Consolidated mode: execute without nested instrumentation response = chat_with_strategy(, signature_class, &block) token_usage = Instrumentation::TokenTracker.extract_token_usage(response, provider) parsed_result = parse_response(response, input_values, signature_class) end parsed_result end |