Class: Aidp::Harness::AIDecisionEngine

Inherits:
Object
  • Object
show all
Defined in:
lib/aidp/harness/ai_decision_engine.rb

Overview

Zero Framework Cognition (ZFC) Decision Engine

Delegates semantic analysis and decision-making to AI models instead of using brittle pattern matching, scoring formulas, or heuristic thresholds.

Examples:

Basic usage

engine = AIDecisionEngine.new(config, provider_manager)
result = engine.decide(:condition_detection,
  context: { error: "Rate limit exceeded" },
  schema: ConditionSchema,
  tier: "mini"
)
# => { condition: "rate_limit", confidence: 0.95, reasoning: "..." }

See Also:

  • docs/ZFC_COMPLIANCE_ASSESSMENT.md
  • docs/ZFC_IMPLEMENTATION_PLAN.md

Constant Summary collapse

DECISION_TEMPLATES =

Decision templates define prompts, schemas, and defaults for each decision type

{
  condition_detection: {
    prompt_template: <<~PROMPT,
      Analyze the following API response or error message and classify the condition.

      Response/Error:
      {{response}}

      Classify this into one of the following conditions:
      - rate_limit: API rate limiting or quota exceeded
      - auth_error: Authentication or authorization failure
      - timeout: Request timeout or network timeout
      - completion_marker: Work is complete or done
      - user_feedback_needed: AI is asking for user input/clarification
      - api_error: General API error (not rate limit/auth)
      - success: Successful response
      - other: None of the above

      Provide your classification with a confidence score (0.0 to 1.0) and brief reasoning.
    PROMPT
    schema: {
      type: "object",
      properties: {
        condition: {
          type: "string",
          enum: [
            "rate_limit",
            "auth_error",
            "timeout",
            "completion_marker",
            "user_feedback_needed",
            "api_error",
            "success",
            "other"
          ]
        },
        confidence: {
          type: "number",
          minimum: 0.0,
          maximum: 1.0
        },
        reasoning: {
          type: "string"
        }
      },
      required: ["condition", "confidence"]
    },
    default_tier: "mini",
    cache_ttl: nil  # Each response is unique
  },

  error_classification: {
    prompt_template: <<~PROMPT,
      Classify the following error and determine if it's retryable.

      Error:
      {{error_message}}

      Context:
      {{context}}

      Determine:
      1. Error type (rate_limit, auth, timeout, network, api_bug, other)
      2. Whether it's retryable (transient vs permanent)
      3. Recommended action (retry, switch_provider, escalate, fail)

      Provide classification with confidence and reasoning.
    PROMPT
    schema: {
      type: "object",
      properties: {
        error_type: {
          type: "string",
          enum: ["rate_limit", "auth", "timeout", "network", "api_bug", "other"]
        },
        retryable: {
          type: "boolean"
        },
        recommended_action: {
          type: "string",
          enum: ["retry", "switch_provider", "escalate", "fail"]
        },
        confidence: {
          type: "number",
          minimum: 0.0,
          maximum: 1.0
        },
        reasoning: {
          type: "string"
        }
      },
      required: ["error_type", "retryable", "recommended_action", "confidence"]
    },
    default_tier: "mini",
    cache_ttl: nil
  },

  completion_detection: {
    prompt_template: <<~PROMPT,
      Determine if the work described is complete based on the AI response.

      Task:
      {{task_description}}

      AI Response:
      {{response}}

      Is the work complete? Consider:
      - Explicit completion markers ("done", "finished", etc.)
      - Implicit indicators (results provided, no follow-up questions)
      - Requests for more information (incomplete)

      Provide boolean completion status with confidence and reasoning.
    PROMPT
    schema: {
      type: "object",
      properties: {
        complete: {
          type: "boolean"
        },
        confidence: {
          type: "number",
          minimum: 0.0,
          maximum: 1.0
        },
        reasoning: {
          type: "string"
        }
      },
      required: ["complete", "confidence"]
    },
    default_tier: "mini",
    cache_ttl: nil
  },

  implementation_verification: {
    prompt_template: "{{prompt}}",  # Custom prompt provided by caller
    schema: {
      type: "object",
      properties: {
        fully_implemented: {
          type: "boolean",
          description: "True if the implementation fully addresses all issue requirements"
        },
        reasoning: {
          type: "string",
          description: "Detailed explanation of the verification decision"
        },
        missing_requirements: {
          type: "array",
          items: {type: "string"},
          description: "List of specific requirements from the issue that are not yet implemented"
        },
        additional_work_needed: {
          type: "array",
          items: {type: "string"},
          description: "List of specific tasks needed to complete the implementation"
        }
      },
      required: ["fully_implemented", "reasoning", "missing_requirements", "additional_work_needed"]
    },
    default_tier: "mini",
    cache_ttl: nil
  }
}.freeze

Instance Attribute Summary collapse

Instance Method Summary collapse

Constructor Details

#initialize(config, provider_factory: nil) ⇒ AIDecisionEngine

Initialize the AI Decision Engine

Parameters:

  • config (Configuration)

    AIDP configuration object

  • provider_factory (ProviderFactory) (defaults to: nil)

    Factory for creating provider instances



199
200
201
202
203
204
# File 'lib/aidp/harness/ai_decision_engine.rb', line 199

# Set up the engine with its configuration and (optionally) an injected
# provider factory; a default factory is built from the config when none
# is supplied. The decision cache and its timestamp book-keeping start empty.
def initialize(config, provider_factory: nil)
  @config = config
  @provider_factory = provider_factory || ProviderFactory.new(config)
  # Cached decision results keyed by decision fingerprint, plus the
  # insertion time for each key so TTL expiry can be checked on read.
  @cache, @cache_timestamps = {}, {}
end

Instance Attribute Details

#cacheObject (readonly)

Returns the value of attribute cache.



193
194
195
# File 'lib/aidp/harness/ai_decision_engine.rb', line 193

def cache
  @cache
end

#configObject (readonly)

Returns the value of attribute config.



193
194
195
# File 'lib/aidp/harness/ai_decision_engine.rb', line 193

def config
  @config
end

#provider_factoryObject (readonly)

Returns the value of attribute provider_factory.



193
194
195
# File 'lib/aidp/harness/ai_decision_engine.rb', line 193

def provider_factory
  @provider_factory
end

Instance Method Details

#decide(decision_type, context:, schema: nil, tier: nil, cache_ttl: nil) ⇒ Hash

Make an AI-powered decision

Parameters:

  • decision_type (Symbol)

    Type of decision (:condition_detection, :error_classification, etc.)

  • context (Hash)

    Context data for the decision

  • schema (Hash, nil) (defaults to: nil)

    JSON schema for response validation (overrides default)

  • tier (String, nil) (defaults to: nil)

    Thinking depth tier (overrides default)

  • cache_ttl (Integer, nil) (defaults to: nil)

    Cache TTL in seconds (overrides default)

Returns:

  • (Hash)

    Validated decision result

Raises:

  • (ArgumentError)

    If decision_type is unknown

  • (ValidationError)

    If response doesn’t match schema



216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
# File 'lib/aidp/harness/ai_decision_engine.rb', line 216

# Make an AI-powered decision.
#
# Looks up the template for +decision_type+, serves a cached result when a
# TTL applies and a fresh entry exists, otherwise renders the prompt,
# selects a provider/model for the thinking tier, calls the AI with schema
# validation, validates the response, and caches it when a TTL applies.
#
# @param decision_type [Symbol] type of decision (:condition_detection, etc.)
# @param context [Hash] context data interpolated into the prompt template
# @param schema [Hash, nil] JSON schema override for response validation
# @param tier [String, nil] thinking depth tier override
# @param cache_ttl [Integer, nil] cache TTL override in seconds
# @return [Hash] validated decision result
# @raise [ArgumentError] if decision_type is unknown
# @raise [ValidationError] if the response does not match the schema
def decide(decision_type, context:, schema: nil, tier: nil, cache_ttl: nil)
  template = DECISION_TEMPLATES.fetch(decision_type) do
    raise ArgumentError, "Unknown decision type: #{decision_type}"
  end

  # Serve from cache when a TTL is in effect and a fresh entry exists.
  key = build_cache_key(decision_type, context)
  effective_ttl = cache_ttl || template[:cache_ttl]
  if effective_ttl
    hit = get_cached(key, effective_ttl)
    if hit
      Aidp.log_debug("ai_decision_engine", "Cache hit for #{decision_type}", {cache_key: key, ttl: effective_ttl})
      return hit
    end
  end

  # Render the prompt and pick a model for the requested thinking depth,
  # defaulting to the harness's configured provider.
  prompt = build_prompt(template[:prompt_template], context)
  depth = tier || template[:default_tier]
  provider_name, model_name, _ = ThinkingDepthManager.new(config).select_model_for_tier(
    depth,
    provider: config.default_provider
  )

  Aidp.log_debug("ai_decision_engine", "Making AI decision", {
    decision_type: decision_type,
    tier: depth,
    provider: provider_name,
    model: model_name,
    cache_ttl: effective_ttl
  })

  # Call the AI with schema enforcement, then re-validate the result.
  active_schema = schema || template[:schema]
  result = call_ai_with_schema(provider_name, model_name, prompt, active_schema)
  validate_schema(result, active_schema)

  set_cached(key, result) if effective_ttl
  result
end