Class: Aidp::Harness::AIDecisionEngine

Inherits:
Object
Defined in:
lib/aidp/harness/ai_decision_engine.rb

Overview

Zero Framework Cognition (ZFC) Decision Engine

Delegates semantic analysis and decision-making to AI models instead of using brittle pattern matching, scoring formulas, or heuristic thresholds.

Examples:

Basic usage

engine = AIDecisionEngine.new(config, provider_factory: provider_factory)
result = engine.decide(:condition_detection,
  context: { error: "Rate limit exceeded" },
  schema: ConditionSchema,
  tier: "mini"
)
# => { condition: "rate_limit", confidence: 0.95, reasoning: "..." }

See Also:

  • docs/ZFC_COMPLIANCE_ASSESSMENT.md
  • docs/ZFC_IMPLEMENTATION_PLAN.md

Constant Summary collapse

DECISION_TEMPLATES =

Decision templates define prompts, schemas, and defaults for each decision type

{
  condition_detection: {
prompt_template: <<~PROMPT,\n      Analyze the following API response or error message and classify the condition.\n\n      Response/Error:\n      {{response}}\n\n      Classify this into one of the following conditions:\n      - rate_limit: API rate limiting or quota exceeded\n      - auth_error: Authentication or authorization failure\n      - timeout: Request timeout or network timeout\n      - completion_marker: Work is complete or done\n      - user_feedback_needed: AI is asking for user input/clarification\n      - api_error: General API error (not rate limit/auth)\n      - success: Successful response\n      - other: None of the above\n\n      Provide your classification with a confidence score (0.0 to 1.0) and brief reasoning.\n    PROMPT\n    schema: {\n      type: \"object\",\n      properties: {\n        condition: {\n          type: \"string\",\n          enum: [\n            \"rate_limit\",\n            \"auth_error\",\n            \"timeout\",\n            \"completion_marker\",\n            \"user_feedback_needed\",\n            \"api_error\",\n            \"success\",\n            \"other\"\n          ]\n        },\n        confidence: {\n          type: \"number\",\n          minimum: 0.0,\n          maximum: 1.0\n        },\n        reasoning: {\n          type: \"string\"\n        }\n      },\n      required: [\"condition\", \"confidence\"]\n    },\n    default_tier: \"mini\",\n    cache_ttl: nil  # Each response is unique\n  },\n\n  error_classification: {\n    prompt_template: <<~PROMPT,\n      Classify the following error and determine if it's retryable.\n\n      Error:\n      {{error_message}}\n\n      Context:\n      {{context}}\n\n      Determine:\n      1. Error type (rate_limit, auth, timeout, network, api_bug, other)\n      2. Whether it's retryable (transient vs permanent)\n      3. 
Recommended action (retry, switch_provider, escalate, fail)\n\n      Provide classification with confidence and reasoning.\n    PROMPT\n    schema: {\n      type: \"object\",\n      properties: {\n        error_type: {\n          type: \"string\",\n          enum: [\"rate_limit\", \"auth\", \"timeout\", \"network\", \"api_bug\", \"other\"]\n        },\n        retryable: {\n          type: \"boolean\"\n        },\n        recommended_action: {\n          type: \"string\",\n          enum: [\"retry\", \"switch_provider\", \"escalate\", \"fail\"]\n        },\n        confidence: {\n          type: \"number\",\n          minimum: 0.0,\n          maximum: 1.0\n        },\n        reasoning: {\n          type: \"string\"\n        }\n      },\n      required: [\"error_type\", \"retryable\", \"recommended_action\", \"confidence\"]\n    },\n    default_tier: \"mini\",\n    cache_ttl: nil\n  },\n\n  completion_detection: {\n    prompt_template: <<~PROMPT,\n      Determine if the work described is complete based on the AI response.\n\n      Task:\n      {{task_description}}\n\n      AI Response:\n      {{response}}\n\n      Is the work complete? 
Consider:\n      - Explicit completion markers (\"done\", \"finished\", etc.)\n      - Implicit indicators (results provided, no follow-up questions)\n      - Requests for more information (incomplete)\n\n      Provide boolean completion status with confidence and reasoning.\n    PROMPT\n    schema: {\n      type: \"object\",\n      properties: {\n        complete: {\n          type: \"boolean\"\n        },\n        confidence: {\n          type: \"number\",\n          minimum: 0.0,\n          maximum: 1.0\n        },\n        reasoning: {\n          type: \"string\"\n        }\n      },\n      required: [\"complete\", \"confidence\"]\n    },\n    default_tier: \"mini\",\n    cache_ttl: nil\n  },\n\n  implementation_verification: {\n    prompt_template: \"{{prompt}}\",  # Custom prompt provided by caller\n    schema: {\n      type: \"object\",\n      properties: {\n        fully_implemented: {\n          type: \"boolean\",\n          description: \"True if the implementation fully addresses all issue requirements\"\n        },\n        reasoning: {\n          type: \"string\",\n          description: \"Detailed explanation of the verification decision\"\n        },\n        missing_requirements: {\n          type: \"array\",\n          items: {type: \"string\"},\n          description: \"List of specific requirements from the issue that are not yet implemented\"\n        },\n        additional_work_needed: {\n          type: \"array\",\n          items: {type: \"string\"},\n          description: \"List of specific tasks needed to complete the implementation\"\n        }\n      },\n      required: [\"fully_implemented\", \"reasoning\", \"missing_requirements\", \"additional_work_needed\"]\n    },\n    default_tier: \"mini\",\n    cache_ttl: nil\n  },\n\n  # FIX for issue #391: Prompt effectiveness evaluation for stuck work loops\n  prompt_evaluation: {\n    prompt_template: \"{{prompt}}\",  # Custom prompt provided by caller\n    schema: {\n      type: 
\"object\",\n      properties: {\n        effective: {\n          type: \"boolean\",\n          description: \"True if the prompt is likely to lead to completion\"\n        },\n        issues: {\n          type: \"array\",\n          items: {type: \"string\"},\n          description: \"Specific problems with the current prompt\"\n        },\n        suggestions: {\n          type: \"array\",\n          items: {type: \"string\"},\n          description: \"Actionable suggestions to improve effectiveness\"\n        },\n        likely_blockers: {\n          type: \"array\",\n          items: {type: \"string\"},\n          description: \"Potential blockers preventing progress\"\n        },\n        confidence: {\n          type: \"number\",\n          minimum: 0.0,\n          maximum: 1.0,\n          description: \"Confidence in this assessment\"\n        }\n      },\n      required: [\"effective\", \"issues\", \"suggestions\", \"confidence\"]\n    },\n    default_tier: \"mini\",\n    cache_ttl: nil\n  },\n\n  # FIX for issue #391: Template improvement suggestions for AGD pattern\n  template_improvement: {\n    prompt_template: \"{{prompt}}\",  # Custom prompt provided by caller\n    schema: {\n      type: \"object\",\n      properties: {\n        improved_sections: {\n          type: \"array\",\n          items: {\n            type: \"object\",\n            properties: {\n              section_name: {type: \"string\"},\n              original: {type: \"string\"},\n              improved: {type: \"string\"},\n              rationale: {type: \"string\"}\n            }\n          }\n        },\n        additional_sections: {\n          type: \"array\",\n          items: {\n            type: \"object\",\n            properties: {\n              section_name: {type: \"string\"},\n              content: {type: \"string\"},\n              rationale: {type: \"string\"}\n            }\n          }\n        },\n        completion_criteria_improvements: {\n          type: 
\"array\",\n          items: {type: \"string\"},\n          description: \"Improvements to completion criteria definitions\"\n        }\n      },\n      required: [\"improved_sections\", \"completion_criteria_improvements\"]\n    },\n    default_tier: \"standard\",  # Use standard tier for thoughtful improvements\n    cache_ttl: nil\n  }\n}.freeze\n",

Instance Attribute Summary collapse

Instance Method Summary collapse

Constructor Details

#initialize(config, provider_factory: nil) ⇒ AIDecisionEngine

Initialize the AI Decision Engine



278
279
280
281
282
283
# File 'lib/aidp/harness/ai_decision_engine.rb', line 278

# Initialize the AI Decision Engine.
#
# @param config [Object] harness configuration used to resolve providers/models
# @param provider_factory [Object, nil] optional factory for providers; when
#   omitted, a new ProviderFactory is built from config
def initialize(config, provider_factory: nil)
  @config = config
  @provider_factory = provider_factory || ProviderFactory.new(config)
  # Decision cache and per-key insertion timestamps (used for TTL checks)
  @cache, @cache_timestamps = {}, {}
end

Instance Attribute Details

#cacheObject (readonly)

Returns the value of attribute cache.



272
273
274
# File 'lib/aidp/harness/ai_decision_engine.rb', line 272

def cache
  @cache
end

#configObject (readonly)

Returns the value of attribute config.



272
273
274
# File 'lib/aidp/harness/ai_decision_engine.rb', line 272

def config
  @config
end

#provider_factoryObject (readonly)

Returns the value of attribute provider_factory.



272
273
274
# File 'lib/aidp/harness/ai_decision_engine.rb', line 272

def provider_factory
  @provider_factory
end

Instance Method Details

#decide(decision_type, context:, schema: nil, tier: nil, cache_ttl: nil) ⇒ Hash

Make an AI-powered decision

Raises:

  • (ArgumentError)

    If decision_type is unknown

  • (ValidationError)

    If response doesn’t match schema



295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
# File 'lib/aidp/harness/ai_decision_engine.rb', line 295

# Make an AI-powered decision of the given type.
#
# @param decision_type [Symbol] key into DECISION_TEMPLATES
# @param context [Hash] values interpolated into the template's prompt
# @param schema [Hash, nil] JSON schema overriding the template's schema
# @param tier [String, nil] model tier overriding the template's default
# @param cache_ttl [Integer, nil] cache TTL overriding the template's TTL;
#   when no TTL applies, results are neither read from nor written to the cache
# @return [Hash] the schema-validated decision result
# @raise [ArgumentError] if decision_type is unknown
# @raise [ValidationError] if the AI response doesn't match the schema
def decide(decision_type, context:, schema: nil, tier: nil, cache_ttl: nil)
  template = DECISION_TEMPLATES[decision_type]
  raise ArgumentError, "Unknown decision type: #{decision_type}" unless template

  cache_key = build_cache_key(decision_type, context)
  ttl = cache_ttl || template[:cache_ttl]

  # Serve a fresh cached result when caching is enabled for this decision.
  if ttl
    cached = get_cached(cache_key, ttl)
    if cached
      Aidp.log_debug("ai_decision_engine", "Cache hit for #{decision_type}", {
        cache_key: cache_key,
        ttl: ttl
      })
      return cached
    end
  end

  prompt = build_prompt(template[:prompt_template], context)
  selected_tier = tier || template[:default_tier]

  # Resolve a provider/model pair for the tier via the harness default provider.
  provider_name, model_name, _model_data = ThinkingDepthManager
    .new(config)
    .select_model_for_tier(selected_tier, provider: config.default_provider)

  Aidp.log_debug("ai_decision_engine", "Making AI decision", {
    decision_type: decision_type,
    tier: selected_tier,
    provider: provider_name,
    model: model_name,
    cache_ttl: ttl
  })

  # Query the model with structured output, then enforce the schema locally.
  response_schema = schema || template[:schema]
  result = call_ai_with_schema(provider_name, model_name, prompt, response_schema)
  validate_schema(result, response_schema)

  set_cached(cache_key, result) if ttl
  result
end