Class: Deeprails::Models::MonitorDetailResponse::Evaluation

Inherits:
Internal::Type::BaseModel show all
Defined in:
lib/deeprails/models/monitor_detail_response.rb

Defined Under Namespace

Modules: EvaluationStatus, GuardrailMetric, RunMode Classes: ModelInput

Instance Attribute Summary collapse

Class Method Summary collapse

Instance Method Summary collapse

Methods inherited from Internal::Type::BaseModel

==, #==, #[], coerce, #deconstruct_keys, #deep_to_h, dump, fields, hash, #hash, inherited, inspect, #inspect, known_fields, optional, recursively_to_h, required, #to_h, #to_json, #to_s, to_sorbet_type, #to_yaml

Methods included from Internal::Type::Converter

#coerce, coerce, #dump, dump, inspect, #inspect, meta_info, new_coerce_state, type_info

Methods included from Internal::Util::SorbetRuntimeSupport

#const_missing, #define_sorbet_constant!, #sorbet_constant_defined?, #to_sorbet_type, to_sorbet_type

Constructor Details

#initialize(ground_truth: nil, system_prompt: nil, user_prompt: nil) ⇒ Object

A dictionary of inputs sent to the LLM to generate output. The dictionary must contain at least a `user_prompt` field or a `system_prompt` field. For ground_truth_adherence guardrail metric, `ground_truth` should be provided.

Parameters:

  • ground_truth (String) (defaults to: nil)

    The ground truth for evaluating Ground Truth Adherence guardrail.

  • system_prompt (String) (defaults to: nil)

    The system prompt used to generate the output.

  • user_prompt (String) (defaults to: nil)

    The user prompt used to generate the output.



# File 'lib/deeprails/models/monitor_detail_response.rb', line 182


Instance Attribute Details

#created_at ⇒ Time?

The time the evaluation was created in UTC.

Returns:

  • (Time, nil)


140
# File 'lib/deeprails/models/monitor_detail_response.rb', line 140

optional :created_at, Time

#error_message ⇒ String?

Error message if the evaluation failed.

Returns:

  • (String, nil)


146
# File 'lib/deeprails/models/monitor_detail_response.rb', line 146

optional :error_message, String

#evaluation_result ⇒ Hash{Symbol=>Object}?

Evaluation result consisting of average scores and rationales for each of the evaluated guardrail metrics.

Returns:

  • (Hash{Symbol=>Object}, nil)


153
# File 'lib/deeprails/models/monitor_detail_response.rb', line 153

optional :evaluation_result, Deeprails::Internal::Type::HashOf[Deeprails::Internal::Type::Unknown]

#evaluation_status ⇒ Symbol, Deeprails::Models::MonitorDetailResponse::Evaluation::EvaluationStatus

Status of the evaluation.



113
# File 'lib/deeprails/models/monitor_detail_response.rb', line 113

required :evaluation_status, enum: -> { Deeprails::MonitorDetailResponse::Evaluation::EvaluationStatus }

#evaluation_total_cost ⇒ Float?

Total cost of the evaluation.

Returns:

  • (Float, nil)


159
# File 'lib/deeprails/models/monitor_detail_response.rb', line 159

optional :evaluation_total_cost, Float

#guardrail_metrics ⇒ Array&lt;Symbol, Deeprails::Models::MonitorDetailResponse::Evaluation::GuardrailMetric&gt;?

An array of guardrail metrics that the input and output pair will be evaluated on.



166
167
# File 'lib/deeprails/models/monitor_detail_response.rb', line 166

optional :guardrail_metrics,
-> { Deeprails::Internal::Type::ArrayOf[enum: Deeprails::MonitorDetailResponse::Evaluation::GuardrailMetric] }

#model_input ⇒ Deeprails::Models::MonitorDetailResponse::Evaluation::ModelInput

A dictionary of inputs sent to the LLM to generate output. The dictionary must contain at least a `user_prompt` field or a `system_prompt` field. For ground_truth_adherence guardrail metric, `ground_truth` should be provided.



121
# File 'lib/deeprails/models/monitor_detail_response.rb', line 121

required :model_input, -> { Deeprails::MonitorDetailResponse::Evaluation::ModelInput }

#model_output ⇒ String

Output generated by the LLM to be evaluated.

Returns:

  • (String)


127
# File 'lib/deeprails/models/monitor_detail_response.rb', line 127

required :model_output, String

#nametag ⇒ String?

An optional, user-defined tag for the evaluation.

Returns:

  • (String, nil)


173
# File 'lib/deeprails/models/monitor_detail_response.rb', line 173

optional :nametag, String

#progress ⇒ Integer?

Evaluation progress. Values range between 0 and 100; 100 corresponds to a completed `evaluation_status`.

Returns:

  • (Integer, nil)


180
# File 'lib/deeprails/models/monitor_detail_response.rb', line 180

optional :progress, Integer

#run_mode ⇒ Symbol, Deeprails::Models::MonitorDetailResponse::Evaluation::RunMode

Run mode for the evaluation. The run mode allows the user to optimize for speed, accuracy, and cost by determining which models are used to evaluate the event.



134
# File 'lib/deeprails/models/monitor_detail_response.rb', line 134

required :run_mode, enum: -> { Deeprails::MonitorDetailResponse::Evaluation::RunMode }

Class Method Details

.values ⇒ Array&lt;Symbol&gt;

Returns:

  • (Array<Symbol>)


# File 'lib/deeprails/models/monitor_detail_response.rb', line 220