Class: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample

Inherits:
Internal::Type::BaseModel
Defined in:
lib/openai/models/evals/runs/output_item_list_response.rb

Defined Under Namespace

Classes: Input, Output, Usage

Instance Attribute Summary

Instance Method Summary

Methods inherited from Internal::Type::BaseModel

==, #==, #[], coerce, #deconstruct_keys, #deep_to_h, dump, fields, hash, #hash, inherited, inspect, #inspect, known_fields, optional, recursively_to_h, required, #to_h, #to_json, #to_s, to_sorbet_type, #to_yaml

Methods included from Internal::Type::Converter

#coerce, coerce, #dump, dump, inspect, #inspect, meta_info, new_coerce_state, type_info

Methods included from Internal::Util::SorbetRuntimeSupport

#const_missing, #define_sorbet_constant!, #sorbet_constant_defined?, #to_sorbet_type, to_sorbet_type
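
These inherited helpers make instances easy to serialize and destructure. A minimal sketch, assuming sample is an instance of this class (obtained as shown in the constructor section below) and that #deconstruct_keys yields symbol keys:

sample.to_h      # Hash of the model's fields
sample.deep_to_h # recursively converts nested models and arrays as well
sample.to_json   # JSON string form
# #deconstruct_keys enables Ruby pattern matching over the fields:
case sample
in {model: String => model, finish_reason: "stop"}
  puts "#{model} finished normally"
else
  puts "generation stopped early or errored"
end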

Constructor Details

#initialize(error:, finish_reason:, input:, max_completion_tokens:, model:, output:, seed:, temperature:, top_p:, usage:) ⇒ Object

Parameters:

  • error (OpenAI::Models::Evals::EvalAPIError)

    An object representing an error response from the Eval API.

  • finish_reason (String)

    The reason why the sample generation was finished.

  • input (Array<OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input>)

    An array of input messages.

  • max_completion_tokens (Integer)

    The maximum number of tokens allowed for completion.

  • model (String)

    The model used for generating the sample.

  • output (Array<OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output>)

    An array of output messages.

  • seed (Integer)

    The seed used for generating the sample.

  • temperature (Float)

    The sampling temperature used.

  • top_p (Float)

    The top_p value used for sampling.

  • usage (OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage)

    Token usage details for the sample.


# File 'lib/openai/models/evals/runs/output_item_list_response.rb', line 241
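
In practice a Sample is read from the output-item list endpoint rather than constructed directly. A minimal sketch, assuming the client exposes that endpoint as client.evals.runs.output_items.list and returns the usual cursor page; the eval and run IDs are hypothetical placeholders:

client = OpenAI::Client.new # picks up OPENAI_API_KEY from the environment
# "eval_123" and "evalrun_123" are placeholder IDs for illustration only.
page = client.evals.runs.output_items.list("evalrun_123", eval_id: "eval_123")
sample = page.data.first.sample # => an instance of this Sample class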


Instance Attribute Details

#error ⇒ OpenAI::Models::Evals::EvalAPIError

An object representing an error response from the Eval API.



# File 'lib/openai/models/evals/runs/output_item_list_response.rb', line 144

required :error, -> { OpenAI::Evals::EvalAPIError }
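
Since #error is a required field, a failed generation can be detected by inspecting it. A minimal sketch; the code and message accessors are assumed from the shape of the Evals API error object:

if sample.error && sample.error.code
  warn "sample failed: #{sample.error.code}: #{sample.error.message}"
end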

#finish_reason ⇒ String

The reason why the sample generation was finished.

Returns:

  • (String)


# File 'lib/openai/models/evals/runs/output_item_list_response.rb', line 150

required :finish_reason, String

#input ⇒ Array<OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input>

An array of input messages.



# File 'lib/openai/models/evals/runs/output_item_list_response.rb', line 156

required :input,
         -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input] }
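
Each entry in #input is a Sample::Input message. A minimal sketch of printing the prompt side of the sample, assuming the nested Input class exposes role and content accessors (roles such as "system", "assistant", "user"):

sample.input.each do |message|
  puts "#{message.role}: #{message.content}"
end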

#max_completion_tokens ⇒ Integer

The maximum number of tokens allowed for completion.

Returns:

  • (Integer)


# File 'lib/openai/models/evals/runs/output_item_list_response.rb', line 163

required :max_completion_tokens, Integer

#model ⇒ String

The model used for generating the sample.

Returns:

  • (String)


# File 'lib/openai/models/evals/runs/output_item_list_response.rb', line 169

required :model, String

#output ⇒ Array<OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output>

An array of output messages.



# File 'lib/openai/models/evals/runs/output_item_list_response.rb', line 175

required :output,
         -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output] }
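
#output holds the generated messages in the same shape. A sketch that pulls the last assistant reply, again assuming role and content accessors on the nested Output class:

reply = sample.output.reverse.find { |message| message.role == "assistant" }
puts reply&.content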

#seed ⇒ Integer

The seed used for generating the sample.

Returns:

  • (Integer)


# File 'lib/openai/models/evals/runs/output_item_list_response.rb', line 182

required :seed, Integer

#temperature ⇒ Float

The sampling temperature used.

Returns:

  • (Float)


# File 'lib/openai/models/evals/runs/output_item_list_response.rb', line 188

required :temperature, Float

#top_p ⇒ Float

The top_p value used for sampling.

Returns:

  • (Float)


# File 'lib/openai/models/evals/runs/output_item_list_response.rb', line 194

required :top_p, Float
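
Together, #model, #max_completion_tokens, #seed, #temperature, and #top_p describe how the sample was generated, which is useful when logging or trying to reproduce a run. A minimal sketch:

puts format(
  "model=%s max_tokens=%d seed=%d temperature=%.2f top_p=%.2f",
  sample.model, sample.max_completion_tokens, sample.seed,
  sample.temperature, sample.top_p
)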

#usage ⇒ OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage

Token usage details for the sample.



# File 'lib/openai/models/evals/runs/output_item_list_response.rb', line 200

required :usage, -> { OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage }
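
Token counts for the sample live on the nested Usage object. A minimal sketch; the accessor names below (prompt_tokens, completion_tokens, cached_tokens, total_tokens) follow the usual usage shape and should be treated as an assumption:

usage = sample.usage
puts "prompt=#{usage.prompt_tokens} completion=#{usage.completion_tokens} " \
     "cached=#{usage.cached_tokens} total=#{usage.total_tokens}"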