Module: LLM::OpenAI::Response::Responds
- Defined in: lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb
Instance Method Summary
- #annotations ⇒ Object
- #choices ⇒ Object
- #completion_tokens ⇒ Object
- #model ⇒ Object
- #output_text ⇒ String
  Returns the aggregated text content from the response outputs.
- #prompt_tokens ⇒ Object
- #response_id ⇒ Object
- #total_tokens ⇒ Object
Instance Method Details
#annotations ⇒ Object
    # File 'lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb', line 8
    def annotations = choices[0].annotations
#choices ⇒ Object
    # File 'lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb', line 7
    def choices = []
#completion_tokens ⇒ Object
    # File 'lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb', line 11
    def completion_tokens = body.usage&.output_tokens
#model ⇒ Object
    # File 'lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb', line 5
    def model = body.model
#output_text ⇒ String
Returns the aggregated text content from the response outputs.
    # File 'lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb', line 17
    def output_text
      choices.find(&:assistant?).content || ""
    end
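A minimal usage sketch follows. It assumes the surrounding llm.rb API exposes an OpenAI provider via LLM.openai and a Responses API wrapper via #responses.create; those calls are assumptions about the wider library, not part of this module. The methods shown on the result object are the ones documented here.

    # Sketch only: LLM.openai and llm.responses.create are assumed from the
    # surrounding llm.rb library; this module supplies the reader methods.
    require "llm"

    llm = LLM.openai(key: ENV["OPENAI_SECRET_KEY"])
    res = llm.responses.create("Summarize the plot of Hamlet in one sentence")

    puts res.model        # the model reported in the response body
    puts res.response_id  # the provider-assigned response id
    puts res.output_text  # aggregated assistant text, or "" when absent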
#prompt_tokens ⇒ Object
    # File 'lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb', line 10
    def prompt_tokens = body.usage&.input_tokens
#response_id ⇒ Object
    # File 'lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb', line 6
    def response_id = respond_to?(:response) ? response["id"] : id
#total_tokens ⇒ Object
    # File 'lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb', line 12
    def total_tokens = body.usage&.total_tokens
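The three usage counters above map onto the input_tokens, output_tokens, and total_tokens fields of the response body's usage object, and each returns nil when the provider omits usage data. A hedged sketch of reading them, reusing the hypothetical res object from the earlier example:

    # Assumes res is a Responses API result as in the sketch above.
    usage = {
      prompt:     res.prompt_tokens,      # input_tokens from the usage object
      completion: res.completion_tokens,  # output_tokens from the usage object
      total:      res.total_tokens        # total_tokens from the usage object
    }
    # Guard against nil counters before printing, since usage may be absent.
    if usage.values.all?
      puts format("prompt=%d completion=%d total=%d", *usage.values)
    end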