Class: OpenAI::Resources::Responses

Inherits:
Object
Defined in:
lib/openai/resources/responses.rb,
lib/openai/resources/responses/input_items.rb

Defined Under Namespace

Classes: InputItems

Instance Attribute Summary

Instance Method Summary

Constructor Details

#initialize(client:) ⇒ Responses

This method is part of a private API. You should avoid using this method if possible, as it may be removed or be changed in the future.

Returns a new instance of Responses.

Parameters:

  • client (OpenAI::Client)


# File 'lib/openai/resources/responses.rb', line 438

def initialize(client:)
  @client = client
  @input_items = OpenAI::Resources::Responses::InputItems.new(client: client)
end

Instance Attribute Details

#input_items ⇒ OpenAI::Resources::Responses::InputItems (readonly)



# File 'lib/openai/resources/responses.rb', line 7

def input_items
  @input_items
end
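
A minimal usage sketch, assuming a configured OpenAI::Client in client; the response ID is hypothetical:

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
# List the input items that fed a prior response; returns a cursor page.
page = client.responses.input_items.list("resp_123")
page.auto_paging_each { |item| puts item.to_json }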

Instance Method Details

#cancel(response_id, request_options: {}) ⇒ OpenAI::Models::Responses::Response

Cancels a model response with the given ID. Only responses created with the `background` parameter set to `true` can be cancelled.

Parameters:

  • response_id (String)

    The ID of the response to cancel.

  • request_options (OpenAI::RequestOptions, Hash{Symbol=>Object}, nil)

Returns:

  • (OpenAI::Models::Responses::Response)



# File 'lib/openai/resources/responses.rb', line 426

def cancel(response_id, params = {})
  @client.request(
    method: :post,
    path: ["responses/%1$s/cancel", response_id],
    model: OpenAI::Responses::Response,
    options: params[:request_options]
  )
end
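
A short usage sketch with a hypothetical response ID; remember that only background responses can be cancelled:

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
response = client.responses.cancel("resp_123")
puts response.status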

#create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) ⇒ OpenAI::Models::Responses::Response

See #stream_raw for streaming counterpart.

Some parameter documentation has been truncated; see Models::Responses::ResponseCreateParams for more details.

Creates a model response. Provide text or image inputs to generate text or JSON outputs. Have the model call your own custom code or use built-in tools like web search or file search to use your own data as input for the model's response.

Parameters:

  • See Models::Responses::ResponseCreateParams for the full parameter list.

Returns:

  • (OpenAI::Models::Responses::Response)



# File 'lib/openai/resources/responses.rb', line 81

def create(params = {})
  parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
  if parsed[:stream]
    message = "Please use `#stream_raw` for the streaming use case."
    raise ArgumentError.new(message)
  end

  # Extract any structured-output schemas (text format and tool definitions)
  # so the raw JSON response can be parsed back into those models.
  model, tool_models = get_structured_output_models(parsed)

  unwrap = ->(raw) do
    parse_structured_outputs!(raw, model, tool_models)
  end

  @client.request(
    method: :post,
    path: "responses",
    body: parsed,
    unwrap: unwrap,
    model: OpenAI::Responses::Response,
    options: options
  )
end
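
A minimal usage sketch; the model name and prompt are illustrative, not prescriptive:

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
response = client.responses.create(
  model: "gpt-4.1",
  input: "Say hello in one sentence."
)
# A response carries an array of typed output items.
puts response.id
response.output.each { |item| puts item.to_json }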

#delete(response_id, request_options: {}) ⇒ nil

Deletes a model response with the given ID.

Parameters:

  • response_id (String)

    The ID of the response to delete.

  • request_options (OpenAI::RequestOptions, Hash{Symbol=>Object}, nil)

Returns:

  • (nil)




# File 'lib/openai/resources/responses.rb', line 404

def delete(response_id, params = {})
  @client.request(
    method: :delete,
    path: ["responses/%1$s", response_id],
    model: NilClass,
    options: params[:request_options]
  )
end
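
A one-line sketch with a hypothetical ID; the call returns nil on success and raises on API errors:

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
client.responses.delete("resp_123")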

#retrieve(response_id, include: nil, starting_after: nil, request_options: {}) ⇒ OpenAI::Models::Responses::Response

See #retrieve_streaming for streaming counterpart.

Some parameter documentation has been truncated; see Models::Responses::ResponseRetrieveParams for more details.

Retrieves a model response with the given ID.

Parameters:

  • response_id (String)

    The ID of the response to retrieve.

  • include (Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>)

    Additional fields to include in the response. See the include

  • starting_after (Integer)

    The sequence number of the event after which to start streaming.

  • request_options (OpenAI::RequestOptions, Hash{Symbol=>Object}, nil)

Returns:

  • (OpenAI::Models::Responses::Response)



# File 'lib/openai/resources/responses.rb', line 325

def retrieve(response_id, params = {})
  parsed, options = OpenAI::Responses::ResponseRetrieveParams.dump_request(params)
  if parsed[:stream]
    message = "Please use `#retrieve_streaming` for the streaming use case."
    raise ArgumentError.new(message)
  end
  @client.request(
    method: :get,
    path: ["responses/%1$s", response_id],
    query: parsed,
    model: OpenAI::Responses::Response,
    options: options
  )
end
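
A short retrieval sketch with a hypothetical response ID:

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
response = client.responses.retrieve("resp_123")
puts response.status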

#retrieve_streaming(response_id, include: nil, starting_after: nil, request_options: {}) ⇒ OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>

See #retrieve for non-streaming counterpart.

Some parameter documentation has been truncated; see Models::Responses::ResponseRetrieveParams for more details.

Retrieves a model response with the given ID.

Parameters:

  • response_id (String)

    The ID of the response to retrieve.

  • include (Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>)

    Additional fields to include in the response. See the include

  • starting_after (Integer)

    The sequence number of the event after which to start streaming.

  • request_options (OpenAI::RequestOptions, Hash{Symbol=>Object}, nil)

Returns:

  • (OpenAI::Internal::Stream) yielding the ResponseStreamEvent variants listed in the signature above.



# File 'lib/openai/resources/responses.rb', line 360

def retrieve_streaming(response_id, params = {})
  parsed, options = OpenAI::Responses::ResponseRetrieveParams.dump_request(params)
  unless parsed.fetch(:stream, true)
    message = "Please use `#retrieve` for the non-streaming use case."
    raise ArgumentError.new(message)
  end
  # This method always streams; force the flag on the outgoing request.
  parsed.store(:stream, true)
  @client.request(
    method: :get,
    path: ["responses/%1$s", response_id],
    query: parsed,
    headers: {"accept" => "text/event-stream"},
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Responses::ResponseStreamEvent,
    options: options
  )
end
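
A sketch of re-attaching to an in-progress response; the ID and sequence number are hypothetical:

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
stream = client.responses.retrieve_streaming("resp_123", starting_after: 5)
stream.each do |event|
  puts event.type
end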

#stream(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) ⇒ OpenAI::Helpers::Streaming::ResponseStream

See #create for non-streaming counterpart.

Some parameter documentation has been truncated; see Models::Responses::ResponseCreateParams for more details.

Creates a model response. Provide text or image inputs to generate text or JSON outputs. Have the model call your own custom code or use built-in tools like web search or file search to use your own data as input for the model's response.

Parameters:

  • See Models::Responses::ResponseCreateParams for the full parameter list.

Returns:

  • (OpenAI::Helpers::Streaming::ResponseStream)



# File 'lib/openai/resources/responses.rb', line 168

def stream(params)
  parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
  starting_after, previous_response_id = parsed.values_at(:starting_after, :previous_response_id)

  # Resuming from a sequence number only applies when re-attaching to an existing response.
  if starting_after && !previous_response_id
    raise ArgumentError, "starting_after can only be used with previous_response_id"
  end
  model, tool_models = get_structured_output_models(parsed)

  # On the terminal "response.completed" event, parse structured outputs in place.
  unwrap = ->(raw) do
    if raw[:type] == "response.completed" && raw[:response]
      parse_structured_outputs!(raw[:response], model, tool_models)
    end
    raw
  end

  if previous_response_id
    # Re-attach to an existing (background) response rather than creating a new one.
    retrieve_params = params.slice(:include, :request_options)

    raw_stream = retrieve_streaming_internal(
      previous_response_id,
      params: retrieve_params,
      unwrap: unwrap
    )
  else
    parsed[:stream] = true

    raw_stream = @client.request(
      method: :post,
      path: "responses",
      headers: {"accept" => "text/event-stream"},
      body: parsed,
      stream: OpenAI::Internal::Stream,
      model: OpenAI::Models::Responses::ResponseStreamEvent,
      unwrap: unwrap,
      options: options
    )
  end

  OpenAI::Streaming::ResponseStream.new(
    raw_stream: raw_stream,
    text_format: model,
    starting_after: starting_after
  )
end
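
A hedged sketch of this higher-level helper; the model name is illustrative, and the event type follows the Responses API naming:

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
stream = client.responses.stream(model: "gpt-4.1", input: "Tell me a short joke.")
stream.each do |event|
  # Text arrives incrementally as response.output_text.delta events.
  print event.delta if event.type == :"response.output_text.delta"
end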

#stream_raw(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) ⇒ OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, 
OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>

See #create for non-streaming counterpart.

Some parameter documentation has been truncated; see Models::Responses::ResponseCreateParams for more details.

Creates a model response. Provide text or image inputs to generate text or JSON outputs. Have the model call your own custom code or use built-in tools like web search or file search to use your own data as input for the model's response.

Parameters:

  • See Models::Responses::ResponseCreateParams for the full parameter list.

Returns:

  • (OpenAI::Internal::Stream) yielding the ResponseStreamEvent variants listed in the signature above.



# File 'lib/openai/resources/responses.rb', line 286

def stream_raw(params = {})
  parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
  unless parsed.fetch(:stream, true)
    message = "Please use `#create` for the non-streaming use case."
    raise ArgumentError.new(message)
  end
  # This method always streams; force the flag on the outgoing request.
  parsed.store(:stream, true)

  @client.request(
    method: :post,
    path: "responses",
    headers: {"accept" => "text/event-stream"},
    body: parsed,
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Responses::ResponseStreamEvent,
    options: options
  )
end
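
A low-level sketch; the model name is illustrative, and each yielded event is one of the typed stream events listed in the signature above:

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
stream = client.responses.stream_raw(model: "gpt-4.1", input: "Hello!")
stream.each do |event|
  case event
  when OpenAI::Models::Responses::ResponseTextDeltaEvent
    print event.delta
  when OpenAI::Models::Responses::ResponseCompletedEvent
    puts
  end
end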