Class: Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client

Inherits:
Object
  • Object
show all
Includes:
Paths
Defined in:
lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb

Overview

REST client for the PredictionService service.

A service for online predictions and explanations.

Defined Under Namespace

Classes: Configuration

Instance Attribute Summary collapse

Class Method Summary collapse

Instance Method Summary collapse

Methods included from Paths

#cached_content_path, #endpoint_path, #rag_corpus_path, #template_path

Constructor Details

#initialize {|config| ... } ⇒ Client

Create a new PredictionService REST client object.

Examples:


# Create a client using the default configuration
client = ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new

# Create a client using a custom configuration
client = ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new do |config|
  config.timeout = 10.0
end

Yields:

  • (config)

    Configure the PredictionService client.

Yield Parameters:



128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 128

# Create a new PredictionService REST client object.
#
# @yield [config] Configure the PredictionService client.
# @yieldparam config [Client::Configuration]
def initialize
  # Create the configuration object
  @config = Configuration.new Client.configure

  # Yield the configuration if needed
  yield @config if block_given?

  # Create credentials
  credentials = @config.credentials
  # Use self-signed JWT if the endpoint is unchanged from default,
  # but only if the default endpoint does not have a region prefix.
  enable_self_signed_jwt = @config.endpoint.nil? ||
                           (@config.endpoint == Configuration::DEFAULT_ENDPOINT &&
                           !@config.endpoint.split(".").first.include?("-"))
  credentials ||= Credentials.default scope: @config.scope,
                                      enable_self_signed_jwt: enable_self_signed_jwt
  # A String (keyfile path) or Hash (keyfile contents) is coerced into a
  # Credentials object; any other credential object is used as-is.
  if credentials.is_a?(::String) || credentials.is_a?(::Hash)
    credentials = Credentials.new credentials, scope: @config.scope
  end

  # Explicit config wins; otherwise fall back to the quota project carried
  # by the credentials, when they expose one.
  @quota_project_id = @config.quota_project
  @quota_project_id ||= credentials.quota_project_id if credentials.respond_to? :quota_project_id

  # The transport stub that performs the actual REST calls.
  @prediction_service_stub = ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::ServiceStub.new(
    endpoint: @config.endpoint,
    endpoint_template: DEFAULT_ENDPOINT_TEMPLATE,
    universe_domain: @config.universe_domain,
    credentials: credentials,
    logger: @config.logger
  )

  # Emit a one-time debug log entry describing the created client; the
  # block is only evaluated when logging is enabled on the stub.
  @prediction_service_stub.logger(stub: true)&.info do |entry|
    entry.set_system_name
    entry.set_service
    entry.message = "Created client for #{entry.service}"
    entry.set_credentials_fields credentials
    entry.set "customEndpoint", @config.endpoint if @config.endpoint
    entry.set "defaultTimeout", @config.timeout if @config.timeout
    entry.set "quotaProject", @quota_project_id if @quota_project_id
  end

  # Mix-in client for the Locations API, sharing this client's
  # credentials, endpoint, and universe domain.
  @location_client = Google::Cloud::Location::Locations::Rest::Client.new do |config|
    config.credentials = credentials
    config.quota_project = @quota_project_id
    config.endpoint = @prediction_service_stub.endpoint
    config.universe_domain = @prediction_service_stub.universe_domain
    config.bindings_override = @config.bindings_override
    config.logger = @prediction_service_stub.logger if config.respond_to? :logger=
  end

  # Mix-in client for the IAMPolicy API, configured the same way.
  @iam_policy_client = Google::Iam::V1::IAMPolicy::Rest::Client.new do |config|
    config.credentials = credentials
    config.quota_project = @quota_project_id
    config.endpoint = @prediction_service_stub.endpoint
    config.universe_domain = @prediction_service_stub.universe_domain
    config.bindings_override = @config.bindings_override
    config.logger = @prediction_service_stub.logger if config.respond_to? :logger=
  end
end

Instance Attribute Details

#iam_policy_client ⇒ Google::Iam::V1::IAMPolicy::Rest::Client (readonly)

Get the associated client for mix-in of the IAMPolicy.

Returns:

  • (Google::Iam::V1::IAMPolicy::Rest::Client)


200
201
202
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 200

# Get the associated client for mix-in of the IAMPolicy.
#
# @return [Google::Iam::V1::IAMPolicy::Rest::Client]
def iam_policy_client
  @iam_policy_client
end

#location_client ⇒ Google::Cloud::Location::Locations::Rest::Client (readonly)

Get the associated client for mix-in of the Locations.

Returns:

  • (Google::Cloud::Location::Locations::Rest::Client)


193
194
195
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 193

# Get the associated client for mix-in of the Locations.
#
# @return [Google::Cloud::Location::Locations::Rest::Client]
def location_client
  @location_client
end

Class Method Details

.configure {|config| ... } ⇒ Client::Configuration

Configure the PredictionService Client class.

See Configuration for a description of the configuration fields.

Examples:


# Modify the configuration for all PredictionService clients
::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.configure do |config|
  config.timeout = 10.0
end

Yields:

  • (config)

    Configure the Client client.

Yield Parameters:

Returns:



66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 66

# Configure the PredictionService Client class.
#
# The default configuration is memoized; it is built by walking the
# enclosing namespace outward to find the nearest ancestor module that
# responds to +configure+ and using its configuration as the parent.
#
# @yield [config] Configure the Client class defaults.
# @return [Client::Configuration]
def self.configure
  @configure ||= begin
    namespace = ["Google", "Cloud", "AIPlatform", "V1"]
    # Pop one segment at a time until a constant with a +configure+
    # method is found; its configuration becomes the parent config.
    parent_config = while namespace.any?
                      parent_name = namespace.join "::"
                      parent_const = const_get parent_name
                      break parent_const.configure if parent_const.respond_to? :configure
                      namespace.pop
                    end
    default_config = Client::Configuration.new parent_config

    default_config
  end
  yield @configure if block_given?
  @configure
end

Instance Method Details

#configure {|config| ... } ⇒ Client::Configuration

Configure the PredictionService Client instance.

The configuration is set to the derived mode, meaning that values can be changed, but structural changes (adding new fields, etc.) are not allowed. Structural changes should be made on configure.

See Configuration for a description of the configuration fields.

Yields:

  • (config)

    Configure the Client client.

Yield Parameters:

Returns:



98
99
100
101
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 98

# Configure the PredictionService Client instance.
#
# The instance configuration is derived from the class-level defaults:
# values may be changed, but structural changes are not allowed.
#
# @yield [config] Configure the client instance.
# @return [Client::Configuration]
def configure
  yield @config if block_given?
  @config
end

#direct_predict(request, options = nil) ⇒ ::Google::Cloud::AIPlatform::V1::DirectPredictResponse #direct_predict(endpoint: nil, inputs: nil, parameters: nil) ⇒ ::Google::Cloud::AIPlatform::V1::DirectPredictResponse

Perform an unary online prediction request to a gRPC model server for Vertex first-party products and frameworks.

Examples:

Basic example

require "google/cloud/ai_platform/v1"

# Create a client object. The client can be reused for multiple calls.
client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new

# Create a request. To set request fields, pass in keyword arguments.
request = Google::Cloud::AIPlatform::V1::DirectPredictRequest.new

# Call the direct_predict method.
result = client.direct_predict request

# The returned object is of type Google::Cloud::AIPlatform::V1::DirectPredictResponse.
p result

Overloads:

  • #direct_predict(request, options = nil) ⇒ ::Google::Cloud::AIPlatform::V1::DirectPredictResponse

    Pass arguments to direct_predict via a request object, either of type DirectPredictRequest or an equivalent Hash.

    Parameters:

    • request (::Google::Cloud::AIPlatform::V1::DirectPredictRequest, ::Hash)

      A request object representing the call parameters. Required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash.

    • options (::Gapic::CallOptions, ::Hash) (defaults to: nil)

      Overrides the default settings for this call, e.g, timeout, retries etc. Optional.

  • #direct_predict(endpoint: nil, inputs: nil, parameters: nil) ⇒ ::Google::Cloud::AIPlatform::V1::DirectPredictResponse

    Pass arguments to direct_predict via keyword arguments. Note that at least one keyword argument is required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash as a request object (see above).

    Parameters:

    • endpoint (::String) (defaults to: nil)

      Required. The name of the Endpoint requested to serve the prediction. Format: projects/{project}/locations/{location}/endpoints/{endpoint}

    • inputs (::Array<::Google::Cloud::AIPlatform::V1::Tensor, ::Hash>) (defaults to: nil)

      The prediction input.

    • parameters (::Google::Cloud::AIPlatform::V1::Tensor, ::Hash) (defaults to: nil)

      The parameters that govern the prediction.

Yields:

  • (result, operation)

    Access the result along with the TransportOperation object

Yield Parameters:

Returns:

Raises:

  • (::Google::Cloud::Error)

    if the REST call is aborted.



555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 555

# Perform an unary online prediction request to a gRPC model server for
# Vertex first-party products and frameworks.
#
# @param request [::Google::Cloud::AIPlatform::V1::DirectPredictRequest, ::Hash]
#   The call parameters as a request object or an equivalent Hash. Required.
# @param options [::Gapic::CallOptions, ::Hash, nil]
#   Overrides the default settings for this call, e.g. timeout, retries. Optional.
# @yield [result, operation] Access the result along with the TransportOperation object.
# @return [::Google::Cloud::AIPlatform::V1::DirectPredictResponse]
# @raise [::ArgumentError] if the request is nil.
# @raise [::Google::Cloud::Error] if the REST call is aborted.
def direct_predict request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::DirectPredictRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.direct_predict.metadata.to_h

  # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]

  metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Per-RPC defaults take precedence over client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.direct_predict.timeout,
                         metadata:     metadata,
                         retry_policy: @config.rpcs.direct_predict.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @prediction_service_stub.direct_predict request, options do |result, operation|
    yield result, operation if block_given?
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end

#direct_raw_predict(request, options = nil) ⇒ ::Google::Cloud::AIPlatform::V1::DirectRawPredictResponse #direct_raw_predict(endpoint: nil, method_name: nil, input: nil) ⇒ ::Google::Cloud::AIPlatform::V1::DirectRawPredictResponse

Perform an unary online prediction request to a gRPC model server for custom containers.

Examples:

Basic example

require "google/cloud/ai_platform/v1"

# Create a client object. The client can be reused for multiple calls.
client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new

# Create a request. To set request fields, pass in keyword arguments.
request = Google::Cloud::AIPlatform::V1::DirectRawPredictRequest.new

# Call the direct_raw_predict method.
result = client.direct_raw_predict request

# The returned object is of type Google::Cloud::AIPlatform::V1::DirectRawPredictResponse.
p result

Overloads:

  • #direct_raw_predict(request, options = nil) ⇒ ::Google::Cloud::AIPlatform::V1::DirectRawPredictResponse

    Pass arguments to direct_raw_predict via a request object, either of type DirectRawPredictRequest or an equivalent Hash.

    Parameters:

    • request (::Google::Cloud::AIPlatform::V1::DirectRawPredictRequest, ::Hash)

      A request object representing the call parameters. Required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash.

    • options (::Gapic::CallOptions, ::Hash) (defaults to: nil)

      Overrides the default settings for this call, e.g, timeout, retries etc. Optional.

  • #direct_raw_predict(endpoint: nil, method_name: nil, input: nil) ⇒ ::Google::Cloud::AIPlatform::V1::DirectRawPredictResponse

    Pass arguments to direct_raw_predict via keyword arguments. Note that at least one keyword argument is required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash as a request object (see above).

    Parameters:

    • endpoint (::String) (defaults to: nil)

      Required. The name of the Endpoint requested to serve the prediction. Format: projects/{project}/locations/{location}/endpoints/{endpoint}

    • method_name (::String) (defaults to: nil)

      Fully qualified name of the API method being invoked to perform predictions.

      Format: /namespace.Service/Method/ Example: /tensorflow.serving.PredictionService/Predict

    • input (::String) (defaults to: nil)

      The prediction input.

Yields:

  • (result, operation)

    Access the result along with the TransportOperation object

Yield Parameters:

Returns:

Raises:

  • (::Google::Cloud::Error)

    if the REST call is aborted.



646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 646

# Perform an unary online prediction request to a gRPC model server for
# custom containers.
#
# @param request [::Google::Cloud::AIPlatform::V1::DirectRawPredictRequest, ::Hash]
#   The call parameters as a request object or an equivalent Hash. Required.
# @param options [::Gapic::CallOptions, ::Hash, nil]
#   Overrides the default settings for this call, e.g. timeout, retries. Optional.
# @yield [result, operation] Access the result along with the TransportOperation object.
# @return [::Google::Cloud::AIPlatform::V1::DirectRawPredictResponse]
# @raise [::ArgumentError] if the request is nil.
# @raise [::Google::Cloud::Error] if the REST call is aborted.
def direct_raw_predict request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::DirectRawPredictRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.direct_raw_predict.metadata.to_h

  # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]

  metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Per-RPC defaults take precedence over client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.direct_raw_predict.timeout,
                         metadata:     metadata,
                         retry_policy: @config.rpcs.direct_raw_predict.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @prediction_service_stub.direct_raw_predict request, options do |result, operation|
    yield result, operation if block_given?
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end

#embed_content(request, options = nil) ⇒ ::Google::Cloud::AIPlatform::V1::EmbedContentResponse #embed_content(model: nil, content: nil, title: nil, task_type: nil, output_dimensionality: nil, auto_truncate: nil) ⇒ ::Google::Cloud::AIPlatform::V1::EmbedContentResponse

Embed content with multimodal inputs.

Examples:

Basic example

require "google/cloud/ai_platform/v1"

# Create a client object. The client can be reused for multiple calls.
client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new

# Create a request. To set request fields, pass in keyword arguments.
request = Google::Cloud::AIPlatform::V1::EmbedContentRequest.new

# Call the embed_content method.
result = client.embed_content request

# The returned object is of type Google::Cloud::AIPlatform::V1::EmbedContentResponse.
p result

Overloads:

  • #embed_content(request, options = nil) ⇒ ::Google::Cloud::AIPlatform::V1::EmbedContentResponse

    Pass arguments to embed_content via a request object, either of type EmbedContentRequest or an equivalent Hash.

    Parameters:

    • request (::Google::Cloud::AIPlatform::V1::EmbedContentRequest, ::Hash)

      A request object representing the call parameters. Required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash.

    • options (::Gapic::CallOptions, ::Hash) (defaults to: nil)

      Overrides the default settings for this call, e.g, timeout, retries etc. Optional.

  • #embed_content(model: nil, content: nil, title: nil, task_type: nil, output_dimensionality: nil, auto_truncate: nil) ⇒ ::Google::Cloud::AIPlatform::V1::EmbedContentResponse

    Pass arguments to embed_content via keyword arguments. Note that at least one keyword argument is required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash as a request object (see above).

    Parameters:

    • model (::String) (defaults to: nil)

      Required. The name of the publisher model requested to serve the prediction. Format: projects/{project}/locations/{location}/publishers/*/models/*

    • content (::Google::Cloud::AIPlatform::V1::Content, ::Hash) (defaults to: nil)

      Required. Input content to be embedded. Required.

    • title (::String) (defaults to: nil)

      Optional. An optional title for the text.

    • task_type (::Google::Cloud::AIPlatform::V1::EmbedContentRequest::EmbeddingTaskType) (defaults to: nil)

      Optional. The task type of the embedding.

    • output_dimensionality (::Integer) (defaults to: nil)

      Optional. Optional reduced dimension for the output embedding. If set, excessive values in the output embedding are truncated from the end.

    • auto_truncate (::Boolean) (defaults to: nil)

      Optional. Whether to silently truncate the input content if it's longer than the maximum sequence length.

Yields:

  • (result, operation)

    Access the result along with the TransportOperation object

Yield Parameters:

Returns:

Raises:

  • (::Google::Cloud::Error)

    if the REST call is aborted.



1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 1206

# Embed content with multimodal inputs.
#
# @param request [::Google::Cloud::AIPlatform::V1::EmbedContentRequest, ::Hash]
#   The call parameters as a request object or an equivalent Hash. Required.
# @param options [::Gapic::CallOptions, ::Hash, nil]
#   Overrides the default settings for this call, e.g. timeout, retries. Optional.
# @yield [result, operation] Access the result along with the TransportOperation object.
# @return [::Google::Cloud::AIPlatform::V1::EmbedContentResponse]
# @raise [::ArgumentError] if the request is nil.
# @raise [::Google::Cloud::Error] if the REST call is aborted.
def embed_content request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::EmbedContentRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.embed_content.metadata.to_h

  # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]

  metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Per-RPC defaults take precedence over client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.embed_content.timeout,
                         metadata:     metadata,
                         retry_policy: @config.rpcs.embed_content.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @prediction_service_stub.embed_content request, options do |result, operation|
    yield result, operation if block_given?
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end

#explain(request, options = nil) ⇒ ::Google::Cloud::AIPlatform::V1::ExplainResponse #explain(endpoint: nil, instances: nil, parameters: nil, explanation_spec_override: nil, deployed_model_id: nil) ⇒ ::Google::Cloud::AIPlatform::V1::ExplainResponse

Perform an online explanation.

If deployed_model_id is specified, the corresponding DeployModel must have explanation_spec populated. If deployed_model_id is not specified, all DeployedModels must have explanation_spec populated.

Examples:

Basic example

require "google/cloud/ai_platform/v1"

# Create a client object. The client can be reused for multiple calls.
client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new

# Create a request. To set request fields, pass in keyword arguments.
request = Google::Cloud::AIPlatform::V1::ExplainRequest.new

# Call the explain method.
result = client.explain request

# The returned object is of type Google::Cloud::AIPlatform::V1::ExplainResponse.
p result

Overloads:

  • #explain(request, options = nil) ⇒ ::Google::Cloud::AIPlatform::V1::ExplainResponse

    Pass arguments to explain via a request object, either of type ExplainRequest or an equivalent Hash.

    Parameters:

    • request (::Google::Cloud::AIPlatform::V1::ExplainRequest, ::Hash)

      A request object representing the call parameters. Required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash.

    • options (::Gapic::CallOptions, ::Hash) (defaults to: nil)

      Overrides the default settings for this call, e.g, timeout, retries etc. Optional.

  • #explain(endpoint: nil, instances: nil, parameters: nil, explanation_spec_override: nil, deployed_model_id: nil) ⇒ ::Google::Cloud::AIPlatform::V1::ExplainResponse

    Pass arguments to explain via keyword arguments. Note that at least one keyword argument is required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash as a request object (see above).

    Parameters:

    • endpoint (::String) (defaults to: nil)

      Required. The name of the Endpoint requested to serve the explanation. Format: projects/{project}/locations/{location}/endpoints/{endpoint}

    • instances (::Array<::Google::Protobuf::Value, ::Hash>) (defaults to: nil)

      Required. The instances that are the input to the explanation call. A DeployedModel may have an upper limit on the number of instances it supports per request, and when it is exceeded the explanation call errors in case of AutoML Models, or, in case of customer created Models, the behaviour is as documented by that Model. The schema of any single instance may be specified via Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] instance_schema_uri.

    • parameters (::Google::Protobuf::Value, ::Hash) (defaults to: nil)

      The parameters that govern the prediction. The schema of the parameters may be specified via Endpoint's DeployedModels' [Model's ][google.cloud.aiplatform.v1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] parameters_schema_uri.

    • explanation_spec_override (::Google::Cloud::AIPlatform::V1::ExplanationSpecOverride, ::Hash) (defaults to: nil)

      If specified, overrides the explanation_spec of the DeployedModel. Can be used for explaining prediction results with different configurations, such as:

      • Explaining top-5 predictions results as opposed to top-1;
      • Increasing path count or step count of the attribution methods to reduce approximate errors;
      • Using different baselines for explaining the prediction results.
    • deployed_model_id (::String) (defaults to: nil)

      If specified, this ExplainRequest will be served by the chosen DeployedModel, overriding Endpoint.traffic_split.

Yields:

  • (result, operation)

    Access the result along with the TransportOperation object

Yield Parameters:

Returns:

Raises:

  • (::Google::Cloud::Error)

    if the REST call is aborted.



855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 855

# Perform an online explanation.
#
# If deployed_model_id is specified, the corresponding DeployedModel must
# have explanation_spec populated. If deployed_model_id is not specified,
# all DeployedModels must have explanation_spec populated.
#
# @param request [::Google::Cloud::AIPlatform::V1::ExplainRequest, ::Hash]
#   The call parameters as a request object or an equivalent Hash. Required.
# @param options [::Gapic::CallOptions, ::Hash, nil]
#   Overrides the default settings for this call, e.g. timeout, retries. Optional.
# @yield [result, operation] Access the result along with the TransportOperation object.
# @return [::Google::Cloud::AIPlatform::V1::ExplainResponse]
# @raise [::ArgumentError] if the request is nil.
# @raise [::Google::Cloud::Error] if the REST call is aborted.
def explain request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::ExplainRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.explain.metadata.to_h

  # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]

  metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Per-RPC defaults take precedence over client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.explain.timeout,
                         metadata:     metadata,
                         retry_policy: @config.rpcs.explain.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @prediction_service_stub.explain request, options do |result, operation|
    yield result, operation if block_given?
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end

#generate_content(request, options = nil) ⇒ ::Google::Cloud::AIPlatform::V1::GenerateContentResponse #generate_content(model: nil, contents: nil, system_instruction: nil, cached_content: nil, tools: nil, tool_config: nil, labels: nil, safety_settings: nil, model_armor_config: nil, generation_config: nil) ⇒ ::Google::Cloud::AIPlatform::V1::GenerateContentResponse

Generate content with multimodal inputs.

Examples:

Basic example

require "google/cloud/ai_platform/v1"

# Create a client object. The client can be reused for multiple calls.
client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new

# Create a request. To set request fields, pass in keyword arguments.
request = Google::Cloud::AIPlatform::V1::GenerateContentRequest.new

# Call the generate_content method.
result = client.generate_content request

# The returned object is of type Google::Cloud::AIPlatform::V1::GenerateContentResponse.
p result

Overloads:

  • #generate_content(request, options = nil) ⇒ ::Google::Cloud::AIPlatform::V1::GenerateContentResponse

    Pass arguments to generate_content via a request object, either of type GenerateContentRequest or an equivalent Hash.

    Parameters:

    • request (::Google::Cloud::AIPlatform::V1::GenerateContentRequest, ::Hash)

      A request object representing the call parameters. Required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash.

    • options (::Gapic::CallOptions, ::Hash) (defaults to: nil)

      Overrides the default settings for this call, e.g, timeout, retries etc. Optional.

  • #generate_content(model: nil, contents: nil, system_instruction: nil, cached_content: nil, tools: nil, tool_config: nil, labels: nil, safety_settings: nil, model_armor_config: nil, generation_config: nil) ⇒ ::Google::Cloud::AIPlatform::V1::GenerateContentResponse

    Pass arguments to generate_content via keyword arguments. Note that at least one keyword argument is required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash as a request object (see above).

    Parameters:

    • model (::String) (defaults to: nil)

      Required. The fully qualified name of the publisher model or tuned model endpoint to use.

      Publisher model format: projects/{project}/locations/{location}/publishers/*/models/*

      Tuned model endpoint format: projects/{project}/locations/{location}/endpoints/{endpoint}

    • contents (::Array<::Google::Cloud::AIPlatform::V1::Content, ::Hash>) (defaults to: nil)

      Required. The content of the current conversation with the model.

      For single-turn queries, this is a single instance. For multi-turn queries, this is a repeated field that contains conversation history + latest request.

    • system_instruction (::Google::Cloud::AIPlatform::V1::Content, ::Hash) (defaults to: nil)

      Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph.

    • cached_content (::String) (defaults to: nil)

      Optional. The name of the cached content used as context to serve the prediction. Note: only used in explicit caching, where users can have control over caching (e.g. what content to cache) and enjoy guaranteed cost savings. Format: projects/{project}/locations/{location}/cachedContents/{cachedContent}

    • tools (::Array<::Google::Cloud::AIPlatform::V1::Tool, ::Hash>) (defaults to: nil)

      Optional. A list of Tools the model may use to generate the next response.

      A Tool is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model.

    • tool_config (::Google::Cloud::AIPlatform::V1::ToolConfig, ::Hash) (defaults to: nil)

      Optional. Tool config. This config is shared for all tools provided in the request.

    • labels (::Hash{::String => ::String}) (defaults to: nil)

      Optional. The labels with user-defined metadata for the request. It is used for billing and reporting only.

      Label keys and values can be no longer than 63 characters (Unicode codepoints) and can only contain lowercase letters, numeric characters, underscores, and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter.

    • safety_settings (::Array<::Google::Cloud::AIPlatform::V1::SafetySetting, ::Hash>) (defaults to: nil)

      Optional. Per request settings for blocking unsafe content. Enforced on GenerateContentResponse.candidates.

    • model_armor_config (::Google::Cloud::AIPlatform::V1::ModelArmorConfig, ::Hash) (defaults to: nil)

      Optional. Settings for prompt and response sanitization using the Model Armor service. If supplied, safety_settings must not be supplied.

    • generation_config (::Google::Cloud::AIPlatform::V1::GenerationConfig, ::Hash) (defaults to: nil)

      Optional. Generation config.

Yields:

  • (result, operation)

    Access the result along with the TransportOperation object

Yield Parameters:

Returns:

Raises:

  • (::Google::Cloud::Error)

    if the REST call is aborted.



982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 982

# Generate content with multimodal inputs.
#
# @param request [::Google::Cloud::AIPlatform::V1::GenerateContentRequest, ::Hash]
#   The call parameters as a request object or an equivalent Hash. Required.
# @param options [::Gapic::CallOptions, ::Hash, nil]
#   Overrides the default settings for this call, e.g. timeout, retries. Optional.
# @yield [result, operation] Access the result along with the TransportOperation object.
# @return [::Google::Cloud::AIPlatform::V1::GenerateContentResponse]
# @raise [::ArgumentError] if the request is nil.
# @raise [::Google::Cloud::Error] if the REST call is aborted.
def generate_content request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::GenerateContentRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.generate_content.metadata.to_h

  # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]

  metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Per-RPC defaults take precedence over client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.generate_content.timeout,
                         metadata:     metadata,
                         retry_policy: @config.rpcs.generate_content.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @prediction_service_stub.generate_content request, options do |result, operation|
    yield result, operation if block_given?
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end

#logger ⇒ Logger

The logger used for request/response debug logging.

Returns:

  • (Logger)


207
208
209
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 207

def logger
  # Delegates to the transport stub, which owns the request/response debug logger.
  @prediction_service_stub.logger
end

#predict(request, options = nil) ⇒ ::Google::Cloud::AIPlatform::V1::PredictResponse #predict(endpoint: nil, instances: nil, parameters: nil, labels: nil) ⇒ ::Google::Cloud::AIPlatform::V1::PredictResponse

Perform an online prediction.

Examples:

Basic example

require "google/cloud/ai_platform/v1"

# Create a client object. The client can be reused for multiple calls.
client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new

# Create a request. To set request fields, pass in keyword arguments.
request = Google::Cloud::AIPlatform::V1::PredictRequest.new

# Call the predict method.
result = client.predict request

# The returned object is of type Google::Cloud::AIPlatform::V1::PredictResponse.
p result

Overloads:

  • #predict(request, options = nil) ⇒ ::Google::Cloud::AIPlatform::V1::PredictResponse

    Pass arguments to predict via a request object, either of type Google::Cloud::AIPlatform::V1::PredictRequest or an equivalent Hash.

    Parameters:

    • request (::Google::Cloud::AIPlatform::V1::PredictRequest, ::Hash)

      A request object representing the call parameters. Required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash.

    • options (::Gapic::CallOptions, ::Hash) (defaults to: nil)

      Overrides the default settings for this call, e.g, timeout, retries etc. Optional.

  • #predict(endpoint: nil, instances: nil, parameters: nil, labels: nil) ⇒ ::Google::Cloud::AIPlatform::V1::PredictResponse

    Pass arguments to predict via keyword arguments. Note that at least one keyword argument is required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash as a request object (see above).

    Parameters:

    • endpoint (::String) (defaults to: nil)

      Required. The name of the Endpoint requested to serve the prediction. Format: projects/{project}/locations/{location}/endpoints/{endpoint}

    • instances (::Array<::Google::Protobuf::Value, ::Hash>) (defaults to: nil)

      Required. The instances that are the input to the prediction call. A DeployedModel may have an upper limit on the number of instances it supports per request, and when it is exceeded the prediction call errors in case of AutoML Models, or, in case of customer created Models, the behaviour is as documented by that Model. The schema of any single instance may be specified via Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] instance_schema_uri.

    • parameters (::Google::Protobuf::Value, ::Hash) (defaults to: nil)

      The parameters that govern the prediction. The schema of the parameters may be specified via Endpoint's DeployedModels' [Model's ][google.cloud.aiplatform.v1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] parameters_schema_uri.

    • labels (::Hash{::String => ::String}) (defaults to: nil)

      Optional. The user labels for Imagen billing usage only. Only Imagen supports labels. For other use cases, it will be ignored.

Yields:

  • (result, operation)

    Access the result along with the TransportOperation object

Yield Parameters:

Returns:

Raises:

  • (::Google::Cloud::Error)

    if the REST call is aborted.



277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 277

def predict request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash request into the proper protobuf message type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::PredictRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  call_metadata = @config.rpcs.predict.metadata.to_h

  # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]

  call_metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Per-RPC defaults take precedence over client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.predict.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.predict.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @prediction_service_stub.predict request, options do |result, operation|
    yield result, operation if block_given?
  end
rescue ::Gapic::Rest::Error => e
  # Wrap transport-level errors in the common Google Cloud error hierarchy.
  raise ::Google::Cloud::Error.from_error(e)
end

#raw_predict(request, options = nil) ⇒ ::Google::Api::HttpBody #raw_predict(endpoint: nil, http_body: nil) ⇒ ::Google::Api::HttpBody

Perform an online prediction with an arbitrary HTTP payload.

The response includes the following HTTP headers:

  • X-Vertex-AI-Endpoint-Id: ID of the Endpoint that served this prediction.

  • X-Vertex-AI-Deployed-Model-Id: ID of the Endpoint's DeployedModel that served this prediction.

Examples:

Basic example

require "google/cloud/ai_platform/v1"

# Create a client object. The client can be reused for multiple calls.
client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new

# Create a request. To set request fields, pass in keyword arguments.
request = Google::Cloud::AIPlatform::V1::RawPredictRequest.new

# Call the raw_predict method.
result = client.raw_predict request

# The returned object is of type Google::Api::HttpBody.
p result

Overloads:

  • #raw_predict(request, options = nil) ⇒ ::Google::Api::HttpBody

    Pass arguments to raw_predict via a request object, either of type RawPredictRequest or an equivalent Hash.

    Parameters:

    • request (::Google::Cloud::AIPlatform::V1::RawPredictRequest, ::Hash)

      A request object representing the call parameters. Required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash.

    • options (::Gapic::CallOptions, ::Hash) (defaults to: nil)

      Overrides the default settings for this call, e.g, timeout, retries etc. Optional.

  • #raw_predict(endpoint: nil, http_body: nil) ⇒ ::Google::Api::HttpBody

    Pass arguments to raw_predict via keyword arguments. Note that at least one keyword argument is required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash as a request object (see above).

    Parameters:

    • endpoint (::String) (defaults to: nil)

      Required. The name of the Endpoint requested to serve the prediction. Format: projects/{project}/locations/{location}/endpoints/{endpoint}

    • http_body (::Google::Api::HttpBody, ::Hash) (defaults to: nil)

      The prediction input. Supports HTTP headers and arbitrary data payload.

      A DeployedModel may have an upper limit on the number of instances it supports per request. When this limit it is exceeded for an AutoML model, the RawPredict method returns an error. When this limit is exceeded for a custom-trained model, the behavior varies depending on the model.

      You can specify the schema for each instance in the predict_schemata.instance_schema_uri field when you create a Model. This schema applies when you deploy the Model as a DeployedModel to an Endpoint and use the RawPredict method.

Yields:

  • (result, operation)

    Access the result along with the TransportOperation object

Yield Parameters:

Returns:

Raises:

  • (::Google::Cloud::Error)

    if the REST call is aborted.



383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 383

def raw_predict request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash request into the proper protobuf message type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::RawPredictRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  call_metadata = @config.rpcs.raw_predict.metadata.to_h

  # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]

  call_metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Per-RPC defaults take precedence over client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.raw_predict.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.raw_predict.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @prediction_service_stub.raw_predict request, options do |result, operation|
    yield result, operation if block_given?
  end
rescue ::Gapic::Rest::Error => e
  # Wrap transport-level errors in the common Google Cloud error hierarchy.
  raise ::Google::Cloud::Error.from_error(e)
end

#server_streaming_predict(request, options = nil) ⇒ ::Enumerable<::Google::Cloud::AIPlatform::V1::StreamingPredictResponse> #server_streaming_predict(endpoint: nil, inputs: nil, parameters: nil) ⇒ ::Enumerable<::Google::Cloud::AIPlatform::V1::StreamingPredictResponse>

Perform a server-side streaming online prediction request for Vertex LLM streaming.

Examples:

Basic example

require "google/cloud/ai_platform/v1"

# Create a client object. The client can be reused for multiple calls.
client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new

# Create a request. To set request fields, pass in keyword arguments.
request = Google::Cloud::AIPlatform::V1::StreamingPredictRequest.new

# Call the server_streaming_predict method to start streaming.
output = client.server_streaming_predict request

# The returned object is a streamed enumerable yielding elements of type
# ::Google::Cloud::AIPlatform::V1::StreamingPredictResponse
output.each do |current_response|
  p current_response
end

Overloads:

  • #server_streaming_predict(request, options = nil) ⇒ ::Enumerable<::Google::Cloud::AIPlatform::V1::StreamingPredictResponse>

    Pass arguments to server_streaming_predict via a request object, either of type StreamingPredictRequest or an equivalent Hash.

    Parameters:

    • request (::Google::Cloud::AIPlatform::V1::StreamingPredictRequest, ::Hash)

      A request object representing the call parameters. Required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash.

    • options (::Gapic::CallOptions, ::Hash) (defaults to: nil)

      Overrides the default settings for this call, e.g, timeout, retries etc. Optional.

  • #server_streaming_predict(endpoint: nil, inputs: nil, parameters: nil) ⇒ ::Enumerable<::Google::Cloud::AIPlatform::V1::StreamingPredictResponse>

    Pass arguments to server_streaming_predict via keyword arguments. Note that at least one keyword argument is required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash as a request object (see above).

    Parameters:

    • endpoint (::String) (defaults to: nil)

      Required. The name of the Endpoint requested to serve the prediction. Format: projects/{project}/locations/{location}/endpoints/{endpoint}

    • inputs (::Array<::Google::Cloud::AIPlatform::V1::Tensor, ::Hash>) (defaults to: nil)

      The prediction input.

    • parameters (::Google::Cloud::AIPlatform::V1::Tensor, ::Hash) (defaults to: nil)

      The parameters that govern the prediction.

Returns:

Raises:

  • (::Google::Cloud::Error)

    if the REST call is aborted.



730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 730

def server_streaming_predict request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash request into the proper protobuf message type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::StreamingPredictRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  call_metadata = @config.rpcs.server_streaming_predict.metadata.to_h

  # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]

  call_metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Per-RPC defaults take precedence over client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.server_streaming_predict.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.server_streaming_predict.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  # Return a lazy server stream; chunks are pulled on demand via the
  # threaded enumerator's in/out queues.
  ::Gapic::Rest::ServerStream.new(
    ::Google::Cloud::AIPlatform::V1::StreamingPredictResponse,
    ::Gapic::Rest::ThreadedEnumerator.new do |in_q, out_q|
      @prediction_service_stub.server_streaming_predict request, options do |chunk|
        in_q.deq
        out_q.enq chunk
      end
    end
  )
rescue ::Gapic::Rest::Error => e
  # Wrap transport-level errors in the common Google Cloud error hierarchy.
  raise ::Google::Cloud::Error.from_error(e)
end

#stream_generate_content(request, options = nil) ⇒ ::Enumerable<::Google::Cloud::AIPlatform::V1::GenerateContentResponse> #stream_generate_content(model: nil, contents: nil, system_instruction: nil, cached_content: nil, tools: nil, tool_config: nil, labels: nil, safety_settings: nil, model_armor_config: nil, generation_config: nil) ⇒ ::Enumerable<::Google::Cloud::AIPlatform::V1::GenerateContentResponse>

Generate content with multimodal inputs with streaming support.

Examples:

Basic example

require "google/cloud/ai_platform/v1"

# Create a client object. The client can be reused for multiple calls.
client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new

# Create a request. To set request fields, pass in keyword arguments.
request = Google::Cloud::AIPlatform::V1::GenerateContentRequest.new

# Call the stream_generate_content method to start streaming.
output = client.stream_generate_content request

# The returned object is a streamed enumerable yielding elements of type
# ::Google::Cloud::AIPlatform::V1::GenerateContentResponse
output.each do |current_response|
  p current_response
end

Overloads:

  • #stream_generate_content(request, options = nil) ⇒ ::Enumerable<::Google::Cloud::AIPlatform::V1::GenerateContentResponse>

    Pass arguments to stream_generate_content via a request object, either of type GenerateContentRequest or an equivalent Hash.

    Parameters:

    • request (::Google::Cloud::AIPlatform::V1::GenerateContentRequest, ::Hash)

      A request object representing the call parameters. Required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash.

    • options (::Gapic::CallOptions, ::Hash) (defaults to: nil)

      Overrides the default settings for this call, e.g, timeout, retries etc. Optional.

  • #stream_generate_content(model: nil, contents: nil, system_instruction: nil, cached_content: nil, tools: nil, tool_config: nil, labels: nil, safety_settings: nil, model_armor_config: nil, generation_config: nil) ⇒ ::Enumerable<::Google::Cloud::AIPlatform::V1::GenerateContentResponse>

    Pass arguments to stream_generate_content via keyword arguments. Note that at least one keyword argument is required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash as a request object (see above).

    Parameters:

    • model (::String) (defaults to: nil)

      Required. The fully qualified name of the publisher model or tuned model endpoint to use.

      Publisher model format: projects/{project}/locations/{location}/publishers/*/models/*

      Tuned model endpoint format: projects/{project}/locations/{location}/endpoints/{endpoint}

    • contents (::Array<::Google::Cloud::AIPlatform::V1::Content, ::Hash>) (defaults to: nil)

      Required. The content of the current conversation with the model.

      For single-turn queries, this is a single instance. For multi-turn queries, this is a repeated field that contains conversation history + latest request.

    • system_instruction (::Google::Cloud::AIPlatform::V1::Content, ::Hash) (defaults to: nil)

      Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph.

    • cached_content (::String) (defaults to: nil)

      Optional. The name of the cached content used as context to serve the prediction. Note: only used in explicit caching, where users can have control over caching (e.g. what content to cache) and enjoy guaranteed cost savings. Format: projects/{project}/locations/{location}/cachedContents/{cachedContent}

    • tools (::Array<::Google::Cloud::AIPlatform::V1::Tool, ::Hash>) (defaults to: nil)

      Optional. A list of Tools the model may use to generate the next response.

      A Tool is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model.

    • tool_config (::Google::Cloud::AIPlatform::V1::ToolConfig, ::Hash) (defaults to: nil)

      Optional. Tool config. This config is shared for all tools provided in the request.

    • labels (::Hash{::String => ::String}) (defaults to: nil)

      Optional. The labels with user-defined metadata for the request. It is used for billing and reporting only.

      Label keys and values can be no longer than 63 characters (Unicode codepoints) and can only contain lowercase letters, numeric characters, underscores, and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter.

    • safety_settings (::Array<::Google::Cloud::AIPlatform::V1::SafetySetting, ::Hash>) (defaults to: nil)

      Optional. Per request settings for blocking unsafe content. Enforced on GenerateContentResponse.candidates.

    • model_armor_config (::Google::Cloud::AIPlatform::V1::ModelArmorConfig, ::Hash) (defaults to: nil)

      Optional. Settings for prompt and response sanitization using the Model Armor service. If supplied, safety_settings must not be supplied.

    • generation_config (::Google::Cloud::AIPlatform::V1::GenerationConfig, ::Hash) (defaults to: nil)

      Optional. Generation config.

Returns:

Raises:

  • (::Google::Cloud::Error)

    if the REST call is aborted.



1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 1108

def stream_generate_content request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash request into the proper protobuf message type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::GenerateContentRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  call_metadata = @config.rpcs.stream_generate_content.metadata.to_h

  # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]

  call_metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Per-RPC defaults take precedence over client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.stream_generate_content.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.stream_generate_content.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  # Return a lazy server stream; chunks are pulled on demand via the
  # threaded enumerator's in/out queues.
  ::Gapic::Rest::ServerStream.new(
    ::Google::Cloud::AIPlatform::V1::GenerateContentResponse,
    ::Gapic::Rest::ThreadedEnumerator.new do |in_q, out_q|
      @prediction_service_stub.stream_generate_content request, options do |chunk|
        in_q.deq
        out_q.enq chunk
      end
    end
  )
rescue ::Gapic::Rest::Error => e
  # Wrap transport-level errors in the common Google Cloud error hierarchy.
  raise ::Google::Cloud::Error.from_error(e)
end

#stream_raw_predict(request, options = nil) ⇒ ::Enumerable<::Google::Api::HttpBody> #stream_raw_predict(endpoint: nil, http_body: nil) ⇒ ::Enumerable<::Google::Api::HttpBody>

Perform a streaming online prediction with an arbitrary HTTP payload.

Examples:

Basic example

require "google/cloud/ai_platform/v1"

# Create a client object. The client can be reused for multiple calls.
client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new

# Create a request. To set request fields, pass in keyword arguments.
request = Google::Cloud::AIPlatform::V1::StreamRawPredictRequest.new

# Call the stream_raw_predict method to start streaming.
output = client.stream_raw_predict request

# The returned object is a streamed enumerable yielding elements of type
# ::Google::Api::HttpBody
output.each do |current_response|
  p current_response
end

Overloads:

  • #stream_raw_predict(request, options = nil) ⇒ ::Enumerable<::Google::Api::HttpBody>

    Pass arguments to stream_raw_predict via a request object, either of type StreamRawPredictRequest or an equivalent Hash.

    Parameters:

    • request (::Google::Cloud::AIPlatform::V1::StreamRawPredictRequest, ::Hash)

      A request object representing the call parameters. Required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash.

    • options (::Gapic::CallOptions, ::Hash) (defaults to: nil)

      Overrides the default settings for this call, e.g, timeout, retries etc. Optional.

  • #stream_raw_predict(endpoint: nil, http_body: nil) ⇒ ::Enumerable<::Google::Api::HttpBody>

    Pass arguments to stream_raw_predict via keyword arguments. Note that at least one keyword argument is required. To specify no parameters, or to keep all the default parameter values, pass an empty Hash as a request object (see above).

    Parameters:

    • endpoint (::String) (defaults to: nil)

      Required. The name of the Endpoint requested to serve the prediction. Format: projects/{project}/locations/{location}/endpoints/{endpoint}

    • http_body (::Google::Api::HttpBody, ::Hash) (defaults to: nil)

      The prediction input. Supports HTTP headers and arbitrary data payload.

Returns:

Raises:

  • (::Google::Cloud::Error)

    if the REST call is aborted.



464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 464

def stream_raw_predict request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash request into the proper protobuf message type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::StreamRawPredictRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  call_metadata = @config.rpcs.stream_raw_predict.metadata.to_h

  # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
    transports_version_send: [:rest]

  call_metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Per-RPC defaults take precedence over client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.stream_raw_predict.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.stream_raw_predict.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  # Return a lazy server stream; chunks are pulled on demand via the
  # threaded enumerator's in/out queues.
  ::Gapic::Rest::ServerStream.new(
    ::Google::Api::HttpBody,
    ::Gapic::Rest::ThreadedEnumerator.new do |in_q, out_q|
      @prediction_service_stub.stream_raw_predict request, options do |chunk|
        in_q.deq
        out_q.enq chunk
      end
    end
  )
rescue ::Gapic::Rest::Error => e
  # Wrap transport-level errors in the common Google Cloud error hierarchy.
  raise ::Google::Cloud::Error.from_error(e)
end

#universe_domain ⇒ String

The effective universe domain

Returns:

  • (String)


108
109
110
# File 'lib/google/cloud/ai_platform/v1/prediction_service/rest/client.rb', line 108

def universe_domain
  # Delegates to the transport stub, which resolves the effective universe domain.
  @prediction_service_stub.universe_domain
end