Class: Google::Cloud::Notebooks::V1::ExecutionTemplate

Inherits: Object
Extended by:
Protobuf::MessageExts::ClassMethods
Includes:
Protobuf::MessageExts
Defined in:
proto_docs/google/cloud/notebooks/v1/execution.rb

Overview

The description of a notebook execution workload.

Defined Under Namespace

Modules: JobType, ScaleTier, SchedulerAcceleratorType Classes: DataprocParameters, LabelsEntry, SchedulerAcceleratorConfig, VertexAIParameters

Instance Attribute Summary collapse

Instance Attribute Details

#accelerator_config::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorConfig



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

# The description of a notebook execution workload.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified, and each must be
  #     unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Protobuf map entry (key/value pair) backing the `env` map field.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Protobuf map entry (key/value pair) backing the `labels` map field.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU. (Value 10 is non-sequential; it mirrors the
    # upstream proto and must not be renumbered.)
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#container_image_uri::String



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

# The description of a notebook execution workload.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified, and each must be
  #     unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Protobuf map entry (key/value pair) backing the `env` map field.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Protobuf map entry (key/value pair) backing the `labels` map field.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU. (Value 10 is non-sequential; it mirrors the
    # upstream proto and must not be renumbered.)
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#dataproc_parameters::Google::Cloud::Notebooks::V1::ExecutionTemplate::DataprocParameters



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

# The description of a notebook execution workload.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified, and each must be
  #     unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Protobuf map entry (key/value pair) backing the `env` map field.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Protobuf map entry (key/value pair) backing the `labels` map field.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU. (Value 10 is non-sequential; it mirrors the
    # upstream proto and must not be renumbered.)
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#input_notebook_file::String



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

# The description of a notebook execution workload.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified, and each must be
  #     unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Protobuf map entry (key/value pair) backing the `env` map field.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Protobuf map entry (key/value pair) backing the `labels` map field.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU. (Value 10 is non-sequential; it mirrors the
    # upstream proto and must not be renumbered.)
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#job_type::Google::Cloud::Notebooks::V1::ExecutionTemplate::JobType



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

# The description of a notebook execution workload.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified, and each must be
  #     unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Protobuf map entry (key/value pair) backing the `env` map field.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Protobuf map entry (key/value pair) backing the `labels` map field.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU. (Value 10 is non-sequential; it mirrors the
    # upstream proto and must not be renumbered.)
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#kernel_spec::String



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

# The description of a notebook execution workload.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified, and each must be
  #     unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Protobuf map entry (key/value pair) backing the `env` map field.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Protobuf map entry (key/value pair) backing the `labels` map field.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU. (Value 10 is non-sequential; it mirrors the
    # upstream proto and must not be renumbered.)
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#labels::Google::Protobuf::Map{::String => ::String}



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#master_type::String



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#output_notebook_folder::String



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#parameters::String



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#params_yaml_file::String



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#scale_tier::Google::Cloud::Notebooks::V1::ExecutionTemplate::ScaleTier

Deprecated.

This field is deprecated and may be removed in the next major version update.

Returns Required. Scale tier of the hardware used for notebook execution. DEPRECATED Will be discontinued. As right now only CUSTOM is supported.



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#service_account ⇒ ::String



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

# The description of a notebook execution workload.
# NOTE(review): protobuf-generated documentation stub (proto_docs). Regenerate
# from the .proto source rather than hand-editing message/enum definitions.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Auto-generated map-entry message backing the `env` map field
    # (string key => string value).
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Auto-generated map-entry message; presumably backs the template's
  # `labels` map field (name suggests — confirm against the .proto).
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    # (Proto tag 10 — numbering gap reflects a later addition.)
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#tensorboard ⇒ ::String



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

# The description of a notebook execution workload.
# NOTE(review): protobuf-generated documentation stub (proto_docs). Regenerate
# from the .proto source rather than hand-editing message/enum definitions.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Auto-generated map-entry message backing the `env` map field
    # (string key => string value).
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Auto-generated map-entry message; presumably backs the template's
  # `labels` map field (name suggests — confirm against the .proto).
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    # (Proto tag 10 — numbering gap reflects a later addition.)
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#vertex_ai_parameters ⇒ ::Google::Cloud::Notebooks::V1::ExecutionTemplate::VertexAIParameters



148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 148

# The description of a notebook execution workload.
# NOTE(review): protobuf-generated documentation stub (proto_docs). Regenerate
# from the .proto source rather than hand-editing message/enum definitions.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Auto-generated map-entry message backing the `env` map field
    # (string key => string value).
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Auto-generated map-entry message; presumably backs the template's
  # `labels` map field (name suggests — confirm against the .proto).
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    # (Proto tag 10 — numbering gap reflects a later addition.)
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end