Module: TensorFlow::RawOps
Defined in: lib/tensorflow/raw_ops.rb
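TensorFlow::RawOps exposes one Ruby method per underlying TensorFlow operation, with each op input and attribute passed as a keyword argument; a keyword left as nil falls back to the op's own default. Below is a minimal sketch of calling two of the ops listed in the summary, assuming the gem is installed and that plain Ruby numerics and arrays are accepted as op inputs (if your version requires explicit tensors, wrap the inputs with the gem's tensor constructor first):

```ruby
require "tensorflow"

# Element-wise addition via the raw op; the keyword names (x:, y:)
# match the signature shown in the summary below.
sum = TensorFlow::RawOps.add(x: [1.0, 2.0], y: [3.0, 4.0])

# Matrix multiply; transpose_a/transpose_b are optional attributes
# and may be omitted to use the op's defaults.
product = TensorFlow::RawOps.mat_mul(
  a: [[1.0, 2.0], [3.0, 4.0]],
  b: [[5.0, 6.0], [7.0, 8.0]],
  transpose_a: false,
  transpose_b: false
)
```

Most applications will reach for the gem's higher-level helpers instead; RawOps is the thin, generated layer that mirrors the TensorFlow op registry one-to-one.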
Class Method Summary
- .abort(error_msg: nil, exit_without_error: nil) ⇒ Object
- .abs(x: nil) ⇒ Object
- .accumulate_nv2(inputs: nil, shape: nil) ⇒ Object
- .accumulator_apply_gradient(handle: nil, local_step: nil, gradient: nil, dtype: nil) ⇒ Object
- .accumulator_num_accumulated(handle: nil) ⇒ Object
- .accumulator_set_global_step(handle: nil, new_global_step: nil) ⇒ Object
- .accumulator_take_gradient(handle: nil, num_required: nil, dtype: nil) ⇒ Object
- .acos(x: nil) ⇒ Object
- .acosh(x: nil) ⇒ Object
- .add(x: nil, y: nil) ⇒ Object
- .add_many_sparse_to_tensors_map(sparse_indices: nil, sparse_values: nil, sparse_shape: nil, container: nil, shared_name: nil) ⇒ Object
- .add_n(inputs: nil) ⇒ Object
- .add_sparse_to_tensors_map(sparse_indices: nil, sparse_values: nil, sparse_shape: nil, container: nil, shared_name: nil) ⇒ Object
- .add_v2(x: nil, y: nil) ⇒ Object
- .adjust_contrast(images: nil, contrast_factor: nil, min_value: nil, max_value: nil) ⇒ Object
- .adjust_contrastv2(images: nil, contrast_factor: nil) ⇒ Object
- .adjust_hue(images: nil, delta: nil) ⇒ Object
- .adjust_saturation(images: nil, scale: nil) ⇒ Object
- .all(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
- .all_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, seed: nil, seed2: nil) ⇒ Object
- .all_to_all(input: nil, group_assignment: nil, concat_dimension: nil, split_dimension: nil, split_count: nil) ⇒ Object
- .angle(input: nil) ⇒ Object
- .anonymous_iterator(output_types: nil, output_shapes: nil) ⇒ Object
- .anonymous_iterator_v2(output_types: nil, output_shapes: nil) ⇒ Object
- .any(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
- .apply_ada_max(var: nil, m: nil, v: nil, beta1_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
- .apply_adadelta(var: nil, accum: nil, accum_update: nil, lr: nil, rho: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
- .apply_adagrad(var: nil, accum: nil, lr: nil, grad: nil, use_locking: nil, update_slots: nil) ⇒ Object
- .apply_adagrad_da(var: nil, gradient_accumulator: nil, gradient_squared_accumulator: nil, grad: nil, lr: nil, l1: nil, l2: nil, global_step: nil, use_locking: nil) ⇒ Object
- .apply_adam(var: nil, m: nil, v: nil, beta1_power: nil, beta2_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
- .apply_add_sign(var: nil, m: nil, lr: nil, alpha: nil, sign_decay: nil, beta: nil, grad: nil, use_locking: nil) ⇒ Object
- .apply_centered_rms_prop(var: nil, mg: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
- .apply_ftrl(var: nil, accum: nil, linear: nil, grad: nil, lr: nil, l1: nil, l2: nil, lr_power: nil, use_locking: nil) ⇒ Object
- .apply_ftrl_v2(var: nil, accum: nil, linear: nil, grad: nil, lr: nil, l1: nil, l2: nil, l2_shrinkage: nil, lr_power: nil, use_locking: nil) ⇒ Object
- .apply_gradient_descent(var: nil, alpha: nil, delta: nil, use_locking: nil) ⇒ Object
- .apply_momentum(var: nil, accum: nil, lr: nil, grad: nil, momentum: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
- .apply_power_sign(var: nil, m: nil, lr: nil, logbase: nil, sign_decay: nil, beta: nil, grad: nil, use_locking: nil) ⇒ Object
- .apply_proximal_adagrad(var: nil, accum: nil, lr: nil, l1: nil, l2: nil, grad: nil, use_locking: nil) ⇒ Object
- .apply_proximal_gradient_descent(var: nil, alpha: nil, l1: nil, l2: nil, delta: nil, use_locking: nil) ⇒ Object
- .apply_rms_prop(var: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
- .approximate_equal(x: nil, y: nil, tolerance: nil) ⇒ Object
- .arg_max(input: nil, dimension: nil, output_type: nil) ⇒ Object
- .arg_min(input: nil, dimension: nil, output_type: nil) ⇒ Object
- .as_string(input: nil, precision: nil, scientific: nil, shortest: nil, width: nil, fill: nil) ⇒ Object
- .asin(x: nil) ⇒ Object
- .asinh(x: nil) ⇒ Object
- .assert(condition: nil, data: nil, summarize: nil) ⇒ Object
- .assign(ref: nil, value: nil, validate_shape: nil, use_locking: nil) ⇒ Object
- .assign_add(ref: nil, value: nil, use_locking: nil) ⇒ Object
- .assign_add_variable_op(resource: nil, value: nil, dtype: nil) ⇒ Object
- .assign_sub(ref: nil, value: nil, use_locking: nil) ⇒ Object
- .assign_sub_variable_op(resource: nil, value: nil, dtype: nil) ⇒ Object
- .assign_variable_op(resource: nil, value: nil, dtype: nil) ⇒ Object
- .atan(x: nil) ⇒ Object
- .atan2(y: nil, x: nil) ⇒ Object
- .atanh(x: nil) ⇒ Object
- .audio_spectrogram(input: nil, window_size: nil, stride: nil, magnitude_squared: nil) ⇒ Object
- .audio_summary(tag: nil, tensor: nil, sample_rate: nil, max_outputs: nil) ⇒ Object
- .audio_summary_v2(tag: nil, tensor: nil, sample_rate: nil, max_outputs: nil) ⇒ Object
- .avg_pool(value: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
- .avg_pool3d(input: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
- .avg_pool3d_grad(orig_input_shape: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
- .avg_pool_grad(orig_input_shape: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
- .barrier(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) ⇒ Object
- .barrier_close(handle: nil, cancel_pending_enqueues: nil) ⇒ Object
- .barrier_incomplete_size(handle: nil) ⇒ Object
- .barrier_insert_many(handle: nil, keys: nil, values: nil, component_index: nil) ⇒ Object
- .barrier_ready_size(handle: nil) ⇒ Object
- .barrier_take_many(handle: nil, num_elements: nil, component_types: nil, allow_small_batch: nil, wait_for_incomplete: nil, timeout_ms: nil) ⇒ Object
- .batch(in_tensors: nil, num_batch_threads: nil, max_batch_size: nil, max_enqueued_batches: nil, batch_timeout_micros: nil, allowed_batch_sizes: nil, grad_timeout_micros: nil, container: nil, shared_name: nil, batching_queue: nil) ⇒ Object
- .batch_cholesky(input: nil) ⇒ Object
- .batch_cholesky_grad(l: nil, grad: nil) ⇒ Object
- .batch_dataset(input_dataset: nil, batch_size: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .batch_dataset_v2(input_dataset: nil, batch_size: nil, drop_remainder: nil, parallel_copy: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .batch_fft(input: nil) ⇒ Object
- .batch_fft2d(input: nil) ⇒ Object
- .batch_fft3d(input: nil) ⇒ Object
- .batch_function(in_tensors: nil, captured_tensors: nil, f: nil, num_batch_threads: nil, max_batch_size: nil, batch_timeout_micros: nil, max_enqueued_batches: nil, allowed_batch_sizes: nil, container: nil, shared_name: nil, batching_queue: nil) ⇒ Object
- .batch_ifft(input: nil) ⇒ Object
- .batch_ifft2d(input: nil) ⇒ Object
- .batch_ifft3d(input: nil) ⇒ Object
- .batch_mat_mul(x: nil, y: nil, adj_x: nil, adj_y: nil) ⇒ Object
- .batch_mat_mul_v2(x: nil, y: nil, adj_x: nil, adj_y: nil) ⇒ Object
- .batch_matrix_band_part(input: nil, num_lower: nil, num_upper: nil) ⇒ Object
- .batch_matrix_determinant(input: nil) ⇒ Object
- .batch_matrix_diag(diagonal: nil) ⇒ Object
- .batch_matrix_diag_part(input: nil) ⇒ Object
- .batch_matrix_inverse(input: nil, adjoint: nil) ⇒ Object
- .batch_matrix_set_diag(input: nil, diagonal: nil) ⇒ Object
- .batch_matrix_solve(matrix: nil, rhs: nil, adjoint: nil) ⇒ Object
- .batch_matrix_solve_ls(matrix: nil, rhs: nil, l2_regularizer: nil, fast: nil) ⇒ Object
- .batch_matrix_triangular_solve(matrix: nil, rhs: nil, lower: nil, adjoint: nil) ⇒ Object
- .batch_norm_with_global_normalization(t: nil, m: nil, v: nil, beta: nil, gamma: nil, variance_epsilon: nil, scale_after_normalization: nil) ⇒ Object
- .batch_norm_with_global_normalization_grad(t: nil, m: nil, v: nil, gamma: nil, backprop: nil, variance_epsilon: nil, scale_after_normalization: nil) ⇒ Object
- .batch_self_adjoint_eig(input: nil) ⇒ Object
- .batch_self_adjoint_eig_v2(input: nil, compute_v: nil) ⇒ Object
- .batch_svd(input: nil, compute_uv: nil, full_matrices: nil) ⇒ Object
- .batch_to_space(input: nil, crops: nil, block_size: nil) ⇒ Object
- .batch_to_space_nd(input: nil, block_shape: nil, crops: nil) ⇒ Object
- .bessel_i0e(x: nil) ⇒ Object
- .bessel_i1e(x: nil) ⇒ Object
- .betainc(a: nil, b: nil, x: nil) ⇒ Object
- .bias_add(value: nil, bias: nil, data_format: nil) ⇒ Object
- .bias_add_grad(out_backprop: nil, data_format: nil) ⇒ Object
- .bias_add_v1(value: nil, bias: nil) ⇒ Object
- .big_query_reader(container: nil, shared_name: nil, project_id: nil, dataset_id: nil, table_id: nil, columns: nil, timestamp_millis: nil, test_end_point: nil) ⇒ Object
- .bincount(arr: nil, size: nil, weights: nil) ⇒ Object
- .bitcast(input: nil, type: nil) ⇒ Object
- .bitwise_and(x: nil, y: nil) ⇒ Object
- .bitwise_or(x: nil, y: nil) ⇒ Object
- .bitwise_xor(x: nil, y: nil) ⇒ Object
- .boosted_trees_aggregate_stats(node_ids: nil, gradients: nil, hessians: nil, feature: nil, max_splits: nil, num_buckets: nil) ⇒ Object
- .boosted_trees_bucketize(float_values: nil, bucket_boundaries: nil, num_features: nil) ⇒ Object
- .boosted_trees_calculate_best_feature_split(node_id_range: nil, stats_summary: nil, l1: nil, l2: nil, tree_complexity: nil, min_node_weight: nil, logits_dimension: nil, split_type: nil) ⇒ Object
- .boosted_trees_calculate_best_gains_per_feature(node_id_range: nil, stats_summary_list: nil, l1: nil, l2: nil, tree_complexity: nil, min_node_weight: nil, max_splits: nil, num_features: nil) ⇒ Object
- .boosted_trees_center_bias(tree_ensemble_handle: nil, mean_gradients: nil, mean_hessians: nil, l1: nil, l2: nil) ⇒ Object
- .boosted_trees_create_ensemble(tree_ensemble_handle: nil, stamp_token: nil, tree_ensemble_serialized: nil) ⇒ Object
- .boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle: nil, epsilon: nil, num_streams: nil, max_elements: nil) ⇒ Object
- .boosted_trees_deserialize_ensemble(tree_ensemble_handle: nil, stamp_token: nil, tree_ensemble_serialized: nil) ⇒ Object
- .boosted_trees_ensemble_resource_handle_op(container: nil, shared_name: nil) ⇒ Object
- .boosted_trees_example_debug_outputs(tree_ensemble_handle: nil, bucketized_features: nil, num_bucketized_features: nil, logits_dimension: nil) ⇒ Object
- .boosted_trees_get_ensemble_states(tree_ensemble_handle: nil) ⇒ Object
- .boosted_trees_make_quantile_summaries(float_values: nil, example_weights: nil, epsilon: nil, num_features: nil) ⇒ Object
- .boosted_trees_make_stats_summary(node_ids: nil, gradients: nil, hessians: nil, bucketized_features_list: nil, max_splits: nil, num_buckets: nil, num_features: nil) ⇒ Object
- .boosted_trees_predict(tree_ensemble_handle: nil, bucketized_features: nil, num_bucketized_features: nil, logits_dimension: nil) ⇒ Object
- .boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle: nil, summaries: nil, num_features: nil) ⇒ Object
- .boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle: nil, bucket_boundaries: nil, num_streams: nil) ⇒ Object
- .boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle: nil, num_buckets: nil, generate_quantiles: nil) ⇒ Object
- .boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle: nil, num_features: nil) ⇒ Object
- .boosted_trees_quantile_stream_resource_handle_op(container: nil, shared_name: nil) ⇒ Object
- .boosted_trees_serialize_ensemble(tree_ensemble_handle: nil) ⇒ Object
- .boosted_trees_training_predict(tree_ensemble_handle: nil, cached_tree_ids: nil, cached_node_ids: nil, bucketized_features: nil, num_bucketized_features: nil, logits_dimension: nil) ⇒ Object
- .boosted_trees_update_ensemble(tree_ensemble_handle: nil, feature_ids: nil, node_ids: nil, gains: nil, thresholds: nil, left_node_contribs: nil, right_node_contribs: nil, max_depth: nil, learning_rate: nil, pruning_mode: nil, num_features: nil) ⇒ Object
- .broadcast_args(s0: nil, s1: nil) ⇒ Object
- .broadcast_gradient_args(s0: nil, s1: nil) ⇒ Object
- .broadcast_to(input: nil, shape: nil) ⇒ Object
- .bucketize(input: nil, boundaries: nil) ⇒ Object
- .cache_dataset(input_dataset: nil, filename: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .case(branch_index: nil, input: nil, branches: nil, output_shapes: nil) ⇒ Object
- .cast(x: nil) ⇒ Object
- .ceil(x: nil) ⇒ Object
- .check_numerics(tensor: nil, message: nil) ⇒ Object
- .cholesky(input: nil) ⇒ Object
- .cholesky_grad(l: nil, grad: nil) ⇒ Object
- .choose_fastest_branch_dataset(input_dataset: nil, ratio_numerator: nil, ratio_denominator: nil, other_arguments: nil, num_elements_per_branch: nil, branches: nil, other_arguments_lengths: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .clip_by_value(t: nil, clip_value_min: nil, clip_value_max: nil) ⇒ Object
- .close_summary_writer(writer: nil) ⇒ Object
- .collective_bcast_recv(group_size: nil, group_key: nil, instance_key: nil, shape: nil) ⇒ Object
- .collective_bcast_send(input: nil, group_size: nil, group_key: nil, instance_key: nil, shape: nil) ⇒ Object
- .collective_gather(input: nil, group_size: nil, group_key: nil, instance_key: nil, shape: nil) ⇒ Object
- .collective_permute(input: nil, source_target_pairs: nil) ⇒ Object
- .collective_reduce(input: nil, group_size: nil, group_key: nil, instance_key: nil, merge_op: nil, final_op: nil, subdiv_offsets: nil, wait_for: nil) ⇒ Object
- .combined_non_max_suppression(boxes: nil, scores: nil, max_output_size_per_class: nil, max_total_size: nil, iou_threshold: nil, score_threshold: nil, pad_per_class: nil, clip_boxes: nil) ⇒ Object
- .compare_and_bitpack(input: nil, threshold: nil) ⇒ Object
- .complex(real: nil, imag: nil) ⇒ Object
- .complex_abs(x: nil) ⇒ Object
- .compute_accidental_hits(true_classes: nil, sampled_candidates: nil, num_true: nil, seed: nil, seed2: nil) ⇒ Object
- .concat(concat_dim: nil, values: nil) ⇒ Object
- .concat_offset(concat_dim: nil, shape: nil) ⇒ Object
- .concat_v2(values: nil, axis: nil) ⇒ Object
- .concatenate_dataset(input_dataset: nil, another_dataset: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .conditional_accumulator(dtype: nil, shape: nil, container: nil, shared_name: nil, reduction_type: nil) ⇒ Object
- .configure_distributed_tpu(embedding_config: nil, tpu_embedding_config: nil, is_global_init: nil) ⇒ Object
- .conj(input: nil) ⇒ Object
- .conjugate_transpose(x: nil, perm: nil) ⇒ Object
- .const(value: nil, dtype: nil) ⇒ Object
- .consume_mutex_lock(mutex_lock: nil) ⇒ Object
- .control_trigger ⇒ Object
- .conv2d(input: nil, filter: nil, strides: nil, use_cudnn_on_gpu: nil, padding: nil, explicit_paddings: nil, data_format: nil, dilations: nil) ⇒ Object
- .conv2d_backprop_filter(input: nil, filter_sizes: nil, out_backprop: nil, strides: nil, use_cudnn_on_gpu: nil, padding: nil, explicit_paddings: nil, data_format: nil, dilations: nil) ⇒ Object
- .conv2d_backprop_input(input_sizes: nil, filter: nil, out_backprop: nil, strides: nil, use_cudnn_on_gpu: nil, padding: nil, explicit_paddings: nil, data_format: nil, dilations: nil) ⇒ Object
- .conv3d(input: nil, filter: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) ⇒ Object
- .conv3d_backprop_filter(input: nil, filter: nil, out_backprop: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
- .conv3d_backprop_filter_v2(input: nil, filter_sizes: nil, out_backprop: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) ⇒ Object
- .conv3d_backprop_input(input: nil, filter: nil, out_backprop: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
- .conv3d_backprop_input_v2(input_sizes: nil, filter: nil, out_backprop: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) ⇒ Object
- .copy(input: nil, tensor_name: nil, debug_ops_spec: nil) ⇒ Object
- .copy_host(input: nil, tensor_name: nil, debug_ops_spec: nil) ⇒ Object
- .cos(x: nil) ⇒ Object
- .cosh(x: nil) ⇒ Object
- .count_up_to(ref: nil, limit: nil) ⇒ Object
- .create_summary_db_writer(writer: nil, db_uri: nil, experiment_name: nil, run_name: nil, user_name: nil) ⇒ Object
- .create_summary_file_writer(writer: nil, logdir: nil, max_queue: nil, flush_millis: nil, filename_suffix: nil) ⇒ Object
- .crop_and_resize(image: nil, boxes: nil, box_ind: nil, crop_size: nil, method: nil, extrapolation_value: nil) ⇒ Object
- .crop_and_resize_grad_boxes(grads: nil, image: nil, boxes: nil, box_ind: nil, method: nil) ⇒ Object
- .crop_and_resize_grad_image(grads: nil, boxes: nil, box_ind: nil, image_size: nil, method: nil) ⇒ Object
- .cross(a: nil, b: nil) ⇒ Object
- .cross_replica_sum(input: nil, group_assignment: nil) ⇒ Object
- .ctc_beam_search_decoder(inputs: nil, sequence_length: nil, beam_width: nil, top_paths: nil, merge_repeated: nil) ⇒ Object
- .ctc_greedy_decoder(inputs: nil, sequence_length: nil, merge_repeated: nil) ⇒ Object
- .ctc_loss(inputs: nil, labels_indices: nil, labels_values: nil, sequence_length: nil, preprocess_collapse_repeated: nil, ctc_merge_repeated: nil, ignore_longer_outputs_than_inputs: nil) ⇒ Object
- .cudnn_rnn(input: nil, input_h: nil, input_c: nil, params: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil, is_training: nil) ⇒ Object
- .cudnn_rnn_backprop(input: nil, input_h: nil, input_c: nil, params: nil, output: nil, output_h: nil, output_c: nil, output_backprop: nil, output_h_backprop: nil, output_c_backprop: nil, reserve_space: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) ⇒ Object
- .cudnn_rnn_backprop_v2(input: nil, input_h: nil, input_c: nil, params: nil, output: nil, output_h: nil, output_c: nil, output_backprop: nil, output_h_backprop: nil, output_c_backprop: nil, reserve_space: nil, host_reserved: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) ⇒ Object
- .cudnn_rnn_backprop_v3(input: nil, input_h: nil, input_c: nil, params: nil, sequence_lengths: nil, output: nil, output_h: nil, output_c: nil, output_backprop: nil, output_h_backprop: nil, output_c_backprop: nil, reserve_space: nil, host_reserved: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil, time_major: nil) ⇒ Object
- .cudnn_rnn_canonical_to_params(num_layers: nil, num_units: nil, input_size: nil, weights: nil, biases: nil, num_params: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) ⇒ Object
- .cudnn_rnn_params_size(num_layers: nil, num_units: nil, input_size: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) ⇒ Object
- .cudnn_rnn_params_to_canonical(num_layers: nil, num_units: nil, input_size: nil, params: nil, num_params: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) ⇒ Object
- .cudnn_rnnv2(input: nil, input_h: nil, input_c: nil, params: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil, is_training: nil) ⇒ Object
- .cudnn_rnnv3(input: nil, input_h: nil, input_c: nil, params: nil, sequence_lengths: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil, is_training: nil, time_major: nil) ⇒ Object
- .cumprod(x: nil, axis: nil, exclusive: nil, reverse: nil) ⇒ Object
- .cumsum(x: nil, axis: nil, exclusive: nil, reverse: nil) ⇒ Object
- .data_format_dim_map(x: nil, src_format: nil, dst_format: nil) ⇒ Object
- .data_format_vec_permute(x: nil, src_format: nil, dst_format: nil) ⇒ Object
- .dataset_to_graph(input_dataset: nil) ⇒ Object
- .dataset_to_single_element(dataset: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .debug_gradient_identity(input: nil) ⇒ Object
- .debug_gradient_ref_identity(input: nil) ⇒ Object
- .debug_identity(input: nil, device_name: nil, tensor_name: nil, debug_urls: nil, gated_grpc: nil) ⇒ Object
- .debug_nan_count(input: nil, device_name: nil, tensor_name: nil, debug_urls: nil, gated_grpc: nil) ⇒ Object
- .debug_numeric_summary(input: nil, device_name: nil, tensor_name: nil, debug_urls: nil, lower_bound: nil, upper_bound: nil, mute_if_healthy: nil, gated_grpc: nil) ⇒ Object
- .decode_and_crop_jpeg(contents: nil, crop_window: nil, channels: nil, ratio: nil, fancy_upscaling: nil, try_recover_truncated: nil, acceptable_fraction: nil, dct_method: nil) ⇒ Object
- .decode_base64(input: nil) ⇒ Object
- .decode_bmp(contents: nil, channels: nil) ⇒ Object
- .decode_compressed(bytes: nil, compression_type: nil) ⇒ Object
- .decode_csv(records: nil, record_defaults: nil, field_delim: nil, use_quote_delim: nil, na_value: nil, select_cols: nil) ⇒ Object
- .decode_gif(contents: nil) ⇒ Object
- .decode_jpeg(contents: nil, channels: nil, ratio: nil, fancy_upscaling: nil, try_recover_truncated: nil, acceptable_fraction: nil, dct_method: nil) ⇒ Object
- .decode_json_example(json_examples: nil) ⇒ Object
- .decode_padded_raw(input_bytes: nil, fixed_length: nil, out_type: nil, little_endian: nil) ⇒ Object
- .decode_png(contents: nil, channels: nil, dtype: nil) ⇒ Object
- .decode_proto_v2(bytes: nil, message_type: nil, field_names: nil, output_types: nil, descriptor_source: nil, message_format: nil, sanitize: nil) ⇒ Object
- .decode_raw(bytes: nil, out_type: nil, little_endian: nil) ⇒ Object
- .decode_wav(contents: nil, desired_channels: nil, desired_samples: nil) ⇒ Object
- .deep_copy(x: nil) ⇒ Object
- .delete_iterator(handle: nil, deleter: nil) ⇒ Object
- .delete_session_tensor(handle: nil) ⇒ Object
- .dense_to_dense_set_operation(set1: nil, set2: nil, set_operation: nil, validate_indices: nil) ⇒ Object
- .dense_to_sparse_set_operation(set1: nil, set2_indices: nil, set2_values: nil, set2_shape: nil, set_operation: nil, validate_indices: nil) ⇒ Object
- .depth_to_space(input: nil, block_size: nil, data_format: nil) ⇒ Object
- .depthwise_conv2d_native(input: nil, filter: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) ⇒ Object
- .depthwise_conv2d_native_backprop_filter(input: nil, filter_sizes: nil, out_backprop: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) ⇒ Object
- .depthwise_conv2d_native_backprop_input(input_sizes: nil, filter: nil, out_backprop: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) ⇒ Object
- .dequantize(input: nil, min_range: nil, max_range: nil, mode: nil) ⇒ Object
- .deserialize_iterator(resource_handle: nil, serialized: nil) ⇒ Object
- .deserialize_many_sparse(serialized_sparse: nil, dtype: nil) ⇒ Object
- .deserialize_sparse(serialized_sparse: nil, dtype: nil) ⇒ Object
- .destroy_resource_op(resource: nil, ignore_lookup_error: nil) ⇒ Object
- .destroy_temporary_variable(ref: nil, var_name: nil) ⇒ Object
- .diag(diagonal: nil) ⇒ Object
- .diag_part(input: nil) ⇒ Object
- .digamma(x: nil) ⇒ Object
- .dilation2d(input: nil, filter: nil, strides: nil, rates: nil, padding: nil) ⇒ Object
- .dilation2d_backprop_filter(input: nil, filter: nil, out_backprop: nil, strides: nil, rates: nil, padding: nil) ⇒ Object
- .dilation2d_backprop_input(input: nil, filter: nil, out_backprop: nil, strides: nil, rates: nil, padding: nil) ⇒ Object
- .div(x: nil, y: nil) ⇒ Object
- .div_no_nan(x: nil, y: nil) ⇒ Object
- .draw_bounding_boxes(images: nil, boxes: nil) ⇒ Object
- .draw_bounding_boxes_v2(images: nil, boxes: nil, colors: nil) ⇒ Object
- .dynamic_partition(data: nil, partitions: nil, num_partitions: nil) ⇒ Object
- .dynamic_stitch(indices: nil, data: nil) ⇒ Object
- .eager_py_func(input: nil, token: nil) ⇒ Object
- .edit_distance(hypothesis_indices: nil, hypothesis_values: nil, hypothesis_shape: nil, truth_indices: nil, truth_values: nil, truth_shape: nil, normalize: nil) ⇒ Object
- .elu(features: nil) ⇒ Object
- .elu_grad(gradients: nil, outputs: nil) ⇒ Object
- .empty(shape: nil, dtype: nil, init: nil) ⇒ Object
- .empty_tensor_list(element_shape: nil, max_num_elements: nil, element_dtype: nil, shape_type: nil) ⇒ Object
- .encode_base64(input: nil, pad: nil) ⇒ Object
- .encode_jpeg(image: nil, format: nil, quality: nil, progressive: nil, optimize_size: nil, chroma_downsampling: nil, density_unit: nil, x_density: nil, y_density: nil, xmp_metadata: nil) ⇒ Object
- .encode_jpeg_variable_quality(images: nil, quality: nil) ⇒ Object
- .encode_png(image: nil, compression: nil) ⇒ Object
- .encode_proto(sizes: nil, values: nil, field_names: nil, message_type: nil, descriptor_source: nil) ⇒ Object
- .encode_wav(audio: nil, sample_rate: nil) ⇒ Object
- .enqueue_tpu_embedding_integer_batch(batch: nil, mode_override: nil, device_ordinal: nil) ⇒ Object
- .enqueue_tpu_embedding_sparse_batch(sample_indices: nil, embedding_indices: nil, aggregation_weights: nil, mode_override: nil, device_ordinal: nil, combiners: nil) ⇒ Object
- .enqueue_tpu_embedding_sparse_tensor_batch(sample_indices: nil, embedding_indices: nil, aggregation_weights: nil, mode_override: nil, device_ordinal: nil, combiners: nil, table_ids: nil, max_sequence_lengths: nil) ⇒ Object
- .ensure_shape(input: nil, shape: nil) ⇒ Object
- .enter(data: nil, frame_name: nil, is_constant: nil, parallel_iterations: nil) ⇒ Object
- .equal(x: nil, y: nil) ⇒ Object
- .erf(x: nil) ⇒ Object
- .erfc(x: nil) ⇒ Object
- .euclidean_norm(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
- .exit(data: nil) ⇒ Object
- .exp(x: nil) ⇒ Object
- .expand_dims(input: nil, dim: nil) ⇒ Object
- .experimental_assert_next_dataset(input_dataset: nil, transformations: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_auto_shard_dataset(input_dataset: nil, num_workers: nil, index: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_bytes_produced_stats_dataset(input_dataset: nil, tag: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_choose_fastest_dataset(input_datasets: nil, num_experiments: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_csv_dataset(filenames: nil, compression_type: nil, buffer_size: nil, header: nil, field_delim: nil, use_quote_delim: nil, na_value: nil, select_cols: nil, record_defaults: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_dataset_cardinality(input_dataset: nil) ⇒ Object
- .experimental_dataset_to_tf_record(input_dataset: nil, filename: nil, compression_type: nil) ⇒ Object
- .experimental_dense_to_sparse_batch_dataset(input_dataset: nil, batch_size: nil, row_shape: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_directed_interleave_dataset(selector_input_dataset: nil, data_input_datasets: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_group_by_reducer_dataset(input_dataset: nil, key_func_other_arguments: nil, init_func_other_arguments: nil, reduce_func_other_arguments: nil, finalize_func_other_arguments: nil, key_func: nil, init_func: nil, reduce_func: nil, finalize_func: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_group_by_window_dataset(input_dataset: nil, key_func_other_arguments: nil, reduce_func_other_arguments: nil, window_size_func_other_arguments: nil, key_func: nil, reduce_func: nil, window_size_func: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_identity_indexed_dataset(size: nil) ⇒ Object
- .experimental_ignore_errors_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_indexed_dataset_get(materialized: nil, index: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_indexed_dataset_materialize(dataset: nil, materialized: nil) ⇒ Object
- .experimental_iterator_get_device(resource: nil) ⇒ Object
- .experimental_latency_stats_dataset(input_dataset: nil, tag: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_lmdb_dataset(filenames: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_map_and_batch_dataset(input_dataset: nil, other_arguments: nil, batch_size: nil, num_parallel_calls: nil, drop_remainder: nil, f: nil, output_types: nil, output_shapes: nil, preserve_cardinality: nil) ⇒ Object
- .experimental_map_dataset(input_dataset: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil, use_inter_op_parallelism: nil, preserve_cardinality: nil) ⇒ Object
- .experimental_matching_files_dataset(patterns: nil) ⇒ Object
- .experimental_materialized_index_dataset_handle(container: nil, shared_name: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_max_intra_op_parallelism_dataset(input_dataset: nil, max_intra_op_parallelism: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_non_serializable_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_numa_map_and_batch_dataset(input_dataset: nil, other_arguments: nil, batch_size: nil, num_parallel_calls: nil, drop_remainder: nil, f: nil, output_types: nil, output_shapes: nil, preserve_cardinality: nil) ⇒ Object
- .experimental_parallel_interleave_dataset(input_dataset: nil, other_arguments: nil, cycle_length: nil, block_length: nil, sloppy: nil, buffer_output_elements: nil, prefetch_input_elements: nil, f: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_parse_example_dataset(input_dataset: nil, num_parallel_calls: nil, dense_defaults: nil, sparse_keys: nil, dense_keys: nil, sparse_types: nil, dense_shapes: nil, output_types: nil, output_shapes: nil, sloppy: nil) ⇒ Object
- .experimental_private_thread_pool_dataset(input_dataset: nil, num_threads: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_random_dataset(seed: nil, seed2: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_rebatch_dataset(input_dataset: nil, num_workers: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_scan_dataset(input_dataset: nil, initial_state: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil, preserve_cardinality: nil) ⇒ Object
- .experimental_set_stats_aggregator_dataset(input_dataset: nil, stats_aggregator: nil, tag: nil, counter_prefix: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_sleep_dataset(input_dataset: nil, sleep_microseconds: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_sliding_window_dataset(input_dataset: nil, window_size: nil, window_shift: nil, window_stride: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_sql_dataset(driver_name: nil, data_source_name: nil, query: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_stats_aggregator_handle(container: nil, shared_name: nil) ⇒ Object
- .experimental_stats_aggregator_summary(iterator: nil) ⇒ Object
- .experimental_take_while_dataset(input_dataset: nil, other_arguments: nil, predicate: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_thread_pool_dataset(input_dataset: nil, thread_pool: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_thread_pool_handle(num_threads: nil, max_intra_op_parallelism: nil, display_name: nil, container: nil, shared_name: nil) ⇒ Object
- .experimental_unbatch_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .experimental_unique_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .expm1(x: nil) ⇒ Object
- .extract_glimpse(input: nil, size: nil, offsets: nil, centered: nil, normalized: nil, uniform_noise: nil, noise: nil) ⇒ Object
- .extract_image_patches(images: nil, ksizes: nil, strides: nil, rates: nil, padding: nil) ⇒ Object
- .extract_jpeg_shape(contents: nil, output_type: nil) ⇒ Object
- .extract_volume_patches(input: nil, ksizes: nil, strides: nil, padding: nil) ⇒ Object
- .fact ⇒ Object
- .fake_param(dtype: nil, shape: nil) ⇒ Object
- .fake_quant_with_min_max_args(inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) ⇒ Object
- .fake_quant_with_min_max_args_gradient(gradients: nil, inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) ⇒ Object
- .fake_quant_with_min_max_vars(inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) ⇒ Object
- .fake_quant_with_min_max_vars_gradient(gradients: nil, inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) ⇒ Object
- .fake_quant_with_min_max_vars_per_channel(inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) ⇒ Object
- .fake_quant_with_min_max_vars_per_channel_gradient(gradients: nil, inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) ⇒ Object
- .fake_queue(resource: nil) ⇒ Object
- .fft(input: nil) ⇒ Object
- .fft2d(input: nil) ⇒ Object
- .fft3d(input: nil) ⇒ Object
- .fifo_queue(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) ⇒ Object
- .fifo_queue_v2(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) ⇒ Object
- .fill(dims: nil, value: nil, index_type: nil) ⇒ Object
- .filter_by_last_component_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .filter_dataset(input_dataset: nil, other_arguments: nil, predicate: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .fingerprint(data: nil, method: nil) ⇒ Object
- .fixed_length_record_dataset(filenames: nil, header_bytes: nil, record_bytes: nil, footer_bytes: nil, buffer_size: nil) ⇒ Object
- .fixed_length_record_dataset_v2(filenames: nil, header_bytes: nil, record_bytes: nil, footer_bytes: nil, buffer_size: nil, compression_type: nil) ⇒ Object
- .fixed_length_record_reader(header_bytes: nil, record_bytes: nil, footer_bytes: nil, hop_bytes: nil, container: nil, shared_name: nil) ⇒ Object
- .fixed_length_record_reader_v2(header_bytes: nil, record_bytes: nil, footer_bytes: nil, hop_bytes: nil, container: nil, shared_name: nil, encoding: nil) ⇒ Object
- .fixed_unigram_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, vocab_file: nil, distortion: nil, num_reserved_ids: nil, num_shards: nil, shard: nil, unigrams: nil, seed: nil, seed2: nil) ⇒ Object
- .flat_map_dataset(input_dataset: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .floor(x: nil) ⇒ Object
- .floor_div(x: nil, y: nil) ⇒ Object
- .floor_mod(x: nil, y: nil) ⇒ Object
- .flush_summary_writer(writer: nil) ⇒ Object
- .for(start: nil, limit: nil, delta: nil, input: nil, body: nil) ⇒ Object
- .fractional_avg_pool(value: nil, pooling_ratio: nil, pseudo_random: nil, overlapping: nil, deterministic: nil, seed: nil, seed2: nil) ⇒ Object
- .fractional_avg_pool_grad(orig_input_tensor_shape: nil, out_backprop: nil, row_pooling_sequence: nil, col_pooling_sequence: nil, overlapping: nil) ⇒ Object
- .fractional_max_pool(value: nil, pooling_ratio: nil, pseudo_random: nil, overlapping: nil, deterministic: nil, seed: nil, seed2: nil) ⇒ Object
- .fractional_max_pool_grad(orig_input: nil, orig_output: nil, out_backprop: nil, row_pooling_sequence: nil, col_pooling_sequence: nil, overlapping: nil) ⇒ Object
- .fused_batch_norm(x: nil, scale: nil, offset: nil, mean: nil, variance: nil, epsilon: nil, data_format: nil, is_training: nil) ⇒ Object
- .fused_batch_norm_grad(y_backprop: nil, x: nil, scale: nil, reserve_space_1: nil, reserve_space_2: nil, epsilon: nil, data_format: nil, is_training: nil) ⇒ Object
- .fused_batch_norm_grad_v2(y_backprop: nil, x: nil, scale: nil, reserve_space_1: nil, reserve_space_2: nil, epsilon: nil, data_format: nil, is_training: nil) ⇒ Object
- .fused_batch_norm_grad_v3(y_backprop: nil, x: nil, scale: nil, reserve_space_1: nil, reserve_space_2: nil, reserve_space_3: nil, epsilon: nil, data_format: nil, is_training: nil) ⇒ Object
- .fused_batch_norm_v2(x: nil, scale: nil, offset: nil, mean: nil, variance: nil, epsilon: nil, data_format: nil, is_training: nil) ⇒ Object
- .fused_batch_norm_v3(x: nil, scale: nil, offset: nil, mean: nil, variance: nil, epsilon: nil, data_format: nil, is_training: nil) ⇒ Object
- .fused_pad_conv2d(input: nil, paddings: nil, filter: nil, mode: nil, strides: nil, padding: nil) ⇒ Object
- .fused_resize_and_pad_conv2d(input: nil, size: nil, paddings: nil, filter: nil, resize_align_corners: nil, mode: nil, strides: nil, padding: nil) ⇒ Object
- .gather(params: nil, indices: nil, validate_indices: nil) ⇒ Object
- .gather_nd(params: nil, indices: nil) ⇒ Object
- .gather_v2(params: nil, indices: nil, axis: nil, batch_dims: nil) ⇒ Object
- .gcs_configure_block_cache(max_cache_size: nil, block_size: nil, max_staleness: nil) ⇒ Object
- .gcs_configure_credentials(json: nil) ⇒ Object
- .generate_big_query_reader_partitions(project_id: nil, dataset_id: nil, table_id: nil, columns: nil, timestamp_millis: nil, num_partitions: nil, test_end_point: nil) ⇒ Object
- .generate_vocab_remapping(new_vocab_file: nil, old_vocab_file: nil, new_vocab_offset: nil, num_new_vocab: nil, old_vocab_size: nil) ⇒ Object
- .generator_dataset(init_func_other_args: nil, next_func_other_args: nil, finalize_func_other_args: nil, init_func: nil, next_func: nil, finalize_func: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .get_session_handle(value: nil) ⇒ Object
- .get_session_handle_v2(value: nil) ⇒ Object
- .get_session_tensor(handle: nil, dtype: nil) ⇒ Object
- .greater(x: nil, y: nil) ⇒ Object
- .greater_equal(x: nil, y: nil) ⇒ Object
- .guarantee_const(input: nil) ⇒ Object
- .hash_table(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil) ⇒ Object
- .hash_table_v2(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil) ⇒ Object
- .histogram_fixed_width(values: nil, value_range: nil, nbins: nil, dtype: nil) ⇒ Object
- .histogram_summary(tag: nil, values: nil) ⇒ Object
- .host_const(value: nil, dtype: nil) ⇒ Object
- .hsv_to_rgb(images: nil) ⇒ Object
- .identity(input: nil) ⇒ Object
- .identity_n(input: nil) ⇒ Object
- .identity_reader(container: nil, shared_name: nil) ⇒ Object
- .identity_reader_v2(container: nil, shared_name: nil) ⇒ Object
- .if(cond: nil, input: nil, then_branch: nil, else_branch: nil, output_shapes: nil) ⇒ Object
- .ifft(input: nil) ⇒ Object
- .ifft2d(input: nil) ⇒ Object
- .ifft3d(input: nil) ⇒ Object
- .igamma(a: nil, x: nil) ⇒ Object
- .igamma_grad_a(a: nil, x: nil) ⇒ Object
- .igammac(a: nil, x: nil) ⇒ Object
- .imag(input: nil) ⇒ Object
- .image_summary(tag: nil, tensor: nil, max_images: nil, bad_color: nil) ⇒ Object
- .immutable_const(dtype: nil, shape: nil, memory_region_name: nil) ⇒ Object
- .import_event(writer: nil, event: nil) ⇒ Object
- .in_top_k(predictions: nil, targets: nil, k: nil) ⇒ Object
- .in_top_kv2(predictions: nil, targets: nil, k: nil) ⇒ Object
- .infeed_dequeue(dtype: nil, shape: nil) ⇒ Object
- .infeed_dequeue_tuple(dtypes: nil, shapes: nil) ⇒ Object
- .infeed_enqueue(input: nil, dtype: nil, shape: nil, layout: nil, device_ordinal: nil) ⇒ Object
- .infeed_enqueue_prelinearized_buffer(input: nil, device_ordinal: nil) ⇒ Object
- .infeed_enqueue_tuple(inputs: nil, dtypes: nil, shapes: nil, layouts: nil, device_ordinal: nil) ⇒ Object
- .initialize_table(table_handle: nil, keys: nil, values: nil) ⇒ Object
- .initialize_table_from_text_file(table_handle: nil, filename: nil, key_index: nil, value_index: nil, vocab_size: nil, delimiter: nil) ⇒ Object
- .initialize_table_from_text_file_v2(table_handle: nil, filename: nil, key_index: nil, value_index: nil, vocab_size: nil, delimiter: nil) ⇒ Object
- .initialize_table_v2(table_handle: nil, keys: nil, values: nil) ⇒ Object
- .inplace_add(x: nil, i: nil, v: nil) ⇒ Object
- .inplace_sub(x: nil, i: nil, v: nil) ⇒ Object
- .inplace_update(x: nil, i: nil, v: nil) ⇒ Object
- .interleave_dataset(input_dataset: nil, other_arguments: nil, cycle_length: nil, block_length: nil, f: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .inv(x: nil) ⇒ Object
- .inv_grad(y: nil, dy: nil) ⇒ Object
- .invert(x: nil) ⇒ Object
- .invert_permutation(x: nil) ⇒ Object
- .irfft(input: nil, fft_length: nil) ⇒ Object
- .irfft2d(input: nil, fft_length: nil) ⇒ Object
- .irfft3d(input: nil, fft_length: nil) ⇒ Object
- .is_boosted_trees_ensemble_initialized(tree_ensemble_handle: nil) ⇒ Object
- .is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle: nil) ⇒ Object
- .is_finite(x: nil) ⇒ Object
- .is_inf(x: nil) ⇒ Object
- .is_nan(x: nil) ⇒ Object
- .is_variable_initialized(ref: nil, dtype: nil) ⇒ Object
- .iterator(shared_name: nil, container: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .iterator_from_string_handle(string_handle: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .iterator_from_string_handle_v2(string_handle: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .iterator_get_next(iterator: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .iterator_get_next_as_optional(iterator: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .iterator_get_next_sync(iterator: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .iterator_to_string_handle(resource_handle: nil) ⇒ Object
- .iterator_v2(shared_name: nil, container: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .kmc2_chain_initialization(distances: nil, seed: nil) ⇒ Object
- .kmeans_plus_plus_initialization(points: nil, num_to_sample: nil, seed: nil, num_retries_per_sample: nil) ⇒ Object
- .l2_loss(t: nil) ⇒ Object
- .leaky_relu(features: nil, alpha: nil) ⇒ Object
- .leaky_relu_grad(gradients: nil, features: nil, alpha: nil) ⇒ Object
- .learned_unigram_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, seed: nil, seed2: nil) ⇒ Object
- .left_shift(x: nil, y: nil) ⇒ Object
- .less(x: nil, y: nil) ⇒ Object
- .less_equal(x: nil, y: nil) ⇒ Object
- .lgamma(x: nil) ⇒ Object
- .lin_space(start: nil, stop: nil, num: nil) ⇒ Object
- .list_diff(x: nil, y: nil, out_idx: nil) ⇒ Object
- .lmdb_reader(container: nil, shared_name: nil) ⇒ Object
- .load_and_remap_matrix(ckpt_path: nil, old_tensor_name: nil, row_remapping: nil, col_remapping: nil, initializing_values: nil, num_rows: nil, num_cols: nil, max_rows_in_memory: nil) ⇒ Object
- .load_tpu_embedding_adadelta_parameters(parameters: nil, accumulators: nil, updates: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters: nil, accumulators: nil, updates: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_adagrad_parameters(parameters: nil, accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters: nil, accumulators: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_adam_parameters(parameters: nil, momenta: nil, velocities: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_adam_parameters_grad_accum_debug(parameters: nil, momenta: nil, velocities: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_centered_rms_prop_parameters(parameters: nil, ms: nil, mom: nil, mg: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_ftrl_parameters(parameters: nil, accumulators: nil, linears: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters: nil, accumulators: nil, linears: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_mdl_adagrad_light_parameters(parameters: nil, accumulators: nil, weights: nil, benefits: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_momentum_parameters(parameters: nil, momenta: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters: nil, momenta: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_proximal_adagrad_parameters(parameters: nil, accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters: nil, accumulators: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_rms_prop_parameters(parameters: nil, ms: nil, mom: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters: nil, ms: nil, mom: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .load_tpu_embedding_stochastic_gradient_descent_parameters(parameters: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .log(x: nil) ⇒ Object
- .log1p(x: nil) ⇒ Object
- .log_matrix_determinant(input: nil) ⇒ Object
- .log_softmax(logits: nil) ⇒ Object
- .log_uniform_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, seed: nil, seed2: nil) ⇒ Object
- .logical_and(x: nil, y: nil) ⇒ Object
- .logical_not(x: nil) ⇒ Object
- .logical_or(x: nil, y: nil) ⇒ Object
- .lookup_table_export(table_handle: nil) ⇒ Object
- .lookup_table_export_v2(table_handle: nil) ⇒ Object
- .lookup_table_find(table_handle: nil, keys: nil, default_value: nil) ⇒ Object
- .lookup_table_find_v2(table_handle: nil, keys: nil, default_value: nil) ⇒ Object
- .lookup_table_import(table_handle: nil, keys: nil, values: nil) ⇒ Object
- .lookup_table_import_v2(table_handle: nil, keys: nil, values: nil) ⇒ Object
- .lookup_table_insert(table_handle: nil, keys: nil, values: nil) ⇒ Object
- .lookup_table_insert_v2(table_handle: nil, keys: nil, values: nil) ⇒ Object
- .lookup_table_remove_v2(table_handle: nil, keys: nil) ⇒ Object
- .lookup_table_size(table_handle: nil) ⇒ Object
- .lookup_table_size_v2(table_handle: nil) ⇒ Object
- .loop_cond(input: nil) ⇒ Object
- .lower_bound(sorted_inputs: nil, values: nil, out_type: nil) ⇒ Object
- .lrn(input: nil, depth_radius: nil, bias: nil, alpha: nil, beta: nil) ⇒ Object
- .lrn_grad(input_grads: nil, input_image: nil, output_image: nil, depth_radius: nil, bias: nil, alpha: nil, beta: nil) ⇒ Object
- .lu(input: nil, output_idx_type: nil) ⇒ Object
- .make_iterator(dataset: nil, iterator: nil) ⇒ Object
- .map_clear(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .map_dataset(input_dataset: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil, use_inter_op_parallelism: nil, preserve_cardinality: nil) ⇒ Object
- .map_defun(arguments: nil, captured_inputs: nil, output_types: nil, output_shapes: nil, f: nil, max_intra_op_parallelism: nil) ⇒ Object
- .map_incomplete_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .map_peek(key: nil, indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .map_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .map_stage(key: nil, indices: nil, values: nil, capacity: nil, memory_limit: nil, dtypes: nil, fake_dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .map_unstage(key: nil, indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .map_unstage_no_key(indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .mat_mul(a: nil, b: nil, transpose_a: nil, transpose_b: nil) ⇒ Object
- .matching_files(pattern: nil) ⇒ Object
- .matrix_band_part(input: nil, num_lower: nil, num_upper: nil) ⇒ Object
- .matrix_determinant(input: nil) ⇒ Object
- .matrix_diag(diagonal: nil) ⇒ Object
- .matrix_diag_part(input: nil) ⇒ Object
- .matrix_exponential(input: nil) ⇒ Object
- .matrix_inverse(input: nil, adjoint: nil) ⇒ Object
- .matrix_logarithm(input: nil) ⇒ Object
- .matrix_set_diag(input: nil, diagonal: nil) ⇒ Object
- .matrix_solve(matrix: nil, rhs: nil, adjoint: nil) ⇒ Object
- .matrix_solve_ls(matrix: nil, rhs: nil, l2_regularizer: nil, fast: nil) ⇒ Object
- .matrix_square_root(input: nil) ⇒ Object
- .matrix_triangular_solve(matrix: nil, rhs: nil, lower: nil, adjoint: nil) ⇒ Object
- .max(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
- .max_pool(input: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
- .max_pool3d(input: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
- .max_pool3d_grad(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
- .max_pool3d_grad_grad(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
- .max_pool_grad(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
- .max_pool_grad_grad(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
- .max_pool_grad_grad_v2(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
- .max_pool_grad_grad_with_argmax(input: nil, grad: nil, argmax: nil, ksize: nil, strides: nil, padding: nil, include_batch_in_index: nil) ⇒ Object
- .max_pool_grad_v2(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
- .max_pool_grad_with_argmax(input: nil, grad: nil, argmax: nil, ksize: nil, strides: nil, padding: nil, include_batch_in_index: nil) ⇒ Object
- .max_pool_v2(input: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
- .max_pool_with_argmax(input: nil, ksize: nil, strides: nil, padding: nil, include_batch_in_index: nil) ⇒ Object
- .maximum(x: nil, y: nil) ⇒ Object
- .mean(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
- .merge(inputs: nil) ⇒ Object
- .merge_summary(inputs: nil) ⇒ Object
- .merge_v2_checkpoints(checkpoint_prefixes: nil, destination_prefix: nil, delete_old_dirs: nil) ⇒ Object
- .mfcc(spectrogram: nil, sample_rate: nil, upper_frequency_limit: nil, lower_frequency_limit: nil, filterbank_channel_count: nil, dct_coefficient_count: nil) ⇒ Object
- .min(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
- .minimum(x: nil, y: nil) ⇒ Object
- .mirror_pad(input: nil, paddings: nil, mode: nil) ⇒ Object
- .mirror_pad_grad(input: nil, paddings: nil, mode: nil) ⇒ Object
- .mod(x: nil, y: nil) ⇒ Object
- .model_dataset(input_dataset: nil, cpu_budget: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .mul(x: nil, y: nil) ⇒ Object
- .mul_no_nan(x: nil, y: nil) ⇒ Object
- .multi_device_iterator(devices: nil, shared_name: nil, container: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .multi_device_iterator_from_string_handle(string_handle: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .multi_device_iterator_get_next_from_shard(multi_device_iterator: nil, shard_num: nil, incarnation_id: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .multi_device_iterator_init(dataset: nil, multi_device_iterator: nil, max_buffer_size: nil) ⇒ Object
- .multi_device_iterator_to_string_handle(multi_device_iterator: nil) ⇒ Object
- .multinomial(logits: nil, num_samples: nil, seed: nil, seed2: nil, output_dtype: nil) ⇒ Object
- .mutable_dense_hash_table(empty_key: nil, container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil, value_shape: nil, initial_num_buckets: nil, max_load_factor: nil) ⇒ Object
- .mutable_dense_hash_table_v2(empty_key: nil, deleted_key: nil, container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil, value_shape: nil, initial_num_buckets: nil, max_load_factor: nil) ⇒ Object
- .mutable_hash_table(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil) ⇒ Object
- .mutable_hash_table_of_tensors(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil, value_shape: nil) ⇒ Object
- .mutable_hash_table_of_tensors_v2(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil, value_shape: nil) ⇒ Object
- .mutable_hash_table_v2(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil) ⇒ Object
- .mutex_lock(mutex: nil) ⇒ Object
- .mutex_v2(container: nil, shared_name: nil) ⇒ Object
- .nccl_all_reduce(input: nil, reduction: nil, num_devices: nil, shared_name: nil) ⇒ Object
- .nccl_broadcast(input: nil, shape: nil) ⇒ Object
- .nccl_reduce(input: nil, reduction: nil, num_devices: nil) ⇒ Object
- .nearest_neighbors(points: nil, centers: nil, k: nil) ⇒ Object
- .neg(x: nil) ⇒ Object
- .neg_train(w_in: nil, w_out: nil, examples: nil, labels: nil, lr: nil, vocab_count: nil, num_negative_samples: nil) ⇒ Object
- .next_after(x1: nil, x2: nil) ⇒ Object
- .next_iteration(data: nil) ⇒ Object
- .no_op ⇒ Object
- .non_deterministic_ints(shape: nil, dtype: nil, shape_dtype: nil) ⇒ Object
- .non_max_suppression(boxes: nil, scores: nil, max_output_size: nil, iou_threshold: nil) ⇒ Object
- .non_max_suppression_v2(boxes: nil, scores: nil, max_output_size: nil, iou_threshold: nil) ⇒ Object
- .non_max_suppression_v3(boxes: nil, scores: nil, max_output_size: nil, iou_threshold: nil, score_threshold: nil) ⇒ Object
- .non_max_suppression_v4(boxes: nil, scores: nil, max_output_size: nil, iou_threshold: nil, score_threshold: nil, pad_to_max_output_size: nil) ⇒ Object
- .non_max_suppression_with_overlaps(overlaps: nil, scores: nil, max_output_size: nil, overlap_threshold: nil, score_threshold: nil) ⇒ Object
- .not_equal(x: nil, y: nil) ⇒ Object
- .nth_element(input: nil, n: nil, reverse: nil) ⇒ Object
- .one_hot(indices: nil, depth: nil, on_value: nil, off_value: nil, axis: nil) ⇒ Object
- .one_shot_iterator(dataset_factory: nil, output_types: nil, output_shapes: nil, container: nil, shared_name: nil) ⇒ Object
- .ones_like(x: nil) ⇒ Object
- .optimize_dataset(input_dataset: nil, optimizations: nil, output_types: nil, output_shapes: nil, optimization_configs: nil) ⇒ Object
- .optional_from_value(components: nil) ⇒ Object
- .optional_get_value(optional: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .optional_has_value(optional: nil) ⇒ Object
- .optional_none ⇒ Object
- .ordered_map_clear(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .ordered_map_incomplete_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .ordered_map_peek(key: nil, indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .ordered_map_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .ordered_map_stage(key: nil, indices: nil, values: nil, capacity: nil, memory_limit: nil, dtypes: nil, fake_dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .ordered_map_unstage(key: nil, indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .ordered_map_unstage_no_key(indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .outfeed_dequeue(dtype: nil, shape: nil, device_ordinal: nil) ⇒ Object
- .outfeed_dequeue_tuple(dtypes: nil, shapes: nil, device_ordinal: nil) ⇒ Object
- .outfeed_enqueue(input: nil, dtype: nil) ⇒ Object
- .outfeed_enqueue_tuple(inputs: nil, dtypes: nil) ⇒ Object
- .pack(values: nil, axis: nil) ⇒ Object
- .pad(input: nil, paddings: nil) ⇒ Object
- .pad_v2(input: nil, paddings: nil, constant_values: nil) ⇒ Object
- .padded_batch_dataset(input_dataset: nil, batch_size: nil, padded_shapes: nil, padding_values: nil, output_shapes: nil) ⇒ Object
- .padded_batch_dataset_v2(input_dataset: nil, batch_size: nil, padded_shapes: nil, padding_values: nil, drop_remainder: nil, parallel_copy: nil, output_shapes: nil) ⇒ Object
- .padding_fifo_queue(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) ⇒ Object
- .padding_fifo_queue_v2(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) ⇒ Object
- .parallel_concat(values: nil, shape: nil) ⇒ Object
- .parallel_dynamic_stitch(indices: nil, data: nil) ⇒ Object
- .parallel_interleave_dataset_v2(input_dataset: nil, other_arguments: nil, cycle_length: nil, block_length: nil, num_parallel_calls: nil, f: nil, output_types: nil, output_shapes: nil, sloppy: nil) ⇒ Object
- .parallel_map_dataset(input_dataset: nil, other_arguments: nil, num_parallel_calls: nil, f: nil, output_types: nil, output_shapes: nil, use_inter_op_parallelism: nil, sloppy: nil, preserve_cardinality: nil) ⇒ Object
- .parameterized_truncated_normal(shape: nil, means: nil, stdevs: nil, minvals: nil, maxvals: nil, seed: nil, seed2: nil, dtype: nil) ⇒ Object
- .parse_example(serialized: nil, names: nil, sparse_keys: nil, dense_keys: nil, dense_defaults: nil, sparse_types: nil, dense_shapes: nil) ⇒ Object
- .parse_sequence_example(serialized: nil, debug_name: nil, context_dense_defaults: nil, feature_list_dense_missing_assumed_empty: nil, context_sparse_keys: nil, context_dense_keys: nil, feature_list_sparse_keys: nil, feature_list_dense_keys: nil, context_sparse_types: nil, feature_list_dense_types: nil, context_dense_shapes: nil, feature_list_sparse_types: nil, feature_list_dense_shapes: nil) ⇒ Object
- .parse_single_example(serialized: nil, dense_defaults: nil, num_sparse: nil, sparse_keys: nil, dense_keys: nil, sparse_types: nil, dense_shapes: nil) ⇒ Object
- .parse_single_sequence_example(serialized: nil, feature_list_dense_missing_assumed_empty: nil, context_sparse_keys: nil, context_dense_keys: nil, feature_list_sparse_keys: nil, feature_list_dense_keys: nil, context_dense_defaults: nil, debug_name: nil, context_sparse_types: nil, feature_list_dense_types: nil, context_dense_shapes: nil, feature_list_sparse_types: nil, feature_list_dense_shapes: nil) ⇒ Object
- .parse_tensor(serialized: nil, out_type: nil) ⇒ Object
- .partitioned_call(args: nil, f: nil, config: nil, config_proto: nil, executor_type: nil) ⇒ Object
- .placeholder(dtype: nil, shape: nil) ⇒ Object
- .placeholder_v2(dtype: nil, shape: nil) ⇒ Object
- .placeholder_with_default(input: nil, dtype: nil, shape: nil) ⇒ Object
- .polygamma(a: nil, x: nil) ⇒ Object
- .population_count(x: nil) ⇒ Object
- .pow(x: nil, y: nil) ⇒ Object
- .prefetch_dataset(input_dataset: nil, buffer_size: nil, output_types: nil, output_shapes: nil, slack_period: nil) ⇒ Object
- .prelinearize(input: nil, dtype: nil, shape: nil, layout: nil) ⇒ Object
- .prelinearize_tuple(inputs: nil, dtypes: nil, shapes: nil, layouts: nil) ⇒ Object
- .prevent_gradient(input: nil, message: nil) ⇒ Object
- .print(input: nil, data: nil, message: nil, first_n: nil, summarize: nil) ⇒ Object
- .print_v2(input: nil, output_stream: nil, stop: nil) ⇒ Object
- .priority_queue(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) ⇒ Object
- .priority_queue_v2(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) ⇒ Object
- .prod(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
- .py_func(input: nil, token: nil) ⇒ Object
- .py_func_stateless(input: nil, token: nil) ⇒ Object
- .qr(input: nil, full_matrices: nil) ⇒ Object
- .quantize_and_dequantize(input: nil, signed_input: nil, num_bits: nil, range_given: nil, input_min: nil, input_max: nil) ⇒ Object
- .quantize_and_dequantize_v2(input: nil, input_min: nil, input_max: nil, signed_input: nil, num_bits: nil, range_given: nil, round_mode: nil) ⇒ Object
- .quantize_and_dequantize_v3(input: nil, input_min: nil, input_max: nil, num_bits: nil, signed_input: nil, range_given: nil) ⇒ Object
- .quantize_down_and_shrink_range(input: nil, input_min: nil, input_max: nil, out_type: nil) ⇒ Object
- .quantize_v2(input: nil, min_range: nil, max_range: nil, mode: nil, round_mode: nil) ⇒ Object
- .quantized_add(x: nil, y: nil, min_x: nil, max_x: nil, min_y: nil, max_y: nil) ⇒ Object
- .quantized_avg_pool(input: nil, min_input: nil, max_input: nil, ksize: nil, strides: nil, padding: nil) ⇒ Object
- .quantized_batch_norm_with_global_normalization(t: nil, t_min: nil, t_max: nil, m: nil, m_min: nil, m_max: nil, v: nil, v_min: nil, v_max: nil, beta: nil, beta_min: nil, beta_max: nil, gamma: nil, gamma_min: nil, gamma_max: nil, out_type: nil, variance_epsilon: nil, scale_after_normalization: nil) ⇒ Object
- .quantized_bias_add(input: nil, bias: nil, min_input: nil, max_input: nil, min_bias: nil, max_bias: nil, out_type: nil) ⇒ Object
- .quantized_concat(concat_dim: nil, values: nil, input_mins: nil, input_maxes: nil) ⇒ Object
- .quantized_conv2d(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
- .quantized_conv2d_and_relu(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
- .quantized_conv2d_and_relu_and_requantize(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
- .quantized_conv2d_and_requantize(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
- .quantized_conv2d_per_channel(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
- .quantized_conv2d_with_bias(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
- .quantized_conv2d_with_bias_and_relu(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
- .quantized_conv2d_with_bias_and_relu_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
- .quantized_conv2d_with_bias_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
- .quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, summand: nil, min_summand: nil, max_summand: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
- .quantized_conv2d_with_bias_sum_and_relu(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, summand: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
- .quantized_conv2d_with_bias_sum_and_relu_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, summand: nil, min_summand: nil, max_summand: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
- .quantized_depthwise_conv2d(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
- .quantized_depthwise_conv2d_with_bias(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
- .quantized_depthwise_conv2d_with_bias_and_relu(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
- .quantized_depthwise_conv2d_with_bias_and_relu_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
- .quantized_instance_norm(x: nil, x_min: nil, x_max: nil, output_range_given: nil, given_y_min: nil, given_y_max: nil, variance_epsilon: nil, min_separation: nil) ⇒ Object
- .quantized_mat_mul(a: nil, b: nil, min_a: nil, max_a: nil, min_b: nil, max_b: nil, transpose_a: nil, transpose_b: nil) ⇒ Object
- .quantized_max_pool(input: nil, min_input: nil, max_input: nil, ksize: nil, strides: nil, padding: nil) ⇒ Object
- .quantized_mul(x: nil, y: nil, min_x: nil, max_x: nil, min_y: nil, max_y: nil) ⇒ Object
- .quantized_relu(features: nil, min_features: nil, max_features: nil, out_type: nil) ⇒ Object
- .quantized_relu6(features: nil, min_features: nil, max_features: nil, out_type: nil) ⇒ Object
- .quantized_relu_x(features: nil, max_value: nil, min_features: nil, max_features: nil, out_type: nil) ⇒ Object
- .quantized_reshape(tensor: nil, shape: nil, input_min: nil, input_max: nil) ⇒ Object
- .quantized_resize_bilinear(images: nil, size: nil, min: nil, max: nil, align_corners: nil, half_pixel_centers: nil) ⇒ Object
- .queue_close(handle: nil, cancel_pending_enqueues: nil) ⇒ Object
- .queue_close_v2(handle: nil, cancel_pending_enqueues: nil) ⇒ Object
- .queue_dequeue(handle: nil, component_types: nil, timeout_ms: nil) ⇒ Object
- .queue_dequeue_many(handle: nil, n: nil, component_types: nil, timeout_ms: nil) ⇒ Object
- .queue_dequeue_many_v2(handle: nil, n: nil, component_types: nil, timeout_ms: nil) ⇒ Object
- .queue_dequeue_up_to(handle: nil, n: nil, component_types: nil, timeout_ms: nil) ⇒ Object
- .queue_dequeue_up_to_v2(handle: nil, n: nil, component_types: nil, timeout_ms: nil) ⇒ Object
- .queue_dequeue_v2(handle: nil, component_types: nil, timeout_ms: nil) ⇒ Object
- .queue_enqueue(handle: nil, components: nil, timeout_ms: nil) ⇒ Object
- .queue_enqueue_many(handle: nil, components: nil, timeout_ms: nil) ⇒ Object
- .queue_enqueue_many_v2(handle: nil, components: nil, timeout_ms: nil) ⇒ Object
- .queue_enqueue_v2(handle: nil, components: nil, timeout_ms: nil) ⇒ Object
- .queue_is_closed(handle: nil) ⇒ Object
- .queue_is_closed_v2(handle: nil) ⇒ Object
- .queue_size(handle: nil) ⇒ Object
- .queue_size_v2(handle: nil) ⇒ Object
- .ragged_gather(params_nested_splits: nil, params_dense_values: nil, indices: nil) ⇒ Object
- .ragged_range(starts: nil, limits: nil, deltas: nil) ⇒ Object
- .ragged_tensor_from_variant(encoded_ragged: nil, input_ragged_rank: nil, output_ragged_rank: nil) ⇒ Object
- .ragged_tensor_to_sparse(rt_nested_splits: nil, rt_dense_values: nil) ⇒ Object
- .ragged_tensor_to_variant(rt_nested_splits: nil, rt_dense_values: nil, batched_input: nil) ⇒ Object
- .random_crop(image: nil, size: nil, seed: nil, seed2: nil) ⇒ Object
- .random_gamma(shape: nil, alpha: nil, seed: nil, seed2: nil) ⇒ Object
- .random_gamma_grad(alpha: nil, sample: nil) ⇒ Object
- .random_poisson(shape: nil, rate: nil, seed: nil, seed2: nil, dtype: nil) ⇒ Object
- .random_poisson_v2(shape: nil, rate: nil, seed: nil, seed2: nil, dtype: nil) ⇒ Object
- .random_shuffle(value: nil, seed: nil, seed2: nil) ⇒ Object
- .random_shuffle_queue(component_types: nil, shapes: nil, capacity: nil, min_after_dequeue: nil, seed: nil, seed2: nil, container: nil, shared_name: nil) ⇒ Object
- .random_shuffle_queue_v2(component_types: nil, shapes: nil, capacity: nil, min_after_dequeue: nil, seed: nil, seed2: nil, container: nil, shared_name: nil) ⇒ Object
- .random_standard_normal(shape: nil, seed: nil, seed2: nil, dtype: nil) ⇒ Object
- .random_uniform(shape: nil, seed: nil, seed2: nil, dtype: nil) ⇒ Object
- .random_uniform_int(shape: nil, minval: nil, maxval: nil, seed: nil, seed2: nil) ⇒ Object
- .range(start: nil, limit: nil, delta: nil) ⇒ Object
- .range_dataset(start: nil, stop: nil, step: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .rank(input: nil) ⇒ Object
- .read_file(filename: nil) ⇒ Object
- .read_variable_op(resource: nil, dtype: nil) ⇒ Object
- .reader_num_records_produced(reader_handle: nil) ⇒ Object
- .reader_num_records_produced_v2(reader_handle: nil) ⇒ Object
- .reader_num_work_units_completed(reader_handle: nil) ⇒ Object
- .reader_num_work_units_completed_v2(reader_handle: nil) ⇒ Object
- .reader_read(reader_handle: nil, queue_handle: nil) ⇒ Object
- .reader_read_up_to(reader_handle: nil, queue_handle: nil, num_records: nil) ⇒ Object
- .reader_read_up_to_v2(reader_handle: nil, queue_handle: nil, num_records: nil) ⇒ Object
- .reader_read_v2(reader_handle: nil, queue_handle: nil) ⇒ Object
- .reader_reset(reader_handle: nil) ⇒ Object
- .reader_reset_v2(reader_handle: nil) ⇒ Object
- .reader_restore_state(reader_handle: nil, state: nil) ⇒ Object
- .reader_restore_state_v2(reader_handle: nil, state: nil) ⇒ Object
- .reader_serialize_state(reader_handle: nil) ⇒ Object
- .reader_serialize_state_v2(reader_handle: nil) ⇒ Object
- .real(input: nil) ⇒ Object
- .real_div(x: nil, y: nil) ⇒ Object
- .reciprocal(x: nil) ⇒ Object
- .reciprocal_grad(y: nil, dy: nil) ⇒ Object
- .record_input(file_pattern: nil, file_random_seed: nil, file_shuffle_shift_ratio: nil, file_buffer_size: nil, file_parallelism: nil, batch_size: nil, compression_type: nil) ⇒ Object
- .recv_tpu_embedding_activations(num_outputs: nil, config: nil) ⇒ Object
- .reduce_dataset(input_dataset: nil, initial_state: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil, use_inter_op_parallelism: nil) ⇒ Object
- .reduce_join(inputs: nil, reduction_indices: nil, keep_dims: nil, separator: nil) ⇒ Object
- .ref_enter(data: nil, frame_name: nil, is_constant: nil, parallel_iterations: nil) ⇒ Object
- .ref_exit(data: nil) ⇒ Object
- .ref_identity(input: nil) ⇒ Object
- .ref_merge(inputs: nil) ⇒ Object
- .ref_next_iteration(data: nil) ⇒ Object
- .ref_select(index: nil, inputs: nil) ⇒ Object
- .ref_switch(data: nil, pred: nil) ⇒ Object
- .regex_full_match(input: nil, pattern: nil) ⇒ Object
- .regex_replace(input: nil, pattern: nil, rewrite: nil, replace_global: nil) ⇒ Object
- .relu(features: nil) ⇒ Object
- .relu6(features: nil) ⇒ Object
- .relu6_grad(gradients: nil, features: nil) ⇒ Object
- .relu_grad(gradients: nil, features: nil) ⇒ Object
- .remote_call(target: nil, args: nil, f: nil) ⇒ Object
- .remote_fused_graph_execute(inputs: nil, serialized_remote_fused_graph_execute_info: nil) ⇒ Object
- .repeat_dataset(input_dataset: nil, count: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .requantization_range(input: nil, input_min: nil, input_max: nil) ⇒ Object
- .requantization_range_per_channel(input: nil, input_min: nil, input_max: nil, clip_value_max: nil) ⇒ Object
- .requantize(input: nil, input_min: nil, input_max: nil, requested_output_min: nil, requested_output_max: nil, out_type: nil) ⇒ Object
- .requantize_per_channel(input: nil, input_min: nil, input_max: nil, requested_output_min: nil, requested_output_max: nil, out_type: nil) ⇒ Object
- .reshape(tensor: nil, shape: nil) ⇒ Object
- .resize_area(images: nil, size: nil, align_corners: nil) ⇒ Object
- .resize_bicubic(images: nil, size: nil, align_corners: nil, half_pixel_centers: nil) ⇒ Object
- .resize_bicubic_grad(grads: nil, original_image: nil, align_corners: nil, half_pixel_centers: nil) ⇒ Object
- .resize_bilinear(images: nil, size: nil, align_corners: nil, half_pixel_centers: nil) ⇒ Object
- .resize_bilinear_grad(grads: nil, original_image: nil, align_corners: nil, half_pixel_centers: nil) ⇒ Object
- .resize_nearest_neighbor(images: nil, size: nil, align_corners: nil, half_pixel_centers: nil) ⇒ Object
- .resize_nearest_neighbor_grad(grads: nil, size: nil, align_corners: nil, half_pixel_centers: nil) ⇒ Object
- .resource_apply_ada_max(var: nil, m: nil, v: nil, beta1_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
- .resource_apply_adadelta(var: nil, accum: nil, accum_update: nil, lr: nil, rho: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
- .resource_apply_adagrad(var: nil, accum: nil, lr: nil, grad: nil, use_locking: nil, update_slots: nil) ⇒ Object
- .resource_apply_adagrad_da(var: nil, gradient_accumulator: nil, gradient_squared_accumulator: nil, grad: nil, lr: nil, l1: nil, l2: nil, global_step: nil, use_locking: nil) ⇒ Object
- .resource_apply_adam(var: nil, m: nil, v: nil, beta1_power: nil, beta2_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
- .resource_apply_adam_with_amsgrad(var: nil, m: nil, v: nil, vhat: nil, beta1_power: nil, beta2_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
- .resource_apply_add_sign(var: nil, m: nil, lr: nil, alpha: nil, sign_decay: nil, beta: nil, grad: nil, use_locking: nil) ⇒ Object
- .resource_apply_centered_rms_prop(var: nil, mg: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
- .resource_apply_ftrl(var: nil, accum: nil, linear: nil, grad: nil, lr: nil, l1: nil, l2: nil, lr_power: nil, use_locking: nil) ⇒ Object
- .resource_apply_ftrl_v2(var: nil, accum: nil, linear: nil, grad: nil, lr: nil, l1: nil, l2: nil, l2_shrinkage: nil, lr_power: nil, use_locking: nil) ⇒ Object
- .resource_apply_gradient_descent(var: nil, alpha: nil, delta: nil, use_locking: nil) ⇒ Object
- .resource_apply_keras_momentum(var: nil, accum: nil, lr: nil, grad: nil, momentum: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
- .resource_apply_momentum(var: nil, accum: nil, lr: nil, grad: nil, momentum: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
- .resource_apply_power_sign(var: nil, m: nil, lr: nil, logbase: nil, sign_decay: nil, beta: nil, grad: nil, use_locking: nil) ⇒ Object
- .resource_apply_proximal_adagrad(var: nil, accum: nil, lr: nil, l1: nil, l2: nil, grad: nil, use_locking: nil) ⇒ Object
- .resource_apply_proximal_gradient_descent(var: nil, alpha: nil, l1: nil, l2: nil, delta: nil, use_locking: nil) ⇒ Object
- .resource_apply_rms_prop(var: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
- .resource_count_up_to(resource: nil, limit: nil) ⇒ Object
- .resource_gather(resource: nil, indices: nil, batch_dims: nil, validate_indices: nil, dtype: nil) ⇒ Object
- .resource_gather_nd(resource: nil, indices: nil, dtype: nil) ⇒ Object
- .resource_scatter_add(resource: nil, indices: nil, updates: nil, dtype: nil) ⇒ Object
- .resource_scatter_div(resource: nil, indices: nil, updates: nil, dtype: nil) ⇒ Object
- .resource_scatter_max(resource: nil, indices: nil, updates: nil, dtype: nil) ⇒ Object
- .resource_scatter_min(resource: nil, indices: nil, updates: nil, dtype: nil) ⇒ Object
- .resource_scatter_mul(resource: nil, indices: nil, updates: nil, dtype: nil) ⇒ Object
- .resource_scatter_nd_add(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
- .resource_scatter_nd_sub(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
- .resource_scatter_nd_update(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
- .resource_scatter_sub(resource: nil, indices: nil, updates: nil, dtype: nil) ⇒ Object
- .resource_scatter_update(resource: nil, indices: nil, updates: nil, dtype: nil) ⇒ Object
- .resource_sparse_apply_adadelta(var: nil, accum: nil, accum_update: nil, lr: nil, rho: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
- .resource_sparse_apply_adagrad(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, use_locking: nil, update_slots: nil) ⇒ Object
- .resource_sparse_apply_adagrad_da(var: nil, gradient_accumulator: nil, gradient_squared_accumulator: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, global_step: nil, use_locking: nil) ⇒ Object
- .resource_sparse_apply_centered_rms_prop(var: nil, mg: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
- .resource_sparse_apply_ftrl(var: nil, accum: nil, linear: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, lr_power: nil, use_locking: nil) ⇒ Object
- .resource_sparse_apply_ftrl_v2(var: nil, accum: nil, linear: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, l2_shrinkage: nil, lr_power: nil, use_locking: nil) ⇒ Object
- .resource_sparse_apply_keras_momentum(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, momentum: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
- .resource_sparse_apply_momentum(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, momentum: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
- .resource_sparse_apply_proximal_adagrad(var: nil, accum: nil, lr: nil, l1: nil, l2: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
- .resource_sparse_apply_proximal_gradient_descent(var: nil, alpha: nil, l1: nil, l2: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
- .resource_sparse_apply_rms_prop(var: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
- .resource_strided_slice_assign(ref: nil, start: nil, stop: nil, strides: nil, value: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) ⇒ Object
- .restore(file_pattern: nil, tensor_name: nil, dt: nil, preferred_shard: nil) ⇒ Object
- .restore_slice(file_pattern: nil, tensor_name: nil, shape_and_slice: nil, dt: nil, preferred_shard: nil) ⇒ Object
- .restore_v2(prefix: nil, tensor_names: nil, shape_and_slices: nil, dtypes: nil) ⇒ Object
- .retrieve_tpu_embedding_adadelta_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_adagrad_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_adam_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_adam_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_centered_rms_prop_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_ftrl_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_mdl_adagrad_light_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_momentum_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_proximal_adagrad_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_rms_prop_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .retrieve_tpu_embedding_stochastic_gradient_descent_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
- .reverse(tensor: nil, dims: nil) ⇒ Object
- .reverse_sequence(input: nil, seq_lengths: nil, seq_dim: nil, batch_dim: nil) ⇒ Object
- .reverse_v2(tensor: nil, axis: nil) ⇒ Object
- .rfft(input: nil, fft_length: nil) ⇒ Object
- .rfft2d(input: nil, fft_length: nil) ⇒ Object
- .rfft3d(input: nil, fft_length: nil) ⇒ Object
- .rgb_to_hsv(images: nil) ⇒ Object
- .right_shift(x: nil, y: nil) ⇒ Object
- .rint(x: nil) ⇒ Object
- .rng_skip(resource: nil, algorithm: nil, delta: nil) ⇒ Object
- .roll(input: nil, shift: nil, axis: nil) ⇒ Object
- .round(x: nil) ⇒ Object
- .rpc(address: nil, method: nil, request: nil, protocol: nil, fail_fast: nil, timeout_in_ms: nil) ⇒ Object
- .rsqrt(x: nil) ⇒ Object
- .rsqrt_grad(y: nil, dy: nil) ⇒ Object
- .sample_distorted_bounding_box(image_size: nil, bounding_boxes: nil, seed: nil, seed2: nil, min_object_covered: nil, aspect_ratio_range: nil, area_range: nil, max_attempts: nil, use_image_if_no_bounding_boxes: nil) ⇒ Object
- .sample_distorted_bounding_box_v2(image_size: nil, bounding_boxes: nil, min_object_covered: nil, seed: nil, seed2: nil, aspect_ratio_range: nil, area_range: nil, max_attempts: nil, use_image_if_no_bounding_boxes: nil) ⇒ Object
- .sampling_dataset(input_dataset: nil, rate: nil, seed: nil, seed2: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .save(filename: nil, tensor_names: nil, data: nil) ⇒ Object
- .save_slices(filename: nil, tensor_names: nil, shapes_and_slices: nil, data: nil) ⇒ Object
- .save_v2(prefix: nil, tensor_names: nil, shape_and_slices: nil, tensors: nil, dtypes: nil) ⇒ Object
- .scalar_summary(tags: nil, values: nil) ⇒ Object
- .scale_and_translate(images: nil, size: nil, scale: nil, translation: nil, kernel_type: nil, antialias: nil) ⇒ Object
- .scale_and_translate_grad(grads: nil, original_image: nil, scale: nil, translation: nil, kernel_type: nil, antialias: nil) ⇒ Object
- .scatter_add(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
- .scatter_div(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
- .scatter_max(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
- .scatter_min(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
- .scatter_mul(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
- .scatter_nd(indices: nil, updates: nil, shape: nil) ⇒ Object
- .scatter_nd_add(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
- .scatter_nd_non_aliasing_add(input: nil, indices: nil, updates: nil) ⇒ Object
- .scatter_nd_sub(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
- .scatter_nd_update(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
- .scatter_sub(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
- .scatter_update(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
- .sdca_fprint(input: nil) ⇒ Object
- .sdca_optimizer(sparse_example_indices: nil, sparse_feature_indices: nil, sparse_feature_values: nil, dense_features: nil, example_weights: nil, example_labels: nil, sparse_indices: nil, sparse_weights: nil, dense_weights: nil, example_state_data: nil, loss_type: nil, adaptative: nil, num_sparse_features: nil, num_sparse_features_with_values: nil, num_dense_features: nil, l1: nil, l2: nil, num_loss_partitions: nil, num_inner_iterations: nil) ⇒ Object
- .sdca_optimizer_v2(sparse_example_indices: nil, sparse_feature_indices: nil, sparse_feature_values: nil, dense_features: nil, example_weights: nil, example_labels: nil, sparse_indices: nil, sparse_weights: nil, dense_weights: nil, example_state_data: nil, loss_type: nil, adaptive: nil, num_sparse_features: nil, num_sparse_features_with_values: nil, num_dense_features: nil, l1: nil, l2: nil, num_loss_partitions: nil, num_inner_iterations: nil) ⇒ Object
- .sdca_shrink_l1(weights: nil, num_features: nil, l1: nil, l2: nil) ⇒ Object
- .segment_max(data: nil, segment_ids: nil) ⇒ Object
- .segment_mean(data: nil, segment_ids: nil) ⇒ Object
- .segment_min(data: nil, segment_ids: nil) ⇒ Object
- .segment_prod(data: nil, segment_ids: nil) ⇒ Object
- .segment_sum(data: nil, segment_ids: nil) ⇒ Object
- .select(condition: nil, t: nil, e: nil) ⇒ Object
- .select_v2(condition: nil, t: nil, e: nil) ⇒ Object
- .self_adjoint_eig(input: nil) ⇒ Object
- .self_adjoint_eig_v2(input: nil, compute_v: nil) ⇒ Object
- .selu(features: nil) ⇒ Object
- .selu_grad(gradients: nil, outputs: nil) ⇒ Object
- .send_tpu_embedding_gradients(inputs: nil, learning_rates: nil, config: nil) ⇒ Object
- .serialize_iterator(resource_handle: nil) ⇒ Object
- .serialize_many_sparse(sparse_indices: nil, sparse_values: nil, sparse_shape: nil, out_type: nil) ⇒ Object
- .serialize_sparse(sparse_indices: nil, sparse_values: nil, sparse_shape: nil, out_type: nil) ⇒ Object
- .serialize_tensor(tensor: nil) ⇒ Object
- .set_size(set_indices: nil, set_values: nil, set_shape: nil, validate_indices: nil) ⇒ Object
- .shape(input: nil, out_type: nil) ⇒ Object
- .shape_n(input: nil, out_type: nil) ⇒ Object
- .shard_dataset(input_dataset: nil, num_shards: nil, index: nil, require_non_empty: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .sharded_filename(basename: nil, shard: nil, num_shards: nil) ⇒ Object
- .sharded_filespec(basename: nil, num_shards: nil) ⇒ Object
- .shuffle_and_repeat_dataset(input_dataset: nil, buffer_size: nil, seed: nil, seed2: nil, count: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .shuffle_dataset(input_dataset: nil, buffer_size: nil, seed: nil, seed2: nil, reshuffle_each_iteration: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .shutdown_distributed_tpu ⇒ Object
- .sigmoid(x: nil) ⇒ Object
- .sigmoid_grad(y: nil, dy: nil) ⇒ Object
- .sign(x: nil) ⇒ Object
- .sin(x: nil) ⇒ Object
- .sinh(x: nil) ⇒ Object
- .size(input: nil, out_type: nil) ⇒ Object
- .skip_dataset(input_dataset: nil, count: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .skipgram(filename: nil, batch_size: nil, window_size: nil, min_count: nil, subsample: nil) ⇒ Object
- .slice(input: nil, start: nil, size: nil) ⇒ Object
- .snapshot(input: nil) ⇒ Object
- .snapshot_dataset(input_dataset: nil, path: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .softmax(logits: nil) ⇒ Object
- .softmax_cross_entropy_with_logits(features: nil, labels: nil) ⇒ Object
- .softplus(features: nil) ⇒ Object
- .softplus_grad(gradients: nil, features: nil) ⇒ Object
- .softsign(features: nil) ⇒ Object
- .softsign_grad(gradients: nil, features: nil) ⇒ Object
- .space_to_batch(input: nil, paddings: nil, block_size: nil) ⇒ Object
- .space_to_batch_nd(input: nil, block_shape: nil, paddings: nil) ⇒ Object
- .space_to_depth(input: nil, block_size: nil, data_format: nil) ⇒ Object
- .sparse_accumulator_apply_gradient(handle: nil, local_step: nil, gradient_indices: nil, gradient_values: nil, gradient_shape: nil, dtype: nil, has_known_shape: nil) ⇒ Object
- .sparse_accumulator_take_gradient(handle: nil, num_required: nil, dtype: nil) ⇒ Object
- .sparse_add(a_indices: nil, a_values: nil, a_shape: nil, b_indices: nil, b_values: nil, b_shape: nil, thresh: nil) ⇒ Object
- .sparse_add_grad(backprop_val_grad: nil, a_indices: nil, b_indices: nil, sum_indices: nil) ⇒ Object
- .sparse_apply_adadelta(var: nil, accum: nil, accum_update: nil, lr: nil, rho: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
- .sparse_apply_adagrad(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, use_locking: nil, update_slots: nil) ⇒ Object
- .sparse_apply_adagrad_da(var: nil, gradient_accumulator: nil, gradient_squared_accumulator: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, global_step: nil, use_locking: nil) ⇒ Object
- .sparse_apply_centered_rms_prop(var: nil, mg: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
- .sparse_apply_ftrl(var: nil, accum: nil, linear: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, lr_power: nil, use_locking: nil) ⇒ Object
- .sparse_apply_ftrl_v2(var: nil, accum: nil, linear: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, l2_shrinkage: nil, lr_power: nil, use_locking: nil) ⇒ Object
- .sparse_apply_momentum(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, momentum: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
- .sparse_apply_proximal_adagrad(var: nil, accum: nil, lr: nil, l1: nil, l2: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
- .sparse_apply_proximal_gradient_descent(var: nil, alpha: nil, l1: nil, l2: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
- .sparse_apply_rms_prop(var: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
- .sparse_concat(indices: nil, values: nil, shapes: nil, concat_dim: nil) ⇒ Object
- .sparse_conditional_accumulator(dtype: nil, shape: nil, container: nil, shared_name: nil, reduction_type: nil) ⇒ Object
- .sparse_cross(indices: nil, values: nil, shapes: nil, dense_inputs: nil, hashed_output: nil, num_buckets: nil, hash_key: nil, sparse_types: nil, dense_types: nil, out_type: nil, internal_type: nil) ⇒ Object
- .sparse_dense_cwise_add(sp_indices: nil, sp_values: nil, sp_shape: nil, dense: nil) ⇒ Object
- .sparse_dense_cwise_div(sp_indices: nil, sp_values: nil, sp_shape: nil, dense: nil) ⇒ Object
- .sparse_dense_cwise_mul(sp_indices: nil, sp_values: nil, sp_shape: nil, dense: nil) ⇒ Object
- .sparse_fill_empty_rows(indices: nil, values: nil, dense_shape: nil, default_value: nil) ⇒ Object
- .sparse_fill_empty_rows_grad(reverse_index_map: nil, grad_values: nil) ⇒ Object
- .sparse_mat_mul(a: nil, b: nil, transpose_a: nil, transpose_b: nil, a_is_sparse: nil, b_is_sparse: nil) ⇒ Object
- .sparse_reduce_max(input_indices: nil, input_values: nil, input_shape: nil, reduction_axes: nil, keep_dims: nil) ⇒ Object
- .sparse_reduce_max_sparse(input_indices: nil, input_values: nil, input_shape: nil, reduction_axes: nil, keep_dims: nil) ⇒ Object
- .sparse_reduce_sum(input_indices: nil, input_values: nil, input_shape: nil, reduction_axes: nil, keep_dims: nil) ⇒ Object
- .sparse_reduce_sum_sparse(input_indices: nil, input_values: nil, input_shape: nil, reduction_axes: nil, keep_dims: nil) ⇒ Object
- .sparse_reorder(input_indices: nil, input_values: nil, input_shape: nil) ⇒ Object
- .sparse_reshape(input_indices: nil, input_shape: nil, new_shape: nil) ⇒ Object
- .sparse_segment_mean(data: nil, indices: nil, segment_ids: nil) ⇒ Object
- .sparse_segment_mean_grad(grad: nil, indices: nil, segment_ids: nil, output_dim0: nil) ⇒ Object
- .sparse_segment_mean_with_num_segments(data: nil, indices: nil, segment_ids: nil, num_segments: nil) ⇒ Object
- .sparse_segment_sqrt_n(data: nil, indices: nil, segment_ids: nil) ⇒ Object
- .sparse_segment_sqrt_n_grad(grad: nil, indices: nil, segment_ids: nil, output_dim0: nil) ⇒ Object
- .sparse_segment_sqrt_n_with_num_segments(data: nil, indices: nil, segment_ids: nil, num_segments: nil) ⇒ Object
- .sparse_segment_sum(data: nil, indices: nil, segment_ids: nil) ⇒ Object
- .sparse_segment_sum_with_num_segments(data: nil, indices: nil, segment_ids: nil, num_segments: nil) ⇒ Object
- .sparse_slice(indices: nil, values: nil, shape: nil, start: nil, size: nil) ⇒ Object
- .sparse_slice_grad(backprop_val_grad: nil, input_indices: nil, input_start: nil, output_indices: nil) ⇒ Object
- .sparse_softmax(sp_indices: nil, sp_values: nil, sp_shape: nil) ⇒ Object
- .sparse_softmax_cross_entropy_with_logits(features: nil, labels: nil) ⇒ Object
- .sparse_sparse_maximum(a_indices: nil, a_values: nil, a_shape: nil, b_indices: nil, b_values: nil, b_shape: nil) ⇒ Object
- .sparse_sparse_minimum(a_indices: nil, a_values: nil, a_shape: nil, b_indices: nil, b_values: nil, b_shape: nil) ⇒ Object
- .sparse_split(split_dim: nil, indices: nil, values: nil, shape: nil, num_split: nil) ⇒ Object
- .sparse_tensor_dense_add(a_indices: nil, a_values: nil, a_shape: nil, b: nil) ⇒ Object
- .sparse_tensor_dense_mat_mul(a_indices: nil, a_values: nil, a_shape: nil, b: nil, adjoint_a: nil, adjoint_b: nil) ⇒ Object
- .sparse_tensor_slice_dataset(indices: nil, values: nil, dense_shape: nil) ⇒ Object
- .sparse_to_dense(sparse_indices: nil, output_shape: nil, sparse_values: nil, default_value: nil, validate_indices: nil) ⇒ Object
- .sparse_to_sparse_set_operation(set1_indices: nil, set1_values: nil, set1_shape: nil, set2_indices: nil, set2_values: nil, set2_shape: nil, set_operation: nil, validate_indices: nil) ⇒ Object
- .split(split_dim: nil, value: nil, num_split: nil) ⇒ Object
- .split_v(value: nil, size_splits: nil, split_dim: nil, num_split: nil) ⇒ Object
- .sqrt(x: nil) ⇒ Object
- .sqrt_grad(y: nil, dy: nil) ⇒ Object
- .square(x: nil) ⇒ Object
- .squared_difference(x: nil, y: nil) ⇒ Object
- .squeeze(input: nil, squeeze_dims: nil) ⇒ Object
- .stack(elem_type: nil, stack_name: nil) ⇒ Object
- .stack_close(handle: nil) ⇒ Object
- .stack_close_v2(handle: nil) ⇒ Object
- .stack_pop(handle: nil, elem_type: nil) ⇒ Object
- .stack_pop_v2(handle: nil, elem_type: nil) ⇒ Object
- .stack_push(handle: nil, elem: nil, swap_memory: nil) ⇒ Object
- .stack_push_v2(handle: nil, elem: nil, swap_memory: nil) ⇒ Object
- .stack_v2(max_size: nil, elem_type: nil, stack_name: nil) ⇒ Object
- .stage(values: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .stage_clear(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .stage_peek(index: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .stage_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .stateful_partitioned_call(args: nil, f: nil, config: nil, config_proto: nil, executor_type: nil) ⇒ Object
- .stateful_random_binomial(resource: nil, algorithm: nil, shape: nil, counts: nil, probs: nil, dtype: nil) ⇒ Object
- .stateful_standard_normal(resource: nil, shape: nil, dtype: nil, shape_dtype: nil) ⇒ Object
- .stateful_standard_normal_v2(resource: nil, algorithm: nil, shape: nil, dtype: nil, shape_dtype: nil) ⇒ Object
- .stateful_truncated_normal(resource: nil, algorithm: nil, shape: nil, dtype: nil, shape_dtype: nil) ⇒ Object
- .stateful_uniform(resource: nil, algorithm: nil, shape: nil, dtype: nil, shape_dtype: nil) ⇒ Object
- .stateful_uniform_full_int(resource: nil, algorithm: nil, shape: nil, dtype: nil, shape_dtype: nil) ⇒ Object
- .stateful_uniform_int(resource: nil, algorithm: nil, shape: nil, minval: nil, maxval: nil, dtype: nil, shape_dtype: nil) ⇒ Object
- .stateless_if(cond: nil, input: nil, then_branch: nil, else_branch: nil) ⇒ Object
- .stateless_multinomial(logits: nil, num_samples: nil, seed: nil, output_dtype: nil) ⇒ Object
- .stateless_random_normal(shape: nil, seed: nil, dtype: nil) ⇒ Object
- .stateless_random_uniform(shape: nil, seed: nil, dtype: nil) ⇒ Object
- .stateless_random_uniform_int(shape: nil, seed: nil, minval: nil, maxval: nil, dtype: nil) ⇒ Object
- .stateless_truncated_normal(shape: nil, seed: nil, dtype: nil) ⇒ Object
- .stateless_while(input: nil, cond: nil, body: nil) ⇒ Object
- .static_regex_full_match(input: nil, pattern: nil) ⇒ Object
- .static_regex_replace(input: nil, pattern: nil, rewrite: nil, replace_global: nil) ⇒ Object
- .stats_aggregator_handle_v2(container: nil, shared_name: nil) ⇒ Object
- .stats_aggregator_set_summary_writer(stats_aggregator: nil, summary: nil) ⇒ Object
- .stop_gradient(input: nil) ⇒ Object
- .strided_slice(input: nil, start: nil, stop: nil, strides: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) ⇒ Object
- .strided_slice_assign(ref: nil, start: nil, stop: nil, strides: nil, value: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) ⇒ Object
- .strided_slice_grad(shape: nil, start: nil, stop: nil, strides: nil, dy: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) ⇒ Object
- .string_format(inputs: nil, template: nil, placeholder: nil, summarize: nil) ⇒ Object
- .string_join(inputs: nil, separator: nil) ⇒ Object
- .string_length(input: nil, unit: nil) ⇒ Object
- .string_lower(input: nil, encoding: nil) ⇒ Object
- .string_split(input: nil, delimiter: nil, skip_empty: nil) ⇒ Object
- .string_split_v2(input: nil, sep: nil, maxsplit: nil) ⇒ Object
- .string_strip(input: nil) ⇒ Object
- .string_to_hash_bucket(string_tensor: nil, num_buckets: nil) ⇒ Object
- .string_to_hash_bucket_fast(input: nil, num_buckets: nil) ⇒ Object
- .string_to_hash_bucket_strong(input: nil, num_buckets: nil, key: nil) ⇒ Object
- .string_to_number(string_tensor: nil, out_type: nil) ⇒ Object
- .string_upper(input: nil, encoding: nil) ⇒ Object
- .sub(x: nil, y: nil) ⇒ Object
- .substr(input: nil, pos: nil, len: nil, unit: nil) ⇒ Object
- .sum(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
- .summary_writer(shared_name: nil, container: nil) ⇒ Object
- .svd(input: nil, compute_uv: nil, full_matrices: nil) ⇒ Object
- .switch(data: nil, pred: nil) ⇒ Object
- .symbolic_gradient(input: nil, f: nil) ⇒ Object
- .take_dataset(input_dataset: nil, count: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .take_many_sparse_from_tensors_map(sparse_handles: nil, dtype: nil, container: nil, shared_name: nil) ⇒ Object
- .tan(x: nil) ⇒ Object
- .tanh(x: nil) ⇒ Object
- .tanh_grad(y: nil, dy: nil) ⇒ Object
- .temporary_variable(shape: nil, dtype: nil, var_name: nil) ⇒ Object
- .tensor_array(size: nil, dtype: nil, dynamic_size: nil, clear_after_read: nil, tensor_array_name: nil, element_shape: nil) ⇒ Object
- .tensor_array_close(handle: nil) ⇒ Object
- .tensor_array_close_v2(handle: nil) ⇒ Object
- .tensor_array_close_v3(handle: nil) ⇒ Object
- .tensor_array_concat(handle: nil, flow_in: nil, dtype: nil, element_shape_except0: nil) ⇒ Object
- .tensor_array_concat_v2(handle: nil, flow_in: nil, dtype: nil, element_shape_except0: nil) ⇒ Object
- .tensor_array_concat_v3(handle: nil, flow_in: nil, dtype: nil, element_shape_except0: nil) ⇒ Object
- .tensor_array_gather(handle: nil, indices: nil, flow_in: nil, dtype: nil, element_shape: nil) ⇒ Object
- .tensor_array_gather_v2(handle: nil, indices: nil, flow_in: nil, dtype: nil, element_shape: nil) ⇒ Object
- .tensor_array_gather_v3(handle: nil, indices: nil, flow_in: nil, dtype: nil, element_shape: nil) ⇒ Object
- .tensor_array_grad(handle: nil, flow_in: nil, source: nil) ⇒ Object
- .tensor_array_grad_v2(handle: nil, flow_in: nil, source: nil) ⇒ Object
- .tensor_array_grad_v3(handle: nil, flow_in: nil, source: nil) ⇒ Object
- .tensor_array_grad_with_shape(handle: nil, flow_in: nil, shape_to_prepend: nil, source: nil) ⇒ Object
- .tensor_array_pack(handle: nil, flow_in: nil, dtype: nil, element_shape: nil) ⇒ Object
- .tensor_array_read(handle: nil, index: nil, flow_in: nil, dtype: nil) ⇒ Object
- .tensor_array_read_v2(handle: nil, index: nil, flow_in: nil, dtype: nil) ⇒ Object
- .tensor_array_read_v3(handle: nil, index: nil, flow_in: nil, dtype: nil) ⇒ Object
- .tensor_array_scatter(handle: nil, indices: nil, value: nil, flow_in: nil) ⇒ Object
- .tensor_array_scatter_v2(handle: nil, indices: nil, value: nil, flow_in: nil) ⇒ Object
- .tensor_array_scatter_v3(handle: nil, indices: nil, value: nil, flow_in: nil) ⇒ Object
- .tensor_array_size(handle: nil, flow_in: nil) ⇒ Object
- .tensor_array_size_v2(handle: nil, flow_in: nil) ⇒ Object
- .tensor_array_size_v3(handle: nil, flow_in: nil) ⇒ Object
- .tensor_array_split(handle: nil, value: nil, lengths: nil, flow_in: nil) ⇒ Object
- .tensor_array_split_v2(handle: nil, value: nil, lengths: nil, flow_in: nil) ⇒ Object
- .tensor_array_split_v3(handle: nil, value: nil, lengths: nil, flow_in: nil) ⇒ Object
- .tensor_array_unpack(handle: nil, value: nil, flow_in: nil) ⇒ Object
- .tensor_array_v2(size: nil, dtype: nil, element_shape: nil, dynamic_size: nil, clear_after_read: nil, tensor_array_name: nil) ⇒ Object
- .tensor_array_v3(size: nil, dtype: nil, element_shape: nil, dynamic_size: nil, clear_after_read: nil, identical_element_shapes: nil, tensor_array_name: nil) ⇒ Object
- .tensor_array_write(handle: nil, index: nil, value: nil, flow_in: nil) ⇒ Object
- .tensor_array_write_v2(handle: nil, index: nil, value: nil, flow_in: nil) ⇒ Object
- .tensor_array_write_v3(handle: nil, index: nil, value: nil, flow_in: nil) ⇒ Object
- .tensor_dataset(components: nil, output_shapes: nil) ⇒ Object
- .tensor_forest_create_tree_variable(tree_handle: nil, tree_config: nil) ⇒ Object
- .tensor_forest_tree_deserialize(tree_handle: nil, tree_config: nil) ⇒ Object
- .tensor_forest_tree_is_initialized_op(tree_handle: nil) ⇒ Object
- .tensor_forest_tree_predict(tree_handle: nil, dense_features: nil, logits_dimension: nil) ⇒ Object
- .tensor_forest_tree_resource_handle_op(container: nil, shared_name: nil) ⇒ Object
- .tensor_forest_tree_serialize(tree_handle: nil) ⇒ Object
- .tensor_forest_tree_size(tree_handle: nil) ⇒ Object
- .tensor_list_concat(input_handle: nil, element_dtype: nil, element_shape: nil) ⇒ Object
- .tensor_list_concat_lists(input_a: nil, input_b: nil, element_dtype: nil) ⇒ Object
- .tensor_list_concat_v2(input_handle: nil, element_shape: nil, leading_dims: nil, element_dtype: nil, shape_type: nil) ⇒ Object
- .tensor_list_element_shape(input_handle: nil, shape_type: nil) ⇒ Object
- .tensor_list_from_tensor(tensor: nil, element_shape: nil, element_dtype: nil, shape_type: nil) ⇒ Object
- .tensor_list_gather(input_handle: nil, indices: nil, element_shape: nil, element_dtype: nil) ⇒ Object
- .tensor_list_get_item(input_handle: nil, index: nil, element_shape: nil, element_dtype: nil) ⇒ Object
- .tensor_list_length(input_handle: nil) ⇒ Object
- .tensor_list_pop_back(input_handle: nil, element_shape: nil, element_dtype: nil) ⇒ Object
- .tensor_list_push_back(input_handle: nil, tensor: nil, element_dtype: nil) ⇒ Object
- .tensor_list_push_back_batch(input_handles: nil, tensor: nil, element_dtype: nil) ⇒ Object
- .tensor_list_reserve(element_shape: nil, num_elements: nil, element_dtype: nil, shape_type: nil) ⇒ Object
- .tensor_list_resize(input_handle: nil, size: nil) ⇒ Object
- .tensor_list_scatter(tensor: nil, indices: nil, element_shape: nil, element_dtype: nil, shape_type: nil) ⇒ Object
- .tensor_list_scatter_into_existing_list(input_handle: nil, tensor: nil, indices: nil, element_dtype: nil) ⇒ Object
- .tensor_list_scatter_v2(tensor: nil, indices: nil, element_shape: nil, num_elements: nil, element_dtype: nil, shape_type: nil) ⇒ Object
- .tensor_list_set_item(input_handle: nil, index: nil, item: nil, element_dtype: nil) ⇒ Object
- .tensor_list_split(tensor: nil, element_shape: nil, lengths: nil, element_dtype: nil, shape_type: nil) ⇒ Object
- .tensor_list_stack(input_handle: nil, element_shape: nil, element_dtype: nil, num_elements: nil) ⇒ Object
- .tensor_scatter_add(tensor: nil, indices: nil, updates: nil) ⇒ Object
- .tensor_scatter_sub(tensor: nil, indices: nil, updates: nil) ⇒ Object
- .tensor_scatter_update(tensor: nil, indices: nil, updates: nil) ⇒ Object
- .tensor_slice_dataset(components: nil, output_shapes: nil) ⇒ Object
- .tensor_strided_slice_update(input: nil, start: nil, stop: nil, strides: nil, value: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) ⇒ Object
- .tensor_summary(tensor: nil, description: nil, labels: nil, display_name: nil) ⇒ Object
- .tensor_summary_v2(tag: nil, tensor: nil, serialized_summary_metadata: nil) ⇒ Object
- .text_line_dataset(filenames: nil, compression_type: nil, buffer_size: nil) ⇒ Object
- .text_line_reader(skip_header_lines: nil, container: nil, shared_name: nil) ⇒ Object
- .text_line_reader_v2(skip_header_lines: nil, container: nil, shared_name: nil) ⇒ Object
- .tf_record_dataset(filenames: nil, compression_type: nil, buffer_size: nil) ⇒ Object
- .tf_record_reader(container: nil, shared_name: nil, compression_type: nil) ⇒ Object
- .tf_record_reader_v2(container: nil, shared_name: nil, compression_type: nil) ⇒ Object
- .thread_unsafe_unigram_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, seed: nil, seed2: nil) ⇒ Object
- .tile(input: nil, multiples: nil) ⇒ Object
- .tile_grad(input: nil, multiples: nil) ⇒ Object
- .timestamp ⇒ Object
- .top_k(input: nil, k: nil, sorted: nil) ⇒ Object
- .top_kv2(input: nil, k: nil, sorted: nil) ⇒ Object
- .tpu_compilation_result ⇒ Object
- .tpu_embedding_activations(embedding_variable: nil, sliced_activations: nil, table_id: nil, lookup_id: nil) ⇒ Object
- .tpu_ordinal_selector ⇒ Object
- .tpu_partitioned_call(args: nil, device_ordinal: nil, f: nil) ⇒ Object
- .tpu_replicate_metadata(num_replicas: nil, num_cores_per_replica: nil, topology: nil, use_tpu: nil, device_assignment: nil, computation_shape: nil, host_compute_core: nil, padding_map: nil, step_marker_location: nil) ⇒ Object
- .tpu_replicated_input(inputs: nil) ⇒ Object
- .tpu_replicated_output(input: nil, num_replicas: nil) ⇒ Object
- .transpose(x: nil, perm: nil) ⇒ Object
- .tridiagonal_mat_mul(superdiag: nil, maindiag: nil, subdiag: nil, rhs: nil) ⇒ Object
- .tridiagonal_solve(diagonals: nil, rhs: nil, partial_pivoting: nil) ⇒ Object
- .truncate_div(x: nil, y: nil) ⇒ Object
- .truncate_mod(x: nil, y: nil) ⇒ Object
- .truncated_normal(shape: nil, seed: nil, seed2: nil, dtype: nil) ⇒ Object
- .try_rpc(address: nil, method: nil, request: nil, protocol: nil, fail_fast: nil, timeout_in_ms: nil) ⇒ Object
- .unbatch(batched_tensor: nil, batch_index: nil, id: nil, timeout_micros: nil, container: nil, shared_name: nil) ⇒ Object
- .unbatch_grad(original_input: nil, batch_index: nil, grad: nil, id: nil, container: nil, shared_name: nil) ⇒ Object
- .unicode_decode(input: nil, input_encoding: nil, errors: nil, replacement_char: nil, replace_control_characters: nil) ⇒ Object
- .unicode_decode_with_offsets(input: nil, input_encoding: nil, errors: nil, replacement_char: nil, replace_control_characters: nil) ⇒ Object
- .unicode_encode(input_values: nil, input_splits: nil, errors: nil, output_encoding: nil, replacement_char: nil) ⇒ Object
- .unicode_script(input: nil) ⇒ Object
- .unicode_transcode(input: nil, input_encoding: nil, output_encoding: nil, errors: nil, replacement_char: nil, replace_control_characters: nil) ⇒ Object
- .uniform_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, seed: nil, seed2: nil) ⇒ Object
- .unique(x: nil, out_idx: nil) ⇒ Object
- .unique_v2(x: nil, axis: nil, out_idx: nil) ⇒ Object
- .unique_with_counts(x: nil, out_idx: nil) ⇒ Object
- .unique_with_counts_v2(x: nil, axis: nil, out_idx: nil) ⇒ Object
- .unpack(value: nil, num: nil, axis: nil) ⇒ Object
- .unravel_index(indices: nil, dims: nil) ⇒ Object
- .unsorted_segment_max(data: nil, segment_ids: nil, num_segments: nil) ⇒ Object
- .unsorted_segment_min(data: nil, segment_ids: nil, num_segments: nil) ⇒ Object
- .unsorted_segment_prod(data: nil, segment_ids: nil, num_segments: nil) ⇒ Object
- .unsorted_segment_sum(data: nil, segment_ids: nil, num_segments: nil) ⇒ Object
- .unstage(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
- .unwrap_dataset_variant(input_handle: nil) ⇒ Object
- .upper_bound(sorted_inputs: nil, values: nil, out_type: nil) ⇒ Object
- .var_handle_op(container: nil, shared_name: nil, dtype: nil, shape: nil) ⇒ Object
- .var_is_initialized_op(resource: nil) ⇒ Object
- .variable(shape: nil, dtype: nil, container: nil, shared_name: nil) ⇒ Object
- .variable_shape(input: nil, out_type: nil) ⇒ Object
- .variable_v2(shape: nil, dtype: nil, container: nil, shared_name: nil) ⇒ Object
- .where(input: nil) ⇒ Object
- .while(input: nil, cond: nil, body: nil, output_shapes: nil, parallel_iterations: nil) ⇒ Object
- .whole_file_reader(container: nil, shared_name: nil) ⇒ Object
- .whole_file_reader_v2(container: nil, shared_name: nil) ⇒ Object
- .window_dataset(input_dataset: nil, size: nil, shift: nil, stride: nil, drop_remainder: nil, output_types: nil, output_shapes: nil) ⇒ Object
- .worker_heartbeat(request: nil) ⇒ Object
- .wrap_dataset_variant(input_handle: nil) ⇒ Object
- .write_audio_summary(writer: nil, step: nil, tag: nil, tensor: nil, sample_rate: nil, max_outputs: nil) ⇒ Object
- .write_file(filename: nil, contents: nil) ⇒ Object
- .write_graph_summary(writer: nil, step: nil, tensor: nil) ⇒ Object
- .write_histogram_summary(writer: nil, step: nil, tag: nil, values: nil) ⇒ Object
- .write_image_summary(writer: nil, step: nil, tag: nil, tensor: nil, bad_color: nil, max_images: nil) ⇒ Object
- .write_raw_proto_summary(writer: nil, step: nil, tensor: nil) ⇒ Object
- .write_scalar_summary(writer: nil, step: nil, tag: nil, value: nil) ⇒ Object
- .write_summary(writer: nil, step: nil, tensor: nil, tag: nil, summary_metadata: nil) ⇒ Object
- .xdivy(x: nil, y: nil) ⇒ Object
- .xlogy(x: nil, y: nil) ⇒ Object
- .zeros_like(x: nil) ⇒ Object
- .zeta(x: nil, q: nil) ⇒ Object
- .zip_dataset(input_datasets: nil, output_types: nil, output_shapes: nil) ⇒ Object
Class Method Details
.abort(error_msg: nil, exit_without_error: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 5
def abort(error_msg: nil, exit_without_error: nil)
  Utils.execute("Abort", [], error_msg: error_msg, exit_without_error: exit_without_error)
end
.abs(x: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 9
def abs(x: nil)
  Utils.execute("Abs", [x])
end
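A minimal usage sketch (the input value is hypothetical, and it assumes the gem's TensorFlow.constant helper is available for building eager tensors):

  x = TensorFlow.constant([-1.0, 2.0, -3.5])
  TensorFlow::RawOps.abs(x: x)  # element-wise absolute value: [1.0, 2.0, 3.5]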
.accumulate_nv2(inputs: nil, shape: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 13
def accumulate_nv2(inputs: nil, shape: nil)
  Utils.execute("AccumulateNV2", [inputs], shape: shape)
end
.accumulator_apply_gradient(handle: nil, local_step: nil, gradient: nil, dtype: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 17
def accumulator_apply_gradient(handle: nil, local_step: nil, gradient: nil, dtype: nil)
  Utils.execute("AccumulatorApplyGradient", [handle, local_step, gradient], dtype: dtype)
end
.accumulator_num_accumulated(handle: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 21
def accumulator_num_accumulated(handle: nil)
  Utils.execute("AccumulatorNumAccumulated", [handle])
end
.accumulator_set_global_step(handle: nil, new_global_step: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 25
def accumulator_set_global_step(handle: nil, new_global_step: nil)
  Utils.execute("AccumulatorSetGlobalStep", [handle, new_global_step])
end
.accumulator_take_gradient(handle: nil, num_required: nil, dtype: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 29
def accumulator_take_gradient(handle: nil, num_required: nil, dtype: nil)
  Utils.execute("AccumulatorTakeGradient", [handle, num_required], dtype: dtype)
end
.acos(x: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 33
def acos(x: nil)
  Utils.execute("Acos", [x])
end
.acosh(x: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 37
def acosh(x: nil)
  Utils.execute("Acosh", [x])
end
.add(x: nil, y: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 41
def add(x: nil, y: nil)
  Utils.execute("Add", [x, y])
end
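A hedged sketch of element-wise addition (values are hypothetical; assumes TensorFlow.constant exists in this gem for creating tensors):

  x = TensorFlow.constant([1.0, 2.0, 3.0])
  y = TensorFlow.constant([10.0, 20.0, 30.0])
  TensorFlow::RawOps.add(x: x, y: y)  # tensor holding [11.0, 22.0, 33.0]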
.add_many_sparse_to_tensors_map(sparse_indices: nil, sparse_values: nil, sparse_shape: nil, container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 45
def add_many_sparse_to_tensors_map(sparse_indices: nil, sparse_values: nil, sparse_shape: nil, container: nil, shared_name: nil)
  Utils.execute("AddManySparseToTensorsMap", [sparse_indices, sparse_values, sparse_shape], container: container, shared_name: shared_name)
end
.add_n(inputs: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 49
def add_n(inputs: nil)
  Utils.execute("AddN", [inputs])
end
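add_n sums a list of same-shaped tensors in one op. A minimal sketch (hypothetical values; assumes TensorFlow.constant is available and that the executor accepts a Ruby array for the op's list input):

  a = TensorFlow.constant([1, 2])
  b = TensorFlow.constant([3, 4])
  c = TensorFlow.constant([5, 6])
  TensorFlow::RawOps.add_n(inputs: [a, b, c])  # tensor holding [9, 12]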
.add_sparse_to_tensors_map(sparse_indices: nil, sparse_values: nil, sparse_shape: nil, container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 53
def add_sparse_to_tensors_map(sparse_indices: nil, sparse_values: nil, sparse_shape: nil, container: nil, shared_name: nil)
  Utils.execute("AddSparseToTensorsMap", [sparse_indices, sparse_values, sparse_shape], container: container, shared_name: shared_name)
end
.add_v2(x: nil, y: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 57
def add_v2(x: nil, y: nil)
  Utils.execute("AddV2", [x, y])
end
.adjust_contrast(images: nil, contrast_factor: nil, min_value: nil, max_value: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 61
def adjust_contrast(images: nil, contrast_factor: nil, min_value: nil, max_value: nil)
  Utils.execute("AdjustContrast", [images, contrast_factor, min_value, max_value])
end
.adjust_contrastv2(images: nil, contrast_factor: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 65
def adjust_contrastv2(images: nil, contrast_factor: nil)
  Utils.execute("AdjustContrastv2", [images, contrast_factor])
end
.adjust_hue(images: nil, delta: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 69
def adjust_hue(images: nil, delta: nil)
  Utils.execute("AdjustHue", [images, delta])
end
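adjust_hue shifts the hue of an RGB image by delta. A hedged sketch with a hypothetical 1x1 float image (assumes TensorFlow.constant builds a float tensor from nested arrays):

  images = TensorFlow.constant([[[0.2, 0.4, 0.6]]])  # shape [1, 1, 3], RGB
  TensorFlow::RawOps.adjust_hue(images: images, delta: 0.1)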
.adjust_saturation(images: nil, scale: nil) ⇒ Object
73 74 75 |
# File 'lib/tensorflow/raw_ops.rb', line 73 def adjust_saturation(images: nil, scale: nil) Utils.execute("AdjustSaturation", [images, scale]) end |
.all(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
77 78 79 |
# File 'lib/tensorflow/raw_ops.rb', line 77 def all(input: nil, reduction_indices: nil, keep_dims: nil) Utils.execute("All", [input, reduction_indices], keep_dims: keep_dims) end |
.all_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, seed: nil, seed2: nil) ⇒ Object
81 82 83 |
# File 'lib/tensorflow/raw_ops.rb', line 81 def all_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, seed: nil, seed2: nil) Utils.execute("AllCandidateSampler", [true_classes], num_true: num_true, num_sampled: num_sampled, unique: unique, seed: seed, seed2: seed2) end |
.all_to_all(input: nil, group_assignment: nil, concat_dimension: nil, split_dimension: nil, split_count: nil) ⇒ Object
85 86 87 |
# File 'lib/tensorflow/raw_ops.rb', line 85 def all_to_all(input: nil, group_assignment: nil, concat_dimension: nil, split_dimension: nil, split_count: nil) Utils.execute("AllToAll", [input, group_assignment], concat_dimension: concat_dimension, split_dimension: split_dimension, split_count: split_count) end |
.angle(input: nil) ⇒ Object
89 90 91 |
# File 'lib/tensorflow/raw_ops.rb', line 89 def angle(input: nil) Utils.execute("Angle", [input]) end |
.anonymous_iterator(output_types: nil, output_shapes: nil) ⇒ Object
93 94 95 |
# File 'lib/tensorflow/raw_ops.rb', line 93 def anonymous_iterator(output_types: nil, output_shapes: nil) Utils.execute("AnonymousIterator", [], output_types: output_types, output_shapes: output_shapes) end |
.anonymous_iterator_v2(output_types: nil, output_shapes: nil) ⇒ Object
97 98 99 |
# File 'lib/tensorflow/raw_ops.rb', line 97 def anonymous_iterator_v2(output_types: nil, output_shapes: nil) Utils.execute("AnonymousIteratorV2", [], output_types: output_types, output_shapes: output_shapes) end |
.any(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
101 102 103 |
# File 'lib/tensorflow/raw_ops.rb', line 101 def any(input: nil, reduction_indices: nil, keep_dims: nil) Utils.execute("Any", [input, reduction_indices], keep_dims: keep_dims) end |
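`all` and `any` reduce a boolean tensor with logical AND/OR over the axes in `reduction_indices`; `keep_dims` keeps the reduced axes with size 1. A hedged sketch, assuming `TensorFlow.constant` as above:

```ruby
require "tensorflow"

bools = TensorFlow.constant([[true, false], [true, true]])

TensorFlow::RawOps.any(input: bools, reduction_indices: 1, keep_dims: false) # OR across each row
TensorFlow::RawOps.all(input: bools, reduction_indices: 0, keep_dims: true)  # AND down each column, rank kept
```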
.apply_ada_max(var: nil, m: nil, v: nil, beta1_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
105 106 107 |
# File 'lib/tensorflow/raw_ops.rb', line 105 def apply_ada_max(var: nil, m: nil, v: nil, beta1_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil) Utils.execute("ApplyAdaMax", [var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad], use_locking: use_locking) end |
.apply_adadelta(var: nil, accum: nil, accum_update: nil, lr: nil, rho: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
109 110 111 |
# File 'lib/tensorflow/raw_ops.rb', line 109 def apply_adadelta(var: nil, accum: nil, accum_update: nil, lr: nil, rho: nil, epsilon: nil, grad: nil, use_locking: nil) Utils.execute("ApplyAdadelta", [var, accum, accum_update, lr, rho, epsilon, grad], use_locking: use_locking) end |
.apply_adagrad(var: nil, accum: nil, lr: nil, grad: nil, use_locking: nil, update_slots: nil) ⇒ Object
113 114 115 |
# File 'lib/tensorflow/raw_ops.rb', line 113 def apply_adagrad(var: nil, accum: nil, lr: nil, grad: nil, use_locking: nil, update_slots: nil) Utils.execute("ApplyAdagrad", [var, accum, lr, grad], use_locking: use_locking, update_slots: update_slots) end |
.apply_adagrad_da(var: nil, gradient_accumulator: nil, gradient_squared_accumulator: nil, grad: nil, lr: nil, l1: nil, l2: nil, global_step: nil, use_locking: nil) ⇒ Object
117 118 119 |
# File 'lib/tensorflow/raw_ops.rb', line 117 def apply_adagrad_da(var: nil, gradient_accumulator: nil, gradient_squared_accumulator: nil, grad: nil, lr: nil, l1: nil, l2: nil, global_step: nil, use_locking: nil) Utils.execute("ApplyAdagradDA", [var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step], use_locking: use_locking) end |
.apply_adam(var: nil, m: nil, v: nil, beta1_power: nil, beta2_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
121 122 123 |
# File 'lib/tensorflow/raw_ops.rb', line 121 def apply_adam(var: nil, m: nil, v: nil, beta1_power: nil, beta2_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil, use_nesterov: nil) Utils.execute("ApplyAdam", [var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad], use_locking: use_locking, use_nesterov: use_nesterov) end |
.apply_add_sign(var: nil, m: nil, lr: nil, alpha: nil, sign_decay: nil, beta: nil, grad: nil, use_locking: nil) ⇒ Object
125 126 127 |
# File 'lib/tensorflow/raw_ops.rb', line 125 def apply_add_sign(var: nil, m: nil, lr: nil, alpha: nil, sign_decay: nil, beta: nil, grad: nil, use_locking: nil) Utils.execute("ApplyAddSign", [var, m, lr, alpha, sign_decay, beta, grad], use_locking: use_locking) end |
.apply_centered_rms_prop(var: nil, mg: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
129 130 131 |
# File 'lib/tensorflow/raw_ops.rb', line 129 def apply_centered_rms_prop(var: nil, mg: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, use_locking: nil) Utils.execute("ApplyCenteredRMSProp", [var, mg, ms, mom, lr, rho, momentum, epsilon, grad], use_locking: use_locking) end |
.apply_ftrl(var: nil, accum: nil, linear: nil, grad: nil, lr: nil, l1: nil, l2: nil, lr_power: nil, use_locking: nil) ⇒ Object
133 134 135 |
# File 'lib/tensorflow/raw_ops.rb', line 133 def apply_ftrl(var: nil, accum: nil, linear: nil, grad: nil, lr: nil, l1: nil, l2: nil, lr_power: nil, use_locking: nil) Utils.execute("ApplyFtrl", [var, accum, linear, grad, lr, l1, l2, lr_power], use_locking: use_locking) end |
.apply_ftrl_v2(var: nil, accum: nil, linear: nil, grad: nil, lr: nil, l1: nil, l2: nil, l2_shrinkage: nil, lr_power: nil, use_locking: nil) ⇒ Object
137 138 139 |
# File 'lib/tensorflow/raw_ops.rb', line 137 def apply_ftrl_v2(var: nil, accum: nil, linear: nil, grad: nil, lr: nil, l1: nil, l2: nil, l2_shrinkage: nil, lr_power: nil, use_locking: nil) Utils.execute("ApplyFtrlV2", [var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power], use_locking: use_locking) end |
.apply_gradient_descent(var: nil, alpha: nil, delta: nil, use_locking: nil) ⇒ Object
141 142 143 |
# File 'lib/tensorflow/raw_ops.rb', line 141 def apply_gradient_descent(var: nil, alpha: nil, delta: nil, use_locking: nil) Utils.execute("ApplyGradientDescent", [var, alpha, delta], use_locking: use_locking) end |
.apply_momentum(var: nil, accum: nil, lr: nil, grad: nil, momentum: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
145 146 147 |
# File 'lib/tensorflow/raw_ops.rb', line 145 def apply_momentum(var: nil, accum: nil, lr: nil, grad: nil, momentum: nil, use_locking: nil, use_nesterov: nil) Utils.execute("ApplyMomentum", [var, accum, lr, grad, momentum], use_locking: use_locking, use_nesterov: use_nesterov) end |
.apply_power_sign(var: nil, m: nil, lr: nil, logbase: nil, sign_decay: nil, beta: nil, grad: nil, use_locking: nil) ⇒ Object
149 150 151 |
# File 'lib/tensorflow/raw_ops.rb', line 149 def apply_power_sign(var: nil, m: nil, lr: nil, logbase: nil, sign_decay: nil, beta: nil, grad: nil, use_locking: nil) Utils.execute("ApplyPowerSign", [var, m, lr, logbase, sign_decay, beta, grad], use_locking: use_locking) end |
.apply_proximal_adagrad(var: nil, accum: nil, lr: nil, l1: nil, l2: nil, grad: nil, use_locking: nil) ⇒ Object
153 154 155 |
# File 'lib/tensorflow/raw_ops.rb', line 153 def apply_proximal_adagrad(var: nil, accum: nil, lr: nil, l1: nil, l2: nil, grad: nil, use_locking: nil) Utils.execute("ApplyProximalAdagrad", [var, accum, lr, l1, l2, grad], use_locking: use_locking) end |
.apply_proximal_gradient_descent(var: nil, alpha: nil, l1: nil, l2: nil, delta: nil, use_locking: nil) ⇒ Object
157 158 159 |
# File 'lib/tensorflow/raw_ops.rb', line 157 def apply_proximal_gradient_descent(var: nil, alpha: nil, l1: nil, l2: nil, delta: nil, use_locking: nil) Utils.execute("ApplyProximalGradientDescent", [var, alpha, l1, l2, delta], use_locking: use_locking) end |
.apply_rms_prop(var: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
161 162 163 |
# File 'lib/tensorflow/raw_ops.rb', line 161 def apply_rms_prop(var: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, use_locking: nil) Utils.execute("ApplyRMSProp", [var, ms, mom, lr, rho, momentum, epsilon, grad], use_locking: use_locking) end |
.approximate_equal(x: nil, y: nil, tolerance: nil) ⇒ Object
165 166 167 |
# File 'lib/tensorflow/raw_ops.rb', line 165 def approximate_equal(x: nil, y: nil, tolerance: nil) Utils.execute("ApproximateEqual", [x, y], tolerance: tolerance) end |
.arg_max(input: nil, dimension: nil, output_type: nil) ⇒ Object
169 170 171 |
# File 'lib/tensorflow/raw_ops.rb', line 169 def arg_max(input: nil, dimension: nil, output_type: nil) Utils.execute("ArgMax", [input, dimension], output_type: output_type) end |
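`arg_max` returns the index of the largest value along `dimension`. Illustrative sketch (same `TensorFlow.constant` assumption):

```ruby
require "tensorflow"

scores = TensorFlow.constant([[0.1, 0.9, 0.0], [0.7, 0.2, 0.1]])

# Index of the largest entry in each row (dimension 1): expected [1, 0].
TensorFlow::RawOps.arg_max(input: scores, dimension: 1)
```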
.arg_min(input: nil, dimension: nil, output_type: nil) ⇒ Object
173 174 175 |
# File 'lib/tensorflow/raw_ops.rb', line 173 def arg_min(input: nil, dimension: nil, output_type: nil) Utils.execute("ArgMin", [input, dimension], output_type: output_type) end |
.as_string(input: nil, precision: nil, scientific: nil, shortest: nil, width: nil, fill: nil) ⇒ Object
177 178 179 |
# File 'lib/tensorflow/raw_ops.rb', line 177 def as_string(input: nil, precision: nil, scientific: nil, shortest: nil, width: nil, fill: nil) Utils.execute("AsString", [input], precision: precision, scientific: scientific, shortest: shortest, width: width, fill: fill) end |
.asin(x: nil) ⇒ Object
181 182 183 |
# File 'lib/tensorflow/raw_ops.rb', line 181 def asin(x: nil) Utils.execute("Asin", [x]) end |
.asinh(x: nil) ⇒ Object
185 186 187 |
# File 'lib/tensorflow/raw_ops.rb', line 185 def asinh(x: nil) Utils.execute("Asinh", [x]) end |
.assert(condition: nil, data: nil, summarize: nil) ⇒ Object
189 190 191 |
# File 'lib/tensorflow/raw_ops.rb', line 189 def assert(condition: nil, data: nil, summarize: nil) Utils.execute("Assert", [condition, data], summarize: summarize) end |
.assign(ref: nil, value: nil, validate_shape: nil, use_locking: nil) ⇒ Object
193 194 195 |
# File 'lib/tensorflow/raw_ops.rb', line 193 def assign(ref: nil, value: nil, validate_shape: nil, use_locking: nil) Utils.execute("Assign", [ref, value], validate_shape: validate_shape, use_locking: use_locking) end |
.assign_add(ref: nil, value: nil, use_locking: nil) ⇒ Object
197 198 199 |
# File 'lib/tensorflow/raw_ops.rb', line 197 def assign_add(ref: nil, value: nil, use_locking: nil) Utils.execute("AssignAdd", [ref, value], use_locking: use_locking) end |
.assign_add_variable_op(resource: nil, value: nil, dtype: nil) ⇒ Object
201 202 203 |
# File 'lib/tensorflow/raw_ops.rb', line 201 def assign_add_variable_op(resource: nil, value: nil, dtype: nil) Utils.execute("AssignAddVariableOp", [resource, value], dtype: dtype) end |
.assign_sub(ref: nil, value: nil, use_locking: nil) ⇒ Object
205 206 207 |
# File 'lib/tensorflow/raw_ops.rb', line 205 def assign_sub(ref: nil, value: nil, use_locking: nil) Utils.execute("AssignSub", [ref, value], use_locking: use_locking) end |
.assign_sub_variable_op(resource: nil, value: nil, dtype: nil) ⇒ Object
209 210 211 |
# File 'lib/tensorflow/raw_ops.rb', line 209 def assign_sub_variable_op(resource: nil, value: nil, dtype: nil) Utils.execute("AssignSubVariableOp", [resource, value], dtype: dtype) end |
.assign_variable_op(resource: nil, value: nil, dtype: nil) ⇒ Object
213 214 215 |
# File 'lib/tensorflow/raw_ops.rb', line 213 def assign_variable_op(resource: nil, value: nil, dtype: nil) Utils.execute("AssignVariableOp", [resource, value], dtype: dtype) end |
.atan(x: nil) ⇒ Object
217 218 219 |
# File 'lib/tensorflow/raw_ops.rb', line 217 def atan(x: nil) Utils.execute("Atan", [x]) end |
.atan2(y: nil, x: nil) ⇒ Object
221 222 223 |
# File 'lib/tensorflow/raw_ops.rb', line 221 def atan2(y: nil, x: nil) Utils.execute("Atan2", [y, x]) end |
.atanh(x: nil) ⇒ Object
225 226 227 |
# File 'lib/tensorflow/raw_ops.rb', line 225 def atanh(x: nil) Utils.execute("Atanh", [x]) end |
.audio_spectrogram(input: nil, window_size: nil, stride: nil, magnitude_squared: nil) ⇒ Object
229 230 231 |
# File 'lib/tensorflow/raw_ops.rb', line 229 def audio_spectrogram(input: nil, window_size: nil, stride: nil, magnitude_squared: nil) Utils.execute("AudioSpectrogram", [input], window_size: window_size, stride: stride, magnitude_squared: magnitude_squared) end |
.audio_summary(tag: nil, tensor: nil, sample_rate: nil, max_outputs: nil) ⇒ Object
233 234 235 |
# File 'lib/tensorflow/raw_ops.rb', line 233 def audio_summary(tag: nil, tensor: nil, sample_rate: nil, max_outputs: nil) Utils.execute("AudioSummary", [tag, tensor], sample_rate: sample_rate, max_outputs: max_outputs) end |
.audio_summary_v2(tag: nil, tensor: nil, sample_rate: nil, max_outputs: nil) ⇒ Object
237 238 239 |
# File 'lib/tensorflow/raw_ops.rb', line 237 def audio_summary_v2(tag: nil, tensor: nil, sample_rate: nil, max_outputs: nil) Utils.execute("AudioSummaryV2", [tag, tensor, sample_rate], max_outputs: max_outputs) end |
.avg_pool(value: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
241 242 243 |
# File 'lib/tensorflow/raw_ops.rb', line 241 def avg_pool(value: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) Utils.execute("AvgPool", [value], ksize: ksize, strides: strides, padding: padding, data_format: data_format) end |
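`avg_pool` expects a 4-D `[batch, height, width, channels]` tensor plus a pooling window (`ksize`), `strides`, and `padding`. A hedged sketch:

```ruby
require "tensorflow"

# One 4x4 single-channel image, shaped [1, 4, 4, 1].
image = TensorFlow.constant([[[[1.0], [2.0], [3.0], [4.0]],
                              [[5.0], [6.0], [7.0], [8.0]],
                              [[9.0], [10.0], [11.0], [12.0]],
                              [[13.0], [14.0], [15.0], [16.0]]]])

# 2x2 average pooling with stride 2 halves the spatial dimensions to 2x2.
TensorFlow::RawOps.avg_pool(value: image, ksize: [1, 2, 2, 1], strides: [1, 2, 2, 1], padding: "VALID")
```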
.avg_pool3d(input: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
245 246 247 |
# File 'lib/tensorflow/raw_ops.rb', line 245 def avg_pool3d(input: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) Utils.execute("AvgPool3D", [input], ksize: ksize, strides: strides, padding: padding, data_format: data_format) end |
.avg_pool3d_grad(orig_input_shape: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
249 250 251 |
# File 'lib/tensorflow/raw_ops.rb', line 249 def avg_pool3d_grad(orig_input_shape: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) Utils.execute("AvgPool3DGrad", [orig_input_shape, grad], ksize: ksize, strides: strides, padding: padding, data_format: data_format) end |
.avg_pool_grad(orig_input_shape: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
253 254 255 |
# File 'lib/tensorflow/raw_ops.rb', line 253 def avg_pool_grad(orig_input_shape: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) Utils.execute("AvgPoolGrad", [orig_input_shape, grad], ksize: ksize, strides: strides, padding: padding, data_format: data_format) end |
.barrier(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) ⇒ Object
257 258 259 |
# File 'lib/tensorflow/raw_ops.rb', line 257 def barrier(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) Utils.execute("Barrier", [], component_types: component_types, shapes: shapes, capacity: capacity, container: container, shared_name: shared_name) end |
.barrier_close(handle: nil, cancel_pending_enqueues: nil) ⇒ Object
261 262 263 |
# File 'lib/tensorflow/raw_ops.rb', line 261 def barrier_close(handle: nil, cancel_pending_enqueues: nil) Utils.execute("BarrierClose", [handle], cancel_pending_enqueues: cancel_pending_enqueues) end |
.barrier_incomplete_size(handle: nil) ⇒ Object
265 266 267 |
# File 'lib/tensorflow/raw_ops.rb', line 265 def barrier_incomplete_size(handle: nil) Utils.execute("BarrierIncompleteSize", [handle]) end |
.barrier_insert_many(handle: nil, keys: nil, values: nil, component_index: nil) ⇒ Object
269 270 271 |
# File 'lib/tensorflow/raw_ops.rb', line 269 def barrier_insert_many(handle: nil, keys: nil, values: nil, component_index: nil) Utils.execute("BarrierInsertMany", [handle, keys, values], component_index: component_index) end |
.barrier_ready_size(handle: nil) ⇒ Object
273 274 275 |
# File 'lib/tensorflow/raw_ops.rb', line 273 def barrier_ready_size(handle: nil) Utils.execute("BarrierReadySize", [handle]) end |
.barrier_take_many(handle: nil, num_elements: nil, component_types: nil, allow_small_batch: nil, wait_for_incomplete: nil, timeout_ms: nil) ⇒ Object
277 278 279 |
# File 'lib/tensorflow/raw_ops.rb', line 277 def barrier_take_many(handle: nil, num_elements: nil, component_types: nil, allow_small_batch: nil, wait_for_incomplete: nil, timeout_ms: nil) Utils.execute("BarrierTakeMany", [handle, num_elements], component_types: component_types, allow_small_batch: allow_small_batch, wait_for_incomplete: wait_for_incomplete, timeout_ms: timeout_ms) end |
.batch(in_tensors: nil, num_batch_threads: nil, max_batch_size: nil, max_enqueued_batches: nil, batch_timeout_micros: nil, allowed_batch_sizes: nil, grad_timeout_micros: nil, container: nil, shared_name: nil, batching_queue: nil) ⇒ Object
281 282 283 |
# File 'lib/tensorflow/raw_ops.rb', line 281 def batch(in_tensors: nil, num_batch_threads: nil, max_batch_size: nil, max_enqueued_batches: nil, batch_timeout_micros: nil, allowed_batch_sizes: nil, grad_timeout_micros: nil, container: nil, shared_name: nil, batching_queue: nil) Utils.execute("Batch", [in_tensors], num_batch_threads: num_batch_threads, max_batch_size: max_batch_size, max_enqueued_batches: max_enqueued_batches, batch_timeout_micros: batch_timeout_micros, allowed_batch_sizes: allowed_batch_sizes, grad_timeout_micros: grad_timeout_micros, container: container, shared_name: shared_name, batching_queue: batching_queue) end |
.batch_cholesky(input: nil) ⇒ Object
285 286 287 |
# File 'lib/tensorflow/raw_ops.rb', line 285 def batch_cholesky(input: nil) Utils.execute("BatchCholesky", [input]) end |
.batch_cholesky_grad(l: nil, grad: nil) ⇒ Object
289 290 291 |
# File 'lib/tensorflow/raw_ops.rb', line 289 def batch_cholesky_grad(l: nil, grad: nil) Utils.execute("BatchCholeskyGrad", [l, grad]) end |
.batch_dataset(input_dataset: nil, batch_size: nil, output_types: nil, output_shapes: nil) ⇒ Object
293 294 295 |
# File 'lib/tensorflow/raw_ops.rb', line 293 def batch_dataset(input_dataset: nil, batch_size: nil, output_types: nil, output_shapes: nil) Utils.execute("BatchDataset", [input_dataset, batch_size], output_types: output_types, output_shapes: output_shapes) end |
.batch_dataset_v2(input_dataset: nil, batch_size: nil, drop_remainder: nil, parallel_copy: nil, output_types: nil, output_shapes: nil) ⇒ Object
297 298 299 |
# File 'lib/tensorflow/raw_ops.rb', line 297 def batch_dataset_v2(input_dataset: nil, batch_size: nil, drop_remainder: nil, parallel_copy: nil, output_types: nil, output_shapes: nil) Utils.execute("BatchDatasetV2", [input_dataset, batch_size, drop_remainder], parallel_copy: parallel_copy, output_types: output_types, output_shapes: output_shapes) end |
.batch_fft(input: nil) ⇒ Object
301 302 303 |
# File 'lib/tensorflow/raw_ops.rb', line 301 def batch_fft(input: nil) Utils.execute("BatchFFT", [input]) end |
.batch_fft2d(input: nil) ⇒ Object
305 306 307 |
# File 'lib/tensorflow/raw_ops.rb', line 305 def batch_fft2d(input: nil) Utils.execute("BatchFFT2D", [input]) end |
.batch_fft3d(input: nil) ⇒ Object
309 310 311 |
# File 'lib/tensorflow/raw_ops.rb', line 309 def batch_fft3d(input: nil) Utils.execute("BatchFFT3D", [input]) end |
.batch_function(in_tensors: nil, captured_tensors: nil, f: nil, num_batch_threads: nil, max_batch_size: nil, batch_timeout_micros: nil, max_enqueued_batches: nil, allowed_batch_sizes: nil, container: nil, shared_name: nil, batching_queue: nil) ⇒ Object
313 314 315 |
# File 'lib/tensorflow/raw_ops.rb', line 313 def batch_function(in_tensors: nil, captured_tensors: nil, f: nil, num_batch_threads: nil, max_batch_size: nil, batch_timeout_micros: nil, max_enqueued_batches: nil, allowed_batch_sizes: nil, container: nil, shared_name: nil, batching_queue: nil) Utils.execute("BatchFunction", [in_tensors, captured_tensors], f: f, num_batch_threads: num_batch_threads, max_batch_size: max_batch_size, batch_timeout_micros: batch_timeout_micros, max_enqueued_batches: max_enqueued_batches, allowed_batch_sizes: allowed_batch_sizes, container: container, shared_name: shared_name, batching_queue: batching_queue) end |
.batch_ifft(input: nil) ⇒ Object
317 318 319 |
# File 'lib/tensorflow/raw_ops.rb', line 317 def batch_ifft(input: nil) Utils.execute("BatchIFFT", [input]) end |
.batch_ifft2d(input: nil) ⇒ Object
321 322 323 |
# File 'lib/tensorflow/raw_ops.rb', line 321 def batch_ifft2d(input: nil) Utils.execute("BatchIFFT2D", [input]) end |
.batch_ifft3d(input: nil) ⇒ Object
325 326 327 |
# File 'lib/tensorflow/raw_ops.rb', line 325 def batch_ifft3d(input: nil) Utils.execute("BatchIFFT3D", [input]) end |
.batch_mat_mul(x: nil, y: nil, adj_x: nil, adj_y: nil) ⇒ Object
329 330 331 |
# File 'lib/tensorflow/raw_ops.rb', line 329 def batch_mat_mul(x: nil, y: nil, adj_x: nil, adj_y: nil) Utils.execute("BatchMatMul", [x, y], adj_x: adj_x, adj_y: adj_y) end |
.batch_mat_mul_v2(x: nil, y: nil, adj_x: nil, adj_y: nil) ⇒ Object
333 334 335 |
# File 'lib/tensorflow/raw_ops.rb', line 333 def batch_mat_mul_v2(x: nil, y: nil, adj_x: nil, adj_y: nil) Utils.execute("BatchMatMulV2", [x, y], adj_x: adj_x, adj_y: adj_y) end |
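`batch_mat_mul` and `batch_mat_mul_v2` multiply the trailing 2-D matrices of two tensors batch by batch; `adj_x`/`adj_y` adjoint (conjugate-transpose) the corresponding operand first. Sketch under the same assumptions as the earlier examples:

```ruby
require "tensorflow"

# Two batches of 2x2 matrices.
a = TensorFlow.constant([[[1.0, 2.0], [3.0, 4.0]], [[1.0, 0.0], [0.0, 1.0]]])
b = TensorFlow.constant([[[5.0, 6.0], [7.0, 8.0]], [[2.0, 0.0], [0.0, 2.0]]])

TensorFlow::RawOps.batch_mat_mul_v2(x: a, y: b, adj_x: false, adj_y: false)
```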
.batch_matrix_band_part(input: nil, num_lower: nil, num_upper: nil) ⇒ Object
337 338 339 |
# File 'lib/tensorflow/raw_ops.rb', line 337 def batch_matrix_band_part(input: nil, num_lower: nil, num_upper: nil) Utils.execute("BatchMatrixBandPart", [input, num_lower, num_upper]) end |
.batch_matrix_determinant(input: nil) ⇒ Object
341 342 343 |
# File 'lib/tensorflow/raw_ops.rb', line 341 def batch_matrix_determinant(input: nil) Utils.execute("BatchMatrixDeterminant", [input]) end |
.batch_matrix_diag(diagonal: nil) ⇒ Object
345 346 347 |
# File 'lib/tensorflow/raw_ops.rb', line 345 def batch_matrix_diag(diagonal: nil) Utils.execute("BatchMatrixDiag", [diagonal]) end |
.batch_matrix_diag_part(input: nil) ⇒ Object
349 350 351 |
# File 'lib/tensorflow/raw_ops.rb', line 349 def batch_matrix_diag_part(input: nil) Utils.execute("BatchMatrixDiagPart", [input]) end |
.batch_matrix_inverse(input: nil, adjoint: nil) ⇒ Object
353 354 355 |
# File 'lib/tensorflow/raw_ops.rb', line 353 def batch_matrix_inverse(input: nil, adjoint: nil) Utils.execute("BatchMatrixInverse", [input], adjoint: adjoint) end |
.batch_matrix_set_diag(input: nil, diagonal: nil) ⇒ Object
357 358 359 |
# File 'lib/tensorflow/raw_ops.rb', line 357 def batch_matrix_set_diag(input: nil, diagonal: nil) Utils.execute("BatchMatrixSetDiag", [input, diagonal]) end |
.batch_matrix_solve(matrix: nil, rhs: nil, adjoint: nil) ⇒ Object
361 362 363 |
# File 'lib/tensorflow/raw_ops.rb', line 361 def batch_matrix_solve(matrix: nil, rhs: nil, adjoint: nil) Utils.execute("BatchMatrixSolve", [matrix, rhs], adjoint: adjoint) end |
.batch_matrix_solve_ls(matrix: nil, rhs: nil, l2_regularizer: nil, fast: nil) ⇒ Object
365 366 367 |
# File 'lib/tensorflow/raw_ops.rb', line 365 def batch_matrix_solve_ls(matrix: nil, rhs: nil, l2_regularizer: nil, fast: nil) Utils.execute("BatchMatrixSolveLs", [matrix, rhs, l2_regularizer], fast: fast) end |
.batch_matrix_triangular_solve(matrix: nil, rhs: nil, lower: nil, adjoint: nil) ⇒ Object
369 370 371 |
# File 'lib/tensorflow/raw_ops.rb', line 369 def batch_matrix_triangular_solve(matrix: nil, rhs: nil, lower: nil, adjoint: nil) Utils.execute("BatchMatrixTriangularSolve", [matrix, rhs], lower: lower, adjoint: adjoint) end |
.batch_norm_with_global_normalization(t: nil, m: nil, v: nil, beta: nil, gamma: nil, variance_epsilon: nil, scale_after_normalization: nil) ⇒ Object
373 374 375 |
# File 'lib/tensorflow/raw_ops.rb', line 373 def batch_norm_with_global_normalization(t: nil, m: nil, v: nil, beta: nil, gamma: nil, variance_epsilon: nil, scale_after_normalization: nil) Utils.execute("BatchNormWithGlobalNormalization", [t, m, v, beta, gamma], variance_epsilon: variance_epsilon, scale_after_normalization: scale_after_normalization) end |
.batch_norm_with_global_normalization_grad(t: nil, m: nil, v: nil, gamma: nil, backprop: nil, variance_epsilon: nil, scale_after_normalization: nil) ⇒ Object
377 378 379 |
# File 'lib/tensorflow/raw_ops.rb', line 377 def batch_norm_with_global_normalization_grad(t: nil, m: nil, v: nil, gamma: nil, backprop: nil, variance_epsilon: nil, scale_after_normalization: nil) Utils.execute("BatchNormWithGlobalNormalizationGrad", [t, m, v, gamma, backprop], variance_epsilon: variance_epsilon, scale_after_normalization: scale_after_normalization) end |
.batch_self_adjoint_eig(input: nil) ⇒ Object
381 382 383 |
# File 'lib/tensorflow/raw_ops.rb', line 381 def batch_self_adjoint_eig(input: nil) Utils.execute("BatchSelfAdjointEig", [input]) end |
.batch_self_adjoint_eig_v2(input: nil, compute_v: nil) ⇒ Object
385 386 387 |
# File 'lib/tensorflow/raw_ops.rb', line 385 def batch_self_adjoint_eig_v2(input: nil, compute_v: nil) Utils.execute("BatchSelfAdjointEigV2", [input], compute_v: compute_v) end |
.batch_svd(input: nil, compute_uv: nil, full_matrices: nil) ⇒ Object
389 390 391 |
# File 'lib/tensorflow/raw_ops.rb', line 389 def batch_svd(input: nil, compute_uv: nil, full_matrices: nil) Utils.execute("BatchSvd", [input], compute_uv: compute_uv, full_matrices: full_matrices) end |
.batch_to_space(input: nil, crops: nil, block_size: nil) ⇒ Object
393 394 395 |
# File 'lib/tensorflow/raw_ops.rb', line 393 def batch_to_space(input: nil, crops: nil, block_size: nil) Utils.execute("BatchToSpace", [input, crops], block_size: block_size) end |
.batch_to_space_nd(input: nil, block_shape: nil, crops: nil) ⇒ Object
397 398 399 |
# File 'lib/tensorflow/raw_ops.rb', line 397 def batch_to_space_nd(input: nil, block_shape: nil, crops: nil) Utils.execute("BatchToSpaceND", [input, block_shape, crops]) end |
.bessel_i0e(x: nil) ⇒ Object
401 402 403 |
# File 'lib/tensorflow/raw_ops.rb', line 401 def bessel_i0e(x: nil) Utils.execute("BesselI0e", [x]) end |
.bessel_i1e(x: nil) ⇒ Object
405 406 407 |
# File 'lib/tensorflow/raw_ops.rb', line 405 def bessel_i1e(x: nil) Utils.execute("BesselI1e", [x]) end |
.betainc(a: nil, b: nil, x: nil) ⇒ Object
409 410 411 |
# File 'lib/tensorflow/raw_ops.rb', line 409 def betainc(a: nil, b: nil, x: nil) Utils.execute("Betainc", [a, b, x]) end |
.bias_add(value: nil, bias: nil, data_format: nil) ⇒ Object
413 414 415 |
# File 'lib/tensorflow/raw_ops.rb', line 413 def bias_add(value: nil, bias: nil, data_format: nil) Utils.execute("BiasAdd", [value, bias], data_format: data_format) end |
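`bias_add` adds a 1-D bias along the channel dimension of `value` (the last dimension for the default `NHWC` data format). A minimal sketch:

```ruby
require "tensorflow"

value = TensorFlow.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
bias  = TensorFlow.constant([0.5, 0.5, 0.5])

# Broadcasts the bias across the first dimension: expected [[1.5, 2.5, 3.5], [4.5, 5.5, 6.5]].
TensorFlow::RawOps.bias_add(value: value, bias: bias)
```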
.bias_add_grad(out_backprop: nil, data_format: nil) ⇒ Object
417 418 419 |
# File 'lib/tensorflow/raw_ops.rb', line 417 def bias_add_grad(out_backprop: nil, data_format: nil) Utils.execute("BiasAddGrad", [out_backprop], data_format: data_format) end |
.bias_add_v1(value: nil, bias: nil) ⇒ Object
421 422 423 |
# File 'lib/tensorflow/raw_ops.rb', line 421 def bias_add_v1(value: nil, bias: nil) Utils.execute("BiasAddV1", [value, bias]) end |
.big_query_reader(container: nil, shared_name: nil, project_id: nil, dataset_id: nil, table_id: nil, columns: nil, timestamp_millis: nil, test_end_point: nil) ⇒ Object
425 426 427 |
# File 'lib/tensorflow/raw_ops.rb', line 425 def big_query_reader(container: nil, shared_name: nil, project_id: nil, dataset_id: nil, table_id: nil, columns: nil, timestamp_millis: nil, test_end_point: nil) Utils.execute("BigQueryReader", [], container: container, shared_name: shared_name, project_id: project_id, dataset_id: dataset_id, table_id: table_id, columns: columns, timestamp_millis: timestamp_millis, test_end_point: test_end_point) end |
.bincount(arr: nil, size: nil, weights: nil) ⇒ Object
429 430 431 |
# File 'lib/tensorflow/raw_ops.rb', line 429 def bincount(arr: nil, size: nil, weights: nil) Utils.execute("Bincount", [arr, size, weights]) end |
.bitcast(input: nil, type: nil) ⇒ Object
433 434 435 |
# File 'lib/tensorflow/raw_ops.rb', line 433 def bitcast(input: nil, type: nil) Utils.execute("Bitcast", [input], type: type) end |
.bitwise_and(x: nil, y: nil) ⇒ Object
437 438 439 |
# File 'lib/tensorflow/raw_ops.rb', line 437 def bitwise_and(x: nil, y: nil) Utils.execute("BitwiseAnd", [x, y]) end |
.bitwise_or(x: nil, y: nil) ⇒ Object
441 442 443 |
# File 'lib/tensorflow/raw_ops.rb', line 441 def bitwise_or(x: nil, y: nil) Utils.execute("BitwiseOr", [x, y]) end |
.bitwise_xor(x: nil, y: nil) ⇒ Object
445 446 447 |
# File 'lib/tensorflow/raw_ops.rb', line 445 def bitwise_xor(x: nil, y: nil) Utils.execute("BitwiseXor", [x, y]) end |
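The bitwise ops operate elementwise on integer tensors. Sketch (same assumptions as the earlier examples):

```ruby
require "tensorflow"

x = TensorFlow.constant([0b1100, 0b1010])
y = TensorFlow.constant([0b1010, 0b0110])

TensorFlow::RawOps.bitwise_and(x: x, y: y) # => [0b1000, 0b0010]
TensorFlow::RawOps.bitwise_or(x: x, y: y)  # => [0b1110, 0b1110]
TensorFlow::RawOps.bitwise_xor(x: x, y: y) # => [0b0110, 0b1100]
```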
.boosted_trees_aggregate_stats(node_ids: nil, gradients: nil, hessians: nil, feature: nil, max_splits: nil, num_buckets: nil) ⇒ Object
449 450 451 |
# File 'lib/tensorflow/raw_ops.rb', line 449 def boosted_trees_aggregate_stats(node_ids: nil, gradients: nil, hessians: nil, feature: nil, max_splits: nil, num_buckets: nil) Utils.execute("BoostedTreesAggregateStats", [node_ids, gradients, hessians, feature], max_splits: max_splits, num_buckets: num_buckets) end |
.boosted_trees_bucketize(float_values: nil, bucket_boundaries: nil, num_features: nil) ⇒ Object
453 454 455 |
# File 'lib/tensorflow/raw_ops.rb', line 453 def boosted_trees_bucketize(float_values: nil, bucket_boundaries: nil, num_features: nil) Utils.execute("BoostedTreesBucketize", [float_values, bucket_boundaries], num_features: num_features) end |
.boosted_trees_calculate_best_feature_split(node_id_range: nil, stats_summary: nil, l1: nil, l2: nil, tree_complexity: nil, min_node_weight: nil, logits_dimension: nil, split_type: nil) ⇒ Object
457 458 459 |
# File 'lib/tensorflow/raw_ops.rb', line 457 def boosted_trees_calculate_best_feature_split(node_id_range: nil, stats_summary: nil, l1: nil, l2: nil, tree_complexity: nil, min_node_weight: nil, logits_dimension: nil, split_type: nil) Utils.execute("BoostedTreesCalculateBestFeatureSplit", [node_id_range, stats_summary, l1, l2, tree_complexity, min_node_weight], logits_dimension: logits_dimension, split_type: split_type) end |
.boosted_trees_calculate_best_gains_per_feature(node_id_range: nil, stats_summary_list: nil, l1: nil, l2: nil, tree_complexity: nil, min_node_weight: nil, max_splits: nil, num_features: nil) ⇒ Object
461 462 463 |
# File 'lib/tensorflow/raw_ops.rb', line 461 def boosted_trees_calculate_best_gains_per_feature(node_id_range: nil, stats_summary_list: nil, l1: nil, l2: nil, tree_complexity: nil, min_node_weight: nil, max_splits: nil, num_features: nil) Utils.execute("BoostedTreesCalculateBestGainsPerFeature", [node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight], max_splits: max_splits, num_features: num_features) end |
.boosted_trees_center_bias(tree_ensemble_handle: nil, mean_gradients: nil, mean_hessians: nil, l1: nil, l2: nil) ⇒ Object
465 466 467 |
# File 'lib/tensorflow/raw_ops.rb', line 465 def boosted_trees_center_bias(tree_ensemble_handle: nil, mean_gradients: nil, mean_hessians: nil, l1: nil, l2: nil) Utils.execute("BoostedTreesCenterBias", [tree_ensemble_handle, mean_gradients, mean_hessians, l1, l2]) end |
.boosted_trees_create_ensemble(tree_ensemble_handle: nil, stamp_token: nil, tree_ensemble_serialized: nil) ⇒ Object
469 470 471 |
# File 'lib/tensorflow/raw_ops.rb', line 469 def boosted_trees_create_ensemble(tree_ensemble_handle: nil, stamp_token: nil, tree_ensemble_serialized: nil) Utils.execute("BoostedTreesCreateEnsemble", [tree_ensemble_handle, stamp_token, tree_ensemble_serialized]) end |
.boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle: nil, epsilon: nil, num_streams: nil, max_elements: nil) ⇒ Object
473 474 475 |
# File 'lib/tensorflow/raw_ops.rb', line 473 def boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle: nil, epsilon: nil, num_streams: nil, max_elements: nil) Utils.execute("BoostedTreesCreateQuantileStreamResource", [quantile_stream_resource_handle, epsilon, num_streams], max_elements: max_elements) end |
.boosted_trees_deserialize_ensemble(tree_ensemble_handle: nil, stamp_token: nil, tree_ensemble_serialized: nil) ⇒ Object
477 478 479 |
# File 'lib/tensorflow/raw_ops.rb', line 477 def boosted_trees_deserialize_ensemble(tree_ensemble_handle: nil, stamp_token: nil, tree_ensemble_serialized: nil) Utils.execute("BoostedTreesDeserializeEnsemble", [tree_ensemble_handle, stamp_token, tree_ensemble_serialized]) end |
.boosted_trees_ensemble_resource_handle_op(container: nil, shared_name: nil) ⇒ Object
481 482 483 |
# File 'lib/tensorflow/raw_ops.rb', line 481 def boosted_trees_ensemble_resource_handle_op(container: nil, shared_name: nil) Utils.execute("BoostedTreesEnsembleResourceHandleOp", [], container: container, shared_name: shared_name) end |
.boosted_trees_example_debug_outputs(tree_ensemble_handle: nil, bucketized_features: nil, num_bucketized_features: nil, logits_dimension: nil) ⇒ Object
485 486 487 |
# File 'lib/tensorflow/raw_ops.rb', line 485 def boosted_trees_example_debug_outputs(tree_ensemble_handle: nil, bucketized_features: nil, num_bucketized_features: nil, logits_dimension: nil) Utils.execute("BoostedTreesExampleDebugOutputs", [tree_ensemble_handle, bucketized_features], num_bucketized_features: num_bucketized_features, logits_dimension: logits_dimension) end |
.boosted_trees_get_ensemble_states(tree_ensemble_handle: nil) ⇒ Object
489 490 491 |
# File 'lib/tensorflow/raw_ops.rb', line 489 def boosted_trees_get_ensemble_states(tree_ensemble_handle: nil) Utils.execute("BoostedTreesGetEnsembleStates", [tree_ensemble_handle]) end |
.boosted_trees_make_quantile_summaries(float_values: nil, example_weights: nil, epsilon: nil, num_features: nil) ⇒ Object
493 494 495 |
# File 'lib/tensorflow/raw_ops.rb', line 493 def boosted_trees_make_quantile_summaries(float_values: nil, example_weights: nil, epsilon: nil, num_features: nil) Utils.execute("BoostedTreesMakeQuantileSummaries", [float_values, example_weights, epsilon], num_features: num_features) end |
.boosted_trees_make_stats_summary(node_ids: nil, gradients: nil, hessians: nil, bucketized_features_list: nil, max_splits: nil, num_buckets: nil, num_features: nil) ⇒ Object
497 498 499 |
# File 'lib/tensorflow/raw_ops.rb', line 497 def boosted_trees_make_stats_summary(node_ids: nil, gradients: nil, hessians: nil, bucketized_features_list: nil, max_splits: nil, num_buckets: nil, num_features: nil) Utils.execute("BoostedTreesMakeStatsSummary", [node_ids, gradients, hessians, bucketized_features_list], max_splits: max_splits, num_buckets: num_buckets, num_features: num_features) end |
.boosted_trees_predict(tree_ensemble_handle: nil, bucketized_features: nil, num_bucketized_features: nil, logits_dimension: nil) ⇒ Object
501 502 503 |
# File 'lib/tensorflow/raw_ops.rb', line 501 def boosted_trees_predict(tree_ensemble_handle: nil, bucketized_features: nil, num_bucketized_features: nil, logits_dimension: nil) Utils.execute("BoostedTreesPredict", [tree_ensemble_handle, bucketized_features], num_bucketized_features: num_bucketized_features, logits_dimension: logits_dimension) end |
.boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle: nil, summaries: nil, num_features: nil) ⇒ Object
505 506 507 |
# File 'lib/tensorflow/raw_ops.rb', line 505 def boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle: nil, summaries: nil, num_features: nil) Utils.execute("BoostedTreesQuantileStreamResourceAddSummaries", [quantile_stream_resource_handle, summaries], num_features: num_features) end |
.boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle: nil, bucket_boundaries: nil, num_streams: nil) ⇒ Object
509 510 511 |
# File 'lib/tensorflow/raw_ops.rb', line 509 def boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle: nil, bucket_boundaries: nil, num_streams: nil) Utils.execute("BoostedTreesQuantileStreamResourceDeserialize", [quantile_stream_resource_handle, bucket_boundaries], num_streams: num_streams) end |
.boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle: nil, num_buckets: nil, generate_quantiles: nil) ⇒ Object
513 514 515 |
# File 'lib/tensorflow/raw_ops.rb', line 513 def boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle: nil, num_buckets: nil, generate_quantiles: nil) Utils.execute("BoostedTreesQuantileStreamResourceFlush", [quantile_stream_resource_handle, num_buckets], generate_quantiles: generate_quantiles) end |
.boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle: nil, num_features: nil) ⇒ Object
517 518 519 |
# File 'lib/tensorflow/raw_ops.rb', line 517 def boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle: nil, num_features: nil) Utils.execute("BoostedTreesQuantileStreamResourceGetBucketBoundaries", [quantile_stream_resource_handle], num_features: num_features) end |
.boosted_trees_quantile_stream_resource_handle_op(container: nil, shared_name: nil) ⇒ Object
521 522 523 |
# File 'lib/tensorflow/raw_ops.rb', line 521 def boosted_trees_quantile_stream_resource_handle_op(container: nil, shared_name: nil) Utils.execute("BoostedTreesQuantileStreamResourceHandleOp", [], container: container, shared_name: shared_name) end |
.boosted_trees_serialize_ensemble(tree_ensemble_handle: nil) ⇒ Object
525 526 527 |
# File 'lib/tensorflow/raw_ops.rb', line 525 def boosted_trees_serialize_ensemble(tree_ensemble_handle: nil) Utils.execute("BoostedTreesSerializeEnsemble", [tree_ensemble_handle]) end |
.boosted_trees_training_predict(tree_ensemble_handle: nil, cached_tree_ids: nil, cached_node_ids: nil, bucketized_features: nil, num_bucketized_features: nil, logits_dimension: nil) ⇒ Object
529 530 531 |
# File 'lib/tensorflow/raw_ops.rb', line 529 def boosted_trees_training_predict(tree_ensemble_handle: nil, cached_tree_ids: nil, cached_node_ids: nil, bucketized_features: nil, num_bucketized_features: nil, logits_dimension: nil) Utils.execute("BoostedTreesTrainingPredict", [tree_ensemble_handle, cached_tree_ids, cached_node_ids, bucketized_features], num_bucketized_features: num_bucketized_features, logits_dimension: logits_dimension) end |
.boosted_trees_update_ensemble(tree_ensemble_handle: nil, feature_ids: nil, node_ids: nil, gains: nil, thresholds: nil, left_node_contribs: nil, right_node_contribs: nil, max_depth: nil, learning_rate: nil, pruning_mode: nil, num_features: nil) ⇒ Object
533 534 535 |
# File 'lib/tensorflow/raw_ops.rb', line 533 def boosted_trees_update_ensemble(tree_ensemble_handle: nil, feature_ids: nil, node_ids: nil, gains: nil, thresholds: nil, left_node_contribs: nil, right_node_contribs: nil, max_depth: nil, learning_rate: nil, pruning_mode: nil, num_features: nil) Utils.execute("BoostedTreesUpdateEnsemble", [tree_ensemble_handle, feature_ids, node_ids, gains, thresholds, left_node_contribs, right_node_contribs, max_depth, learning_rate], pruning_mode: pruning_mode, num_features: num_features) end |
.broadcast_args(s0: nil, s1: nil) ⇒ Object
537 538 539 |
# File 'lib/tensorflow/raw_ops.rb', line 537 def broadcast_args(s0: nil, s1: nil) Utils.execute("BroadcastArgs", [s0, s1]) end |
.broadcast_gradient_args(s0: nil, s1: nil) ⇒ Object
541 542 543 |
# File 'lib/tensorflow/raw_ops.rb', line 541 def broadcast_gradient_args(s0: nil, s1: nil) Utils.execute("BroadcastGradientArgs", [s0, s1]) end |
.broadcast_to(input: nil, shape: nil) ⇒ Object
545 546 547 |
# File 'lib/tensorflow/raw_ops.rb', line 545 def broadcast_to(input: nil, shape: nil) Utils.execute("BroadcastTo", [input, shape]) end |
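`broadcast_to` tiles `input` out to `shape` following the usual broadcasting rules; note that `shape` is itself a tensor input here, not an attribute. Hedged sketch:

```ruby
require "tensorflow"

row = TensorFlow.constant([1, 2, 3])

# Expands the 1-D tensor to shape [2, 3]: expected [[1, 2, 3], [1, 2, 3]].
TensorFlow::RawOps.broadcast_to(input: row, shape: TensorFlow.constant([2, 3]))
```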
.bucketize(input: nil, boundaries: nil) ⇒ Object
549 550 551 |
# File 'lib/tensorflow/raw_ops.rb', line 549 def bucketize(input: nil, boundaries: nil) Utils.execute("Bucketize", [input], boundaries: boundaries) end |
.cache_dataset(input_dataset: nil, filename: nil, output_types: nil, output_shapes: nil) ⇒ Object
565 566 567 |
# File 'lib/tensorflow/raw_ops.rb', line 565 def cache_dataset(input_dataset: nil, filename: nil, output_types: nil, output_shapes: nil) Utils.execute("CacheDataset", [input_dataset, filename], output_types: output_types, output_shapes: output_shapes) end |
.case(branch_index: nil, input: nil, branches: nil, output_shapes: nil) ⇒ Object
569 570 571 |
# File 'lib/tensorflow/raw_ops.rb', line 569 def case(branch_index: nil, input: nil, branches: nil, output_shapes: nil) Utils.execute("Case", [branch_index, input], branches: branches, output_shapes: output_shapes) end |
.cast(x: nil) ⇒ Object
573 574 575 |
# File 'lib/tensorflow/raw_ops.rb', line 573 def cast(x: nil) Utils.execute("Cast", [x]) end |
.ceil(x: nil) ⇒ Object
577 578 579 |
# File 'lib/tensorflow/raw_ops.rb', line 577 def ceil(x: nil) Utils.execute("Ceil", [x]) end |
.check_numerics(tensor: nil, message: nil) ⇒ Object
581 582 583 |
# File 'lib/tensorflow/raw_ops.rb', line 581 def check_numerics(tensor: nil, message: nil) Utils.execute("CheckNumerics", [tensor], message: message) end |
.cholesky(input: nil) ⇒ Object
585 586 587 |
# File 'lib/tensorflow/raw_ops.rb', line 585 def cholesky(input: nil) Utils.execute("Cholesky", [input]) end |
.cholesky_grad(l: nil, grad: nil) ⇒ Object
589 590 591 |
# File 'lib/tensorflow/raw_ops.rb', line 589 def cholesky_grad(l: nil, grad: nil) Utils.execute("CholeskyGrad", [l, grad]) end |
.choose_fastest_branch_dataset(input_dataset: nil, ratio_numerator: nil, ratio_denominator: nil, other_arguments: nil, num_elements_per_branch: nil, branches: nil, other_arguments_lengths: nil, output_types: nil, output_shapes: nil) ⇒ Object
593 594 595 |
# File 'lib/tensorflow/raw_ops.rb', line 593 def choose_fastest_branch_dataset(input_dataset: nil, ratio_numerator: nil, ratio_denominator: nil, other_arguments: nil, num_elements_per_branch: nil, branches: nil, other_arguments_lengths: nil, output_types: nil, output_shapes: nil) Utils.execute("ChooseFastestBranchDataset", [input_dataset, ratio_numerator, ratio_denominator, other_arguments], num_elements_per_branch: num_elements_per_branch, branches: branches, other_arguments_lengths: other_arguments_lengths, output_types: output_types, output_shapes: output_shapes) end |
.clip_by_value(t: nil, clip_value_min: nil, clip_value_max: nil) ⇒ Object
597 598 599 |
# File 'lib/tensorflow/raw_ops.rb', line 597 def clip_by_value(t: nil, clip_value_min: nil, clip_value_max: nil) Utils.execute("ClipByValue", [t, clip_value_min, clip_value_max]) end |
.close_summary_writer(writer: nil) ⇒ Object
601 602 603 |
# File 'lib/tensorflow/raw_ops.rb', line 601 def close_summary_writer(writer: nil) Utils.execute("CloseSummaryWriter", [writer]) end |
.collective_bcast_recv(group_size: nil, group_key: nil, instance_key: nil, shape: nil) ⇒ Object
605 606 607 |
# File 'lib/tensorflow/raw_ops.rb', line 605 def collective_bcast_recv(group_size: nil, group_key: nil, instance_key: nil, shape: nil) Utils.execute("CollectiveBcastRecv", [], group_size: group_size, group_key: group_key, instance_key: instance_key, shape: shape) end |
.collective_bcast_send(input: nil, group_size: nil, group_key: nil, instance_key: nil, shape: nil) ⇒ Object
609 610 611 |
# File 'lib/tensorflow/raw_ops.rb', line 609 def collective_bcast_send(input: nil, group_size: nil, group_key: nil, instance_key: nil, shape: nil) Utils.execute("CollectiveBcastSend", [input], group_size: group_size, group_key: group_key, instance_key: instance_key, shape: shape) end |
.collective_gather(input: nil, group_size: nil, group_key: nil, instance_key: nil, shape: nil) ⇒ Object
613 614 615 |
# File 'lib/tensorflow/raw_ops.rb', line 613 def collective_gather(input: nil, group_size: nil, group_key: nil, instance_key: nil, shape: nil) Utils.execute("CollectiveGather", [input], group_size: group_size, group_key: group_key, instance_key: instance_key, shape: shape) end |
.collective_permute(input: nil, source_target_pairs: nil) ⇒ Object
617 618 619 |
# File 'lib/tensorflow/raw_ops.rb', line 617 def collective_permute(input: nil, source_target_pairs: nil) Utils.execute("CollectivePermute", [input, source_target_pairs]) end |
.collective_reduce(input: nil, group_size: nil, group_key: nil, instance_key: nil, merge_op: nil, final_op: nil, subdiv_offsets: nil, wait_for: nil) ⇒ Object
621 622 623 |
# File 'lib/tensorflow/raw_ops.rb', line 621 def collective_reduce(input: nil, group_size: nil, group_key: nil, instance_key: nil, merge_op: nil, final_op: nil, subdiv_offsets: nil, wait_for: nil) Utils.execute("CollectiveReduce", [input], group_size: group_size, group_key: group_key, instance_key: instance_key, merge_op: merge_op, final_op: final_op, subdiv_offsets: subdiv_offsets, wait_for: wait_for) end |
.combined_non_max_suppression(boxes: nil, scores: nil, max_output_size_per_class: nil, max_total_size: nil, iou_threshold: nil, score_threshold: nil, pad_per_class: nil, clip_boxes: nil) ⇒ Object
625 626 627 |
# File 'lib/tensorflow/raw_ops.rb', line 625 def combined_non_max_suppression(boxes: nil, scores: nil, max_output_size_per_class: nil, max_total_size: nil, iou_threshold: nil, score_threshold: nil, pad_per_class: nil, clip_boxes: nil) Utils.execute("CombinedNonMaxSuppression", [boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold], pad_per_class: pad_per_class, clip_boxes: clip_boxes) end |
.compare_and_bitpack(input: nil, threshold: nil) ⇒ Object
629 630 631 |
# File 'lib/tensorflow/raw_ops.rb', line 629 def compare_and_bitpack(input: nil, threshold: nil) Utils.execute("CompareAndBitpack", [input, threshold]) end |
.complex(real: nil, imag: nil) ⇒ Object
633 634 635 |
# File 'lib/tensorflow/raw_ops.rb', line 633 def complex(real: nil, imag: nil) Utils.execute("Complex", [real, imag]) end |
.complex_abs(x: nil) ⇒ Object
637 638 639 |
# File 'lib/tensorflow/raw_ops.rb', line 637 def complex_abs(x: nil) Utils.execute("ComplexAbs", [x]) end |
.compute_accidental_hits(true_classes: nil, sampled_candidates: nil, num_true: nil, seed: nil, seed2: nil) ⇒ Object
641 642 643 |
# File 'lib/tensorflow/raw_ops.rb', line 641 def compute_accidental_hits(true_classes: nil, sampled_candidates: nil, num_true: nil, seed: nil, seed2: nil) Utils.execute("ComputeAccidentalHits", [true_classes, sampled_candidates], num_true: num_true, seed: seed, seed2: seed2) end |
.concat(concat_dim: nil, values: nil) ⇒ Object
645 646 647 |
# File 'lib/tensorflow/raw_ops.rb', line 645 def concat(concat_dim: nil, values: nil) Utils.execute("Concat", [concat_dim, values]) end |
.concat_offset(concat_dim: nil, shape: nil) ⇒ Object
649 650 651 |
# File 'lib/tensorflow/raw_ops.rb', line 649 def concat_offset(concat_dim: nil, shape: nil) Utils.execute("ConcatOffset", [concat_dim, shape]) end |
.concat_v2(values: nil, axis: nil) ⇒ Object
653 654 655 |
# File 'lib/tensorflow/raw_ops.rb', line 653 def concat_v2(values: nil, axis: nil) Utils.execute("ConcatV2", [values, axis]) end |
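`concat_v2` joins a list of tensors along `axis`, which is also passed as a tensor input rather than an attribute. Sketch:

```ruby
require "tensorflow"

a = TensorFlow.constant([[1, 2], [3, 4]])
b = TensorFlow.constant([[5, 6], [7, 8]])

# Axis 0 stacks rows (result shape [4, 2]); axis 1 would stack columns (shape [2, 4]).
TensorFlow::RawOps.concat_v2(values: [a, b], axis: TensorFlow.constant(0))
```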
.concatenate_dataset(input_dataset: nil, another_dataset: nil, output_types: nil, output_shapes: nil) ⇒ Object
657 658 659 |
# File 'lib/tensorflow/raw_ops.rb', line 657 def concatenate_dataset(input_dataset: nil, another_dataset: nil, output_types: nil, output_shapes: nil) Utils.execute("ConcatenateDataset", [input_dataset, another_dataset], output_types: output_types, output_shapes: output_shapes) end |
.conditional_accumulator(dtype: nil, shape: nil, container: nil, shared_name: nil, reduction_type: nil) ⇒ Object
661 662 663 |
# File 'lib/tensorflow/raw_ops.rb', line 661 def conditional_accumulator(dtype: nil, shape: nil, container: nil, shared_name: nil, reduction_type: nil) Utils.execute("ConditionalAccumulator", [], dtype: dtype, shape: shape, container: container, shared_name: shared_name, reduction_type: reduction_type) end |
.configure_distributed_tpu(embedding_config: nil, tpu_embedding_config: nil, is_global_init: nil) ⇒ Object
665 666 667 |
# File 'lib/tensorflow/raw_ops.rb', line 665 def configure_distributed_tpu(embedding_config: nil, tpu_embedding_config: nil, is_global_init: nil) Utils.execute("ConfigureDistributedTPU", [], embedding_config: embedding_config, tpu_embedding_config: tpu_embedding_config, is_global_init: is_global_init) end |
.conj(input: nil) ⇒ Object
669 670 671 |
# File 'lib/tensorflow/raw_ops.rb', line 669 def conj(input: nil) Utils.execute("Conj", [input]) end |
.conjugate_transpose(x: nil, perm: nil) ⇒ Object
673 674 675 |
# File 'lib/tensorflow/raw_ops.rb', line 673 def conjugate_transpose(x: nil, perm: nil) Utils.execute("ConjugateTranspose", [x, perm]) end |
.const(value: nil, dtype: nil) ⇒ Object
677 678 679 |
# File 'lib/tensorflow/raw_ops.rb', line 677 def const(value: nil, dtype: nil) Utils.execute("Const", [], value: value, dtype: dtype) end |
.consume_mutex_lock(mutex_lock: nil) ⇒ Object
681 682 683 |
# File 'lib/tensorflow/raw_ops.rb', line 681 def consume_mutex_lock(mutex_lock: nil) Utils.execute("ConsumeMutexLock", [mutex_lock]) end |
.control_trigger ⇒ Object
685 686 687 |
# File 'lib/tensorflow/raw_ops.rb', line 685 def control_trigger Utils.execute("ControlTrigger", []) end |
.conv2d(input: nil, filter: nil, strides: nil, use_cudnn_on_gpu: nil, padding: nil, explicit_paddings: nil, data_format: nil, dilations: nil) ⇒ Object
689 690 691 |
# File 'lib/tensorflow/raw_ops.rb', line 689 def conv2d(input: nil, filter: nil, strides: nil, use_cudnn_on_gpu: nil, padding: nil, explicit_paddings: nil, data_format: nil, dilations: nil) Utils.execute("Conv2D", [input, filter], strides: strides, use_cudnn_on_gpu: use_cudnn_on_gpu, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations) end |
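`conv2d` convolves an `NHWC` input with a `[filter_height, filter_width, in_channels, out_channels]` filter. A sketch under the same assumptions, leaving the optional attributes at their defaults:

```ruby
require "tensorflow"

# One 3x3 single-channel image, shape [1, 3, 3, 1].
input = TensorFlow.constant([[[[1.0], [2.0], [3.0]],
                              [[4.0], [5.0], [6.0]],
                              [[7.0], [8.0], [9.0]]]])

# A 2x2 kernel with one input and one output channel, shape [2, 2, 1, 1].
filter = TensorFlow.constant([[[[1.0]], [[0.0]]],
                              [[[0.0]], [[1.0]]]])

# VALID padding with stride 1 yields a [1, 2, 2, 1] output.
TensorFlow::RawOps.conv2d(input: input, filter: filter, strides: [1, 1, 1, 1], padding: "VALID")
```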
.conv2d_backprop_filter(input: nil, filter_sizes: nil, out_backprop: nil, strides: nil, use_cudnn_on_gpu: nil, padding: nil, explicit_paddings: nil, data_format: nil, dilations: nil) ⇒ Object
693 694 695 |
# File 'lib/tensorflow/raw_ops.rb', line 693 def conv2d_backprop_filter(input: nil, filter_sizes: nil, out_backprop: nil, strides: nil, use_cudnn_on_gpu: nil, padding: nil, explicit_paddings: nil, data_format: nil, dilations: nil) Utils.execute("Conv2DBackpropFilter", [input, filter_sizes, out_backprop], strides: strides, use_cudnn_on_gpu: use_cudnn_on_gpu, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations) end |
.conv2d_backprop_input(input_sizes: nil, filter: nil, out_backprop: nil, strides: nil, use_cudnn_on_gpu: nil, padding: nil, explicit_paddings: nil, data_format: nil, dilations: nil) ⇒ Object
697 698 699 |
# File 'lib/tensorflow/raw_ops.rb', line 697 def conv2d_backprop_input(input_sizes: nil, filter: nil, out_backprop: nil, strides: nil, use_cudnn_on_gpu: nil, padding: nil, explicit_paddings: nil, data_format: nil, dilations: nil) Utils.execute("Conv2DBackpropInput", [input_sizes, filter, out_backprop], strides: strides, use_cudnn_on_gpu: use_cudnn_on_gpu, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations) end |
.conv3d(input: nil, filter: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) ⇒ Object
701 702 703 |
# File 'lib/tensorflow/raw_ops.rb', line 701 def conv3d(input: nil, filter: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) Utils.execute("Conv3D", [input, filter], strides: strides, padding: padding, data_format: data_format, dilations: dilations) end |
.conv3d_backprop_filter(input: nil, filter: nil, out_backprop: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
705 706 707 |
# File 'lib/tensorflow/raw_ops.rb', line 705 def conv3d_backprop_filter(input: nil, filter: nil, out_backprop: nil, strides: nil, padding: nil, dilations: nil) Utils.execute("Conv3DBackpropFilter", [input, filter, out_backprop], strides: strides, padding: padding, dilations: dilations) end |
.conv3d_backprop_filter_v2(input: nil, filter_sizes: nil, out_backprop: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) ⇒ Object
709 710 711 |
# File 'lib/tensorflow/raw_ops.rb', line 709 def conv3d_backprop_filter_v2(input: nil, filter_sizes: nil, out_backprop: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) Utils.execute("Conv3DBackpropFilterV2", [input, filter_sizes, out_backprop], strides: strides, padding: padding, data_format: data_format, dilations: dilations) end |
.conv3d_backprop_input(input: nil, filter: nil, out_backprop: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
713 714 715 |
# File 'lib/tensorflow/raw_ops.rb', line 713 def conv3d_backprop_input(input: nil, filter: nil, out_backprop: nil, strides: nil, padding: nil, dilations: nil) Utils.execute("Conv3DBackpropInput", [input, filter, out_backprop], strides: strides, padding: padding, dilations: dilations) end |
.conv3d_backprop_input_v2(input_sizes: nil, filter: nil, out_backprop: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) ⇒ Object
717 718 719 |
# File 'lib/tensorflow/raw_ops.rb', line 717 def conv3d_backprop_input_v2(input_sizes: nil, filter: nil, out_backprop: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) Utils.execute("Conv3DBackpropInputV2", [input_sizes, filter, out_backprop], strides: strides, padding: padding, data_format: data_format, dilations: dilations) end |
.copy(input: nil, tensor_name: nil, debug_ops_spec: nil) ⇒ Object
721 722 723 |
# File 'lib/tensorflow/raw_ops.rb', line 721 def copy(input: nil, tensor_name: nil, debug_ops_spec: nil) Utils.execute("Copy", [input], tensor_name: tensor_name, debug_ops_spec: debug_ops_spec) end |
.copy_host(input: nil, tensor_name: nil, debug_ops_spec: nil) ⇒ Object
725 726 727 |
# File 'lib/tensorflow/raw_ops.rb', line 725 def copy_host(input: nil, tensor_name: nil, debug_ops_spec: nil) Utils.execute("CopyHost", [input], tensor_name: tensor_name, debug_ops_spec: debug_ops_spec) end |
.cos(x: nil) ⇒ Object
729 730 731 |
# File 'lib/tensorflow/raw_ops.rb', line 729 def cos(x: nil) Utils.execute("Cos", [x]) end |
.cosh(x: nil) ⇒ Object
733 734 735 |
# File 'lib/tensorflow/raw_ops.rb', line 733 def cosh(x: nil) Utils.execute("Cosh", [x]) end |
.count_up_to(ref: nil, limit: nil) ⇒ Object
737 738 739 |
# File 'lib/tensorflow/raw_ops.rb', line 737 def count_up_to(ref: nil, limit: nil) Utils.execute("CountUpTo", [ref], limit: limit) end |
.create_summary_db_writer(writer: nil, db_uri: nil, experiment_name: nil, run_name: nil, user_name: nil) ⇒ Object
741 742 743 |
# File 'lib/tensorflow/raw_ops.rb', line 741 def create_summary_db_writer(writer: nil, db_uri: nil, experiment_name: nil, run_name: nil, user_name: nil) Utils.execute("CreateSummaryDbWriter", [writer, db_uri, experiment_name, run_name, user_name]) end |
.create_summary_file_writer(writer: nil, logdir: nil, max_queue: nil, flush_millis: nil, filename_suffix: nil) ⇒ Object
745 746 747 |
# File 'lib/tensorflow/raw_ops.rb', line 745 def create_summary_file_writer(writer: nil, logdir: nil, max_queue: nil, flush_millis: nil, filename_suffix: nil) Utils.execute("CreateSummaryFileWriter", [writer, logdir, max_queue, flush_millis, filename_suffix]) end |
.crop_and_resize(image: nil, boxes: nil, box_ind: nil, crop_size: nil, method: nil, extrapolation_value: nil) ⇒ Object
749 750 751 |
# File 'lib/tensorflow/raw_ops.rb', line 749 def crop_and_resize(image: nil, boxes: nil, box_ind: nil, crop_size: nil, method: nil, extrapolation_value: nil) Utils.execute("CropAndResize", [image, boxes, box_ind, crop_size], method: method, extrapolation_value: extrapolation_value) end |
.crop_and_resize_grad_boxes(grads: nil, image: nil, boxes: nil, box_ind: nil, method: nil) ⇒ Object
753 754 755 |
# File 'lib/tensorflow/raw_ops.rb', line 753 def crop_and_resize_grad_boxes(grads: nil, image: nil, boxes: nil, box_ind: nil, method: nil) Utils.execute("CropAndResizeGradBoxes", [grads, image, boxes, box_ind], method: method) end |
.crop_and_resize_grad_image(grads: nil, boxes: nil, box_ind: nil, image_size: nil, method: nil) ⇒ Object
757 758 759 |
# File 'lib/tensorflow/raw_ops.rb', line 757 def crop_and_resize_grad_image(grads: nil, boxes: nil, box_ind: nil, image_size: nil, method: nil) Utils.execute("CropAndResizeGradImage", [grads, boxes, box_ind, image_size], method: method) end |
.cross(a: nil, b: nil) ⇒ Object
761 762 763 |
# File 'lib/tensorflow/raw_ops.rb', line 761 def cross(a: nil, b: nil) Utils.execute("Cross", [a, b]) end |
.cross_replica_sum(input: nil, group_assignment: nil) ⇒ Object
765 766 767 |
# File 'lib/tensorflow/raw_ops.rb', line 765 def cross_replica_sum(input: nil, group_assignment: nil) Utils.execute("CrossReplicaSum", [input, group_assignment]) end |
.ctc_beam_search_decoder(inputs: nil, sequence_length: nil, beam_width: nil, top_paths: nil, merge_repeated: nil) ⇒ Object
553 554 555 |
# File 'lib/tensorflow/raw_ops.rb', line 553 def ctc_beam_search_decoder(inputs: nil, sequence_length: nil, beam_width: nil, top_paths: nil, merge_repeated: nil) Utils.execute("CTCBeamSearchDecoder", [inputs, sequence_length], beam_width: beam_width, top_paths: top_paths, merge_repeated: merge_repeated) end |
.ctc_greedy_decoder(inputs: nil, sequence_length: nil, merge_repeated: nil) ⇒ Object
557 558 559 |
# File 'lib/tensorflow/raw_ops.rb', line 557 def ctc_greedy_decoder(inputs: nil, sequence_length: nil, merge_repeated: nil) Utils.execute("CTCGreedyDecoder", [inputs, sequence_length], merge_repeated: merge_repeated) end |
.ctc_loss(inputs: nil, labels_indices: nil, labels_values: nil, sequence_length: nil, preprocess_collapse_repeated: nil, ctc_merge_repeated: nil, ignore_longer_outputs_than_inputs: nil) ⇒ Object
561 562 563 |
# File 'lib/tensorflow/raw_ops.rb', line 561 def ctc_loss(inputs: nil, labels_indices: nil, labels_values: nil, sequence_length: nil, preprocess_collapse_repeated: nil, ctc_merge_repeated: nil, ignore_longer_outputs_than_inputs: nil) Utils.execute("CTCLoss", [inputs, labels_indices, labels_values, sequence_length], preprocess_collapse_repeated: preprocess_collapse_repeated, ctc_merge_repeated: ctc_merge_repeated, ignore_longer_outputs_than_inputs: ignore_longer_outputs_than_inputs) end |
.cudnn_rnn(input: nil, input_h: nil, input_c: nil, params: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil, is_training: nil) ⇒ Object
769 770 771 |
# File 'lib/tensorflow/raw_ops.rb', line 769 def cudnn_rnn(input: nil, input_h: nil, input_c: nil, params: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil, is_training: nil) Utils.execute("CudnnRNN", [input, input_h, input_c, params], rnn_mode: rnn_mode, input_mode: input_mode, direction: direction, dropout: dropout, seed: seed, seed2: seed2, is_training: is_training) end |
.cudnn_rnn_backprop(input: nil, input_h: nil, input_c: nil, params: nil, output: nil, output_h: nil, output_c: nil, output_backprop: nil, output_h_backprop: nil, output_c_backprop: nil, reserve_space: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) ⇒ Object
773 774 775 |
# File 'lib/tensorflow/raw_ops.rb', line 773 def cudnn_rnn_backprop(input: nil, input_h: nil, input_c: nil, params: nil, output: nil, output_h: nil, output_c: nil, output_backprop: nil, output_h_backprop: nil, output_c_backprop: nil, reserve_space: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) Utils.execute("CudnnRNNBackprop", [input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space], rnn_mode: rnn_mode, input_mode: input_mode, direction: direction, dropout: dropout, seed: seed, seed2: seed2) end |
.cudnn_rnn_backprop_v2(input: nil, input_h: nil, input_c: nil, params: nil, output: nil, output_h: nil, output_c: nil, output_backprop: nil, output_h_backprop: nil, output_c_backprop: nil, reserve_space: nil, host_reserved: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) ⇒ Object
777 778 779 |
# File 'lib/tensorflow/raw_ops.rb', line 777 def cudnn_rnn_backprop_v2(input: nil, input_h: nil, input_c: nil, params: nil, output: nil, output_h: nil, output_c: nil, output_backprop: nil, output_h_backprop: nil, output_c_backprop: nil, reserve_space: nil, host_reserved: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) Utils.execute("CudnnRNNBackpropV2", [input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved], rnn_mode: rnn_mode, input_mode: input_mode, direction: direction, dropout: dropout, seed: seed, seed2: seed2) end |
.cudnn_rnn_backprop_v3(input: nil, input_h: nil, input_c: nil, params: nil, sequence_lengths: nil, output: nil, output_h: nil, output_c: nil, output_backprop: nil, output_h_backprop: nil, output_c_backprop: nil, reserve_space: nil, host_reserved: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil, time_major: nil) ⇒ Object
781 782 783 |
# File 'lib/tensorflow/raw_ops.rb', line 781 def cudnn_rnn_backprop_v3(input: nil, input_h: nil, input_c: nil, params: nil, sequence_lengths: nil, output: nil, output_h: nil, output_c: nil, output_backprop: nil, output_h_backprop: nil, output_c_backprop: nil, reserve_space: nil, host_reserved: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil, time_major: nil) Utils.execute("CudnnRNNBackpropV3", [input, input_h, input_c, params, sequence_lengths, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved], rnn_mode: rnn_mode, input_mode: input_mode, direction: direction, dropout: dropout, seed: seed, seed2: seed2, time_major: time_major) end |
.cudnn_rnn_canonical_to_params(num_layers: nil, num_units: nil, input_size: nil, weights: nil, biases: nil, num_params: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) ⇒ Object
785 786 787 |
# File 'lib/tensorflow/raw_ops.rb', line 785 def cudnn_rnn_canonical_to_params(num_layers: nil, num_units: nil, input_size: nil, weights: nil, biases: nil, num_params: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) Utils.execute("CudnnRNNCanonicalToParams", [num_layers, num_units, input_size, weights, biases], num_params: num_params, rnn_mode: rnn_mode, input_mode: input_mode, direction: direction, dropout: dropout, seed: seed, seed2: seed2) end |
.cudnn_rnn_params_size(num_layers: nil, num_units: nil, input_size: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) ⇒ Object
789 790 791 |
# File 'lib/tensorflow/raw_ops.rb', line 789 def cudnn_rnn_params_size(num_layers: nil, num_units: nil, input_size: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) Utils.execute("CudnnRNNParamsSize", [num_layers, num_units, input_size], rnn_mode: rnn_mode, input_mode: input_mode, direction: direction, dropout: dropout, seed: seed, seed2: seed2) end |
.cudnn_rnn_params_to_canonical(num_layers: nil, num_units: nil, input_size: nil, params: nil, num_params: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) ⇒ Object
793 794 795 |
# File 'lib/tensorflow/raw_ops.rb', line 793 def cudnn_rnn_params_to_canonical(num_layers: nil, num_units: nil, input_size: nil, params: nil, num_params: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil) Utils.execute("CudnnRNNParamsToCanonical", [num_layers, num_units, input_size, params], num_params: num_params, rnn_mode: rnn_mode, input_mode: input_mode, direction: direction, dropout: dropout, seed: seed, seed2: seed2) end |
.cudnn_rnnv2(input: nil, input_h: nil, input_c: nil, params: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil, is_training: nil) ⇒ Object
797 798 799 |
# File 'lib/tensorflow/raw_ops.rb', line 797 def cudnn_rnnv2(input: nil, input_h: nil, input_c: nil, params: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil, is_training: nil) Utils.execute("CudnnRNNV2", [input, input_h, input_c, params], rnn_mode: rnn_mode, input_mode: input_mode, direction: direction, dropout: dropout, seed: seed, seed2: seed2, is_training: is_training) end |
.cudnn_rnnv3(input: nil, input_h: nil, input_c: nil, params: nil, sequence_lengths: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil, is_training: nil, time_major: nil) ⇒ Object
801 802 803 |
# File 'lib/tensorflow/raw_ops.rb', line 801 def cudnn_rnnv3(input: nil, input_h: nil, input_c: nil, params: nil, sequence_lengths: nil, rnn_mode: nil, input_mode: nil, direction: nil, dropout: nil, seed: nil, seed2: nil, is_training: nil, time_major: nil) Utils.execute("CudnnRNNV3", [input, input_h, input_c, params, sequence_lengths], rnn_mode: rnn_mode, input_mode: input_mode, direction: direction, dropout: dropout, seed: seed, seed2: seed2, is_training: is_training, time_major: time_major) end |
.cumprod(x: nil, axis: nil, exclusive: nil, reverse: nil) ⇒ Object
805 806 807 |
# File 'lib/tensorflow/raw_ops.rb', line 805 def cumprod(x: nil, axis: nil, exclusive: nil, reverse: nil) Utils.execute("Cumprod", [x, axis], exclusive: exclusive, reverse: reverse) end |
.cumsum(x: nil, axis: nil, exclusive: nil, reverse: nil) ⇒ Object
809 810 811 |
# File 'lib/tensorflow/raw_ops.rb', line 809 def cumsum(x: nil, axis: nil, exclusive: nil, reverse: nil) Utils.execute("Cumsum", [x, axis], exclusive: exclusive, reverse: reverse) end |
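Example — a minimal sketch of Cumsum's exclusive and reverse flags, assuming the tensorflow gem's TensorFlow.constant helper for building inputs:
require "tensorflow"

x    = TensorFlow.constant([1, 2, 3, 4])
axis = TensorFlow.constant(0)

# Running total along axis 0: [1, 3, 6, 10]
TensorFlow::RawOps.cumsum(x: x, axis: axis, exclusive: false, reverse: false)

# exclusive: true shifts the totals right by one: [0, 1, 3, 6]
TensorFlow::RawOps.cumsum(x: x, axis: axis, exclusive: true, reverse: false)

# reverse: true accumulates from the end: [10, 9, 7, 4]
TensorFlow::RawOps.cumsum(x: x, axis: axis, exclusive: false, reverse: true)
Cumprod takes the same arguments and multiplies instead of adding.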
.data_format_dim_map(x: nil, src_format: nil, dst_format: nil) ⇒ Object
813 814 815 |
# File 'lib/tensorflow/raw_ops.rb', line 813 def data_format_dim_map(x: nil, src_format: nil, dst_format: nil) Utils.execute("DataFormatDimMap", [x], src_format: src_format, dst_format: dst_format) end |
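Example — a sketch of DataFormatDimMap, which translates axis indices between layout strings; TensorFlow.constant from the tensorflow gem is assumed:
require "tensorflow"

# Axis 1 in "NHWC" is height; height sits at index 2 in "NCHW",
# so the op should return 2.
TensorFlow::RawOps.data_format_dim_map(
  x: TensorFlow.constant(1),
  src_format: "NHWC",
  dst_format: "NCHW"
)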
.data_format_vec_permute(x: nil, src_format: nil, dst_format: nil) ⇒ Object
817 818 819 |
# File 'lib/tensorflow/raw_ops.rb', line 817 def data_format_vec_permute(x: nil, src_format: nil, dst_format: nil) Utils.execute("DataFormatVecPermute", [x], src_format: src_format, dst_format: dst_format) end |
.dataset_to_graph(input_dataset: nil) ⇒ Object
821 822 823 |
# File 'lib/tensorflow/raw_ops.rb', line 821 def dataset_to_graph(input_dataset: nil) Utils.execute("DatasetToGraph", [input_dataset]) end |
.dataset_to_single_element(dataset: nil, output_types: nil, output_shapes: nil) ⇒ Object
825 826 827 |
# File 'lib/tensorflow/raw_ops.rb', line 825 def dataset_to_single_element(dataset: nil, output_types: nil, output_shapes: nil) Utils.execute("DatasetToSingleElement", [dataset], output_types: output_types, output_shapes: output_shapes) end |
.debug_gradient_identity(input: nil) ⇒ Object
829 830 831 |
# File 'lib/tensorflow/raw_ops.rb', line 829 def debug_gradient_identity(input: nil) Utils.execute("DebugGradientIdentity", [input]) end |
.debug_gradient_ref_identity(input: nil) ⇒ Object
833 834 835 |
# File 'lib/tensorflow/raw_ops.rb', line 833 def debug_gradient_ref_identity(input: nil) Utils.execute("DebugGradientRefIdentity", [input]) end |
.debug_identity(input: nil, device_name: nil, tensor_name: nil, debug_urls: nil, gated_grpc: nil) ⇒ Object
837 838 839 |
# File 'lib/tensorflow/raw_ops.rb', line 837 def debug_identity(input: nil, device_name: nil, tensor_name: nil, debug_urls: nil, gated_grpc: nil) Utils.execute("DebugIdentity", [input], device_name: device_name, tensor_name: tensor_name, debug_urls: debug_urls, gated_grpc: gated_grpc) end |
.debug_nan_count(input: nil, device_name: nil, tensor_name: nil, debug_urls: nil, gated_grpc: nil) ⇒ Object
841 842 843 |
# File 'lib/tensorflow/raw_ops.rb', line 841 def debug_nan_count(input: nil, device_name: nil, tensor_name: nil, debug_urls: nil, gated_grpc: nil) Utils.execute("DebugNanCount", [input], device_name: device_name, tensor_name: tensor_name, debug_urls: debug_urls, gated_grpc: gated_grpc) end |
.debug_numeric_summary(input: nil, device_name: nil, tensor_name: nil, debug_urls: nil, lower_bound: nil, upper_bound: nil, mute_if_healthy: nil, gated_grpc: nil) ⇒ Object
845 846 847 |
# File 'lib/tensorflow/raw_ops.rb', line 845 def debug_numeric_summary(input: nil, device_name: nil, tensor_name: nil, debug_urls: nil, lower_bound: nil, upper_bound: nil, mute_if_healthy: nil, gated_grpc: nil) Utils.execute("DebugNumericSummary", [input], device_name: device_name, tensor_name: tensor_name, debug_urls: debug_urls, lower_bound: lower_bound, upper_bound: upper_bound, mute_if_healthy: mute_if_healthy, gated_grpc: gated_grpc) end |
.decode_and_crop_jpeg(contents: nil, crop_window: nil, channels: nil, ratio: nil, fancy_upscaling: nil, try_recover_truncated: nil, acceptable_fraction: nil, dct_method: nil) ⇒ Object
849 850 851 |
# File 'lib/tensorflow/raw_ops.rb', line 849 def decode_and_crop_jpeg(contents: nil, crop_window: nil, channels: nil, ratio: nil, fancy_upscaling: nil, try_recover_truncated: nil, acceptable_fraction: nil, dct_method: nil) Utils.execute("DecodeAndCropJpeg", [contents, crop_window], channels: channels, ratio: ratio, fancy_upscaling: fancy_upscaling, try_recover_truncated: try_recover_truncated, acceptable_fraction: acceptable_fraction, dct_method: dct_method) end |
.decode_base64(input: nil) ⇒ Object
853 854 855 |
# File 'lib/tensorflow/raw_ops.rb', line 853 def decode_base64(input: nil) Utils.execute("DecodeBase64", [input]) end |
.decode_bmp(contents: nil, channels: nil) ⇒ Object
857 858 859 |
# File 'lib/tensorflow/raw_ops.rb', line 857 def decode_bmp(contents: nil, channels: nil) Utils.execute("DecodeBmp", [contents], channels: channels) end |
.decode_compressed(bytes: nil, compression_type: nil) ⇒ Object
865 866 867 |
# File 'lib/tensorflow/raw_ops.rb', line 865 def decode_compressed(bytes: nil, compression_type: nil) Utils.execute("DecodeCompressed", [bytes], compression_type: compression_type) end |
.decode_csv(records: nil, record_defaults: nil, field_delim: nil, use_quote_delim: nil, na_value: nil, select_cols: nil) ⇒ Object
861 862 863 |
# File 'lib/tensorflow/raw_ops.rb', line 861 def decode_csv(records: nil, record_defaults: nil, field_delim: nil, use_quote_delim: nil, na_value: nil, select_cols: nil) Utils.execute("DecodeCSV", [records, record_defaults], field_delim: field_delim, use_quote_delim: use_quote_delim, na_value: na_value, select_cols: select_cols) end |
.decode_gif(contents: nil) ⇒ Object
869 870 871 |
# File 'lib/tensorflow/raw_ops.rb', line 869 def decode_gif(contents: nil) Utils.execute("DecodeGif", [contents]) end |
.decode_jpeg(contents: nil, channels: nil, ratio: nil, fancy_upscaling: nil, try_recover_truncated: nil, acceptable_fraction: nil, dct_method: nil) ⇒ Object
877 878 879 |
# File 'lib/tensorflow/raw_ops.rb', line 877 def decode_jpeg(contents: nil, channels: nil, ratio: nil, fancy_upscaling: nil, try_recover_truncated: nil, acceptable_fraction: nil, dct_method: nil) Utils.execute("DecodeJpeg", [contents], channels: channels, ratio: ratio, fancy_upscaling: fancy_upscaling, try_recover_truncated: try_recover_truncated, acceptable_fraction: acceptable_fraction, dct_method: dct_method) end |
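Example — a hedged sketch of DecodeJpeg; image.jpg is a hypothetical path, and TensorFlow.constant is assumed to accept a binary string for the contents input:
require "tensorflow"

jpeg_bytes = File.binread("image.jpg") # hypothetical input file

# channels: 3 forces an RGB result; returns a uint8 tensor
# of shape [height, width, 3].
TensorFlow::RawOps.decode_jpeg(
  contents: TensorFlow.constant(jpeg_bytes),
  channels: 3
)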
.decode_json_example(json_examples: nil) ⇒ Object
873 874 875 |
# File 'lib/tensorflow/raw_ops.rb', line 873 def decode_json_example(json_examples: nil) Utils.execute("DecodeJSONExample", [json_examples]) end |
.decode_padded_raw(input_bytes: nil, fixed_length: nil, out_type: nil, little_endian: nil) ⇒ Object
881 882 883 |
# File 'lib/tensorflow/raw_ops.rb', line 881 def decode_padded_raw(input_bytes: nil, fixed_length: nil, out_type: nil, little_endian: nil) Utils.execute("DecodePaddedRaw", [input_bytes, fixed_length], out_type: out_type, little_endian: little_endian) end |
.decode_png(contents: nil, channels: nil, dtype: nil) ⇒ Object
885 886 887 |
# File 'lib/tensorflow/raw_ops.rb', line 885 def decode_png(contents: nil, channels: nil, dtype: nil) Utils.execute("DecodePng", [contents], channels: channels, dtype: dtype) end |
.decode_proto_v2(bytes: nil, message_type: nil, field_names: nil, output_types: nil, descriptor_source: nil, message_format: nil, sanitize: nil) ⇒ Object
889 890 891 |
# File 'lib/tensorflow/raw_ops.rb', line 889 def decode_proto_v2(bytes: nil, message_type: nil, field_names: nil, output_types: nil, descriptor_source: nil, message_format: nil, sanitize: nil) Utils.execute("DecodeProtoV2", [bytes], message_type: message_type, field_names: field_names, output_types: output_types, descriptor_source: descriptor_source, message_format: message_format, sanitize: sanitize) end |
.decode_raw(bytes: nil, out_type: nil, little_endian: nil) ⇒ Object
893 894 895 |
# File 'lib/tensorflow/raw_ops.rb', line 893 def decode_raw(bytes: nil, out_type: nil, little_endian: nil) Utils.execute("DecodeRaw", [bytes], out_type: out_type, little_endian: little_endian) end |
.decode_wav(contents: nil, desired_channels: nil, desired_samples: nil) ⇒ Object
897 898 899 |
# File 'lib/tensorflow/raw_ops.rb', line 897 def decode_wav(contents: nil, desired_channels: nil, desired_samples: nil) Utils.execute("DecodeWav", [contents], desired_channels: desired_channels, desired_samples: desired_samples) end |
.deep_copy(x: nil) ⇒ Object
901 902 903 |
# File 'lib/tensorflow/raw_ops.rb', line 901 def deep_copy(x: nil) Utils.execute("DeepCopy", [x]) end |
.delete_iterator(handle: nil, deleter: nil) ⇒ Object
905 906 907 |
# File 'lib/tensorflow/raw_ops.rb', line 905 def delete_iterator(handle: nil, deleter: nil) Utils.execute("DeleteIterator", [handle, deleter]) end |
.delete_session_tensor(handle: nil) ⇒ Object
909 910 911 |
# File 'lib/tensorflow/raw_ops.rb', line 909 def delete_session_tensor(handle: nil) Utils.execute("DeleteSessionTensor", [handle]) end |
.dense_to_dense_set_operation(set1: nil, set2: nil, set_operation: nil, validate_indices: nil) ⇒ Object
913 914 915 |
# File 'lib/tensorflow/raw_ops.rb', line 913 def dense_to_dense_set_operation(set1: nil, set2: nil, set_operation: nil, validate_indices: nil) Utils.execute("DenseToDenseSetOperation", [set1, set2], set_operation: set_operation, validate_indices: validate_indices) end |
.dense_to_sparse_set_operation(set1: nil, set2_indices: nil, set2_values: nil, set2_shape: nil, set_operation: nil, validate_indices: nil) ⇒ Object
917 918 919 |
# File 'lib/tensorflow/raw_ops.rb', line 917 def dense_to_sparse_set_operation(set1: nil, set2_indices: nil, set2_values: nil, set2_shape: nil, set_operation: nil, validate_indices: nil) Utils.execute("DenseToSparseSetOperation", [set1, set2_indices, set2_values, set2_shape], set_operation: set_operation, validate_indices: validate_indices) end |
.depth_to_space(input: nil, block_size: nil, data_format: nil) ⇒ Object
921 922 923 |
# File 'lib/tensorflow/raw_ops.rb', line 921 def depth_to_space(input: nil, block_size: nil, data_format: nil) Utils.execute("DepthToSpace", [input], block_size: block_size, data_format: data_format) end |
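Example — DepthToSpace rearranges channel data into spatial blocks; in this sketch (assuming TensorFlow.constant) a [1, 1, 1, 4] tensor with block_size: 2 becomes [1, 2, 2, 1]:
require "tensorflow"

x = TensorFlow.constant([[[[1.0, 2.0, 3.0, 4.0]]]]) # shape [1, 1, 1, 4]

# The four channel values are laid out as a 2x2 spatial block.
TensorFlow::RawOps.depth_to_space(input: x, block_size: 2, data_format: "NHWC")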
.depthwise_conv2d_native(input: nil, filter: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) ⇒ Object
925 926 927 |
# File 'lib/tensorflow/raw_ops.rb', line 925 def depthwise_conv2d_native(input: nil, filter: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) Utils.execute("DepthwiseConv2dNative", [input, filter], strides: strides, padding: padding, data_format: data_format, dilations: dilations) end |
.depthwise_conv2d_native_backprop_filter(input: nil, filter_sizes: nil, out_backprop: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) ⇒ Object
929 930 931 |
# File 'lib/tensorflow/raw_ops.rb', line 929 def depthwise_conv2d_native_backprop_filter(input: nil, filter_sizes: nil, out_backprop: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) Utils.execute("DepthwiseConv2dNativeBackpropFilter", [input, filter_sizes, out_backprop], strides: strides, padding: padding, data_format: data_format, dilations: dilations) end |
.depthwise_conv2d_native_backprop_input(input_sizes: nil, filter: nil, out_backprop: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) ⇒ Object
933 934 935 |
# File 'lib/tensorflow/raw_ops.rb', line 933 def depthwise_conv2d_native_backprop_input(input_sizes: nil, filter: nil, out_backprop: nil, strides: nil, padding: nil, data_format: nil, dilations: nil) Utils.execute("DepthwiseConv2dNativeBackpropInput", [input_sizes, filter, out_backprop], strides: strides, padding: padding, data_format: data_format, dilations: dilations) end |
.dequantize(input: nil, min_range: nil, max_range: nil, mode: nil) ⇒ Object
937 938 939 |
# File 'lib/tensorflow/raw_ops.rb', line 937 def dequantize(input: nil, min_range: nil, max_range: nil, mode: nil) Utils.execute("Dequantize", [input, min_range, max_range], mode: mode) end |
.deserialize_iterator(resource_handle: nil, serialized: nil) ⇒ Object
941 942 943 |
# File 'lib/tensorflow/raw_ops.rb', line 941 def deserialize_iterator(resource_handle: nil, serialized: nil) Utils.execute("DeserializeIterator", [resource_handle, serialized]) end |
.deserialize_many_sparse(serialized_sparse: nil, dtype: nil) ⇒ Object
945 946 947 |
# File 'lib/tensorflow/raw_ops.rb', line 945 def deserialize_many_sparse(serialized_sparse: nil, dtype: nil) Utils.execute("DeserializeManySparse", [serialized_sparse], dtype: dtype) end |
.deserialize_sparse(serialized_sparse: nil, dtype: nil) ⇒ Object
949 950 951 |
# File 'lib/tensorflow/raw_ops.rb', line 949 def deserialize_sparse(serialized_sparse: nil, dtype: nil) Utils.execute("DeserializeSparse", [serialized_sparse], dtype: dtype) end |
.destroy_resource_op(resource: nil, ignore_lookup_error: nil) ⇒ Object
953 954 955 |
# File 'lib/tensorflow/raw_ops.rb', line 953 def destroy_resource_op(resource: nil, ignore_lookup_error: nil) Utils.execute("DestroyResourceOp", [resource], ignore_lookup_error: ignore_lookup_error) end |
.destroy_temporary_variable(ref: nil, var_name: nil) ⇒ Object
957 958 959 |
# File 'lib/tensorflow/raw_ops.rb', line 957 def destroy_temporary_variable(ref: nil, var_name: nil) Utils.execute("DestroyTemporaryVariable", [ref], var_name: var_name) end |
.diag(diagonal: nil) ⇒ Object
961 962 963 |
# File 'lib/tensorflow/raw_ops.rb', line 961 def diag(diagonal: nil) Utils.execute("Diag", [diagonal]) end |
.diag_part(input: nil) ⇒ Object
965 966 967 |
# File 'lib/tensorflow/raw_ops.rb', line 965 def diag_part(input: nil) Utils.execute("DiagPart", [input]) end |
.digamma(x: nil) ⇒ Object
969 970 971 |
# File 'lib/tensorflow/raw_ops.rb', line 969 def digamma(x: nil) Utils.execute("Digamma", [x]) end |
.dilation2d(input: nil, filter: nil, strides: nil, rates: nil, padding: nil) ⇒ Object
973 974 975 |
# File 'lib/tensorflow/raw_ops.rb', line 973 def dilation2d(input: nil, filter: nil, strides: nil, rates: nil, padding: nil) Utils.execute("Dilation2D", [input, filter], strides: strides, rates: rates, padding: padding) end |
.dilation2d_backprop_filter(input: nil, filter: nil, out_backprop: nil, strides: nil, rates: nil, padding: nil) ⇒ Object
977 978 979 |
# File 'lib/tensorflow/raw_ops.rb', line 977 def dilation2d_backprop_filter(input: nil, filter: nil, out_backprop: nil, strides: nil, rates: nil, padding: nil) Utils.execute("Dilation2DBackpropFilter", [input, filter, out_backprop], strides: strides, rates: rates, padding: padding) end |
.dilation2d_backprop_input(input: nil, filter: nil, out_backprop: nil, strides: nil, rates: nil, padding: nil) ⇒ Object
981 982 983 |
# File 'lib/tensorflow/raw_ops.rb', line 981 def dilation2d_backprop_input(input: nil, filter: nil, out_backprop: nil, strides: nil, rates: nil, padding: nil) Utils.execute("Dilation2DBackpropInput", [input, filter, out_backprop], strides: strides, rates: rates, padding: padding) end |
.div(x: nil, y: nil) ⇒ Object
985 986 987 |
# File 'lib/tensorflow/raw_ops.rb', line 985 def div(x: nil, y: nil) Utils.execute("Div", [x, y]) end |
.div_no_nan(x: nil, y: nil) ⇒ Object
989 990 991 |
# File 'lib/tensorflow/raw_ops.rb', line 989 def div_no_nan(x: nil, y: nil) Utils.execute("DivNoNan", [x, y]) end |
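Example — DivNoNan behaves like Div except that a zero denominator yields 0 instead of Inf or NaN; a minimal sketch assuming TensorFlow.constant:
require "tensorflow"

x = TensorFlow.constant([1.0, 2.0, 3.0])
y = TensorFlow.constant([2.0, 0.0, 0.0])

# Div would return [0.5, Infinity, Infinity];
# DivNoNan returns [0.5, 0.0, 0.0].
TensorFlow::RawOps.div_no_nan(x: x, y: y)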
.draw_bounding_boxes(images: nil, boxes: nil) ⇒ Object
993 994 995 |
# File 'lib/tensorflow/raw_ops.rb', line 993 def draw_bounding_boxes(images: nil, boxes: nil) Utils.execute("DrawBoundingBoxes", [images, boxes]) end |
.draw_bounding_boxes_v2(images: nil, boxes: nil, colors: nil) ⇒ Object
997 998 999 |
# File 'lib/tensorflow/raw_ops.rb', line 997 def draw_bounding_boxes_v2(images: nil, boxes: nil, colors: nil) Utils.execute("DrawBoundingBoxesV2", [images, boxes, colors]) end |
.dynamic_partition(data: nil, partitions: nil, num_partitions: nil) ⇒ Object
1001 1002 1003 |
# File 'lib/tensorflow/raw_ops.rb', line 1001 def dynamic_partition(data: nil, partitions: nil, num_partitions: nil) Utils.execute("DynamicPartition", [data, partitions], num_partitions: num_partitions) end |
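Example — DynamicPartition scatters data into num_partitions output tensors according to the int32 partitions vector. A rough sketch, assuming TensorFlow.constant accepts a dtype: keyword and that the wrapper returns the multiple outputs as an array:
require "tensorflow"

data       = TensorFlow.constant([10, 20, 30, 40])
partitions = TensorFlow.constant([0, 1, 0, 1], dtype: :int32)

# Expected outputs: [10, 30] and [20, 40]
TensorFlow::RawOps.dynamic_partition(
  data: data,
  partitions: partitions,
  num_partitions: 2
)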
.dynamic_stitch(indices: nil, data: nil) ⇒ Object
1005 1006 1007 |
# File 'lib/tensorflow/raw_ops.rb', line 1005 def dynamic_stitch(indices: nil, data: nil) Utils.execute("DynamicStitch", [indices, data]) end |
.eager_py_func(input: nil, token: nil) ⇒ Object
1009 1010 1011 |
# File 'lib/tensorflow/raw_ops.rb', line 1009 def eager_py_func(input: nil, token: nil) Utils.execute("EagerPyFunc", [input], token: token) end |
.edit_distance(hypothesis_indices: nil, hypothesis_values: nil, hypothesis_shape: nil, truth_indices: nil, truth_values: nil, truth_shape: nil, normalize: nil) ⇒ Object
1013 1014 1015 |
# File 'lib/tensorflow/raw_ops.rb', line 1013 def edit_distance(hypothesis_indices: nil, hypothesis_values: nil, hypothesis_shape: nil, truth_indices: nil, truth_values: nil, truth_shape: nil, normalize: nil) Utils.execute("EditDistance", [hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape], normalize: normalize) end |
.elu(features: nil) ⇒ Object
1017 1018 1019 |
# File 'lib/tensorflow/raw_ops.rb', line 1017 def elu(features: nil) Utils.execute("Elu", [features]) end |
.elu_grad(gradients: nil, outputs: nil) ⇒ Object
1021 1022 1023 |
# File 'lib/tensorflow/raw_ops.rb', line 1021 def elu_grad(gradients: nil, outputs: nil) Utils.execute("EluGrad", [gradients, outputs]) end |
.empty(shape: nil, dtype: nil, init: nil) ⇒ Object
1025 1026 1027 |
# File 'lib/tensorflow/raw_ops.rb', line 1025 def empty(shape: nil, dtype: nil, init: nil) Utils.execute("Empty", [shape], dtype: dtype, init: init) end |
.empty_tensor_list(element_shape: nil, max_num_elements: nil, element_dtype: nil, shape_type: nil) ⇒ Object
1029 1030 1031 |
# File 'lib/tensorflow/raw_ops.rb', line 1029 def empty_tensor_list(element_shape: nil, max_num_elements: nil, element_dtype: nil, shape_type: nil) Utils.execute("EmptyTensorList", [element_shape, max_num_elements], element_dtype: element_dtype, shape_type: shape_type) end |
.encode_base64(input: nil, pad: nil) ⇒ Object
1033 1034 1035 |
# File 'lib/tensorflow/raw_ops.rb', line 1033 def encode_base64(input: nil, pad: nil) Utils.execute("EncodeBase64", [input], pad: pad) end |
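Example — EncodeBase64 and DecodeBase64 round-trip string tensors using web-safe base64; a sketch assuming TensorFlow.constant handles string values:
require "tensorflow"

plain   = TensorFlow.constant("hello raw ops")
encoded = TensorFlow::RawOps.encode_base64(input: plain, pad: false)

# Decoding recovers the original bytes.
TensorFlow::RawOps.decode_base64(input: encoded)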
.encode_jpeg(image: nil, format: nil, quality: nil, progressive: nil, optimize_size: nil, chroma_downsampling: nil, density_unit: nil, x_density: nil, y_density: nil, xmp_metadata: nil) ⇒ Object
1037 1038 1039 |
# File 'lib/tensorflow/raw_ops.rb', line 1037 def encode_jpeg(image: nil, format: nil, quality: nil, progressive: nil, optimize_size: nil, chroma_downsampling: nil, density_unit: nil, x_density: nil, y_density: nil, xmp_metadata: nil) Utils.execute("EncodeJpeg", [image], format: format, quality: quality, progressive: progressive, optimize_size: optimize_size, chroma_downsampling: chroma_downsampling, density_unit: density_unit, x_density: x_density, y_density: y_density, xmp_metadata: xmp_metadata) end |
.encode_jpeg_variable_quality(images: nil, quality: nil) ⇒ Object
1041 1042 1043 |
# File 'lib/tensorflow/raw_ops.rb', line 1041 def encode_jpeg_variable_quality(images: nil, quality: nil) Utils.execute("EncodeJpegVariableQuality", [images, quality]) end |
.encode_png(image: nil, compression: nil) ⇒ Object
1045 1046 1047 |
# File 'lib/tensorflow/raw_ops.rb', line 1045 def encode_png(image: nil, compression: nil) Utils.execute("EncodePng", [image], compression: compression) end |
.encode_proto(sizes: nil, values: nil, field_names: nil, message_type: nil, descriptor_source: nil) ⇒ Object
1049 1050 1051 |
# File 'lib/tensorflow/raw_ops.rb', line 1049 def encode_proto(sizes: nil, values: nil, field_names: nil, message_type: nil, descriptor_source: nil) Utils.execute("EncodeProto", [sizes, values], field_names: field_names, message_type: message_type, descriptor_source: descriptor_source) end |
.encode_wav(audio: nil, sample_rate: nil) ⇒ Object
1053 1054 1055 |
# File 'lib/tensorflow/raw_ops.rb', line 1053 def encode_wav(audio: nil, sample_rate: nil) Utils.execute("EncodeWav", [audio, sample_rate]) end |
.enqueue_tpu_embedding_integer_batch(batch: nil, mode_override: nil, device_ordinal: nil) ⇒ Object
1057 1058 1059 |
# File 'lib/tensorflow/raw_ops.rb', line 1057 def enqueue_tpu_embedding_integer_batch(batch: nil, mode_override: nil, device_ordinal: nil) Utils.execute("EnqueueTPUEmbeddingIntegerBatch", [batch, mode_override], device_ordinal: device_ordinal) end |
.enqueue_tpu_embedding_sparse_batch(sample_indices: nil, embedding_indices: nil, aggregation_weights: nil, mode_override: nil, device_ordinal: nil, combiners: nil) ⇒ Object
1061 1062 1063 |
# File 'lib/tensorflow/raw_ops.rb', line 1061 def enqueue_tpu_embedding_sparse_batch(sample_indices: nil, embedding_indices: nil, aggregation_weights: nil, mode_override: nil, device_ordinal: nil, combiners: nil) Utils.execute("EnqueueTPUEmbeddingSparseBatch", [sample_indices, embedding_indices, aggregation_weights, mode_override], device_ordinal: device_ordinal, combiners: combiners) end |
.enqueue_tpu_embedding_sparse_tensor_batch(sample_indices: nil, embedding_indices: nil, aggregation_weights: nil, mode_override: nil, device_ordinal: nil, combiners: nil, table_ids: nil, max_sequence_lengths: nil) ⇒ Object
1065 1066 1067 |
# File 'lib/tensorflow/raw_ops.rb', line 1065 def enqueue_tpu_embedding_sparse_tensor_batch(sample_indices: nil, embedding_indices: nil, aggregation_weights: nil, mode_override: nil, device_ordinal: nil, combiners: nil, table_ids: nil, max_sequence_lengths: nil) Utils.execute("EnqueueTPUEmbeddingSparseTensorBatch", [sample_indices, embedding_indices, aggregation_weights, mode_override], device_ordinal: device_ordinal, combiners: combiners, table_ids: table_ids, max_sequence_lengths: max_sequence_lengths) end |
.ensure_shape(input: nil, shape: nil) ⇒ Object
1069 1070 1071 |
# File 'lib/tensorflow/raw_ops.rb', line 1069 def ensure_shape(input: nil, shape: nil) Utils.execute("EnsureShape", [input], shape: shape) end |
.enter(data: nil, frame_name: nil, is_constant: nil, parallel_iterations: nil) ⇒ Object
1073 1074 1075 |
# File 'lib/tensorflow/raw_ops.rb', line 1073 def enter(data: nil, frame_name: nil, is_constant: nil, parallel_iterations: nil) Utils.execute("Enter", [data], frame_name: frame_name, is_constant: is_constant, parallel_iterations: parallel_iterations) end |
.equal(x: nil, y: nil) ⇒ Object
1077 1078 1079 |
# File 'lib/tensorflow/raw_ops.rb', line 1077 def equal(x: nil, y: nil) Utils.execute("Equal", [x, y]) end |
.erf(x: nil) ⇒ Object
1081 1082 1083 |
# File 'lib/tensorflow/raw_ops.rb', line 1081 def erf(x: nil) Utils.execute("Erf", [x]) end |
.erfc(x: nil) ⇒ Object
1085 1086 1087 |
# File 'lib/tensorflow/raw_ops.rb', line 1085 def erfc(x: nil) Utils.execute("Erfc", [x]) end |
.euclidean_norm(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
1089 1090 1091 |
# File 'lib/tensorflow/raw_ops.rb', line 1089 def euclidean_norm(input: nil, reduction_indices: nil, keep_dims: nil) Utils.execute("EuclideanNorm", [input, reduction_indices], keep_dims: keep_dims) end |
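Example — EuclideanNorm reduces input to the square root of the sum of squares over reduction_indices; a sketch assuming TensorFlow.constant:
require "tensorflow"

x    = TensorFlow.constant([[3.0, 4.0], [6.0, 8.0]])
axis = TensorFlow.constant(1)

# Row-wise L2 norms: [5.0, 10.0]; keep_dims: true would keep shape [2, 1].
TensorFlow::RawOps.euclidean_norm(input: x, reduction_indices: axis, keep_dims: false)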
.exit(data: nil) ⇒ Object
1093 1094 1095 |
# File 'lib/tensorflow/raw_ops.rb', line 1093 def exit(data: nil) Utils.execute("Exit", [data]) end |
.exp(x: nil) ⇒ Object
1097 1098 1099 |
# File 'lib/tensorflow/raw_ops.rb', line 1097 def exp(x: nil) Utils.execute("Exp", [x]) end |
.expand_dims(input: nil, dim: nil) ⇒ Object
1101 1102 1103 |
# File 'lib/tensorflow/raw_ops.rb', line 1101 def expand_dims(input: nil, dim: nil) Utils.execute("ExpandDims", [input, dim]) end |
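Example — ExpandDims inserts a length-1 axis at position dim; a minimal sketch assuming TensorFlow.constant:
require "tensorflow"

x = TensorFlow.constant([1, 2, 3]) # shape [3]

# dim: 0 gives shape [1, 3]; dim: 1 gives shape [3, 1].
TensorFlow::RawOps.expand_dims(input: x, dim: TensorFlow.constant(0))
TensorFlow::RawOps.expand_dims(input: x, dim: TensorFlow.constant(1))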
.experimental_assert_next_dataset(input_dataset: nil, transformations: nil, output_types: nil, output_shapes: nil) ⇒ Object
1105 1106 1107 |
# File 'lib/tensorflow/raw_ops.rb', line 1105 def experimental_assert_next_dataset(input_dataset: nil, transformations: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalAssertNextDataset", [input_dataset, transformations], output_types: output_types, output_shapes: output_shapes) end |
.experimental_auto_shard_dataset(input_dataset: nil, num_workers: nil, index: nil, output_types: nil, output_shapes: nil) ⇒ Object
1109 1110 1111 |
# File 'lib/tensorflow/raw_ops.rb', line 1109 def experimental_auto_shard_dataset(input_dataset: nil, num_workers: nil, index: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalAutoShardDataset", [input_dataset, num_workers, index], output_types: output_types, output_shapes: output_shapes) end |
.experimental_bytes_produced_stats_dataset(input_dataset: nil, tag: nil, output_types: nil, output_shapes: nil) ⇒ Object
1113 1114 1115 |
# File 'lib/tensorflow/raw_ops.rb', line 1113 def experimental_bytes_produced_stats_dataset(input_dataset: nil, tag: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalBytesProducedStatsDataset", [input_dataset, tag], output_types: output_types, output_shapes: output_shapes) end |
.experimental_choose_fastest_dataset(input_datasets: nil, num_experiments: nil, output_types: nil, output_shapes: nil) ⇒ Object
1121 1122 1123 |
# File 'lib/tensorflow/raw_ops.rb', line 1121 def experimental_choose_fastest_dataset(input_datasets: nil, num_experiments: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalChooseFastestDataset", [input_datasets], num_experiments: num_experiments, output_types: output_types, output_shapes: output_shapes) end |
.experimental_csv_dataset(filenames: nil, compression_type: nil, buffer_size: nil, header: nil, field_delim: nil, use_quote_delim: nil, na_value: nil, select_cols: nil, record_defaults: nil, output_types: nil, output_shapes: nil) ⇒ Object
1117 1118 1119 |
# File 'lib/tensorflow/raw_ops.rb', line 1117 def experimental_csv_dataset(filenames: nil, compression_type: nil, buffer_size: nil, header: nil, field_delim: nil, use_quote_delim: nil, na_value: nil, select_cols: nil, record_defaults: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalCSVDataset", [filenames, compression_type, buffer_size, header, field_delim, use_quote_delim, na_value, select_cols, record_defaults], output_types: output_types, output_shapes: output_shapes) end |
.experimental_dataset_cardinality(input_dataset: nil) ⇒ Object
1125 1126 1127 |
# File 'lib/tensorflow/raw_ops.rb', line 1125 def experimental_dataset_cardinality(input_dataset: nil) Utils.execute("ExperimentalDatasetCardinality", [input_dataset]) end |
.experimental_dataset_to_tf_record(input_dataset: nil, filename: nil, compression_type: nil) ⇒ Object
1129 1130 1131 |
# File 'lib/tensorflow/raw_ops.rb', line 1129 def experimental_dataset_to_tf_record(input_dataset: nil, filename: nil, compression_type: nil) Utils.execute("ExperimentalDatasetToTFRecord", [input_dataset, filename, compression_type]) end |
.experimental_dense_to_sparse_batch_dataset(input_dataset: nil, batch_size: nil, row_shape: nil, output_types: nil, output_shapes: nil) ⇒ Object
1133 1134 1135 |
# File 'lib/tensorflow/raw_ops.rb', line 1133 def experimental_dense_to_sparse_batch_dataset(input_dataset: nil, batch_size: nil, row_shape: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalDenseToSparseBatchDataset", [input_dataset, batch_size, row_shape], output_types: output_types, output_shapes: output_shapes) end |
.experimental_directed_interleave_dataset(selector_input_dataset: nil, data_input_datasets: nil, output_types: nil, output_shapes: nil) ⇒ Object
1137 1138 1139 |
# File 'lib/tensorflow/raw_ops.rb', line 1137 def experimental_directed_interleave_dataset(selector_input_dataset: nil, data_input_datasets: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalDirectedInterleaveDataset", [selector_input_dataset, data_input_datasets], output_types: output_types, output_shapes: output_shapes) end |
.experimental_group_by_reducer_dataset(input_dataset: nil, key_func_other_arguments: nil, init_func_other_arguments: nil, reduce_func_other_arguments: nil, finalize_func_other_arguments: nil, key_func: nil, init_func: nil, reduce_func: nil, finalize_func: nil, output_types: nil, output_shapes: nil) ⇒ Object
1141 1142 1143 |
# File 'lib/tensorflow/raw_ops.rb', line 1141 def experimental_group_by_reducer_dataset(input_dataset: nil, key_func_other_arguments: nil, init_func_other_arguments: nil, reduce_func_other_arguments: nil, finalize_func_other_arguments: nil, key_func: nil, init_func: nil, reduce_func: nil, finalize_func: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalGroupByReducerDataset", [input_dataset, key_func_other_arguments, init_func_other_arguments, reduce_func_other_arguments, finalize_func_other_arguments], key_func: key_func, init_func: init_func, reduce_func: reduce_func, finalize_func: finalize_func, output_types: output_types, output_shapes: output_shapes) end |
.experimental_group_by_window_dataset(input_dataset: nil, key_func_other_arguments: nil, reduce_func_other_arguments: nil, window_size_func_other_arguments: nil, key_func: nil, reduce_func: nil, window_size_func: nil, output_types: nil, output_shapes: nil) ⇒ Object
1145 1146 1147 |
# File 'lib/tensorflow/raw_ops.rb', line 1145 def experimental_group_by_window_dataset(input_dataset: nil, key_func_other_arguments: nil, reduce_func_other_arguments: nil, window_size_func_other_arguments: nil, key_func: nil, reduce_func: nil, window_size_func: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalGroupByWindowDataset", [input_dataset, key_func_other_arguments, reduce_func_other_arguments, window_size_func_other_arguments], key_func: key_func, reduce_func: reduce_func, window_size_func: window_size_func, output_types: output_types, output_shapes: output_shapes) end |
.experimental_identity_indexed_dataset(size: nil) ⇒ Object
1149 1150 1151 |
# File 'lib/tensorflow/raw_ops.rb', line 1149 def experimental_identity_indexed_dataset(size: nil) Utils.execute("ExperimentalIdentityIndexedDataset", [size]) end |
.experimental_ignore_errors_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) ⇒ Object
1153 1154 1155 |
# File 'lib/tensorflow/raw_ops.rb', line 1153 def experimental_ignore_errors_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalIgnoreErrorsDataset", [input_dataset], output_types: output_types, output_shapes: output_shapes) end |
.experimental_indexed_dataset_get(materialized: nil, index: nil, output_types: nil, output_shapes: nil) ⇒ Object
1157 1158 1159 |
# File 'lib/tensorflow/raw_ops.rb', line 1157 def experimental_indexed_dataset_get(materialized: nil, index: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalIndexedDatasetGet", [materialized, index], output_types: output_types, output_shapes: output_shapes) end |
.experimental_indexed_dataset_materialize(dataset: nil, materialized: nil) ⇒ Object
1161 1162 1163 |
# File 'lib/tensorflow/raw_ops.rb', line 1161 def experimental_indexed_dataset_materialize(dataset: nil, materialized: nil) Utils.execute("ExperimentalIndexedDatasetMaterialize", [dataset, materialized]) end |
.experimental_iterator_get_device(resource: nil) ⇒ Object
1165 1166 1167 |
# File 'lib/tensorflow/raw_ops.rb', line 1165 def experimental_iterator_get_device(resource: nil) Utils.execute("ExperimentalIteratorGetDevice", [resource]) end |
.experimental_latency_stats_dataset(input_dataset: nil, tag: nil, output_types: nil, output_shapes: nil) ⇒ Object
1173 1174 1175 |
# File 'lib/tensorflow/raw_ops.rb', line 1173 def experimental_latency_stats_dataset(input_dataset: nil, tag: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalLatencyStatsDataset", [input_dataset, tag], output_types: output_types, output_shapes: output_shapes) end |
.experimental_lmdb_dataset(filenames: nil, output_types: nil, output_shapes: nil) ⇒ Object
1169 1170 1171 |
# File 'lib/tensorflow/raw_ops.rb', line 1169 def experimental_lmdb_dataset(filenames: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalLMDBDataset", [filenames], output_types: output_types, output_shapes: output_shapes) end |
.experimental_map_and_batch_dataset(input_dataset: nil, other_arguments: nil, batch_size: nil, num_parallel_calls: nil, drop_remainder: nil, f: nil, output_types: nil, output_shapes: nil, preserve_cardinality: nil) ⇒ Object
1177 1178 1179 |
# File 'lib/tensorflow/raw_ops.rb', line 1177 def experimental_map_and_batch_dataset(input_dataset: nil, other_arguments: nil, batch_size: nil, num_parallel_calls: nil, drop_remainder: nil, f: nil, output_types: nil, output_shapes: nil, preserve_cardinality: nil) Utils.execute("ExperimentalMapAndBatchDataset", [input_dataset, other_arguments, batch_size, num_parallel_calls, drop_remainder], f: f, output_types: output_types, output_shapes: output_shapes, preserve_cardinality: preserve_cardinality) end |
.experimental_map_dataset(input_dataset: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil, use_inter_op_parallelism: nil, preserve_cardinality: nil) ⇒ Object
1181 1182 1183 |
# File 'lib/tensorflow/raw_ops.rb', line 1181 def experimental_map_dataset(input_dataset: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil, use_inter_op_parallelism: nil, preserve_cardinality: nil) Utils.execute("ExperimentalMapDataset", [input_dataset, other_arguments], f: f, output_types: output_types, output_shapes: output_shapes, use_inter_op_parallelism: use_inter_op_parallelism, preserve_cardinality: preserve_cardinality) end |
.experimental_matching_files_dataset(patterns: nil) ⇒ Object
1185 1186 1187 |
# File 'lib/tensorflow/raw_ops.rb', line 1185 def experimental_matching_files_dataset(patterns: nil) Utils.execute("ExperimentalMatchingFilesDataset", [patterns]) end |
.experimental_materialized_index_dataset_handle(container: nil, shared_name: nil, output_types: nil, output_shapes: nil) ⇒ Object
1189 1190 1191 |
# File 'lib/tensorflow/raw_ops.rb', line 1189 def experimental_materialized_index_dataset_handle(container: nil, shared_name: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalMaterializedIndexDatasetHandle", [], container: container, shared_name: shared_name, output_types: output_types, output_shapes: output_shapes) end |
.experimental_max_intra_op_parallelism_dataset(input_dataset: nil, max_intra_op_parallelism: nil, output_types: nil, output_shapes: nil) ⇒ Object
1193 1194 1195 |
# File 'lib/tensorflow/raw_ops.rb', line 1193 def experimental_max_intra_op_parallelism_dataset(input_dataset: nil, max_intra_op_parallelism: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalMaxIntraOpParallelismDataset", [input_dataset, max_intra_op_parallelism], output_types: output_types, output_shapes: output_shapes) end |
.experimental_non_serializable_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) ⇒ Object
1197 1198 1199 |
# File 'lib/tensorflow/raw_ops.rb', line 1197 def experimental_non_serializable_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalNonSerializableDataset", [input_dataset], output_types: output_types, output_shapes: output_shapes) end |
.experimental_numa_map_and_batch_dataset(input_dataset: nil, other_arguments: nil, batch_size: nil, num_parallel_calls: nil, drop_remainder: nil, f: nil, output_types: nil, output_shapes: nil, preserve_cardinality: nil) ⇒ Object
1201 1202 1203 |
# File 'lib/tensorflow/raw_ops.rb', line 1201 def experimental_numa_map_and_batch_dataset(input_dataset: nil, other_arguments: nil, batch_size: nil, num_parallel_calls: nil, drop_remainder: nil, f: nil, output_types: nil, output_shapes: nil, preserve_cardinality: nil) Utils.execute("ExperimentalNumaMapAndBatchDataset", [input_dataset, other_arguments, batch_size, num_parallel_calls, drop_remainder], f: f, output_types: output_types, output_shapes: output_shapes, preserve_cardinality: preserve_cardinality) end |
.experimental_parallel_interleave_dataset(input_dataset: nil, other_arguments: nil, cycle_length: nil, block_length: nil, sloppy: nil, buffer_output_elements: nil, prefetch_input_elements: nil, f: nil, output_types: nil, output_shapes: nil) ⇒ Object
1205 1206 1207 |
# File 'lib/tensorflow/raw_ops.rb', line 1205 def experimental_parallel_interleave_dataset(input_dataset: nil, other_arguments: nil, cycle_length: nil, block_length: nil, sloppy: nil, buffer_output_elements: nil, prefetch_input_elements: nil, f: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalParallelInterleaveDataset", [input_dataset, other_arguments, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements], f: f, output_types: output_types, output_shapes: output_shapes) end |
.experimental_parse_example_dataset(input_dataset: nil, num_parallel_calls: nil, dense_defaults: nil, sparse_keys: nil, dense_keys: nil, sparse_types: nil, dense_shapes: nil, output_types: nil, output_shapes: nil, sloppy: nil) ⇒ Object
1209 1210 1211 |
# File 'lib/tensorflow/raw_ops.rb', line 1209 def experimental_parse_example_dataset(input_dataset: nil, num_parallel_calls: nil, dense_defaults: nil, sparse_keys: nil, dense_keys: nil, sparse_types: nil, dense_shapes: nil, output_types: nil, output_shapes: nil, sloppy: nil) Utils.execute("ExperimentalParseExampleDataset", [input_dataset, num_parallel_calls, dense_defaults], sparse_keys: sparse_keys, dense_keys: dense_keys, sparse_types: sparse_types, dense_shapes: dense_shapes, output_types: output_types, output_shapes: output_shapes, sloppy: sloppy) end |
.experimental_private_thread_pool_dataset(input_dataset: nil, num_threads: nil, output_types: nil, output_shapes: nil) ⇒ Object
1213 1214 1215 |
# File 'lib/tensorflow/raw_ops.rb', line 1213 def experimental_private_thread_pool_dataset(input_dataset: nil, num_threads: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalPrivateThreadPoolDataset", [input_dataset, num_threads], output_types: output_types, output_shapes: output_shapes) end |
.experimental_random_dataset(seed: nil, seed2: nil, output_types: nil, output_shapes: nil) ⇒ Object
1217 1218 1219 |
# File 'lib/tensorflow/raw_ops.rb', line 1217 def experimental_random_dataset(seed: nil, seed2: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalRandomDataset", [seed, seed2], output_types: output_types, output_shapes: output_shapes) end |
.experimental_rebatch_dataset(input_dataset: nil, num_workers: nil, output_types: nil, output_shapes: nil) ⇒ Object
1221 1222 1223 |
# File 'lib/tensorflow/raw_ops.rb', line 1221 def experimental_rebatch_dataset(input_dataset: nil, num_workers: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalRebatchDataset", [input_dataset, num_workers], output_types: output_types, output_shapes: output_shapes) end |
.experimental_scan_dataset(input_dataset: nil, initial_state: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil, preserve_cardinality: nil) ⇒ Object
1225 1226 1227 |
# File 'lib/tensorflow/raw_ops.rb', line 1225 def experimental_scan_dataset(input_dataset: nil, initial_state: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil, preserve_cardinality: nil) Utils.execute("ExperimentalScanDataset", [input_dataset, initial_state, other_arguments], f: f, output_types: output_types, output_shapes: output_shapes, preserve_cardinality: preserve_cardinality) end |
.experimental_set_stats_aggregator_dataset(input_dataset: nil, stats_aggregator: nil, tag: nil, counter_prefix: nil, output_types: nil, output_shapes: nil) ⇒ Object
1229 1230 1231 |
# File 'lib/tensorflow/raw_ops.rb', line 1229 def experimental_set_stats_aggregator_dataset(input_dataset: nil, stats_aggregator: nil, tag: nil, counter_prefix: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalSetStatsAggregatorDataset", [input_dataset, stats_aggregator, tag, counter_prefix], output_types: output_types, output_shapes: output_shapes) end |
.experimental_sleep_dataset(input_dataset: nil, sleep_microseconds: nil, output_types: nil, output_shapes: nil) ⇒ Object
1233 1234 1235 |
# File 'lib/tensorflow/raw_ops.rb', line 1233 def experimental_sleep_dataset(input_dataset: nil, sleep_microseconds: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalSleepDataset", [input_dataset, sleep_microseconds], output_types: output_types, output_shapes: output_shapes) end |
.experimental_sliding_window_dataset(input_dataset: nil, window_size: nil, window_shift: nil, window_stride: nil, output_types: nil, output_shapes: nil) ⇒ Object
1237 1238 1239 |
# File 'lib/tensorflow/raw_ops.rb', line 1237 def experimental_sliding_window_dataset(input_dataset: nil, window_size: nil, window_shift: nil, window_stride: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalSlidingWindowDataset", [input_dataset, window_size, window_shift, window_stride], output_types: output_types, output_shapes: output_shapes) end |
.experimental_sql_dataset(driver_name: nil, data_source_name: nil, query: nil, output_types: nil, output_shapes: nil) ⇒ Object
1241 1242 1243 |
# File 'lib/tensorflow/raw_ops.rb', line 1241 def experimental_sql_dataset(driver_name: nil, data_source_name: nil, query: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalSqlDataset", [driver_name, data_source_name, query], output_types: output_types, output_shapes: output_shapes) end |
.experimental_stats_aggregator_handle(container: nil, shared_name: nil) ⇒ Object
1245 1246 1247 |
# File 'lib/tensorflow/raw_ops.rb', line 1245 def experimental_stats_aggregator_handle(container: nil, shared_name: nil) Utils.execute("ExperimentalStatsAggregatorHandle", [], container: container, shared_name: shared_name) end |
.experimental_stats_aggregator_summary(iterator: nil) ⇒ Object
1249 1250 1251 |
# File 'lib/tensorflow/raw_ops.rb', line 1249 def experimental_stats_aggregator_summary(iterator: nil) Utils.execute("ExperimentalStatsAggregatorSummary", [iterator]) end |
.experimental_take_while_dataset(input_dataset: nil, other_arguments: nil, predicate: nil, output_types: nil, output_shapes: nil) ⇒ Object
1253 1254 1255 |
# File 'lib/tensorflow/raw_ops.rb', line 1253 def experimental_take_while_dataset(input_dataset: nil, other_arguments: nil, predicate: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalTakeWhileDataset", [input_dataset, other_arguments], predicate: predicate, output_types: output_types, output_shapes: output_shapes) end |
.experimental_thread_pool_dataset(input_dataset: nil, thread_pool: nil, output_types: nil, output_shapes: nil) ⇒ Object
1257 1258 1259 |
# File 'lib/tensorflow/raw_ops.rb', line 1257 def experimental_thread_pool_dataset(input_dataset: nil, thread_pool: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalThreadPoolDataset", [input_dataset, thread_pool], output_types: output_types, output_shapes: output_shapes) end |
.experimental_thread_pool_handle(num_threads: nil, max_intra_op_parallelism: nil, display_name: nil, container: nil, shared_name: nil) ⇒ Object
1261 1262 1263 |
# File 'lib/tensorflow/raw_ops.rb', line 1261 def experimental_thread_pool_handle(num_threads: nil, max_intra_op_parallelism: nil, display_name: nil, container: nil, shared_name: nil) Utils.execute("ExperimentalThreadPoolHandle", [], num_threads: num_threads, max_intra_op_parallelism: max_intra_op_parallelism, display_name: display_name, container: container, shared_name: shared_name) end |
.experimental_unbatch_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) ⇒ Object
1265 1266 1267 |
# File 'lib/tensorflow/raw_ops.rb', line 1265 def experimental_unbatch_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalUnbatchDataset", [input_dataset], output_types: output_types, output_shapes: output_shapes) end |
.experimental_unique_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) ⇒ Object
1269 1270 1271 |
# File 'lib/tensorflow/raw_ops.rb', line 1269 def experimental_unique_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) Utils.execute("ExperimentalUniqueDataset", [input_dataset], output_types: output_types, output_shapes: output_shapes) end |
.expm1(x: nil) ⇒ Object
1273 1274 1275 |
# File 'lib/tensorflow/raw_ops.rb', line 1273 def expm1(x: nil) Utils.execute("Expm1", [x]) end |
.extract_glimpse(input: nil, size: nil, offsets: nil, centered: nil, normalized: nil, uniform_noise: nil, noise: nil) ⇒ Object
1277 1278 1279 |
# File 'lib/tensorflow/raw_ops.rb', line 1277 def extract_glimpse(input: nil, size: nil, offsets: nil, centered: nil, normalized: nil, uniform_noise: nil, noise: nil) Utils.execute("ExtractGlimpse", [input, size, offsets], centered: centered, normalized: normalized, uniform_noise: uniform_noise, noise: noise) end |
.extract_image_patches(images: nil, ksizes: nil, strides: nil, rates: nil, padding: nil) ⇒ Object
1281 1282 1283 |
# File 'lib/tensorflow/raw_ops.rb', line 1281 def extract_image_patches(images: nil, ksizes: nil, strides: nil, rates: nil, padding: nil) Utils.execute("ExtractImagePatches", [images], ksizes: ksizes, strides: strides, rates: rates, padding: padding) end |
.extract_jpeg_shape(contents: nil, output_type: nil) ⇒ Object
1285 1286 1287 |
# File 'lib/tensorflow/raw_ops.rb', line 1285 def extract_jpeg_shape(contents: nil, output_type: nil) Utils.execute("ExtractJpegShape", [contents], output_type: output_type) end |
.extract_volume_patches(input: nil, ksizes: nil, strides: nil, padding: nil) ⇒ Object
1289 1290 1291 |
# File 'lib/tensorflow/raw_ops.rb', line 1289 def extract_volume_patches(input: nil, ksizes: nil, strides: nil, padding: nil) Utils.execute("ExtractVolumePatches", [input], ksizes: ksizes, strides: strides, padding: padding) end |
.fact ⇒ Object
1313 1314 1315 |
# File 'lib/tensorflow/raw_ops.rb', line 1313 def fact Utils.execute("Fact", []) end |
.fake_param(dtype: nil, shape: nil) ⇒ Object
1317 1318 1319 |
# File 'lib/tensorflow/raw_ops.rb', line 1317 def fake_param(dtype: nil, shape: nil) Utils.execute("FakeParam", [], dtype: dtype, shape: shape) end |
.fake_quant_with_min_max_args(inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) ⇒ Object
1321 1322 1323 |
# File 'lib/tensorflow/raw_ops.rb', line 1321 def fake_quant_with_min_max_args(inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) Utils.execute("FakeQuantWithMinMaxArgs", [inputs], min: min, max: max, num_bits: num_bits, narrow_range: narrow_range) end |
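Example — FakeQuantWithMinMaxArgs simulates quantization of float inputs into 2**num_bits levels over roughly [min, max] while keeping float outputs; a rough sketch (attribute handling through Utils.execute is assumed):
require "tensorflow"

inputs = TensorFlow.constant([-1.5, -0.4, 0.0, 0.7, 2.0])

# Values are clamped to approximately [-1, 1] and snapped to the nearest
# of 256 evenly spaced levels; the result is still a float tensor.
TensorFlow::RawOps.fake_quant_with_min_max_args(
  inputs: inputs,
  min: -1.0,
  max: 1.0,
  num_bits: 8,
  narrow_range: false
)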
.fake_quant_with_min_max_args_gradient(gradients: nil, inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) ⇒ Object
1325 1326 1327 |
# File 'lib/tensorflow/raw_ops.rb', line 1325 def fake_quant_with_min_max_args_gradient(gradients: nil, inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) Utils.execute("FakeQuantWithMinMaxArgsGradient", [gradients, inputs], min: min, max: max, num_bits: num_bits, narrow_range: narrow_range) end |
.fake_quant_with_min_max_vars(inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) ⇒ Object
1329 1330 1331 |
# File 'lib/tensorflow/raw_ops.rb', line 1329 def fake_quant_with_min_max_vars(inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) Utils.execute("FakeQuantWithMinMaxVars", [inputs, min, max], num_bits: num_bits, narrow_range: narrow_range) end |
.fake_quant_with_min_max_vars_gradient(gradients: nil, inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) ⇒ Object
1333 1334 1335 |
# File 'lib/tensorflow/raw_ops.rb', line 1333 def fake_quant_with_min_max_vars_gradient(gradients: nil, inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) Utils.execute("FakeQuantWithMinMaxVarsGradient", [gradients, inputs, min, max], num_bits: num_bits, narrow_range: narrow_range) end |
.fake_quant_with_min_max_vars_per_channel(inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) ⇒ Object
1337 1338 1339 |
# File 'lib/tensorflow/raw_ops.rb', line 1337 def fake_quant_with_min_max_vars_per_channel(inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) Utils.execute("FakeQuantWithMinMaxVarsPerChannel", [inputs, min, max], num_bits: num_bits, narrow_range: narrow_range) end |
.fake_quant_with_min_max_vars_per_channel_gradient(gradients: nil, inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) ⇒ Object
1341 1342 1343 |
# File 'lib/tensorflow/raw_ops.rb', line 1341 def fake_quant_with_min_max_vars_per_channel_gradient(gradients: nil, inputs: nil, min: nil, max: nil, num_bits: nil, narrow_range: nil) Utils.execute("FakeQuantWithMinMaxVarsPerChannelGradient", [gradients, inputs, min, max], num_bits: num_bits, narrow_range: narrow_range) end |
.fake_queue(resource: nil) ⇒ Object
1345 1346 1347 |
# File 'lib/tensorflow/raw_ops.rb', line 1345 def fake_queue(resource: nil) Utils.execute("FakeQueue", [resource]) end |
.fft(input: nil) ⇒ Object
1293 1294 1295 |
# File 'lib/tensorflow/raw_ops.rb', line 1293 def fft(input: nil) Utils.execute("FFT", [input]) end |
.fft2d(input: nil) ⇒ Object
1297 1298 1299 |
# File 'lib/tensorflow/raw_ops.rb', line 1297 def fft2d(input: nil) Utils.execute("FFT2D", [input]) end |
.fft3d(input: nil) ⇒ Object
1301 1302 1303 |
# File 'lib/tensorflow/raw_ops.rb', line 1301 def fft3d(input: nil) Utils.execute("FFT3D", [input]) end |
.fifo_queue(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) ⇒ Object
1305 1306 1307 |
# File 'lib/tensorflow/raw_ops.rb', line 1305 def fifo_queue(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) Utils.execute("FIFOQueue", [], component_types: component_types, shapes: shapes, capacity: capacity, container: container, shared_name: shared_name) end |
.fifo_queue_v2(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) ⇒ Object
1309 1310 1311 |
# File 'lib/tensorflow/raw_ops.rb', line 1309 def fifo_queue_v2(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) Utils.execute("FIFOQueueV2", [], component_types: component_types, shapes: shapes, capacity: capacity, container: container, shared_name: shared_name) end |
.fill(dims: nil, value: nil, index_type: nil) ⇒ Object
1349 1350 1351 |
# File 'lib/tensorflow/raw_ops.rb', line 1349 def fill(dims: nil, value: nil, index_type: nil) Utils.execute("Fill", [dims, value], index_type: index_type) end |
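Usage sketch for Fill (illustrative only; assumes eager execution, that plain Ruby arrays and scalars are converted to tensors by the gem, and that an unset index_type falls back to the op default — otherwise build tensors explicitly first):
# build a 2x3 tensor with every element set to 7
filled = TensorFlow::RawOps.fill(dims: [2, 3], value: 7)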
.filter_by_last_component_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) ⇒ Object
1353 1354 1355 |
# File 'lib/tensorflow/raw_ops.rb', line 1353 def filter_by_last_component_dataset(input_dataset: nil, output_types: nil, output_shapes: nil) Utils.execute("FilterByLastComponentDataset", [input_dataset], output_types: output_types, output_shapes: output_shapes) end |
.filter_dataset(input_dataset: nil, other_arguments: nil, predicate: nil, output_types: nil, output_shapes: nil) ⇒ Object
1357 1358 1359 |
# File 'lib/tensorflow/raw_ops.rb', line 1357 def filter_dataset(input_dataset: nil, other_arguments: nil, predicate: nil, output_types: nil, output_shapes: nil) Utils.execute("FilterDataset", [input_dataset, other_arguments], predicate: predicate, output_types: output_types, output_shapes: output_shapes) end |
.fingerprint(data: nil, method: nil) ⇒ Object
1361 1362 1363 |
# File 'lib/tensorflow/raw_ops.rb', line 1361 def fingerprint(data: nil, method: nil) Utils.execute("Fingerprint", [data, method]) end |
.fixed_length_record_dataset(filenames: nil, header_bytes: nil, record_bytes: nil, footer_bytes: nil, buffer_size: nil) ⇒ Object
1365 1366 1367 |
# File 'lib/tensorflow/raw_ops.rb', line 1365 def fixed_length_record_dataset(filenames: nil, header_bytes: nil, record_bytes: nil, footer_bytes: nil, buffer_size: nil) Utils.execute("FixedLengthRecordDataset", [filenames, header_bytes, record_bytes, footer_bytes, buffer_size]) end |
.fixed_length_record_dataset_v2(filenames: nil, header_bytes: nil, record_bytes: nil, footer_bytes: nil, buffer_size: nil, compression_type: nil) ⇒ Object
1369 1370 1371 |
# File 'lib/tensorflow/raw_ops.rb', line 1369 def fixed_length_record_dataset_v2(filenames: nil, header_bytes: nil, record_bytes: nil, footer_bytes: nil, buffer_size: nil, compression_type: nil) Utils.execute("FixedLengthRecordDatasetV2", [filenames, header_bytes, record_bytes, footer_bytes, buffer_size, compression_type]) end |
.fixed_length_record_reader(header_bytes: nil, record_bytes: nil, footer_bytes: nil, hop_bytes: nil, container: nil, shared_name: nil) ⇒ Object
1373 1374 1375 |
# File 'lib/tensorflow/raw_ops.rb', line 1373 def fixed_length_record_reader(header_bytes: nil, record_bytes: nil, footer_bytes: nil, hop_bytes: nil, container: nil, shared_name: nil) Utils.execute("FixedLengthRecordReader", [], header_bytes: header_bytes, record_bytes: record_bytes, footer_bytes: footer_bytes, hop_bytes: hop_bytes, container: container, shared_name: shared_name) end |
.fixed_length_record_reader_v2(header_bytes: nil, record_bytes: nil, footer_bytes: nil, hop_bytes: nil, container: nil, shared_name: nil, encoding: nil) ⇒ Object
1377 1378 1379 |
# File 'lib/tensorflow/raw_ops.rb', line 1377 def fixed_length_record_reader_v2(header_bytes: nil, record_bytes: nil, footer_bytes: nil, hop_bytes: nil, container: nil, shared_name: nil, encoding: nil) Utils.execute("FixedLengthRecordReaderV2", [], header_bytes: header_bytes, record_bytes: record_bytes, footer_bytes: footer_bytes, hop_bytes: hop_bytes, container: container, shared_name: shared_name, encoding: encoding) end |
.fixed_unigram_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, vocab_file: nil, distortion: nil, num_reserved_ids: nil, num_shards: nil, shard: nil, unigrams: nil, seed: nil, seed2: nil) ⇒ Object
1381 1382 1383 |
# File 'lib/tensorflow/raw_ops.rb', line 1381 def fixed_unigram_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, vocab_file: nil, distortion: nil, num_reserved_ids: nil, num_shards: nil, shard: nil, unigrams: nil, seed: nil, seed2: nil) Utils.execute("FixedUnigramCandidateSampler", [true_classes], num_true: num_true, num_sampled: num_sampled, unique: unique, range_max: range_max, vocab_file: vocab_file, distortion: distortion, num_reserved_ids: num_reserved_ids, num_shards: num_shards, shard: shard, unigrams: unigrams, seed: seed, seed2: seed2) end |
.flat_map_dataset(input_dataset: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil) ⇒ Object
1385 1386 1387 |
# File 'lib/tensorflow/raw_ops.rb', line 1385 def flat_map_dataset(input_dataset: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil) Utils.execute("FlatMapDataset", [input_dataset, other_arguments], f: f, output_types: output_types, output_shapes: output_shapes) end |
.floor(x: nil) ⇒ Object
1389 1390 1391 |
# File 'lib/tensorflow/raw_ops.rb', line 1389 def floor(x: nil) Utils.execute("Floor", [x]) end |
.floor_div(x: nil, y: nil) ⇒ Object
1393 1394 1395 |
# File 'lib/tensorflow/raw_ops.rb', line 1393 def floor_div(x: nil, y: nil) Utils.execute("FloorDiv", [x, y]) end |
.floor_mod(x: nil, y: nil) ⇒ Object
1397 1398 1399 |
# File 'lib/tensorflow/raw_ops.rb', line 1397 def floor_mod(x: nil, y: nil) Utils.execute("FloorMod", [x, y]) end |
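Usage sketch for FloorMod (illustrative; assumes plain Ruby arrays are accepted as tensor inputs). Floored modulo follows the sign of the divisor, so floor_mod(-7, 3) is 2 rather than -1:
# element-wise floored remainder
r = TensorFlow::RawOps.floor_mod(x: [-7, 7], y: [3, 3])
# expected values: [2, 1]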
.flush_summary_writer(writer: nil) ⇒ Object
1401 1402 1403 |
# File 'lib/tensorflow/raw_ops.rb', line 1401 def flush_summary_writer(writer: nil) Utils.execute("FlushSummaryWriter", [writer]) end |
.for(start: nil, limit: nil, delta: nil, input: nil, body: nil) ⇒ Object
1405 1406 1407 |
# File 'lib/tensorflow/raw_ops.rb', line 1405 def for(start: nil, limit: nil, delta: nil, input: nil, body: nil) Utils.execute("For", [start, limit, delta, input], body: body) end |
.fractional_avg_pool(value: nil, pooling_ratio: nil, pseudo_random: nil, overlapping: nil, deterministic: nil, seed: nil, seed2: nil) ⇒ Object
1409 1410 1411 |
# File 'lib/tensorflow/raw_ops.rb', line 1409 def fractional_avg_pool(value: nil, pooling_ratio: nil, pseudo_random: nil, overlapping: nil, deterministic: nil, seed: nil, seed2: nil) Utils.execute("FractionalAvgPool", [value], pooling_ratio: pooling_ratio, pseudo_random: pseudo_random, overlapping: overlapping, deterministic: deterministic, seed: seed, seed2: seed2) end |
.fractional_avg_pool_grad(orig_input_tensor_shape: nil, out_backprop: nil, row_pooling_sequence: nil, col_pooling_sequence: nil, overlapping: nil) ⇒ Object
1413 1414 1415 |
# File 'lib/tensorflow/raw_ops.rb', line 1413 def fractional_avg_pool_grad(orig_input_tensor_shape: nil, out_backprop: nil, row_pooling_sequence: nil, col_pooling_sequence: nil, overlapping: nil) Utils.execute("FractionalAvgPoolGrad", [orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence], overlapping: overlapping) end |
.fractional_max_pool(value: nil, pooling_ratio: nil, pseudo_random: nil, overlapping: nil, deterministic: nil, seed: nil, seed2: nil) ⇒ Object
1417 1418 1419 |
# File 'lib/tensorflow/raw_ops.rb', line 1417 def fractional_max_pool(value: nil, pooling_ratio: nil, pseudo_random: nil, overlapping: nil, deterministic: nil, seed: nil, seed2: nil) Utils.execute("FractionalMaxPool", [value], pooling_ratio: pooling_ratio, pseudo_random: pseudo_random, overlapping: overlapping, deterministic: deterministic, seed: seed, seed2: seed2) end |
.fractional_max_pool_grad(orig_input: nil, orig_output: nil, out_backprop: nil, row_pooling_sequence: nil, col_pooling_sequence: nil, overlapping: nil) ⇒ Object
1421 1422 1423 |
# File 'lib/tensorflow/raw_ops.rb', line 1421 def fractional_max_pool_grad(orig_input: nil, orig_output: nil, out_backprop: nil, row_pooling_sequence: nil, col_pooling_sequence: nil, overlapping: nil) Utils.execute("FractionalMaxPoolGrad", [orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence], overlapping: overlapping) end |
.fused_batch_norm(x: nil, scale: nil, offset: nil, mean: nil, variance: nil, epsilon: nil, data_format: nil, is_training: nil) ⇒ Object
1425 1426 1427 |
# File 'lib/tensorflow/raw_ops.rb', line 1425 def fused_batch_norm(x: nil, scale: nil, offset: nil, mean: nil, variance: nil, epsilon: nil, data_format: nil, is_training: nil) Utils.execute("FusedBatchNorm", [x, scale, offset, mean, variance], epsilon: epsilon, data_format: data_format, is_training: is_training) end |
.fused_batch_norm_grad(y_backprop: nil, x: nil, scale: nil, reserve_space_1: nil, reserve_space_2: nil, epsilon: nil, data_format: nil, is_training: nil) ⇒ Object
1429 1430 1431 |
# File 'lib/tensorflow/raw_ops.rb', line 1429 def fused_batch_norm_grad(y_backprop: nil, x: nil, scale: nil, reserve_space_1: nil, reserve_space_2: nil, epsilon: nil, data_format: nil, is_training: nil) Utils.execute("FusedBatchNormGrad", [y_backprop, x, scale, reserve_space_1, reserve_space_2], epsilon: epsilon, data_format: data_format, is_training: is_training) end |
.fused_batch_norm_grad_v2(y_backprop: nil, x: nil, scale: nil, reserve_space_1: nil, reserve_space_2: nil, epsilon: nil, data_format: nil, is_training: nil) ⇒ Object
1433 1434 1435 |
# File 'lib/tensorflow/raw_ops.rb', line 1433 def fused_batch_norm_grad_v2(y_backprop: nil, x: nil, scale: nil, reserve_space_1: nil, reserve_space_2: nil, epsilon: nil, data_format: nil, is_training: nil) Utils.execute("FusedBatchNormGradV2", [y_backprop, x, scale, reserve_space_1, reserve_space_2], epsilon: epsilon, data_format: data_format, is_training: is_training) end |
.fused_batch_norm_grad_v3(y_backprop: nil, x: nil, scale: nil, reserve_space_1: nil, reserve_space_2: nil, reserve_space_3: nil, epsilon: nil, data_format: nil, is_training: nil) ⇒ Object
1437 1438 1439 |
# File 'lib/tensorflow/raw_ops.rb', line 1437 def fused_batch_norm_grad_v3(y_backprop: nil, x: nil, scale: nil, reserve_space_1: nil, reserve_space_2: nil, reserve_space_3: nil, epsilon: nil, data_format: nil, is_training: nil) Utils.execute("FusedBatchNormGradV3", [y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3], epsilon: epsilon, data_format: data_format, is_training: is_training) end |
.fused_batch_norm_v2(x: nil, scale: nil, offset: nil, mean: nil, variance: nil, epsilon: nil, data_format: nil, is_training: nil) ⇒ Object
1441 1442 1443 |
# File 'lib/tensorflow/raw_ops.rb', line 1441 def fused_batch_norm_v2(x: nil, scale: nil, offset: nil, mean: nil, variance: nil, epsilon: nil, data_format: nil, is_training: nil) Utils.execute("FusedBatchNormV2", [x, scale, offset, mean, variance], epsilon: epsilon, data_format: data_format, is_training: is_training) end |
.fused_batch_norm_v3(x: nil, scale: nil, offset: nil, mean: nil, variance: nil, epsilon: nil, data_format: nil, is_training: nil) ⇒ Object
1445 1446 1447 |
# File 'lib/tensorflow/raw_ops.rb', line 1445 def fused_batch_norm_v3(x: nil, scale: nil, offset: nil, mean: nil, variance: nil, epsilon: nil, data_format: nil, is_training: nil) Utils.execute("FusedBatchNormV3", [x, scale, offset, mean, variance], epsilon: epsilon, data_format: data_format, is_training: is_training) end |
.fused_pad_conv2d(input: nil, paddings: nil, filter: nil, mode: nil, strides: nil, padding: nil) ⇒ Object
1449 1450 1451 |
# File 'lib/tensorflow/raw_ops.rb', line 1449 def fused_pad_conv2d(input: nil, paddings: nil, filter: nil, mode: nil, strides: nil, padding: nil) Utils.execute("FusedPadConv2D", [input, paddings, filter], mode: mode, strides: strides, padding: padding) end |
.fused_resize_and_pad_conv2d(input: nil, size: nil, paddings: nil, filter: nil, resize_align_corners: nil, mode: nil, strides: nil, padding: nil) ⇒ Object
1453 1454 1455 |
# File 'lib/tensorflow/raw_ops.rb', line 1453 def fused_resize_and_pad_conv2d(input: nil, size: nil, paddings: nil, filter: nil, resize_align_corners: nil, mode: nil, strides: nil, padding: nil) Utils.execute("FusedResizeAndPadConv2D", [input, size, paddings, filter], resize_align_corners: resize_align_corners, mode: mode, strides: strides, padding: padding) end |
.gather(params: nil, indices: nil, validate_indices: nil) ⇒ Object
1457 1458 1459 |
# File 'lib/tensorflow/raw_ops.rb', line 1457 def gather(params: nil, indices: nil, validate_indices: nil) Utils.execute("Gather", [params, indices], validate_indices: validate_indices) end |
.gather_nd(params: nil, indices: nil) ⇒ Object
1461 1462 1463 |
# File 'lib/tensorflow/raw_ops.rb', line 1461 def gather_nd(params: nil, indices: nil) Utils.execute("GatherNd", [params, indices]) end |
.gather_v2(params: nil, indices: nil, axis: nil, batch_dims: nil) ⇒ Object
1465 1466 1467 |
# File 'lib/tensorflow/raw_ops.rb', line 1465 def gather_v2(params: nil, indices: nil, axis: nil, batch_dims: nil) Utils.execute("GatherV2", [params, indices, axis], batch_dims: batch_dims) end |
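Usage sketch for GatherV2 (illustrative; assumes plain Ruby arrays are converted to tensors and that a nil batch_dims falls back to the op default):
# pick rows 2 and 0 of params along axis 0
params  = [[1, 2], [3, 4], [5, 6]]
indices = [2, 0]
rows = TensorFlow::RawOps.gather_v2(params: params, indices: indices, axis: 0)
# expected rows: [[5, 6], [1, 2]]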
.gcs_configure_block_cache(max_cache_size: nil, block_size: nil, max_staleness: nil) ⇒ Object
1469 1470 1471 |
# File 'lib/tensorflow/raw_ops.rb', line 1469 def gcs_configure_block_cache(max_cache_size: nil, block_size: nil, max_staleness: nil) Utils.execute("GcsConfigureBlockCache", [max_cache_size, block_size, max_staleness]) end |
.gcs_configure_credentials(json: nil) ⇒ Object
1473 1474 1475 |
# File 'lib/tensorflow/raw_ops.rb', line 1473 def gcs_configure_credentials(json: nil) Utils.execute("GcsConfigureCredentials", [json]) end |
.generate_big_query_reader_partitions(project_id: nil, dataset_id: nil, table_id: nil, columns: nil, timestamp_millis: nil, num_partitions: nil, test_end_point: nil) ⇒ Object
1477 1478 1479 |
# File 'lib/tensorflow/raw_ops.rb', line 1477 def generate_big_query_reader_partitions(project_id: nil, dataset_id: nil, table_id: nil, columns: nil, timestamp_millis: nil, num_partitions: nil, test_end_point: nil) Utils.execute("GenerateBigQueryReaderPartitions", [], project_id: project_id, dataset_id: dataset_id, table_id: table_id, columns: columns, timestamp_millis: timestamp_millis, num_partitions: num_partitions, test_end_point: test_end_point) end |
.generate_vocab_remapping(new_vocab_file: nil, old_vocab_file: nil, new_vocab_offset: nil, num_new_vocab: nil, old_vocab_size: nil) ⇒ Object
1481 1482 1483 |
# File 'lib/tensorflow/raw_ops.rb', line 1481 def generate_vocab_remapping(new_vocab_file: nil, old_vocab_file: nil, new_vocab_offset: nil, num_new_vocab: nil, old_vocab_size: nil) Utils.execute("GenerateVocabRemapping", [new_vocab_file, old_vocab_file], new_vocab_offset: new_vocab_offset, num_new_vocab: num_new_vocab, old_vocab_size: old_vocab_size) end |
.generator_dataset(init_func_other_args: nil, next_func_other_args: nil, finalize_func_other_args: nil, init_func: nil, next_func: nil, finalize_func: nil, output_types: nil, output_shapes: nil) ⇒ Object
1485 1486 1487 |
# File 'lib/tensorflow/raw_ops.rb', line 1485 def generator_dataset(init_func_other_args: nil, next_func_other_args: nil, finalize_func_other_args: nil, init_func: nil, next_func: nil, finalize_func: nil, output_types: nil, output_shapes: nil) Utils.execute("GeneratorDataset", [init_func_other_args, next_func_other_args, finalize_func_other_args], init_func: init_func, next_func: next_func, finalize_func: finalize_func, output_types: output_types, output_shapes: output_shapes) end |
.get_session_handle(value: nil) ⇒ Object
1489 1490 1491 |
# File 'lib/tensorflow/raw_ops.rb', line 1489 def get_session_handle(value: nil) Utils.execute("GetSessionHandle", [value]) end |
.get_session_handle_v2(value: nil) ⇒ Object
1493 1494 1495 |
# File 'lib/tensorflow/raw_ops.rb', line 1493 def get_session_handle_v2(value: nil) Utils.execute("GetSessionHandleV2", [value]) end |
.get_session_tensor(handle: nil, dtype: nil) ⇒ Object
1497 1498 1499 |
# File 'lib/tensorflow/raw_ops.rb', line 1497 def get_session_tensor(handle: nil, dtype: nil) Utils.execute("GetSessionTensor", [handle], dtype: dtype) end |
.greater(x: nil, y: nil) ⇒ Object
1501 1502 1503 |
# File 'lib/tensorflow/raw_ops.rb', line 1501 def greater(x: nil, y: nil) Utils.execute("Greater", [x, y]) end |
.greater_equal(x: nil, y: nil) ⇒ Object
1505 1506 1507 |
# File 'lib/tensorflow/raw_ops.rb', line 1505 def greater_equal(x: nil, y: nil) Utils.execute("GreaterEqual", [x, y]) end |
.guarantee_const(input: nil) ⇒ Object
1509 1510 1511 |
# File 'lib/tensorflow/raw_ops.rb', line 1509 def guarantee_const(input: nil) Utils.execute("GuaranteeConst", [input]) end |
.hash_table(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil) ⇒ Object
1517 1518 1519 |
# File 'lib/tensorflow/raw_ops.rb', line 1517 def hash_table(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil) Utils.execute("HashTable", [], container: container, shared_name: shared_name, use_node_name_sharing: use_node_name_sharing, key_dtype: key_dtype, value_dtype: value_dtype) end |
.hash_table_v2(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil) ⇒ Object
1521 1522 1523 |
# File 'lib/tensorflow/raw_ops.rb', line 1521 def hash_table_v2(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil) Utils.execute("HashTableV2", [], container: container, shared_name: shared_name, use_node_name_sharing: use_node_name_sharing, key_dtype: key_dtype, value_dtype: value_dtype) end |
.histogram_fixed_width(values: nil, value_range: nil, nbins: nil, dtype: nil) ⇒ Object
1525 1526 1527 |
# File 'lib/tensorflow/raw_ops.rb', line 1525 def histogram_fixed_width(values: nil, value_range: nil, nbins: nil, dtype: nil) Utils.execute("HistogramFixedWidth", [values, value_range, nbins], dtype: dtype) end |
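Usage sketch for HistogramFixedWidth (illustrative; assumes Ruby arrays are accepted as tensor inputs and that a nil dtype uses the op's int32 default):
# count values in 3 equal-width bins over the range [0.0, 3.0)
counts = TensorFlow::RawOps.histogram_fixed_width(values: [0.2, 1.1, 1.9, 2.5], value_range: [0.0, 3.0], nbins: 3)
# expected counts: [1, 2, 1]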
.histogram_summary(tag: nil, values: nil) ⇒ Object
1529 1530 1531 |
# File 'lib/tensorflow/raw_ops.rb', line 1529 def histogram_summary(tag: nil, values: nil) Utils.execute("HistogramSummary", [tag, values]) end |
.host_const(value: nil, dtype: nil) ⇒ Object
1533 1534 1535 |
# File 'lib/tensorflow/raw_ops.rb', line 1533 def host_const(value: nil, dtype: nil) Utils.execute("HostConst", [], value: value, dtype: dtype) end |
.hsv_to_rgb(images: nil) ⇒ Object
1513 1514 1515 |
# File 'lib/tensorflow/raw_ops.rb', line 1513 def hsv_to_rgb(images: nil) Utils.execute("HSVToRGB", [images]) end |
.identity(input: nil) ⇒ Object
1561 1562 1563 |
# File 'lib/tensorflow/raw_ops.rb', line 1561 def identity(input: nil) Utils.execute("Identity", [input]) end |
.identity_n(input: nil) ⇒ Object
1565 1566 1567 |
# File 'lib/tensorflow/raw_ops.rb', line 1565 def identity_n(input: nil) Utils.execute("IdentityN", [input]) end |
.identity_reader(container: nil, shared_name: nil) ⇒ Object
1569 1570 1571 |
# File 'lib/tensorflow/raw_ops.rb', line 1569 def identity_reader(container: nil, shared_name: nil) Utils.execute("IdentityReader", [], container: container, shared_name: shared_name) end |
.identity_reader_v2(container: nil, shared_name: nil) ⇒ Object
1573 1574 1575 |
# File 'lib/tensorflow/raw_ops.rb', line 1573 def identity_reader_v2(container: nil, shared_name: nil) Utils.execute("IdentityReaderV2", [], container: container, shared_name: shared_name) end |
.if(cond: nil, input: nil, then_branch: nil, else_branch: nil, output_shapes: nil) ⇒ Object
1577 1578 1579 |
# File 'lib/tensorflow/raw_ops.rb', line 1577 def if(cond: nil, input: nil, then_branch: nil, else_branch: nil, output_shapes: nil) Utils.execute("If", [cond, input], then_branch: then_branch, else_branch: else_branch, output_shapes: output_shapes) end |
.ifft(input: nil) ⇒ Object
1537 1538 1539 |
# File 'lib/tensorflow/raw_ops.rb', line 1537 def ifft(input: nil) Utils.execute("IFFT", [input]) end |
.ifft2d(input: nil) ⇒ Object
1541 1542 1543 |
# File 'lib/tensorflow/raw_ops.rb', line 1541 def ifft2d(input: nil) Utils.execute("IFFT2D", [input]) end |
.ifft3d(input: nil) ⇒ Object
1545 1546 1547 |
# File 'lib/tensorflow/raw_ops.rb', line 1545 def ifft3d(input: nil) Utils.execute("IFFT3D", [input]) end |
.igamma(a: nil, x: nil) ⇒ Object
1581 1582 1583 |
# File 'lib/tensorflow/raw_ops.rb', line 1581 def igamma(a: nil, x: nil) Utils.execute("Igamma", [a, x]) end |
.igamma_grad_a(a: nil, x: nil) ⇒ Object
1585 1586 1587 |
# File 'lib/tensorflow/raw_ops.rb', line 1585 def igamma_grad_a(a: nil, x: nil) Utils.execute("IgammaGradA", [a, x]) end |
.igammac(a: nil, x: nil) ⇒ Object
1589 1590 1591 |
# File 'lib/tensorflow/raw_ops.rb', line 1589 def igammac(a: nil, x: nil) Utils.execute("Igammac", [a, x]) end |
.imag(input: nil) ⇒ Object
1593 1594 1595 |
# File 'lib/tensorflow/raw_ops.rb', line 1593 def imag(input: nil) Utils.execute("Imag", [input]) end |
.image_summary(tag: nil, tensor: nil, max_images: nil, bad_color: nil) ⇒ Object
1597 1598 1599 |
# File 'lib/tensorflow/raw_ops.rb', line 1597 def image_summary(tag: nil, tensor: nil, max_images: nil, bad_color: nil) Utils.execute("ImageSummary", [tag, tensor], max_images: max_images, bad_color: bad_color) end |
.immutable_const(dtype: nil, shape: nil, memory_region_name: nil) ⇒ Object
1601 1602 1603 |
# File 'lib/tensorflow/raw_ops.rb', line 1601 def immutable_const(dtype: nil, shape: nil, memory_region_name: nil) Utils.execute("ImmutableConst", [], dtype: dtype, shape: shape, memory_region_name: memory_region_name) end |
.import_event(writer: nil, event: nil) ⇒ Object
1605 1606 1607 |
# File 'lib/tensorflow/raw_ops.rb', line 1605 def import_event(writer: nil, event: nil) Utils.execute("ImportEvent", [writer, event]) end |
.in_top_k(predictions: nil, targets: nil, k: nil) ⇒ Object
1609 1610 1611 |
# File 'lib/tensorflow/raw_ops.rb', line 1609 def in_top_k(predictions: nil, targets: nil, k: nil) Utils.execute("InTopK", [predictions, targets], k: k) end |
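Usage sketch for InTopK (illustrative; assumes Ruby arrays convert to tensors; targets must be integer class ids):
# per row: is the target class among the top-2 scoring predictions?
predictions = [[0.1, 0.6, 0.3], [0.8, 0.1, 0.1]]
targets = [2, 0]
hits = TensorFlow::RawOps.in_top_k(predictions: predictions, targets: targets, k: 2)
# expected: [true, true]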
.in_top_kv2(predictions: nil, targets: nil, k: nil) ⇒ Object
1613 1614 1615 |
# File 'lib/tensorflow/raw_ops.rb', line 1613 def in_top_kv2(predictions: nil, targets: nil, k: nil) Utils.execute("InTopKV2", [predictions, targets, k]) end |
.infeed_dequeue(dtype: nil, shape: nil) ⇒ Object
1617 1618 1619 |
# File 'lib/tensorflow/raw_ops.rb', line 1617 def infeed_dequeue(dtype: nil, shape: nil) Utils.execute("InfeedDequeue", [], dtype: dtype, shape: shape) end |
.infeed_dequeue_tuple(dtypes: nil, shapes: nil) ⇒ Object
1621 1622 1623 |
# File 'lib/tensorflow/raw_ops.rb', line 1621 def infeed_dequeue_tuple(dtypes: nil, shapes: nil) Utils.execute("InfeedDequeueTuple", [], dtypes: dtypes, shapes: shapes) end |
.infeed_enqueue(input: nil, dtype: nil, shape: nil, layout: nil, device_ordinal: nil) ⇒ Object
1625 1626 1627 |
# File 'lib/tensorflow/raw_ops.rb', line 1625 def infeed_enqueue(input: nil, dtype: nil, shape: nil, layout: nil, device_ordinal: nil) Utils.execute("InfeedEnqueue", [input], dtype: dtype, shape: shape, layout: layout, device_ordinal: device_ordinal) end |
.infeed_enqueue_prelinearized_buffer(input: nil, device_ordinal: nil) ⇒ Object
1629 1630 1631 |
# File 'lib/tensorflow/raw_ops.rb', line 1629 def infeed_enqueue_prelinearized_buffer(input: nil, device_ordinal: nil) Utils.execute("InfeedEnqueuePrelinearizedBuffer", [input], device_ordinal: device_ordinal) end |
.infeed_enqueue_tuple(inputs: nil, dtypes: nil, shapes: nil, layouts: nil, device_ordinal: nil) ⇒ Object
1633 1634 1635 |
# File 'lib/tensorflow/raw_ops.rb', line 1633 def infeed_enqueue_tuple(inputs: nil, dtypes: nil, shapes: nil, layouts: nil, device_ordinal: nil) Utils.execute("InfeedEnqueueTuple", [inputs], dtypes: dtypes, shapes: shapes, layouts: layouts, device_ordinal: device_ordinal) end |
.initialize_table(table_handle: nil, keys: nil, values: nil) ⇒ Object
1637 1638 1639 |
# File 'lib/tensorflow/raw_ops.rb', line 1637 def initialize_table(table_handle: nil, keys: nil, values: nil) Utils.execute("InitializeTable", [table_handle, keys, values]) end |
.initialize_table_from_text_file(table_handle: nil, filename: nil, key_index: nil, value_index: nil, vocab_size: nil, delimiter: nil) ⇒ Object
1641 1642 1643 |
# File 'lib/tensorflow/raw_ops.rb', line 1641 def initialize_table_from_text_file(table_handle: nil, filename: nil, key_index: nil, value_index: nil, vocab_size: nil, delimiter: nil) Utils.execute("InitializeTableFromTextFile", [table_handle, filename], key_index: key_index, value_index: value_index, vocab_size: vocab_size, delimiter: delimiter) end |
.initialize_table_from_text_file_v2(table_handle: nil, filename: nil, key_index: nil, value_index: nil, vocab_size: nil, delimiter: nil) ⇒ Object
1645 1646 1647 |
# File 'lib/tensorflow/raw_ops.rb', line 1645 def initialize_table_from_text_file_v2(table_handle: nil, filename: nil, key_index: nil, value_index: nil, vocab_size: nil, delimiter: nil) Utils.execute("InitializeTableFromTextFileV2", [table_handle, filename], key_index: key_index, value_index: value_index, vocab_size: vocab_size, delimiter: delimiter) end |
.initialize_table_v2(table_handle: nil, keys: nil, values: nil) ⇒ Object
1649 1650 1651 |
# File 'lib/tensorflow/raw_ops.rb', line 1649 def initialize_table_v2(table_handle: nil, keys: nil, values: nil) Utils.execute("InitializeTableV2", [table_handle, keys, values]) end |
.inplace_add(x: nil, i: nil, v: nil) ⇒ Object
1653 1654 1655 |
# File 'lib/tensorflow/raw_ops.rb', line 1653 def inplace_add(x: nil, i: nil, v: nil) Utils.execute("InplaceAdd", [x, i, v]) end |
.inplace_sub(x: nil, i: nil, v: nil) ⇒ Object
1657 1658 1659 |
# File 'lib/tensorflow/raw_ops.rb', line 1657 def inplace_sub(x: nil, i: nil, v: nil) Utils.execute("InplaceSub", [x, i, v]) end |
.inplace_update(x: nil, i: nil, v: nil) ⇒ Object
1661 1662 1663 |
# File 'lib/tensorflow/raw_ops.rb', line 1661 def inplace_update(x: nil, i: nil, v: nil) Utils.execute("InplaceUpdate", [x, i, v]) end |
.interleave_dataset(input_dataset: nil, other_arguments: nil, cycle_length: nil, block_length: nil, f: nil, output_types: nil, output_shapes: nil) ⇒ Object
1665 1666 1667 |
# File 'lib/tensorflow/raw_ops.rb', line 1665 def interleave_dataset(input_dataset: nil, other_arguments: nil, cycle_length: nil, block_length: nil, f: nil, output_types: nil, output_shapes: nil) Utils.execute("InterleaveDataset", [input_dataset, other_arguments, cycle_length, block_length], f: f, output_types: output_types, output_shapes: output_shapes) end |
.inv(x: nil) ⇒ Object
1669 1670 1671 |
# File 'lib/tensorflow/raw_ops.rb', line 1669 def inv(x: nil) Utils.execute("Inv", [x]) end |
.inv_grad(y: nil, dy: nil) ⇒ Object
1673 1674 1675 |
# File 'lib/tensorflow/raw_ops.rb', line 1673 def inv_grad(y: nil, dy: nil) Utils.execute("InvGrad", [y, dy]) end |
.invert(x: nil) ⇒ Object
1677 1678 1679 |
# File 'lib/tensorflow/raw_ops.rb', line 1677 def invert(x: nil) Utils.execute("Invert", [x]) end |
.invert_permutation(x: nil) ⇒ Object
1681 1682 1683 |
# File 'lib/tensorflow/raw_ops.rb', line 1681 def invert_permutation(x: nil) Utils.execute("InvertPermutation", [x]) end |
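Usage sketch for InvertPermutation (illustrative; assumes a plain Ruby integer array is accepted). The result y satisfies y[x[i]] = i:
inverse = TensorFlow::RawOps.invert_permutation(x: [2, 0, 1])
# expected: [1, 2, 0]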
.irfft(input: nil, fft_length: nil) ⇒ Object
1549 1550 1551 |
# File 'lib/tensorflow/raw_ops.rb', line 1549 def irfft(input: nil, fft_length: nil) Utils.execute("IRFFT", [input, fft_length]) end |
.irfft2d(input: nil, fft_length: nil) ⇒ Object
1553 1554 1555 |
# File 'lib/tensorflow/raw_ops.rb', line 1553 def irfft2d(input: nil, fft_length: nil) Utils.execute("IRFFT2D", [input, fft_length]) end |
.irfft3d(input: nil, fft_length: nil) ⇒ Object
1557 1558 1559 |
# File 'lib/tensorflow/raw_ops.rb', line 1557 def irfft3d(input: nil, fft_length: nil) Utils.execute("IRFFT3D", [input, fft_length]) end |
.is_boosted_trees_ensemble_initialized(tree_ensemble_handle: nil) ⇒ Object
1685 1686 1687 |
# File 'lib/tensorflow/raw_ops.rb', line 1685 def is_boosted_trees_ensemble_initialized(tree_ensemble_handle: nil) Utils.execute("IsBoostedTreesEnsembleInitialized", [tree_ensemble_handle]) end |
.is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle: nil) ⇒ Object
1689 1690 1691 |
# File 'lib/tensorflow/raw_ops.rb', line 1689 def is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle: nil) Utils.execute("IsBoostedTreesQuantileStreamResourceInitialized", [quantile_stream_resource_handle]) end |
.is_finite(x: nil) ⇒ Object
1693 1694 1695 |
# File 'lib/tensorflow/raw_ops.rb', line 1693 def is_finite(x: nil) Utils.execute("IsFinite", [x]) end |
.is_inf(x: nil) ⇒ Object
1697 1698 1699 |
# File 'lib/tensorflow/raw_ops.rb', line 1697 def is_inf(x: nil) Utils.execute("IsInf", [x]) end |
.is_nan(x: nil) ⇒ Object
1701 1702 1703 |
# File 'lib/tensorflow/raw_ops.rb', line 1701 def is_nan(x: nil) Utils.execute("IsNan", [x]) end |
.is_variable_initialized(ref: nil, dtype: nil) ⇒ Object
1705 1706 1707 |
# File 'lib/tensorflow/raw_ops.rb', line 1705 def is_variable_initialized(ref: nil, dtype: nil) Utils.execute("IsVariableInitialized", [ref], dtype: dtype) end |
.iterator(shared_name: nil, container: nil, output_types: nil, output_shapes: nil) ⇒ Object
1709 1710 1711 |
# File 'lib/tensorflow/raw_ops.rb', line 1709 def iterator(shared_name: nil, container: nil, output_types: nil, output_shapes: nil) Utils.execute("Iterator", [], shared_name: shared_name, container: container, output_types: output_types, output_shapes: output_shapes) end |
.iterator_from_string_handle(string_handle: nil, output_types: nil, output_shapes: nil) ⇒ Object
1713 1714 1715 |
# File 'lib/tensorflow/raw_ops.rb', line 1713 def iterator_from_string_handle(string_handle: nil, output_types: nil, output_shapes: nil) Utils.execute("IteratorFromStringHandle", [string_handle], output_types: output_types, output_shapes: output_shapes) end |
.iterator_from_string_handle_v2(string_handle: nil, output_types: nil, output_shapes: nil) ⇒ Object
1717 1718 1719 |
# File 'lib/tensorflow/raw_ops.rb', line 1717 def iterator_from_string_handle_v2(string_handle: nil, output_types: nil, output_shapes: nil) Utils.execute("IteratorFromStringHandleV2", [string_handle], output_types: output_types, output_shapes: output_shapes) end |
.iterator_get_next(iterator: nil, output_types: nil, output_shapes: nil) ⇒ Object
1721 1722 1723 |
# File 'lib/tensorflow/raw_ops.rb', line 1721 def iterator_get_next(iterator: nil, output_types: nil, output_shapes: nil) Utils.execute("IteratorGetNext", [iterator], output_types: output_types, output_shapes: output_shapes) end |
.iterator_get_next_as_optional(iterator: nil, output_types: nil, output_shapes: nil) ⇒ Object
1725 1726 1727 |
# File 'lib/tensorflow/raw_ops.rb', line 1725 def iterator_get_next_as_optional(iterator: nil, output_types: nil, output_shapes: nil) Utils.execute("IteratorGetNextAsOptional", [iterator], output_types: output_types, output_shapes: output_shapes) end |
.iterator_get_next_sync(iterator: nil, output_types: nil, output_shapes: nil) ⇒ Object
1729 1730 1731 |
# File 'lib/tensorflow/raw_ops.rb', line 1729 def iterator_get_next_sync(iterator: nil, output_types: nil, output_shapes: nil) Utils.execute("IteratorGetNextSync", [iterator], output_types: output_types, output_shapes: output_shapes) end |
.iterator_to_string_handle(resource_handle: nil) ⇒ Object
1733 1734 1735 |
# File 'lib/tensorflow/raw_ops.rb', line 1733 def iterator_to_string_handle(resource_handle: nil) Utils.execute("IteratorToStringHandle", [resource_handle]) end |
.iterator_v2(shared_name: nil, container: nil, output_types: nil, output_shapes: nil) ⇒ Object
1737 1738 1739 |
# File 'lib/tensorflow/raw_ops.rb', line 1737 def iterator_v2(shared_name: nil, container: nil, output_types: nil, output_shapes: nil) Utils.execute("IteratorV2", [], shared_name: shared_name, container: container, output_types: output_types, output_shapes: output_shapes) end |
.kmc2_chain_initialization(distances: nil, seed: nil) ⇒ Object
1741 1742 1743 |
# File 'lib/tensorflow/raw_ops.rb', line 1741 def kmc2_chain_initialization(distances: nil, seed: nil) Utils.execute("KMC2ChainInitialization", [distances, seed]) end |
.kmeans_plus_plus_initialization(points: nil, num_to_sample: nil, seed: nil, num_retries_per_sample: nil) ⇒ Object
1745 1746 1747 |
# File 'lib/tensorflow/raw_ops.rb', line 1745 def kmeans_plus_plus_initialization(points: nil, num_to_sample: nil, seed: nil, num_retries_per_sample: nil) Utils.execute("KmeansPlusPlusInitialization", [points, num_to_sample, seed, num_retries_per_sample]) end |
.l2_loss(t: nil) ⇒ Object
1749 1750 1751 |
# File 'lib/tensorflow/raw_ops.rb', line 1749 def l2_loss(t: nil) Utils.execute("L2Loss", [t]) end |
.leaky_relu(features: nil, alpha: nil) ⇒ Object
1765 1766 1767 |
# File 'lib/tensorflow/raw_ops.rb', line 1765 def leaky_relu(features: nil, alpha: nil) Utils.execute("LeakyRelu", [features], alpha: alpha) end |
.leaky_relu_grad(gradients: nil, features: nil, alpha: nil) ⇒ Object
1769 1770 1771 |
# File 'lib/tensorflow/raw_ops.rb', line 1769 def leaky_relu_grad(gradients: nil, features: nil, alpha: nil) Utils.execute("LeakyReluGrad", [gradients, features], alpha: alpha) end |
.learned_unigram_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, seed: nil, seed2: nil) ⇒ Object
1773 1774 1775 |
# File 'lib/tensorflow/raw_ops.rb', line 1773 def learned_unigram_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, seed: nil, seed2: nil) Utils.execute("LearnedUnigramCandidateSampler", [true_classes], num_true: num_true, num_sampled: num_sampled, unique: unique, range_max: range_max, seed: seed, seed2: seed2) end |
.left_shift(x: nil, y: nil) ⇒ Object
1777 1778 1779 |
# File 'lib/tensorflow/raw_ops.rb', line 1777 def left_shift(x: nil, y: nil) Utils.execute("LeftShift", [x, y]) end |
.less(x: nil, y: nil) ⇒ Object
1781 1782 1783 |
# File 'lib/tensorflow/raw_ops.rb', line 1781 def less(x: nil, y: nil) Utils.execute("Less", [x, y]) end |
.less_equal(x: nil, y: nil) ⇒ Object
1785 1786 1787 |
# File 'lib/tensorflow/raw_ops.rb', line 1785 def less_equal(x: nil, y: nil) Utils.execute("LessEqual", [x, y]) end |
.lgamma(x: nil) ⇒ Object
1789 1790 1791 |
# File 'lib/tensorflow/raw_ops.rb', line 1789 def lgamma(x: nil) Utils.execute("Lgamma", [x]) end |
.lin_space(start: nil, stop: nil, num: nil) ⇒ Object
1793 1794 1795 |
# File 'lib/tensorflow/raw_ops.rb', line 1793 def lin_space(start: nil, stop: nil, num: nil) Utils.execute("LinSpace", [start, stop, num]) end |
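Usage sketch for LinSpace (illustrative; assumes Ruby floats and integers convert to the float start/stop and integer num tensors the op expects):
# 5 evenly spaced values from 0.0 to 1.0, endpoints included
points = TensorFlow::RawOps.lin_space(start: 0.0, stop: 1.0, num: 5)
# expected: [0.0, 0.25, 0.5, 0.75, 1.0]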
.list_diff(x: nil, y: nil, out_idx: nil) ⇒ Object
1797 1798 1799 |
# File 'lib/tensorflow/raw_ops.rb', line 1797 def list_diff(x: nil, y: nil, out_idx: nil) Utils.execute("ListDiff", [x, y], out_idx: out_idx) end |
.lmdb_reader(container: nil, shared_name: nil) ⇒ Object
1753 1754 1755 |
# File 'lib/tensorflow/raw_ops.rb', line 1753 def lmdb_reader(container: nil, shared_name: nil) Utils.execute("LMDBReader", [], container: container, shared_name: shared_name) end |
.load_and_remap_matrix(ckpt_path: nil, old_tensor_name: nil, row_remapping: nil, col_remapping: nil, initializing_values: nil, num_rows: nil, num_cols: nil, max_rows_in_memory: nil) ⇒ Object
1801 1802 1803 |
# File 'lib/tensorflow/raw_ops.rb', line 1801 def load_and_remap_matrix(ckpt_path: nil, old_tensor_name: nil, row_remapping: nil, col_remapping: nil, initializing_values: nil, num_rows: nil, num_cols: nil, max_rows_in_memory: nil) Utils.execute("LoadAndRemapMatrix", [ckpt_path, old_tensor_name, row_remapping, col_remapping, initializing_values], num_rows: num_rows, num_cols: num_cols, max_rows_in_memory: max_rows_in_memory) end |
.load_tpu_embedding_adadelta_parameters(parameters: nil, accumulators: nil, updates: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1813 1814 1815 |
# File 'lib/tensorflow/raw_ops.rb', line 1813 def load_tpu_embedding_adadelta_parameters(parameters: nil, accumulators: nil, updates: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingAdadeltaParameters", [parameters, accumulators, updates], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters: nil, accumulators: nil, updates: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1817 1818 1819 |
# File 'lib/tensorflow/raw_ops.rb', line 1817 def load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters: nil, accumulators: nil, updates: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug", [parameters, accumulators, updates, gradient_accumulators], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_adagrad_parameters(parameters: nil, accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1821 1822 1823 |
# File 'lib/tensorflow/raw_ops.rb', line 1821 def load_tpu_embedding_adagrad_parameters(parameters: nil, accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingAdagradParameters", [parameters, accumulators], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters: nil, accumulators: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1825 1826 1827 |
# File 'lib/tensorflow/raw_ops.rb', line 1825 def load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters: nil, accumulators: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingAdagradParametersGradAccumDebug", [parameters, accumulators, gradient_accumulators], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_adam_parameters(parameters: nil, momenta: nil, velocities: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1805 1806 1807 |
# File 'lib/tensorflow/raw_ops.rb', line 1805 def load_tpu_embedding_adam_parameters(parameters: nil, momenta: nil, velocities: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingADAMParameters", [parameters, momenta, velocities], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_adam_parameters_grad_accum_debug(parameters: nil, momenta: nil, velocities: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1809 1810 1811 |
# File 'lib/tensorflow/raw_ops.rb', line 1809 def load_tpu_embedding_adam_parameters_grad_accum_debug(parameters: nil, momenta: nil, velocities: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingADAMParametersGradAccumDebug", [parameters, momenta, velocities, gradient_accumulators], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_centered_rms_prop_parameters(parameters: nil, ms: nil, mom: nil, mg: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1829 1830 1831 |
# File 'lib/tensorflow/raw_ops.rb', line 1829 def load_tpu_embedding_centered_rms_prop_parameters(parameters: nil, ms: nil, mom: nil, mg: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingCenteredRMSPropParameters", [parameters, ms, mom, mg], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_ftrl_parameters(parameters: nil, accumulators: nil, linears: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1833 1834 1835 |
# File 'lib/tensorflow/raw_ops.rb', line 1833 def load_tpu_embedding_ftrl_parameters(parameters: nil, accumulators: nil, linears: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingFTRLParameters", [parameters, accumulators, linears], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters: nil, accumulators: nil, linears: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1837 1838 1839 |
# File 'lib/tensorflow/raw_ops.rb', line 1837 def load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters: nil, accumulators: nil, linears: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingFTRLParametersGradAccumDebug", [parameters, accumulators, linears, gradient_accumulators], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_mdl_adagrad_light_parameters(parameters: nil, accumulators: nil, weights: nil, benefits: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1841 1842 1843 |
# File 'lib/tensorflow/raw_ops.rb', line 1841 def load_tpu_embedding_mdl_adagrad_light_parameters(parameters: nil, accumulators: nil, weights: nil, benefits: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingMDLAdagradLightParameters", [parameters, accumulators, weights, benefits], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_momentum_parameters(parameters: nil, momenta: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1845 1846 1847 |
# File 'lib/tensorflow/raw_ops.rb', line 1845 def load_tpu_embedding_momentum_parameters(parameters: nil, momenta: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingMomentumParameters", [parameters, momenta], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters: nil, momenta: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1849 1850 1851 |
# File 'lib/tensorflow/raw_ops.rb', line 1849 def load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters: nil, momenta: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingMomentumParametersGradAccumDebug", [parameters, momenta, gradient_accumulators], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_proximal_adagrad_parameters(parameters: nil, accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1853 1854 1855 |
# File 'lib/tensorflow/raw_ops.rb', line 1853 def load_tpu_embedding_proximal_adagrad_parameters(parameters: nil, accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingProximalAdagradParameters", [parameters, accumulators], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters: nil, accumulators: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1857 1858 1859 |
# File 'lib/tensorflow/raw_ops.rb', line 1857 def load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters: nil, accumulators: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug", [parameters, accumulators, gradient_accumulators], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_rms_prop_parameters(parameters: nil, ms: nil, mom: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1861 1862 1863 |
# File 'lib/tensorflow/raw_ops.rb', line 1861 def load_tpu_embedding_rms_prop_parameters(parameters: nil, ms: nil, mom: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingRMSPropParameters", [parameters, ms, mom], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters: nil, ms: nil, mom: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1865 1866 1867 |
# File 'lib/tensorflow/raw_ops.rb', line 1865 def load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters: nil, ms: nil, mom: nil, gradient_accumulators: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingRMSPropParametersGradAccumDebug", [parameters, ms, mom, gradient_accumulators], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.load_tpu_embedding_stochastic_gradient_descent_parameters(parameters: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
1869 1870 1871 |
# File 'lib/tensorflow/raw_ops.rb', line 1869 def load_tpu_embedding_stochastic_gradient_descent_parameters(parameters: nil, table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("LoadTPUEmbeddingStochasticGradientDescentParameters", [parameters], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.log(x: nil) ⇒ Object
1873 1874 1875 |
# File 'lib/tensorflow/raw_ops.rb', line 1873 def log(x: nil) Utils.execute("Log", [x]) end |
.log1p(x: nil) ⇒ Object
1877 1878 1879 |
# File 'lib/tensorflow/raw_ops.rb', line 1877 def log1p(x: nil) Utils.execute("Log1p", [x]) end |
.log_matrix_determinant(input: nil) ⇒ Object
1881 1882 1883 |
# File 'lib/tensorflow/raw_ops.rb', line 1881 def log_matrix_determinant(input: nil) Utils.execute("LogMatrixDeterminant", [input]) end |
.log_softmax(logits: nil) ⇒ Object
1885 1886 1887 |
# File 'lib/tensorflow/raw_ops.rb', line 1885 def log_softmax(logits: nil) Utils.execute("LogSoftmax", [logits]) end |
.log_uniform_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, seed: nil, seed2: nil) ⇒ Object
1889 1890 1891 |
# File 'lib/tensorflow/raw_ops.rb', line 1889 def log_uniform_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, seed: nil, seed2: nil) Utils.execute("LogUniformCandidateSampler", [true_classes], num_true: num_true, num_sampled: num_sampled, unique: unique, range_max: range_max, seed: seed, seed2: seed2) end |
.logical_and(x: nil, y: nil) ⇒ Object
1893 1894 1895 |
# File 'lib/tensorflow/raw_ops.rb', line 1893 def logical_and(x: nil, y: nil) Utils.execute("LogicalAnd", [x, y]) end |
.logical_not(x: nil) ⇒ Object
1897 1898 1899 |
# File 'lib/tensorflow/raw_ops.rb', line 1897 def logical_not(x: nil) Utils.execute("LogicalNot", [x]) end |
.logical_or(x: nil, y: nil) ⇒ Object
1901 1902 1903 |
# File 'lib/tensorflow/raw_ops.rb', line 1901 def logical_or(x: nil, y: nil) Utils.execute("LogicalOr", [x, y]) end |
.lookup_table_export(table_handle: nil) ⇒ Object
1905 1906 1907 |
# File 'lib/tensorflow/raw_ops.rb', line 1905 def lookup_table_export(table_handle: nil) Utils.execute("LookupTableExport", [table_handle]) end |
.lookup_table_export_v2(table_handle: nil) ⇒ Object
1909 1910 1911 |
# File 'lib/tensorflow/raw_ops.rb', line 1909 def lookup_table_export_v2(table_handle: nil) Utils.execute("LookupTableExportV2", [table_handle]) end |
.lookup_table_find(table_handle: nil, keys: nil, default_value: nil) ⇒ Object
1913 1914 1915 |
# File 'lib/tensorflow/raw_ops.rb', line 1913 def lookup_table_find(table_handle: nil, keys: nil, default_value: nil) Utils.execute("LookupTableFind", [table_handle, keys, default_value]) end |
.lookup_table_find_v2(table_handle: nil, keys: nil, default_value: nil) ⇒ Object
1917 1918 1919 |
# File 'lib/tensorflow/raw_ops.rb', line 1917 def lookup_table_find_v2(table_handle: nil, keys: nil, default_value: nil) Utils.execute("LookupTableFindV2", [table_handle, keys, default_value]) end |
.lookup_table_import(table_handle: nil, keys: nil, values: nil) ⇒ Object
1921 1922 1923 |
# File 'lib/tensorflow/raw_ops.rb', line 1921 def lookup_table_import(table_handle: nil, keys: nil, values: nil) Utils.execute("LookupTableImport", [table_handle, keys, values]) end |
.lookup_table_import_v2(table_handle: nil, keys: nil, values: nil) ⇒ Object
1925 1926 1927 |
# File 'lib/tensorflow/raw_ops.rb', line 1925 def lookup_table_import_v2(table_handle: nil, keys: nil, values: nil) Utils.execute("LookupTableImportV2", [table_handle, keys, values]) end |
.lookup_table_insert(table_handle: nil, keys: nil, values: nil) ⇒ Object
1929 1930 1931 |
# File 'lib/tensorflow/raw_ops.rb', line 1929 def lookup_table_insert(table_handle: nil, keys: nil, values: nil) Utils.execute("LookupTableInsert", [table_handle, keys, values]) end |
.lookup_table_insert_v2(table_handle: nil, keys: nil, values: nil) ⇒ Object
1933 1934 1935 |
# File 'lib/tensorflow/raw_ops.rb', line 1933 def lookup_table_insert_v2(table_handle: nil, keys: nil, values: nil) Utils.execute("LookupTableInsertV2", [table_handle, keys, values]) end |
.lookup_table_remove_v2(table_handle: nil, keys: nil) ⇒ Object
1937 1938 1939 |
# File 'lib/tensorflow/raw_ops.rb', line 1937 def lookup_table_remove_v2(table_handle: nil, keys: nil) Utils.execute("LookupTableRemoveV2", [table_handle, keys]) end |
.lookup_table_size(table_handle: nil) ⇒ Object
1941 1942 1943 |
# File 'lib/tensorflow/raw_ops.rb', line 1941 def lookup_table_size(table_handle: nil) Utils.execute("LookupTableSize", [table_handle]) end |
.lookup_table_size_v2(table_handle: nil) ⇒ Object
1945 1946 1947 |
# File 'lib/tensorflow/raw_ops.rb', line 1945 def lookup_table_size_v2(table_handle: nil) Utils.execute("LookupTableSizeV2", [table_handle]) end |
.loop_cond(input: nil) ⇒ Object
1949 1950 1951 |
# File 'lib/tensorflow/raw_ops.rb', line 1949 def loop_cond(input: nil) Utils.execute("LoopCond", [input]) end |
.lower_bound(sorted_inputs: nil, values: nil, out_type: nil) ⇒ Object
1953 1954 1955 |
# File 'lib/tensorflow/raw_ops.rb', line 1953 def lower_bound(sorted_inputs: nil, values: nil, out_type: nil) Utils.execute("LowerBound", [sorted_inputs, values], out_type: out_type) end |
.lrn(input: nil, depth_radius: nil, bias: nil, alpha: nil, beta: nil) ⇒ Object
1757 1758 1759 |
# File 'lib/tensorflow/raw_ops.rb', line 1757 def lrn(input: nil, depth_radius: nil, bias: nil, alpha: nil, beta: nil) Utils.execute("LRN", [input], depth_radius: depth_radius, bias: bias, alpha: alpha, beta: beta) end |
.lrn_grad(input_grads: nil, input_image: nil, output_image: nil, depth_radius: nil, bias: nil, alpha: nil, beta: nil) ⇒ Object
1761 1762 1763 |
# File 'lib/tensorflow/raw_ops.rb', line 1761 def lrn_grad(input_grads: nil, input_image: nil, output_image: nil, depth_radius: nil, bias: nil, alpha: nil, beta: nil) Utils.execute("LRNGrad", [input_grads, input_image, output_image], depth_radius: depth_radius, bias: bias, alpha: alpha, beta: beta) end |
.lu(input: nil, output_idx_type: nil) ⇒ Object
1957 1958 1959 |
# File 'lib/tensorflow/raw_ops.rb', line 1957 def lu(input: nil, output_idx_type: nil) Utils.execute("Lu", [input], output_idx_type: output_idx_type) end |
.make_iterator(dataset: nil, iterator: nil) ⇒ Object
1961 1962 1963 |
# File 'lib/tensorflow/raw_ops.rb', line 1961 def make_iterator(dataset: nil, iterator: nil) Utils.execute("MakeIterator", [dataset, iterator]) end |
.map_clear(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
1965 1966 1967 |
# File 'lib/tensorflow/raw_ops.rb', line 1965 def map_clear(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("MapClear", [], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.map_dataset(input_dataset: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil, use_inter_op_parallelism: nil, preserve_cardinality: nil) ⇒ Object
1969 1970 1971 |
# File 'lib/tensorflow/raw_ops.rb', line 1969 def map_dataset(input_dataset: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil, use_inter_op_parallelism: nil, preserve_cardinality: nil) Utils.execute("MapDataset", [input_dataset, other_arguments], f: f, output_types: output_types, output_shapes: output_shapes, use_inter_op_parallelism: use_inter_op_parallelism, preserve_cardinality: preserve_cardinality) end |
.map_defun(arguments: nil, captured_inputs: nil, output_types: nil, output_shapes: nil, f: nil, max_intra_op_parallelism: nil) ⇒ Object
1973 1974 1975 |
# File 'lib/tensorflow/raw_ops.rb', line 1973 def map_defun(arguments: nil, captured_inputs: nil, output_types: nil, output_shapes: nil, f: nil, max_intra_op_parallelism: nil) Utils.execute("MapDefun", [arguments, captured_inputs], output_types: output_types, output_shapes: output_shapes, f: f, max_intra_op_parallelism: max_intra_op_parallelism) end |
.map_incomplete_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
1977 1978 1979 |
# File 'lib/tensorflow/raw_ops.rb', line 1977 def map_incomplete_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("MapIncompleteSize", [], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.map_peek(key: nil, indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
1981 1982 1983 |
# File 'lib/tensorflow/raw_ops.rb', line 1981 def map_peek(key: nil, indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("MapPeek", [key, indices], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.map_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
1985 1986 1987 |
# File 'lib/tensorflow/raw_ops.rb', line 1985 def map_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("MapSize", [], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.map_stage(key: nil, indices: nil, values: nil, capacity: nil, memory_limit: nil, dtypes: nil, fake_dtypes: nil, container: nil, shared_name: nil) ⇒ Object
1989 1990 1991 |
# File 'lib/tensorflow/raw_ops.rb', line 1989 def map_stage(key: nil, indices: nil, values: nil, capacity: nil, memory_limit: nil, dtypes: nil, fake_dtypes: nil, container: nil, shared_name: nil) Utils.execute("MapStage", [key, indices, values], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, fake_dtypes: fake_dtypes, container: container, shared_name: shared_name) end |
.map_unstage(key: nil, indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
1993 1994 1995 |
# File 'lib/tensorflow/raw_ops.rb', line 1993 def map_unstage(key: nil, indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("MapUnstage", [key, indices], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.map_unstage_no_key(indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
1997 1998 1999 |
# File 'lib/tensorflow/raw_ops.rb', line 1997 def map_unstage_no_key(indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("MapUnstageNoKey", [indices], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.mat_mul(a: nil, b: nil, transpose_a: nil, transpose_b: nil) ⇒ Object
2001 2002 2003 |
# File 'lib/tensorflow/raw_ops.rb', line 2001 def mat_mul(a: nil, b: nil, transpose_a: nil, transpose_b: nil) Utils.execute("MatMul", [a, b], transpose_a: transpose_a, transpose_b: transpose_b) end |
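Usage sketch for MatMul (illustrative; assumes nested Ruby arrays convert to 2-D float tensors):
# (2x3) x (3x2) matrix product
a = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
b = [[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]]
c = TensorFlow::RawOps.mat_mul(a: a, b: b, transpose_a: false, transpose_b: false)
# expected: [[58.0, 64.0], [139.0, 154.0]]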
.matching_files(pattern: nil) ⇒ Object
2005 2006 2007 |
# File 'lib/tensorflow/raw_ops.rb', line 2005 def matching_files(pattern: nil) Utils.execute("MatchingFiles", [pattern]) end |
.matrix_band_part(input: nil, num_lower: nil, num_upper: nil) ⇒ Object
2009 2010 2011 |
# File 'lib/tensorflow/raw_ops.rb', line 2009 def matrix_band_part(input: nil, num_lower: nil, num_upper: nil) Utils.execute("MatrixBandPart", [input, num_lower, num_upper]) end |
.matrix_determinant(input: nil) ⇒ Object
2013 2014 2015 |
# File 'lib/tensorflow/raw_ops.rb', line 2013 def matrix_determinant(input: nil) Utils.execute("MatrixDeterminant", [input]) end |
.matrix_diag(diagonal: nil) ⇒ Object
2017 2018 2019 |
# File 'lib/tensorflow/raw_ops.rb', line 2017 def matrix_diag(diagonal: nil) Utils.execute("MatrixDiag", [diagonal]) end |
.matrix_diag_part(input: nil) ⇒ Object
2021 2022 2023 |
# File 'lib/tensorflow/raw_ops.rb', line 2021 def matrix_diag_part(input: nil) Utils.execute("MatrixDiagPart", [input]) end |
.matrix_exponential(input: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2025 def matrix_exponential(input: nil) Utils.execute("MatrixExponential", [input]) end |
.matrix_inverse(input: nil, adjoint: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2029 def matrix_inverse(input: nil, adjoint: nil) Utils.execute("MatrixInverse", [input], adjoint: adjoint) end |
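For illustration, inverting a small matrix with this op (a sketch; TensorFlow.constant is assumed for tensor construction and is not part of this listing).

  # TensorFlow.constant assumed for the input.
  a = TensorFlow.constant([[4.0, 7.0], [2.0, 6.0]])
  # adjoint: true would invert the conjugate transpose instead of a itself.
  TensorFlow::RawOps.matrix_inverse(input: a, adjoint: false)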
.matrix_logarithm(input: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2033 def matrix_logarithm(input: nil) Utils.execute("MatrixLogarithm", [input]) end |
.matrix_set_diag(input: nil, diagonal: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2037 def matrix_set_diag(input: nil, diagonal: nil) Utils.execute("MatrixSetDiag", [input, diagonal]) end |
.matrix_solve(matrix: nil, rhs: nil, adjoint: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2041 def matrix_solve(matrix: nil, rhs: nil, adjoint: nil) Utils.execute("MatrixSolve", [matrix, rhs], adjoint: adjoint) end |
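A small worked example of solving a linear system with this op, under the assumption that TensorFlow.constant (not part of this listing) builds the tensors.

  # TensorFlow.constant assumed for the inputs.
  matrix = TensorFlow.constant([[3.0, 1.0], [1.0, 2.0]])
  rhs = TensorFlow.constant([[9.0], [8.0]])
  # Solves matrix * x = rhs for x; adjoint: true solves with the conjugate transpose.
  TensorFlow::RawOps.matrix_solve(matrix: matrix, rhs: rhs, adjoint: false)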
.matrix_solve_ls(matrix: nil, rhs: nil, l2_regularizer: nil, fast: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2045 def matrix_solve_ls(matrix: nil, rhs: nil, l2_regularizer: nil, fast: nil) Utils.execute("MatrixSolveLs", [matrix, rhs, l2_regularizer], fast: fast) end |
.matrix_square_root(input: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2049 def matrix_square_root(input: nil) Utils.execute("MatrixSquareRoot", [input]) end |
.matrix_triangular_solve(matrix: nil, rhs: nil, lower: nil, adjoint: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2053 def matrix_triangular_solve(matrix: nil, rhs: nil, lower: nil, adjoint: nil) Utils.execute("MatrixTriangularSolve", [matrix, rhs], lower: lower, adjoint: adjoint) end |
.max(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2057 def max(input: nil, reduction_indices: nil, keep_dims: nil) Utils.execute("Max", [input, reduction_indices], keep_dims: keep_dims) end |
.max_pool(input: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2061 def max_pool(input: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) Utils.execute("MaxPool", [input], ksize: ksize, strides: strides, padding: padding, data_format: data_format) end |
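An illustrative pooling call; the 4-D NHWC input is built with TensorFlow.constant, which is assumed here and is not part of this listing.

  # A single 4x4 one-channel image in [batch, height, width, channels] layout.
  images = TensorFlow.constant([[[[1.0], [2.0], [3.0], [4.0]],
                                 [[5.0], [6.0], [7.0], [8.0]],
                                 [[9.0], [10.0], [11.0], [12.0]],
                                 [[13.0], [14.0], [15.0], [16.0]]]])
  # 2x2 window, stride 2, no padding; ksize and strides follow the NHWC layout.
  TensorFlow::RawOps.max_pool(input: images, ksize: [1, 2, 2, 1], strides: [1, 2, 2, 1], padding: "VALID", data_format: "NHWC")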
.max_pool3d(input: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2065 def max_pool3d(input: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) Utils.execute("MaxPool3D", [input], ksize: ksize, strides: strides, padding: padding, data_format: data_format) end |
.max_pool3d_grad(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2069 def max_pool3d_grad(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) Utils.execute("MaxPool3DGrad", [orig_input, orig_output, grad], ksize: ksize, strides: strides, padding: padding, data_format: data_format) end |
.max_pool3d_grad_grad(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2073 def max_pool3d_grad_grad(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) Utils.execute("MaxPool3DGradGrad", [orig_input, orig_output, grad], ksize: ksize, strides: strides, padding: padding, data_format: data_format) end |
.max_pool_grad(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2077 def max_pool_grad(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) Utils.execute("MaxPoolGrad", [orig_input, orig_output, grad], ksize: ksize, strides: strides, padding: padding, data_format: data_format) end |
.max_pool_grad_grad(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2081 def max_pool_grad_grad(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) Utils.execute("MaxPoolGradGrad", [orig_input, orig_output, grad], ksize: ksize, strides: strides, padding: padding, data_format: data_format) end |
.max_pool_grad_grad_v2(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2085 def max_pool_grad_grad_v2(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) Utils.execute("MaxPoolGradGradV2", [orig_input, orig_output, grad, ksize, strides], padding: padding, data_format: data_format) end |
.max_pool_grad_grad_with_argmax(input: nil, grad: nil, argmax: nil, ksize: nil, strides: nil, padding: nil, include_batch_in_index: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2089 def max_pool_grad_grad_with_argmax(input: nil, grad: nil, argmax: nil, ksize: nil, strides: nil, padding: nil, include_batch_in_index: nil) Utils.execute("MaxPoolGradGradWithArgmax", [input, grad, argmax], ksize: ksize, strides: strides, padding: padding, include_batch_in_index: include_batch_in_index) end |
.max_pool_grad_v2(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2093 def max_pool_grad_v2(orig_input: nil, orig_output: nil, grad: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) Utils.execute("MaxPoolGradV2", [orig_input, orig_output, grad, ksize, strides], padding: padding, data_format: data_format) end |
.max_pool_grad_with_argmax(input: nil, grad: nil, argmax: nil, ksize: nil, strides: nil, padding: nil, include_batch_in_index: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2097 def max_pool_grad_with_argmax(input: nil, grad: nil, argmax: nil, ksize: nil, strides: nil, padding: nil, include_batch_in_index: nil) Utils.execute("MaxPoolGradWithArgmax", [input, grad, argmax], ksize: ksize, strides: strides, padding: padding, include_batch_in_index: include_batch_in_index) end |
.max_pool_v2(input: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2101 def max_pool_v2(input: nil, ksize: nil, strides: nil, padding: nil, data_format: nil) Utils.execute("MaxPoolV2", [input, ksize, strides], padding: padding, data_format: data_format) end |
.max_pool_with_argmax(input: nil, ksize: nil, strides: nil, padding: nil, include_batch_in_index: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2105 def max_pool_with_argmax(input: nil, ksize: nil, strides: nil, padding: nil, include_batch_in_index: nil) Utils.execute("MaxPoolWithArgmax", [input], ksize: ksize, strides: strides, padding: padding, include_batch_in_index: include_batch_in_index) end |
.maximum(x: nil, y: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2109 def maximum(x: nil, y: nil) Utils.execute("Maximum", [x, y]) end |
.mean(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2113 def mean(input: nil, reduction_indices: nil, keep_dims: nil) Utils.execute("Mean", [input, reduction_indices], keep_dims: keep_dims) end |
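A sketch of reducing along one axis with this op, assuming TensorFlow.constant (not part of this listing) for the inputs; the same pattern applies to max, min, prod, all, and any above.

  # TensorFlow.constant assumed for the inputs.
  x = TensorFlow.constant([[1.0, 2.0], [3.0, 4.0]])
  axes = TensorFlow.constant([1])
  # Averages across axis 1; keep_dims: true would keep a size-1 axis in its place.
  TensorFlow::RawOps.mean(input: x, reduction_indices: axes, keep_dims: false)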
.merge(inputs: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2117 def merge(inputs: nil) Utils.execute("Merge", [inputs]) end |
.merge_summary(inputs: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2121 def merge_summary(inputs: nil) Utils.execute("MergeSummary", [inputs]) end |
.merge_v2_checkpoints(checkpoint_prefixes: nil, destination_prefix: nil, delete_old_dirs: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2125 def merge_v2_checkpoints(checkpoint_prefixes: nil, destination_prefix: nil, delete_old_dirs: nil) Utils.execute("MergeV2Checkpoints", [checkpoint_prefixes, destination_prefix], delete_old_dirs: delete_old_dirs) end |
.mfcc(spectrogram: nil, sample_rate: nil, upper_frequency_limit: nil, lower_frequency_limit: nil, filterbank_channel_count: nil, dct_coefficient_count: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2129 def mfcc(spectrogram: nil, sample_rate: nil, upper_frequency_limit: nil, lower_frequency_limit: nil, filterbank_channel_count: nil, dct_coefficient_count: nil) Utils.execute("Mfcc", [spectrogram, sample_rate], upper_frequency_limit: upper_frequency_limit, lower_frequency_limit: lower_frequency_limit, filterbank_channel_count: filterbank_channel_count, dct_coefficient_count: dct_coefficient_count) end |
.min(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2133 def min(input: nil, reduction_indices: nil, keep_dims: nil) Utils.execute("Min", [input, reduction_indices], keep_dims: keep_dims) end |
.minimum(x: nil, y: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2137 def minimum(x: nil, y: nil) Utils.execute("Minimum", [x, y]) end |
.mirror_pad(input: nil, paddings: nil, mode: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2141 def mirror_pad(input: nil, paddings: nil, mode: nil) Utils.execute("MirrorPad", [input, paddings], mode: mode) end |
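For illustration, a call showing the two padding modes (a sketch; TensorFlow.constant is assumed for tensor construction and is not part of this listing).

  # TensorFlow.constant assumed for the inputs.
  x = TensorFlow.constant([[1, 2, 3], [4, 5, 6]])
  # One row/column of mirrored padding before and after each dimension.
  paddings = TensorFlow.constant([[1, 1], [1, 1]])
  # "REFLECT" mirrors without repeating the edge values; "SYMMETRIC" repeats them.
  TensorFlow::RawOps.mirror_pad(input: x, paddings: paddings, mode: "REFLECT")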
.mirror_pad_grad(input: nil, paddings: nil, mode: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2145 def mirror_pad_grad(input: nil, paddings: nil, mode: nil) Utils.execute("MirrorPadGrad", [input, paddings], mode: mode) end |
.mod(x: nil, y: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2149 def mod(x: nil, y: nil) Utils.execute("Mod", [x, y]) end |
.model_dataset(input_dataset: nil, cpu_budget: nil, output_types: nil, output_shapes: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2153 def model_dataset(input_dataset: nil, cpu_budget: nil, output_types: nil, output_shapes: nil) Utils.execute("ModelDataset", [input_dataset], cpu_budget: cpu_budget, output_types: output_types, output_shapes: output_shapes) end |
.mul(x: nil, y: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2157 def mul(x: nil, y: nil) Utils.execute("Mul", [x, y]) end |
.mul_no_nan(x: nil, y: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2161 def mul_no_nan(x: nil, y: nil) Utils.execute("MulNoNan", [x, y]) end |
.multi_device_iterator(devices: nil, shared_name: nil, container: nil, output_types: nil, output_shapes: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2165 def multi_device_iterator(devices: nil, shared_name: nil, container: nil, output_types: nil, output_shapes: nil) Utils.execute("MultiDeviceIterator", [], devices: devices, shared_name: shared_name, container: container, output_types: output_types, output_shapes: output_shapes) end |
.multi_device_iterator_from_string_handle(string_handle: nil, output_types: nil, output_shapes: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2169 def multi_device_iterator_from_string_handle(string_handle: nil, output_types: nil, output_shapes: nil) Utils.execute("MultiDeviceIteratorFromStringHandle", [string_handle], output_types: output_types, output_shapes: output_shapes) end |
.multi_device_iterator_get_next_from_shard(multi_device_iterator: nil, shard_num: nil, incarnation_id: nil, output_types: nil, output_shapes: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2173 def multi_device_iterator_get_next_from_shard(multi_device_iterator: nil, shard_num: nil, incarnation_id: nil, output_types: nil, output_shapes: nil) Utils.execute("MultiDeviceIteratorGetNextFromShard", [multi_device_iterator, shard_num, incarnation_id], output_types: output_types, output_shapes: output_shapes) end |
.multi_device_iterator_init(dataset: nil, multi_device_iterator: nil, max_buffer_size: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2177 def multi_device_iterator_init(dataset: nil, multi_device_iterator: nil, max_buffer_size: nil) Utils.execute("MultiDeviceIteratorInit", [dataset, multi_device_iterator, max_buffer_size]) end |
.multi_device_iterator_to_string_handle(multi_device_iterator: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2181 def multi_device_iterator_to_string_handle(multi_device_iterator: nil) Utils.execute("MultiDeviceIteratorToStringHandle", [multi_device_iterator]) end |
.multinomial(logits: nil, num_samples: nil, seed: nil, seed2: nil, output_dtype: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2185 def multinomial(logits: nil, num_samples: nil, seed: nil, seed2: nil, output_dtype: nil) Utils.execute("Multinomial", [logits, num_samples], seed: seed, seed2: seed2, output_dtype: output_dtype) end |
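A sketch of drawing class indices from unnormalized log-probabilities with this op; TensorFlow.constant is assumed for the inputs, and the unset output_dtype attribute is assumed to take the op default.

  # TensorFlow.constant assumed for the inputs; one row of logits over three classes.
  logits = TensorFlow.constant([[1.0, 1.0, 1.0]])
  num_samples = TensorFlow.constant(5)
  # Draws 5 class indices for the row; seed/seed2 of 0 let the runtime pick a seed.
  TensorFlow::RawOps.multinomial(logits: logits, num_samples: num_samples, seed: 0, seed2: 0)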
.mutable_dense_hash_table(empty_key: nil, container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil, value_shape: nil, initial_num_buckets: nil, max_load_factor: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2189 def mutable_dense_hash_table(empty_key: nil, container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil, value_shape: nil, initial_num_buckets: nil, max_load_factor: nil) Utils.execute("MutableDenseHashTable", [empty_key], container: container, shared_name: shared_name, use_node_name_sharing: use_node_name_sharing, key_dtype: key_dtype, value_dtype: value_dtype, value_shape: value_shape, initial_num_buckets: initial_num_buckets, max_load_factor: max_load_factor) end |
.mutable_dense_hash_table_v2(empty_key: nil, deleted_key: nil, container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil, value_shape: nil, initial_num_buckets: nil, max_load_factor: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2193 def mutable_dense_hash_table_v2(empty_key: nil, deleted_key: nil, container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil, value_shape: nil, initial_num_buckets: nil, max_load_factor: nil) Utils.execute("MutableDenseHashTableV2", [empty_key, deleted_key], container: container, shared_name: shared_name, use_node_name_sharing: use_node_name_sharing, key_dtype: key_dtype, value_dtype: value_dtype, value_shape: value_shape, initial_num_buckets: initial_num_buckets, max_load_factor: max_load_factor) end |
.mutable_hash_table(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2197 def mutable_hash_table(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil) Utils.execute("MutableHashTable", [], container: container, shared_name: shared_name, use_node_name_sharing: use_node_name_sharing, key_dtype: key_dtype, value_dtype: value_dtype) end |
.mutable_hash_table_of_tensors(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil, value_shape: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2201 def mutable_hash_table_of_tensors(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil, value_shape: nil) Utils.execute("MutableHashTableOfTensors", [], container: container, shared_name: shared_name, use_node_name_sharing: use_node_name_sharing, key_dtype: key_dtype, value_dtype: value_dtype, value_shape: value_shape) end |
.mutable_hash_table_of_tensors_v2(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil, value_shape: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2205 def mutable_hash_table_of_tensors_v2(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil, value_shape: nil) Utils.execute("MutableHashTableOfTensorsV2", [], container: container, shared_name: shared_name, use_node_name_sharing: use_node_name_sharing, key_dtype: key_dtype, value_dtype: value_dtype, value_shape: value_shape) end |
.mutable_hash_table_v2(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2209 def mutable_hash_table_v2(container: nil, shared_name: nil, use_node_name_sharing: nil, key_dtype: nil, value_dtype: nil) Utils.execute("MutableHashTableV2", [], container: container, shared_name: shared_name, use_node_name_sharing: use_node_name_sharing, key_dtype: key_dtype, value_dtype: value_dtype) end |
.mutex_lock(mutex: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2213 def mutex_lock(mutex: nil) Utils.execute("MutexLock", [mutex]) end |
.mutex_v2(container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2217 def mutex_v2(container: nil, shared_name: nil) Utils.execute("MutexV2", [], container: container, shared_name: shared_name) end |
.nccl_all_reduce(input: nil, reduction: nil, num_devices: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2221 def nccl_all_reduce(input: nil, reduction: nil, num_devices: nil, shared_name: nil) Utils.execute("NcclAllReduce", [input], reduction: reduction, num_devices: num_devices, shared_name: shared_name) end |
.nccl_broadcast(input: nil, shape: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2225 def nccl_broadcast(input: nil, shape: nil) Utils.execute("NcclBroadcast", [input], shape: shape) end |
.nccl_reduce(input: nil, reduction: nil, num_devices: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2229 def nccl_reduce(input: nil, reduction: nil, num_devices: nil) Utils.execute("NcclReduce", [input], reduction: reduction, num_devices: num_devices) end |
.nearest_neighbors(points: nil, centers: nil, k: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2233 def nearest_neighbors(points: nil, centers: nil, k: nil) Utils.execute("NearestNeighbors", [points, centers, k]) end |
.neg(x: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2237 def neg(x: nil) Utils.execute("Neg", [x]) end |
.neg_train(w_in: nil, w_out: nil, examples: nil, labels: nil, lr: nil, vocab_count: nil, num_negative_samples: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2241 def neg_train(w_in: nil, w_out: nil, examples: nil, labels: nil, lr: nil, vocab_count: nil, num_negative_samples: nil) Utils.execute("NegTrain", [w_in, w_out, examples, labels, lr], vocab_count: vocab_count, num_negative_samples: num_negative_samples) end |
.next_after(x1: nil, x2: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2245 def next_after(x1: nil, x2: nil) Utils.execute("NextAfter", [x1, x2]) end |
.next_iteration(data: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2249 def next_iteration(data: nil) Utils.execute("NextIteration", [data]) end |
.no_op ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2253 def no_op Utils.execute("NoOp", []) end |
.non_deterministic_ints(shape: nil, dtype: nil, shape_dtype: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2257 def non_deterministic_ints(shape: nil, dtype: nil, shape_dtype: nil) Utils.execute("NonDeterministicInts", [shape], dtype: dtype, shape_dtype: shape_dtype) end |
.non_max_suppression(boxes: nil, scores: nil, max_output_size: nil, iou_threshold: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2261 def non_max_suppression(boxes: nil, scores: nil, max_output_size: nil, iou_threshold: nil) Utils.execute("NonMaxSuppression", [boxes, scores, max_output_size], iou_threshold: iou_threshold) end |
.non_max_suppression_v2(boxes: nil, scores: nil, max_output_size: nil, iou_threshold: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2265 def non_max_suppression_v2(boxes: nil, scores: nil, max_output_size: nil, iou_threshold: nil) Utils.execute("NonMaxSuppressionV2", [boxes, scores, max_output_size, iou_threshold]) end |
.non_max_suppression_v3(boxes: nil, scores: nil, max_output_size: nil, iou_threshold: nil, score_threshold: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2269 def non_max_suppression_v3(boxes: nil, scores: nil, max_output_size: nil, iou_threshold: nil, score_threshold: nil) Utils.execute("NonMaxSuppressionV3", [boxes, scores, max_output_size, iou_threshold, score_threshold]) end |
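An illustrative call that keeps the highest-scoring box among overlapping candidates (a sketch; TensorFlow.constant is assumed for tensor construction and is not part of this listing).

  # TensorFlow.constant assumed for the inputs; boxes are [y1, x1, y2, x2].
  boxes = TensorFlow.constant([[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1]])
  scores = TensorFlow.constant([0.9, 0.8])
  # Returns the indices of the boxes that survive suppression.
  TensorFlow::RawOps.non_max_suppression_v3(boxes: boxes, scores: scores,
    max_output_size: TensorFlow.constant(1), iou_threshold: TensorFlow.constant(0.5),
    score_threshold: TensorFlow.constant(0.0))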
.non_max_suppression_v4(boxes: nil, scores: nil, max_output_size: nil, iou_threshold: nil, score_threshold: nil, pad_to_max_output_size: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2273 def non_max_suppression_v4(boxes: nil, scores: nil, max_output_size: nil, iou_threshold: nil, score_threshold: nil, pad_to_max_output_size: nil) Utils.execute("NonMaxSuppressionV4", [boxes, scores, max_output_size, iou_threshold, score_threshold], pad_to_max_output_size: pad_to_max_output_size) end |
.non_max_suppression_with_overlaps(overlaps: nil, scores: nil, max_output_size: nil, overlap_threshold: nil, score_threshold: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2277 def non_max_suppression_with_overlaps(overlaps: nil, scores: nil, max_output_size: nil, overlap_threshold: nil, score_threshold: nil) Utils.execute("NonMaxSuppressionWithOverlaps", [overlaps, scores, max_output_size, overlap_threshold, score_threshold]) end |
.not_equal(x: nil, y: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2281 def not_equal(x: nil, y: nil) Utils.execute("NotEqual", [x, y]) end |
.nth_element(input: nil, n: nil, reverse: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2285 def nth_element(input: nil, n: nil, reverse: nil) Utils.execute("NthElement", [input, n], reverse: reverse) end |
.one_hot(indices: nil, depth: nil, on_value: nil, off_value: nil, axis: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2289 def one_hot(indices: nil, depth: nil, on_value: nil, off_value: nil, axis: nil) Utils.execute("OneHot", [indices, depth, on_value, off_value], axis: axis) end |
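A short sketch of building one-hot rows with this op, assuming TensorFlow.constant (not part of this listing) for the inputs.

  # TensorFlow.constant assumed for the inputs.
  indices = TensorFlow.constant([0, 2, 1])
  depth = TensorFlow.constant(3)
  on_value = TensorFlow.constant(1.0)
  off_value = TensorFlow.constant(0.0)
  # Produces a 3x3 matrix; axis: -1 places the new depth axis last.
  TensorFlow::RawOps.one_hot(indices: indices, depth: depth, on_value: on_value, off_value: off_value, axis: -1)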
.one_shot_iterator(dataset_factory: nil, output_types: nil, output_shapes: nil, container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2293 def one_shot_iterator(dataset_factory: nil, output_types: nil, output_shapes: nil, container: nil, shared_name: nil) Utils.execute("OneShotIterator", [], dataset_factory: dataset_factory, output_types: output_types, output_shapes: output_shapes, container: container, shared_name: shared_name) end |
.ones_like(x: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2297 def ones_like(x: nil) Utils.execute("OnesLike", [x]) end |
.optimize_dataset(input_dataset: nil, optimizations: nil, output_types: nil, output_shapes: nil, optimization_configs: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2301 def optimize_dataset(input_dataset: nil, optimizations: nil, output_types: nil, output_shapes: nil, optimization_configs: nil) Utils.execute("OptimizeDataset", [input_dataset, optimizations], output_types: output_types, output_shapes: output_shapes, optimization_configs: optimization_configs) end |
.optional_from_value(components: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2305 def optional_from_value(components: nil) Utils.execute("OptionalFromValue", [components]) end |
.optional_get_value(optional: nil, output_types: nil, output_shapes: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2309 def optional_get_value(optional: nil, output_types: nil, output_shapes: nil) Utils.execute("OptionalGetValue", [optional], output_types: output_types, output_shapes: output_shapes) end |
.optional_has_value(optional: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2313 def optional_has_value(optional: nil) Utils.execute("OptionalHasValue", [optional]) end |
.optional_none ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2317 def optional_none Utils.execute("OptionalNone", []) end |
.ordered_map_clear(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2321 def ordered_map_clear(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("OrderedMapClear", [], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.ordered_map_incomplete_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2325 def ordered_map_incomplete_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("OrderedMapIncompleteSize", [], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.ordered_map_peek(key: nil, indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2329 def ordered_map_peek(key: nil, indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("OrderedMapPeek", [key, indices], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.ordered_map_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2333 def ordered_map_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("OrderedMapSize", [], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.ordered_map_stage(key: nil, indices: nil, values: nil, capacity: nil, memory_limit: nil, dtypes: nil, fake_dtypes: nil, container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2337 def ordered_map_stage(key: nil, indices: nil, values: nil, capacity: nil, memory_limit: nil, dtypes: nil, fake_dtypes: nil, container: nil, shared_name: nil) Utils.execute("OrderedMapStage", [key, indices, values], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, fake_dtypes: fake_dtypes, container: container, shared_name: shared_name) end |
.ordered_map_unstage(key: nil, indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2341 def ordered_map_unstage(key: nil, indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("OrderedMapUnstage", [key, indices], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.ordered_map_unstage_no_key(indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2345 def ordered_map_unstage_no_key(indices: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("OrderedMapUnstageNoKey", [indices], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.outfeed_dequeue(dtype: nil, shape: nil, device_ordinal: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2349 def outfeed_dequeue(dtype: nil, shape: nil, device_ordinal: nil) Utils.execute("OutfeedDequeue", [], dtype: dtype, shape: shape, device_ordinal: device_ordinal) end |
.outfeed_dequeue_tuple(dtypes: nil, shapes: nil, device_ordinal: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2353 def outfeed_dequeue_tuple(dtypes: nil, shapes: nil, device_ordinal: nil) Utils.execute("OutfeedDequeueTuple", [], dtypes: dtypes, shapes: shapes, device_ordinal: device_ordinal) end |
.outfeed_enqueue(input: nil, dtype: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2357 def outfeed_enqueue(input: nil, dtype: nil) Utils.execute("OutfeedEnqueue", [input], dtype: dtype) end |
.outfeed_enqueue_tuple(inputs: nil, dtypes: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2361 def outfeed_enqueue_tuple(inputs: nil, dtypes: nil) Utils.execute("OutfeedEnqueueTuple", [inputs], dtypes: dtypes) end |
.pack(values: nil, axis: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2365 def pack(values: nil, axis: nil) Utils.execute("Pack", [values], axis: axis) end |
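For illustration, stacking two vectors into a matrix with this op (a sketch; TensorFlow.constant is assumed for tensor construction and is not part of this listing).

  # TensorFlow.constant assumed for the inputs.
  x = TensorFlow.constant([1, 2])
  y = TensorFlow.constant([3, 4])
  # Stacks the rank-1 tensors along a new leading axis, giving a 2x2 tensor.
  TensorFlow::RawOps.pack(values: [x, y], axis: 0)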
.pad(input: nil, paddings: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2369 def pad(input: nil, paddings: nil) Utils.execute("Pad", [input, paddings]) end |
.pad_v2(input: nil, paddings: nil, constant_values: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2373 def pad_v2(input: nil, paddings: nil, constant_values: nil) Utils.execute("PadV2", [input, paddings, constant_values]) end |
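A sketch contrasting pad and pad_v2: both take a [rank, 2] paddings tensor, and pad_v2 additionally takes the fill value. TensorFlow.constant is assumed here and is not part of this listing.

  # TensorFlow.constant assumed for the inputs.
  x = TensorFlow.constant([[1, 2], [3, 4]])
  paddings = TensorFlow.constant([[1, 1], [1, 1]])   # one row/column before and after each axis
  TensorFlow::RawOps.pad(input: x, paddings: paddings)   # pads with zeros
  TensorFlow::RawOps.pad_v2(input: x, paddings: paddings, constant_values: TensorFlow.constant(9))   # pads with 9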
.padded_batch_dataset(input_dataset: nil, batch_size: nil, padded_shapes: nil, padding_values: nil, output_shapes: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2377 def padded_batch_dataset(input_dataset: nil, batch_size: nil, padded_shapes: nil, padding_values: nil, output_shapes: nil) Utils.execute("PaddedBatchDataset", [input_dataset, batch_size, padded_shapes, padding_values], output_shapes: output_shapes) end |
.padded_batch_dataset_v2(input_dataset: nil, batch_size: nil, padded_shapes: nil, padding_values: nil, drop_remainder: nil, parallel_copy: nil, output_shapes: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2381 def padded_batch_dataset_v2(input_dataset: nil, batch_size: nil, padded_shapes: nil, padding_values: nil, drop_remainder: nil, parallel_copy: nil, output_shapes: nil) Utils.execute("PaddedBatchDatasetV2", [input_dataset, batch_size, padded_shapes, padding_values, drop_remainder], parallel_copy: parallel_copy, output_shapes: output_shapes) end |
.padding_fifo_queue(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2385 def padding_fifo_queue(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) Utils.execute("PaddingFIFOQueue", [], component_types: component_types, shapes: shapes, capacity: capacity, container: container, shared_name: shared_name) end |
.padding_fifo_queue_v2(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2389 def padding_fifo_queue_v2(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) Utils.execute("PaddingFIFOQueueV2", [], component_types: component_types, shapes: shapes, capacity: capacity, container: container, shared_name: shared_name) end |
.parallel_concat(values: nil, shape: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2393 def parallel_concat(values: nil, shape: nil) Utils.execute("ParallelConcat", [values], shape: shape) end |
.parallel_dynamic_stitch(indices: nil, data: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2397 def parallel_dynamic_stitch(indices: nil, data: nil) Utils.execute("ParallelDynamicStitch", [indices, data]) end |
.parallel_interleave_dataset_v2(input_dataset: nil, other_arguments: nil, cycle_length: nil, block_length: nil, num_parallel_calls: nil, f: nil, output_types: nil, output_shapes: nil, sloppy: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2401 def parallel_interleave_dataset_v2(input_dataset: nil, other_arguments: nil, cycle_length: nil, block_length: nil, num_parallel_calls: nil, f: nil, output_types: nil, output_shapes: nil, sloppy: nil) Utils.execute("ParallelInterleaveDatasetV2", [input_dataset, other_arguments, cycle_length, block_length, num_parallel_calls], f: f, output_types: output_types, output_shapes: output_shapes, sloppy: sloppy) end |
.parallel_map_dataset(input_dataset: nil, other_arguments: nil, num_parallel_calls: nil, f: nil, output_types: nil, output_shapes: nil, use_inter_op_parallelism: nil, sloppy: nil, preserve_cardinality: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2405 def parallel_map_dataset(input_dataset: nil, other_arguments: nil, num_parallel_calls: nil, f: nil, output_types: nil, output_shapes: nil, use_inter_op_parallelism: nil, sloppy: nil, preserve_cardinality: nil) Utils.execute("ParallelMapDataset", [input_dataset, other_arguments, num_parallel_calls], f: f, output_types: output_types, output_shapes: output_shapes, use_inter_op_parallelism: use_inter_op_parallelism, sloppy: sloppy, preserve_cardinality: preserve_cardinality) end |
.parameterized_truncated_normal(shape: nil, means: nil, stdevs: nil, minvals: nil, maxvals: nil, seed: nil, seed2: nil, dtype: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2409 def parameterized_truncated_normal(shape: nil, means: nil, stdevs: nil, minvals: nil, maxvals: nil, seed: nil, seed2: nil, dtype: nil) Utils.execute("ParameterizedTruncatedNormal", [shape, means, stdevs, minvals, maxvals], seed: seed, seed2: seed2, dtype: dtype) end |
.parse_example(serialized: nil, names: nil, sparse_keys: nil, dense_keys: nil, dense_defaults: nil, sparse_types: nil, dense_shapes: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2413 def parse_example(serialized: nil, names: nil, sparse_keys: nil, dense_keys: nil, dense_defaults: nil, sparse_types: nil, dense_shapes: nil) Utils.execute("ParseExample", [serialized, names, sparse_keys, dense_keys, dense_defaults], sparse_types: sparse_types, dense_shapes: dense_shapes) end |
.parse_sequence_example(serialized: nil, debug_name: nil, context_dense_defaults: nil, feature_list_dense_missing_assumed_empty: nil, context_sparse_keys: nil, context_dense_keys: nil, feature_list_sparse_keys: nil, feature_list_dense_keys: nil, context_sparse_types: nil, feature_list_dense_types: nil, context_dense_shapes: nil, feature_list_sparse_types: nil, feature_list_dense_shapes: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2417 def parse_sequence_example(serialized: nil, debug_name: nil, context_dense_defaults: nil, feature_list_dense_missing_assumed_empty: nil, context_sparse_keys: nil, context_dense_keys: nil, feature_list_sparse_keys: nil, feature_list_dense_keys: nil, context_sparse_types: nil, feature_list_dense_types: nil, context_dense_shapes: nil, feature_list_sparse_types: nil, feature_list_dense_shapes: nil) Utils.execute("ParseSequenceExample", [serialized, debug_name, context_dense_defaults], feature_list_dense_missing_assumed_empty: feature_list_dense_missing_assumed_empty, context_sparse_keys: context_sparse_keys, context_dense_keys: context_dense_keys, feature_list_sparse_keys: feature_list_sparse_keys, feature_list_dense_keys: feature_list_dense_keys, context_sparse_types: context_sparse_types, feature_list_dense_types: feature_list_dense_types, context_dense_shapes: context_dense_shapes, feature_list_sparse_types: feature_list_sparse_types, feature_list_dense_shapes: feature_list_dense_shapes) end |
.parse_single_example(serialized: nil, dense_defaults: nil, num_sparse: nil, sparse_keys: nil, dense_keys: nil, sparse_types: nil, dense_shapes: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2421 def parse_single_example(serialized: nil, dense_defaults: nil, num_sparse: nil, sparse_keys: nil, dense_keys: nil, sparse_types: nil, dense_shapes: nil) Utils.execute("ParseSingleExample", [serialized, dense_defaults], num_sparse: num_sparse, sparse_keys: sparse_keys, dense_keys: dense_keys, sparse_types: sparse_types, dense_shapes: dense_shapes) end |
.parse_single_sequence_example(serialized: nil, feature_list_dense_missing_assumed_empty: nil, context_sparse_keys: nil, context_dense_keys: nil, feature_list_sparse_keys: nil, feature_list_dense_keys: nil, context_dense_defaults: nil, debug_name: nil, context_sparse_types: nil, feature_list_dense_types: nil, context_dense_shapes: nil, feature_list_sparse_types: nil, feature_list_dense_shapes: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2425 def parse_single_sequence_example(serialized: nil, feature_list_dense_missing_assumed_empty: nil, context_sparse_keys: nil, context_dense_keys: nil, feature_list_sparse_keys: nil, feature_list_dense_keys: nil, context_dense_defaults: nil, debug_name: nil, context_sparse_types: nil, feature_list_dense_types: nil, context_dense_shapes: nil, feature_list_sparse_types: nil, feature_list_dense_shapes: nil) Utils.execute("ParseSingleSequenceExample", [serialized, feature_list_dense_missing_assumed_empty, context_sparse_keys, context_dense_keys, feature_list_sparse_keys, feature_list_dense_keys, context_dense_defaults, debug_name], context_sparse_types: context_sparse_types, feature_list_dense_types: feature_list_dense_types, context_dense_shapes: context_dense_shapes, feature_list_sparse_types: feature_list_sparse_types, feature_list_dense_shapes: feature_list_dense_shapes) end |
.parse_tensor(serialized: nil, out_type: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2429 def parse_tensor(serialized: nil, out_type: nil) Utils.execute("ParseTensor", [serialized], out_type: out_type) end |
.partitioned_call(args: nil, f: nil, config: nil, config_proto: nil, executor_type: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2433 def partitioned_call(args: nil, f: nil, config: nil, config_proto: nil, executor_type: nil) Utils.execute("PartitionedCall", [args], f: f, config: config, config_proto: config_proto, executor_type: executor_type) end |
.placeholder(dtype: nil, shape: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2437 def placeholder(dtype: nil, shape: nil) Utils.execute("Placeholder", [], dtype: dtype, shape: shape) end |
.placeholder_v2(dtype: nil, shape: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2441 def placeholder_v2(dtype: nil, shape: nil) Utils.execute("PlaceholderV2", [], dtype: dtype, shape: shape) end |
.placeholder_with_default(input: nil, dtype: nil, shape: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2445 def placeholder_with_default(input: nil, dtype: nil, shape: nil) Utils.execute("PlaceholderWithDefault", [input], dtype: dtype, shape: shape) end |
.polygamma(a: nil, x: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2449 def polygamma(a: nil, x: nil) Utils.execute("Polygamma", [a, x]) end |
.population_count(x: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2453 def population_count(x: nil) Utils.execute("PopulationCount", [x]) end |
.pow(x: nil, y: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2457 def pow(x: nil, y: nil) Utils.execute("Pow", [x, y]) end |
.prefetch_dataset(input_dataset: nil, buffer_size: nil, output_types: nil, output_shapes: nil, slack_period: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2461 def prefetch_dataset(input_dataset: nil, buffer_size: nil, output_types: nil, output_shapes: nil, slack_period: nil) Utils.execute("PrefetchDataset", [input_dataset, buffer_size], output_types: output_types, output_shapes: output_shapes, slack_period: slack_period) end |
.prelinearize(input: nil, dtype: nil, shape: nil, layout: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2465 def prelinearize(input: nil, dtype: nil, shape: nil, layout: nil) Utils.execute("Prelinearize", [input], dtype: dtype, shape: shape, layout: layout) end |
.prelinearize_tuple(inputs: nil, dtypes: nil, shapes: nil, layouts: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2469 def prelinearize_tuple(inputs: nil, dtypes: nil, shapes: nil, layouts: nil) Utils.execute("PrelinearizeTuple", [inputs], dtypes: dtypes, shapes: shapes, layouts: layouts) end |
.prevent_gradient(input: nil, message: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2473 def prevent_gradient(input: nil, message: nil) Utils.execute("PreventGradient", [input], message: message) end
.print(input: nil, data: nil, message: nil, first_n: nil, summarize: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2477 def print(input: nil, data: nil, message: nil, first_n: nil, summarize: nil) Utils.execute("Print", [input, data], message: message, first_n: first_n, summarize: summarize) end
.print_v2(input: nil, output_stream: nil, stop: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2481 def print_v2(input: nil, output_stream: nil, stop: nil) Utils.execute("PrintV2", [input], output_stream: output_stream, stop: stop) end |
.priority_queue(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2485 def priority_queue(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) Utils.execute("PriorityQueue", [], component_types: component_types, shapes: shapes, capacity: capacity, container: container, shared_name: shared_name) end |
.priority_queue_v2(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2489 def priority_queue_v2(component_types: nil, shapes: nil, capacity: nil, container: nil, shared_name: nil) Utils.execute("PriorityQueueV2", [], component_types: component_types, shapes: shapes, capacity: capacity, container: container, shared_name: shared_name) end |
.prod(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2493 def prod(input: nil, reduction_indices: nil, keep_dims: nil) Utils.execute("Prod", [input, reduction_indices], keep_dims: keep_dims) end |
.py_func(input: nil, token: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2497 def py_func(input: nil, token: nil) Utils.execute("PyFunc", [input], token: token) end |
.py_func_stateless(input: nil, token: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2501 def py_func_stateless(input: nil, token: nil) Utils.execute("PyFuncStateless", [input], token: token) end |
.qr(input: nil, full_matrices: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2505 def qr(input: nil, full_matrices: nil) Utils.execute("Qr", [input], full_matrices: full_matrices) end |
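A sketch of a reduced QR factorization with this op. TensorFlow.constant is assumed for the input, and the op is assumed to return its q and r outputs as a destructurable pair, as with other multi-output ops in this listing.

  # TensorFlow.constant assumed for the input.
  a = TensorFlow.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
  # full_matrices: false requests the reduced factorization (q is 3x2, r is 2x2).
  q, r = TensorFlow::RawOps.qr(input: a, full_matrices: false)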
.quantize_and_dequantize(input: nil, signed_input: nil, num_bits: nil, range_given: nil, input_min: nil, input_max: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2509 def quantize_and_dequantize(input: nil, signed_input: nil, num_bits: nil, range_given: nil, input_min: nil, input_max: nil) Utils.execute("QuantizeAndDequantize", [input], signed_input: signed_input, num_bits: num_bits, range_given: range_given, input_min: input_min, input_max: input_max) end |
.quantize_and_dequantize_v2(input: nil, input_min: nil, input_max: nil, signed_input: nil, num_bits: nil, range_given: nil, round_mode: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2513 def quantize_and_dequantize_v2(input: nil, input_min: nil, input_max: nil, signed_input: nil, num_bits: nil, range_given: nil, round_mode: nil) Utils.execute("QuantizeAndDequantizeV2", [input, input_min, input_max], signed_input: signed_input, num_bits: num_bits, range_given: range_given, round_mode: round_mode) end |
.quantize_and_dequantize_v3(input: nil, input_min: nil, input_max: nil, num_bits: nil, signed_input: nil, range_given: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2517 def quantize_and_dequantize_v3(input: nil, input_min: nil, input_max: nil, num_bits: nil, signed_input: nil, range_given: nil) Utils.execute("QuantizeAndDequantizeV3", [input, input_min, input_max, num_bits], signed_input: signed_input, range_given: range_given) end |
.quantize_down_and_shrink_range(input: nil, input_min: nil, input_max: nil, out_type: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2521 def quantize_down_and_shrink_range(input: nil, input_min: nil, input_max: nil, out_type: nil) Utils.execute("QuantizeDownAndShrinkRange", [input, input_min, input_max], out_type: out_type) end |
.quantize_v2(input: nil, min_range: nil, max_range: nil, mode: nil, round_mode: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2525 def quantize_v2(input: nil, min_range: nil, max_range: nil, mode: nil, round_mode: nil) Utils.execute("QuantizeV2", [input, min_range, max_range], mode: mode, round_mode: round_mode) end |
.quantized_add(x: nil, y: nil, min_x: nil, max_x: nil, min_y: nil, max_y: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2529 def quantized_add(x: nil, y: nil, min_x: nil, max_x: nil, min_y: nil, max_y: nil) Utils.execute("QuantizedAdd", [x, y, min_x, max_x, min_y, max_y]) end |
.quantized_avg_pool(input: nil, min_input: nil, max_input: nil, ksize: nil, strides: nil, padding: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2533 def quantized_avg_pool(input: nil, min_input: nil, max_input: nil, ksize: nil, strides: nil, padding: nil) Utils.execute("QuantizedAvgPool", [input, min_input, max_input], ksize: ksize, strides: strides, padding: padding) end |
.quantized_batch_norm_with_global_normalization(t: nil, t_min: nil, t_max: nil, m: nil, m_min: nil, m_max: nil, v: nil, v_min: nil, v_max: nil, beta: nil, beta_min: nil, beta_max: nil, gamma: nil, gamma_min: nil, gamma_max: nil, out_type: nil, variance_epsilon: nil, scale_after_normalization: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2537 def quantized_batch_norm_with_global_normalization(t: nil, t_min: nil, t_max: nil, m: nil, m_min: nil, m_max: nil, v: nil, v_min: nil, v_max: nil, beta: nil, beta_min: nil, beta_max: nil, gamma: nil, gamma_min: nil, gamma_max: nil, out_type: nil, variance_epsilon: nil, scale_after_normalization: nil) Utils.execute("QuantizedBatchNormWithGlobalNormalization", [t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max], out_type: out_type, variance_epsilon: variance_epsilon, scale_after_normalization: scale_after_normalization) end |
.quantized_bias_add(input: nil, bias: nil, min_input: nil, max_input: nil, min_bias: nil, max_bias: nil, out_type: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2541 def quantized_bias_add(input: nil, bias: nil, min_input: nil, max_input: nil, min_bias: nil, max_bias: nil, out_type: nil) Utils.execute("QuantizedBiasAdd", [input, bias, min_input, max_input, min_bias, max_bias], out_type: out_type) end |
.quantized_concat(concat_dim: nil, values: nil, input_mins: nil, input_maxes: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2545 def quantized_concat(concat_dim: nil, values: nil, input_mins: nil, input_maxes: nil) Utils.execute("QuantizedConcat", [concat_dim, values, input_mins, input_maxes]) end |
.quantized_conv2d(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2549 def quantized_conv2d(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) Utils.execute("QuantizedConv2D", [input, filter, min_input, max_input, min_filter, max_filter], out_type: out_type, strides: strides, padding: padding, dilations: dilations) end |
.quantized_conv2d_and_relu(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2553 def quantized_conv2d_and_relu(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) Utils.execute("QuantizedConv2DAndRelu", [input, filter, min_input, max_input, min_filter, max_filter], out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list) end |
.quantized_conv2d_and_relu_and_requantize(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2557 def quantized_conv2d_and_relu_and_requantize(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) Utils.execute("QuantizedConv2DAndReluAndRequantize", [input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output], out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list) end |
.quantized_conv2d_and_requantize(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2561 def quantized_conv2d_and_requantize(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) Utils.execute("QuantizedConv2DAndRequantize", [input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output], out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list) end |
.quantized_conv2d_per_channel(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2565 def quantized_conv2d_per_channel(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) Utils.execute("QuantizedConv2DPerChannel", [input, filter, min_input, max_input, min_filter, max_filter], out_type: out_type, strides: strides, padding: padding, dilations: dilations) end |
.quantized_conv2d_with_bias(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2569 def quantized_conv2d_with_bias(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) Utils.execute("QuantizedConv2DWithBias", [input, filter, bias, min_input, max_input, min_filter, max_filter], out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list) end |
.quantized_conv2d_with_bias_and_relu(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2573 def quantized_conv2d_with_bias_and_relu(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) Utils.execute("QuantizedConv2DWithBiasAndRelu", [input, filter, bias, min_input, max_input, min_filter, max_filter], out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list) end |
.quantized_conv2d_with_bias_and_relu_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2577 def quantized_conv2d_with_bias_and_relu_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) Utils.execute("QuantizedConv2DWithBiasAndReluAndRequantize", [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output], out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list) end |
.quantized_conv2d_with_bias_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2581 def quantized_conv2d_with_bias_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) Utils.execute("QuantizedConv2DWithBiasAndRequantize", [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output], out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list) end |
.quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, summand: nil, min_summand: nil, max_summand: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2585 def quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, summand: nil, min_summand: nil, max_summand: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) Utils.execute("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand], out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list) end |
.quantized_conv2d_with_bias_sum_and_relu(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, summand: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
# File 'lib/tensorflow/raw_ops.rb', line 2589 def quantized_conv2d_with_bias_sum_and_relu(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, summand: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) Utils.execute("QuantizedConv2DWithBiasSumAndRelu", [input, filter, bias, min_input, max_input, min_filter, max_filter, summand], out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list) end |
.quantized_conv2d_with_bias_sum_and_relu_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, summand: nil, min_summand: nil, max_summand: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) ⇒ Object
2593 2594 2595 |
# File 'lib/tensorflow/raw_ops.rb', line 2593 def quantized_conv2d_with_bias_sum_and_relu_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, summand: nil, min_summand: nil, max_summand: nil, out_type: nil, strides: nil, padding: nil, dilations: nil, padding_list: nil) Utils.execute("QuantizedConv2DWithBiasSumAndReluAndRequantize", [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand], out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list) end |
.quantized_depthwise_conv2d(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
2597 2598 2599 |
# File 'lib/tensorflow/raw_ops.rb', line 2597 def quantized_depthwise_conv2d(input: nil, filter: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) Utils.execute("QuantizedDepthwiseConv2D", [input, filter, min_input, max_input, min_filter, max_filter], out_type: out_type, strides: strides, padding: padding, dilations: dilations) end |
.quantized_depthwise_conv2d_with_bias(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
2601 2602 2603 |
# File 'lib/tensorflow/raw_ops.rb', line 2601 def quantized_depthwise_conv2d_with_bias(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) Utils.execute("QuantizedDepthwiseConv2DWithBias", [input, filter, bias, min_input, max_input, min_filter, max_filter], out_type: out_type, strides: strides, padding: padding, dilations: dilations) end |
.quantized_depthwise_conv2d_with_bias_and_relu(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
2605 2606 2607 |
# File 'lib/tensorflow/raw_ops.rb', line 2605 def quantized_depthwise_conv2d_with_bias_and_relu(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) Utils.execute("QuantizedDepthwiseConv2DWithBiasAndRelu", [input, filter, bias, min_input, max_input, min_filter, max_filter], out_type: out_type, strides: strides, padding: padding, dilations: dilations) end |
.quantized_depthwise_conv2d_with_bias_and_relu_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) ⇒ Object
2609 2610 2611 |
# File 'lib/tensorflow/raw_ops.rb', line 2609 def quantized_depthwise_conv2d_with_bias_and_relu_and_requantize(input: nil, filter: nil, bias: nil, min_input: nil, max_input: nil, min_filter: nil, max_filter: nil, min_freezed_output: nil, max_freezed_output: nil, out_type: nil, strides: nil, padding: nil, dilations: nil) Utils.execute("QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output], out_type: out_type, strides: strides, padding: padding, dilations: dilations) end |
.quantized_instance_norm(x: nil, x_min: nil, x_max: nil, output_range_given: nil, given_y_min: nil, given_y_max: nil, variance_epsilon: nil, min_separation: nil) ⇒ Object
2613 2614 2615 |
# File 'lib/tensorflow/raw_ops.rb', line 2613 def quantized_instance_norm(x: nil, x_min: nil, x_max: nil, output_range_given: nil, given_y_min: nil, given_y_max: nil, variance_epsilon: nil, min_separation: nil) Utils.execute("QuantizedInstanceNorm", [x, x_min, x_max], output_range_given: output_range_given, given_y_min: given_y_min, given_y_max: given_y_max, variance_epsilon: variance_epsilon, min_separation: min_separation) end |
.quantized_mat_mul(a: nil, b: nil, min_a: nil, max_a: nil, min_b: nil, max_b: nil, transpose_a: nil, transpose_b: nil) ⇒ Object
2617 2618 2619 |
# File 'lib/tensorflow/raw_ops.rb', line 2617 def quantized_mat_mul(a: nil, b: nil, min_a: nil, max_a: nil, min_b: nil, max_b: nil, transpose_a: nil, transpose_b: nil) Utils.execute("QuantizedMatMul", [a, b, min_a, max_a, min_b, max_b], transpose_a: transpose_a, transpose_b: transpose_b) end |
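A hedged usage sketch only: .quantized_mat_mul expects already-quantized inputs, so a_q, b_q and their float ranges below are placeholders for tensors produced by a quantization step that is not part of this page, and the three-way destructuring assumes the op's multiple outputs come back as an array.
# a_q, b_q: quint8 tensors obtained from a quantization step (placeholders)
# min_a..max_b: the float ranges those quantized values represent
product, min_out, max_out = TensorFlow::RawOps.quantized_mat_mul(
  a: a_q, b: b_q,
  min_a: min_a, max_a: max_a,
  min_b: min_b, max_b: max_b,
  transpose_a: false, transpose_b: false
)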
.quantized_max_pool(input: nil, min_input: nil, max_input: nil, ksize: nil, strides: nil, padding: nil) ⇒ Object
2621 2622 2623 |
# File 'lib/tensorflow/raw_ops.rb', line 2621 def quantized_max_pool(input: nil, min_input: nil, max_input: nil, ksize: nil, strides: nil, padding: nil) Utils.execute("QuantizedMaxPool", [input, min_input, max_input], ksize: ksize, strides: strides, padding: padding) end |
.quantized_mul(x: nil, y: nil, min_x: nil, max_x: nil, min_y: nil, max_y: nil) ⇒ Object
2625 2626 2627 |
# File 'lib/tensorflow/raw_ops.rb', line 2625 def quantized_mul(x: nil, y: nil, min_x: nil, max_x: nil, min_y: nil, max_y: nil) Utils.execute("QuantizedMul", [x, y, min_x, max_x, min_y, max_y]) end |
.quantized_relu(features: nil, min_features: nil, max_features: nil, out_type: nil) ⇒ Object
2629 2630 2631 |
# File 'lib/tensorflow/raw_ops.rb', line 2629 def quantized_relu(features: nil, min_features: nil, max_features: nil, out_type: nil) Utils.execute("QuantizedRelu", [features, min_features, max_features], out_type: out_type) end |
.quantized_relu6(features: nil, min_features: nil, max_features: nil, out_type: nil) ⇒ Object
2633 2634 2635 |
# File 'lib/tensorflow/raw_ops.rb', line 2633 def quantized_relu6(features: nil, min_features: nil, max_features: nil, out_type: nil) Utils.execute("QuantizedRelu6", [features, min_features, max_features], out_type: out_type) end |
.quantized_relu_x(features: nil, max_value: nil, min_features: nil, max_features: nil, out_type: nil) ⇒ Object
2637 2638 2639 |
# File 'lib/tensorflow/raw_ops.rb', line 2637 def quantized_relu_x(features: nil, max_value: nil, min_features: nil, max_features: nil, out_type: nil) Utils.execute("QuantizedReluX", [features, max_value, min_features, max_features], out_type: out_type) end |
.quantized_reshape(tensor: nil, shape: nil, input_min: nil, input_max: nil) ⇒ Object
2641 2642 2643 |
# File 'lib/tensorflow/raw_ops.rb', line 2641 def quantized_reshape(tensor: nil, shape: nil, input_min: nil, input_max: nil) Utils.execute("QuantizedReshape", [tensor, shape, input_min, input_max]) end |
.quantized_resize_bilinear(images: nil, size: nil, min: nil, max: nil, align_corners: nil, half_pixel_centers: nil) ⇒ Object
2645 2646 2647 |
# File 'lib/tensorflow/raw_ops.rb', line 2645 def quantized_resize_bilinear(images: nil, size: nil, min: nil, max: nil, align_corners: nil, half_pixel_centers: nil) Utils.execute("QuantizedResizeBilinear", [images, size, min, max], align_corners: align_corners, half_pixel_centers: half_pixel_centers) end |
.queue_close(handle: nil, cancel_pending_enqueues: nil) ⇒ Object
2649 2650 2651 |
# File 'lib/tensorflow/raw_ops.rb', line 2649 def queue_close(handle: nil, cancel_pending_enqueues: nil) Utils.execute("QueueClose", [handle], cancel_pending_enqueues: cancel_pending_enqueues) end |
.queue_close_v2(handle: nil, cancel_pending_enqueues: nil) ⇒ Object
2653 2654 2655 |
# File 'lib/tensorflow/raw_ops.rb', line 2653 def queue_close_v2(handle: nil, cancel_pending_enqueues: nil) Utils.execute("QueueCloseV2", [handle], cancel_pending_enqueues: cancel_pending_enqueues) end |
.queue_dequeue(handle: nil, component_types: nil, timeout_ms: nil) ⇒ Object
2657 2658 2659 |
# File 'lib/tensorflow/raw_ops.rb', line 2657 def queue_dequeue(handle: nil, component_types: nil, timeout_ms: nil) Utils.execute("QueueDequeue", [handle], component_types: component_types, timeout_ms: timeout_ms) end |
.queue_dequeue_many(handle: nil, n: nil, component_types: nil, timeout_ms: nil) ⇒ Object
2661 2662 2663 |
# File 'lib/tensorflow/raw_ops.rb', line 2661 def queue_dequeue_many(handle: nil, n: nil, component_types: nil, timeout_ms: nil) Utils.execute("QueueDequeueMany", [handle, n], component_types: component_types, timeout_ms: timeout_ms) end |
.queue_dequeue_many_v2(handle: nil, n: nil, component_types: nil, timeout_ms: nil) ⇒ Object
2665 2666 2667 |
# File 'lib/tensorflow/raw_ops.rb', line 2665 def queue_dequeue_many_v2(handle: nil, n: nil, component_types: nil, timeout_ms: nil) Utils.execute("QueueDequeueManyV2", [handle, n], component_types: component_types, timeout_ms: timeout_ms) end |
.queue_dequeue_up_to(handle: nil, n: nil, component_types: nil, timeout_ms: nil) ⇒ Object
2669 2670 2671 |
# File 'lib/tensorflow/raw_ops.rb', line 2669 def queue_dequeue_up_to(handle: nil, n: nil, component_types: nil, timeout_ms: nil) Utils.execute("QueueDequeueUpTo", [handle, n], component_types: component_types, timeout_ms: timeout_ms) end |
.queue_dequeue_up_to_v2(handle: nil, n: nil, component_types: nil, timeout_ms: nil) ⇒ Object
2673 2674 2675 |
# File 'lib/tensorflow/raw_ops.rb', line 2673 def queue_dequeue_up_to_v2(handle: nil, n: nil, component_types: nil, timeout_ms: nil) Utils.execute("QueueDequeueUpToV2", [handle, n], component_types: component_types, timeout_ms: timeout_ms) end |
.queue_dequeue_v2(handle: nil, component_types: nil, timeout_ms: nil) ⇒ Object
2677 2678 2679 |
# File 'lib/tensorflow/raw_ops.rb', line 2677 def queue_dequeue_v2(handle: nil, component_types: nil, timeout_ms: nil) Utils.execute("QueueDequeueV2", [handle], component_types: component_types, timeout_ms: timeout_ms) end |
.queue_enqueue(handle: nil, components: nil, timeout_ms: nil) ⇒ Object
2681 2682 2683 |
# File 'lib/tensorflow/raw_ops.rb', line 2681 def queue_enqueue(handle: nil, components: nil, timeout_ms: nil) Utils.execute("QueueEnqueue", [handle, components], timeout_ms: timeout_ms) end |
.queue_enqueue_many(handle: nil, components: nil, timeout_ms: nil) ⇒ Object
2685 2686 2687 |
# File 'lib/tensorflow/raw_ops.rb', line 2685 def queue_enqueue_many(handle: nil, components: nil, timeout_ms: nil) Utils.execute("QueueEnqueueMany", [handle, components], timeout_ms: timeout_ms) end |
.queue_enqueue_many_v2(handle: nil, components: nil, timeout_ms: nil) ⇒ Object
2689 2690 2691 |
# File 'lib/tensorflow/raw_ops.rb', line 2689 def queue_enqueue_many_v2(handle: nil, components: nil, timeout_ms: nil) Utils.execute("QueueEnqueueManyV2", [handle, components], timeout_ms: timeout_ms) end |
.queue_enqueue_v2(handle: nil, components: nil, timeout_ms: nil) ⇒ Object
2693 2694 2695 |
# File 'lib/tensorflow/raw_ops.rb', line 2693 def queue_enqueue_v2(handle: nil, components: nil, timeout_ms: nil) Utils.execute("QueueEnqueueV2", [handle, components], timeout_ms: timeout_ms) end |
.queue_is_closed(handle: nil) ⇒ Object
2697 2698 2699 |
# File 'lib/tensorflow/raw_ops.rb', line 2697 def queue_is_closed(handle: nil) Utils.execute("QueueIsClosed", [handle]) end |
.queue_is_closed_v2(handle: nil) ⇒ Object
2701 2702 2703 |
# File 'lib/tensorflow/raw_ops.rb', line 2701 def queue_is_closed_v2(handle: nil) Utils.execute("QueueIsClosedV2", [handle]) end |
.queue_size(handle: nil) ⇒ Object
2705 2706 2707 |
# File 'lib/tensorflow/raw_ops.rb', line 2705 def queue_size(handle: nil) Utils.execute("QueueSize", [handle]) end |
.queue_size_v2(handle: nil) ⇒ Object
2709 2710 2711 |
# File 'lib/tensorflow/raw_ops.rb', line 2709 def queue_size_v2(handle: nil) Utils.execute("QueueSizeV2", [handle]) end |
.ragged_gather(params_nested_splits: nil, params_dense_values: nil, indices: nil) ⇒ Object
2729 2730 2731 |
# File 'lib/tensorflow/raw_ops.rb', line 2729 def ragged_gather(params_nested_splits: nil, params_dense_values: nil, indices: nil) Utils.execute("RaggedGather", [params_nested_splits, params_dense_values, indices]) end |
.ragged_range(starts: nil, limits: nil, deltas: nil) ⇒ Object
2733 2734 2735 |
# File 'lib/tensorflow/raw_ops.rb', line 2733 def ragged_range(starts: nil, limits: nil, deltas: nil) Utils.execute("RaggedRange", [starts, limits, deltas]) end |
.ragged_tensor_from_variant(encoded_ragged: nil, input_ragged_rank: nil, output_ragged_rank: nil) ⇒ Object
2737 2738 2739 |
# File 'lib/tensorflow/raw_ops.rb', line 2737 def ragged_tensor_from_variant(encoded_ragged: nil, input_ragged_rank: nil, output_ragged_rank: nil) Utils.execute("RaggedTensorFromVariant", [encoded_ragged], input_ragged_rank: input_ragged_rank, output_ragged_rank: output_ragged_rank) end |
.ragged_tensor_to_sparse(rt_nested_splits: nil, rt_dense_values: nil) ⇒ Object
2741 2742 2743 |
# File 'lib/tensorflow/raw_ops.rb', line 2741 def ragged_tensor_to_sparse(rt_nested_splits: nil, rt_dense_values: nil) Utils.execute("RaggedTensorToSparse", [rt_nested_splits, rt_dense_values]) end |
.ragged_tensor_to_variant(rt_nested_splits: nil, rt_dense_values: nil, batched_input: nil) ⇒ Object
2745 2746 2747 |
# File 'lib/tensorflow/raw_ops.rb', line 2745 def ragged_tensor_to_variant(rt_nested_splits: nil, rt_dense_values: nil, batched_input: nil) Utils.execute("RaggedTensorToVariant", [rt_nested_splits, rt_dense_values], batched_input: batched_input) end |
.random_crop(image: nil, size: nil, seed: nil, seed2: nil) ⇒ Object
2749 2750 2751 |
# File 'lib/tensorflow/raw_ops.rb', line 2749 def random_crop(image: nil, size: nil, seed: nil, seed2: nil) Utils.execute("RandomCrop", [image, size], seed: seed, seed2: seed2) end |
.random_gamma(shape: nil, alpha: nil, seed: nil, seed2: nil) ⇒ Object
2753 2754 2755 |
# File 'lib/tensorflow/raw_ops.rb', line 2753 def random_gamma(shape: nil, alpha: nil, seed: nil, seed2: nil) Utils.execute("RandomGamma", [shape, alpha], seed: seed, seed2: seed2) end |
.random_gamma_grad(alpha: nil, sample: nil) ⇒ Object
2757 2758 2759 |
# File 'lib/tensorflow/raw_ops.rb', line 2757 def random_gamma_grad(alpha: nil, sample: nil) Utils.execute("RandomGammaGrad", [alpha, sample]) end |
.random_poisson(shape: nil, rate: nil, seed: nil, seed2: nil, dtype: nil) ⇒ Object
2761 2762 2763 |
# File 'lib/tensorflow/raw_ops.rb', line 2761 def random_poisson(shape: nil, rate: nil, seed: nil, seed2: nil, dtype: nil) Utils.execute("RandomPoisson", [shape, rate], seed: seed, seed2: seed2, dtype: dtype) end |
.random_poisson_v2(shape: nil, rate: nil, seed: nil, seed2: nil, dtype: nil) ⇒ Object
2765 2766 2767 |
# File 'lib/tensorflow/raw_ops.rb', line 2765 def random_poisson_v2(shape: nil, rate: nil, seed: nil, seed2: nil, dtype: nil) Utils.execute("RandomPoissonV2", [shape, rate], seed: seed, seed2: seed2, dtype: dtype) end |
.random_shuffle(value: nil, seed: nil, seed2: nil) ⇒ Object
2769 2770 2771 |
# File 'lib/tensorflow/raw_ops.rb', line 2769 def random_shuffle(value: nil, seed: nil, seed2: nil) Utils.execute("RandomShuffle", [value], seed: seed, seed2: seed2) end |
.random_shuffle_queue(component_types: nil, shapes: nil, capacity: nil, min_after_dequeue: nil, seed: nil, seed2: nil, container: nil, shared_name: nil) ⇒ Object
2773 2774 2775 |
# File 'lib/tensorflow/raw_ops.rb', line 2773 def random_shuffle_queue(component_types: nil, shapes: nil, capacity: nil, min_after_dequeue: nil, seed: nil, seed2: nil, container: nil, shared_name: nil) Utils.execute("RandomShuffleQueue", [], component_types: component_types, shapes: shapes, capacity: capacity, min_after_dequeue: min_after_dequeue, seed: seed, seed2: seed2, container: container, shared_name: shared_name) end |
.random_shuffle_queue_v2(component_types: nil, shapes: nil, capacity: nil, min_after_dequeue: nil, seed: nil, seed2: nil, container: nil, shared_name: nil) ⇒ Object
2777 2778 2779 |
# File 'lib/tensorflow/raw_ops.rb', line 2777 def random_shuffle_queue_v2(component_types: nil, shapes: nil, capacity: nil, min_after_dequeue: nil, seed: nil, seed2: nil, container: nil, shared_name: nil) Utils.execute("RandomShuffleQueueV2", [], component_types: component_types, shapes: shapes, capacity: capacity, min_after_dequeue: min_after_dequeue, seed: seed, seed2: seed2, container: container, shared_name: shared_name) end |
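A sketch of the queue lifecycle using only ops documented on this page; the component_types and shapes values are assumptions about how this gem encodes type and shape attributes, and plain Ruby numbers are assumed to be accepted as tensor inputs.
# Create a shuffling queue that holds single float scalars (attribute encodings assumed)
queue = TensorFlow::RawOps.random_shuffle_queue_v2(
  component_types: [:float], shapes: [[]],
  capacity: 100, min_after_dequeue: 0,
  seed: 1, seed2: 2, container: "", shared_name: ""
)
TensorFlow::RawOps.queue_enqueue_v2(handle: queue, components: [3.14])
size  = TensorFlow::RawOps.queue_size_v2(handle: queue)
value = TensorFlow::RawOps.queue_dequeue_v2(handle: queue, component_types: [:float])
TensorFlow::RawOps.queue_close_v2(handle: queue, cancel_pending_enqueues: false)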
.random_standard_normal(shape: nil, seed: nil, seed2: nil, dtype: nil) ⇒ Object
2781 2782 2783 |
# File 'lib/tensorflow/raw_ops.rb', line 2781 def random_standard_normal(shape: nil, seed: nil, seed2: nil, dtype: nil) Utils.execute("RandomStandardNormal", [shape], seed: seed, seed2: seed2, dtype: dtype) end |
.random_uniform(shape: nil, seed: nil, seed2: nil, dtype: nil) ⇒ Object
2785 2786 2787 |
# File 'lib/tensorflow/raw_ops.rb', line 2785 def random_uniform(shape: nil, seed: nil, seed2: nil, dtype: nil) Utils.execute("RandomUniform", [shape], seed: seed, seed2: seed2, dtype: dtype) end |
.random_uniform_int(shape: nil, minval: nil, maxval: nil, seed: nil, seed2: nil) ⇒ Object
2789 2790 2791 |
# File 'lib/tensorflow/raw_ops.rb', line 2789 def random_uniform_int(shape: nil, minval: nil, maxval: nil, seed: nil, seed2: nil) Utils.execute("RandomUniformInt", [shape, minval, maxval], seed: seed, seed2: seed2) end |
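A short sketch of the stateful random ops above; the :float dtype symbol is an assumption about this gem's dtype encoding, and plain Ruby numbers and arrays are assumed to be accepted as tensor inputs.
# 2x3 tensor of uniform [0, 1) floats; seed/seed2 make the result reproducible
u = TensorFlow::RawOps.random_uniform(shape: [2, 3], seed: 7, seed2: 0, dtype: :float)
# Standard-normal samples with the same shape
n = TensorFlow::RawOps.random_standard_normal(shape: [2, 3], seed: 7, seed2: 0, dtype: :float)
# Integers drawn uniformly from [0, 10)
i = TensorFlow::RawOps.random_uniform_int(shape: [4], minval: 0, maxval: 10, seed: 7, seed2: 0)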
.range(start: nil, limit: nil, delta: nil) ⇒ Object
2793 2794 2795 |
# File 'lib/tensorflow/raw_ops.rb', line 2793 def range(start: nil, limit: nil, delta: nil) Utils.execute("Range", [start, limit, delta]) end |
.range_dataset(start: nil, stop: nil, step: nil, output_types: nil, output_shapes: nil) ⇒ Object
2797 2798 2799 |
# File 'lib/tensorflow/raw_ops.rb', line 2797 def range_dataset(start: nil, stop: nil, step: nil, output_types: nil, output_shapes: nil) Utils.execute("RangeDataset", [start, stop, step], output_types: output_types, output_shapes: output_shapes) end |
.rank(input: nil) ⇒ Object
2801 2802 2803 |
# File 'lib/tensorflow/raw_ops.rb', line 2801 def rank(input: nil) Utils.execute("Rank", [input]) end |
.read_file(filename: nil) ⇒ Object
2805 2806 2807 |
# File 'lib/tensorflow/raw_ops.rb', line 2805 def read_file(filename: nil) Utils.execute("ReadFile", [filename]) end |
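For example (a minimal sketch; the path is a placeholder):
# Returns the file contents as a scalar string tensor
contents = TensorFlow::RawOps.read_file(filename: "data/example.txt")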
.read_variable_op(resource: nil, dtype: nil) ⇒ Object
2809 2810 2811 |
# File 'lib/tensorflow/raw_ops.rb', line 2809 def read_variable_op(resource: nil, dtype: nil) Utils.execute("ReadVariableOp", [resource], dtype: dtype) end |
.reader_num_records_produced(reader_handle: nil) ⇒ Object
2813 2814 2815 |
# File 'lib/tensorflow/raw_ops.rb', line 2813 def reader_num_records_produced(reader_handle: nil) Utils.execute("ReaderNumRecordsProduced", [reader_handle]) end |
.reader_num_records_produced_v2(reader_handle: nil) ⇒ Object
2817 2818 2819 |
# File 'lib/tensorflow/raw_ops.rb', line 2817 def reader_num_records_produced_v2(reader_handle: nil) Utils.execute("ReaderNumRecordsProducedV2", [reader_handle]) end |
.reader_num_work_units_completed(reader_handle: nil) ⇒ Object
2821 2822 2823 |
# File 'lib/tensorflow/raw_ops.rb', line 2821 def reader_num_work_units_completed(reader_handle: nil) Utils.execute("ReaderNumWorkUnitsCompleted", [reader_handle]) end |
.reader_num_work_units_completed_v2(reader_handle: nil) ⇒ Object
2825 2826 2827 |
# File 'lib/tensorflow/raw_ops.rb', line 2825 def reader_num_work_units_completed_v2(reader_handle: nil) Utils.execute("ReaderNumWorkUnitsCompletedV2", [reader_handle]) end |
.reader_read(reader_handle: nil, queue_handle: nil) ⇒ Object
2829 2830 2831 |
# File 'lib/tensorflow/raw_ops.rb', line 2829 def reader_read(reader_handle: nil, queue_handle: nil) Utils.execute("ReaderRead", [reader_handle, queue_handle]) end |
.reader_read_up_to(reader_handle: nil, queue_handle: nil, num_records: nil) ⇒ Object
2833 2834 2835 |
# File 'lib/tensorflow/raw_ops.rb', line 2833 def reader_read_up_to(reader_handle: nil, queue_handle: nil, num_records: nil) Utils.execute("ReaderReadUpTo", [reader_handle, queue_handle, num_records]) end |
.reader_read_up_to_v2(reader_handle: nil, queue_handle: nil, num_records: nil) ⇒ Object
2837 2838 2839 |
# File 'lib/tensorflow/raw_ops.rb', line 2837 def reader_read_up_to_v2(reader_handle: nil, queue_handle: nil, num_records: nil) Utils.execute("ReaderReadUpToV2", [reader_handle, queue_handle, num_records]) end |
.reader_read_v2(reader_handle: nil, queue_handle: nil) ⇒ Object
2841 2842 2843 |
# File 'lib/tensorflow/raw_ops.rb', line 2841 def reader_read_v2(reader_handle: nil, queue_handle: nil) Utils.execute("ReaderReadV2", [reader_handle, queue_handle]) end |
.reader_reset(reader_handle: nil) ⇒ Object
2845 2846 2847 |
# File 'lib/tensorflow/raw_ops.rb', line 2845 def reader_reset(reader_handle: nil) Utils.execute("ReaderReset", [reader_handle]) end |
.reader_reset_v2(reader_handle: nil) ⇒ Object
2849 2850 2851 |
# File 'lib/tensorflow/raw_ops.rb', line 2849 def reader_reset_v2(reader_handle: nil) Utils.execute("ReaderResetV2", [reader_handle]) end |
.reader_restore_state(reader_handle: nil, state: nil) ⇒ Object
2853 2854 2855 |
# File 'lib/tensorflow/raw_ops.rb', line 2853 def reader_restore_state(reader_handle: nil, state: nil) Utils.execute("ReaderRestoreState", [reader_handle, state]) end |
.reader_restore_state_v2(reader_handle: nil, state: nil) ⇒ Object
2857 2858 2859 |
# File 'lib/tensorflow/raw_ops.rb', line 2857 def reader_restore_state_v2(reader_handle: nil, state: nil) Utils.execute("ReaderRestoreStateV2", [reader_handle, state]) end |
.reader_serialize_state(reader_handle: nil) ⇒ Object
2861 2862 2863 |
# File 'lib/tensorflow/raw_ops.rb', line 2861 def reader_serialize_state(reader_handle: nil) Utils.execute("ReaderSerializeState", [reader_handle]) end |
.reader_serialize_state_v2(reader_handle: nil) ⇒ Object
2865 2866 2867 |
# File 'lib/tensorflow/raw_ops.rb', line 2865 def reader_serialize_state_v2(reader_handle: nil) Utils.execute("ReaderSerializeStateV2", [reader_handle]) end |
.real(input: nil) ⇒ Object
2869 2870 2871 |
# File 'lib/tensorflow/raw_ops.rb', line 2869 def real(input: nil) Utils.execute("Real", [input]) end |
.real_div(x: nil, y: nil) ⇒ Object
2873 2874 2875 |
# File 'lib/tensorflow/raw_ops.rb', line 2873 def real_div(x: nil, y: nil) Utils.execute("RealDiv", [x, y]) end |
.reciprocal(x: nil) ⇒ Object
2877 2878 2879 |
# File 'lib/tensorflow/raw_ops.rb', line 2877 def reciprocal(x: nil) Utils.execute("Reciprocal", [x]) end |
.reciprocal_grad(y: nil, dy: nil) ⇒ Object
2881 2882 2883 |
# File 'lib/tensorflow/raw_ops.rb', line 2881 def reciprocal_grad(y: nil, dy: nil) Utils.execute("ReciprocalGrad", [y, dy]) end |
.record_input(file_pattern: nil, file_random_seed: nil, file_shuffle_shift_ratio: nil, file_buffer_size: nil, file_parallelism: nil, batch_size: nil, compression_type: nil) ⇒ Object
2885 2886 2887 |
# File 'lib/tensorflow/raw_ops.rb', line 2885 def record_input(file_pattern: nil, file_random_seed: nil, file_shuffle_shift_ratio: nil, file_buffer_size: nil, file_parallelism: nil, batch_size: nil, compression_type: nil) Utils.execute("RecordInput", [], file_pattern: file_pattern, file_random_seed: file_random_seed, file_shuffle_shift_ratio: file_shuffle_shift_ratio, file_buffer_size: file_buffer_size, file_parallelism: file_parallelism, batch_size: batch_size, compression_type: compression_type) end |
.recv_tpu_embedding_activations(num_outputs: nil, config: nil) ⇒ Object
2889 2890 2891 |
# File 'lib/tensorflow/raw_ops.rb', line 2889 def recv_tpu_embedding_activations(num_outputs: nil, config: nil) Utils.execute("RecvTPUEmbeddingActivations", [], num_outputs: num_outputs, config: config) end |
.reduce_dataset(input_dataset: nil, initial_state: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil, use_inter_op_parallelism: nil) ⇒ Object
2893 2894 2895 |
# File 'lib/tensorflow/raw_ops.rb', line 2893 def reduce_dataset(input_dataset: nil, initial_state: nil, other_arguments: nil, f: nil, output_types: nil, output_shapes: nil, use_inter_op_parallelism: nil) Utils.execute("ReduceDataset", [input_dataset, initial_state, other_arguments], f: f, output_types: output_types, output_shapes: output_shapes, use_inter_op_parallelism: use_inter_op_parallelism) end |
.reduce_join(inputs: nil, reduction_indices: nil, keep_dims: nil, separator: nil) ⇒ Object
2897 2898 2899 |
# File 'lib/tensorflow/raw_ops.rb', line 2897 def reduce_join(inputs: nil, reduction_indices: nil, keep_dims: nil, separator: nil) Utils.execute("ReduceJoin", [inputs, reduction_indices], keep_dims: keep_dims, separator: separator) end |
.ref_enter(data: nil, frame_name: nil, is_constant: nil, parallel_iterations: nil) ⇒ Object
2901 2902 2903 |
# File 'lib/tensorflow/raw_ops.rb', line 2901 def ref_enter(data: nil, frame_name: nil, is_constant: nil, parallel_iterations: nil) Utils.execute("RefEnter", [data], frame_name: frame_name, is_constant: is_constant, parallel_iterations: parallel_iterations) end |
.ref_exit(data: nil) ⇒ Object
2905 2906 2907 |
# File 'lib/tensorflow/raw_ops.rb', line 2905 def ref_exit(data: nil) Utils.execute("RefExit", [data]) end |
.ref_identity(input: nil) ⇒ Object
2909 2910 2911 |
# File 'lib/tensorflow/raw_ops.rb', line 2909 def ref_identity(input: nil) Utils.execute("RefIdentity", [input]) end |
.ref_merge(inputs: nil) ⇒ Object
2913 2914 2915 |
# File 'lib/tensorflow/raw_ops.rb', line 2913 def ref_merge(inputs: nil) Utils.execute("RefMerge", [inputs]) end |
.ref_next_iteration(data: nil) ⇒ Object
2917 2918 2919 |
# File 'lib/tensorflow/raw_ops.rb', line 2917 def ref_next_iteration(data: nil) Utils.execute("RefNextIteration", [data]) end |
.ref_select(index: nil, inputs: nil) ⇒ Object
2921 2922 2923 |
# File 'lib/tensorflow/raw_ops.rb', line 2921 def ref_select(index: nil, inputs: nil) Utils.execute("RefSelect", [index, inputs]) end |
.ref_switch(data: nil, pred: nil) ⇒ Object
2925 2926 2927 |
# File 'lib/tensorflow/raw_ops.rb', line 2925 def ref_switch(data: nil, pred: nil) Utils.execute("RefSwitch", [data, pred]) end |
.regex_full_match(input: nil, pattern: nil) ⇒ Object
2929 2930 2931 |
# File 'lib/tensorflow/raw_ops.rb', line 2929 def regex_full_match(input: nil, pattern: nil) Utils.execute("RegexFullMatch", [input, pattern]) end |
.regex_replace(input: nil, pattern: nil, rewrite: nil, replace_global: nil) ⇒ Object
2933 2934 2935 |
# File 'lib/tensorflow/raw_ops.rb', line 2933 def regex_replace(input: nil, pattern: nil, rewrite: nil, replace_global: nil) Utils.execute("RegexReplace", [input, pattern, rewrite], replace_global: replace_global) end |
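A small sketch combining the two regex ops above; plain Ruby strings are assumed to be accepted as string tensor inputs.
# Element-wise boolean: does each string match the pattern in full?
matches = TensorFlow::RawOps.regex_full_match(
  input: ["cat-42", "dog"], pattern: "[a-z]+-\\d+"
)
# Rewrite every run of digits; replace_global: true replaces all matches in each string
cleaned = TensorFlow::RawOps.regex_replace(
  input: ["cat-42", "dog-7"], pattern: "\\d+", rewrite: "N", replace_global: true
)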
.relu(features: nil) ⇒ Object
2937 2938 2939 |
# File 'lib/tensorflow/raw_ops.rb', line 2937 def relu(features: nil) Utils.execute("Relu", [features]) end |
.relu6(features: nil) ⇒ Object
2941 2942 2943 |
# File 'lib/tensorflow/raw_ops.rb', line 2941 def relu6(features: nil) Utils.execute("Relu6", [features]) end |
.relu6_grad(gradients: nil, features: nil) ⇒ Object
2945 2946 2947 |
# File 'lib/tensorflow/raw_ops.rb', line 2945 def relu6_grad(gradients: nil, features: nil) Utils.execute("Relu6Grad", [gradients, features]) end |
.relu_grad(gradients: nil, features: nil) ⇒ Object
2949 2950 2951 |
# File 'lib/tensorflow/raw_ops.rb', line 2949 def relu_grad(gradients: nil, features: nil) Utils.execute("ReluGrad", [gradients, features]) end |
.remote_call(target: nil, args: nil, f: nil) ⇒ Object
2953 2954 2955 |
# File 'lib/tensorflow/raw_ops.rb', line 2953 def remote_call(target: nil, args: nil, f: nil) Utils.execute("RemoteCall", [target, args], f: f) end |
.remote_fused_graph_execute(inputs: nil, serialized_remote_fused_graph_execute_info: nil) ⇒ Object
2957 2958 2959 |
# File 'lib/tensorflow/raw_ops.rb', line 2957 def remote_fused_graph_execute(inputs: nil, serialized_remote_fused_graph_execute_info: nil) Utils.execute("RemoteFusedGraphExecute", [inputs], serialized_remote_fused_graph_execute_info: serialized_remote_fused_graph_execute_info) end |
.repeat_dataset(input_dataset: nil, count: nil, output_types: nil, output_shapes: nil) ⇒ Object
2961 2962 2963 |
# File 'lib/tensorflow/raw_ops.rb', line 2961 def repeat_dataset(input_dataset: nil, count: nil, output_types: nil, output_shapes: nil) Utils.execute("RepeatDataset", [input_dataset, count], output_types: output_types, output_shapes: output_shapes) end |
.requantization_range(input: nil, input_min: nil, input_max: nil) ⇒ Object
2965 2966 2967 |
# File 'lib/tensorflow/raw_ops.rb', line 2965 def requantization_range(input: nil, input_min: nil, input_max: nil) Utils.execute("RequantizationRange", [input, input_min, input_max]) end |
.requantization_range_per_channel(input: nil, input_min: nil, input_max: nil, clip_value_max: nil) ⇒ Object
2969 2970 2971 |
# File 'lib/tensorflow/raw_ops.rb', line 2969 def requantization_range_per_channel(input: nil, input_min: nil, input_max: nil, clip_value_max: nil) Utils.execute("RequantizationRangePerChannel", [input, input_min, input_max], clip_value_max: clip_value_max) end |
.requantize(input: nil, input_min: nil, input_max: nil, requested_output_min: nil, requested_output_max: nil, out_type: nil) ⇒ Object
2973 2974 2975 |
# File 'lib/tensorflow/raw_ops.rb', line 2973 def requantize(input: nil, input_min: nil, input_max: nil, requested_output_min: nil, requested_output_max: nil, out_type: nil) Utils.execute("Requantize", [input, input_min, input_max, requested_output_min, requested_output_max], out_type: out_type) end |
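A sketch of the usual pairing of .requantization_range with .requantize; acc, acc_min and acc_max stand in for a qint32 accumulator and its float range produced by a quantized op elsewhere, and the :quint8 out_type symbol is an assumption about dtype encoding.
# Find the float range actually covered by the 32-bit accumulator...
out_min, out_max = TensorFlow::RawOps.requantization_range(
  input: acc, input_min: acc_min, input_max: acc_max
)
# ...then narrow it back down to 8 bits over that range
q8, q8_min, q8_max = TensorFlow::RawOps.requantize(
  input: acc, input_min: acc_min, input_max: acc_max,
  requested_output_min: out_min, requested_output_max: out_max,
  out_type: :quint8
)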
.requantize_per_channel(input: nil, input_min: nil, input_max: nil, requested_output_min: nil, requested_output_max: nil, out_type: nil) ⇒ Object
2977 2978 2979 |
# File 'lib/tensorflow/raw_ops.rb', line 2977 def requantize_per_channel(input: nil, input_min: nil, input_max: nil, requested_output_min: nil, requested_output_max: nil, out_type: nil) Utils.execute("RequantizePerChannel", [input, input_min, input_max, requested_output_min, requested_output_max], out_type: out_type) end |
.reshape(tensor: nil, shape: nil) ⇒ Object
2981 2982 2983 |
# File 'lib/tensorflow/raw_ops.rb', line 2981 def reshape(tensor: nil, shape: nil) Utils.execute("Reshape", [tensor, shape]) end |
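A sketch combining .range and .reshape; plain Ruby numbers and arrays are assumed to be accepted as tensor inputs.
# 0, 1, ..., 11 laid out as a 3x4 matrix; -1 lets Reshape infer that dimension
values = TensorFlow::RawOps.range(start: 0, limit: 12, delta: 1)
matrix = TensorFlow::RawOps.reshape(tensor: values, shape: [3, -1])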
.resize_area(images: nil, size: nil, align_corners: nil) ⇒ Object
2985 2986 2987 |
# File 'lib/tensorflow/raw_ops.rb', line 2985 def resize_area(images: nil, size: nil, align_corners: nil) Utils.execute("ResizeArea", [images, size], align_corners: align_corners) end |
.resize_bicubic(images: nil, size: nil, align_corners: nil, half_pixel_centers: nil) ⇒ Object
2989 2990 2991 |
# File 'lib/tensorflow/raw_ops.rb', line 2989 def resize_bicubic(images: nil, size: nil, align_corners: nil, half_pixel_centers: nil) Utils.execute("ResizeBicubic", [images, size], align_corners: align_corners, half_pixel_centers: half_pixel_centers) end |
.resize_bicubic_grad(grads: nil, original_image: nil, align_corners: nil, half_pixel_centers: nil) ⇒ Object
2993 2994 2995 |
# File 'lib/tensorflow/raw_ops.rb', line 2993 def resize_bicubic_grad(grads: nil, original_image: nil, align_corners: nil, half_pixel_centers: nil) Utils.execute("ResizeBicubicGrad", [grads, original_image], align_corners: align_corners, half_pixel_centers: half_pixel_centers) end |
.resize_bilinear(images: nil, size: nil, align_corners: nil, half_pixel_centers: nil) ⇒ Object
2997 2998 2999 |
# File 'lib/tensorflow/raw_ops.rb', line 2997 def resize_bilinear(images: nil, size: nil, align_corners: nil, half_pixel_centers: nil) Utils.execute("ResizeBilinear", [images, size], align_corners: align_corners, half_pixel_centers: half_pixel_centers) end |
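A sketch of .resize_bilinear; `images` stands in for a [batch, height, width, channels] float tensor obtained elsewhere.
# Resize to 224x224; half_pixel_centers: true selects the newer sampling convention
resized = TensorFlow::RawOps.resize_bilinear(
  images: images, size: [224, 224],
  align_corners: false, half_pixel_centers: true
)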
.resize_bilinear_grad(grads: nil, original_image: nil, align_corners: nil, half_pixel_centers: nil) ⇒ Object
3001 3002 3003 |
# File 'lib/tensorflow/raw_ops.rb', line 3001 def resize_bilinear_grad(grads: nil, original_image: nil, align_corners: nil, half_pixel_centers: nil) Utils.execute("ResizeBilinearGrad", [grads, original_image], align_corners: align_corners, half_pixel_centers: half_pixel_centers) end |
.resize_nearest_neighbor(images: nil, size: nil, align_corners: nil, half_pixel_centers: nil) ⇒ Object
3005 3006 3007 |
# File 'lib/tensorflow/raw_ops.rb', line 3005 def resize_nearest_neighbor(images: nil, size: nil, align_corners: nil, half_pixel_centers: nil) Utils.execute("ResizeNearestNeighbor", [images, size], align_corners: align_corners, half_pixel_centers: half_pixel_centers) end |
.resize_nearest_neighbor_grad(grads: nil, size: nil, align_corners: nil, half_pixel_centers: nil) ⇒ Object
3009 3010 3011 |
# File 'lib/tensorflow/raw_ops.rb', line 3009 def resize_nearest_neighbor_grad(grads: nil, size: nil, align_corners: nil, half_pixel_centers: nil) Utils.execute("ResizeNearestNeighborGrad", [grads, size], align_corners: align_corners, half_pixel_centers: half_pixel_centers) end |
.resource_apply_ada_max(var: nil, m: nil, v: nil, beta1_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
3013 3014 3015 |
# File 'lib/tensorflow/raw_ops.rb', line 3013 def resource_apply_ada_max(var: nil, m: nil, v: nil, beta1_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil) Utils.execute("ResourceApplyAdaMax", [var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad], use_locking: use_locking) end |
.resource_apply_adadelta(var: nil, accum: nil, accum_update: nil, lr: nil, rho: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
3017 3018 3019 |
# File 'lib/tensorflow/raw_ops.rb', line 3017 def resource_apply_adadelta(var: nil, accum: nil, accum_update: nil, lr: nil, rho: nil, epsilon: nil, grad: nil, use_locking: nil) Utils.execute("ResourceApplyAdadelta", [var, accum, accum_update, lr, rho, epsilon, grad], use_locking: use_locking) end |
.resource_apply_adagrad(var: nil, accum: nil, lr: nil, grad: nil, use_locking: nil, update_slots: nil) ⇒ Object
3021 3022 3023 |
# File 'lib/tensorflow/raw_ops.rb', line 3021 def resource_apply_adagrad(var: nil, accum: nil, lr: nil, grad: nil, use_locking: nil, update_slots: nil) Utils.execute("ResourceApplyAdagrad", [var, accum, lr, grad], use_locking: use_locking, update_slots: update_slots) end |
.resource_apply_adagrad_da(var: nil, gradient_accumulator: nil, gradient_squared_accumulator: nil, grad: nil, lr: nil, l1: nil, l2: nil, global_step: nil, use_locking: nil) ⇒ Object
3025 3026 3027 |
# File 'lib/tensorflow/raw_ops.rb', line 3025 def resource_apply_adagrad_da(var: nil, gradient_accumulator: nil, gradient_squared_accumulator: nil, grad: nil, lr: nil, l1: nil, l2: nil, global_step: nil, use_locking: nil) Utils.execute("ResourceApplyAdagradDA", [var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step], use_locking: use_locking) end |
.resource_apply_adam(var: nil, m: nil, v: nil, beta1_power: nil, beta2_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
3029 3030 3031 |
# File 'lib/tensorflow/raw_ops.rb', line 3029 def resource_apply_adam(var: nil, m: nil, v: nil, beta1_power: nil, beta2_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil, use_nesterov: nil) Utils.execute("ResourceApplyAdam", [var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad], use_locking: use_locking, use_nesterov: use_nesterov) end |
.resource_apply_adam_with_amsgrad(var: nil, m: nil, v: nil, vhat: nil, beta1_power: nil, beta2_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
3033 3034 3035 |
# File 'lib/tensorflow/raw_ops.rb', line 3033 def resource_apply_adam_with_amsgrad(var: nil, m: nil, v: nil, vhat: nil, beta1_power: nil, beta2_power: nil, lr: nil, beta1: nil, beta2: nil, epsilon: nil, grad: nil, use_locking: nil) Utils.execute("ResourceApplyAdamWithAmsgrad", [var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad], use_locking: use_locking) end |
.resource_apply_add_sign(var: nil, m: nil, lr: nil, alpha: nil, sign_decay: nil, beta: nil, grad: nil, use_locking: nil) ⇒ Object
3037 3038 3039 |
# File 'lib/tensorflow/raw_ops.rb', line 3037 def resource_apply_add_sign(var: nil, m: nil, lr: nil, alpha: nil, sign_decay: nil, beta: nil, grad: nil, use_locking: nil) Utils.execute("ResourceApplyAddSign", [var, m, lr, alpha, sign_decay, beta, grad], use_locking: use_locking) end |
.resource_apply_centered_rms_prop(var: nil, mg: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
3041 3042 3043 |
# File 'lib/tensorflow/raw_ops.rb', line 3041 def resource_apply_centered_rms_prop(var: nil, mg: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, use_locking: nil) Utils.execute("ResourceApplyCenteredRMSProp", [var, mg, ms, mom, lr, rho, momentum, epsilon, grad], use_locking: use_locking) end |
.resource_apply_ftrl(var: nil, accum: nil, linear: nil, grad: nil, lr: nil, l1: nil, l2: nil, lr_power: nil, use_locking: nil) ⇒ Object
3045 3046 3047 |
# File 'lib/tensorflow/raw_ops.rb', line 3045 def resource_apply_ftrl(var: nil, accum: nil, linear: nil, grad: nil, lr: nil, l1: nil, l2: nil, lr_power: nil, use_locking: nil) Utils.execute("ResourceApplyFtrl", [var, accum, linear, grad, lr, l1, l2, lr_power], use_locking: use_locking) end |
.resource_apply_ftrl_v2(var: nil, accum: nil, linear: nil, grad: nil, lr: nil, l1: nil, l2: nil, l2_shrinkage: nil, lr_power: nil, use_locking: nil) ⇒ Object
3049 3050 3051 |
# File 'lib/tensorflow/raw_ops.rb', line 3049 def resource_apply_ftrl_v2(var: nil, accum: nil, linear: nil, grad: nil, lr: nil, l1: nil, l2: nil, l2_shrinkage: nil, lr_power: nil, use_locking: nil) Utils.execute("ResourceApplyFtrlV2", [var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power], use_locking: use_locking) end |
.resource_apply_gradient_descent(var: nil, alpha: nil, delta: nil, use_locking: nil) ⇒ Object
3053 3054 3055 |
# File 'lib/tensorflow/raw_ops.rb', line 3053 def resource_apply_gradient_descent(var: nil, alpha: nil, delta: nil, use_locking: nil) Utils.execute("ResourceApplyGradientDescent", [var, alpha, delta], use_locking: use_locking) end |
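A heavily hedged sketch of a single gradient-descent step on a resource variable; `var_handle` and `grad` are placeholders for a resource handle and a gradient tensor created by ops outside this excerpt (e.g. a VarHandleOp and a gradient computation).
# var <- var - alpha * delta, updating the variable in place
TensorFlow::RawOps.resource_apply_gradient_descent(
  var: var_handle, alpha: 0.01, delta: grad, use_locking: false
)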
.resource_apply_keras_momentum(var: nil, accum: nil, lr: nil, grad: nil, momentum: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
3057 3058 3059 |
# File 'lib/tensorflow/raw_ops.rb', line 3057 def resource_apply_keras_momentum(var: nil, accum: nil, lr: nil, grad: nil, momentum: nil, use_locking: nil, use_nesterov: nil) Utils.execute("ResourceApplyKerasMomentum", [var, accum, lr, grad, momentum], use_locking: use_locking, use_nesterov: use_nesterov) end |
.resource_apply_momentum(var: nil, accum: nil, lr: nil, grad: nil, momentum: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
3061 3062 3063 |
# File 'lib/tensorflow/raw_ops.rb', line 3061 def resource_apply_momentum(var: nil, accum: nil, lr: nil, grad: nil, momentum: nil, use_locking: nil, use_nesterov: nil) Utils.execute("ResourceApplyMomentum", [var, accum, lr, grad, momentum], use_locking: use_locking, use_nesterov: use_nesterov) end |
.resource_apply_power_sign(var: nil, m: nil, lr: nil, logbase: nil, sign_decay: nil, beta: nil, grad: nil, use_locking: nil) ⇒ Object
3065 3066 3067 |
# File 'lib/tensorflow/raw_ops.rb', line 3065 def resource_apply_power_sign(var: nil, m: nil, lr: nil, logbase: nil, sign_decay: nil, beta: nil, grad: nil, use_locking: nil) Utils.execute("ResourceApplyPowerSign", [var, m, lr, logbase, sign_decay, beta, grad], use_locking: use_locking) end |
.resource_apply_proximal_adagrad(var: nil, accum: nil, lr: nil, l1: nil, l2: nil, grad: nil, use_locking: nil) ⇒ Object
3069 3070 3071 |
# File 'lib/tensorflow/raw_ops.rb', line 3069 def resource_apply_proximal_adagrad(var: nil, accum: nil, lr: nil, l1: nil, l2: nil, grad: nil, use_locking: nil) Utils.execute("ResourceApplyProximalAdagrad", [var, accum, lr, l1, l2, grad], use_locking: use_locking) end |
.resource_apply_proximal_gradient_descent(var: nil, alpha: nil, l1: nil, l2: nil, delta: nil, use_locking: nil) ⇒ Object
3073 3074 3075 |
# File 'lib/tensorflow/raw_ops.rb', line 3073 def resource_apply_proximal_gradient_descent(var: nil, alpha: nil, l1: nil, l2: nil, delta: nil, use_locking: nil) Utils.execute("ResourceApplyProximalGradientDescent", [var, alpha, l1, l2, delta], use_locking: use_locking) end |
.resource_apply_rms_prop(var: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, use_locking: nil) ⇒ Object
3077 3078 3079 |
# File 'lib/tensorflow/raw_ops.rb', line 3077 def resource_apply_rms_prop(var: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, use_locking: nil) Utils.execute("ResourceApplyRMSProp", [var, ms, mom, lr, rho, momentum, epsilon, grad], use_locking: use_locking) end |
.resource_count_up_to(resource: nil, limit: nil) ⇒ Object
3081 3082 3083 |
# File 'lib/tensorflow/raw_ops.rb', line 3081 def resource_count_up_to(resource: nil, limit: nil) Utils.execute("ResourceCountUpTo", [resource], limit: limit) end |
.resource_gather(resource: nil, indices: nil, batch_dims: nil, validate_indices: nil, dtype: nil) ⇒ Object
3085 3086 3087 |
# File 'lib/tensorflow/raw_ops.rb', line 3085 def resource_gather(resource: nil, indices: nil, batch_dims: nil, validate_indices: nil, dtype: nil) Utils.execute("ResourceGather", [resource, indices], batch_dims: batch_dims, validate_indices: validate_indices, dtype: dtype) end |
.resource_gather_nd(resource: nil, indices: nil, dtype: nil) ⇒ Object
3089 3090 3091 |
# File 'lib/tensorflow/raw_ops.rb', line 3089 def resource_gather_nd(resource: nil, indices: nil, dtype: nil) Utils.execute("ResourceGatherNd", [resource, indices], dtype: dtype) end |
.resource_scatter_add(resource: nil, indices: nil, updates: nil, dtype: nil) ⇒ Object
3093 3094 3095 |
# File 'lib/tensorflow/raw_ops.rb', line 3093 def resource_scatter_add(resource: nil, indices: nil, updates: nil, dtype: nil) Utils.execute("ResourceScatterAdd", [resource, indices, updates], dtype: dtype) end |
.resource_scatter_div(resource: nil, indices: nil, updates: nil, dtype: nil) ⇒ Object
3097 3098 3099 |
# File 'lib/tensorflow/raw_ops.rb', line 3097 def resource_scatter_div(resource: nil, indices: nil, updates: nil, dtype: nil) Utils.execute("ResourceScatterDiv", [resource, indices, updates], dtype: dtype) end |
.resource_scatter_max(resource: nil, indices: nil, updates: nil, dtype: nil) ⇒ Object
3101 3102 3103 |
# File 'lib/tensorflow/raw_ops.rb', line 3101 def resource_scatter_max(resource: nil, indices: nil, updates: nil, dtype: nil) Utils.execute("ResourceScatterMax", [resource, indices, updates], dtype: dtype) end |
.resource_scatter_min(resource: nil, indices: nil, updates: nil, dtype: nil) ⇒ Object
3105 3106 3107 |
# File 'lib/tensorflow/raw_ops.rb', line 3105 def resource_scatter_min(resource: nil, indices: nil, updates: nil, dtype: nil) Utils.execute("ResourceScatterMin", [resource, indices, updates], dtype: dtype) end |
.resource_scatter_mul(resource: nil, indices: nil, updates: nil, dtype: nil) ⇒ Object
3109 3110 3111 |
# File 'lib/tensorflow/raw_ops.rb', line 3109 def resource_scatter_mul(resource: nil, indices: nil, updates: nil, dtype: nil) Utils.execute("ResourceScatterMul", [resource, indices, updates], dtype: dtype) end |
.resource_scatter_nd_add(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
3113 3114 3115 |
# File 'lib/tensorflow/raw_ops.rb', line 3113 def resource_scatter_nd_add(ref: nil, indices: nil, updates: nil, use_locking: nil) Utils.execute("ResourceScatterNdAdd", [ref, indices, updates], use_locking: use_locking) end |
.resource_scatter_nd_sub(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
3117 3118 3119 |
# File 'lib/tensorflow/raw_ops.rb', line 3117 def resource_scatter_nd_sub(ref: nil, indices: nil, updates: nil, use_locking: nil) Utils.execute("ResourceScatterNdSub", [ref, indices, updates], use_locking: use_locking) end |
.resource_scatter_nd_update(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
3121 3122 3123 |
# File 'lib/tensorflow/raw_ops.rb', line 3121 def resource_scatter_nd_update(ref: nil, indices: nil, updates: nil, use_locking: nil) Utils.execute("ResourceScatterNdUpdate", [ref, indices, updates], use_locking: use_locking) end |
.resource_scatter_sub(resource: nil, indices: nil, updates: nil, dtype: nil) ⇒ Object
3125 3126 3127 |
# File 'lib/tensorflow/raw_ops.rb', line 3125 def resource_scatter_sub(resource: nil, indices: nil, updates: nil, dtype: nil) Utils.execute("ResourceScatterSub", [resource, indices, updates], dtype: dtype) end |
.resource_scatter_update(resource: nil, indices: nil, updates: nil, dtype: nil) ⇒ Object
3129 3130 3131 |
# File 'lib/tensorflow/raw_ops.rb', line 3129 def resource_scatter_update(resource: nil, indices: nil, updates: nil, dtype: nil) Utils.execute("ResourceScatterUpdate", [resource, indices, updates], dtype: dtype) end |
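A sketch of a sparse in-place update on a resource variable; `var_handle` is again a placeholder for a handle created elsewhere, the rows and values are illustrative, and the :float dtype symbol is an assumed encoding.
# Overwrite rows 0 and 2 of the variable with the given rows
TensorFlow::RawOps.resource_scatter_update(
  resource: var_handle, indices: [0, 2],
  updates: [[1.0, 1.0], [2.0, 2.0]],
  dtype: :float   # dtype attribute encoding is assumed
)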
.resource_sparse_apply_adadelta(var: nil, accum: nil, accum_update: nil, lr: nil, rho: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
3133 3134 3135 |
# File 'lib/tensorflow/raw_ops.rb', line 3133 def resource_sparse_apply_adadelta(var: nil, accum: nil, accum_update: nil, lr: nil, rho: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) Utils.execute("ResourceSparseApplyAdadelta", [var, accum, accum_update, lr, rho, epsilon, grad, indices], use_locking: use_locking) end |
.resource_sparse_apply_adagrad(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, use_locking: nil, update_slots: nil) ⇒ Object
3137 3138 3139 |
# File 'lib/tensorflow/raw_ops.rb', line 3137 def resource_sparse_apply_adagrad(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, use_locking: nil, update_slots: nil) Utils.execute("ResourceSparseApplyAdagrad", [var, accum, lr, grad, indices], use_locking: use_locking, update_slots: update_slots) end |
.resource_sparse_apply_adagrad_da(var: nil, gradient_accumulator: nil, gradient_squared_accumulator: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, global_step: nil, use_locking: nil) ⇒ Object
3141 3142 3143 |
# File 'lib/tensorflow/raw_ops.rb', line 3141 def resource_sparse_apply_adagrad_da(var: nil, gradient_accumulator: nil, gradient_squared_accumulator: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, global_step: nil, use_locking: nil) Utils.execute("ResourceSparseApplyAdagradDA", [var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step], use_locking: use_locking) end |
.resource_sparse_apply_centered_rms_prop(var: nil, mg: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
3145 3146 3147 |
# File 'lib/tensorflow/raw_ops.rb', line 3145 def resource_sparse_apply_centered_rms_prop(var: nil, mg: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) Utils.execute("ResourceSparseApplyCenteredRMSProp", [var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices], use_locking: use_locking) end |
.resource_sparse_apply_ftrl(var: nil, accum: nil, linear: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, lr_power: nil, use_locking: nil) ⇒ Object
3149 3150 3151 |
# File 'lib/tensorflow/raw_ops.rb', line 3149 def resource_sparse_apply_ftrl(var: nil, accum: nil, linear: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, lr_power: nil, use_locking: nil) Utils.execute("ResourceSparseApplyFtrl", [var, accum, linear, grad, indices, lr, l1, l2, lr_power], use_locking: use_locking) end |
.resource_sparse_apply_ftrl_v2(var: nil, accum: nil, linear: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, l2_shrinkage: nil, lr_power: nil, use_locking: nil) ⇒ Object
3153 3154 3155 |
# File 'lib/tensorflow/raw_ops.rb', line 3153 def resource_sparse_apply_ftrl_v2(var: nil, accum: nil, linear: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, l2_shrinkage: nil, lr_power: nil, use_locking: nil) Utils.execute("ResourceSparseApplyFtrlV2", [var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power], use_locking: use_locking) end |
.resource_sparse_apply_keras_momentum(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, momentum: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
3157 3158 3159 |
# File 'lib/tensorflow/raw_ops.rb', line 3157 def resource_sparse_apply_keras_momentum(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, momentum: nil, use_locking: nil, use_nesterov: nil) Utils.execute("ResourceSparseApplyKerasMomentum", [var, accum, lr, grad, indices, momentum], use_locking: use_locking, use_nesterov: use_nesterov) end |
.resource_sparse_apply_momentum(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, momentum: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
3161 3162 3163 |
# File 'lib/tensorflow/raw_ops.rb', line 3161 def resource_sparse_apply_momentum(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, momentum: nil, use_locking: nil, use_nesterov: nil) Utils.execute("ResourceSparseApplyMomentum", [var, accum, lr, grad, indices, momentum], use_locking: use_locking, use_nesterov: use_nesterov) end |
.resource_sparse_apply_proximal_adagrad(var: nil, accum: nil, lr: nil, l1: nil, l2: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
3165 3166 3167 |
# File 'lib/tensorflow/raw_ops.rb', line 3165 def resource_sparse_apply_proximal_adagrad(var: nil, accum: nil, lr: nil, l1: nil, l2: nil, grad: nil, indices: nil, use_locking: nil) Utils.execute("ResourceSparseApplyProximalAdagrad", [var, accum, lr, l1, l2, grad, indices], use_locking: use_locking) end |
.resource_sparse_apply_proximal_gradient_descent(var: nil, alpha: nil, l1: nil, l2: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
3169 3170 3171 |
# File 'lib/tensorflow/raw_ops.rb', line 3169 def resource_sparse_apply_proximal_gradient_descent(var: nil, alpha: nil, l1: nil, l2: nil, grad: nil, indices: nil, use_locking: nil) Utils.execute("ResourceSparseApplyProximalGradientDescent", [var, alpha, l1, l2, grad, indices], use_locking: use_locking) end |
.resource_sparse_apply_rms_prop(var: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
3173 3174 3175 |
# File 'lib/tensorflow/raw_ops.rb', line 3173 def resource_sparse_apply_rms_prop(var: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) Utils.execute("ResourceSparseApplyRMSProp", [var, ms, mom, lr, rho, momentum, epsilon, grad, indices], use_locking: use_locking) end |
.resource_strided_slice_assign(ref: nil, start: nil, stop: nil, strides: nil, value: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) ⇒ Object
3177 3178 3179 |
# File 'lib/tensorflow/raw_ops.rb', line 3177 def resource_strided_slice_assign(ref: nil, start: nil, stop: nil, strides: nil, value: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) Utils.execute("ResourceStridedSliceAssign", [ref, start, stop, strides, value], begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, new_axis_mask: new_axis_mask, shrink_axis_mask: shrink_axis_mask) end |
.restore(file_pattern: nil, tensor_name: nil, dt: nil, preferred_shard: nil) ⇒ Object
3181 3182 3183 |
# File 'lib/tensorflow/raw_ops.rb', line 3181 def restore(file_pattern: nil, tensor_name: nil, dt: nil, preferred_shard: nil) Utils.execute("Restore", [file_pattern, tensor_name], dt: dt, preferred_shard: preferred_shard) end |
.restore_slice(file_pattern: nil, tensor_name: nil, shape_and_slice: nil, dt: nil, preferred_shard: nil) ⇒ Object
3185 3186 3187 |
# File 'lib/tensorflow/raw_ops.rb', line 3185 def restore_slice(file_pattern: nil, tensor_name: nil, shape_and_slice: nil, dt: nil, preferred_shard: nil) Utils.execute("RestoreSlice", [file_pattern, tensor_name, shape_and_slice], dt: dt, preferred_shard: preferred_shard) end |
.restore_v2(prefix: nil, tensor_names: nil, shape_and_slices: nil, dtypes: nil) ⇒ Object
3189 3190 3191 |
# File 'lib/tensorflow/raw_ops.rb', line 3189 def restore_v2(prefix: nil, tensor_names: nil, shape_and_slices: nil, dtypes: nil) Utils.execute("RestoreV2", [prefix, tensor_names, shape_and_slices], dtypes: dtypes) end |
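A sketch of restoring two tensors from a checkpoint prefix; the prefix, tensor names and :float dtype symbols are placeholders and assumptions rather than values taken from this page.
# Empty shape_and_slices entries mean "restore the whole tensor"
weights, bias = TensorFlow::RawOps.restore_v2(
  prefix: "ckpt/model",
  tensor_names: ["dense/kernel", "dense/bias"],
  shape_and_slices: ["", ""],
  dtypes: [:float, :float]
)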
.retrieve_tpu_embedding_adadelta_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3201 3202 3203 |
# File 'lib/tensorflow/raw_ops.rb', line 3201 def retrieve_tpu_embedding_adadelta_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingAdadeltaParameters", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3205 3206 3207 |
# File 'lib/tensorflow/raw_ops.rb', line 3205 def retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_adagrad_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3209 3210 3211 |
# File 'lib/tensorflow/raw_ops.rb', line 3209 def retrieve_tpu_embedding_adagrad_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingAdagradParameters", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3213 3214 3215 |
# File 'lib/tensorflow/raw_ops.rb', line 3213 def retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_adam_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3193 3194 3195 |
# File 'lib/tensorflow/raw_ops.rb', line 3193 def retrieve_tpu_embedding_adam_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingADAMParameters", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_adam_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3197 3198 3199 |
# File 'lib/tensorflow/raw_ops.rb', line 3197 def retrieve_tpu_embedding_adam_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingADAMParametersGradAccumDebug", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_centered_rms_prop_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3217 3218 3219 |
# File 'lib/tensorflow/raw_ops.rb', line 3217 def retrieve_tpu_embedding_centered_rms_prop_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingCenteredRMSPropParameters", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_ftrl_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3221 3222 3223 |
# File 'lib/tensorflow/raw_ops.rb', line 3221 def retrieve_tpu_embedding_ftrl_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingFTRLParameters", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3225 3226 3227 |
# File 'lib/tensorflow/raw_ops.rb', line 3225 def retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_mdl_adagrad_light_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3229 3230 3231 |
# File 'lib/tensorflow/raw_ops.rb', line 3229 def retrieve_tpu_embedding_mdl_adagrad_light_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingMDLAdagradLightParameters", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_momentum_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3233 3234 3235 |
# File 'lib/tensorflow/raw_ops.rb', line 3233 def retrieve_tpu_embedding_momentum_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingMomentumParameters", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3237 3238 3239 |
# File 'lib/tensorflow/raw_ops.rb', line 3237 def retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_proximal_adagrad_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3241 3242 3243 |
# File 'lib/tensorflow/raw_ops.rb', line 3241 def retrieve_tpu_embedding_proximal_adagrad_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingProximalAdagradParameters", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3245 3246 3247 |
# File 'lib/tensorflow/raw_ops.rb', line 3245 def retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_rms_prop_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3249 3250 3251 |
# File 'lib/tensorflow/raw_ops.rb', line 3249 def retrieve_tpu_embedding_rms_prop_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingRMSPropParameters", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3253 3254 3255 |
# File 'lib/tensorflow/raw_ops.rb', line 3253 def retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.retrieve_tpu_embedding_stochastic_gradient_descent_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) ⇒ Object
3257 3258 3259 |
# File 'lib/tensorflow/raw_ops.rb', line 3257 def retrieve_tpu_embedding_stochastic_gradient_descent_parameters(table_id: nil, table_name: nil, num_shards: nil, shard_id: nil) Utils.execute("RetrieveTPUEmbeddingStochasticGradientDescentParameters", [], table_id: table_id, table_name: table_name, num_shards: num_shards, shard_id: shard_id) end |
.reverse(tensor: nil, dims: nil) ⇒ Object
3261 3262 3263 |
# File 'lib/tensorflow/raw_ops.rb', line 3261 def reverse(tensor: nil, dims: nil) Utils.execute("Reverse", [tensor, dims]) end |
.reverse_sequence(input: nil, seq_lengths: nil, seq_dim: nil, batch_dim: nil) ⇒ Object
3265 3266 3267 |
# File 'lib/tensorflow/raw_ops.rb', line 3265 def reverse_sequence(input: nil, seq_lengths: nil, seq_dim: nil, batch_dim: nil) Utils.execute("ReverseSequence", [input, seq_lengths], seq_dim: seq_dim, batch_dim: batch_dim) end |
.reverse_v2(tensor: nil, axis: nil) ⇒ Object
3269 3270 3271 |
# File 'lib/tensorflow/raw_ops.rb', line 3269 def reverse_v2(tensor: nil, axis: nil) Utils.execute("ReverseV2", [tensor, axis]) end |
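As a quick illustration of how these raw ops are called, here is a minimal sketch of ReverseV2. It assumes eager execution and that plain Ruby arrays are accepted where tensors are expected (if not, wrap the inputs with the library's tensor constructor first); the values are made up.

  require "tensorflow"

  # Reverse a 2-D tensor along its second dimension (axis 1).
  m = [[1, 2, 3], [4, 5, 6]]
  reversed = TensorFlow::RawOps.reverse_v2(tensor: m, axis: [1])
  # Expected values: [[3, 2, 1], [6, 5, 4]]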
.rfft(input: nil, fft_length: nil) ⇒ Object
2713 2714 2715 |
# File 'lib/tensorflow/raw_ops.rb', line 2713 def rfft(input: nil, fft_length: nil) Utils.execute("RFFT", [input, fft_length]) end |
.rfft2d(input: nil, fft_length: nil) ⇒ Object
2717 2718 2719 |
# File 'lib/tensorflow/raw_ops.rb', line 2717 def rfft2d(input: nil, fft_length: nil) Utils.execute("RFFT2D", [input, fft_length]) end |
.rfft3d(input: nil, fft_length: nil) ⇒ Object
2721 2722 2723 |
# File 'lib/tensorflow/raw_ops.rb', line 2721 def rfft3d(input: nil, fft_length: nil) Utils.execute("RFFT3D", [input, fft_length]) end |
.rgb_to_hsv(images: nil) ⇒ Object
2725 2726 2727 |
# File 'lib/tensorflow/raw_ops.rb', line 2725 def rgb_to_hsv(images: nil) Utils.execute("RGBToHSV", [images]) end |
.right_shift(x: nil, y: nil) ⇒ Object
3273 3274 3275 |
# File 'lib/tensorflow/raw_ops.rb', line 3273 def right_shift(x: nil, y: nil) Utils.execute("RightShift", [x, y]) end |
.rint(x: nil) ⇒ Object
3277 3278 3279 |
# File 'lib/tensorflow/raw_ops.rb', line 3277 def rint(x: nil) Utils.execute("Rint", [x]) end |
.rng_skip(resource: nil, algorithm: nil, delta: nil) ⇒ Object
3281 3282 3283 |
# File 'lib/tensorflow/raw_ops.rb', line 3281 def rng_skip(resource: nil, algorithm: nil, delta: nil) Utils.execute("RngSkip", [resource, algorithm, delta]) end |
.roll(input: nil, shift: nil, axis: nil) ⇒ Object
3285 3286 3287 |
# File 'lib/tensorflow/raw_ops.rb', line 3285 def roll(input: nil, shift: nil, axis: nil) Utils.execute("Roll", [input, shift, axis]) end |
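A hedged sketch of Roll under the same array-to-tensor assumption: the op circularly shifts elements along the given axis.

  require "tensorflow"

  # Shift a vector two positions to the right along axis 0; elements wrap around.
  rolled = TensorFlow::RawOps.roll(input: [1, 2, 3, 4, 5], shift: 2, axis: 0)
  # Expected values: [4, 5, 1, 2, 3]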
.round(x: nil) ⇒ Object
3289 3290 3291 |
# File 'lib/tensorflow/raw_ops.rb', line 3289 def round(x: nil) Utils.execute("Round", [x]) end |
.rpc(address: nil, method: nil, request: nil, protocol: nil, fail_fast: nil, timeout_in_ms: nil) ⇒ Object
3293 3294 3295 |
# File 'lib/tensorflow/raw_ops.rb', line 3293 def rpc(address: nil, method: nil, request: nil, protocol: nil, fail_fast: nil, timeout_in_ms: nil) Utils.execute("Rpc", [address, method, request], protocol: protocol, fail_fast: fail_fast, timeout_in_ms: timeout_in_ms) end |
.rsqrt(x: nil) ⇒ Object
3297 3298 3299 |
# File 'lib/tensorflow/raw_ops.rb', line 3297 def rsqrt(x: nil) Utils.execute("Rsqrt", [x]) end |
.rsqrt_grad(y: nil, dy: nil) ⇒ Object
3301 3302 3303 |
# File 'lib/tensorflow/raw_ops.rb', line 3301 def rsqrt_grad(y: nil, dy: nil) Utils.execute("RsqrtGrad", [y, dy]) end |
.sample_distorted_bounding_box(image_size: nil, bounding_boxes: nil, seed: nil, seed2: nil, min_object_covered: nil, aspect_ratio_range: nil, area_range: nil, max_attempts: nil, use_image_if_no_bounding_boxes: nil) ⇒ Object
3305 3306 3307 |
# File 'lib/tensorflow/raw_ops.rb', line 3305 def sample_distorted_bounding_box(image_size: nil, bounding_boxes: nil, seed: nil, seed2: nil, min_object_covered: nil, aspect_ratio_range: nil, area_range: nil, max_attempts: nil, use_image_if_no_bounding_boxes: nil) Utils.execute("SampleDistortedBoundingBox", [image_size, bounding_boxes], seed: seed, seed2: seed2, min_object_covered: min_object_covered, aspect_ratio_range: aspect_ratio_range, area_range: area_range, max_attempts: max_attempts, use_image_if_no_bounding_boxes: use_image_if_no_bounding_boxes) end |
.sample_distorted_bounding_box_v2(image_size: nil, bounding_boxes: nil, min_object_covered: nil, seed: nil, seed2: nil, aspect_ratio_range: nil, area_range: nil, max_attempts: nil, use_image_if_no_bounding_boxes: nil) ⇒ Object
3309 3310 3311 |
# File 'lib/tensorflow/raw_ops.rb', line 3309 def sample_distorted_bounding_box_v2(image_size: nil, bounding_boxes: nil, min_object_covered: nil, seed: nil, seed2: nil, aspect_ratio_range: nil, area_range: nil, max_attempts: nil, use_image_if_no_bounding_boxes: nil) Utils.execute("SampleDistortedBoundingBoxV2", [image_size, bounding_boxes, min_object_covered], seed: seed, seed2: seed2, aspect_ratio_range: aspect_ratio_range, area_range: area_range, max_attempts: max_attempts, use_image_if_no_bounding_boxes: use_image_if_no_bounding_boxes) end |
.sampling_dataset(input_dataset: nil, rate: nil, seed: nil, seed2: nil, output_types: nil, output_shapes: nil) ⇒ Object
3313 3314 3315 |
# File 'lib/tensorflow/raw_ops.rb', line 3313 def sampling_dataset(input_dataset: nil, rate: nil, seed: nil, seed2: nil, output_types: nil, output_shapes: nil) Utils.execute("SamplingDataset", [input_dataset, rate, seed, seed2], output_types: output_types, output_shapes: output_shapes) end |
.save(filename: nil, tensor_names: nil, data: nil) ⇒ Object
3317 3318 3319 |
# File 'lib/tensorflow/raw_ops.rb', line 3317 def save(filename: nil, tensor_names: nil, data: nil) Utils.execute("Save", [filename, tensor_names, data]) end |
.save_slices(filename: nil, tensor_names: nil, shapes_and_slices: nil, data: nil) ⇒ Object
3321 3322 3323 |
# File 'lib/tensorflow/raw_ops.rb', line 3321 def save_slices(filename: nil, tensor_names: nil, shapes_and_slices: nil, data: nil) Utils.execute("SaveSlices", [filename, tensor_names, shapes_and_slices, data]) end |
.save_v2(prefix: nil, tensor_names: nil, shape_and_slices: nil, tensors: nil, dtypes: nil) ⇒ Object
3325 3326 3327 |
# File 'lib/tensorflow/raw_ops.rb', line 3325 def save_v2(prefix: nil, tensor_names: nil, shape_and_slices: nil, tensors: nil, dtypes: nil) Utils.execute("SaveV2", [prefix, tensor_names, shape_and_slices, tensors], dtypes: dtypes) end |
.scalar_summary(tags: nil, values: nil) ⇒ Object
3329 3330 3331 |
# File 'lib/tensorflow/raw_ops.rb', line 3329 def scalar_summary(tags: nil, values: nil) Utils.execute("ScalarSummary", [tags, values]) end |
.scale_and_translate(images: nil, size: nil, scale: nil, translation: nil, kernel_type: nil, antialias: nil) ⇒ Object
3333 3334 3335 |
# File 'lib/tensorflow/raw_ops.rb', line 3333 def scale_and_translate(images: nil, size: nil, scale: nil, translation: nil, kernel_type: nil, antialias: nil) Utils.execute("ScaleAndTranslate", [images, size, scale, translation], kernel_type: kernel_type, antialias: antialias) end |
.scale_and_translate_grad(grads: nil, original_image: nil, scale: nil, translation: nil, kernel_type: nil, antialias: nil) ⇒ Object
3337 3338 3339 |
# File 'lib/tensorflow/raw_ops.rb', line 3337 def scale_and_translate_grad(grads: nil, original_image: nil, scale: nil, translation: nil, kernel_type: nil, antialias: nil) Utils.execute("ScaleAndTranslateGrad", [grads, original_image, scale, translation], kernel_type: kernel_type, antialias: antialias) end |
.scatter_add(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
3341 3342 3343 |
# File 'lib/tensorflow/raw_ops.rb', line 3341 def scatter_add(ref: nil, indices: nil, updates: nil, use_locking: nil) Utils.execute("ScatterAdd", [ref, indices, updates], use_locking: use_locking) end |
.scatter_div(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
3345 3346 3347 |
# File 'lib/tensorflow/raw_ops.rb', line 3345 def scatter_div(ref: nil, indices: nil, updates: nil, use_locking: nil) Utils.execute("ScatterDiv", [ref, indices, updates], use_locking: use_locking) end |
.scatter_max(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
3349 3350 3351 |
# File 'lib/tensorflow/raw_ops.rb', line 3349 def scatter_max(ref: nil, indices: nil, updates: nil, use_locking: nil) Utils.execute("ScatterMax", [ref, indices, updates], use_locking: use_locking) end |
.scatter_min(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
3353 3354 3355 |
# File 'lib/tensorflow/raw_ops.rb', line 3353 def scatter_min(ref: nil, indices: nil, updates: nil, use_locking: nil) Utils.execute("ScatterMin", [ref, indices, updates], use_locking: use_locking) end |
.scatter_mul(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
3357 3358 3359 |
# File 'lib/tensorflow/raw_ops.rb', line 3357 def scatter_mul(ref: nil, indices: nil, updates: nil, use_locking: nil) Utils.execute("ScatterMul", [ref, indices, updates], use_locking: use_locking) end |
.scatter_nd(indices: nil, updates: nil, shape: nil) ⇒ Object
3361 3362 3363 |
# File 'lib/tensorflow/raw_ops.rb', line 3361 def scatter_nd(indices: nil, updates: nil, shape: nil) Utils.execute("ScatterNd", [indices, updates, shape]) end |
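ScatterNd builds a fresh tensor of the requested shape, writes updates at the given indices, and leaves every other element zero. A minimal sketch with illustrative values, assuming arrays are auto-converted to tensors:

  require "tensorflow"

  # Write 9 at index 1 and 10 at index 3 of a length-5 vector.
  out = TensorFlow::RawOps.scatter_nd(indices: [[1], [3]], updates: [9, 10], shape: [5])
  # Expected values: [0, 9, 0, 10, 0]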
.scatter_nd_add(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
3365 3366 3367 |
# File 'lib/tensorflow/raw_ops.rb', line 3365 def scatter_nd_add(ref: nil, indices: nil, updates: nil, use_locking: nil) Utils.execute("ScatterNdAdd", [ref, indices, updates], use_locking: use_locking) end |
.scatter_nd_non_aliasing_add(input: nil, indices: nil, updates: nil) ⇒ Object
3369 3370 3371 |
# File 'lib/tensorflow/raw_ops.rb', line 3369 def scatter_nd_non_aliasing_add(input: nil, indices: nil, updates: nil) Utils.execute("ScatterNdNonAliasingAdd", [input, indices, updates]) end |
.scatter_nd_sub(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
3373 3374 3375 |
# File 'lib/tensorflow/raw_ops.rb', line 3373 def scatter_nd_sub(ref: nil, indices: nil, updates: nil, use_locking: nil) Utils.execute("ScatterNdSub", [ref, indices, updates], use_locking: use_locking) end |
.scatter_nd_update(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
3377 3378 3379 |
# File 'lib/tensorflow/raw_ops.rb', line 3377 def scatter_nd_update(ref: nil, indices: nil, updates: nil, use_locking: nil) Utils.execute("ScatterNdUpdate", [ref, indices, updates], use_locking: use_locking) end |
.scatter_sub(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
3381 3382 3383 |
# File 'lib/tensorflow/raw_ops.rb', line 3381 def scatter_sub(ref: nil, indices: nil, updates: nil, use_locking: nil) Utils.execute("ScatterSub", [ref, indices, updates], use_locking: use_locking) end |
.scatter_update(ref: nil, indices: nil, updates: nil, use_locking: nil) ⇒ Object
3385 3386 3387 |
# File 'lib/tensorflow/raw_ops.rb', line 3385 def scatter_update(ref: nil, indices: nil, updates: nil, use_locking: nil) Utils.execute("ScatterUpdate", [ref, indices, updates], use_locking: use_locking) end |
.sdca_fprint(input: nil) ⇒ Object
3389 3390 3391 |
# File 'lib/tensorflow/raw_ops.rb', line 3389 def sdca_fprint(input: nil) Utils.execute("SdcaFprint", [input]) end |
.sdca_optimizer(sparse_example_indices: nil, sparse_feature_indices: nil, sparse_feature_values: nil, dense_features: nil, example_weights: nil, example_labels: nil, sparse_indices: nil, sparse_weights: nil, dense_weights: nil, example_state_data: nil, loss_type: nil, adaptative: nil, num_sparse_features: nil, num_sparse_features_with_values: nil, num_dense_features: nil, l1: nil, l2: nil, num_loss_partitions: nil, num_inner_iterations: nil) ⇒ Object
3393 3394 3395 |
# File 'lib/tensorflow/raw_ops.rb', line 3393 def sdca_optimizer(sparse_example_indices: nil, sparse_feature_indices: nil, sparse_feature_values: nil, dense_features: nil, example_weights: nil, example_labels: nil, sparse_indices: nil, sparse_weights: nil, dense_weights: nil, example_state_data: nil, loss_type: nil, adaptative: nil, num_sparse_features: nil, num_sparse_features_with_values: nil, num_dense_features: nil, l1: nil, l2: nil, num_loss_partitions: nil, num_inner_iterations: nil) Utils.execute("SdcaOptimizer", [sparse_example_indices, sparse_feature_indices, sparse_feature_values, dense_features, example_weights, example_labels, sparse_indices, sparse_weights, dense_weights, example_state_data], loss_type: loss_type, adaptative: adaptative, num_sparse_features: num_sparse_features, num_sparse_features_with_values: num_sparse_features_with_values, num_dense_features: num_dense_features, l1: l1, l2: l2, num_loss_partitions: num_loss_partitions, num_inner_iterations: num_inner_iterations) end |
.sdca_optimizer_v2(sparse_example_indices: nil, sparse_feature_indices: nil, sparse_feature_values: nil, dense_features: nil, example_weights: nil, example_labels: nil, sparse_indices: nil, sparse_weights: nil, dense_weights: nil, example_state_data: nil, loss_type: nil, adaptive: nil, num_sparse_features: nil, num_sparse_features_with_values: nil, num_dense_features: nil, l1: nil, l2: nil, num_loss_partitions: nil, num_inner_iterations: nil) ⇒ Object
3397 3398 3399 |
# File 'lib/tensorflow/raw_ops.rb', line 3397 def sdca_optimizer_v2(sparse_example_indices: nil, sparse_feature_indices: nil, sparse_feature_values: nil, dense_features: nil, example_weights: nil, example_labels: nil, sparse_indices: nil, sparse_weights: nil, dense_weights: nil, example_state_data: nil, loss_type: nil, adaptive: nil, num_sparse_features: nil, num_sparse_features_with_values: nil, num_dense_features: nil, l1: nil, l2: nil, num_loss_partitions: nil, num_inner_iterations: nil) Utils.execute("SdcaOptimizerV2", [sparse_example_indices, sparse_feature_indices, sparse_feature_values, dense_features, example_weights, example_labels, sparse_indices, sparse_weights, dense_weights, example_state_data], loss_type: loss_type, adaptive: adaptive, num_sparse_features: num_sparse_features, num_sparse_features_with_values: num_sparse_features_with_values, num_dense_features: num_dense_features, l1: l1, l2: l2, num_loss_partitions: num_loss_partitions, num_inner_iterations: num_inner_iterations) end |
.sdca_shrink_l1(weights: nil, num_features: nil, l1: nil, l2: nil) ⇒ Object
3401 3402 3403 |
# File 'lib/tensorflow/raw_ops.rb', line 3401 def sdca_shrink_l1(weights: nil, num_features: nil, l1: nil, l2: nil) Utils.execute("SdcaShrinkL1", [weights], num_features: num_features, l1: l1, l2: l2) end |
.segment_max(data: nil, segment_ids: nil) ⇒ Object
3405 3406 3407 |
# File 'lib/tensorflow/raw_ops.rb', line 3405 def segment_max(data: nil, segment_ids: nil) Utils.execute("SegmentMax", [data, segment_ids]) end |
.segment_mean(data: nil, segment_ids: nil) ⇒ Object
3409 3410 3411 |
# File 'lib/tensorflow/raw_ops.rb', line 3409 def segment_mean(data: nil, segment_ids: nil) Utils.execute("SegmentMean", [data, segment_ids]) end |
.segment_min(data: nil, segment_ids: nil) ⇒ Object
3413 3414 3415 |
# File 'lib/tensorflow/raw_ops.rb', line 3413 def segment_min(data: nil, segment_ids: nil) Utils.execute("SegmentMin", [data, segment_ids]) end |
.segment_prod(data: nil, segment_ids: nil) ⇒ Object
3417 3418 3419 |
# File 'lib/tensorflow/raw_ops.rb', line 3417 def segment_prod(data: nil, segment_ids: nil) Utils.execute("SegmentProd", [data, segment_ids]) end |
.segment_sum(data: nil, segment_ids: nil) ⇒ Object
3421 3422 3423 |
# File 'lib/tensorflow/raw_ops.rb', line 3421 def segment_sum(data: nil, segment_ids: nil) Utils.execute("SegmentSum", [data, segment_ids]) end |
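SegmentSum reduces elements of data that share a segment id; segment_ids must be sorted and start at 0. A sketch under the same assumptions:

  require "tensorflow"

  # Segment 0 covers the first two values, segment 1 the last two.
  sums = TensorFlow::RawOps.segment_sum(data: [1, 2, 3, 4], segment_ids: [0, 0, 1, 1])
  # Expected values: [3, 7]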
.select(condition: nil, t: nil, e: nil) ⇒ Object
3425 3426 3427 |
# File 'lib/tensorflow/raw_ops.rb', line 3425 def select(condition: nil, t: nil, e: nil) Utils.execute("Select", [condition, t, e]) end |
.select_v2(condition: nil, t: nil, e: nil) ⇒ Object
3429 3430 3431 |
# File 'lib/tensorflow/raw_ops.rb', line 3429 def select_v2(condition: nil, t: nil, e: nil) Utils.execute("SelectV2", [condition, t, e]) end |
.self_adjoint_eig(input: nil) ⇒ Object
3433 3434 3435 |
# File 'lib/tensorflow/raw_ops.rb', line 3433 def self_adjoint_eig(input: nil) Utils.execute("SelfAdjointEig", [input]) end |
.self_adjoint_eig_v2(input: nil, compute_v: nil) ⇒ Object
3437 3438 3439 |
# File 'lib/tensorflow/raw_ops.rb', line 3437 def self_adjoint_eig_v2(input: nil, compute_v: nil) Utils.execute("SelfAdjointEigV2", [input], compute_v: compute_v) end |
.selu(features: nil) ⇒ Object
3441 3442 3443 |
# File 'lib/tensorflow/raw_ops.rb', line 3441 def selu(features: nil) Utils.execute("Selu", [features]) end |
.selu_grad(gradients: nil, outputs: nil) ⇒ Object
3445 3446 3447 |
# File 'lib/tensorflow/raw_ops.rb', line 3445 def selu_grad(gradients: nil, outputs: nil) Utils.execute("SeluGrad", [gradients, outputs]) end |
.send_tpu_embedding_gradients(inputs: nil, learning_rates: nil, config: nil) ⇒ Object
3449 3450 3451 |
# File 'lib/tensorflow/raw_ops.rb', line 3449 def send_tpu_embedding_gradients(inputs: nil, learning_rates: nil, config: nil) Utils.execute("SendTPUEmbeddingGradients", [inputs, learning_rates], config: config) end |
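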
.serialize_iterator(resource_handle: nil) ⇒ Object
3453 3454 3455 |
# File 'lib/tensorflow/raw_ops.rb', line 3453 def serialize_iterator(resource_handle: nil) Utils.execute("SerializeIterator", [resource_handle]) end |
.serialize_many_sparse(sparse_indices: nil, sparse_values: nil, sparse_shape: nil, out_type: nil) ⇒ Object
3457 3458 3459 |
# File 'lib/tensorflow/raw_ops.rb', line 3457 def serialize_many_sparse(sparse_indices: nil, sparse_values: nil, sparse_shape: nil, out_type: nil) Utils.execute("SerializeManySparse", [sparse_indices, sparse_values, sparse_shape], out_type: out_type) end |
.serialize_sparse(sparse_indices: nil, sparse_values: nil, sparse_shape: nil, out_type: nil) ⇒ Object
3461 3462 3463 |
# File 'lib/tensorflow/raw_ops.rb', line 3461 def serialize_sparse(sparse_indices: nil, sparse_values: nil, sparse_shape: nil, out_type: nil) Utils.execute("SerializeSparse", [sparse_indices, sparse_values, sparse_shape], out_type: out_type) end |
.serialize_tensor(tensor: nil) ⇒ Object
3465 3466 3467 |
# File 'lib/tensorflow/raw_ops.rb', line 3465 def serialize_tensor(tensor: nil) Utils.execute("SerializeTensor", [tensor]) end |
.set_size(set_indices: nil, set_values: nil, set_shape: nil, validate_indices: nil) ⇒ Object
3469 3470 3471 |
# File 'lib/tensorflow/raw_ops.rb', line 3469 def set_size(set_indices: nil, set_values: nil, set_shape: nil, validate_indices: nil) Utils.execute("SetSize", [set_indices, set_values, set_shape], validate_indices: validate_indices) end |
.shape(input: nil, out_type: nil) ⇒ Object
3473 3474 3475 |
# File 'lib/tensorflow/raw_ops.rb', line 3473 def shape(input: nil, out_type: nil) Utils.execute("Shape", [input], out_type: out_type) end |
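Shape returns the dimensions of its input as a 1-D integer tensor; out_type selects that integer dtype and can be left nil for the default. Illustrative sketch:

  require "tensorflow"

  s = TensorFlow::RawOps.shape(input: [[1, 2, 3], [4, 5, 6]])
  # Expected values: [2, 3]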
.shape_n(input: nil, out_type: nil) ⇒ Object
3477 3478 3479 |
# File 'lib/tensorflow/raw_ops.rb', line 3477 def shape_n(input: nil, out_type: nil) Utils.execute("ShapeN", [input], out_type: out_type) end |
.shard_dataset(input_dataset: nil, num_shards: nil, index: nil, require_non_empty: nil, output_types: nil, output_shapes: nil) ⇒ Object
3481 3482 3483 |
# File 'lib/tensorflow/raw_ops.rb', line 3481 def shard_dataset(input_dataset: nil, num_shards: nil, index: nil, require_non_empty: nil, output_types: nil, output_shapes: nil) Utils.execute("ShardDataset", [input_dataset, num_shards, index], require_non_empty: require_non_empty, output_types: output_types, output_shapes: output_shapes) end |
.sharded_filename(basename: nil, shard: nil, num_shards: nil) ⇒ Object
3485 3486 3487 |
# File 'lib/tensorflow/raw_ops.rb', line 3485 def sharded_filename(basename: nil, shard: nil, num_shards: nil) Utils.execute("ShardedFilename", [basename, shard, num_shards]) end |
.sharded_filespec(basename: nil, num_shards: nil) ⇒ Object
3489 3490 3491 |
# File 'lib/tensorflow/raw_ops.rb', line 3489 def sharded_filespec(basename: nil, num_shards: nil) Utils.execute("ShardedFilespec", [basename, num_shards]) end |
.shuffle_and_repeat_dataset(input_dataset: nil, buffer_size: nil, seed: nil, seed2: nil, count: nil, output_types: nil, output_shapes: nil) ⇒ Object
3493 3494 3495 |
# File 'lib/tensorflow/raw_ops.rb', line 3493 def shuffle_and_repeat_dataset(input_dataset: nil, buffer_size: nil, seed: nil, seed2: nil, count: nil, output_types: nil, output_shapes: nil) Utils.execute("ShuffleAndRepeatDataset", [input_dataset, buffer_size, seed, seed2, count], output_types: output_types, output_shapes: output_shapes) end |
.shuffle_dataset(input_dataset: nil, buffer_size: nil, seed: nil, seed2: nil, reshuffle_each_iteration: nil, output_types: nil, output_shapes: nil) ⇒ Object
3497 3498 3499 |
# File 'lib/tensorflow/raw_ops.rb', line 3497 def shuffle_dataset(input_dataset: nil, buffer_size: nil, seed: nil, seed2: nil, reshuffle_each_iteration: nil, output_types: nil, output_shapes: nil) Utils.execute("ShuffleDataset", [input_dataset, buffer_size, seed, seed2], reshuffle_each_iteration: reshuffle_each_iteration, output_types: output_types, output_shapes: output_shapes) end |
.shutdown_distributed_tpu ⇒ Object
3501 3502 3503 |
# File 'lib/tensorflow/raw_ops.rb', line 3501 def shutdown_distributed_tpu Utils.execute("ShutdownDistributedTPU", []) end |
.sigmoid(x: nil) ⇒ Object
3505 3506 3507 |
# File 'lib/tensorflow/raw_ops.rb', line 3505 def sigmoid(x: nil) Utils.execute("Sigmoid", [x]) end |
.sigmoid_grad(y: nil, dy: nil) ⇒ Object
3509 3510 3511 |
# File 'lib/tensorflow/raw_ops.rb', line 3509 def sigmoid_grad(y: nil, dy: nil) Utils.execute("SigmoidGrad", [y, dy]) end |
.sign(x: nil) ⇒ Object
3513 3514 3515 |
# File 'lib/tensorflow/raw_ops.rb', line 3513 def sign(x: nil) Utils.execute("Sign", [x]) end |
.sin(x: nil) ⇒ Object
3517 3518 3519 |
# File 'lib/tensorflow/raw_ops.rb', line 3517 def sin(x: nil) Utils.execute("Sin", [x]) end |
.sinh(x: nil) ⇒ Object
3521 3522 3523 |
# File 'lib/tensorflow/raw_ops.rb', line 3521 def sinh(x: nil) Utils.execute("Sinh", [x]) end |
.size(input: nil, out_type: nil) ⇒ Object
3525 3526 3527 |
# File 'lib/tensorflow/raw_ops.rb', line 3525 def size(input: nil, out_type: nil) Utils.execute("Size", [input], out_type: out_type) end |
.skip_dataset(input_dataset: nil, count: nil, output_types: nil, output_shapes: nil) ⇒ Object
3529 3530 3531 |
# File 'lib/tensorflow/raw_ops.rb', line 3529 def skip_dataset(input_dataset: nil, count: nil, output_types: nil, output_shapes: nil) Utils.execute("SkipDataset", [input_dataset, count], output_types: output_types, output_shapes: output_shapes) end |
.skipgram(filename: nil, batch_size: nil, window_size: nil, min_count: nil, subsample: nil) ⇒ Object
3533 3534 3535 |
# File 'lib/tensorflow/raw_ops.rb', line 3533 def skipgram(filename: nil, batch_size: nil, window_size: nil, min_count: nil, subsample: nil) Utils.execute("Skipgram", [], filename: filename, batch_size: batch_size, window_size: window_size, min_count: min_count, subsample: subsample) end |
.slice(input: nil, start: nil, size: nil) ⇒ Object
3537 3538 3539 |
# File 'lib/tensorflow/raw_ops.rb', line 3537 def slice(input: nil, start: nil, size: nil) Utils.execute("Slice", [input, start, size]) end |
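Slice extracts a contiguous block: start gives the starting index per dimension and size the extent along each dimension. A sketch with made-up values:

  require "tensorflow"

  m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
  # Start at row 1, column 0 and take a 2 x 2 block.
  block = TensorFlow::RawOps.slice(input: m, start: [1, 0], size: [2, 2])
  # Expected values: [[4, 5], [7, 8]]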
.snapshot(input: nil) ⇒ Object
3541 3542 3543 |
# File 'lib/tensorflow/raw_ops.rb', line 3541 def snapshot(input: nil) Utils.execute("Snapshot", [input]) end |
.snapshot_dataset(input_dataset: nil, path: nil, output_types: nil, output_shapes: nil) ⇒ Object
3545 3546 3547 |
# File 'lib/tensorflow/raw_ops.rb', line 3545 def snapshot_dataset(input_dataset: nil, path: nil, output_types: nil, output_shapes: nil) Utils.execute("SnapshotDataset", [input_dataset, path], output_types: output_types, output_shapes: output_shapes) end |
.softmax(logits: nil) ⇒ Object
3549 3550 3551 |
# File 'lib/tensorflow/raw_ops.rb', line 3549 def softmax(logits: nil) Utils.execute("Softmax", [logits]) end |
.softmax_cross_entropy_with_logits(features: nil, labels: nil) ⇒ Object
3553 3554 3555 |
# File 'lib/tensorflow/raw_ops.rb', line 3553 def softmax_cross_entropy_with_logits(features: nil, labels: nil) Utils.execute("SoftmaxCrossEntropyWithLogits", [features, labels]) end |
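SoftmaxCrossEntropyWithLogits takes unnormalized logits (features) and per-class label probabilities and produces two outputs: the per-example loss and the backpropagated gradient. The sketch below uses made-up values and does not assume how this binding packages the two outputs; inspect the returned object in your setup.

  require "tensorflow"

  logits = [[2.0, 1.0, 0.1]]
  labels = [[1.0, 0.0, 0.0]]   # one-hot targets
  result = TensorFlow::RawOps.softmax_cross_entropy_with_logits(features: logits, labels: labels)
  # result carries the op's two outputs: loss (shape [1]) and backprop (shape [1, 3])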
.softplus(features: nil) ⇒ Object
3557 3558 3559 |
# File 'lib/tensorflow/raw_ops.rb', line 3557 def softplus(features: nil) Utils.execute("Softplus", [features]) end |
.softplus_grad(gradients: nil, features: nil) ⇒ Object
3561 3562 3563 |
# File 'lib/tensorflow/raw_ops.rb', line 3561 def softplus_grad(gradients: nil, features: nil) Utils.execute("SoftplusGrad", [gradients, features]) end |
.softsign(features: nil) ⇒ Object
3565 3566 3567 |
# File 'lib/tensorflow/raw_ops.rb', line 3565 def softsign(features: nil) Utils.execute("Softsign", [features]) end |
.softsign_grad(gradients: nil, features: nil) ⇒ Object
3569 3570 3571 |
# File 'lib/tensorflow/raw_ops.rb', line 3569 def softsign_grad(gradients: nil, features: nil) Utils.execute("SoftsignGrad", [gradients, features]) end |
.space_to_batch(input: nil, paddings: nil, block_size: nil) ⇒ Object
3573 3574 3575 |
# File 'lib/tensorflow/raw_ops.rb', line 3573 def space_to_batch(input: nil, paddings: nil, block_size: nil) Utils.execute("SpaceToBatch", [input, paddings], block_size: block_size) end |
.space_to_batch_nd(input: nil, block_shape: nil, paddings: nil) ⇒ Object
3577 3578 3579 |
# File 'lib/tensorflow/raw_ops.rb', line 3577 def space_to_batch_nd(input: nil, block_shape: nil, paddings: nil) Utils.execute("SpaceToBatchND", [input, block_shape, paddings]) end |
.space_to_depth(input: nil, block_size: nil, data_format: nil) ⇒ Object
3581 3582 3583 |
# File 'lib/tensorflow/raw_ops.rb', line 3581 def space_to_depth(input: nil, block_size: nil, data_format: nil) Utils.execute("SpaceToDepth", [input], block_size: block_size, data_format: data_format) end |
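SpaceToDepth moves block_size x block_size spatial blocks into the channel dimension, so an NHWC input of shape [1, 2, 2, 1] with block_size 2 becomes [1, 1, 1, 4]. Sketch, same array-conversion assumption:

  require "tensorflow"

  img = [[[[1], [2]], [[3], [4]]]]   # shape [1, 2, 2, 1]
  packed = TensorFlow::RawOps.space_to_depth(input: img, block_size: 2, data_format: "NHWC")
  # Expected shape [1, 1, 1, 4] with values [1, 2, 3, 4]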
.sparse_accumulator_apply_gradient(handle: nil, local_step: nil, gradient_indices: nil, gradient_values: nil, gradient_shape: nil, dtype: nil, has_known_shape: nil) ⇒ Object
3585 3586 3587 |
# File 'lib/tensorflow/raw_ops.rb', line 3585 def sparse_accumulator_apply_gradient(handle: nil, local_step: nil, gradient_indices: nil, gradient_values: nil, gradient_shape: nil, dtype: nil, has_known_shape: nil) Utils.execute("SparseAccumulatorApplyGradient", [handle, local_step, gradient_indices, gradient_values, gradient_shape], dtype: dtype, has_known_shape: has_known_shape) end |
.sparse_accumulator_take_gradient(handle: nil, num_required: nil, dtype: nil) ⇒ Object
3589 3590 3591 |
# File 'lib/tensorflow/raw_ops.rb', line 3589 def sparse_accumulator_take_gradient(handle: nil, num_required: nil, dtype: nil) Utils.execute("SparseAccumulatorTakeGradient", [handle, num_required], dtype: dtype) end |
.sparse_add(a_indices: nil, a_values: nil, a_shape: nil, b_indices: nil, b_values: nil, b_shape: nil, thresh: nil) ⇒ Object
3593 3594 3595 |
# File 'lib/tensorflow/raw_ops.rb', line 3593 def sparse_add(a_indices: nil, a_values: nil, a_shape: nil, b_indices: nil, b_values: nil, b_shape: nil, thresh: nil) Utils.execute("SparseAdd", [a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh]) end |
.sparse_add_grad(backprop_val_grad: nil, a_indices: nil, b_indices: nil, sum_indices: nil) ⇒ Object
3597 3598 3599 |
# File 'lib/tensorflow/raw_ops.rb', line 3597 def sparse_add_grad(backprop_val_grad: nil, a_indices: nil, b_indices: nil, sum_indices: nil) Utils.execute("SparseAddGrad", [backprop_val_grad, a_indices, b_indices, sum_indices]) end |
.sparse_apply_adadelta(var: nil, accum: nil, accum_update: nil, lr: nil, rho: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
3601 3602 3603 |
# File 'lib/tensorflow/raw_ops.rb', line 3601 def sparse_apply_adadelta(var: nil, accum: nil, accum_update: nil, lr: nil, rho: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) Utils.execute("SparseApplyAdadelta", [var, accum, accum_update, lr, rho, epsilon, grad, indices], use_locking: use_locking) end |
.sparse_apply_adagrad(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, use_locking: nil, update_slots: nil) ⇒ Object
3605 3606 3607 |
# File 'lib/tensorflow/raw_ops.rb', line 3605 def sparse_apply_adagrad(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, use_locking: nil, update_slots: nil) Utils.execute("SparseApplyAdagrad", [var, accum, lr, grad, indices], use_locking: use_locking, update_slots: update_slots) end |
.sparse_apply_adagrad_da(var: nil, gradient_accumulator: nil, gradient_squared_accumulator: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, global_step: nil, use_locking: nil) ⇒ Object
3609 3610 3611 |
# File 'lib/tensorflow/raw_ops.rb', line 3609 def sparse_apply_adagrad_da(var: nil, gradient_accumulator: nil, gradient_squared_accumulator: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, global_step: nil, use_locking: nil) Utils.execute("SparseApplyAdagradDA", [var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step], use_locking: use_locking) end |
.sparse_apply_centered_rms_prop(var: nil, mg: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
3613 3614 3615 |
# File 'lib/tensorflow/raw_ops.rb', line 3613 def sparse_apply_centered_rms_prop(var: nil, mg: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) Utils.execute("SparseApplyCenteredRMSProp", [var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices], use_locking: use_locking) end |
.sparse_apply_ftrl(var: nil, accum: nil, linear: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, lr_power: nil, use_locking: nil) ⇒ Object
3617 3618 3619 |
# File 'lib/tensorflow/raw_ops.rb', line 3617 def sparse_apply_ftrl(var: nil, accum: nil, linear: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, lr_power: nil, use_locking: nil) Utils.execute("SparseApplyFtrl", [var, accum, linear, grad, indices, lr, l1, l2, lr_power], use_locking: use_locking) end |
.sparse_apply_ftrl_v2(var: nil, accum: nil, linear: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, l2_shrinkage: nil, lr_power: nil, use_locking: nil) ⇒ Object
3621 3622 3623 |
# File 'lib/tensorflow/raw_ops.rb', line 3621 def sparse_apply_ftrl_v2(var: nil, accum: nil, linear: nil, grad: nil, indices: nil, lr: nil, l1: nil, l2: nil, l2_shrinkage: nil, lr_power: nil, use_locking: nil) Utils.execute("SparseApplyFtrlV2", [var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power], use_locking: use_locking) end |
.sparse_apply_momentum(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, momentum: nil, use_locking: nil, use_nesterov: nil) ⇒ Object
3625 3626 3627 |
# File 'lib/tensorflow/raw_ops.rb', line 3625 def sparse_apply_momentum(var: nil, accum: nil, lr: nil, grad: nil, indices: nil, momentum: nil, use_locking: nil, use_nesterov: nil) Utils.execute("SparseApplyMomentum", [var, accum, lr, grad, indices, momentum], use_locking: use_locking, use_nesterov: use_nesterov) end |
.sparse_apply_proximal_adagrad(var: nil, accum: nil, lr: nil, l1: nil, l2: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
3629 3630 3631 |
# File 'lib/tensorflow/raw_ops.rb', line 3629 def sparse_apply_proximal_adagrad(var: nil, accum: nil, lr: nil, l1: nil, l2: nil, grad: nil, indices: nil, use_locking: nil) Utils.execute("SparseApplyProximalAdagrad", [var, accum, lr, l1, l2, grad, indices], use_locking: use_locking) end |
.sparse_apply_proximal_gradient_descent(var: nil, alpha: nil, l1: nil, l2: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
3633 3634 3635 |
# File 'lib/tensorflow/raw_ops.rb', line 3633 def sparse_apply_proximal_gradient_descent(var: nil, alpha: nil, l1: nil, l2: nil, grad: nil, indices: nil, use_locking: nil) Utils.execute("SparseApplyProximalGradientDescent", [var, alpha, l1, l2, grad, indices], use_locking: use_locking) end |
.sparse_apply_rms_prop(var: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) ⇒ Object
3637 3638 3639 |
# File 'lib/tensorflow/raw_ops.rb', line 3637 def sparse_apply_rms_prop(var: nil, ms: nil, mom: nil, lr: nil, rho: nil, momentum: nil, epsilon: nil, grad: nil, indices: nil, use_locking: nil) Utils.execute("SparseApplyRMSProp", [var, ms, mom, lr, rho, momentum, epsilon, grad, indices], use_locking: use_locking) end |
.sparse_concat(indices: nil, values: nil, shapes: nil, concat_dim: nil) ⇒ Object
3641 3642 3643 |
# File 'lib/tensorflow/raw_ops.rb', line 3641 def sparse_concat(indices: nil, values: nil, shapes: nil, concat_dim: nil) Utils.execute("SparseConcat", [indices, values, shapes], concat_dim: concat_dim) end |
.sparse_conditional_accumulator(dtype: nil, shape: nil, container: nil, shared_name: nil, reduction_type: nil) ⇒ Object
3645 3646 3647 |
# File 'lib/tensorflow/raw_ops.rb', line 3645 def sparse_conditional_accumulator(dtype: nil, shape: nil, container: nil, shared_name: nil, reduction_type: nil) Utils.execute("SparseConditionalAccumulator", [], dtype: dtype, shape: shape, container: container, shared_name: shared_name, reduction_type: reduction_type) end |
.sparse_cross(indices: nil, values: nil, shapes: nil, dense_inputs: nil, hashed_output: nil, num_buckets: nil, hash_key: nil, sparse_types: nil, dense_types: nil, out_type: nil, internal_type: nil) ⇒ Object
3649 3650 3651 |
# File 'lib/tensorflow/raw_ops.rb', line 3649 def sparse_cross(indices: nil, values: nil, shapes: nil, dense_inputs: nil, hashed_output: nil, num_buckets: nil, hash_key: nil, sparse_types: nil, dense_types: nil, out_type: nil, internal_type: nil) Utils.execute("SparseCross", [indices, values, shapes, dense_inputs], hashed_output: hashed_output, num_buckets: num_buckets, hash_key: hash_key, sparse_types: sparse_types, dense_types: dense_types, out_type: out_type, internal_type: internal_type) end |
.sparse_dense_cwise_add(sp_indices: nil, sp_values: nil, sp_shape: nil, dense: nil) ⇒ Object
3653 3654 3655 |
# File 'lib/tensorflow/raw_ops.rb', line 3653 def sparse_dense_cwise_add(sp_indices: nil, sp_values: nil, sp_shape: nil, dense: nil) Utils.execute("SparseDenseCwiseAdd", [sp_indices, sp_values, sp_shape, dense]) end |
.sparse_dense_cwise_div(sp_indices: nil, sp_values: nil, sp_shape: nil, dense: nil) ⇒ Object
3657 3658 3659 |
# File 'lib/tensorflow/raw_ops.rb', line 3657 def sparse_dense_cwise_div(sp_indices: nil, sp_values: nil, sp_shape: nil, dense: nil) Utils.execute("SparseDenseCwiseDiv", [sp_indices, sp_values, sp_shape, dense]) end |
.sparse_dense_cwise_mul(sp_indices: nil, sp_values: nil, sp_shape: nil, dense: nil) ⇒ Object
3661 3662 3663 |
# File 'lib/tensorflow/raw_ops.rb', line 3661 def sparse_dense_cwise_mul(sp_indices: nil, sp_values: nil, sp_shape: nil, dense: nil) Utils.execute("SparseDenseCwiseMul", [sp_indices, sp_values, sp_shape, dense]) end |
.sparse_fill_empty_rows(indices: nil, values: nil, dense_shape: nil, default_value: nil) ⇒ Object
3665 3666 3667 |
# File 'lib/tensorflow/raw_ops.rb', line 3665 def sparse_fill_empty_rows(indices: nil, values: nil, dense_shape: nil, default_value: nil) Utils.execute("SparseFillEmptyRows", [indices, values, dense_shape, default_value]) end |
.sparse_fill_empty_rows_grad(reverse_index_map: nil, grad_values: nil) ⇒ Object
3669 3670 3671 |
# File 'lib/tensorflow/raw_ops.rb', line 3669 def sparse_fill_empty_rows_grad(reverse_index_map: nil, grad_values: nil) Utils.execute("SparseFillEmptyRowsGrad", [reverse_index_map, grad_values]) end |
.sparse_mat_mul(a: nil, b: nil, transpose_a: nil, transpose_b: nil, a_is_sparse: nil, b_is_sparse: nil) ⇒ Object
3673 3674 3675 |
# File 'lib/tensorflow/raw_ops.rb', line 3673 def sparse_mat_mul(a: nil, b: nil, transpose_a: nil, transpose_b: nil, a_is_sparse: nil, b_is_sparse: nil) Utils.execute("SparseMatMul", [a, b], transpose_a: transpose_a, transpose_b: transpose_b, a_is_sparse: a_is_sparse, b_is_sparse: b_is_sparse) end |
.sparse_reduce_max(input_indices: nil, input_values: nil, input_shape: nil, reduction_axes: nil, keep_dims: nil) ⇒ Object
3677 3678 3679 |
# File 'lib/tensorflow/raw_ops.rb', line 3677 def sparse_reduce_max(input_indices: nil, input_values: nil, input_shape: nil, reduction_axes: nil, keep_dims: nil) Utils.execute("SparseReduceMax", [input_indices, input_values, input_shape, reduction_axes], keep_dims: keep_dims) end |
.sparse_reduce_max_sparse(input_indices: nil, input_values: nil, input_shape: nil, reduction_axes: nil, keep_dims: nil) ⇒ Object
3681 3682 3683 |
# File 'lib/tensorflow/raw_ops.rb', line 3681 def sparse_reduce_max_sparse(input_indices: nil, input_values: nil, input_shape: nil, reduction_axes: nil, keep_dims: nil) Utils.execute("SparseReduceMaxSparse", [input_indices, input_values, input_shape, reduction_axes], keep_dims: keep_dims) end |
.sparse_reduce_sum(input_indices: nil, input_values: nil, input_shape: nil, reduction_axes: nil, keep_dims: nil) ⇒ Object
3685 3686 3687 |
# File 'lib/tensorflow/raw_ops.rb', line 3685 def sparse_reduce_sum(input_indices: nil, input_values: nil, input_shape: nil, reduction_axes: nil, keep_dims: nil) Utils.execute("SparseReduceSum", [input_indices, input_values, input_shape, reduction_axes], keep_dims: keep_dims) end |
.sparse_reduce_sum_sparse(input_indices: nil, input_values: nil, input_shape: nil, reduction_axes: nil, keep_dims: nil) ⇒ Object
3689 3690 3691 |
# File 'lib/tensorflow/raw_ops.rb', line 3689 def sparse_reduce_sum_sparse(input_indices: nil, input_values: nil, input_shape: nil, reduction_axes: nil, keep_dims: nil) Utils.execute("SparseReduceSumSparse", [input_indices, input_values, input_shape, reduction_axes], keep_dims: keep_dims) end |
.sparse_reorder(input_indices: nil, input_values: nil, input_shape: nil) ⇒ Object
3693 3694 3695 |
# File 'lib/tensorflow/raw_ops.rb', line 3693 def sparse_reorder(input_indices: nil, input_values: nil, input_shape: nil) Utils.execute("SparseReorder", [input_indices, input_values, input_shape]) end |
.sparse_reshape(input_indices: nil, input_shape: nil, new_shape: nil) ⇒ Object
3697 3698 3699 |
# File 'lib/tensorflow/raw_ops.rb', line 3697 def sparse_reshape(input_indices: nil, input_shape: nil, new_shape: nil) Utils.execute("SparseReshape", [input_indices, input_shape, new_shape]) end |
.sparse_segment_mean(data: nil, indices: nil, segment_ids: nil) ⇒ Object
3701 3702 3703 |
# File 'lib/tensorflow/raw_ops.rb', line 3701 def sparse_segment_mean(data: nil, indices: nil, segment_ids: nil) Utils.execute("SparseSegmentMean", [data, indices, segment_ids]) end |
.sparse_segment_mean_grad(grad: nil, indices: nil, segment_ids: nil, output_dim0: nil) ⇒ Object
3705 3706 3707 |
# File 'lib/tensorflow/raw_ops.rb', line 3705 def sparse_segment_mean_grad(grad: nil, indices: nil, segment_ids: nil, output_dim0: nil) Utils.execute("SparseSegmentMeanGrad", [grad, indices, segment_ids, output_dim0]) end |
.sparse_segment_mean_with_num_segments(data: nil, indices: nil, segment_ids: nil, num_segments: nil) ⇒ Object
3709 3710 3711 |
# File 'lib/tensorflow/raw_ops.rb', line 3709 def sparse_segment_mean_with_num_segments(data: nil, indices: nil, segment_ids: nil, num_segments: nil) Utils.execute("SparseSegmentMeanWithNumSegments", [data, indices, segment_ids, num_segments]) end |
.sparse_segment_sqrt_n(data: nil, indices: nil, segment_ids: nil) ⇒ Object
3713 3714 3715 |
# File 'lib/tensorflow/raw_ops.rb', line 3713 def sparse_segment_sqrt_n(data: nil, indices: nil, segment_ids: nil) Utils.execute("SparseSegmentSqrtN", [data, indices, segment_ids]) end |
.sparse_segment_sqrt_n_grad(grad: nil, indices: nil, segment_ids: nil, output_dim0: nil) ⇒ Object
3717 3718 3719 |
# File 'lib/tensorflow/raw_ops.rb', line 3717 def sparse_segment_sqrt_n_grad(grad: nil, indices: nil, segment_ids: nil, output_dim0: nil) Utils.execute("SparseSegmentSqrtNGrad", [grad, indices, segment_ids, output_dim0]) end |
.sparse_segment_sqrt_n_with_num_segments(data: nil, indices: nil, segment_ids: nil, num_segments: nil) ⇒ Object
3721 3722 3723 |
# File 'lib/tensorflow/raw_ops.rb', line 3721 def sparse_segment_sqrt_n_with_num_segments(data: nil, indices: nil, segment_ids: nil, num_segments: nil) Utils.execute("SparseSegmentSqrtNWithNumSegments", [data, indices, segment_ids, num_segments]) end |
.sparse_segment_sum(data: nil, indices: nil, segment_ids: nil) ⇒ Object
3725 3726 3727 |
# File 'lib/tensorflow/raw_ops.rb', line 3725 def sparse_segment_sum(data: nil, indices: nil, segment_ids: nil) Utils.execute("SparseSegmentSum", [data, indices, segment_ids]) end |
.sparse_segment_sum_with_num_segments(data: nil, indices: nil, segment_ids: nil, num_segments: nil) ⇒ Object
3729 3730 3731 |
# File 'lib/tensorflow/raw_ops.rb', line 3729 def sparse_segment_sum_with_num_segments(data: nil, indices: nil, segment_ids: nil, num_segments: nil) Utils.execute("SparseSegmentSumWithNumSegments", [data, indices, segment_ids, num_segments]) end |
.sparse_slice(indices: nil, values: nil, shape: nil, start: nil, size: nil) ⇒ Object
3733 3734 3735 |
# File 'lib/tensorflow/raw_ops.rb', line 3733 def sparse_slice(indices: nil, values: nil, shape: nil, start: nil, size: nil) Utils.execute("SparseSlice", [indices, values, shape, start, size]) end |
.sparse_slice_grad(backprop_val_grad: nil, input_indices: nil, input_start: nil, output_indices: nil) ⇒ Object
3737 3738 3739 |
# File 'lib/tensorflow/raw_ops.rb', line 3737 def sparse_slice_grad(backprop_val_grad: nil, input_indices: nil, input_start: nil, output_indices: nil) Utils.execute("SparseSliceGrad", [backprop_val_grad, input_indices, input_start, output_indices]) end |
.sparse_softmax(sp_indices: nil, sp_values: nil, sp_shape: nil) ⇒ Object
3741 3742 3743 |
# File 'lib/tensorflow/raw_ops.rb', line 3741 def sparse_softmax(sp_indices: nil, sp_values: nil, sp_shape: nil) Utils.execute("SparseSoftmax", [sp_indices, sp_values, sp_shape]) end |
.sparse_softmax_cross_entropy_with_logits(features: nil, labels: nil) ⇒ Object
3745 3746 3747 |
# File 'lib/tensorflow/raw_ops.rb', line 3745 def sparse_softmax_cross_entropy_with_logits(features: nil, labels: nil) Utils.execute("SparseSoftmaxCrossEntropyWithLogits", [features, labels]) end |
.sparse_sparse_maximum(a_indices: nil, a_values: nil, a_shape: nil, b_indices: nil, b_values: nil, b_shape: nil) ⇒ Object
3749 3750 3751 |
# File 'lib/tensorflow/raw_ops.rb', line 3749 def sparse_sparse_maximum(a_indices: nil, a_values: nil, a_shape: nil, b_indices: nil, b_values: nil, b_shape: nil) Utils.execute("SparseSparseMaximum", [a_indices, a_values, a_shape, b_indices, b_values, b_shape]) end |
.sparse_sparse_minimum(a_indices: nil, a_values: nil, a_shape: nil, b_indices: nil, b_values: nil, b_shape: nil) ⇒ Object
3753 3754 3755 |
# File 'lib/tensorflow/raw_ops.rb', line 3753 def sparse_sparse_minimum(a_indices: nil, a_values: nil, a_shape: nil, b_indices: nil, b_values: nil, b_shape: nil) Utils.execute("SparseSparseMinimum", [a_indices, a_values, a_shape, b_indices, b_values, b_shape]) end |
.sparse_split(split_dim: nil, indices: nil, values: nil, shape: nil, num_split: nil) ⇒ Object
3757 3758 3759 |
# File 'lib/tensorflow/raw_ops.rb', line 3757 def sparse_split(split_dim: nil, indices: nil, values: nil, shape: nil, num_split: nil) Utils.execute("SparseSplit", [split_dim, indices, values, shape], num_split: num_split) end |
.sparse_tensor_dense_add(a_indices: nil, a_values: nil, a_shape: nil, b: nil) ⇒ Object
3761 3762 3763 |
# File 'lib/tensorflow/raw_ops.rb', line 3761 def sparse_tensor_dense_add(a_indices: nil, a_values: nil, a_shape: nil, b: nil) Utils.execute("SparseTensorDenseAdd", [a_indices, a_values, a_shape, b]) end |
.sparse_tensor_dense_mat_mul(a_indices: nil, a_values: nil, a_shape: nil, b: nil, adjoint_a: nil, adjoint_b: nil) ⇒ Object
3765 3766 3767 |
# File 'lib/tensorflow/raw_ops.rb', line 3765 def sparse_tensor_dense_mat_mul(a_indices: nil, a_values: nil, a_shape: nil, b: nil, adjoint_a: nil, adjoint_b: nil) Utils.execute("SparseTensorDenseMatMul", [a_indices, a_values, a_shape, b], adjoint_a: adjoint_a, adjoint_b: adjoint_b) end |
.sparse_tensor_slice_dataset(indices: nil, values: nil, dense_shape: nil) ⇒ Object
3769 3770 3771 |
# File 'lib/tensorflow/raw_ops.rb', line 3769 def sparse_tensor_slice_dataset(indices: nil, values: nil, dense_shape: nil) Utils.execute("SparseTensorSliceDataset", [indices, values, dense_shape]) end |
.sparse_to_dense(sparse_indices: nil, output_shape: nil, sparse_values: nil, default_value: nil, validate_indices: nil) ⇒ Object
3773 3774 3775 |
# File 'lib/tensorflow/raw_ops.rb', line 3773 def sparse_to_dense(sparse_indices: nil, output_shape: nil, sparse_values: nil, default_value: nil, validate_indices: nil) Utils.execute("SparseToDense", [sparse_indices, output_shape, sparse_values, default_value], validate_indices: validate_indices) end |
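SparseToDense materializes a dense tensor from index/value pairs, filling every unspecified position with default_value. A sketch with illustrative values:

  require "tensorflow"

  dense = TensorFlow::RawOps.sparse_to_dense(
    sparse_indices: [[0], [2]],
    output_shape: [4],
    sparse_values: [7, 8],
    default_value: 0,
    validate_indices: true
  )
  # Expected values: [7, 0, 8, 0]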
.sparse_to_sparse_set_operation(set1_indices: nil, set1_values: nil, set1_shape: nil, set2_indices: nil, set2_values: nil, set2_shape: nil, set_operation: nil, validate_indices: nil) ⇒ Object
3777 3778 3779 |
# File 'lib/tensorflow/raw_ops.rb', line 3777 def sparse_to_sparse_set_operation(set1_indices: nil, set1_values: nil, set1_shape: nil, set2_indices: nil, set2_values: nil, set2_shape: nil, set_operation: nil, validate_indices: nil) Utils.execute("SparseToSparseSetOperation", [set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape], set_operation: set_operation, validate_indices: validate_indices) end |
.split(split_dim: nil, value: nil, num_split: nil) ⇒ Object
3781 3782 3783 |
# File 'lib/tensorflow/raw_ops.rb', line 3781 def split(split_dim: nil, value: nil, num_split: nil) Utils.execute("Split", [split_dim, value], num_split: num_split) end |
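Split divides value into num_split equal pieces along split_dim (SplitV, next, allows uneven sizes). The sketch below does not assume how the binding returns the multiple output tensors:

  require "tensorflow"

  parts = TensorFlow::RawOps.split(split_dim: 0, value: [1, 2, 3, 4, 5, 6], num_split: 3)
  # The op yields three tensors: [1, 2], [3, 4], [5, 6]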
.split_v(value: nil, size_splits: nil, split_dim: nil, num_split: nil) ⇒ Object
3785 3786 3787 |
# File 'lib/tensorflow/raw_ops.rb', line 3785 def split_v(value: nil, size_splits: nil, split_dim: nil, num_split: nil) Utils.execute("SplitV", [value, size_splits, split_dim], num_split: num_split) end |
.sqrt(x: nil) ⇒ Object
3789 3790 3791 |
# File 'lib/tensorflow/raw_ops.rb', line 3789 def sqrt(x: nil) Utils.execute("Sqrt", [x]) end |
.sqrt_grad(y: nil, dy: nil) ⇒ Object
3793 3794 3795 |
# File 'lib/tensorflow/raw_ops.rb', line 3793 def sqrt_grad(y: nil, dy: nil) Utils.execute("SqrtGrad", [y, dy]) end |
.square(x: nil) ⇒ Object
3797 3798 3799 |
# File 'lib/tensorflow/raw_ops.rb', line 3797 def square(x: nil) Utils.execute("Square", [x]) end |
.squared_difference(x: nil, y: nil) ⇒ Object
3801 3802 3803 |
# File 'lib/tensorflow/raw_ops.rb', line 3801 def squared_difference(x: nil, y: nil) Utils.execute("SquaredDifference", [x, y]) end |
.squeeze(input: nil, squeeze_dims: nil) ⇒ Object
3805 3806 3807 |
# File 'lib/tensorflow/raw_ops.rb', line 3805 def squeeze(input: nil, squeeze_dims: nil) Utils.execute("Squeeze", [input], squeeze_dims: squeeze_dims) end |
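Squeeze drops size-1 dimensions; squeeze_dims restricts which axes are considered. Sketch:

  require "tensorflow"

  t = [[[1], [2], [3]]]   # shape [1, 3, 1]
  squeezed = TensorFlow::RawOps.squeeze(input: t, squeeze_dims: [0, 2])
  # Expected shape [3] with values [1, 2, 3]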
.stack(elem_type: nil, stack_name: nil) ⇒ Object
3809 3810 3811 |
# File 'lib/tensorflow/raw_ops.rb', line 3809 def stack(elem_type: nil, stack_name: nil) Utils.execute("Stack", [], elem_type: elem_type, stack_name: stack_name) end |
.stack_close(handle: nil) ⇒ Object
3813 3814 3815 |
# File 'lib/tensorflow/raw_ops.rb', line 3813 def stack_close(handle: nil) Utils.execute("StackClose", [handle]) end |
.stack_close_v2(handle: nil) ⇒ Object
3817 3818 3819 |
# File 'lib/tensorflow/raw_ops.rb', line 3817 def stack_close_v2(handle: nil) Utils.execute("StackCloseV2", [handle]) end |
.stack_pop(handle: nil, elem_type: nil) ⇒ Object
3821 3822 3823 |
# File 'lib/tensorflow/raw_ops.rb', line 3821 def stack_pop(handle: nil, elem_type: nil) Utils.execute("StackPop", [handle], elem_type: elem_type) end |
.stack_pop_v2(handle: nil, elem_type: nil) ⇒ Object
3825 3826 3827 |
# File 'lib/tensorflow/raw_ops.rb', line 3825 def stack_pop_v2(handle: nil, elem_type: nil) Utils.execute("StackPopV2", [handle], elem_type: elem_type) end |
.stack_push(handle: nil, elem: nil, swap_memory: nil) ⇒ Object
3829 3830 3831 |
# File 'lib/tensorflow/raw_ops.rb', line 3829 def stack_push(handle: nil, elem: nil, swap_memory: nil) Utils.execute("StackPush", [handle, elem], swap_memory: swap_memory) end |
.stack_push_v2(handle: nil, elem: nil, swap_memory: nil) ⇒ Object
3833 3834 3835 |
# File 'lib/tensorflow/raw_ops.rb', line 3833 def stack_push_v2(handle: nil, elem: nil, swap_memory: nil) Utils.execute("StackPushV2", [handle, elem], swap_memory: swap_memory) end |
.stack_v2(max_size: nil, elem_type: nil, stack_name: nil) ⇒ Object
3837 3838 3839 |
# File 'lib/tensorflow/raw_ops.rb', line 3837 def stack_v2(max_size: nil, elem_type: nil, stack_name: nil) Utils.execute("StackV2", [max_size], elem_type: elem_type, stack_name: stack_name) end |
.stage(values: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
3841 3842 3843 |
# File 'lib/tensorflow/raw_ops.rb', line 3841 def stage(values: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("Stage", [values], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.stage_clear(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
3845 3846 3847 |
# File 'lib/tensorflow/raw_ops.rb', line 3845 def stage_clear(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("StageClear", [], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.stage_peek(index: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
3849 3850 3851 |
# File 'lib/tensorflow/raw_ops.rb', line 3849 def stage_peek(index: nil, capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("StagePeek", [index], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.stage_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
3853 3854 3855 |
# File 'lib/tensorflow/raw_ops.rb', line 3853 def stage_size(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("StageSize", [], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.stateful_partitioned_call(args: nil, f: nil, config: nil, config_proto: nil, executor_type: nil) ⇒ Object
3857 3858 3859 |
# File 'lib/tensorflow/raw_ops.rb', line 3857 def stateful_partitioned_call(args: nil, f: nil, config: nil, config_proto: nil, executor_type: nil) Utils.execute("StatefulPartitionedCall", [args], f: f, config: config, config_proto: config_proto, executor_type: executor_type) end |
.stateful_random_binomial(resource: nil, algorithm: nil, shape: nil, counts: nil, probs: nil, dtype: nil) ⇒ Object
3861 3862 3863 |
# File 'lib/tensorflow/raw_ops.rb', line 3861 def stateful_random_binomial(resource: nil, algorithm: nil, shape: nil, counts: nil, probs: nil, dtype: nil) Utils.execute("StatefulRandomBinomial", [resource, algorithm, shape, counts, probs], dtype: dtype) end |
.stateful_standard_normal(resource: nil, shape: nil, dtype: nil, shape_dtype: nil) ⇒ Object
3865 3866 3867 |
# File 'lib/tensorflow/raw_ops.rb', line 3865 def stateful_standard_normal(resource: nil, shape: nil, dtype: nil, shape_dtype: nil) Utils.execute("StatefulStandardNormal", [resource, shape], dtype: dtype, shape_dtype: shape_dtype) end |
.stateful_standard_normal_v2(resource: nil, algorithm: nil, shape: nil, dtype: nil, shape_dtype: nil) ⇒ Object
3869 3870 3871 |
# File 'lib/tensorflow/raw_ops.rb', line 3869 def stateful_standard_normal_v2(resource: nil, algorithm: nil, shape: nil, dtype: nil, shape_dtype: nil) Utils.execute("StatefulStandardNormalV2", [resource, algorithm, shape], dtype: dtype, shape_dtype: shape_dtype) end |
.stateful_truncated_normal(resource: nil, algorithm: nil, shape: nil, dtype: nil, shape_dtype: nil) ⇒ Object
3873 3874 3875 |
# File 'lib/tensorflow/raw_ops.rb', line 3873 def stateful_truncated_normal(resource: nil, algorithm: nil, shape: nil, dtype: nil, shape_dtype: nil) Utils.execute("StatefulTruncatedNormal", [resource, algorithm, shape], dtype: dtype, shape_dtype: shape_dtype) end |
.stateful_uniform(resource: nil, algorithm: nil, shape: nil, dtype: nil, shape_dtype: nil) ⇒ Object
3877 3878 3879 |
# File 'lib/tensorflow/raw_ops.rb', line 3877 def stateful_uniform(resource: nil, algorithm: nil, shape: nil, dtype: nil, shape_dtype: nil) Utils.execute("StatefulUniform", [resource, algorithm, shape], dtype: dtype, shape_dtype: shape_dtype) end |
.stateful_uniform_full_int(resource: nil, algorithm: nil, shape: nil, dtype: nil, shape_dtype: nil) ⇒ Object
3881 3882 3883 |
# File 'lib/tensorflow/raw_ops.rb', line 3881 def stateful_uniform_full_int(resource: nil, algorithm: nil, shape: nil, dtype: nil, shape_dtype: nil) Utils.execute("StatefulUniformFullInt", [resource, algorithm, shape], dtype: dtype, shape_dtype: shape_dtype) end |
.stateful_uniform_int(resource: nil, algorithm: nil, shape: nil, minval: nil, maxval: nil, dtype: nil, shape_dtype: nil) ⇒ Object
3885 3886 3887 |
# File 'lib/tensorflow/raw_ops.rb', line 3885 def stateful_uniform_int(resource: nil, algorithm: nil, shape: nil, minval: nil, maxval: nil, dtype: nil, shape_dtype: nil) Utils.execute("StatefulUniformInt", [resource, algorithm, shape, minval, maxval], dtype: dtype, shape_dtype: shape_dtype) end |
.stateless_if(cond: nil, input: nil, then_branch: nil, else_branch: nil) ⇒ Object
3889 3890 3891 |
# File 'lib/tensorflow/raw_ops.rb', line 3889 def stateless_if(cond: nil, input: nil, then_branch: nil, else_branch: nil) Utils.execute("StatelessIf", [cond, input], then_branch: then_branch, else_branch: else_branch) end |
.stateless_multinomial(logits: nil, num_samples: nil, seed: nil, output_dtype: nil) ⇒ Object
3893 3894 3895 |
# File 'lib/tensorflow/raw_ops.rb', line 3893 def stateless_multinomial(logits: nil, num_samples: nil, seed: nil, output_dtype: nil) Utils.execute("StatelessMultinomial", [logits, num_samples, seed], output_dtype: output_dtype) end |
.stateless_random_normal(shape: nil, seed: nil, dtype: nil) ⇒ Object
3897 3898 3899 |
# File 'lib/tensorflow/raw_ops.rb', line 3897 def stateless_random_normal(shape: nil, seed: nil, dtype: nil) Utils.execute("StatelessRandomNormal", [shape, seed], dtype: dtype) end |
.stateless_random_uniform(shape: nil, seed: nil, dtype: nil) ⇒ Object
3901 3902 3903 |
# File 'lib/tensorflow/raw_ops.rb', line 3901 def stateless_random_uniform(shape: nil, seed: nil, dtype: nil) Utils.execute("StatelessRandomUniform", [shape, seed], dtype: dtype) end |
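The stateless random ops are pure functions of the seed pair, so the same shape and seed always produce the same values, which is handy for reproducible tests. Sketch; dtype is left at the op's default here, which assumes the binding skips nil attributes:

  require "tensorflow"

  a = TensorFlow::RawOps.stateless_random_uniform(shape: [2, 2], seed: [1, 42])
  b = TensorFlow::RawOps.stateless_random_uniform(shape: [2, 2], seed: [1, 42])
  # a and b hold identical uniform values in [0, 1)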
.stateless_random_uniform_int(shape: nil, seed: nil, minval: nil, maxval: nil, dtype: nil) ⇒ Object
3905 3906 3907 |
# File 'lib/tensorflow/raw_ops.rb', line 3905 def stateless_random_uniform_int(shape: nil, seed: nil, minval: nil, maxval: nil, dtype: nil) Utils.execute("StatelessRandomUniformInt", [shape, seed, minval, maxval], dtype: dtype) end |
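The stateless random ops derive their output purely from the requested shape and the two-element seed, so repeated calls with identical arguments return identical values. A minimal sketch, assuming plain Ruby arrays are converted to tensors by Utils.execute and that attributes left at their nil defaults fall back to the op's own defaults:

  # Deterministic: the same two-element seed always yields the same draw.
  ints = TensorFlow::RawOps.stateless_random_uniform_int(
    shape: [2, 3],   # output shape
    seed: [42, 0],   # two-element seed tensor
    minval: 0,       # inclusive lower bound
    maxval: 10       # exclusive upper bound
  )
  floats = TensorFlow::RawOps.stateless_random_normal(shape: [4], seed: [42, 0])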
.stateless_truncated_normal(shape: nil, seed: nil, dtype: nil) ⇒ Object
3909 3910 3911 |
# File 'lib/tensorflow/raw_ops.rb', line 3909 def stateless_truncated_normal(shape: nil, seed: nil, dtype: nil) Utils.execute("StatelessTruncatedNormal", [shape, seed], dtype: dtype) end |
.stateless_while(input: nil, cond: nil, body: nil) ⇒ Object
3913 3914 3915 |
# File 'lib/tensorflow/raw_ops.rb', line 3913 def stateless_while(input: nil, cond: nil, body: nil) Utils.execute("StatelessWhile", [input], cond: cond, body: body) end |
.static_regex_full_match(input: nil, pattern: nil) ⇒ Object
3917 3918 3919 |
# File 'lib/tensorflow/raw_ops.rb', line 3917 def static_regex_full_match(input: nil, pattern: nil) Utils.execute("StaticRegexFullMatch", [input], pattern: pattern) end |
.static_regex_replace(input: nil, pattern: nil, rewrite: nil, replace_global: nil) ⇒ Object
3921 3922 3923 |
# File 'lib/tensorflow/raw_ops.rb', line 3921 def static_regex_replace(input: nil, pattern: nil, rewrite: nil, replace_global: nil) Utils.execute("StaticRegexReplace", [input], pattern: pattern, rewrite: rewrite, replace_global: replace_global) end |
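static_regex_full_match and static_regex_replace take the pattern (and rewrite) as fixed attributes rather than tensor inputs. A hedged sketch, assuming Ruby string arrays are converted to string tensors on input:

  masked = TensorFlow::RawOps.static_regex_replace(
    input: ["order 123", "order 456"],
    pattern: "\\d+",      # RE2 syntax, fixed at op-creation time
    rewrite: "###",
    replace_global: true
  )
  hits = TensorFlow::RawOps.static_regex_full_match(input: ["abc123"], pattern: "[a-z]+\\d+")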
.stats_aggregator_handle_v2(container: nil, shared_name: nil) ⇒ Object
3925 3926 3927 |
# File 'lib/tensorflow/raw_ops.rb', line 3925 def stats_aggregator_handle_v2(container: nil, shared_name: nil) Utils.execute("StatsAggregatorHandleV2", [], container: container, shared_name: shared_name) end |
.stats_aggregator_set_summary_writer(stats_aggregator: nil, summary: nil) ⇒ Object
3929 3930 3931 |
# File 'lib/tensorflow/raw_ops.rb', line 3929 def stats_aggregator_set_summary_writer(stats_aggregator: nil, summary: nil) Utils.execute("StatsAggregatorSetSummaryWriter", [stats_aggregator, summary]) end |
.stop_gradient(input: nil) ⇒ Object
3933 3934 3935 |
# File 'lib/tensorflow/raw_ops.rb', line 3933 def stop_gradient(input: nil) Utils.execute("StopGradient", [input]) end |
.strided_slice(input: nil, start: nil, stop: nil, strides: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) ⇒ Object
3937 3938 3939 |
# File 'lib/tensorflow/raw_ops.rb', line 3937 def strided_slice(input: nil, start: nil, stop: nil, strides: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) Utils.execute("StridedSlice", [input, start, stop, strides], begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, new_axis_mask: new_axis_mask, shrink_axis_mask: shrink_axis_mask) end |
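StridedSlice expects start/stop/strides tensors plus five bit-mask attributes; passing 0 for every mask gives plain NumPy-style slicing. A sketch under the same input-conversion assumption as above:

  # Roughly input[0:2, 1:3] on a 3x3 matrix, with all masks disabled.
  slice = TensorFlow::RawOps.strided_slice(
    input: [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
    start: [0, 1], stop: [2, 3], strides: [1, 1],
    begin_mask: 0, end_mask: 0,
    ellipsis_mask: 0, new_axis_mask: 0, shrink_axis_mask: 0
  )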
.strided_slice_assign(ref: nil, start: nil, stop: nil, strides: nil, value: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) ⇒ Object
3941 3942 3943 |
# File 'lib/tensorflow/raw_ops.rb', line 3941 def strided_slice_assign(ref: nil, start: nil, stop: nil, strides: nil, value: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) Utils.execute("StridedSliceAssign", [ref, start, stop, strides, value], begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, new_axis_mask: new_axis_mask, shrink_axis_mask: shrink_axis_mask) end |
.strided_slice_grad(shape: nil, start: nil, stop: nil, strides: nil, dy: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) ⇒ Object
3945 3946 3947 |
# File 'lib/tensorflow/raw_ops.rb', line 3945 def strided_slice_grad(shape: nil, start: nil, stop: nil, strides: nil, dy: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) Utils.execute("StridedSliceGrad", [shape, start, stop, strides, dy], begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, new_axis_mask: new_axis_mask, shrink_axis_mask: shrink_axis_mask) end |
.string_format(inputs: nil, template: nil, placeholder: nil, summarize: nil) ⇒ Object
3949 3950 3951 |
# File 'lib/tensorflow/raw_ops.rb', line 3949 def string_format(inputs: nil, template: nil, placeholder: nil, summarize: nil) Utils.execute("StringFormat", [inputs], template: template, placeholder: placeholder, summarize: summarize) end |
.string_join(inputs: nil, separator: nil) ⇒ Object
3953 3954 3955 |
# File 'lib/tensorflow/raw_ops.rb', line 3953 def string_join(inputs: nil, separator: nil) Utils.execute("StringJoin", [inputs], separator: separator) end |
.string_length(input: nil, unit: nil) ⇒ Object
3957 3958 3959 |
# File 'lib/tensorflow/raw_ops.rb', line 3957 def string_length(input: nil, unit: nil) Utils.execute("StringLength", [input], unit: unit) end |
.string_lower(input: nil, encoding: nil) ⇒ Object
3961 3962 3963 |
# File 'lib/tensorflow/raw_ops.rb', line 3961 def string_lower(input: nil, encoding: nil) Utils.execute("StringLower", [input], encoding: encoding) end |
.string_split(input: nil, delimiter: nil, skip_empty: nil) ⇒ Object
3965 3966 3967 |
# File 'lib/tensorflow/raw_ops.rb', line 3965 def string_split(input: nil, delimiter: nil, skip_empty: nil) Utils.execute("StringSplit", [input, delimiter], skip_empty: skip_empty) end |
.string_split_v2(input: nil, sep: nil, maxsplit: nil) ⇒ Object
3969 3970 3971 |
# File 'lib/tensorflow/raw_ops.rb', line 3969 def string_split_v2(input: nil, sep: nil, maxsplit: nil) Utils.execute("StringSplitV2", [input, sep], maxsplit: maxsplit) end |
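string_split_v2 takes the separator as a tensor input and maxsplit as an attribute, and produces sparse output (indices, values, dense shape). A sketch, assuming multi-output ops come back from Utils.execute as an array:

  indices, values, shape = TensorFlow::RawOps.string_split_v2(
    input: ["a,b,c", "d,e"],
    sep: ",",
    maxsplit: -1   # -1 means no limit on the number of splits
  )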
.string_strip(input: nil) ⇒ Object
3973 3974 3975 |
# File 'lib/tensorflow/raw_ops.rb', line 3973 def string_strip(input: nil) Utils.execute("StringStrip", [input]) end |
.string_to_hash_bucket(string_tensor: nil, num_buckets: nil) ⇒ Object
3977 3978 3979 |
# File 'lib/tensorflow/raw_ops.rb', line 3977 def string_to_hash_bucket(string_tensor: nil, num_buckets: nil) Utils.execute("StringToHashBucket", [string_tensor], num_buckets: num_buckets) end |
.string_to_hash_bucket_fast(input: nil, num_buckets: nil) ⇒ Object
3981 3982 3983 |
# File 'lib/tensorflow/raw_ops.rb', line 3981 def string_to_hash_bucket_fast(input: nil, num_buckets: nil) Utils.execute("StringToHashBucketFast", [input], num_buckets: num_buckets) end |
.string_to_hash_bucket_strong(input: nil, num_buckets: nil, key: nil) ⇒ Object
3985 3986 3987 |
# File 'lib/tensorflow/raw_ops.rb', line 3985 def string_to_hash_bucket_strong(input: nil, num_buckets: nil, key: nil) Utils.execute("StringToHashBucketStrong", [input], num_buckets: num_buckets, key: key) end |
.string_to_number(string_tensor: nil, out_type: nil) ⇒ Object
3989 3990 3991 |
# File 'lib/tensorflow/raw_ops.rb', line 3989 def string_to_number(string_tensor: nil, out_type: nil) Utils.execute("StringToNumber", [string_tensor], out_type: out_type) end |
.string_upper(input: nil, encoding: nil) ⇒ Object
3993 3994 3995 |
# File 'lib/tensorflow/raw_ops.rb', line 3993 def string_upper(input: nil, encoding: nil) Utils.execute("StringUpper", [input], encoding: encoding) end |
.sub(x: nil, y: nil) ⇒ Object
3997 3998 3999 |
# File 'lib/tensorflow/raw_ops.rb', line 3997 def sub(x: nil, y: nil) Utils.execute("Sub", [x, y]) end |
.substr(input: nil, pos: nil, len: nil, unit: nil) ⇒ Object
4001 4002 4003 |
# File 'lib/tensorflow/raw_ops.rb', line 4001 def substr(input: nil, pos: nil, len: nil, unit: nil) Utils.execute("Substr", [input, pos, len], unit: unit) end |
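Substr slices each string element by position and length; the unit attribute selects between "BYTE" and "UTF8_CHAR" indexing. A sketch, assumptions as above:

  # Take up to 5 characters starting at offset 0 from each element.
  prefix = TensorFlow::RawOps.substr(
    input: ["tensorflow", "ruby"],
    pos: 0,
    len: 5,
    unit: "UTF8_CHAR"
  )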
.sum(input: nil, reduction_indices: nil, keep_dims: nil) ⇒ Object
4005 4006 4007 |
# File 'lib/tensorflow/raw_ops.rb', line 4005 def sum(input: nil, reduction_indices: nil, keep_dims: nil) Utils.execute("Sum", [input, reduction_indices], keep_dims: keep_dims) end |
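Sum, like the other reductions listed here (all, any, and so on), reduces over reduction_indices and optionally keeps the reduced dimensions. Sketch:

  # Column sums of a 2x3 matrix -> shape [1, 3] because keep_dims is true.
  totals = TensorFlow::RawOps.sum(
    input: [[1, 2, 3], [4, 5, 6]],
    reduction_indices: [0],
    keep_dims: true
  )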
.summary_writer(shared_name: nil, container: nil) ⇒ Object
4009 4010 4011 |
# File 'lib/tensorflow/raw_ops.rb', line 4009 def summary_writer(shared_name: nil, container: nil) Utils.execute("SummaryWriter", [], shared_name: shared_name, container: container) end |
.svd(input: nil, compute_uv: nil, full_matrices: nil) ⇒ Object
4013 4014 4015 |
# File 'lib/tensorflow/raw_ops.rb', line 4013 def svd(input: nil, compute_uv: nil, full_matrices: nil) Utils.execute("Svd", [input], compute_uv: compute_uv, full_matrices: full_matrices) end |
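Svd returns the singular values plus (optionally) the U and V factors, controlled by the compute_uv and full_matrices attributes. Sketch, assuming the three outputs come back as an array:

  s, u, v = TensorFlow::RawOps.svd(
    input: [[1.0, 0.0], [0.0, 2.0]],
    compute_uv: true,
    full_matrices: false   # thin factorization
  )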
.switch(data: nil, pred: nil) ⇒ Object
4017 4018 4019 |
# File 'lib/tensorflow/raw_ops.rb', line 4017 def switch(data: nil, pred: nil) Utils.execute("Switch", [data, pred]) end |
.symbolic_gradient(input: nil, f: nil) ⇒ Object
4021 4022 4023 |
# File 'lib/tensorflow/raw_ops.rb', line 4021 def symbolic_gradient(input: nil, f: nil) Utils.execute("SymbolicGradient", [input], f: f) end |
.take_dataset(input_dataset: nil, count: nil, output_types: nil, output_shapes: nil) ⇒ Object
4065 4066 4067 |
# File 'lib/tensorflow/raw_ops.rb', line 4065 def take_dataset(input_dataset: nil, count: nil, output_types: nil, output_shapes: nil) Utils.execute("TakeDataset", [input_dataset, count], output_types: output_types, output_shapes: output_shapes) end |
.take_many_sparse_from_tensors_map(sparse_handles: nil, dtype: nil, container: nil, shared_name: nil) ⇒ Object
4069 4070 4071 |
# File 'lib/tensorflow/raw_ops.rb', line 4069 def take_many_sparse_from_tensors_map(sparse_handles: nil, dtype: nil, container: nil, shared_name: nil) Utils.execute("TakeManySparseFromTensorsMap", [sparse_handles], dtype: dtype, container: container, shared_name: shared_name) end |
.tan(x: nil) ⇒ Object
4073 4074 4075 |
# File 'lib/tensorflow/raw_ops.rb', line 4073 def tan(x: nil) Utils.execute("Tan", [x]) end |
.tanh(x: nil) ⇒ Object
4077 4078 4079 |
# File 'lib/tensorflow/raw_ops.rb', line 4077 def tanh(x: nil) Utils.execute("Tanh", [x]) end |
.tanh_grad(y: nil, dy: nil) ⇒ Object
4081 4082 4083 |
# File 'lib/tensorflow/raw_ops.rb', line 4081 def tanh_grad(y: nil, dy: nil) Utils.execute("TanhGrad", [y, dy]) end |
.temporary_variable(shape: nil, dtype: nil, var_name: nil) ⇒ Object
4085 4086 4087 |
# File 'lib/tensorflow/raw_ops.rb', line 4085 def temporary_variable(shape: nil, dtype: nil, var_name: nil) Utils.execute("TemporaryVariable", [], shape: shape, dtype: dtype, var_name: var_name) end |
.tensor_array(size: nil, dtype: nil, dynamic_size: nil, clear_after_read: nil, tensor_array_name: nil, element_shape: nil) ⇒ Object
4089 4090 4091 |
# File 'lib/tensorflow/raw_ops.rb', line 4089 def tensor_array(size: nil, dtype: nil, dynamic_size: nil, clear_after_read: nil, tensor_array_name: nil, element_shape: nil) Utils.execute("TensorArray", [size], dtype: dtype, dynamic_size: dynamic_size, clear_after_read: clear_after_read, tensor_array_name: tensor_array_name, element_shape: element_shape) end |
.tensor_array_close(handle: nil) ⇒ Object
4093 4094 4095 |
# File 'lib/tensorflow/raw_ops.rb', line 4093 def tensor_array_close(handle: nil) Utils.execute("TensorArrayClose", [handle]) end |
.tensor_array_close_v2(handle: nil) ⇒ Object
4097 4098 4099 |
# File 'lib/tensorflow/raw_ops.rb', line 4097 def tensor_array_close_v2(handle: nil) Utils.execute("TensorArrayCloseV2", [handle]) end |
.tensor_array_close_v3(handle: nil) ⇒ Object
4101 4102 4103 |
# File 'lib/tensorflow/raw_ops.rb', line 4101 def tensor_array_close_v3(handle: nil) Utils.execute("TensorArrayCloseV3", [handle]) end |
.tensor_array_concat(handle: nil, flow_in: nil, dtype: nil, element_shape_except0: nil) ⇒ Object
4105 4106 4107 |
# File 'lib/tensorflow/raw_ops.rb', line 4105 def tensor_array_concat(handle: nil, flow_in: nil, dtype: nil, element_shape_except0: nil) Utils.execute("TensorArrayConcat", [handle, flow_in], dtype: dtype, element_shape_except0: element_shape_except0) end |
.tensor_array_concat_v2(handle: nil, flow_in: nil, dtype: nil, element_shape_except0: nil) ⇒ Object
4109 4110 4111 |
# File 'lib/tensorflow/raw_ops.rb', line 4109 def tensor_array_concat_v2(handle: nil, flow_in: nil, dtype: nil, element_shape_except0: nil) Utils.execute("TensorArrayConcatV2", [handle, flow_in], dtype: dtype, element_shape_except0: element_shape_except0) end |
.tensor_array_concat_v3(handle: nil, flow_in: nil, dtype: nil, element_shape_except0: nil) ⇒ Object
4113 4114 4115 |
# File 'lib/tensorflow/raw_ops.rb', line 4113 def tensor_array_concat_v3(handle: nil, flow_in: nil, dtype: nil, element_shape_except0: nil) Utils.execute("TensorArrayConcatV3", [handle, flow_in], dtype: dtype, element_shape_except0: element_shape_except0) end |
.tensor_array_gather(handle: nil, indices: nil, flow_in: nil, dtype: nil, element_shape: nil) ⇒ Object
4117 4118 4119 |
# File 'lib/tensorflow/raw_ops.rb', line 4117 def tensor_array_gather(handle: nil, indices: nil, flow_in: nil, dtype: nil, element_shape: nil) Utils.execute("TensorArrayGather", [handle, indices, flow_in], dtype: dtype, element_shape: element_shape) end |
.tensor_array_gather_v2(handle: nil, indices: nil, flow_in: nil, dtype: nil, element_shape: nil) ⇒ Object
4121 4122 4123 |
# File 'lib/tensorflow/raw_ops.rb', line 4121 def tensor_array_gather_v2(handle: nil, indices: nil, flow_in: nil, dtype: nil, element_shape: nil) Utils.execute("TensorArrayGatherV2", [handle, indices, flow_in], dtype: dtype, element_shape: element_shape) end |
.tensor_array_gather_v3(handle: nil, indices: nil, flow_in: nil, dtype: nil, element_shape: nil) ⇒ Object
4125 4126 4127 |
# File 'lib/tensorflow/raw_ops.rb', line 4125 def tensor_array_gather_v3(handle: nil, indices: nil, flow_in: nil, dtype: nil, element_shape: nil) Utils.execute("TensorArrayGatherV3", [handle, indices, flow_in], dtype: dtype, element_shape: element_shape) end |
.tensor_array_grad(handle: nil, flow_in: nil, source: nil) ⇒ Object
4129 4130 4131 |
# File 'lib/tensorflow/raw_ops.rb', line 4129 def tensor_array_grad(handle: nil, flow_in: nil, source: nil) Utils.execute("TensorArrayGrad", [handle, flow_in], source: source) end |
.tensor_array_grad_v2(handle: nil, flow_in: nil, source: nil) ⇒ Object
4133 4134 4135 |
# File 'lib/tensorflow/raw_ops.rb', line 4133 def tensor_array_grad_v2(handle: nil, flow_in: nil, source: nil) Utils.execute("TensorArrayGradV2", [handle, flow_in], source: source) end |
.tensor_array_grad_v3(handle: nil, flow_in: nil, source: nil) ⇒ Object
4137 4138 4139 |
# File 'lib/tensorflow/raw_ops.rb', line 4137 def tensor_array_grad_v3(handle: nil, flow_in: nil, source: nil) Utils.execute("TensorArrayGradV3", [handle, flow_in], source: source) end |
.tensor_array_grad_with_shape(handle: nil, flow_in: nil, shape_to_prepend: nil, source: nil) ⇒ Object
4141 4142 4143 |
# File 'lib/tensorflow/raw_ops.rb', line 4141 def tensor_array_grad_with_shape(handle: nil, flow_in: nil, shape_to_prepend: nil, source: nil) Utils.execute("TensorArrayGradWithShape", [handle, flow_in, shape_to_prepend], source: source) end |
.tensor_array_pack(handle: nil, flow_in: nil, dtype: nil, element_shape: nil) ⇒ Object
4145 4146 4147 |
# File 'lib/tensorflow/raw_ops.rb', line 4145 def tensor_array_pack(handle: nil, flow_in: nil, dtype: nil, element_shape: nil) Utils.execute("TensorArrayPack", [handle, flow_in], dtype: dtype, element_shape: element_shape) end |
.tensor_array_read(handle: nil, index: nil, flow_in: nil, dtype: nil) ⇒ Object
4149 4150 4151 |
# File 'lib/tensorflow/raw_ops.rb', line 4149 def tensor_array_read(handle: nil, index: nil, flow_in: nil, dtype: nil) Utils.execute("TensorArrayRead", [handle, index, flow_in], dtype: dtype) end |
.tensor_array_read_v2(handle: nil, index: nil, flow_in: nil, dtype: nil) ⇒ Object
4153 4154 4155 |
# File 'lib/tensorflow/raw_ops.rb', line 4153 def tensor_array_read_v2(handle: nil, index: nil, flow_in: nil, dtype: nil) Utils.execute("TensorArrayReadV2", [handle, index, flow_in], dtype: dtype) end |
.tensor_array_read_v3(handle: nil, index: nil, flow_in: nil, dtype: nil) ⇒ Object
4157 4158 4159 |
# File 'lib/tensorflow/raw_ops.rb', line 4157 def tensor_array_read_v3(handle: nil, index: nil, flow_in: nil, dtype: nil) Utils.execute("TensorArrayReadV3", [handle, index, flow_in], dtype: dtype) end |
.tensor_array_scatter(handle: nil, indices: nil, value: nil, flow_in: nil) ⇒ Object
4161 4162 4163 |
# File 'lib/tensorflow/raw_ops.rb', line 4161 def tensor_array_scatter(handle: nil, indices: nil, value: nil, flow_in: nil) Utils.execute("TensorArrayScatter", [handle, indices, value, flow_in]) end |
.tensor_array_scatter_v2(handle: nil, indices: nil, value: nil, flow_in: nil) ⇒ Object
4165 4166 4167 |
# File 'lib/tensorflow/raw_ops.rb', line 4165 def tensor_array_scatter_v2(handle: nil, indices: nil, value: nil, flow_in: nil) Utils.execute("TensorArrayScatterV2", [handle, indices, value, flow_in]) end |
.tensor_array_scatter_v3(handle: nil, indices: nil, value: nil, flow_in: nil) ⇒ Object
4169 4170 4171 |
# File 'lib/tensorflow/raw_ops.rb', line 4169 def tensor_array_scatter_v3(handle: nil, indices: nil, value: nil, flow_in: nil) Utils.execute("TensorArrayScatterV3", [handle, indices, value, flow_in]) end |
.tensor_array_size(handle: nil, flow_in: nil) ⇒ Object
4173 4174 4175 |
# File 'lib/tensorflow/raw_ops.rb', line 4173 def tensor_array_size(handle: nil, flow_in: nil) Utils.execute("TensorArraySize", [handle, flow_in]) end |
.tensor_array_size_v2(handle: nil, flow_in: nil) ⇒ Object
4177 4178 4179 |
# File 'lib/tensorflow/raw_ops.rb', line 4177 def tensor_array_size_v2(handle: nil, flow_in: nil) Utils.execute("TensorArraySizeV2", [handle, flow_in]) end |
.tensor_array_size_v3(handle: nil, flow_in: nil) ⇒ Object
4181 4182 4183 |
# File 'lib/tensorflow/raw_ops.rb', line 4181 def tensor_array_size_v3(handle: nil, flow_in: nil) Utils.execute("TensorArraySizeV3", [handle, flow_in]) end |
.tensor_array_split(handle: nil, value: nil, lengths: nil, flow_in: nil) ⇒ Object
4185 4186 4187 |
# File 'lib/tensorflow/raw_ops.rb', line 4185 def tensor_array_split(handle: nil, value: nil, lengths: nil, flow_in: nil) Utils.execute("TensorArraySplit", [handle, value, lengths, flow_in]) end |
.tensor_array_split_v2(handle: nil, value: nil, lengths: nil, flow_in: nil) ⇒ Object
4189 4190 4191 |
# File 'lib/tensorflow/raw_ops.rb', line 4189 def tensor_array_split_v2(handle: nil, value: nil, lengths: nil, flow_in: nil) Utils.execute("TensorArraySplitV2", [handle, value, lengths, flow_in]) end |
.tensor_array_split_v3(handle: nil, value: nil, lengths: nil, flow_in: nil) ⇒ Object
4193 4194 4195 |
# File 'lib/tensorflow/raw_ops.rb', line 4193 def tensor_array_split_v3(handle: nil, value: nil, lengths: nil, flow_in: nil) Utils.execute("TensorArraySplitV3", [handle, value, lengths, flow_in]) end |
.tensor_array_unpack(handle: nil, value: nil, flow_in: nil) ⇒ Object
4197 4198 4199 |
# File 'lib/tensorflow/raw_ops.rb', line 4197 def tensor_array_unpack(handle: nil, value: nil, flow_in: nil) Utils.execute("TensorArrayUnpack", [handle, value, flow_in]) end |
.tensor_array_v2(size: nil, dtype: nil, element_shape: nil, dynamic_size: nil, clear_after_read: nil, tensor_array_name: nil) ⇒ Object
4201 4202 4203 |
# File 'lib/tensorflow/raw_ops.rb', line 4201 def tensor_array_v2(size: nil, dtype: nil, element_shape: nil, dynamic_size: nil, clear_after_read: nil, tensor_array_name: nil) Utils.execute("TensorArrayV2", [size], dtype: dtype, element_shape: element_shape, dynamic_size: dynamic_size, clear_after_read: clear_after_read, tensor_array_name: tensor_array_name) end |
.tensor_array_v3(size: nil, dtype: nil, element_shape: nil, dynamic_size: nil, clear_after_read: nil, identical_element_shapes: nil, tensor_array_name: nil) ⇒ Object
4205 4206 4207 |
# File 'lib/tensorflow/raw_ops.rb', line 4205 def tensor_array_v3(size: nil, dtype: nil, element_shape: nil, dynamic_size: nil, clear_after_read: nil, identical_element_shapes: nil, tensor_array_name: nil) Utils.execute("TensorArrayV3", [size], dtype: dtype, element_shape: element_shape, dynamic_size: dynamic_size, clear_after_read: clear_after_read, identical_element_shapes: identical_element_shapes, tensor_array_name: tensor_array_name) end |
.tensor_array_write(handle: nil, index: nil, value: nil, flow_in: nil) ⇒ Object
4209 4210 4211 |
# File 'lib/tensorflow/raw_ops.rb', line 4209 def tensor_array_write(handle: nil, index: nil, value: nil, flow_in: nil) Utils.execute("TensorArrayWrite", [handle, index, value, flow_in]) end |
.tensor_array_write_v2(handle: nil, index: nil, value: nil, flow_in: nil) ⇒ Object
4213 4214 4215 |
# File 'lib/tensorflow/raw_ops.rb', line 4213 def tensor_array_write_v2(handle: nil, index: nil, value: nil, flow_in: nil) Utils.execute("TensorArrayWriteV2", [handle, index, value, flow_in]) end |
.tensor_array_write_v3(handle: nil, index: nil, value: nil, flow_in: nil) ⇒ Object
4217 4218 4219 |
# File 'lib/tensorflow/raw_ops.rb', line 4217 def tensor_array_write_v3(handle: nil, index: nil, value: nil, flow_in: nil) Utils.execute("TensorArrayWriteV3", [handle, index, value, flow_in]) end |
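The V3 TensorArray ops thread a scalar flow value through every write so ordering is preserved: tensor_array_v3 returns a handle and an initial flow, each write returns an updated flow, and reads take the latest flow. A hedged end-to-end sketch; the :float dtype symbol and the implicit array-to-tensor conversion are assumptions about how Utils.execute maps Ruby values onto op attributes and inputs:

  handle, flow = TensorFlow::RawOps.tensor_array_v3(size: 2, dtype: :float)
  flow = TensorFlow::RawOps.tensor_array_write_v3(handle: handle, index: 0, value: [1.0, 2.0], flow_in: flow)
  flow = TensorFlow::RawOps.tensor_array_write_v3(handle: handle, index: 1, value: [3.0, 4.0], flow_in: flow)
  first = TensorFlow::RawOps.tensor_array_read_v3(handle: handle, index: 0, flow_in: flow, dtype: :float)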
.tensor_dataset(components: nil, output_shapes: nil) ⇒ Object
4221 4222 4223 |
# File 'lib/tensorflow/raw_ops.rb', line 4221 def tensor_dataset(components: nil, output_shapes: nil) Utils.execute("TensorDataset", [components], output_shapes: output_shapes) end |
.tensor_forest_create_tree_variable(tree_handle: nil, tree_config: nil) ⇒ Object
4225 4226 4227 |
# File 'lib/tensorflow/raw_ops.rb', line 4225 def tensor_forest_create_tree_variable(tree_handle: nil, tree_config: nil) Utils.execute("TensorForestCreateTreeVariable", [tree_handle, tree_config]) end |
.tensor_forest_tree_deserialize(tree_handle: nil, tree_config: nil) ⇒ Object
4229 4230 4231 |
# File 'lib/tensorflow/raw_ops.rb', line 4229 def tensor_forest_tree_deserialize(tree_handle: nil, tree_config: nil) Utils.execute("TensorForestTreeDeserialize", [tree_handle, tree_config]) end |
.tensor_forest_tree_is_initialized_op(tree_handle: nil) ⇒ Object
4233 4234 4235 |
# File 'lib/tensorflow/raw_ops.rb', line 4233 def tensor_forest_tree_is_initialized_op(tree_handle: nil) Utils.execute("TensorForestTreeIsInitializedOp", [tree_handle]) end |
.tensor_forest_tree_predict(tree_handle: nil, dense_features: nil, logits_dimension: nil) ⇒ Object
4237 4238 4239 |
# File 'lib/tensorflow/raw_ops.rb', line 4237 def tensor_forest_tree_predict(tree_handle: nil, dense_features: nil, logits_dimension: nil) Utils.execute("TensorForestTreePredict", [tree_handle, dense_features], logits_dimension: logits_dimension) end |
.tensor_forest_tree_resource_handle_op(container: nil, shared_name: nil) ⇒ Object
4241 4242 4243 |
# File 'lib/tensorflow/raw_ops.rb', line 4241 def tensor_forest_tree_resource_handle_op(container: nil, shared_name: nil) Utils.execute("TensorForestTreeResourceHandleOp", [], container: container, shared_name: shared_name) end |
.tensor_forest_tree_serialize(tree_handle: nil) ⇒ Object
4245 4246 4247 |
# File 'lib/tensorflow/raw_ops.rb', line 4245 def tensor_forest_tree_serialize(tree_handle: nil) Utils.execute("TensorForestTreeSerialize", [tree_handle]) end |
.tensor_forest_tree_size(tree_handle: nil) ⇒ Object
4249 4250 4251 |
# File 'lib/tensorflow/raw_ops.rb', line 4249 def tensor_forest_tree_size(tree_handle: nil) Utils.execute("TensorForestTreeSize", [tree_handle]) end |
.tensor_list_concat(input_handle: nil, element_dtype: nil, element_shape: nil) ⇒ Object
4253 4254 4255 |
# File 'lib/tensorflow/raw_ops.rb', line 4253 def tensor_list_concat(input_handle: nil, element_dtype: nil, element_shape: nil) Utils.execute("TensorListConcat", [input_handle], element_dtype: element_dtype, element_shape: element_shape) end |
.tensor_list_concat_lists(input_a: nil, input_b: nil, element_dtype: nil) ⇒ Object
4257 4258 4259 |
# File 'lib/tensorflow/raw_ops.rb', line 4257 def tensor_list_concat_lists(input_a: nil, input_b: nil, element_dtype: nil) Utils.execute("TensorListConcatLists", [input_a, input_b], element_dtype: element_dtype) end |
.tensor_list_concat_v2(input_handle: nil, element_shape: nil, leading_dims: nil, element_dtype: nil, shape_type: nil) ⇒ Object
4261 4262 4263 |
# File 'lib/tensorflow/raw_ops.rb', line 4261 def tensor_list_concat_v2(input_handle: nil, element_shape: nil, leading_dims: nil, element_dtype: nil, shape_type: nil) Utils.execute("TensorListConcatV2", [input_handle, element_shape, leading_dims], element_dtype: element_dtype, shape_type: shape_type) end |
.tensor_list_element_shape(input_handle: nil, shape_type: nil) ⇒ Object
4265 4266 4267 |
# File 'lib/tensorflow/raw_ops.rb', line 4265 def tensor_list_element_shape(input_handle: nil, shape_type: nil) Utils.execute("TensorListElementShape", [input_handle], shape_type: shape_type) end |
.tensor_list_from_tensor(tensor: nil, element_shape: nil, element_dtype: nil, shape_type: nil) ⇒ Object
4269 4270 4271 |
# File 'lib/tensorflow/raw_ops.rb', line 4269 def tensor_list_from_tensor(tensor: nil, element_shape: nil, element_dtype: nil, shape_type: nil) Utils.execute("TensorListFromTensor", [tensor, element_shape], element_dtype: element_dtype, shape_type: shape_type) end |
.tensor_list_gather(input_handle: nil, indices: nil, element_shape: nil, element_dtype: nil) ⇒ Object
4273 4274 4275 |
# File 'lib/tensorflow/raw_ops.rb', line 4273 def tensor_list_gather(input_handle: nil, indices: nil, element_shape: nil, element_dtype: nil) Utils.execute("TensorListGather", [input_handle, indices, element_shape], element_dtype: element_dtype) end |
.tensor_list_get_item(input_handle: nil, index: nil, element_shape: nil, element_dtype: nil) ⇒ Object
4277 4278 4279 |
# File 'lib/tensorflow/raw_ops.rb', line 4277 def tensor_list_get_item(input_handle: nil, index: nil, element_shape: nil, element_dtype: nil) Utils.execute("TensorListGetItem", [input_handle, index, element_shape], element_dtype: element_dtype) end |
.tensor_list_length(input_handle: nil) ⇒ Object
4281 4282 4283 |
# File 'lib/tensorflow/raw_ops.rb', line 4281 def tensor_list_length(input_handle: nil) Utils.execute("TensorListLength", [input_handle]) end |
.tensor_list_pop_back(input_handle: nil, element_shape: nil, element_dtype: nil) ⇒ Object
4285 4286 4287 |
# File 'lib/tensorflow/raw_ops.rb', line 4285 def tensor_list_pop_back(input_handle: nil, element_shape: nil, element_dtype: nil) Utils.execute("TensorListPopBack", [input_handle, element_shape], element_dtype: element_dtype) end |
.tensor_list_push_back(input_handle: nil, tensor: nil, element_dtype: nil) ⇒ Object
4289 4290 4291 |
# File 'lib/tensorflow/raw_ops.rb', line 4289 def tensor_list_push_back(input_handle: nil, tensor: nil, element_dtype: nil) Utils.execute("TensorListPushBack", [input_handle, tensor], element_dtype: element_dtype) end |
.tensor_list_push_back_batch(input_handles: nil, tensor: nil, element_dtype: nil) ⇒ Object
4293 4294 4295 |
# File 'lib/tensorflow/raw_ops.rb', line 4293 def tensor_list_push_back_batch(input_handles: nil, tensor: nil, element_dtype: nil) Utils.execute("TensorListPushBackBatch", [input_handles, tensor], element_dtype: element_dtype) end |
.tensor_list_reserve(element_shape: nil, num_elements: nil, element_dtype: nil, shape_type: nil) ⇒ Object
4297 4298 4299 |
# File 'lib/tensorflow/raw_ops.rb', line 4297 def tensor_list_reserve(element_shape: nil, num_elements: nil, element_dtype: nil, shape_type: nil) Utils.execute("TensorListReserve", [element_shape, num_elements], element_dtype: element_dtype, shape_type: shape_type) end |
.tensor_list_resize(input_handle: nil, size: nil) ⇒ Object
4301 4302 4303 |
# File 'lib/tensorflow/raw_ops.rb', line 4301 def tensor_list_resize(input_handle: nil, size: nil) Utils.execute("TensorListResize", [input_handle, size]) end |
.tensor_list_scatter(tensor: nil, indices: nil, element_shape: nil, element_dtype: nil, shape_type: nil) ⇒ Object
4305 4306 4307 |
# File 'lib/tensorflow/raw_ops.rb', line 4305 def tensor_list_scatter(tensor: nil, indices: nil, element_shape: nil, element_dtype: nil, shape_type: nil) Utils.execute("TensorListScatter", [tensor, indices, element_shape], element_dtype: element_dtype, shape_type: shape_type) end |
.tensor_list_scatter_into_existing_list(input_handle: nil, tensor: nil, indices: nil, element_dtype: nil) ⇒ Object
4309 4310 4311 |
# File 'lib/tensorflow/raw_ops.rb', line 4309 def tensor_list_scatter_into_existing_list(input_handle: nil, tensor: nil, indices: nil, element_dtype: nil) Utils.execute("TensorListScatterIntoExistingList", [input_handle, tensor, indices], element_dtype: element_dtype) end |
.tensor_list_scatter_v2(tensor: nil, indices: nil, element_shape: nil, num_elements: nil, element_dtype: nil, shape_type: nil) ⇒ Object
4313 4314 4315 |
# File 'lib/tensorflow/raw_ops.rb', line 4313 def tensor_list_scatter_v2(tensor: nil, indices: nil, element_shape: nil, num_elements: nil, element_dtype: nil, shape_type: nil) Utils.execute("TensorListScatterV2", [tensor, indices, element_shape, num_elements], element_dtype: element_dtype, shape_type: shape_type) end |
.tensor_list_set_item(input_handle: nil, index: nil, item: nil, element_dtype: nil) ⇒ Object
4317 4318 4319 |
# File 'lib/tensorflow/raw_ops.rb', line 4317 def tensor_list_set_item(input_handle: nil, index: nil, item: nil, element_dtype: nil) Utils.execute("TensorListSetItem", [input_handle, index, item], element_dtype: element_dtype) end |
.tensor_list_split(tensor: nil, element_shape: nil, lengths: nil, element_dtype: nil, shape_type: nil) ⇒ Object
4321 4322 4323 |
# File 'lib/tensorflow/raw_ops.rb', line 4321 def tensor_list_split(tensor: nil, element_shape: nil, lengths: nil, element_dtype: nil, shape_type: nil) Utils.execute("TensorListSplit", [tensor, element_shape, lengths], element_dtype: element_dtype, shape_type: shape_type) end |
.tensor_list_stack(input_handle: nil, element_shape: nil, element_dtype: nil, num_elements: nil) ⇒ Object
4325 4326 4327 |
# File 'lib/tensorflow/raw_ops.rb', line 4325 def tensor_list_stack(input_handle: nil, element_shape: nil, element_dtype: nil, num_elements: nil) Utils.execute("TensorListStack", [input_handle, element_shape], element_dtype: element_dtype, num_elements: num_elements) end |
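TensorLists work similarly but pass a list handle from op to op instead of a flow value: tensor_list_reserve (or scatter/from_tensor) creates the list, set_item and push_back return a new handle, and stack turns the list back into a dense tensor. Sketch, with the same caveats about dtype symbols and implicit conversion:

  list = TensorFlow::RawOps.tensor_list_reserve(
    element_shape: [2], num_elements: 2,
    element_dtype: :float, shape_type: :int32
  )
  list = TensorFlow::RawOps.tensor_list_set_item(input_handle: list, index: 0, item: [1.0, 2.0], element_dtype: :float)
  list = TensorFlow::RawOps.tensor_list_set_item(input_handle: list, index: 1, item: [3.0, 4.0], element_dtype: :float)
  dense = TensorFlow::RawOps.tensor_list_stack(input_handle: list, element_shape: [2], element_dtype: :float, num_elements: 2)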
.tensor_scatter_add(tensor: nil, indices: nil, updates: nil) ⇒ Object
4329 4330 4331 |
# File 'lib/tensorflow/raw_ops.rb', line 4329 def tensor_scatter_add(tensor: nil, indices: nil, updates: nil) Utils.execute("TensorScatterAdd", [tensor, indices, updates]) end |
.tensor_scatter_sub(tensor: nil, indices: nil, updates: nil) ⇒ Object
4333 4334 4335 |
# File 'lib/tensorflow/raw_ops.rb', line 4333 def tensor_scatter_sub(tensor: nil, indices: nil, updates: nil) Utils.execute("TensorScatterSub", [tensor, indices, updates]) end |
.tensor_scatter_update(tensor: nil, indices: nil, updates: nil) ⇒ Object
4337 4338 4339 |
# File 'lib/tensorflow/raw_ops.rb', line 4337 def tensor_scatter_update(tensor: nil, indices: nil, updates: nil) Utils.execute("TensorScatterUpdate", [tensor, indices, updates]) end |
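The tensor_scatter_* ops are the functional counterparts of the scatter_nd_* variable ops: they return a new tensor rather than mutating a resource. Sketch:

  # Overwrite rows 0 and 2 of a 3x2 tensor of zeros.
  updated = TensorFlow::RawOps.tensor_scatter_update(
    tensor: [[0, 0], [0, 0], [0, 0]],
    indices: [[0], [2]],
    updates: [[1, 2], [3, 4]]
  )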
.tensor_slice_dataset(components: nil, output_shapes: nil) ⇒ Object
4341 4342 4343 |
# File 'lib/tensorflow/raw_ops.rb', line 4341 def tensor_slice_dataset(components: nil, output_shapes: nil) Utils.execute("TensorSliceDataset", [components], output_shapes: output_shapes) end |
.tensor_strided_slice_update(input: nil, start: nil, stop: nil, strides: nil, value: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) ⇒ Object
4345 4346 4347 |
# File 'lib/tensorflow/raw_ops.rb', line 4345 def tensor_strided_slice_update(input: nil, start: nil, stop: nil, strides: nil, value: nil, begin_mask: nil, end_mask: nil, ellipsis_mask: nil, new_axis_mask: nil, shrink_axis_mask: nil) Utils.execute("TensorStridedSliceUpdate", [input, start, stop, strides, value], begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, new_axis_mask: new_axis_mask, shrink_axis_mask: shrink_axis_mask) end |
.tensor_summary(tensor: nil, description: nil, labels: nil, display_name: nil) ⇒ Object
4349 4350 4351 |
# File 'lib/tensorflow/raw_ops.rb', line 4349 def tensor_summary(tensor: nil, description: nil, labels: nil, display_name: nil) Utils.execute("TensorSummary", [tensor], description: description, labels: labels, display_name: display_name) end |
.tensor_summary_v2(tag: nil, tensor: nil, serialized_summary_metadata: nil) ⇒ Object
4353 4354 4355 |
# File 'lib/tensorflow/raw_ops.rb', line 4353 def tensor_summary_v2(tag: nil, tensor: nil, serialized_summary_metadata: nil) Utils.execute("TensorSummaryV2", [tag, tensor, serialized_summary_metadata]) end |
.text_line_dataset(filenames: nil, compression_type: nil, buffer_size: nil) ⇒ Object
4357 4358 4359 |
# File 'lib/tensorflow/raw_ops.rb', line 4357 def text_line_dataset(filenames: nil, compression_type: nil, buffer_size: nil) Utils.execute("TextLineDataset", [filenames, compression_type, buffer_size]) end |
.text_line_reader(skip_header_lines: nil, container: nil, shared_name: nil) ⇒ Object
4361 4362 4363 |
# File 'lib/tensorflow/raw_ops.rb', line 4361 def text_line_reader(skip_header_lines: nil, container: nil, shared_name: nil) Utils.execute("TextLineReader", [], skip_header_lines: skip_header_lines, container: container, shared_name: shared_name) end |
.text_line_reader_v2(skip_header_lines: nil, container: nil, shared_name: nil) ⇒ Object
4365 4366 4367 |
# File 'lib/tensorflow/raw_ops.rb', line 4365 def text_line_reader_v2(skip_header_lines: nil, container: nil, shared_name: nil) Utils.execute("TextLineReaderV2", [], skip_header_lines: skip_header_lines, container: container, shared_name: shared_name) end |
.tf_record_dataset(filenames: nil, compression_type: nil, buffer_size: nil) ⇒ Object
4025 4026 4027 |
# File 'lib/tensorflow/raw_ops.rb', line 4025 def tf_record_dataset(filenames: nil, compression_type: nil, buffer_size: nil) Utils.execute("TFRecordDataset", [filenames, compression_type, buffer_size]) end |
.tf_record_reader(container: nil, shared_name: nil, compression_type: nil) ⇒ Object
4029 4030 4031 |
# File 'lib/tensorflow/raw_ops.rb', line 4029 def tf_record_reader(container: nil, shared_name: nil, compression_type: nil) Utils.execute("TFRecordReader", [], container: container, shared_name: shared_name, compression_type: compression_type) end |
.tf_record_reader_v2(container: nil, shared_name: nil, compression_type: nil) ⇒ Object
4033 4034 4035 |
# File 'lib/tensorflow/raw_ops.rb', line 4033 def tf_record_reader_v2(container: nil, shared_name: nil, compression_type: nil) Utils.execute("TFRecordReaderV2", [], container: container, shared_name: shared_name, compression_type: compression_type) end |
.thread_unsafe_unigram_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, seed: nil, seed2: nil) ⇒ Object
4369 4370 4371 |
# File 'lib/tensorflow/raw_ops.rb', line 4369 def thread_unsafe_unigram_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, seed: nil, seed2: nil) Utils.execute("ThreadUnsafeUnigramCandidateSampler", [true_classes], num_true: num_true, num_sampled: num_sampled, unique: unique, range_max: range_max, seed: seed, seed2: seed2) end |
.tile(input: nil, multiples: nil) ⇒ Object
4373 4374 4375 |
# File 'lib/tensorflow/raw_ops.rb', line 4373 def tile(input: nil, multiples: nil) Utils.execute("Tile", [input, multiples]) end |
.tile_grad(input: nil, multiples: nil) ⇒ Object
4377 4378 4379 |
# File 'lib/tensorflow/raw_ops.rb', line 4377 def tile_grad(input: nil, multiples: nil) Utils.execute("TileGrad", [input, multiples]) end |
.timestamp ⇒ Object
4381 4382 4383 |
# File 'lib/tensorflow/raw_ops.rb', line 4381 def timestamp Utils.execute("Timestamp", []) end |
.top_k(input: nil, k: nil, sorted: nil) ⇒ Object
4385 4386 4387 |
# File 'lib/tensorflow/raw_ops.rb', line 4385 def top_k(input: nil, k: nil, sorted: nil) Utils.execute("TopK", [input], k: k, sorted: sorted) end |
.top_kv2(input: nil, k: nil, sorted: nil) ⇒ Object
4389 4390 4391 |
# File 'lib/tensorflow/raw_ops.rb', line 4389 def top_kv2(input: nil, k: nil, sorted: nil) Utils.execute("TopKV2", [input, k], sorted: sorted) end |
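top_kv2 takes k as a tensor input (top_k takes it as an attribute) and returns both the values and their indices. Sketch, assuming the two outputs come back as an array:

  values, indices = TensorFlow::RawOps.top_kv2(
    input: [1.0, 5.0, 3.0, 4.0],
    k: 2,
    sorted: true
  )
  # values -> [5.0, 4.0], indices -> [1, 3]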
.tpu_compilation_result ⇒ Object
4037 4038 4039 |
# File 'lib/tensorflow/raw_ops.rb', line 4037 def tpu_compilation_result Utils.execute("TPUCompilationResult", []) end |
.tpu_embedding_activations(embedding_variable: nil, sliced_activations: nil, table_id: nil, lookup_id: nil) ⇒ Object
4041 4042 4043 |
# File 'lib/tensorflow/raw_ops.rb', line 4041 def tpu_embedding_activations(embedding_variable: nil, sliced_activations: nil, table_id: nil, lookup_id: nil) Utils.execute("TPUEmbeddingActivations", [embedding_variable, sliced_activations], table_id: table_id, lookup_id: lookup_id) end |
.tpu_ordinal_selector ⇒ Object
4045 4046 4047 |
# File 'lib/tensorflow/raw_ops.rb', line 4045 def tpu_ordinal_selector Utils.execute("TPUOrdinalSelector", []) end |
.tpu_partitioned_call(args: nil, device_ordinal: nil, f: nil) ⇒ Object
4049 4050 4051 |
# File 'lib/tensorflow/raw_ops.rb', line 4049 def tpu_partitioned_call(args: nil, device_ordinal: nil, f: nil) Utils.execute("TPUPartitionedCall", [args, device_ordinal], f: f) end |
.tpu_replicate_metadata(num_replicas: nil, num_cores_per_replica: nil, topology: nil, use_tpu: nil, device_assignment: nil, computation_shape: nil, host_compute_core: nil, padding_map: nil, step_marker_location: nil) ⇒ Object
4053 4054 4055 |
# File 'lib/tensorflow/raw_ops.rb', line 4053 def tpu_replicate_metadata(num_replicas: nil, num_cores_per_replica: nil, topology: nil, use_tpu: nil, device_assignment: nil, computation_shape: nil, host_compute_core: nil, padding_map: nil, step_marker_location: nil) Utils.execute("TPUReplicateMetadata", [], num_replicas: num_replicas, num_cores_per_replica: num_cores_per_replica, topology: topology, use_tpu: use_tpu, device_assignment: device_assignment, computation_shape: computation_shape, host_compute_core: host_compute_core, padding_map: padding_map, step_marker_location: step_marker_location) end |
.tpu_replicated_input(inputs: nil) ⇒ Object
4057 4058 4059 |
# File 'lib/tensorflow/raw_ops.rb', line 4057 def tpu_replicated_input(inputs: nil) Utils.execute("TPUReplicatedInput", [inputs]) end |
.tpu_replicated_output(input: nil, num_replicas: nil) ⇒ Object
4061 4062 4063 |
# File 'lib/tensorflow/raw_ops.rb', line 4061 def tpu_replicated_output(input: nil, num_replicas: nil) Utils.execute("TPUReplicatedOutput", [input], num_replicas: num_replicas) end |
.transpose(x: nil, perm: nil) ⇒ Object
4393 4394 4395 |
# File 'lib/tensorflow/raw_ops.rb', line 4393 def transpose(x: nil, perm: nil) Utils.execute("Transpose", [x, perm]) end |
.tridiagonal_mat_mul(superdiag: nil, maindiag: nil, subdiag: nil, rhs: nil) ⇒ Object
4397 4398 4399 |
# File 'lib/tensorflow/raw_ops.rb', line 4397 def tridiagonal_mat_mul(superdiag: nil, maindiag: nil, subdiag: nil, rhs: nil) Utils.execute("TridiagonalMatMul", [superdiag, maindiag, subdiag, rhs]) end |
.tridiagonal_solve(diagonals: nil, rhs: nil, partial_pivoting: nil) ⇒ Object
4401 4402 4403 |
# File 'lib/tensorflow/raw_ops.rb', line 4401 def tridiagonal_solve(diagonals: nil, rhs: nil, partial_pivoting: nil) Utils.execute("TridiagonalSolve", [diagonals, rhs], partial_pivoting: partial_pivoting) end |
.truncate_div(x: nil, y: nil) ⇒ Object
4405 4406 4407 |
# File 'lib/tensorflow/raw_ops.rb', line 4405 def truncate_div(x: nil, y: nil) Utils.execute("TruncateDiv", [x, y]) end |
.truncate_mod(x: nil, y: nil) ⇒ Object
4409 4410 4411 |
# File 'lib/tensorflow/raw_ops.rb', line 4409 def truncate_mod(x: nil, y: nil) Utils.execute("TruncateMod", [x, y]) end |
.truncated_normal(shape: nil, seed: nil, seed2: nil, dtype: nil) ⇒ Object
4413 4414 4415 |
# File 'lib/tensorflow/raw_ops.rb', line 4413 def truncated_normal(shape: nil, seed: nil, seed2: nil, dtype: nil) Utils.execute("TruncatedNormal", [shape], seed: seed, seed2: seed2, dtype: dtype) end |
.try_rpc(address: nil, method: nil, request: nil, protocol: nil, fail_fast: nil, timeout_in_ms: nil) ⇒ Object
4417 4418 4419 |
# File 'lib/tensorflow/raw_ops.rb', line 4417 def try_rpc(address: nil, method: nil, request: nil, protocol: nil, fail_fast: nil, timeout_in_ms: nil) Utils.execute("TryRpc", [address, method, request], protocol: protocol, fail_fast: fail_fast, timeout_in_ms: timeout_in_ms) end |
.unbatch(batched_tensor: nil, batch_index: nil, id: nil, timeout_micros: nil, container: nil, shared_name: nil) ⇒ Object
4421 4422 4423 |
# File 'lib/tensorflow/raw_ops.rb', line 4421 def unbatch(batched_tensor: nil, batch_index: nil, id: nil, timeout_micros: nil, container: nil, shared_name: nil) Utils.execute("Unbatch", [batched_tensor, batch_index, id], timeout_micros: timeout_micros, container: container, shared_name: shared_name) end |
.unbatch_grad(original_input: nil, batch_index: nil, grad: nil, id: nil, container: nil, shared_name: nil) ⇒ Object
4425 4426 4427 |
# File 'lib/tensorflow/raw_ops.rb', line 4425 def unbatch_grad(original_input: nil, batch_index: nil, grad: nil, id: nil, container: nil, shared_name: nil) Utils.execute("UnbatchGrad", [original_input, batch_index, grad, id], container: container, shared_name: shared_name) end |
.unicode_decode(input: nil, input_encoding: nil, errors: nil, replacement_char: nil, replace_control_characters: nil) ⇒ Object
4429 4430 4431 |
# File 'lib/tensorflow/raw_ops.rb', line 4429 def unicode_decode(input: nil, input_encoding: nil, errors: nil, replacement_char: nil, replace_control_characters: nil) Utils.execute("UnicodeDecode", [input], input_encoding: input_encoding, errors: errors, replacement_char: replacement_char, replace_control_characters: replace_control_characters) end |
.unicode_decode_with_offsets(input: nil, input_encoding: nil, errors: nil, replacement_char: nil, replace_control_characters: nil) ⇒ Object
4433 4434 4435 |
# File 'lib/tensorflow/raw_ops.rb', line 4433 def unicode_decode_with_offsets(input: nil, input_encoding: nil, errors: nil, replacement_char: nil, replace_control_characters: nil) Utils.execute("UnicodeDecodeWithOffsets", [input], input_encoding: input_encoding, errors: errors, replacement_char: replacement_char, replace_control_characters: replace_control_characters) end |
.unicode_encode(input_values: nil, input_splits: nil, errors: nil, output_encoding: nil, replacement_char: nil) ⇒ Object
4437 4438 4439 |
# File 'lib/tensorflow/raw_ops.rb', line 4437 def unicode_encode(input_values: nil, input_splits: nil, errors: nil, output_encoding: nil, replacement_char: nil) Utils.execute("UnicodeEncode", [input_values, input_splits], errors: errors, output_encoding: output_encoding, replacement_char: replacement_char) end |
.unicode_script(input: nil) ⇒ Object
4441 4442 4443 |
# File 'lib/tensorflow/raw_ops.rb', line 4441 def unicode_script(input: nil) Utils.execute("UnicodeScript", [input]) end |
.unicode_transcode(input: nil, input_encoding: nil, output_encoding: nil, errors: nil, replacement_char: nil, replace_control_characters: nil) ⇒ Object
4445 4446 4447 |
# File 'lib/tensorflow/raw_ops.rb', line 4445 def unicode_transcode(input: nil, input_encoding: nil, output_encoding: nil, errors: nil, replacement_char: nil, replace_control_characters: nil) Utils.execute("UnicodeTranscode", [input], input_encoding: input_encoding, output_encoding: output_encoding, errors: errors, replacement_char: replacement_char, replace_control_characters: replace_control_characters) end |
.uniform_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, seed: nil, seed2: nil) ⇒ Object
4449 4450 4451 |
# File 'lib/tensorflow/raw_ops.rb', line 4449 def uniform_candidate_sampler(true_classes: nil, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, seed: nil, seed2: nil) Utils.execute("UniformCandidateSampler", [true_classes], num_true: num_true, num_sampled: num_sampled, unique: unique, range_max: range_max, seed: seed, seed2: seed2) end |
.unique(x: nil, out_idx: nil) ⇒ Object
4453 4454 4455 |
# File 'lib/tensorflow/raw_ops.rb', line 4453 def unique(x: nil, out_idx: nil) Utils.execute("Unique", [x], out_idx: out_idx) end |
.unique_v2(x: nil, axis: nil, out_idx: nil) ⇒ Object
4457 4458 4459 |
# File 'lib/tensorflow/raw_ops.rb', line 4457 def unique_v2(x: nil, axis: nil, out_idx: nil) Utils.execute("UniqueV2", [x, axis], out_idx: out_idx) end |
.unique_with_counts(x: nil, out_idx: nil) ⇒ Object
4461 4462 4463 |
# File 'lib/tensorflow/raw_ops.rb', line 4461 def unique_with_counts(x: nil, out_idx: nil) Utils.execute("UniqueWithCounts", [x], out_idx: out_idx) end |
.unique_with_counts_v2(x: nil, axis: nil, out_idx: nil) ⇒ Object
4465 4466 4467 |
# File 'lib/tensorflow/raw_ops.rb', line 4465 def unique_with_counts_v2(x: nil, axis: nil, out_idx: nil) Utils.execute("UniqueWithCountsV2", [x, axis], out_idx: out_idx) end |
.unpack(value: nil, num: nil, axis: nil) ⇒ Object
4469 4470 4471 |
# File 'lib/tensorflow/raw_ops.rb', line 4469 def unpack(value: nil, num: nil, axis: nil) Utils.execute("Unpack", [value], num: num, axis: axis) end |
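Unpack splits a tensor into num slices along axis (the inverse of Pack). Sketch, assuming multiple outputs are returned as an array:

  rows = TensorFlow::RawOps.unpack(
    value: [[1, 2, 3], [4, 5, 6]],
    num: 2,
    axis: 0
  )
  # rows -> two separate 1-D tensors, [1, 2, 3] and [4, 5, 6]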
.unravel_index(indices: nil, dims: nil) ⇒ Object
4473 4474 4475 |
# File 'lib/tensorflow/raw_ops.rb', line 4473 def unravel_index(indices: nil, dims: nil) Utils.execute("UnravelIndex", [indices, dims]) end |
.unsorted_segment_max(data: nil, segment_ids: nil, num_segments: nil) ⇒ Object
4477 4478 4479 |
# File 'lib/tensorflow/raw_ops.rb', line 4477 def unsorted_segment_max(data: nil, segment_ids: nil, num_segments: nil) Utils.execute("UnsortedSegmentMax", [data, segment_ids, num_segments]) end |
.unsorted_segment_min(data: nil, segment_ids: nil, num_segments: nil) ⇒ Object
4481 4482 4483 |
# File 'lib/tensorflow/raw_ops.rb', line 4481 def unsorted_segment_min(data: nil, segment_ids: nil, num_segments: nil) Utils.execute("UnsortedSegmentMin", [data, segment_ids, num_segments]) end |
.unsorted_segment_prod(data: nil, segment_ids: nil, num_segments: nil) ⇒ Object
4485 4486 4487 |
# File 'lib/tensorflow/raw_ops.rb', line 4485 def unsorted_segment_prod(data: nil, segment_ids: nil, num_segments: nil) Utils.execute("UnsortedSegmentProd", [data, segment_ids, num_segments]) end |
.unsorted_segment_sum(data: nil, segment_ids: nil, num_segments: nil) ⇒ Object
4489 4490 4491 |
# File 'lib/tensorflow/raw_ops.rb', line 4489 def unsorted_segment_sum(data: nil, segment_ids: nil, num_segments: nil) Utils.execute("UnsortedSegmentSum", [data, segment_ids, num_segments]) end |
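The unsorted_segment_* reductions group rows of data by segment_ids and reduce within each group; num_segments fixes the output size, so empty segments are allowed. Sketch:

  # Rows 0 and 2 go to segment 0, row 1 to segment 1.
  sums = TensorFlow::RawOps.unsorted_segment_sum(
    data: [[1, 1], [2, 2], [3, 3]],
    segment_ids: [0, 1, 0],
    num_segments: 2
  )
  # sums -> [[4, 4], [2, 2]]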
.unstage(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) ⇒ Object
4493 4494 4495 |
# File 'lib/tensorflow/raw_ops.rb', line 4493 def unstage(capacity: nil, memory_limit: nil, dtypes: nil, container: nil, shared_name: nil) Utils.execute("Unstage", [], capacity: capacity, memory_limit: memory_limit, dtypes: dtypes, container: container, shared_name: shared_name) end |
.unwrap_dataset_variant(input_handle: nil) ⇒ Object
4497 4498 4499 |
# File 'lib/tensorflow/raw_ops.rb', line 4497 def unwrap_dataset_variant(input_handle: nil) Utils.execute("UnwrapDatasetVariant", [input_handle]) end |
.upper_bound(sorted_inputs: nil, values: nil, out_type: nil) ⇒ Object
4501 4502 4503 |
# File 'lib/tensorflow/raw_ops.rb', line 4501 def upper_bound(sorted_inputs: nil, values: nil, out_type: nil) Utils.execute("UpperBound", [sorted_inputs, values], out_type: out_type) end |
.var_handle_op(container: nil, shared_name: nil, dtype: nil, shape: nil) ⇒ Object
4505 4506 4507 |
# File 'lib/tensorflow/raw_ops.rb', line 4505 def var_handle_op(container: nil, shared_name: nil, dtype: nil, shape: nil) Utils.execute("VarHandleOp", [], container: container, shared_name: shared_name, dtype: dtype, shape: shape) end |
.var_is_initialized_op(resource: nil) ⇒ Object
4509 4510 4511 |
# File 'lib/tensorflow/raw_ops.rb', line 4509 def var_is_initialized_op(resource: nil) Utils.execute("VarIsInitializedOp", [resource]) end |
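var_handle_op creates (or, via shared_name, looks up) a resource handle for a variable, and var_is_initialized_op reports whether a value has been assigned to it yet. A sketch; the :float dtype symbol is an assumption about the gem's dtype naming:

  handle = TensorFlow::RawOps.var_handle_op(
    container: "", shared_name: "demo_var",
    dtype: :float, shape: [2, 2]
  )
  ready = TensorFlow::RawOps.var_is_initialized_op(resource: handle)
  # ready is false until an assign op (documented elsewhere) writes to the handle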
.variable(shape: nil, dtype: nil, container: nil, shared_name: nil) ⇒ Object
4513 4514 4515 |
# File 'lib/tensorflow/raw_ops.rb', line 4513 def variable(shape: nil, dtype: nil, container: nil, shared_name: nil) Utils.execute("Variable", [], shape: shape, dtype: dtype, container: container, shared_name: shared_name) end |
.variable_shape(input: nil, out_type: nil) ⇒ Object
4517 4518 4519 |
# File 'lib/tensorflow/raw_ops.rb', line 4517 def variable_shape(input: nil, out_type: nil) Utils.execute("VariableShape", [input], out_type: out_type) end |
.variable_v2(shape: nil, dtype: nil, container: nil, shared_name: nil) ⇒ Object
4521 4522 4523 |
# File 'lib/tensorflow/raw_ops.rb', line 4521 def variable_v2(shape: nil, dtype: nil, container: nil, shared_name: nil) Utils.execute("VariableV2", [], shape: shape, dtype: dtype, container: container, shared_name: shared_name) end |
.where(input: nil) ⇒ Object
4525 4526 4527 |
# File 'lib/tensorflow/raw_ops.rb', line 4525 def where(input: nil) Utils.execute("Where", [input]) end |
.while(input: nil, cond: nil, body: nil, output_shapes: nil, parallel_iterations: nil) ⇒ Object
4529 4530 4531 |
# File 'lib/tensorflow/raw_ops.rb', line 4529 def while(input: nil, cond: nil, body: nil, output_shapes: nil, parallel_iterations: nil) Utils.execute("While", [input], cond: cond, body: body, output_shapes: output_shapes, parallel_iterations: parallel_iterations) end |
.whole_file_reader(container: nil, shared_name: nil) ⇒ Object
4533 4534 4535 |
# File 'lib/tensorflow/raw_ops.rb', line 4533 def whole_file_reader(container: nil, shared_name: nil) Utils.execute("WholeFileReader", [], container: container, shared_name: shared_name) end |
.whole_file_reader_v2(container: nil, shared_name: nil) ⇒ Object
4537 4538 4539 |
# File 'lib/tensorflow/raw_ops.rb', line 4537 def whole_file_reader_v2(container: nil, shared_name: nil) Utils.execute("WholeFileReaderV2", [], container: container, shared_name: shared_name) end |
.window_dataset(input_dataset: nil, size: nil, shift: nil, stride: nil, drop_remainder: nil, output_types: nil, output_shapes: nil) ⇒ Object
4541 4542 4543 |
# File 'lib/tensorflow/raw_ops.rb', line 4541 def window_dataset(input_dataset: nil, size: nil, shift: nil, stride: nil, drop_remainder: nil, output_types: nil, output_shapes: nil) Utils.execute("WindowDataset", [input_dataset, size, shift, stride, drop_remainder], output_types: output_types, output_shapes: output_shapes) end |
.worker_heartbeat(request: nil) ⇒ Object
4545 4546 4547 |
# File 'lib/tensorflow/raw_ops.rb', line 4545 def worker_heartbeat(request: nil) Utils.execute("WorkerHeartbeat", [request]) end |
.wrap_dataset_variant(input_handle: nil) ⇒ Object
4549 4550 4551 |
# File 'lib/tensorflow/raw_ops.rb', line 4549 def wrap_dataset_variant(input_handle: nil) Utils.execute("WrapDatasetVariant", [input_handle]) end |
.write_audio_summary(writer: nil, step: nil, tag: nil, tensor: nil, sample_rate: nil, max_outputs: nil) ⇒ Object
4553 4554 4555 |
# File 'lib/tensorflow/raw_ops.rb', line 4553 def write_audio_summary(writer: nil, step: nil, tag: nil, tensor: nil, sample_rate: nil, max_outputs: nil) Utils.execute("WriteAudioSummary", [writer, step, tag, tensor, sample_rate], max_outputs: max_outputs) end |
.write_file(filename: nil, contents: nil) ⇒ Object
4557 4558 4559 |
# File 'lib/tensorflow/raw_ops.rb', line 4557 def write_file(filename: nil, contents: nil) Utils.execute("WriteFile", [filename, contents]) end |
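write_file writes a string tensor's contents to the given path (read_file, documented elsewhere, is its counterpart). Sketch, assuming Ruby strings are converted to scalar string tensors:

  TensorFlow::RawOps.write_file(
    filename: "/tmp/raw_ops_demo.txt",
    contents: "hello from RawOps"
  )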
.write_graph_summary(writer: nil, step: nil, tensor: nil) ⇒ Object
4561 4562 4563 |
# File 'lib/tensorflow/raw_ops.rb', line 4561 def write_graph_summary(writer: nil, step: nil, tensor: nil) Utils.execute("WriteGraphSummary", [writer, step, tensor]) end |
.write_histogram_summary(writer: nil, step: nil, tag: nil, values: nil) ⇒ Object
4565 4566 4567 |
# File 'lib/tensorflow/raw_ops.rb', line 4565 def write_histogram_summary(writer: nil, step: nil, tag: nil, values: nil) Utils.execute("WriteHistogramSummary", [writer, step, tag, values]) end |
.write_image_summary(writer: nil, step: nil, tag: nil, tensor: nil, bad_color: nil, max_images: nil) ⇒ Object
4569 4570 4571 |
# File 'lib/tensorflow/raw_ops.rb', line 4569 def write_image_summary(writer: nil, step: nil, tag: nil, tensor: nil, bad_color: nil, max_images: nil) Utils.execute("WriteImageSummary", [writer, step, tag, tensor, bad_color], max_images: max_images) end |
.write_raw_proto_summary(writer: nil, step: nil, tensor: nil) ⇒ Object
4573 4574 4575 |
# File 'lib/tensorflow/raw_ops.rb', line 4573 def write_raw_proto_summary(writer: nil, step: nil, tensor: nil) Utils.execute("WriteRawProtoSummary", [writer, step, tensor]) end |
.write_scalar_summary(writer: nil, step: nil, tag: nil, value: nil) ⇒ Object
4577 4578 4579 |
# File 'lib/tensorflow/raw_ops.rb', line 4577 def write_scalar_summary(writer: nil, step: nil, tag: nil, value: nil) Utils.execute("WriteScalarSummary", [writer, step, tag, value]) end |
.write_summary(writer: nil, step: nil, tensor: nil, tag: nil, summary_metadata: nil) ⇒ Object
4581 4582 4583 |
# File 'lib/tensorflow/raw_ops.rb', line 4581 def write_summary(writer: nil, step: nil, tensor: nil, tag: nil, summary_metadata: nil) Utils.execute("WriteSummary", [writer, step, tensor, tag, summary_metadata]) end |
.xdivy(x: nil, y: nil) ⇒ Object
4585 4586 4587 |
# File 'lib/tensorflow/raw_ops.rb', line 4585 def xdivy(x: nil, y: nil) Utils.execute("Xdivy", [x, y]) end |
.xlogy(x: nil, y: nil) ⇒ Object
4589 4590 4591 |
# File 'lib/tensorflow/raw_ops.rb', line 4589 def xlogy(x: nil, y: nil) Utils.execute("Xlogy", [x, y]) end |
.zeros_like(x: nil) ⇒ Object
4593 4594 4595 |
# File 'lib/tensorflow/raw_ops.rb', line 4593 def zeros_like(x: nil) Utils.execute("ZerosLike", [x]) end |
.zeta(x: nil, q: nil) ⇒ Object
4597 4598 4599 |
# File 'lib/tensorflow/raw_ops.rb', line 4597 def zeta(x: nil, q: nil) Utils.execute("Zeta", [x, q]) end |
.zip_dataset(input_datasets: nil, output_types: nil, output_shapes: nil) ⇒ Object
4601 4602 4603 |
# File 'lib/tensorflow/raw_ops.rb', line 4601 def zip_dataset(input_datasets: nil, output_types: nil, output_shapes: nil) Utils.execute("ZipDataset", [input_datasets], output_types: output_types, output_shapes: output_shapes) end |