Module: TensorStream::Ops

Includes:
OpStub
Included in:
TensorStream, Train::LearningRateDecay
Defined in:
lib/tensor_stream/ops.rb

Overview

Module that defines all available ops supported by TensorStream.

Defined Under Namespace

Classes: OutputHolder

Constant Summary

FLOATING_POINT_TYPES =
%i[float32 float64 float float16].freeze
INTEGER_TYPES =
%i[uint8 int32 int int16 uint16 int64 uint32 uint64].freeze
NUMERIC_TYPES =
FLOATING_POINT_TYPES + INTEGER_TYPES

Instance Method Summary

Methods included from OpStub

#add, #argmax, #argmin, #ceil, #cos, #div, #equal, #fill, #floor, #greater, #greater_equal, #less, #less_equal, #mat_mul, #max, #min, #mod, #mul, #negate, #pow, #prod, #random_uniform, #range, #rank, #round, #shape, #sigmoid, #sign, #sin, #size, #sub, #sum, #tan, #tanh, #tile, #zeros

Instance Method Details

#abs(input, name: nil) ⇒ Object

Computes the absolute value of a tensor.



# File 'lib/tensor_stream/ops.rb', line 395

def abs(input, name: nil)
  _op(:abs, input, name: name)
end

#acos(input, name: nil) ⇒ Object

Computes acos of input element-wise.



# File 'lib/tensor_stream/ops.rb', line 299

def acos(input, name: nil)
  check_allowed_types(input, FLOATING_POINT_TYPES)
  _op(:acos, input, name: name)
end

#add_n(inputs, name: nil) ⇒ Object

Adds all input tensors element-wise.

All input tensors must have the same shape and type.



# File 'lib/tensor_stream/ops.rb', line 286

def add_n(inputs, name: nil)
  _op(:add_n, *inputs, name: name)
end

#asin(input, name: nil) ⇒ Object

Computes asin of input element-wise.



# File 'lib/tensor_stream/ops.rb', line 292

def asin(input, name: nil)
  check_allowed_types(input, FLOATING_POINT_TYPES)
  _op(:asin, input, name: name)
end

#assert_equal(x, y, data: nil, summarize: nil, message: nil, name: nil) ⇒ Object

Assert the condition x == y holds element-wise.

Arguments:

x : Numeric Tensor.
y : Numeric Tensor, same dtype as and broadcastable to x.

Returns an Op that raises InvalidArgumentError if x == y is false.



# File 'lib/tensor_stream/ops.rb', line 29

def assert_equal(x, y, data: nil, summarize: nil, message: nil, name: nil)
  _op(:assert_equal, x, y, data: data, summarize: summarize, message: message, name: name)
end
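
A minimal usage sketch (not from the library's own docs), assuming the usual TensorStream.constant and TensorStream.session helpers:

ts = TensorStream
x = ts.constant([1, 2, 3])
y = ts.constant([1, 2, 3])
assertion = ts.assert_equal(x, y, message: "x and y differ")

ts.session.run(assertion) # passes silently here; raises InvalidArgumentError when any element differs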

#atan(input, name: nil) ⇒ Object

Computes atan of input element-wise.



# File 'lib/tensor_stream/ops.rb', line 306

def atan(input, name: nil)
  check_allowed_types(input, FLOATING_POINT_TYPES)
  _op(:atan, input, name: name)
end

#broadcast_gradient_args(shape_a, shape_b, name: nil) ⇒ Object



# File 'lib/tensor_stream/ops.rb', line 458

def broadcast_gradient_args(shape_a, shape_b, name: nil)
  op_result = _op(:broadcast_gradient_args, shape_a, shape_b, name: name)
  [op_result[0], op_result[1]]
end

#case(args = {}) ⇒ Object

Create a case operation.

The pred_fn_pairs parameter is a hash or list of pairs of size N. Each pair contains a boolean scalar tensor and a proc that creates the tensors to be returned if the boolean evaluates to true. default is a proc generating a list of tensors. All the procs in pred_fn_pairs, as well as default (if provided), should return the same number and types of tensors.



# File 'lib/tensor_stream/ops.rb', line 547

def case(args = {})
  args = args.dup
  default = args.delete(:default)
  exclusive = args.delete(:exclusive)
  strict = args.delete(:strict)
  name = args.delete(:name)

  predicates = []
  functions = []

  args.each do |k, v|
    raise "Invalid argment or option #{k}" unless k.is_a?(Tensor)

    predicates << k
    functions << (v.is_a?(Proc) ? v.call : v)
  end

  _op(:case, predicates, default, *functions, exclusive: exclusive, strict: strict, name: name)
end
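
A hedged usage sketch: Tensor keys map to procs, and (following the description above) default is given as a proc; constant/session helpers are assumed:

ts = TensorStream
x = ts.constant(2)
r = ts.case(
  { ts.less(x, 1)    => -> { ts.constant(10) },
    ts.greater(x, 1) => -> { ts.constant(20) },
    default: -> { ts.constant(0) } }
)
ts.session.run(r) # => 20 for x == 2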

#cast(input, dtype, name: nil) ⇒ Object

Casts a tensor to a new type, if needed.



# File 'lib/tensor_stream/ops.rb', line 320

def cast(input, dtype, name: nil)
  input = convert_to_tensor(input)
  return input if input.data_type == dtype

  _op(:cast, input, data_type: dtype, name: name)
end
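
A minimal sketch (constant/session helpers assumed):

ts = TensorStream
a = ts.constant([1.0, 2.0, 3.0])
b = ts.cast(a, :int32)
ts.session.run(b) # => [1, 2, 3]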

#cast_axis(input, axis) ⇒ Object



# File 'lib/tensor_stream/ops.rb', line 575

def cast_axis(input, axis)
  if !axis.nil?
    axis
  elsif input.shape.known?
    (0...input.shape.ndims).to_a
  else
    range(0, rank(input))
  end
end

#check_numerics(tensor, message, name: nil) ⇒ Object

Checks a tensor for NaN and Inf values. When run, reports an InvalidArgument error if tensor has any values that are not a number (NaN) or infinity (Inf). Otherwise, passes tensor as-is.



# File 'lib/tensor_stream/ops.rb', line 450

def check_numerics(tensor, message, name: nil)
  _op(:check_numerics, tensor, message: message, name: name)
end

#concat(values, axis, name: "concat") ⇒ Object

Concatenates tensors along one dimension.



# File 'lib/tensor_stream/ops.rb', line 190

def concat(values, axis, name: "concat")
  if values.is_a?(Array)
    _op(:concat, axis, *values, name: name)
  else
    _op(:concat, axis, values, name: name)
  end
end
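
A minimal sketch for both axes; the expected outputs follow the usual concat semantics (constant/session helpers assumed):

ts = TensorStream
t1 = ts.constant([[1, 2, 3], [4, 5, 6]])
t2 = ts.constant([[7, 8, 9], [10, 11, 12]])
ts.session.run(ts.concat([t1, t2], 0)) # => [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
ts.session.run(ts.concat([t1, t2], 1)) # => [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]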

#cond(pred, true_fn, false_fn, name: nil) ⇒ Object

Returns true_fn if the predicate pred is true, else false_fn.



# File 'lib/tensor_stream/ops.rb', line 272

def cond(pred, true_fn, false_fn, name: nil)
  _op(:case, [pred], false_fn, true_fn, name: name)
end
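
A minimal sketch; note that the implementation above passes true_fn and false_fn straight into the op, so they are given here as already-built tensors (constant/session helpers assumed):

ts = TensorStream
x = ts.constant(2.0)
y = ts.constant(5.0)
r = ts.cond(ts.less(x, y), ts.add(x, y), ts.sub(x, y))
ts.session.run(r) # => 7.0, since x < y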

#constant_initializer(value, dtype: nil, verify_shape: false) ⇒ Object



# File 'lib/tensor_stream/ops.rb', line 130

def constant_initializer(value, dtype: nil, verify_shape: false)
  TensorStream::Initializer.new(-> { _op(:fill, nil, convert_to_tensor(value, dtype: dtype)) })
end

#cumprod(x, axis: 0, exclusive: false, reverse: false, name: nil) ⇒ Object



# File 'lib/tensor_stream/ops.rb', line 567

def cumprod(x, axis: 0, exclusive: false, reverse: false, name: nil)
  _op(:cumprod, x, axis: axis, exclusive: exclusive, reverse: reverse, name: name)
end

#exp(input, name: nil) ⇒ Object

Computes exponential of x element-wise.



# File 'lib/tensor_stream/ops.rb', line 429

def exp(input, name: nil)
  check_allowed_types(input, FLOATING_POINT_TYPES)
  _op(:exp, input, name: name)
end

#expand_dims(input, axis = nil, name: nil) ⇒ Object



# File 'lib/tensor_stream/ops.rb', line 92

def expand_dims(input, axis = nil, name: nil)
  _op(:expand_dims, input, axis, name: name)
end

#eye(num_rows, num_columns: nil, dtype: :float32, name: nil) ⇒ Object

Constructs an identity matrix.



# File 'lib/tensor_stream/ops.rb', line 88

def eye(num_rows, num_columns: nil, dtype: :float32, name: nil)
  _op(:eye, num_rows, num_columns || num_rows, data_type: dtype, name: name)
end

#floor_div(input_a, input_b, name: nil) ⇒ Object

Returns element-wise integer division.



# File 'lib/tensor_stream/ops.rb', line 313

def floor_div(input_a, input_b, name: nil)
  check_data_types(input_a, input_b)
  _op(:floor_div, input_a, input_b, name: name)
end

#gather(params, indices, validate_indices: nil, name: nil, axis: 0) ⇒ Object

Gathers slices from params along axis according to indices.



# File 'lib/tensor_stream/ops.rb', line 466

def gather(params, indices, validate_indices: nil,
  name: nil,
  axis: 0)
  _op(:gather, params, indices, validate_indices: validate_indices, name: name, axis: axis)
end

#glorot_uniform_initializer(seed: nil, dtype: nil) ⇒ Object

The Glorot uniform initializer, also called Xavier uniform initializer.

It draws samples from a uniform distribution within [-limit, limit], where limit is sqrt(6 / (fan_in + fan_out)), fan_in is the number of input units in the weight tensor, and fan_out is the number of output units in the weight tensor.



# File 'lib/tensor_stream/ops.rb', line 140

def glorot_uniform_initializer(seed: nil, dtype: nil)
  TensorStream::Initializer.new(-> { _op(:glorot_uniform, seed: seed, data_type: dtype) })
end

#gradients(tensor_ys, wrt_xs, name: "gradients", stop_gradients: nil) ⇒ Object

Constructs symbolic derivatives of the tensors in tensor_ys with respect to the tensors in wrt_xs.

tensor_ys and wrt_xs are each a Tensor or a list of tensors.

Arguments:

tensor_ys : A Tensor or list of tensors to be differentiated.
wrt_xs : A Tensor or list of tensors to be used for differentiation.
stop_gradients : Optional. A Tensor or list of tensors not to differentiate through.



# File 'lib/tensor_stream/ops.rb', line 42

def gradients(tensor_ys, wrt_xs, name: "gradients", stop_gradients: nil)
  tensor_ys = tensor_ys.op
  gs = wrt_xs.map(&:op).collect { |x|
    stops = stop_gradients ? stop_gradients.map(&:name).join("_") : ""
    gradient_program_name = "grad_#{tensor_ys.name}_#{x.name}_#{stops}".to_sym
    tensor_graph = tensor_ys.graph

    tensor_program = if tensor_graph.node_added?(gradient_program_name)
      tensor_graph.get_node(gradient_program_name)
    else
      tensor_graph.name_scope("gradient_wrt_#{x.name}") do
        derivative_ops = TensorStream::MathGradients.derivative(tensor_ys, x, graph: tensor_graph,
                                                                              stop_gradients: stop_gradients)
        tensor_graph.add_node!(gradient_program_name, derivative_ops)
      end
    end
    tensor_program
  }

  gs
end
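
A minimal sketch differentiating y = a**2 * b with respect to both inputs (constant/session helpers assumed):

ts = TensorStream
a = ts.constant(3.0)
b = ts.constant(2.0)
y = ts.mul(ts.pow(a, 2.0), b)
grads = ts.gradients(y, [a, b])  # [dy/da, dy/db] = [2ab, a**2]

sess = ts.session
sess.run(grads[0]) # => 12.0
sess.run(grads[1]) # => 9.0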

#identity(input, name: nil) ⇒ Object

Return a tensor with the same shape and contents as input.



# File 'lib/tensor_stream/ops.rb', line 381

def identity(input, name: nil)
  _op(:identity, input, name: name)
end

#index(tensor, sel, name: nil) ⇒ Object

Selects an index in an array or a set of tensor outputs.



# File 'lib/tensor_stream/ops.rb', line 246

def index(tensor, sel, name: nil)
  _op(:index, tensor, sel, name: name)
end

#invert_permutation(x, name: nil) ⇒ Object



# File 'lib/tensor_stream/ops.rb', line 571

def invert_permutation(x, name: nil)
  _op(:invert_permutation, x, name: name)
end

#log(input, name: nil) ⇒ Object

Computes natural logarithm of x element-wise.



# File 'lib/tensor_stream/ops.rb', line 415

def log(input, name: nil)
  check_allowed_types(input, FLOATING_POINT_TYPES)
  _op(:log, input, name: name)
end

#log1p(input, name: nil) ⇒ Object

Computes natural logarithm of (1 + x) element-wise.



# File 'lib/tensor_stream/ops.rb', line 422

def log1p(input, name: nil)
  check_allowed_types(input, FLOATING_POINT_TYPES)
  _op(:log1p, input, name: name)
end

#logical_and(input_a, input_b, name: nil) ⇒ Object

Returns the truth value of x AND y element-wise.



# File 'lib/tensor_stream/ops.rb', line 168

def logical_and(input_a, input_b, name: nil)
  check_data_types(input_a, input_b)
  _op(:logical_and, input_a, input_b, name: name)
end

#maximum(input_a, input_b, name: nil) ⇒ Object

Returns the max of x and y (i.e. x > y ? x : y) element-wise.



# File 'lib/tensor_stream/ops.rb', line 329

def maximum(input_a, input_b, name: nil)
  max(input_a, input_b, name: name)
end

#minimum(input_a, input_b, name: nil) ⇒ Object

Returns the min of x and y (i.e. x < y ? x : y) element-wise.



# File 'lib/tensor_stream/ops.rb', line 335

def minimum(input_a, input_b, name: nil)
  min(input_a, input_b, name: name)
end

#multiply(input_a, input_b, name: nil) ⇒ Object

Returns x * y element-wise. This operation supports broadcasting.



# File 'lib/tensor_stream/ops.rb', line 388

def multiply(input_a, input_b, name: nil)
  check_data_types(input_a, input_b)
  _op(:mul, input_a, input_b, name: name)
end

#negative(input, name: nil) ⇒ Object

Computes numerical negative value element-wise.



# File 'lib/tensor_stream/ops.rb', line 349

def negative(input, name: nil)
  negate(input, name: name)
end

#not_equal(input_a, input_b, name: nil) ⇒ Object

Returns the truth value of (x != y) element-wise. This op supports broadcasting.



# File 'lib/tensor_stream/ops.rb', line 356

def not_equal(input_a, input_b, name: nil)
  check_data_types(input_a, input_b)
  _op(:not_equal, input_a, input_b, name: name)
end

#ones(shape, dtype: :float32, name: nil) ⇒ Object

Creates a tensor with all elements set to 1.



# File 'lib/tensor_stream/ops.rb', line 162

def ones(shape, dtype: :float32, name: nil)
  _op(:ones, shape, data_type: dtype, name: name)
end

#ones_initializer(dtype: :float32) ⇒ Object

Initializer that generates tensors initialized to 1.



# File 'lib/tensor_stream/ops.rb', line 126

def ones_initializer(dtype: :float32)
  TensorStream::Initializer.new(-> { _op(:ones, data_type: dtype) })
end

#ones_like(tensor, dtype: nil, name: nil) ⇒ Object

Creates a tensor with all elements set to 1. Given a single tensor (tensor), this operation returns a tensor of the same type and shape as tensor with all elements set to 1. Optionally, you can specify a new type (dtype) for the returned tensor.



# File 'lib/tensor_stream/ops.rb', line 375

def ones_like(tensor, dtype: nil, name: nil)
  _op(:ones_like, tensor, data_type: dtype, name: name)
end

#pack(values, axis: 0, name: "pack") ⇒ Object

Same as stack.



# File 'lib/tensor_stream/ops.rb', line 507

def pack(values, axis: 0, name: "pack")
  _op(:stack, *values, axis: axis, name: name)
end

#pad(tensor, paddings, mode: "CONSTANT", name: nil) ⇒ Object

Pads a tensor. This operation pads a tensor according to the paddings you specify.



# File 'lib/tensor_stream/ops.rb', line 443

def pad(tensor, paddings, mode: "CONSTANT", name: nil)
  _op(:pad, tensor, paddings, mode: mode, name: name)
end

#print(input, data, message: nil, name: nil) ⇒ Object

Prints a list of tensors.

This is an identity op (behaves like tf.identity) with the side effect of printing data when evaluating.



# File 'lib/tensor_stream/ops.rb', line 343

def print(input, data, message: nil, name: nil)
  _op(:print, input, data, message: message, name: name)
end

#random_normal(shape, dtype: :float32, mean: 0.0, stddev: 1.0, seed: nil, name: nil) ⇒ Object

Outputs random values from a normal distribution.



# File 'lib/tensor_stream/ops.rb', line 66

def random_normal(shape, dtype: :float32, mean: 0.0, stddev: 1.0, seed: nil, name: nil)
  options = {dtype: dtype, mean: mean, stddev: stddev, seed: seed, name: name}
  _op(:random_standard_normal, shape, options)
end

#random_uniform_initializer(minval: 0, maxval: 1, seed: nil, dtype: nil) ⇒ Object

Initializer that generates tensors with a uniform distribution.



# File 'lib/tensor_stream/ops.rb', line 146

def random_uniform_initializer(minval: 0, maxval: 1, seed: nil, dtype: nil)
  TensorStream::Initializer.new(-> { _op(:random_uniform, minval: minval, maxval: maxval, seed: seed, data_type: dtype) })
end

#reciprocal(tensor, name: nil) ⇒ Object

Computes the reciprocal of x element-wise.



# File 'lib/tensor_stream/ops.rb', line 266

def reciprocal(tensor, name: nil)
  _op(:reciprocal, tensor, name: name)
end

#reduce(op, input, axis = nil, keepdims: false, name: nil) ⇒ Object



# File 'lib/tensor_stream/ops.rb', line 179

def reduce(op, input, axis = nil, keepdims: false, name: nil)
  input = TensorStream.convert_to_tensor(input)
  return input if input.shape.scalar?

  axis = cast_axis(input, axis)

  _op(op, input, axis, keepdims: keepdims, name: name)
end

#reduce_mean(input_tensor, axis = nil, keepdims: false, name: nil) ⇒ Object

Computes the mean of elements across dimensions of a tensor.



# File 'lib/tensor_stream/ops.rb', line 175

def reduce_mean(input_tensor, axis = nil, keepdims: false, name: nil)
  reduce(:mean, input_tensor, axis, keepdims: keepdims, name: name)
end

#reshape(tensor, shape, name: nil) ⇒ Object

Reshapes a tensor.

Given tensor, this operation returns a tensor that has the same values as tensor with shape shape.



# File 'lib/tensor_stream/ops.rb', line 254

def reshape(tensor, shape, name: nil)
  _op(:reshape, tensor, shape, name: name)
end

#sec(input, name: nil) ⇒ Object

Computes sec of input element-wise.



# File 'lib/tensor_stream/ops.rb', line 401

def sec(input, name: nil)
  check_allowed_types(input, FLOATING_POINT_TYPES)
  _op(:sec, input, name: name)
end

#setdiff1d(x, y, index_dtype: :int32, name: nil) ⇒ Object

Computes the difference between two lists of numbers or strings. Given a list x and a list y, this operation returns a list out that represents all values that are in x but not in y. The returned list out is sorted in the same order that the numbers appear in x (duplicates are preserved). This operation also returns a list idx that represents the position of each out element in x. In other words, out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1].



# File 'lib/tensor_stream/ops.rb', line 534

def setdiff1d(x, y, index_dtype: :int32, name: nil)
  result = _op(:setdiff1d, x, y, index_dtype: index_dtype, name: name)
  [result[0], result[1]]
end
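
A minimal sketch (constant/session helpers assumed):

ts = TensorStream
x = ts.constant([1, 2, 3, 4, 5, 6])
y = ts.constant([1, 3, 5])
out, idx = ts.setdiff1d(x, y)
sess = ts.session
sess.run(out) # => [2, 4, 6]
sess.run(idx) # => [1, 3, 5]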

#shape_n(inputs, name: nil, out_type: :int32) ⇒ Object



# File 'lib/tensor_stream/ops.rb', line 97

def shape_n(inputs, name: nil, out_type: :int32)
  shapes_known = true
  inputs.each do |input|
    unless input.shape.known?
      shapes_known = false
      break
    end
  end

  if shapes_known
    inputs.collect { |input| cons(input.shape.shape, dtype: out_type).op }
  else
    res = _op(:shape_n, *inputs, out_type: out_type, name: name)
    Array.new(inputs.size) do |index|
      res[index]
    end
  end
end

#slice(input, start, size, name: nil) ⇒ Object

Extracts a slice from a tensor.

This operation extracts a slice of size size from a tensor input starting at the location specified by start. The slice size is represented as a tensor shape, where size[i] is the number of elements of the i'th dimension of input that you want to slice. The starting location (start) is represented as an offset in each dimension of input. In other words, start[i] is the offset into the i'th dimension of input that you want to slice from.



# File 'lib/tensor_stream/ops.rb', line 156

def slice(input, start, size, name: nil)
  _op(:slice, input, start, size: size, name: name)
end
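
A minimal sketch on a rank-3 tensor (constant/session helpers assumed):

ts = TensorStream
t = ts.constant([[[1, 1, 1], [2, 2, 2]],
                 [[3, 3, 3], [4, 4, 4]],
                 [[5, 5, 5], [6, 6, 6]]])
ts.session.run(ts.slice(t, [1, 0, 0], [1, 1, 3])) # => [[[3, 3, 3]]]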

#split(value, num_or_size_splits, axis: 0, num: nil, name: "split") ⇒ Object



# File 'lib/tensor_stream/ops.rb', line 198

def split(value, num_or_size_splits, axis: 0, num: nil, name: "split")
  value = convert_to_tensor(value)
  num_or_size_splits = convert_to_tensor(num_or_size_splits)
  axis = convert_to_tensor(axis)

  raise TensorStream::ValueError, "num_or_size_splits must be integer dtype" unless INTEGER_TYPES.include?(num_or_size_splits.data_type)

  res = _op(:split, value, num_or_size_splits, axis, name: name)

  pieces = if value.shape.known? && num_or_size_splits.is_const && num_or_size_splits.value && axis.is_const
    if num_or_size_splits.shape.scalar?
      raise TensorStream::ValueError, "num_or_size_splits must divide dimension #{value.shape.shape[axis.value]} evenly" unless (value.shape.shape[axis.value] % num_or_size_splits.value).zero?

      div = num_or_size_splits.value
      n = value.shape.shape[axis.value] / div

      Array.new(div) do
        new_shape = value.shape.shape.dup
        new_shape[axis.value] = n
        new_shape
      end
    elsif num_or_size_splits.shape.ndims == 1
      raise TensorStream::ValueError, "Sum of splits do not match total dimen in axis #{value.shape.shape[axis.value]} != #{num_or_size_splits.value.reduce(:+)}" if value.shape.shape[axis.value] != num_or_size_splits.value.reduce(:+)

      num_or_size_splits.value.collect do |v|
        new_shape = value.shape.shape.dup
        new_shape[axis.value] = v
        new_shape
      end
    else
      raise TensorStream::ValueError, "Scalar or 1D Tensor expected for num_or_size_splits"
    end
  else
    raise TensorStream::ValueError, "Cannot automatically determine num, please specify num: in options" if num.nil?

    Array.new(num) { nil }
  end

  pieces.collect.with_index do |shape, i|
    op = index(res, i, name: "split/index:#{i}")
    op.shape = TensorShape.new(shape) if shape

    op
  end
end

#sqrt(input, name: nil) ⇒ Object

Computes sqrt of input element-wise.



# File 'lib/tensor_stream/ops.rb', line 408

def sqrt(input, name: nil)
  check_allowed_types(input, FLOATING_POINT_TYPES)
  _op(:sqrt, input, name: name)
end

#square(tensor, name: nil) ⇒ Object

Computes square of x element-wise.



# File 'lib/tensor_stream/ops.rb', line 260

def square(tensor, name: nil)
  _op(:square, tensor, name: name)
end

#squared_difference(input_a, input_b, name: nil) ⇒ Object



# File 'lib/tensor_stream/ops.rb', line 454

def squared_difference(input_a, input_b, name: nil)
  _op(:squared_difference, input_a, input_b, name: name)
end

#squeeze(value, axis: [], name: nil) ⇒ Object

Removes dimensions of size 1 from the shape of a tensor.

Given a tensor input, this operation returns a tensor of the same type with all dimensions of size 1 removed. If you don’t want to remove all size 1 dimensions, you can remove specific size 1 dimensions by specifying axis.



# File 'lib/tensor_stream/ops.rb', line 523

def squeeze(value, axis: [], name: nil)
  _op(:squeeze, value, axis: axis, name: name)
end
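
A minimal sketch showing the effect on the shape (constant/session/shape helpers assumed):

ts = TensorStream
t = ts.constant([[[1], [2], [3]]])            # shape [1, 3, 1]
sess = ts.session
sess.run(ts.shape(ts.squeeze(t)))             # => [3]
sess.run(ts.shape(ts.squeeze(t, axis: [0])))  # => [3, 1]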

#stack(values, axis: 0, name: "stack") ⇒ Object

Stacks a list of rank-R tensors into one rank-(R+1) tensor.



# File 'lib/tensor_stream/ops.rb', line 475

def stack(values, axis: 0, name: "stack")
  _op(:stack, *values, axis: axis, name: name)
end

#stop_gradient(tensor, options = {}) ⇒ Object

Stops gradient computation.

When executed in a graph, this op outputs its input tensor as-is.



# File 'lib/tensor_stream/ops.rb', line 82

def stop_gradient(tensor, options = {})
  _op(:stop_gradient, tensor, options)
end

#transpose(tensor, perm = nil, name: "transpose") ⇒ Object

Transposes the input tensor, permuting the dimensions according to perm.



# File 'lib/tensor_stream/ops.rb', line 436

def transpose(tensor, perm = nil, name: "transpose")
  _op(:transpose, tensor, perm, name: name)
end

#truncated_normal(shape, dtype: :float32, mean: 0.0, stddev: 1.0, seed: nil, name: nil) ⇒ Object

Outputs random values from a truncated normal distribution.



# File 'lib/tensor_stream/ops.rb', line 73

def truncated_normal(shape, dtype: :float32, mean: 0.0, stddev: 1.0, seed: nil, name: nil)
  options = {dtype: dtype, mean: mean, stddev: stddev, seed: seed, name: name}
  _op(:truncated_normal, shape, options)
end

#unpack(value, num: nil, axis: 0, name: "unpack") ⇒ Object

Same as unstack.



# File 'lib/tensor_stream/ops.rb', line 514

def unpack(value, num: nil, axis: 0, name: "unpack")
  unstack(value, num: num, axis: axis, name: name)
end

#unstack(value, num: nil, axis: 0, name: "unstack") ⇒ Object

Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors.



# File 'lib/tensor_stream/ops.rb', line 482

def unstack(value, num: nil, axis: 0, name: "unstack")
  res = _op(:unstack, value, num: num, axis: axis, name: name)

  num_vars = if value.shape.known?
    new_shape = value.shape.shape.dup
    rank = new_shape.size - 1
    axis = rank + axis if axis < 0
    rotated_shape = Array.new(axis + 1) { new_shape.shift }
    new_shape = rotated_shape.rotate!(-1) + new_shape
    new_shape[0]
  else
    raise TensorStream::ValueError, "num is unspecified and cannot be inferred." if num.nil?

    num
  end

  return res[0] if num_vars == 1

  Array.new(num_vars) do |i|
    index(res, i, name: "unstack/index:#{i}")
  end
end
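
A minimal sketch along axis 0 (constant/session helpers assumed; num is inferred here because the shape is known):

ts = TensorStream
x = ts.constant([[1, 2, 3], [4, 5, 6]])
a, b = ts.unstack(x)   # two rank-1 tensors
sess = ts.session
sess.run(a) # => [1, 2, 3]
sess.run(b) # => [4, 5, 6]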

#where(condition, true_t = nil, false_t = nil, name: nil) ⇒ Object

Returns elements from true_t or false_t, depending on condition.



# File 'lib/tensor_stream/ops.rb', line 278

def where(condition, true_t = nil, false_t = nil, name: nil)
  _op(:where, condition, true_t, false_t, name: name)
end
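
A minimal element-wise sketch (constant/session helpers assumed):

ts = TensorStream
cond = ts.greater(ts.constant([1.0, 5.0, 3.0]), 2.0)
r = ts.where(cond, ts.constant([10.0, 10.0, 10.0]), ts.constant([0.0, 0.0, 0.0]))
ts.session.run(r) # => [0.0, 10.0, 10.0]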

#zeros_initializer(dtype: :float32) ⇒ Object

Initializer that generates tensors initialized to 0.



# File 'lib/tensor_stream/ops.rb', line 119

def zeros_initializer(dtype: :float32)
  TensorStream::Initializer.new(-> { _op(:zeros, data_type: dtype) })
end

#zeros_like(tensor, dtype: nil, name: nil) ⇒ Object

Creates a tensor with all elements set to zero. Given a single tensor (tensor), this operation returns a tensor of the same type and shape as tensor with all elements set to zero. Optionally, you can use dtype to specify a new type for the returned tensor.



# File 'lib/tensor_stream/ops.rb', line 366

def zeros_like(tensor, dtype: nil, name: nil)
  _op(:zeros_like, tensor, data_type: dtype, name: name)
end