Class: TensorStream::NN

Inherits: Object
Extended by: OpHelper

Defined in: lib/tensor_stream/nn/nn_ops.rb

Overview

High-level machine learning functions.

Class Method Summary

Methods included from OpHelper

_op, cons, format_source, fp_type?, i_cons, i_op, i_var, int_type?, reduced_shape, shape_eval, shape_full_specified, shapes_fully_specified_and_equal

Class Method Details

.conv2d(input, filter, strides, padding, name: nil) ⇒ Object



# File 'lib/tensor_stream/nn/nn_ops.rb', line 135

def conv2d(input, filter, strides, padding, name: nil)
  _op(:conv2d, input, filter, strides: strides, padding: padding, name: name)
end
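
Since conv2d just forwards to the :conv2d op, parameter semantics come from the underlying evaluator. A minimal usage sketch, assuming TensorFlow-compatible conventions (NHWC input, [height, width, in_channels, out_channels] filters, a 4-element strides array, 'SAME'/'VALID' padding) and the TensorStream.constant helper:

ts = TensorStream
image  = ts.constant(0.5, shape: [1, 28, 28, 1]) # one 28x28 single-channel image
kernel = ts.constant(0.1, shape: [3, 3, 1, 8])   # eight 3x3 filters
conv   = TensorStream::NN.conv2d(image, kernel, [1, 1, 1, 1], 'SAME')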

.dropout(x, keep_prob, noise_shape: nil, seed: nil, name: nil) ⇒ Object

Computes dropout.

With probability keep_prob, outputs the input element scaled up by 1 / keep_prob, otherwise outputs 0. The scaling is so that the expected sum is unchanged.



# File 'lib/tensor_stream/nn/nn_ops.rb', line 26

def dropout(x, keep_prob, noise_shape: nil, seed: nil, name: nil)
  TensorStream.name_scope(name, "dropout", values: [x]) do
    x = TensorStream.convert_to_tensor(x, name: "x")
    raise TensorStream::ValueError, "x has to be a floating point tensor since it's going to be scaled. Got a #{x.data_type} tensor instead." unless fp_type?(x.data_type)
    raise TensorStream::ValueError, "keep_prob must be a scalar tensor or a float in the range (0, 1], got #{keep_prob}" if keep_prob.is_a?(Float) && !(0 < keep_prob && keep_prob <= 1)

    return x if keep_prob.is_a?(Float) && keep_prob.to_f == 1.0

    keep_prob = TensorStream.convert_to_tensor(keep_prob, dtype: x.dtype, name: "keep_prob")
    return x if keep_prob.value == 1.0

    noise_shape ||= TensorStream.shape(x)

    # uniform random tensor in [keep_prob, 1.0 + keep_prob)
    random_tensor = keep_prob
    random_tensor += TensorStream.random_uniform(noise_shape, seed: seed, dtype: x.dtype)

    # floor yields 0.0 for values in [keep_prob, 1.0) and 1.0 for values in [1.0, 1.0 + keep_prob)
    binary_tensor = TensorStream.floor(random_tensor)
    TensorStream.div(x, keep_prob) * binary_tensor
  end
end
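
A minimal sketch of the scaling described above, assuming the TensorFlow 1.x-style TensorStream.constant and TensorStream.session helpers. With keep_prob = 0.5, every surviving element is doubled (1 / keep_prob), so the expected sum matches the input:

ts = TensorStream
x = ts.constant([[1.0, 2.0], [3.0, 4.0]])
dropped = TensorStream::NN.dropout(x, 0.5, seed: 42)
ts.session do |sess|
  sess.run(dropped) # each element is either 0.0 or twice its original value
end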

.log_softmax(logits, axis: -1, name: nil) ⇒ Object

Computes log softmax activations.



# File 'lib/tensor_stream/nn/nn_ops.rb', line 116

def log_softmax(logits, axis: -1, name: nil)
  _op(:log_softmax, logits, axis: axis, name: name)
end
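
Elementwise along the chosen axis this is the usual identity log_softmax(x)[i] = x[i] - log(sum(exp(x))), computed as a single op rather than as TensorStream.log of a softmax, which is the numerically safer formulation.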

.relu(features, name: nil) ⇒ Object



# File 'lib/tensor_stream/nn/nn_ops.rb', line 11

def relu(features, name: nil)
  TensorStream.max(features, 0, name: "relu_#{name}")
end
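
Example (a sketch, with the same assumed constant/session API as above): negative inputs clamp to zero.

ts = TensorStream
out = TensorStream::NN.relu(ts.constant([-2.0, 0.0, 3.0]))
ts.session { |sess| sess.run(out) } # => [0.0, 0.0, 3.0]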

.relu6(features, name: nil) ⇒ Object



# File 'lib/tensor_stream/nn/nn_ops.rb', line 15

def relu6(features, name: nil)
  TensorStream.name_scope(name, "Relu6", values: [features]) do
    features = TensorStream.convert_to_tensor(features, name: "features")
    _op(:relu6, features, name: name)
  end
end
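
Relu6 computes the bounded activation min(max(features, 0), 6).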

.sigmoid(input, name: nil) ⇒ Object



# File 'lib/tensor_stream/nn/nn_ops.rb', line 51

def sigmoid(input, name: nil)
  TensorStream.sigmoid(input, name: name)
end

.sigmoid_cross_entropy_with_logits(labels: nil, logits: nil, name: nil) ⇒ Object



# File 'lib/tensor_stream/nn/nn_ops.rb', line 120

def sigmoid_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
  TensorStream.name_scope(name, default: 'logistic_loss', values: [logits, labels]) do |_name|
    tf = TensorStream
    logits = tf.convert_to_tensor(logits, name: 'logits')
    labels = tf.convert_to_tensor(labels, name: 'labels')
    zeros = tf.zeros_like(logits, dtype: logits.dtype)
    cond = (logits >= zeros)
    relu_logits = tf.where(cond, logits, zeros)
    neg_abs_logits = tf.where(cond, -logits, logits)

    tf.add(relu_logits - logits * labels,
           tf.log1p(tf.exp(neg_abs_logits)), name: name)
  end
end
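
The where/log1p combination implements the standard numerically stable form of the logistic loss. For logits x and labels z, the naive expression z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) algebraically reduces to

  max(x, 0) - x * z + log(1 + exp(-|x|))

relu_logits supplies the max(x, 0) term and neg_abs_logits supplies -|x|, so exp never overflows for large-magnitude logits.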

.softmax(logits, axis: nil, name: nil) ⇒ Object



# File 'lib/tensor_stream/nn/nn_ops.rb', line 7

def softmax(logits, axis: nil, name: nil)
  _op(:softmax, logits, axis: axis, name: name)
end

.softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil) ⇒ Object



# File 'lib/tensor_stream/nn/nn_ops.rb', line 55

def softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
  softmax_cross_entropy_with_logits_v2(labels: labels, logits: logits, name: name)
end

.softmax_cross_entropy_with_logits_v2(labels: nil, logits: nil, name: nil) ⇒ Object



# File 'lib/tensor_stream/nn/nn_ops.rb', line 59

def softmax_cross_entropy_with_logits_v2(labels: nil, logits: nil, name: nil)
  TensorStream.name_scope(name, default: 'softmax_cross_entropy_with_logits', values: [logits, labels]) do
    ts = TensorStream
    logits = ts.convert_to_tensor(logits, name: 'logits')
    labels = ts.convert_to_tensor(labels, name: 'labels')
    labels = ts.cast(labels, logits.dtype)

    output = _op(:softmax_cross_entropy_with_logits_v2, logits, labels)
    output[0]
  end
end
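
output[0] selects the first output of the fused op, the per-example loss (in TensorFlow's version of this kernel the second output is the backprop term). A minimal sketch with one-hot labels, under the same assumed constant/session API as the dropout example:

ts = TensorStream
labels = ts.constant([[0.0, 1.0, 0.0]])
logits = ts.constant([[1.0, 2.0, 0.5]])
loss = TensorStream::NN.softmax_cross_entropy_with_logits_v2(labels: labels, logits: logits)
ts.session { |sess| sess.run(loss) } # one loss value per row of logits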

.sparse_softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil) ⇒ Object



# File 'lib/tensor_stream/nn/nn_ops.rb', line 71

def sparse_softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
  TensorStream.name_scope(name, default: "SparseSoftmaxCrossEntropyWithLogits", values: [logits, labels]) do
    tf = TensorStream
    labels = tf.convert_to_tensor(labels)
    logits = tf.convert_to_tensor(logits)
    precise_logits = logits.data_type == :float16 ? tf.cast(logits, :float32) : logits

    labels_static_shape = labels.shape
    labels_shape = tf.shape(labels)
    static_shapes_fully_defined = labels_static_shape.known? && logits.shape.known?

    raise TensorStream::ValueError, "Logits cannot be scalars - received shape #{logits.shape.shape}." if logits.shape.known? && logits.shape.scalar?
    raise TensorStream::ValueError, "Rank mismatch: Rank of labels (received #{labels_static_shape.ndims}) " +
      "should equal rank of logits minus 1 (received #{logits.shape.ndims})." if logits.shape.known? && (labels_static_shape.known? && labels_static_shape.ndims != logits.shape.ndims - 1)
    if logits.shape.ndims == 2
      cost = _op(:sparse_softmax_cross_entropy_with_logits,
          precise_logits, labels, name: name)
      if logits.data_type == :float16
        return tf.cast(cost[0], :float16)
      else
        return cost[0]
      end
    end

    shape_checks = []

    shape_checks << tf.assert_equal(tf.rank(labels), tf.rank(logits) - 1) unless static_shapes_fully_defined

    tf.control_dependencies(shape_checks) do
      num_classes = tf.shape(logits)[tf.rank(logits) - 1]
      precise_logits = tf.reshape(precise_logits, [-1, num_classes])
      labels = tf.reshape(labels, [-1])
      cost = _op(:sparse_softmax_cross_entropy_with_logits, precise_logits, labels, name: name)
      cost = tf.reshape(cost[0], labels_shape)

      if logits.data_type == :float16
        tf.cast(cost, :float16)
      else
        cost
      end
    end
  end
end
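
Unlike the dense variant, labels here are integer class ids with one fewer dimension than logits; rank-2 logits take the fast path above, while higher-rank logits are flattened to [-1, num_classes], scored, and reshaped back to the label shape. A minimal sketch (same assumed constant/session API as above):

ts = TensorStream
labels = ts.constant([1, 0])                              # one class id per example
logits = ts.constant([[0.2, 2.5, 0.3], [3.0, 0.1, 0.2]])
loss = TensorStream::NN.sparse_softmax_cross_entropy_with_logits(labels: labels, logits: logits)
ts.session { |sess| sess.run(loss) } # one loss value per example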