Module: TensorStream::Utils

Included in:
TensorStream
Defined in:
lib/tensor_stream/utils.rb

Instance Method Summary

Instance Method Details

#__v_scope_name ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 101

def __v_scope_name
  Thread.current[:tensor_stream_variable_scope].map(&:name).compact.reject(&:empty?).join('/')
end

#assign(ref, value, name: nil) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 152

def assign(ref, value, name: nil)
  raise "#{ref.name} not a variable" unless ref.is_a?(Variable)
  ref.assign(value, name: name)
end
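
Example (a minimal usage sketch; assumes Session#run evaluates the returned assign op, as in typical TensorStream usage):

ts = TensorStream
w = ts.variable(1.0, name: 'w')
update = ts.assign(w, 2.0)
sess = ts.session
sess.run(ts.global_variables_initializer)
sess.run(update)   # w now holds 2.0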

#check_allowed_types(input, types) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 191

def check_allowed_types(input, types)
  return input unless input.is_a?(Tensor)
  return input if input.data_type.nil?

  raise "#{input.source}: Parameter data type #{input.data_type} passed not in #{types.join(',')}" unless types.include?(input.data_type.to_sym)
end

#check_data_types(input_a, input_b) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 198

def check_data_types(input_a, input_b)
  if !input_a.is_a?(Tensor) && input_b.is_a?(Tensor)
    input_a = convert_to_tensor(input_a, dtype: input_b.data_type)
  elsif !input_b.is_a?(Tensor) && input_a.is_a?(Tensor)
    input_b = convert_to_tensor(input_b, dtype: input_a.data_type)
  else
    input_a = convert_to_tensor(input_a)
    input_b = convert_to_tensor(input_b)
  end

  if norm_dtype(input_a.data_type) != norm_dtype(input_b.data_type)
    raise "Value Error: Tensor conversion requested dtype #{input_a.data_type} for tensor type #{input_b.data_type}" 
  end

  [input_a, input_b]
end

#constant(value, dtype: nil, shape: nil, internal: false, name: 'Const') ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 120

def constant(value, dtype: nil, shape: nil, internal: false, name: 'Const')
  shared_options = { const: true, value: value, name: name, internal: internal }
  if value.is_a?(Float)
    TensorStream::Tensor.new(dtype || :float32, 0, shape || [], shared_options)
  elsif value.is_a?(Integer)
    TensorStream::Tensor.new(dtype || :int32, 0, shape || [], shared_options)
  elsif value.is_a?(String)
    TensorStream::Tensor.new(dtype || :string, 0, shape || [], shared_options)
  elsif value.is_a?(Array)
    dimension = shape || shape_eval(value)
    rank = dimension.size

    cur_dtype = dtype || Tensor.detect_type(value.flatten.last)
    value = Tensor.cast_dtype(value, cur_dtype) unless dtype.nil?

    shared_options[:value] = value
    TensorStream::Tensor.new(cur_dtype, rank, dimension, shared_options)
  end
end
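
Example (a minimal sketch based on the type dispatch above):

ts = TensorStream
a = ts.constant(3.0)                        # scalar, dtype defaults to :float32
b = ts.constant([1, 2, 3], dtype: :int32)   # rank-1 tensor, values cast to int32
m = ts.constant([[1.0, 2.0], [3.0, 4.0]])   # rank and shape inferred from the array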

#control_dependencies(control_inputs, &block) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 177

def control_dependencies(control_inputs, &block)
  TensorStream.get_default_graph.control_dependencies(control_inputs, &block)
end
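
Example (a minimal sketch; assumes tensors support Ruby arithmetic operators, as in the TensorStream README):

ts = TensorStream
acc = ts.variable(0.0, name: 'acc')
update = ts.assign(acc, 5.0)

ts.control_dependencies([update]) do
  result = acc + 1.0   # this op is constrained to run only after `update` executes
end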

#convert_to_tensor(value, dtype: nil, name: nil, preferred_dtype: nil) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 181

def convert_to_tensor(value, dtype: nil, name: nil, preferred_dtype: nil)
  return convert_to_tensor(value.call) if value.is_a?(Proc)

  if !value.is_a?(Tensor)
    i_cons(value, dtype: dtype || Tensor.detect_type(value), name: name)
  else
    value
  end
end
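
Example (behavior follows directly from the source above):

ts = TensorStream
t1 = ts.convert_to_tensor(3.0)         # wraps the Float in a constant tensor
t2 = ts.convert_to_tensor(t1)          # already a Tensor, returned unchanged
t3 = ts.convert_to_tensor(-> { 5 })    # a Proc is called first, then converted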

#device(device_uri, &block) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 81

def device(device_uri, &block)
  get_default_graph.device(device_uri, &block)
end
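
Example (a sketch only; the device URI format depends on the evaluators installed, so the string below is purely illustrative):

ts = TensorStream
ts.device('/cpu:0') do
  a = ts.constant(1.0)   # operations created in the block are pinned to that device
end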

#disable_eager_execution ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 23

def disable_eager_execution
  TensorStream::Graph.get_default_graph.disable_eager_execution
end

#enable_eager_execution ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 19

def enable_eager_execution
  TensorStream::Graph.get_default_graph.enable_eager_execution
end

#executing_eagerly? ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/tensor_stream/utils.rb', line 27

def executing_eagerly?
  TensorStream::Graph.get_default_graph.executing_eagerly?
end
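
Example covering enable_eager_execution, disable_eager_execution and executing_eagerly? (a minimal sketch; eager mode support depends on the evaluator in use):

ts = TensorStream
ts.enable_eager_execution
ts.executing_eagerly?    # => true
ts.disable_eager_execution
ts.executing_eagerly?    # => false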

#float32 ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 3

def float32
  Types.float32
end

#get_collection(name, options = {}) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 148

def get_collection(name, options = {})
  Graph.get_default_graph.get_collection(name, options)
end

#get_default_graph ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 11

def get_default_graph
  TensorStream::Graph.get_default_graph
end

#get_variable(name, dtype: nil, shape: nil, initializer: nil, trainable: true, collections: nil) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 144

def get_variable(name, dtype: nil, shape: nil, initializer: nil, trainable: true, collections: nil)
  TensorStream::Variable.new(dtype || :float32, nil, shape, collections: collections, name: name, initializer: initializer, trainable: trainable)
end
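
Example (a minimal sketch; an initializer object can also be passed via the initializer: option):

ts = TensorStream
w = ts.get_variable('weights', dtype: :float32, shape: [2, 2])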

#get_variable_scope ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 96

def get_variable_scope
  return nil unless Thread.current[:tensor_stream_variable_scope]
  __v_scope_name
end

#global_variables_initializer ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 161

def global_variables_initializer
  TensorStream::Variable.global_variables_initializer
end
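
Example (a minimal sketch; variables must be initialized before they are read):

ts = TensorStream
v = ts.variable(1.0, name: 'v')
sess = ts.session
sess.run(ts.global_variables_initializer)   # runs the initializer op of every variable
sess.run(v)                                  # => 1.0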

#graph ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 7

def graph
  TensorStream::Graph.new
end

#group(inputs, name: nil) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 140

def group(inputs, name: nil)
  TensorStream::ControlFlow.new(:group, inputs, nil, name: name)
end
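
Example (a minimal sketch; running the group executes all of the wrapped ops):

ts = TensorStream
a = ts.variable(0.0, name: 'a')
b = ts.variable(0.0, name: 'b')
init_both = ts.group([ts.assign(a, 1.0), ts.assign(b, 2.0)])
sess = ts.session
sess.run(ts.global_variables_initializer)
sess.run(init_both)   # both assigns run as a single unit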

#layers ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 116

def layers
  TensorStream::Layers
end

#list_local_devices ⇒ Object

Lists the available evaluators and devices in the current local environment.

Returns:

  • An array containing the names of those devices



# File 'lib/tensor_stream/utils.rb', line 35

def list_local_devices
  local_name = 'job:localhost'
  TensorStream::Evaluator.evaluators.collect do |k, v|
    v[:class].query_supported_devices.collect do |device_str|
      [local_name, "ts:#{k}:#{device_str.name}"].join('/')
    end
  end.flatten
end
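
Example (the exact entries depend on which evaluators are installed; the output shown is illustrative only):

TensorStream.list_local_devices
# => ["job:localhost/ts:ruby:cpu", ...]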

#name_scope(name, default: nil, values: nil) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 85

def name_scope(name, default: nil, values: nil)
  if values
    graph_count = values.select { |v| v.is_a?(Tensor) }.map(&:graph).map(&:object_id).uniq.size
    raise "values are not on the same graph" if graph_count > 1
  end

  get_default_graph.name_scope(name || default) do |scope|
    yield scope if block_given?
  end
end
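
Example (a minimal sketch; ops created in the block are grouped under the given scope):

ts = TensorStream
ts.name_scope('layer1') do |scope|
  sum = ts.constant(1.0) + ts.constant(2.0)   # named under "layer1/..."
end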

#norm_dtype(dtype) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 215

def norm_dtype(dtype)
  dtype = dtype.to_sym
  case dtype
  when :int
    :int32
  when :float
    :float32
  else
    dtype
  end
end
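
Example (behavior follows directly from the case statement above):

TensorStream.norm_dtype(:int)       # => :int32
TensorStream.norm_dtype(:float)     # => :float32
TensorStream.norm_dtype(:float64)   # => :float64 (passed through unchanged)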

#placeholder(dtype, shape: nil, name: nil) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 157

def placeholder(dtype, shape: nil, name: nil)
  TensorStream::Placeholder.new(dtype, nil, shape, name: name)
end
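
Example (a minimal sketch; assumes Session#run accepts a feed_dict hash keyed by placeholder, as in the TensorStream README examples):

ts = TensorStream
x = ts.placeholder(:float32, shape: [nil, 3])   # nil leaves the batch dimension open
y = x * 2.0
sess = ts.session
sess.run(y, feed_dict: { x => [[1.0, 2.0, 3.0]] })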

#program(&block) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 112

def program(&block)
  block.call(self)
end

#reset_default_graph ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 15

def reset_default_graph
  TensorStream::Graph.get_default_graph.reset
end

#session(evaluator = nil, thread_pool_class: Concurrent::ImmediateExecutor, log_device_placement: false) {|session| ... } ⇒ Object

Yields:

  • (session)


# File 'lib/tensor_stream/utils.rb', line 105

def session(evaluator = nil, thread_pool_class: Concurrent::ImmediateExecutor, log_device_placement: false)
  session = TensorStream::Session.new(evaluator, thread_pool_class: thread_pool_class, log_device_placement: log_device_placement)
  yield session if block_given?

  session
end
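
Example (both the block form and the plain return value can be used):

ts = TensorStream
a = ts.constant(2.0)
b = ts.constant(3.0)

ts.session do |sess|    # block form: the new session is yielded
  sess.run(a + b)       # => 5.0
end

sess = ts.session       # the session is also returned for later reuse
sess.run(a * b)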

#set_random_seed(seed) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 173

def set_random_seed(seed)
  TensorStream.get_default_graph.random_seed = seed
end

#train ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 165

def train
  TensorStream::Trainer
end

#trainable_variables ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 169

def trainable_variables
  TensorStream.get_default_graph.get_collection(TensorStream::GraphKeys::TRAINABLE_VARIABLES)
end
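
Example (a minimal sketch; variables are assumed to join the TRAINABLE_VARIABLES collection unless created with trainable: false):

ts = TensorStream
w = ts.variable(1.0, name: 'w')                      # trainable by default
b = ts.variable(0.0, name: 'b', trainable: false)    # excluded from the collection
ts.trainable_variables.map(&:name)                   # includes "w" but not "b"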

#variable(value, name: nil, initializer: nil, graph: nil, dtype: nil, trainable: true) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 44

def variable(value, name: nil, initializer: nil, graph: nil, dtype: nil, trainable: true)
  op = Operation.new(:assign, nil, value)
  common_options = {
    initializer: initializer || op,
    name: name,
    graph: graph,
    dtype: dtype,
    trainable: trainable
  }
  tensor = if value.is_a?(String)
    TensorStream::Variable.new(dtype || :string, 0, [], common_options)
  elsif value.is_a?(Integer)
    TensorStream::Variable.new(dtype || :int32, 0, [], common_options)
  elsif value.is_a?(Float)
    TensorStream::Variable.new(dtype || :float32, 0, [], common_options)
  else
    TensorStream::Variable.new(dtype || :float32, 0, nil, common_options)
  end
  op.inputs[0] = tensor
  tensor
end
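
Example (a minimal sketch based on the type dispatch above; variables must be initialized before use):

ts = TensorStream
w = ts.variable(0.5, name: 'weight')        # dtype :float32 inferred from the Float
n = ts.variable(3, name: 'count')           # dtype :int32 inferred from the Integer
sess = ts.session
sess.run(ts.global_variables_initializer)
sess.run(w)                                  # => 0.5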

#variable_scope(scope = nil, reuse: nil, initializer: nil) ⇒ Object



# File 'lib/tensor_stream/utils.rb', line 66

def variable_scope(scope = nil, reuse: nil, initializer: nil)
  Thread.current[:tensor_stream_variable_scope] ||= []
  Thread.current[:tensor_stream_variable_scope] << OpenStruct.new(name: scope, reuse: reuse, initializer: initializer)
  scope_name = __v_scope_name
  begin
    if block_given?
      TensorStream.get_default_graph.name_scope(scope) do
        yield(scope_name)
      end
    end
  ensure
    Thread.current[:tensor_stream_variable_scope].pop
  end
end
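
Example (a minimal sketch; the block receives the joined scope name built by __v_scope_name):

ts = TensorStream
ts.variable_scope('model') do |scope_name|
  w = ts.get_variable('w', dtype: :float32, shape: [2])
  scope_name   # => "model"
end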