Module: TensorStream::ArrayOpsHelper
- Included in: Evaluator::OpenclEvaluator, Evaluator::RubyEvaluator, OpenCLBuffer
- Defined in: lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb
Overview
Various utility functions for array processing.
Instance Method Summary
- #broadcast(input_a, input_b) ⇒ Object
- #broadcast_dimensions(input, dims = []) ⇒ Object
  Explicit broadcasting helper.
- #get_rank(value, rank = 0) ⇒ Object
- #process_function_op(a, op) ⇒ Object
- #reduced_shape(input_shape, axes) ⇒ Object
- #shape_diff(shape_a, shape_b) ⇒ Object
- #slice_tensor(input, start, size) ⇒ Object
- #softmax(arr) ⇒ Object
- #softmax_grad(arr) ⇒ Object
- #tile_arr(input, dimen, multiples) ⇒ Object
- #truncate(input, target_shape) ⇒ Object
- #vector_op(vector, vector2, op = ->(a, b) { a + b }, switch = false, safe = true) ⇒ Object
  Handles math operations between two tensors.
Instance Method Details
#broadcast(input_a, input_b) ⇒ Object
# File 'lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb', line 37

def broadcast(input_a, input_b)
  sa = shape_eval(input_a)
  sb = shape_eval(input_b)

  return [input_a, input_b] if sa == sb

  # descalar
  if sa.empty?
    input_a = [input_a]
    sa = [1]
  end

  if sb.empty?
    input_b = [input_b]
    sb = [1]
  end

  target_shape = shape_diff(sa, sb)

  if target_shape
    input_b = broadcast_dimensions(input_b, target_shape)
  else
    target_shape = shape_diff(sb, sa)
    raise "Incompatible shapes for op #{shape_eval(input_a)} vs #{shape_eval(input_b)}" if target_shape.nil?

    input_a = broadcast_dimensions(input_a, target_shape)
  end

  [input_a, input_b]
end
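A minimal usage sketch, assuming the tensor_stream gem is installed and that requiring it loads this helper. The ArrayOpsDemo harness class and its shape_eval stand-in are hypothetical, added here only so the mixin can be exercised outside an evaluator (in the gem, #broadcast relies on the evaluator's own shape_eval).

require 'tensor_stream'

# Hypothetical harness: mixes the helper into a plain object for demonstration.
class ArrayOpsDemo
  include TensorStream::ArrayOpsHelper

  # Stand-in for the evaluator's shape_eval: returns the dimensions of a
  # nested (rectangular) Ruby array, e.g. [[1, 2], [3, 4]] -> [2, 2].
  def shape_eval(arr)
    return [] unless arr.is_a?(Array)
    [arr.size] + shape_eval(arr[0])
  end
end

helper = ArrayOpsDemo.new

# Broadcast a scalar against a 2x2 array; both operands come back with shape [2, 2].
helper.broadcast([[1, 2], [3, 4]], 10)
# => [[[1, 2], [3, 4]], [[10, 10], [10, 10]]]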
#broadcast_dimensions(input, dims = []) ⇒ Object
Explicit broadcasting helper.
# File 'lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb', line 69

def broadcast_dimensions(input, dims = [])
  return input if dims.empty?

  d = dims.shift

  if input.is_a?(Array) && (get_rank(input) - 1) == dims.size
    row_to_dup = input.collect do |input|
      broadcast_dimensions(input, dims.dup)
    end

    row_to_dup + Array.new(d) { row_to_dup }.flatten(1)
  elsif input.is_a?(Array)
    Array.new(d) { broadcast_dimensions(input, dims.dup) }
  else
    Array.new(d + 1) { input }
  end
end
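Reusing the hypothetical helper object from the #broadcast sketch above: dims is the per-dimension size difference produced by #shape_diff, i.e. how many extra copies to add along each axis.

# Grow a row of shape [2] to shape [3, 2]; shape_diff([3, 2], [2]) gives [3, 0].
helper.broadcast_dimensions([1, 2], [3, 0])
# => [[1, 2], [1, 2], [1, 2]]

# Grow shape [1] to shape [3]; shape_diff([3], [1]) gives [2], meaning two extra copies.
helper.broadcast_dimensions([5], [2])
# => [5, 5, 5]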
#get_rank(value, rank = 0) ⇒ Object
# File 'lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb', line 162

def get_rank(value, rank = 0)
  return rank unless value.is_a?(Array)
  return rank + 1 if value.empty?

  get_rank(value[0], rank + 1)
end
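Rank is simply the nesting depth of the Ruby array. With the hypothetical helper from the first sketch:

helper.get_rank(5.0)              # => 0 (scalar)
helper.get_rank([1, 2, 3])        # => 1 (vector)
helper.get_rank([[1, 2], [3, 4]]) # => 2 (matrix)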
#process_function_op(a, op) ⇒ Object
# File 'lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb', line 153

def process_function_op(a, op)
  # ruby scalar
  if (a.is_a?(Tensor) && a.shape.rank > 0) || a.is_a?(Array)
    vector_op(a, 0, op)
  else
    op.call(a, 0)
  end
end
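A sketch of applying a unary function, using the hypothetical helper from the first sketch. The op lambda always receives a dummy 0 as its second argument, so a one-argument function is wrapped as shown (square is a hypothetical example op):

square = ->(x, _unused) { x * x }

helper.process_function_op(3, square)         # => 9 (plain scalar path)
helper.process_function_op([1, 2, 3], square) # => [1, 4, 9] (delegates to vector_op)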
#reduced_shape(input_shape, axes) ⇒ Object
# File 'lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb', line 26

def reduced_shape(input_shape, axes)
  return [] if axes.nil? # reduce to scalar
  axes = [axes] unless axes.is_a?(Array)
  return input_shape if axes.empty?

  axes.each do |dimen|
    input_shape[dimen] = 1
  end
  input_shape
end
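Reduced dimensions are kept with size 1, while nil axes collapse to a scalar shape; note the input shape array is modified in place. Using the hypothetical helper from the first sketch:

helper.reduced_shape([3, 4, 5], 1)      # => [3, 1, 5]
helper.reduced_shape([3, 4, 5], [0, 2]) # => [1, 4, 1]
helper.reduced_shape([3, 4, 5], nil)    # => []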
#shape_diff(shape_a, shape_b) ⇒ Object
# File 'lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb', line 124

def shape_diff(shape_a, shape_b)
  return nil if shape_b.size > shape_a.size

  reversed_a = shape_a.reverse
  reversed_b = shape_b.reverse

  reversed_a.each_with_index.collect do |s, index|
    next s if index >= reversed_b.size
    return nil if reversed_b[index] > s
    s - reversed_b[index]
  end.reverse
end
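The result is the per-dimension size difference, aligned from the trailing dimensions, or nil when shape_b cannot be broadcast into shape_a. With the hypothetical helper from the first sketch:

helper.shape_diff([3, 2], [1, 2]) # => [2, 0]
helper.shape_diff([3, 2], [2])    # => [3, 0] (missing leading dimension)
helper.shape_diff([2, 2], [3, 2]) # => nil (3 does not fit into 2)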
#slice_tensor(input, start, size) ⇒ Object
# File 'lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb', line 4

def slice_tensor(input, start, size)
  return input if size.empty?

  start_index = start.shift
  dimen_size = start_index + size.shift

  input[start_index...dimen_size].collect do |input|
    if input.is_a?(Array)
      slice_tensor(input, start.dup, size.dup)
    else
      input
    end
  end
end
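start and size are given per dimension; both arrays are consumed destructively via shift, so callers typically pass fresh arrays. Using the hypothetical helper from the first sketch:

# Take a 2x2 window starting at row 1, column 0.
helper.slice_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [1, 0], [2, 2])
# => [[4, 5], [7, 8]]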
#softmax(arr) ⇒ Object
# File 'lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb', line 169

def softmax(arr)
  return arr if arr.empty?

  sum = if !arr[0].is_a?(Array)
          arr.map { |a| Math.exp(a - arr.max) }.reduce(:+)
        end

  arr.collect do |input|
    if input.is_a?(Array)
      softmax(input)
    else
      Math.exp(input - arr.max) / sum
    end
  end
end
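Applied row-wise for nested arrays; each value is shifted by the row maximum before exponentiation for numerical stability. Using the hypothetical helper from the first sketch:

helper.softmax([1.0, 2.0, 3.0])
# => [0.0900, 0.2447, 0.6652] (approximately; the row sums to 1.0)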
#softmax_grad(arr) ⇒ Object
# File 'lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb', line 185

def softmax_grad(arr)
  return arr if arr.empty?

  arr.each_with_index.collect do |input, index|
    if input.is_a?(Array)
      softmax_grad(input)
    else
      arr.each_with_index.collect do |input2, index2|
        if index != index2
          -input * input2
        else
          input * (1.0 - input)
        end
      end
    end
  end
end
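For a probability row p this builds the softmax Jacobian: p_i * (1 - p_i) on the diagonal and -p_i * p_j off the diagonal. Using the hypothetical helper from the first sketch:

helper.softmax_grad([0.25, 0.75])
# => [[0.1875, -0.1875], [-0.1875, 0.1875]]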
#tile_arr(input, dimen, multiples) ⇒ Object
# File 'lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb', line 137

def tile_arr(input, dimen, multiples)
  t = multiples[dimen]
  if dimen == multiples.size - 1
    return nil if t.zero?
    input * t # ruby array dup
  else
    new_arr = input.collect do |sub|
      tile_arr(sub, dimen + 1, multiples)
    end.compact

    return nil if new_arr.empty?

    new_arr * t
  end
end
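dimen is the dimension currently being tiled (callers start at 0) and multiples gives the repeat count per dimension. Using the hypothetical helper from the first sketch:

helper.tile_arr([1, 2], 0, [3])
# => [1, 2, 1, 2, 1, 2]

# Repeat the rows twice and the columns once.
helper.tile_arr([[1, 2], [3, 4]], 0, [2, 1])
# => [[1, 2], [3, 4], [1, 2], [3, 4]]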
#truncate(input, target_shape) ⇒ Object
# File 'lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb', line 18

def truncate(input, target_shape)
  rank = get_rank(input)
  return input if rank.zero?

  start = Array.new(rank) { 0 }
  slice_tensor(input, start, target_shape)
end
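Effectively a slice_tensor starting at the origin, keeping only target_shape elements per dimension. Using the hypothetical helper from the first sketch:

helper.truncate([[1, 2, 3], [4, 5, 6]], [2, 2])
# => [[1, 2], [4, 5]]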
#vector_op(vector, vector2, op = ->(a, b) { a + b }, switch = false, safe = true) ⇒ Object
Handles math operations between two tensors.
# File 'lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb', line 88

def vector_op(vector, vector2, op = ->(a, b) { a + b }, switch = false, safe = true)
  if get_rank(vector) < get_rank(vector2) # upgrade rank of A
    duplicated = Array.new(vector2.size) do
      vector
    end
    return vector_op(duplicated, vector2, op, switch)
  end

  return op.call(vector, vector2) unless vector.is_a?(Array)

  vector.each_with_index.collect do |input, index|
    next vector_op(input, vector2, op, switch) if input.is_a?(Array) && get_rank(vector) > get_rank(vector2)

    if safe && vector2.is_a?(Array)
      next nil if vector2.size != 1 && index >= vector2.size
    end

    z = if vector2.is_a?(Array)
          if index < vector2.size
            vector2[index]
          else
            raise 'incompatible tensor shapes used during op' if vector2.size != 1
            vector2[0]
          end
        else
          vector2
        end

    if input.is_a?(Array)
      vector_op(input, z, op, switch)
    else
      switch ? op.call(z, input) : op.call(input, z)
    end
  end.compact
end
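The default op is element-wise addition; a custom lambda, a scalar second operand, and mismatched ranks (broadcast row-wise) are all handled, and switch flips the argument order passed to op. Using the hypothetical helper from the first sketch:

helper.vector_op([1, 2, 3], [10, 20, 30], ->(a, b) { a * b })
# => [10, 40, 90]

# Scalar second operand with the default addition op.
helper.vector_op([1, 2, 3], 1)
# => [2, 3, 4]

# A lower-rank second operand is broadcast across each row.
helper.vector_op([[1, 2], [3, 4]], [10, 20])
# => [[11, 22], [13, 24]]

# switch = true calls op.call(b, a) instead of op.call(a, b).
helper.vector_op([1, 2, 3], 10, ->(a, b) { a - b }, true)
# => [9, 8, 7]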