Class: Torch::NN::Functional

Inherits:
Object
Extended by:
Utils
Defined in:
lib/torch/nn/functional.rb

Class Method Summary

Methods included from Utils

_ntuple, _pair, _quadrupal, _single, _triple

Class Method Details

.alpha_dropout(input, p: 0.5, training: true, inplace: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 304

def alpha_dropout(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.alpha_dropout!(input, p, training)
  else
    Torch.alpha_dropout(input, p, training)
  end
end
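
Example (a minimal sketch; the shapes are illustrative, not from the source): alpha dropout preserves the input's mean and variance, so it is designed to pair with SELU activations. With training: false the call returns the input unchanged.

require "torch"

x = Torch.randn(4, 8)
y = Torch::NN::Functional.alpha_dropout(x, p: 0.2)      # training: true is the default
Torch::NN::Functional.alpha_dropout(x, training: false) # no-op at eval time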

.avg_pool1d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 90

def avg_pool1d(*args, **options)
  Torch.avg_pool1d(*args, **options)
end

.avg_pool2d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 94

def avg_pool2d(*args, **options)
  NN.avg_pool2d(*args, **options)
end

.avg_pool3d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 98

def avg_pool3d(*args, **options)
  NN.avg_pool3d(*args, **options)
end

.batch_norm(input, running_mean, running_var, weight: nil, bias: nil, training: false, momentum: 0.1, eps: 1e-5) ⇒ Object

normalization layers



# File 'lib/torch/nn/functional.rb', line 209

def batch_norm(input, running_mean, running_var, weight: nil, bias: nil,
  training: false, momentum: 0.1, eps: 1e-5)

  if training
    size = input.size
    size_prods = size[0]
    (size.length - 2).times do |i|
      size_prods *= size[i + 2]
    end
    if size_prods == 1
      raise ArgumentError, "Expected more than 1 value per channel when training, got input size #{size.inspect}"
    end
  end

  Torch.batch_norm(
    input, weight, bias, running_mean, running_var,
    training, momentum, eps, false
  )
end
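
Example (a minimal sketch; the shapes are hypothetical): running_mean and running_var must match the channel dimension, and they are updated in place when training: true.

require "torch"

input = Torch.randn(4, 3, 8, 8) # [batch, channels, height, width]
running_mean = Torch.zeros(3)
running_var = Torch.ones(3)
out = Torch::NN::Functional.batch_norm(input, running_mean, running_var, training: true)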

.bilinear(input1, input2, weight, bias) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 272

def bilinear(input1, input2, weight, bias)
  Torch.bilinear(input1, input2, weight, bias)
end

.binary_cross_entropy(input, target, weight: nil, reduction: "mean") ⇒ Object

loss functions



# File 'lib/torch/nn/functional.rb', line 363

def binary_cross_entropy(input, target, weight: nil, reduction: "mean")
  NN.binary_cross_entropy(input, target, weight, reduction)
end
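
Example (a minimal sketch with made-up values): the input must already hold probabilities in [0, 1], so apply a sigmoid first, or pass raw scores to binary_cross_entropy_with_logits instead.

require "torch"

logits = Torch.randn(3)
target = Torch.tensor([0.0, 1.0, 1.0])
loss = Torch::NN::Functional.binary_cross_entropy(logits.sigmoid, target)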

.binary_cross_entropy_with_logits(input, target, weight: nil, reduction: "mean", pos_weight: nil) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 367

def binary_cross_entropy_with_logits(input, target, weight: nil, reduction: "mean", pos_weight: nil)
  Torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction)
end

.conv1d(*args, **options) ⇒ Object

convolution layers



# File 'lib/torch/nn/functional.rb', line 9

def conv1d(*args, **options)
  Torch.conv1d(*args, **options)
end

.conv2d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 13

def conv2d(*args, **options)
  Torch.conv2d(*args, **options)
end
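
Example (a minimal sketch; the shapes and the keyword form of the optional arguments are illustrative): arguments are forwarded to Torch.conv2d, so the weight layout is [out_channels, in_channels, kernel_height, kernel_width].

require "torch"

input = Torch.randn(1, 3, 28, 28) # [batch, channels, height, width]
weight = Torch.randn(16, 3, 3, 3) # 16 filters of size 3x3 over 3 channels
out = Torch::NN::Functional.conv2d(input, weight, padding: 1)
out.size # => [1, 16, 28, 28]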

.conv3d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 17

def conv3d(*args, **options)
  Torch.conv3d(*args, **options)
end

.cosine_embedding_loss(input1, input2, target, margin: 0, reduction: "mean") ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 371

def cosine_embedding_loss(input1, input2, target, margin: 0, reduction: "mean")
  raise NotImplementedYet
end

.cosine_similarity(x1, x2, dim: 1, eps: 1e-8) ⇒ Object

distance functions



# File 'lib/torch/nn/functional.rb', line 353

def cosine_similarity(x1, x2, dim: 1, eps: 1e-8)
  Torch.cosine_similarity(x1, x2, dim, eps)
end
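
Example (a minimal sketch; shapes are illustrative): similarity is computed along dim (1 by default), yielding one value in [-1, 1] per row.

require "torch"

x1 = Torch.randn(5, 10)
x2 = Torch.randn(5, 10)
sim = Torch::NN::Functional.cosine_similarity(x1, x2) # shape [5]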

.cross_entropy(input, target, weight: nil, ignore_index: -100, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 375

def cross_entropy(input, target, weight: nil, ignore_index: -100, reduction: "mean")
  nll_loss(log_softmax(input, 1), target, weight: weight, ignore_index: ignore_index, reduction: reduction)
end
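
Example (a minimal sketch; sizes are hypothetical): since the body composes log_softmax and nll_loss, the input is raw logits (no softmax needed) and the target holds integer class indices.

require "torch"

logits = Torch.randn(3, 5)       # 3 samples, 5 classes
target = Torch.tensor([1, 0, 4]) # one class index per sample
loss = Torch::NN::Functional.cross_entropy(logits, target)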

.ctc_loss(log_probs, targets, input_lengths, target_lengths, blank: 0, reduction: "mean", zero_infinity: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 379

def ctc_loss(log_probs, targets, input_lengths, target_lengths, blank: 0, reduction: "mean", zero_infinity: false)
  # call to_a on input_lengths and target_lengths for C++
  Torch.ctc_loss(log_probs, targets, input_lengths.to_a, target_lengths.to_a, blank, reduction, zero_infinity)
end
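
Example (a minimal sketch; the shapes are hypothetical, and it assumes Torch.randint mirrors the PyTorch signature): log_probs is [time, batch, classes] with class 0 as the blank, and the length arguments can be plain Ruby arrays thanks to the to_a calls above.

require "torch"

log_probs = Torch.randn(50, 2, 10).log_softmax(2)
targets = Torch.randint(1, 10, [2, 20]) # labels; 0 is reserved for blank
loss = Torch::NN::Functional.ctc_loss(log_probs, targets, [50, 50], [20, 15])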

.dropout(input, p: 0.5, training: true, inplace: false) ⇒ Object

dropout layers



# File 'lib/torch/nn/functional.rb', line 278

def dropout(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.dropout!(input, p, training)
  else
    Torch.dropout(input, p, training)
  end
end
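
Example (a minimal sketch): surviving elements are scaled by 1/(1 - p), so the expected value is unchanged; training: false makes the call a no-op, which is how modules implement eval mode.

require "torch"

x = Torch.ones(2, 4)
Torch::NN::Functional.dropout(x, p: 0.5)          # zeros roughly half, doubles the rest
Torch::NN::Functional.dropout(x, training: false) # returns x unchanged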

.dropout2d(input, p: 0.5, training: true, inplace: false) ⇒ Object

Raises:

  • (ArgumentError)


# File 'lib/torch/nn/functional.rb', line 286

def dropout2d(input, p: 0.5, training: true, inplace: false)
  raise ArgumentError, "dropout probability has to be between 0 and 1, but got #{p}" if p < 0 || p > 1

  if inplace
    Torch.feature_dropout!(input, p, training)
  else
    Torch.feature_dropout(input, p, training)
  end
end

.dropout3d(input, p: 0.5, training: true, inplace: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 296

def dropout3d(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.feature_dropout!(input, p, training)
  else
    Torch.feature_dropout(input, p, training)
  end
end

.embedding(input, weight, padding_idx: nil, max_norm: nil, norm_type: 2.0, scale_grad_by_freq: false, sparse: false) ⇒ Object

sparse layers

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 322

def embedding(input, weight, padding_idx: nil, max_norm: nil, norm_type: 2.0, scale_grad_by_freq: false, sparse: false)
  # TODO handle max_norm and norm_type
  raise NotImplementedYet unless max_norm.nil? && norm_type == 2.0

  padding_idx ||= -1
  # weight and indices are swapped from Python interface
  Torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
end
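
Example (a minimal sketch; sizes are hypothetical): input holds integer indices into weight, the lookup table. The public argument order here is (input, weight), even though the body swaps them for the C++ call.

require "torch"

weight = Torch.randn(10, 3) # vocabulary of 10, embedding dimension 3
input = Torch.tensor([[1, 2, 4], [4, 3, 9]])
out = Torch::NN::Functional.embedding(input, weight) # shape [2, 3, 3]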

.embedding_bag(input, weight, offsets: nil, max_norm: nil, norm_type: 2, scale_grad_by_freq: false, mode: "mean", sparse: false, per_sample_weights: nil) ⇒ Object

Raises:

  • (NotImplementedYet)
  • (ArgumentError)

# File 'lib/torch/nn/functional.rb', line 331

def embedding_bag(input, weight, offsets: nil, max_norm: nil, norm_type: 2, scale_grad_by_freq: false, mode: "mean", sparse: false, per_sample_weights: nil)
  # TODO handle max_norm and norm_type
  raise NotImplementedYet unless max_norm.nil? && norm_type == 2.0

  mode_enum =
    case mode
    when "sum"
      0
    when "mean"
      1
    when "max"
      2
    else
      raise ArgumentError, "Unknown mode: #{mode}"
    end

  # weight and input swapped
  Torch.embedding_bag(weight, input, offsets, scale_grad_by_freq, mode_enum, sparse, per_sample_weights)
end

.feature_alpha_dropout(input, p: 0.5, training: true, inplace: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 312

def feature_alpha_dropout(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.feature_alpha_dropout!(input, p, training)
  else
    Torch.feature_alpha_dropout(input, p, training)
  end
end

.fold(input, output_size, kernel_size, dilation: 1, padding: 0, stride: 1) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 29

def fold(input, output_size, kernel_size, dilation: 1, padding: 0, stride: 1)
  if input.dim == 3
    NN.col2im(input, _pair(output_size), _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride))
  else
    raise Error, "Input Error: Only 3D input Tensors are supported (got #{input.dim}D)"
  end
end

.group_norm(input, num_groups, weight: nil, bias: nil, eps: 1e-5) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 229

def group_norm(input, num_groups, weight: nil, bias: nil, eps: 1e-5)
  Torch.group_norm(input, num_groups, weight, bias, eps, false)
end

.hardshrink(input, lambd = 0.5) ⇒ Object

activation layers



# File 'lib/torch/nn/functional.rb', line 149

def hardshrink(input, lambd = 0.5)
  Torch.hardshrink(input, lambd)
end

.hinge_embedding_loss(input, target, margin: 1.0, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 384

def hinge_embedding_loss(input, target, margin: 1.0, reduction: "mean")
  Torch.hinge_embedding_loss(input, target, margin, reduction)
end

.instance_norm(input, running_mean: nil, running_var: nil, weight: nil, bias: nil, use_input_stats: true, momentum: 0.1, eps: 1e-5) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 233

def instance_norm(input, running_mean: nil, running_var: nil, weight: nil,
  bias: nil, use_input_stats: true, momentum: 0.1, eps: 1e-5)

  Torch.instance_norm(
    input, weight, bias, running_mean, running_var,
    use_input_stats, momentum, eps, false
  )
end

.kl_div(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 388

def kl_div(input, target, reduction: "mean")
  Torch.kl_div(input, target, reduction)
end

.l1_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 392

def l1_loss(input, target, reduction: "mean")
  NN.l1_loss(input, target, reduction)
end

.layer_norm(input, normalized_shape, weight: nil, bias: nil, eps: 1e-5) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 242

def layer_norm(input, normalized_shape, weight: nil, bias: nil, eps: 1e-5)
  Torch.layer_norm(input, normalized_shape, weight, bias, eps, false)
end

.leaky_relu(input, negative_slope = 0.01) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 153

def leaky_relu(input, negative_slope = 0.01)
  NN.leaky_relu(input, negative_slope)
end

.linear(input, weight, bias) ⇒ Object

linear layers



# File 'lib/torch/nn/functional.rb', line 268

def linear(input, weight, bias)
  NN.linear(input, weight, bias)
end

.local_response_norm(input, size, alpha: 1e-4, beta: 0.75, k: 1.0) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 246

def local_response_norm(input, size, alpha: 1e-4, beta: 0.75, k: 1.0)
  dim = input.dim
  if dim < 3
    raise ArgumentError, "Expected 3D or higher dimensionality input (got #{dim} dimensions)"
  end
  div = input.mul(input).unsqueeze(1)
  if dim == 3
    div = pad(div, [0, 0, size / 2, (size - 1) / 2])
    div = avg_pool2d(div, [size, 1], stride: 1).squeeze(1)
  else
    sizes = input.size
    div = div.view(sizes[0], 1, sizes[1], sizes[2], -1)
    div = pad(div, [0, 0, 0, 0, size / 2, (size - 1) / 2])
    div = avg_pool3d(div, [size, 1, 1], stride: 1).squeeze(1)
    div = div.view(sizes)
  end
  div = div.mul(alpha).add(k).pow(beta)
  input / div
end

.log_sigmoid(input) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 157

def log_sigmoid(input)
  NN.log_sigmoid(input)
end

.log_softmax(input, dim = nil) ⇒ Object

TODO make dim keyword argument and update examples



# File 'lib/torch/nn/functional.rb', line 202

def log_softmax(input, dim = nil)
  dim ||= softmax_dim(input.dim)
  input.log_softmax(dim)
end
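
Example (a minimal sketch): passing dim explicitly is clearer than relying on the softmax_dim default. Exponentiating the result recovers probabilities that sum to 1 along the chosen dim.

require "torch"

x = Torch.randn(2, 5)
lp = Torch::NN::Functional.log_softmax(x, 1)
lp.exp.sum(1) # => each entry is ~1.0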

.margin_ranking_loss(input1, input2, target, margin: 0, reduction: "mean") ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 396

def margin_ranking_loss(input1, input2, target, margin: 0, reduction: "mean")
  raise NotImplementedYet
end

.max_pool1d(*args, **options) ⇒ Object

pooling layers



# File 'lib/torch/nn/functional.rb', line 39

def max_pool1d(*args, **options)
  return_indices = args.pop if args.size == 7
  if return_indices
    Torch.max_pool1d_with_indices(*args, **options)
  else
    Torch.max_pool1d(*args, **options)
  end
end
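
Example (a minimal sketch; it assumes the with-indices variant returns an [output, indices] pair as in PyTorch): because of the args.size == 7 check, return_indices only takes effect when all seven positional arguments are supplied (input, kernel_size, stride, padding, dilation, ceil_mode, return_indices).

require "torch"

x = Torch.randn(1, 1, 8)
out = Torch::NN::Functional.max_pool1d(x, 2)
out, indices = Torch::NN::Functional.max_pool1d(x, 2, 2, 0, 1, false, true)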

.max_pool2d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 48

def max_pool2d(*args, **options)
  return_indices = args.pop if args.size == 7
  if return_indices
    NN.max_pool2d_with_indices(*args, **options)
  else
    Torch.max_pool2d(*args, **options)
  end
end

.max_pool3d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 57

def max_pool3d(*args, **options)
  return_indices = args.pop if args.size == 7
  if return_indices
    NN.max_pool3d_with_indices(*args, **options)
  else
    Torch.max_pool3d(*args, **options)
  end
end

.max_unpool1d(input, indices, kernel_size, stride: nil, padding: 0, output_size: nil) ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 66

def max_unpool1d(input, indices, kernel_size, stride: nil, padding: 0, output_size: nil)
  raise NotImplementedYet
  kernel_size = _single(kernel_size)
  if !stride.nil?
    _stride = _single(stride)
  else
    _stride = kernel_size
  end
  padding = _single(padding)
  output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)
  output_size = output_size + [1]
  NN.max_unpool2d(input.unsqueeze(3), indices.unsqueeze(3), output_size).squeeze(3)
end

.max_unpool2d(*args, **options) ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 80

def max_unpool2d(*args, **options)
  raise NotImplementedYet
  NN.max_unpool2d(*args, **options)
end

.max_unpool3d(*args, **options) ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 85

def max_unpool3d(*args, **options)
  raise NotImplementedYet
  NN.max_unpool3d(*args, **options)
end

.mse_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 400

def mse_loss(input, target, reduction: "mean")
  NN.mse_loss(input, target, reduction)
end
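
Example (a minimal sketch): reduction accepts "mean" (the default), "sum", or "none"; with "none" the per-element losses are returned unreduced.

require "torch"

input = Torch.randn(3, 4)
target = Torch.randn(3, 4)
loss = Torch::NN::Functional.mse_loss(input, target)
per_elem = Torch::NN::Functional.mse_loss(input, target, reduction: "none")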

.multi_margin_loss(input, target, p: 1, margin: 1.0, weight: nil, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 412

def multi_margin_loss(input, target, p: 1, margin: 1.0, weight: nil, reduction: "mean")
  NN.multi_margin_loss(input, target, p, margin, weight, reduction)
end

.multilabel_margin_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 404

def multilabel_margin_loss(input, target, reduction: "mean")
  NN.multilabel_margin_loss(input, target, reduction)
end

.multilabel_soft_margin_loss(input, target, weight: nil) ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 408

def multilabel_soft_margin_loss(input, target, weight: nil)
  raise NotImplementedYet
end

.nll_loss(input, target, weight: nil, ignore_index: -100, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 416

def nll_loss(input, target, weight: nil, ignore_index: -100, reduction: "mean")
  NN.nll_loss(input, target, weight, reduction, ignore_index)
end

.pad(input, pad, mode: "constant", value: 0) ⇒ Object

padding layers

Raises:

  • (ArgumentError)


# File 'lib/torch/nn/functional.rb', line 104

def pad(input, pad, mode: "constant", value: 0)
  raise ArgumentError, "Padding length must be divisible by 2" unless pad.size % 2 == 0
  raise ArgumentError, "Padding length too large" unless pad.size / 2 <= input.dim

  if mode == "constant"
    return Torch.constant_pad_nd(input, pad, value)
  else
    raise ArgumentError, "Padding mode doesn't take in value argument" unless value == 0

    if input.dim == 3
      raise ArgumentError, "3D tensors expect 2 values for padding" unless pad.size == 2
      case mode
      when "reflect"
        NN.reflection_pad1d(input, pad)
      when "replicate"
        NN.replication_pad1d(input, pad)
      else
        raise NotImplementedYet
      end
    elsif input.dim == 4
      raise ArgumentError, "4D tensors expect 4 values for padding" unless pad.size == 4
      case mode
      when "reflect"
        NN.reflection_pad2d(input, pad)
      when "replicate"
        NN.replication_pad2d(input, pad)
      else
        raise NotImplementedYet
      end
    elsif input.dim == 5
      raise ArgumentError, "5D tensors expect 6 values for padding" unless pad.size == 6
      case mode
      when "replicate"
        NN.replication_pad3d(input, pad)
      else
        raise NotImplementedYet
      end
    else
      raise ArgumentError, "Only 3D, 4D, 5D padding with non-constant padding are supported for now"
    end
  end
end
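
Example (a minimal sketch; shapes are illustrative): the pad array is read from the last dimension backwards, two values per dimension, which is why a 4D input in "reflect" mode needs exactly 4 values.

require "torch"

x = Torch.randn(1, 3, 4, 4)
Torch::NN::Functional.pad(x, [1, 1])                        # last dim: 4 -> 6
Torch::NN::Functional.pad(x, [1, 1, 2, 2], mode: "reflect") # pads the last two dims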

.pairwise_distance(x1, x2, p: 2.0, eps: 1e-6, keepdim: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 357

def pairwise_distance(x1, x2, p: 2.0, eps: 1e-6, keepdim: false)
  Torch.pairwise_distance(x1, x2, p, eps, keepdim)
end

.poisson_nll_loss(input, target, log_input: true, full: false, eps: 1e-8, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 420

def poisson_nll_loss(input, target, log_input: true, full: false, eps: 1e-8, reduction: "mean")
  Torch.poisson_nll_loss(input, target, log_input, full, eps, reduction)
end

.prelu(input, weight) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 161

def prelu(input, weight)
  Torch.prelu(input, weight)
end

.relu(input, inplace: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 165

def relu(input, inplace: false)
  if inplace
    input.relu!
  else
    input.relu
  end
end
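
Example (a minimal sketch): with inplace: true the input tensor itself is modified via relu!, mirroring the bang-method convention used by the dropout helpers above.

require "torch"

x = Torch.tensor([-1.0, 0.0, 2.0])
Torch::NN::Functional.relu(x)                # => tensor of [0.0, 0.0, 2.0]
Torch::NN::Functional.relu(x, inplace: true) # mutates x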

.smooth_l1_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 428

def smooth_l1_loss(input, target, reduction: "mean")
  NN.smooth_l1_loss(input, target, reduction)
end

.soft_margin_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 424

def soft_margin_loss(input, target, reduction: "mean")
  NN.soft_margin_loss(input, target, reduction)
end

.softmax(input, dim: nil) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 196

def softmax(input, dim: nil)
  dim ||= softmax_dim(input.dim)
  input.softmax(dim)
end

.softmin(input, dim: nil) ⇒ Object

other activation layers



# File 'lib/torch/nn/functional.rb', line 191

def softmin(input, dim: nil)
  dim ||= softmax_dim(input.dim)
  (-input).softmax(dim)
end

.softplus(input, beta: 1, threshold: 20) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 173

def softplus(input, beta: 1, threshold: 20)
  NN.softplus(input, beta, threshold)
end

.softshrink(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 177

def softshrink(*args, **options)
  NN.softshrink(*args, **options)
end

.softsign(input) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 181

def softsign(input)
  input / (input.abs + 1)
end

.tanhshrink(input) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 185

def tanhshrink(input)
  input - input.tanh
end

.triplet_margin_loss(anchor, positive, negative, margin: 1.0, p: 2, eps: 1e-06, swap: false, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 432

def triplet_margin_loss(anchor, positive, negative, margin: 1.0, p: 2, eps: 1e-06, swap: false, reduction: "mean")
  Torch.triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction)
end

.unfold(input, kernel_size, dilation: 1, padding: 0, stride: 1) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 21

def unfold(input, kernel_size, dilation: 1, padding: 0, stride: 1)
  if input.dim == 4
    NN.im2col(input, _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride))
  else
    raise Error, "Input Error: Only 4D input Tensors are supported (got #{input.dim}D)"
  end
end
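
Example (a minimal sketch; shapes are illustrative): unfold (im2col) slides a window over a 4D input and lays each patch out as a column; fold (col2im) is its inverse for non-overlapping windows, so this round trip reconstructs x.

require "torch"

x = Torch.randn(1, 2, 4, 4)
cols = Torch::NN::Functional.unfold(x, [2, 2], stride: 2)
cols.size # => [1, 8, 4]  (2 channels * 2 * 2 values per patch, 4 patches)
back = Torch::NN::Functional.fold(cols, [4, 4], [2, 2], stride: 2)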