Class: Torch::NN::Functional

Inherits: Object
Extended by:
Utils
Defined in:
lib/torch/nn/functional.rb,
lib/torch/nn/functional_attention.rb

Methods included from Utils

_activation_fn, _clones, _ntuple, _pair, _quadrupal, _single, _triple

Class Method Details

.adaptive_avg_pool1d(*args, **options) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 116

def adaptive_avg_pool1d(*args, **options)
  Torch.adaptive_avg_pool1d(*args, **options)
end

.adaptive_avg_pool2d(input, output_size) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 120

def adaptive_avg_pool2d(input, output_size)
  output_size = list_with_default(output_size, input.size)
  NN.adaptive_avg_pool2d(input, output_size)
end
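
Adaptive pooling takes a target output size instead of a kernel size. A minimal usage sketch (not from the library docs; shapes are illustrative and output values will vary):

  require "torch"
  F = Torch::NN::Functional

  x = Torch.randn(1, 3, 8, 8)          # N x C x H x W
  y = F.adaptive_avg_pool2d(x, [4, 4]) # pool to a fixed 4x4 spatial size
  y.shape # => [1, 3, 4, 4]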

.adaptive_avg_pool3d(input, output_size) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 125

def adaptive_avg_pool3d(input, output_size)
  output_size = list_with_default(output_size, input.size)
  NN.adaptive_avg_pool3d(input, output_size)
end

.adaptive_max_pool1d(*args, **options) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 102

def adaptive_max_pool1d(*args, **options)
  Torch.adaptive_max_pool1d(*args, **options)
end

.adaptive_max_pool2d(input, output_size) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 106

def adaptive_max_pool2d(input, output_size)
  output_size = list_with_default(output_size, input.size)
  NN.adaptive_max_pool2d(input, output_size)
end

.adaptive_max_pool3d(input, output_size) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 111

def adaptive_max_pool3d(input, output_size)
  output_size = list_with_default(output_size, input.size)
  NN.adaptive_max_pool3d(input, output_size)
end

.alpha_dropout(input, p: 0.5, training: true, inplace: false) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 336

def alpha_dropout(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.alpha_dropout!(input, p, training)
  else
    Torch.alpha_dropout(input, p, training)
  end
end

.avg_pool1d(*args, **options) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 90

def avg_pool1d(*args, **options)
  Torch.avg_pool1d(*args, **options)
end

.avg_pool2d(*args, **options) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 94

def avg_pool2d(*args, **options)
  NN.avg_pool2d(*args, **options)
end

.avg_pool3d(*args, **options) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 98

def avg_pool3d(*args, **options)
  NN.avg_pool3d(*args, **options)
end

.batch_norm(input, running_mean, running_var, weight: nil, bias: nil, training: false, momentum: 0.1, eps: 1e-5) ⇒ Object

normalization layers

# File 'lib/torch/nn/functional.rb', line 241

def batch_norm(input, running_mean, running_var, weight: nil, bias: nil,
  training: false, momentum: 0.1, eps: 1e-5)

  if training
    size = input.size
    size_prods = size[0]
    (size.length - 2).times do |i|
      size_prods *= size[i + 2]
    end
    if size_prods == 1
      raise ArgumentError, "Expected more than 1 value per channel when training, got input size #{size.inspect}"
    end
  end

  Torch.batch_norm(
    input, weight, bias, running_mean, running_var,
    training, momentum, eps, false
  )
end
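
A minimal sketch of calling batch_norm directly; in practice a module such as Torch::NN::BatchNorm2d owns the running statistics, so the tensors here are illustrative:

  require "torch"
  F = Torch::NN::Functional

  x = Torch.randn(4, 3, 5, 5)   # N x C x H x W
  running_mean = Torch.zeros(3) # one entry per channel
  running_var = Torch.ones(3)

  # training: true uses batch statistics and updates the running ones in place
  out = F.batch_norm(x, running_mean, running_var, training: true)
  out.shape # => [4, 3, 5, 5]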

.bilinear(input1, input2, weight, bias) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 304

def bilinear(input1, input2, weight, bias)
  Torch.bilinear(input1, input2, weight, bias)
end

.binary_cross_entropy(input, target, weight: nil, reduction: "mean") ⇒ Object

loss functions

# File 'lib/torch/nn/functional.rb', line 396

def binary_cross_entropy(input, target, weight: nil, reduction: "mean")
  NN.binary_cross_entropy(input, target, weight, to_reduction(reduction))
end
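
binary_cross_entropy expects probabilities in (0, 1) (e.g. after a sigmoid); binary_cross_entropy_with_logits below takes raw scores instead. A small sketch:

  require "torch"
  F = Torch::NN::Functional

  input = Torch.tensor([0.8, 0.2])  # predicted probabilities
  target = Torch.tensor([1.0, 0.0])
  F.binary_cross_entropy(input, target) # scalar tensor ("mean" reduction)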

.binary_cross_entropy_with_logits(input, target, weight: nil, reduction: "mean", pos_weight: nil) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 400

def binary_cross_entropy_with_logits(input, target, weight: nil, reduction: "mean", pos_weight: nil)
  Torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, to_reduction(reduction))
end

.conv1d(*args, **options) ⇒ Object

convolution layers

# File 'lib/torch/nn/functional.rb', line 9

def conv1d(*args, **options)
  Torch.conv1d(*args, **options)
end

.conv2d(*args, **options) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 13

def conv2d(*args, **options)
  Torch.conv2d(*args, **options)
end

.conv3d(*args, **options) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 17

def conv3d(*args, **options)
  Torch.conv3d(*args, **options)
end

.cosine_embedding_loss(input1, input2, target, margin: 0, reduction: "mean") ⇒ Object

# File 'lib/torch/nn/functional.rb', line 404

def cosine_embedding_loss(input1, input2, target, margin: 0, reduction: "mean")
  Torch.cosine_embedding_loss(input1, input2, target, margin, to_reduction(reduction))
end

.cosine_similarity(x1, x2, dim: 1, eps: 1e-8) ⇒ Object

distance functions

# File 'lib/torch/nn/functional.rb', line 386

def cosine_similarity(x1, x2, dim: 1, eps: 1e-8)
  Torch.cosine_similarity(x1, x2, dim, eps)
end

.cross_entropy(input, target, weight: nil, ignore_index: -100, reduction: "mean") ⇒ Object

# File 'lib/torch/nn/functional.rb', line 408

def cross_entropy(input, target, weight: nil, ignore_index: -100, reduction: "mean")
  nll_loss(log_softmax(input, 1), target, weight: weight, ignore_index: ignore_index, reduction: reduction)
end
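
As the definition shows, cross_entropy is nll_loss composed with log_softmax over dim 1, so it takes raw logits and integer class targets. A quick sketch:

  require "torch"
  F = Torch::NN::Functional

  logits = Torch.randn(3, 5)       # 3 samples, 5 classes
  target = Torch.tensor([1, 0, 4]) # class indices
  F.cross_entropy(logits, target)
  # same value, spelled out:
  F.nll_loss(F.log_softmax(logits, 1), target)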

.ctc_loss(log_probs, targets, input_lengths, target_lengths, blank: 0, reduction: "mean", zero_infinity: false) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 412

def ctc_loss(log_probs, targets, input_lengths, target_lengths, blank: 0, reduction: "mean", zero_infinity: false)
  # call to_a on input_lengths and target_lengths for C++
  Torch.ctc_loss(log_probs, targets, input_lengths.to_a, target_lengths.to_a, blank, to_reduction(reduction), zero_infinity)
end

.dropout(input, p: 0.5, training: true, inplace: false) ⇒ Object

dropout layers

# File 'lib/torch/nn/functional.rb', line 310

def dropout(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.dropout!(input, p, training)
  else
    Torch.dropout(input, p, training)
  end
end
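
dropout zeroes each element with probability p and scales the survivors by 1/(1 - p); with training: false it leaves the input unchanged. For illustration:

  require "torch"
  F = Torch::NN::Functional

  x = Torch.ones(2, 4)
  F.dropout(x, p: 0.5)          # about half the entries zeroed, the rest scaled to 2.0
  F.dropout(x, training: false) # values unchanged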

.dropout2d(input, p: 0.5, training: true, inplace: false) ⇒ Object

Raises:

  • (ArgumentError)

# File 'lib/torch/nn/functional.rb', line 318

def dropout2d(input, p: 0.5, training: true, inplace: false)
  raise ArgumentError, "dropout probability has to be between 0 and 1, but got #{p}" if p < 0 || p > 1

  if inplace
    Torch.feature_dropout!(input, p, training)
  else
    Torch.feature_dropout(input, p, training)
  end
end

.dropout3d(input, p: 0.5, training: true, inplace: false) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 328

def dropout3d(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.feature_dropout!(input, p, training)
  else
    Torch.feature_dropout(input, p, training)
  end
end

.embedding(input, weight, padding_idx: nil, max_norm: nil, norm_type: 2.0, scale_grad_by_freq: false, sparse: false) ⇒ Object

sparse layers

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 354

def embedding(input, weight, padding_idx: nil, max_norm: nil, norm_type: 2.0, scale_grad_by_freq: false, sparse: false)
  # TODO handle max_norm and norm_type
  raise NotImplementedYet unless max_norm.nil? && norm_type == 2.0

  padding_idx ||= -1
  # weight and indices are swapped from Python interface
  Torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
end
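
Note the comment above: the underlying call takes weight first, but this method keeps the (input, weight) order of the Python API. A lookup sketch:

  require "torch"
  F = Torch::NN::Functional

  weight = Torch.randn(10, 3)                  # vocabulary of 10, embedding size 3
  input = Torch.tensor([[1, 2, 4], [4, 3, 9]]) # indices into the vocabulary
  F.embedding(input, weight).shape # => [2, 3, 3]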

.embedding_bag(input, weight, offsets: nil, max_norm: nil, norm_type: 2, scale_grad_by_freq: false, mode: "mean", sparse: false, per_sample_weights: nil) ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 363

def embedding_bag(input, weight, offsets: nil, max_norm: nil, norm_type: 2, scale_grad_by_freq: false, mode: "mean", sparse: false, per_sample_weights: nil)
  # TODO handle max_norm and norm_type
  raise NotImplementedYet unless max_norm.nil? && norm_type == 2.0

  mode_enum =
    case mode
    when "sum"
      0
    when "mean"
      1
    when "max"
      2
    else
      raise ArgumentError, "Unknown mode: #{mode}"
    end

  # weight and input swapped
  ret, _, _, _ = Torch.embedding_bag(weight, input, offsets, scale_grad_by_freq, mode_enum, sparse, per_sample_weights)
  ret
end

.feature_alpha_dropout(input, p: 0.5, training: true, inplace: false) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 344

def feature_alpha_dropout(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.feature_alpha_dropout!(input, p, training)
  else
    Torch.feature_alpha_dropout(input, p, training)
  end
end

.fold(input, output_size, kernel_size, dilation: 1, padding: 0, stride: 1) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 29

def fold(input, output_size, kernel_size, dilation: 1, padding: 0, stride: 1)
  if input.dim == 3
    NN.col2im(input, _pair(output_size), _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride))
  else
    raise Error, "Input Error: Only 3D input Tensors are supported (got #{input.dim}D)"
  end
end
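
fold (col2im) is the layout inverse of unfold (im2col, documented below): it sums flattened patches back into an image-shaped tensor. A round-trip sketch:

  require "torch"
  F = Torch::NN::Functional

  x = Torch.ones(1, 1, 4, 4)
  patches = F.unfold(x, [2, 2])       # => shape [1, 4, 9]
  y = F.fold(patches, [4, 4], [2, 2]) # overlapping patches are summed
  y.shape # => [1, 1, 4, 4]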

.group_norm(input, num_groups, weight: nil, bias: nil, eps: 1e-5) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 261

def group_norm(input, num_groups, weight: nil, bias: nil, eps: 1e-5)
  Torch.group_norm(input, num_groups, weight, bias, eps, false)
end

.hardshrink(input, lambd = 0.5) ⇒ Object

activation layers

# File 'lib/torch/nn/functional.rb', line 177

def hardshrink(input, lambd = 0.5)
  Torch.hardshrink(input, lambd)
end

.hinge_embedding_loss(input, target, margin: 1.0, reduction: "mean") ⇒ Object

# File 'lib/torch/nn/functional.rb', line 417

def hinge_embedding_loss(input, target, margin: 1.0, reduction: "mean")
  Torch.hinge_embedding_loss(input, target, margin, to_reduction(reduction))
end

.in_projection(q, k, v, w_q, w_k, w_v, b_q: nil, b_k: nil, b_v: nil) ⇒ Object

Raises:

  • (ArgumentError)

# File 'lib/torch/nn/functional_attention.rb', line 35

def in_projection(
  q, k, v,
  w_q, w_k, w_v,
  b_q: nil, b_k: nil, b_v: nil
)

  e_q, e_k, e_v = q.size(-1), k.size(-1), v.size(-1)

  raise ArgumentError, "Expecting query weights shape of #{[e_q, e_q]}, but got #{w_q.shape}" unless w_q.shape == [e_q, e_q]
  raise ArgumentError, "Expecting key weights shape of #{[e_k, e_k]}, but got #{w_k.shape}" unless w_k.shape == [e_k, e_k]
  raise ArgumentError, "Expecting value weights shape of #{[e_v, e_v]}, but got #{w_v.shape}" unless w_v.shape == [e_v, e_v]

  raise ArgumentError, "Expecting query bias shape of #{[e_q]}, but got #{b_q.shape}" if b_q && b_q.shape != [e_q]
  raise ArgumentError, "Expecting key bias shape of #{[e_k]}, but got #{b_k.shape}" if b_k && b_k.shape != [e_k]
  raise ArgumentError, "Expecting value bias shape of #{[e_v]}, but got #{b_v.shape}" if b_v && b_v.shape != [e_v]

  [linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)]
end

.in_projection_packed(q, k, v, w, b: nil) ⇒ Object

# File 'lib/torch/nn/functional_attention.rb', line 5

def in_projection_packed(q, k, v, w, b: nil)
  e = q.size(-1)

  if k.eql? v
    if q.eql? k
      # self-attention
      return linear(q, w, b).chunk(3, dim: -1)
    else
      # encoder-decoder attention
      w_q, w_kv = w.split_with_sizes([e, e * 2])
      if b.nil?
        b_q = b_kv = nil
      else
        b_q, b_kv = b.split_with_sizes([e, e * 2])
      end

      return [linear(q, w_q, b_q), *linear(k, w_kv, b_kv).chunk(2, dim: -1)]
    end
  else
    w_q, w_k, w_v = w.chunk(3)
    if b.nil?
      b_q = b_k = b_v = nil
    else
      b_q, b_k, b_v = b.chunk(3)
    end

    return [linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)]
  end
end

.instance_norm(input, running_mean: nil, running_var: nil, weight: nil, bias: nil, use_input_stats: true, momentum: 0.1, eps: 1e-5) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 265

def instance_norm(input, running_mean: nil, running_var: nil, weight: nil,
  bias: nil, use_input_stats: true, momentum: 0.1, eps: 1e-5)

  Torch.instance_norm(
      input, weight, bias, running_mean, running_var,
      use_input_stats, momentum, eps, false
  )
end

.interpolate(input, size: nil, scale_factor: nil, mode: "nearest", align_corners: nil, recompute_scale_factor: nil) ⇒ Object

vision

# File 'lib/torch/nn/functional.rb', line 474

def interpolate(input, size: nil, scale_factor: nil, mode: "nearest", align_corners: nil, recompute_scale_factor: nil)
  if ["nearest", "area"].include?(mode)
    unless align_corners.nil?
      raise ArgumentError, "align_corners option can only be set with the interpolating modes: linear | bilinear | bicubic | trilinear"
    end
  else
    if align_corners.nil?
      align_corners = false
    end
  end

  scale_factor_len = input.dim - 2
  scale_factor_list = [nil] * scale_factor_len
  # default value of recompute_scale_factor is false
  if !scale_factor.nil? && (recompute_scale_factor == false || recompute_scale_factor.nil?)
    if scale_factor.is_a?(Array)
      _scale_factor_repeated = scale_factor
    else
      _scale_factor_repeated = [scale_factor] * scale_factor_len
    end
    scale_factor_list = _scale_factor_repeated
  end

  # Give this variable a short name because it has to be repeated multiple times below.
  sfl = scale_factor_list

  closed_over_args = [input, size, scale_factor, recompute_scale_factor]
  output_size = _interp_output_size(closed_over_args)
  if input.dim == 3 && mode == "nearest"
    NN.upsample_nearest1d(input, output_size, sfl[0])
  elsif input.dim == 4 && mode == "nearest"
    NN.upsample_nearest2d(input, output_size, sfl[0], sfl[1])
  elsif input.dim == 5 && mode == "nearest"
    NN.upsample_nearest3d(input, output_size, sfl[0], sfl[1], sfl[2])
  elsif input.dim == 3 && mode == "area"
    adaptive_avg_pool1d(input, output_size)
  elsif input.dim == 4 && mode == "area"
    adaptive_avg_pool2d(input, output_size)
  elsif input.dim == 5 && mode == "area"
    adaptive_avg_pool3d(input, output_size)
  elsif input.dim == 3 && mode == "linear"
    # align_corners is non-nil here (set above)
    NN.upsample_linear1d(input, output_size, align_corners, sfl[0])
  elsif input.dim == 3 && mode == "bilinear"
    raise ArgumentError, "Got 3D input, but bilinear mode needs 4D input"
  elsif input.dim == 3 && mode == "trilinear"
    raise ArgumentError, "Got 3D input, but trilinear mode needs 5D input"
  elsif input.dim == 4 && mode == "linear"
    raise ArgumentError, "Got 4D input, but linear mode needs 3D input"
  elsif input.dim == 4 && mode == "bilinear"
    # align_corners is non-nil here (set above)
    NN.upsample_bilinear2d(input, output_size, align_corners, sfl[0], sfl[1])
  elsif input.dim == 4 && mode == "trilinear"
    raise ArgumentError, "Got 4D input, but trilinear mode needs 5D input"
  elsif input.dim == 5 && mode == "linear"
    raise ArgumentError, "Got 5D input, but linear mode needs 3D input"
  elsif input.dim == 5 && mode == "bilinear"
    raise ArgumentError, "Got 5D input, but bilinear mode needs 4D input"
  elsif input.dim == 5 && mode == "trilinear"
    # align_corners is non-nil here (set above)
    NN.upsample_trilinear3d(input, output_size, align_corners, sfl[0], sfl[1], sfl[2])
  elsif input.dim == 4 && mode == "bicubic"
    # align_corners is non-nil here (set above)
    NN.upsample_bicubic2d(input, output_size, align_corners, sfl[0], sfl[1])
  else
    raise ArgumentError, "Input Error: Only 3D, 4D and 5D input Tensors supported (got #{input.dim}D) for the modes: nearest | linear | bilinear | bicubic | trilinear (got #{mode})"
  end
end
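
A sketch of two common paths through interpolate, assuming 4D NCHW input (modes and sizes are illustrative):

  require "torch"
  F = Torch::NN::Functional

  x = Torch.randn(1, 1, 4, 4)
  F.interpolate(x, size: [8, 8], mode: "nearest").shape     # => [1, 1, 8, 8]
  F.interpolate(x, scale_factor: 2, mode: "bilinear").shape # => [1, 1, 8, 8]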

.kl_div(input, target, reduction: "mean") ⇒ Object

# File 'lib/torch/nn/functional.rb', line 421

def kl_div(input, target, reduction: "mean")
  Torch.kl_div(input, target, to_reduction(reduction))
end

.l1_loss(input, target, reduction: "mean") ⇒ Object

# File 'lib/torch/nn/functional.rb', line 425

def l1_loss(input, target, reduction: "mean")
  NN.l1_loss(input, target, to_reduction(reduction))
end

.layer_norm(input, normalized_shape, weight: nil, bias: nil, eps: 1e-5) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 274

def layer_norm(input, normalized_shape, weight: nil, bias: nil, eps: 1e-5)
  Torch.layer_norm(input, normalized_shape, weight, bias, eps, false)
end

.leaky_relu(input, negative_slope = 0.01, inplace: false) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 181

def leaky_relu(input, negative_slope = 0.01, inplace: false)
  if inplace
    NN.leaky_relu!(input, negative_slope)
  else
    NN.leaky_relu(input, negative_slope)
  end
end

.linear(input, weight, bias) ⇒ Object

linear layers

# File 'lib/torch/nn/functional.rb', line 300

def linear(input, weight, bias)
  NN.linear(input, weight, bias)
end
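
Unlike the Python version, bias is a required positional argument here. A sketch (weight is out_features x in_features):

  require "torch"
  F = Torch::NN::Functional

  x = Torch.randn(2, 4)
  weight = Torch.randn(3, 4)
  bias = Torch.zeros(3)
  F.linear(x, weight, bias).shape # => [2, 3]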

.local_response_norm(input, size, alpha: 1e-4, beta: 0.75, k: 1.0) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 278

def local_response_norm(input, size, alpha: 1e-4, beta: 0.75, k: 1.0)
  dim = input.dim
  if dim < 3
    raise ArgumentError, "Expected 3D or higher dimensionality input (got #{dim} dimensions)"
  end
  div = input.mul(input).unsqueeze(1)
  if dim == 3
    div = pad(div, [0, 0, size / 2, (size - 1) / 2])
    div = avg_pool2d(div, [size, 1], stride: 1).squeeze(1)
  else
    sizes = input.size
    div = div.view(sizes[0], 1, sizes[1], sizes[2], -1)
    div = pad(div, [0, 0, 0, 0, size / 2, (size - 1) / 2])
    div = avg_pool3d(div, [size, 1, 1], stride: 1).squeeze(1)
    div = div.view(sizes)
  end
  div = div.mul(alpha).add(k).pow(beta)
  input / div
end

.log_sigmoid(input) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 189

def log_sigmoid(input)
  NN.log_sigmoid(input)
end

.log_softmax(input, dim = nil) ⇒ Object

TODO make dim keyword argument and update examples

# File 'lib/torch/nn/functional.rb', line 234

def log_softmax(input, dim = nil)
  dim ||= softmax_dim(input.dim)
  input.log_softmax(dim)
end

.margin_ranking_loss(input1, input2, target, margin: 0, reduction: "mean") ⇒ Object

# File 'lib/torch/nn/functional.rb', line 429

def margin_ranking_loss(input1, input2, target, margin: 0, reduction: "mean")
  Torch.margin_ranking_loss(input1, input2, target, margin, to_reduction(reduction))
end

.max_pool1d(*args, **options) ⇒ Object

pooling layers

# File 'lib/torch/nn/functional.rb', line 39

def max_pool1d(*args, **options)
  return_indices = args.pop if args.size == 7
  if return_indices
    Torch.max_pool1d_with_indices(*args, **options)
  else
    Torch.max_pool1d(*args, **options)
  end
end

.max_pool2d(*args, **options) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 48

def max_pool2d(*args, **options)
  return_indices = args.pop if args.size == 7
  if return_indices
    NN.max_pool2d_with_indices(*args, **options)
  else
    Torch.max_pool2d(*args, **options)
  end
end
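
A plain call; per the implementation above, passing return_indices as a seventh positional argument switches to the _with_indices variant:

  require "torch"
  F = Torch::NN::Functional

  x = Torch.randn(1, 1, 4, 4)
  F.max_pool2d(x, [2, 2]).shape # => [1, 1, 2, 2]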

.max_pool3d(*args, **options) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 57

def max_pool3d(*args, **options)
  return_indices = args.pop if args.size == 7
  if return_indices
    NN.max_pool3d_with_indices(*args, **options)
  else
    Torch.max_pool3d(*args, **options)
  end
end

.max_unpool1d(input, indices, kernel_size, stride: nil, padding: 0, output_size: nil) ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 66

def max_unpool1d(input, indices, kernel_size, stride: nil, padding: 0, output_size: nil)
  raise NotImplementedYet
  kernel_size = _single(kernel_size)
  if !stride.nil?
    _stride = _single(stride)
  else
    _stride = kernel_size
  end
  padding = _single(padding)
  output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)
  output_size = output_size + [1]
  NN.max_unpool2d(input.unsqueeze(3), indices.unsqueeze(3), output_size).squeeze(3)
end

.max_unpool2d(*args, **options) ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 80

def max_unpool2d(*args, **options)
  raise NotImplementedYet
  NN.max_unpool2d(*args, **options)
end

.max_unpool3d(*args, **options) ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 85

def max_unpool3d(*args, **options)
  raise NotImplementedYet
  NN.max_unpool3d(*args, **options)
end

.mse_loss(input, target, reduction: "mean") ⇒ Object

# File 'lib/torch/nn/functional.rb', line 433

def mse_loss(input, target, reduction: "mean")
  if target.size != input.size
    warn "Using a target size (#{target.size}) that is different to the input size (#{input.size}). This will likely lead to incorrect results due to broadcasting. Please ensure they have the same size."
  end
  NN.mse_loss(input, target, to_reduction(reduction))
end
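
A worked example of the reductions ("mean" is the default):

  require "torch"
  F = Torch::NN::Functional

  input = Torch.tensor([1.0, 2.0, 3.0])
  target = Torch.tensor([1.0, 2.0, 5.0])
  F.mse_loss(input, target)                   # => (0 + 0 + 4) / 3
  F.mse_loss(input, target, reduction: "sum") # => 4.0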

.multi_head_attention_forward(query, key, value, embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, out_proj_bias, training: true, key_padding_mask: nil, need_weights: true, attn_mask: nil, use_separate_proj_weight: false, q_proj_weight: nil, k_proj_weight: nil, v_proj_weight: nil, static_k: nil, static_v: nil) ⇒ Object

Raises:

  • (ArgumentError)

# File 'lib/torch/nn/functional_attention.rb', line 73

def multi_head_attention_forward(
  query, key, value,
  embed_dim_to_check, num_heads,
  in_proj_weight, in_proj_bias,
  bias_k, bias_v,
  add_zero_attn,
  dropout_p,
  out_proj_weight, out_proj_bias,
  training: true,
  key_padding_mask: nil,
  need_weights: true,
  attn_mask: nil,
  use_separate_proj_weight: false,
  q_proj_weight: nil, k_proj_weight: nil, v_proj_weight: nil,
  static_k: nil, static_v: nil
)

  tgt_len, bsz, embed_dim = query.shape
  src_len = key.shape.first

  raise ArgumentError, "Was expecting embedding dimension of #{embed_dim_to_check}, but got #{embed_dim}" unless embed_dim == embed_dim_to_check

  head_dim = if embed_dim.is_a?(Torch::Tensor)
    embed_dim.div(num_heads, rounding_mode: "trunc")
  else
    embed_dim.div(num_heads)
  end

  if use_separate_proj_weight
    raise ArgumentError, "Key's sequence and batch dims #{key.shape[0...2]} do not match value's #{value.shape[0...2]}" unless key.shape[0...2] == value.shape[0...2]
  else
    raise ArgumentError, "Key shape #{key.shape} does not match value shape #{value.shape}" unless key.shape == value.shape
  end

  # compute in-projection
  q, k, v =
    if use_separate_proj_weight
      raise ArgumentError, "use_separate_proj_weight is true but q_proj_weight is nil" unless q_proj_weight
      raise ArgumentError, "use_separate_proj_weight is true but k_proj_weight is nil" unless k_proj_weight
      raise ArgumentError, "use_separate_proj_weight is true but v_proj_weight is nil" unless v_proj_weight

      if in_proj_bias
        b_q, b_k, b_v = in_proj_bias.chunk(3)
      else
        b_q = b_k = b_v = nil
      end

      in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q: b_q, b_k: b_k, b_v: b_v)
    else
      in_projection_packed(query, key, value, in_proj_weight, b: in_proj_bias)
    end

  # prep attention mask
  if attn_mask
    if attn_mask.dtype == :uint8
      puts "[WARN] Byte tensor for attn_mask in Multihead Attention is deprecated. Use bool tensor instead."
      attn_mask = attn_mask.bool
    else
      raise ArgumentError, "Only float, byte, and bool types are supported for attn_mask, not #{attn_mask.dtype}" unless attn_mask.floating_point? || attn_mask.dtype == :bool
    end

    if attn_mask.dim == 2
      correct_2d_size = [tgt_len, src_len]
      raise ArgumentError, "The shape of the 2D attn_mask is #{attn_mask.shape}, but should be #{correct_2d_size}." unless attn_mask.shape == correct_2d_size

      attn_mask = attn_mask.unsqueeze(0)
    elsif attn_mask.dim == 3
      correct_3d_size = [bsz * num_heads, tgt_len, src_len]
      raise ArgumentError, "The shape of the 3D attn_mask is #{attn_mask.shape}, but should be #{correct_3d_size}." unless attn_mask.shape == correct_3d_size
    else
      raise ArgumentError, "attn_mask's dimension #{attn_mask.dim} is not supported"
    end
  end

  # prep key padding mask
  if key_padding_mask && key_padding_mask.dtype == :uint8
    puts "[WARN] Byte tensor for key_padding_mask in Multihead Attention is deprecated. Use bool tensor instead."
    key_padding_mask = key_padding_mask.bool
  end

  # add bias along batch dimension (currently second)
  if bias_k && bias_v
    raise ArgumentError, "bias cannot be added to static key." if static_k
    raise ArgumentError, "bias cannot be added to static value." if static_v

    k = Torch.cat([k, bias_k.repeat(1, bsz, 1)])
    v = Torch.cat([v, bias_v.repeat(1, bsz, 1)])

    attn_mask = pad(attn_mask, [0, 1]) if attn_mask
    key_padding_mask = pad(key_padding_mask, [0, 1]) if key_padding_mask
  else
    raise ArgumentError unless bias_k.nil?
    raise ArgumentError unless bias_v.nil?
  end

  # reshape q, k, v for multihead attention and make them batch-first
  q = q.contiguous.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)

  if static_k.nil?
    k = k.contiguous.view(-1, bsz * num_heads, head_dim).transpose(0, 1)
  else
    raise ArgumentError, "Expecting static_k.size(0) of #{bsz * num_heads}, but got #{static_k.size(0)}" unless static_k.size(0) == bsz * num_heads
    raise ArgumentError, "Expecting static_k.size(2) of #{head_dim}, but got #{static_k.size(2)}" unless static_k.size(2) == head_dim

    k = static_k
  end

  if static_v.nil?
    v = v.contiguous.view(-1, bsz * num_heads, head_dim).transpose(0, 1)
  else
    raise ArgumentError, "Expecting static_v.size(0) of #{bsz * num_heads}, but got #{static_v.size(0)}" unless static_v.size(0) == bsz * num_heads
    raise ArgumentError, "Expecting static_v.size(2) of #{head_dim}, but got #{static_v.size(2)}" unless static_v.size(2) == head_dim

    v = static_v
  end

  # add zero attention along batch dimension (now first)
  if add_zero_attn
    zero_attn_shape = [bsz * num_heads, 1, head_dim]
    k = Torch.cat([k, Torch.zeros(zero_attn_shape, dtype: k.dtype, device: k.device)], dim: 1)
    v = Torch.cat([v, Torch.zeros(zero_attn_shape, dtype: v.dtype, device: v.device)], dim: 1)

    attn_mask = pad(attn_mask, [0, 1]) if attn_mask
    key_padding_mask = pad(key_padding_mask, [0, 1]) if key_padding_mask
  end

  # update source sequence length after adjustments
  src_len = k.size(1)

  # merge key padding and attention masks
  if key_padding_mask
    raise ArgumentError, "Expecting key_padding_mask shape of #{[bsz, src_len]}, but got #{key_padding_mask.shape}" unless key_padding_mask.shape == [bsz, src_len]

    key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len).expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)

    attn_mask = if attn_mask.nil?
      key_padding_mask
    elsif attn_mask.dtype == :bool
      attn_mask.logical_or(key_padding_mask)
    else
      attn_mask.masked_fill(key_padding_mask, -Float::INFINITY)
    end
  end

  # convert mask to float
  if attn_mask && attn_mask.dtype == :bool
    new_attn_mask = Torch.zeros_like(attn_mask, dtype: :float32)
    attn_mask = new_attn_mask.masked_fill(attn_mask, -Float::INFINITY)
  end

  dropout_p = 0.0 unless training

  # (deep breath) calculate attention and out projection
  attn_output, attn_output_weights = scaled_dot_product_attention(q, k, v, attn_mask: attn_mask, dropout_p: dropout_p)
  attn_output = attn_output.transpose(0, 1).contiguous.view(tgt_len, bsz, embed_dim)
  attn_output = linear(attn_output, out_proj_weight, out_proj_bias)

  if need_weights
    # average attention weights over heads
    attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
    [attn_output, attn_output_weights.sum(dim: 1) / num_heads]
  else
    [attn_output, nil]
  end
end
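
A minimal self-attention sketch with randomly initialized projections; in practice Torch::NN::MultiheadAttention owns these parameters, so the weights and sizes here are illustrative. Inputs are (seq_len, batch, embed_dim):

  require "torch"
  F = Torch::NN::Functional

  embed_dim = 8
  num_heads = 2
  x = Torch.randn(4, 1, embed_dim) # tgt_len x batch x embed

  in_proj_weight = Torch.randn(3 * embed_dim, embed_dim)
  in_proj_bias = Torch.zeros(3 * embed_dim)
  out_proj_weight = Torch.randn(embed_dim, embed_dim)
  out_proj_bias = Torch.zeros(embed_dim)

  attn_output, attn_weights = F.multi_head_attention_forward(
    x, x, x,          # self-attention: query = key = value
    embed_dim, num_heads,
    in_proj_weight, in_proj_bias,
    nil, nil,         # bias_k, bias_v
    false,            # add_zero_attn
    0.0,              # dropout_p
    out_proj_weight, out_proj_bias,
    training: false
  )
  attn_output.shape  # => [4, 1, 8]
  attn_weights.shape # => [1, 4, 4]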

.multi_margin_loss(input, target, p: 1, margin: 1.0, weight: nil, reduction: "mean") ⇒ Object

# File 'lib/torch/nn/functional.rb', line 448

def multi_margin_loss(input, target, p: 1, margin: 1.0, weight: nil, reduction: "mean")
  NN.multi_margin_loss(input, target, p, margin, weight, to_reduction(reduction))
end

.multilabel_margin_loss(input, target, reduction: "mean") ⇒ Object

# File 'lib/torch/nn/functional.rb', line 440

def multilabel_margin_loss(input, target, reduction: "mean")
  NN.multilabel_margin_loss(input, target, to_reduction(reduction))
end

.multilabel_soft_margin_loss(input, target, weight: nil) ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 444

def multilabel_soft_margin_loss(input, target, weight: nil)
  raise NotImplementedYet
end

.nll_loss(input, target, weight: nil, ignore_index: -100, reduction: "mean") ⇒ Object

# File 'lib/torch/nn/functional.rb', line 452

def nll_loss(input, target, weight: nil, ignore_index: -100, reduction: "mean")
  NN.nll_loss(input, target, weight, to_reduction(reduction), ignore_index)
end

.pad(input, pad, mode: "constant", value: 0) ⇒ Object

padding layers

Raises:

  • (ArgumentError)

# File 'lib/torch/nn/functional.rb', line 132

def pad(input, pad, mode: "constant", value: 0)
  raise ArgumentError, "Padding length must be divisible by 2" unless pad.size % 2 == 0
  raise ArgumentError, "Padding length too large" unless pad.size / 2 <= input.dim

  if mode == "constant"
    return Torch.constant_pad_nd(input, pad, value)
  else
    raise ArgumentError, "Padding mode doesn't take in value argument" unless value == 0

    if input.dim == 3
      raise ArgumentError, "3D tensors expect 2 values for padding" unless pad.size == 2
      case mode
      when "reflect"
        NN.reflection_pad1d(input, pad)
      when "replicate"
        NN.replication_pad1d(input, pad)
      else
        raise NotImplementedYet
      end
    elsif input.dim == 4
      raise ArgumentError, "4D tensors expect 4 values for padding" unless pad.size == 4
      case mode
      when "reflect"
        NN.reflection_pad2d(input, pad)
      when "replicate"
        NN.replication_pad2d(input, pad)
      else
        raise NotImplementedYet
      end
    elsif input.dim == 5
      raise ArgumentError, "5D tensors expect 6 values for padding" unless pad.size == 6
      case mode
      when "replicate"
        NN.replication_pad3d(input, pad)
      else
        raise NotImplementedYet
      end
    else
      raise ArgumentError, "Only 3D, 4D, 5D padding with non-constant padding are supported for now"
    end
  end
end
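
The pad array is read in (left, right) pairs starting from the last dimension, as in PyTorch. For example:

  require "torch"
  F = Torch::NN::Functional

  x = Torch.randn(1, 2, 3)
  F.pad(x, [1, 1]).shape                  # => [1, 2, 5] (constant zero padding)
  F.pad(x, [1, 1], mode: "reflect").shape # => [1, 2, 5]
  F.pad(x, [1, 1, 2, 2]).shape            # => [1, 6, 5]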

.pairwise_distance(x1, x2, p: 2.0, eps: 1e-6, keepdim: false) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 390

def pairwise_distance(x1, x2, p: 2.0, eps: 1e-6, keepdim: false)
  Torch.pairwise_distance(x1, x2, p, eps, keepdim)
end

.poisson_nll_loss(input, target, log_input: true, full: false, eps: 1e-8, reduction: "mean") ⇒ Object

# File 'lib/torch/nn/functional.rb', line 456

def poisson_nll_loss(input, target, log_input: true, full: false, eps: 1e-8, reduction: "mean")
  Torch.poisson_nll_loss(input, target, log_input, full, eps, to_reduction(reduction))
end

.prelu(input, weight) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 193

def prelu(input, weight)
  Torch.prelu(input, weight)
end

.relu(input, inplace: false) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 197

def relu(input, inplace: false)
  if inplace
    input.relu!
  else
    input.relu
  end
end

.scaled_dot_product_attention(q, k, v, attn_mask: nil, dropout_p: 0.0) ⇒ Object

# File 'lib/torch/nn/functional_attention.rb', line 54

def scaled_dot_product_attention(
  q, k, v,
  attn_mask: nil, dropout_p: 0.0
)

  _b, _nt, e = q.shape

  q = q / Math.sqrt(e)

  attn = Torch.bmm(q, k.transpose(-2, -1))
  attn += attn_mask if attn_mask
  attn = softmax(attn, dim: -1)
  attn = dropout(attn, p: dropout_p) if dropout_p > 0

  output = Torch.bmm(attn, v)

  [output, attn]
end
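
Shapes follow the bmm-based implementation above: q is (batch, target_len, embed) and k, v are (batch, source_len, embed). A sketch:

  require "torch"
  F = Torch::NN::Functional

  q = Torch.randn(2, 4, 8)
  k = Torch.randn(2, 6, 8)
  v = Torch.randn(2, 6, 8)

  output, attn = F.scaled_dot_product_attention(q, k, v)
  output.shape # => [2, 4, 8]
  attn.shape   # => [2, 4, 6]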

.smooth_l1_loss(input, target, reduction: "mean") ⇒ Object

# File 'lib/torch/nn/functional.rb', line 464

def smooth_l1_loss(input, target, reduction: "mean")
  NN.smooth_l1_loss(input, target, to_reduction(reduction))
end

.soft_margin_loss(input, target, reduction: "mean") ⇒ Object

# File 'lib/torch/nn/functional.rb', line 460

def soft_margin_loss(input, target, reduction: "mean")
  NN.soft_margin_loss(input, target, to_reduction(reduction))
end

.softmax(input, dim: nil) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 228

def softmax(input, dim: nil)
  dim ||= softmax_dim(input.dim)
  input.softmax(dim)
end
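
When dim is omitted, softmax_dim picks a default based on the input rank. Note that softmax takes dim as a keyword while log_softmax (documented above) still takes it positionally. For example:

  require "torch"
  F = Torch::NN::Functional

  x = Torch.tensor([[1.0, 2.0, 3.0]])
  F.softmax(x, dim: 1).sum # each row sums to 1.0
  F.log_softmax(x, 1)      # dim is positional here (see the TODO above)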

.softmin(input, dim: nil) ⇒ Object

other activation layers

# File 'lib/torch/nn/functional.rb', line 223

def softmin(input, dim: nil)
  dim ||= softmax_dim(input.dim)
  (-input).softmax(dim)
end

.softplus(input, beta: 1, threshold: 20) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 205

def softplus(input, beta: 1, threshold: 20)
  NN.softplus(input, beta, threshold)
end

.softshrink(*args, **options) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 209

def softshrink(*args, **options)
  NN.softshrink(*args, **options)
end

.softsign(input) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 213

def softsign(input)
  input / (input.abs + 1)
end

.tanhshrink(input) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 217

def tanhshrink(input)
  input - input.tanh
end

.triplet_margin_loss(anchor, positive, negative, margin: 1.0, p: 2, eps: 1e-06, swap: false, reduction: "mean") ⇒ Object

# File 'lib/torch/nn/functional.rb', line 468

def triplet_margin_loss(anchor, positive, negative, margin: 1.0, p: 2, eps: 1e-06, swap: false, reduction: "mean")
  Torch.triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, to_reduction(reduction))
end

.unfold(input, kernel_size, dilation: 1, padding: 0, stride: 1) ⇒ Object

# File 'lib/torch/nn/functional.rb', line 21

def unfold(input, kernel_size, dilation: 1, padding: 0, stride: 1)
  if input.dim == 4
    NN.im2col(input, _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride))
  else
    raise Error, "Input Error: Only 4D input Tensors are supported (got #{input.dim}D)"
  end
end