Class: Torch::NN::Functional

Inherits: Object
Extended by: Utils
Defined in: lib/torch/nn/functional.rb

Class Method Summary

Methods included from Utils

_ntuple, _pair, _quadrupal, _single, _triple

Class Method Details

.adaptive_avg_pool1d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 116

def adaptive_avg_pool1d(*args, **options)
  Torch.adaptive_avg_pool1d(*args, **options)
end

.adaptive_avg_pool2d(input, output_size) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 120

def adaptive_avg_pool2d(input, output_size)
  output_size = list_with_default(output_size, input.size)
  NN.adaptive_avg_pool2d(input, output_size)
end
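
Example (a minimal sketch, not from the library's docs; shapes are illustrative):

  require "torch"

  input = Torch.randn(1, 3, 10, 12)  # (N, C, H, W)
  out = Torch::NN::Functional.adaptive_avg_pool2d(input, [5, 7])
  p out.size  # => [1, 3, 5, 7], regardless of the input's spatial size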

.adaptive_avg_pool3d(input, output_size) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 125

def adaptive_avg_pool3d(input, output_size)
  output_size = list_with_default(output_size, input.size)
  NN.adaptive_avg_pool3d(input, output_size)
end

.adaptive_max_pool1d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 102

def adaptive_max_pool1d(*args, **options)
  Torch.adaptive_max_pool1d(*args, **options)
end

.adaptive_max_pool2d(input, output_size) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 106

def adaptive_max_pool2d(input, output_size)
  output_size = list_with_default(output_size, input.size)
  NN.adaptive_max_pool2d(input, output_size)
end

.adaptive_max_pool3d(input, output_size) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 111

def adaptive_max_pool3d(input, output_size)
  output_size = list_with_default(output_size, input.size)
  NN.adaptive_max_pool3d(input, output_size)
end

.alpha_dropout(input, p: 0.5, training: true, inplace: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 336

def alpha_dropout(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.alpha_dropout!(input, p, training)
  else
    Torch.alpha_dropout(input, p, training)
  end
end

.avg_pool1d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 90

def avg_pool1d(*args, **options)
  Torch.avg_pool1d(*args, **options)
end

.avg_pool2d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 94

def avg_pool2d(*args, **options)
  NN.avg_pool2d(*args, **options)
end

.avg_pool3d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 98

def avg_pool3d(*args, **options)
  NN.avg_pool3d(*args, **options)
end

.batch_norm(input, running_mean, running_var, weight: nil, bias: nil, training: false, momentum: 0.1, eps: 1e-5) ⇒ Object

normalization layers



# File 'lib/torch/nn/functional.rb', line 241

def batch_norm(input, running_mean, running_var, weight: nil, bias: nil,
  training: false, momentum: 0.1, eps: 1e-5)

  if training
    size = input.size
    size_prods = size[0]
    (size.length - 2).times do |i|
      size_prods *= size[i + 2]
    end
    if size_prods == 1
      raise ArgumentError, "Expected more than 1 value per channel when training, got input size #{size.inspect}"
    end
  end

  Torch.batch_norm(
    input, weight, bias, running_mean, running_var,
    training, momentum, eps, false
  )
end
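
The training-mode check above rejects inputs with a single value per channel, since per-batch statistics would be degenerate. Example (an inference-mode sketch with assumed running statistics):

  require "torch"

  input = Torch.randn(4, 3, 8, 8)  # (N, C, H, W)
  mean = Torch.zeros(3)            # running stats, one entry per channel
  var = Torch.ones(3)
  out = Torch::NN::Functional.batch_norm(input, mean, var)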

.bilinear(input1, input2, weight, bias) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 304

def bilinear(input1, input2, weight, bias)
  Torch.bilinear(input1, input2, weight, bias)
end

.binary_cross_entropy(input, target, weight: nil, reduction: "mean") ⇒ Object

loss functions



# File 'lib/torch/nn/functional.rb', line 396

def binary_cross_entropy(input, target, weight: nil, reduction: "mean")
  NN.binary_cross_entropy(input, target, weight, reduction)
end
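
Example (a minimal sketch; input must already be probabilities, e.g. after a sigmoid):

  require "torch"

  input = Torch.rand(3)                   # values in [0, 1)
  target = Torch.tensor([1.0, 0.0, 1.0])  # same shape as input
  loss = Torch::NN::Functional.binary_cross_entropy(input, target)
  loss.item  # scalar Float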

.binary_cross_entropy_with_logits(input, target, weight: nil, reduction: "mean", pos_weight: nil) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 400

def binary_cross_entropy_with_logits(input, target, weight: nil, reduction: "mean", pos_weight: nil)
  Torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction)
end

.conv1d(*args, **options) ⇒ Object

convolution layers



# File 'lib/torch/nn/functional.rb', line 9

def conv1d(*args, **options)
  Torch.conv1d(*args, **options)
end

.conv2d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 13

def conv2d(*args, **options)
  Torch.conv2d(*args, **options)
end

.conv3d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 17

def conv3d(*args, **options)
  Torch.conv3d(*args, **options)
end
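
The conv wrappers forward their arguments directly to Torch. Example (a 2D sketch with illustrative shapes; bias is omitted):

  require "torch"

  input = Torch.randn(1, 3, 8, 8)   # (N, C_in, H, W)
  weight = Torch.randn(6, 3, 3, 3)  # (C_out, C_in, kH, kW)
  out = Torch::NN::Functional.conv2d(input, weight)
  p out.size  # => [1, 6, 6, 6] with stride 1 and no padding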

.cosine_embedding_loss(input1, input2, target, margin: 0, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 404

def cosine_embedding_loss(input1, input2, target, margin: 0, reduction: "mean")
  Torch.cosine_embedding_loss(input1, input2, target, margin, reduction)
end

.cosine_similarity(x1, x2, dim: 1, eps: 1e-8) ⇒ Object

distance functions



# File 'lib/torch/nn/functional.rb', line 386

def cosine_similarity(x1, x2, dim: 1, eps: 1e-8)
  Torch.cosine_similarity(x1, x2, dim, eps)
end

.cross_entropy(input, target, weight: nil, ignore_index: -100, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 408

def cross_entropy(input, target, weight: nil, ignore_index: -100, reduction: "mean")
  nll_loss(log_softmax(input, 1), target, weight: weight, ignore_index: ignore_index, reduction: reduction)
end
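
As the definition shows, this composes log_softmax and nll_loss. Example (a minimal sketch):

  require "torch"

  input = Torch.randn(3, 5)         # unnormalized scores for 5 classes
  target = Torch.tensor([1, 0, 4])  # class indices
  loss = Torch::NN::Functional.cross_entropy(input, target)
  # same value as:
  Torch::NN::Functional.nll_loss(Torch::NN::Functional.log_softmax(input, 1), target)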

.ctc_loss(log_probs, targets, input_lengths, target_lengths, blank: 0, reduction: "mean", zero_infinity: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 412

def ctc_loss(log_probs, targets, input_lengths, target_lengths, blank: 0, reduction: "mean", zero_infinity: false)
  # call to_a on input_lengths and target_lengths for C++
  Torch.ctc_loss(log_probs, targets, input_lengths.to_a, target_lengths.to_a, blank, reduction, zero_infinity)
end

.dropout(input, p: 0.5, training: true, inplace: false) ⇒ Object

dropout layers



# File 'lib/torch/nn/functional.rb', line 310

def dropout(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.dropout!(input, p, training)
  else
    Torch.dropout(input, p, training)
  end
end
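
Example (a minimal sketch; results are random by design):

  require "torch"

  input = Torch.ones(2, 4)
  out = Torch::NN::Functional.dropout(input, p: 0.5)
  # each element is zeroed with probability p; survivors are scaled by
  # 1 / (1 - p). With inplace: true, input itself is modified and returned.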

.dropout2d(input, p: 0.5, training: true, inplace: false) ⇒ Object

Raises:

  • (ArgumentError)


# File 'lib/torch/nn/functional.rb', line 318

def dropout2d(input, p: 0.5, training: true, inplace: false)
  raise ArgumentError, "dropout probability has to be between 0 and 1, but got #{p}" if p < 0 || p > 1

  if inplace
    Torch.feature_dropout!(input, p, training)
  else
    Torch.feature_dropout(input, p, training)
  end
end

.dropout3d(input, p: 0.5, training: true, inplace: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 328

def dropout3d(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.feature_dropout!(input, p, training)
  else
    Torch.feature_dropout(input, p, training)
  end
end

.embedding(input, weight, padding_idx: nil, max_norm: nil, norm_type: 2.0, scale_grad_by_freq: false, sparse: false) ⇒ Object

sparse layers

Raises:

  • (NotImplementedYet)


# File 'lib/torch/nn/functional.rb', line 354

def embedding(input, weight, padding_idx: nil, max_norm: nil, norm_type: 2.0, scale_grad_by_freq: false, sparse: false)
  # TODO handle max_norm and norm_type
  raise NotImplementedYet unless max_norm.nil? && norm_type == 2.0

  padding_idx ||= -1
  # weight and indices are swapped from Python interface
  Torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
end
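
Example (a minimal sketch; note that input comes before weight here, with the swap to the native argument order handled internally, as the comment above notes):

  require "torch"

  weight = Torch.randn(10, 3)                   # 10 embeddings of dimension 3
  input = Torch.tensor([[1, 2, 4], [4, 3, 9]])  # indices into weight
  out = Torch::NN::Functional.embedding(input, weight)
  p out.size  # => [2, 3, 3]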

.embedding_bag(input, weight, offsets: nil, max_norm: nil, norm_type: 2, scale_grad_by_freq: false, mode: "mean", sparse: false, per_sample_weights: nil) ⇒ Object

Raises:

  • (NotImplementedYet)
  • (ArgumentError)


# File 'lib/torch/nn/functional.rb', line 363

def embedding_bag(input, weight, offsets: nil, max_norm: nil, norm_type: 2, scale_grad_by_freq: false, mode: "mean", sparse: false, per_sample_weights: nil)
  # TODO handle max_norm and norm_type
  raise NotImplementedYet unless max_norm.nil? && norm_type == 2.0

  mode_enum =
    case mode
    when "sum"
      0
    when "mean"
      1
    when "max"
      2
    else
      raise ArgumentError, "Unknown mode: #{mode}"
    end

  # weight and input swapped
  ret, _, _, _ = Torch.embedding_bag(weight, input, offsets, scale_grad_by_freq, mode_enum, sparse, per_sample_weights)
  ret
end
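
Example (a minimal sketch; with the default mode "mean", each bag is mean-pooled):

  require "torch"

  weight = Torch.randn(10, 3)
  input = Torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
  offsets = Torch.tensor([0, 4])  # two bags: input[0..3] and input[4..7]
  out = Torch::NN::Functional.embedding_bag(input, weight, offsets: offsets)
  p out.size  # => [2, 3], one pooled row per bag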

.feature_alpha_dropout(input, p: 0.5, training: true, inplace: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 344

def feature_alpha_dropout(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.feature_alpha_dropout!(input, p, training)
  else
    Torch.feature_alpha_dropout(input, p, training)
  end
end

.fold(input, output_size, kernel_size, dilation: 1, padding: 0, stride: 1) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 29

def fold(input, output_size, kernel_size, dilation: 1, padding: 0, stride: 1)
  if input.dim == 3
    NN.col2im(input, _pair(output_size), _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride))
  else
    raise Error, "Input Error: Only 3D input Tensors are supported (got #{input.dim}D)"
  end
end
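
Example (a minimal sketch; the number of blocks L must match what the given output_size and kernel_size produce):

  require "torch"

  # (N, C * kH * kW, L); here L = (4 - 2 + 1) * (5 - 2 + 1) = 12
  blocks = Torch.randn(1, 3 * 2 * 2, 12)
  out = Torch::NN::Functional.fold(blocks, [4, 5], [2, 2])
  p out.size  # => [1, 3, 4, 5]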

.group_norm(input, num_groups, weight: nil, bias: nil, eps: 1e-5) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 261

def group_norm(input, num_groups, weight: nil, bias: nil, eps: 1e-5)
  Torch.group_norm(input, num_groups, weight, bias, eps, false)
end

.hardshrink(input, lambd = 0.5) ⇒ Object

activation layers



# File 'lib/torch/nn/functional.rb', line 177

def hardshrink(input, lambd = 0.5)
  Torch.hardshrink(input, lambd)
end

.hinge_embedding_loss(input, target, margin: 1.0, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 417

def hinge_embedding_loss(input, target, margin: 1.0, reduction: "mean")
  Torch.hinge_embedding_loss(input, target, margin, reduction)
end

.instance_norm(input, running_mean: nil, running_var: nil, weight: nil, bias: nil, use_input_stats: true, momentum: 0.1, eps: 1e-5) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 265

def instance_norm(input, running_mean: nil, running_var: nil, weight: nil,
  bias: nil, use_input_stats: true, momentum: 0.1, eps: 1e-5)

  Torch.instance_norm(
      input, weight, bias, running_mean, running_var,
      use_input_stats, momentum, eps, false
  )
end

.kl_div(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 421

def kl_div(input, target, reduction: "mean")
  Torch.kl_div(input, target, reduction)
end

.l1_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 425

def l1_loss(input, target, reduction: "mean")
  NN.l1_loss(input, target, reduction)
end

.layer_norm(input, normalized_shape, weight: nil, bias: nil, eps: 1e-5) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 274

def layer_norm(input, normalized_shape, weight: nil, bias: nil, eps: 1e-5)
  Torch.layer_norm(input, normalized_shape, weight, bias, eps, false)
end

.leaky_relu(input, negative_slope = 0.01, inplace: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 181

def leaky_relu(input, negative_slope = 0.01, inplace: false)
  if inplace
    NN.leaky_relu!(input, negative_slope)
  else
    NN.leaky_relu(input, negative_slope)
  end
end

.linear(input, weight, bias) ⇒ Object

linear layers



# File 'lib/torch/nn/functional.rb', line 300

def linear(input, weight, bias)
  NN.linear(input, weight, bias)
end

.local_response_norm(input, size, alpha: 1e-4, beta: 0.75, k: 1.0) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 278

def local_response_norm(input, size, alpha: 1e-4, beta: 0.75, k: 1.0)
  dim = input.dim
  if dim < 3
    raise ArgumentError, "Expected 3D or higher dimensionality input (got #{dim} dimensions)"
  end
  div = input.mul(input).unsqueeze(1)
  if dim == 3
    div = pad(div, [0, 0, size / 2, (size - 1) / 2])
    div = avg_pool2d(div, [size, 1], stride: 1).squeeze(1)
  else
    sizes = input.size
    div = div.view(sizes[0], 1, sizes[1], sizes[2], -1)
    div = pad(div, [0, 0, 0, 0, size / 2, (size - 1) / 2])
    div = avg_pool3d(div, [size, 1, 1], stride: 1).squeeze(1)
    div = div.view(sizes)
  end
  div = div.mul(alpha).add(k).pow(beta)
  input / div
end
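
The body above computes the standard LRN transform, b = a / (k + alpha * mean(a^2 over a size-channel window))^beta. Example (a minimal sketch):

  require "torch"

  input = Torch.randn(2, 4, 8, 8)
  out = Torch::NN::Functional.local_response_norm(input, 2)
  p out.size  # => [2, 4, 8, 8] (shape is preserved)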

.log_sigmoid(input) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 189

def log_sigmoid(input)
  NN.log_sigmoid(input)
end

.log_softmax(input, dim = nil) ⇒ Object

TODO make dim keyword argument and update examples



# File 'lib/torch/nn/functional.rb', line 234

def log_softmax(input, dim = nil)
  dim ||= softmax_dim(input.dim)
  input.log_softmax(dim)
end
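
Example (a minimal sketch; per the TODO above, dim is positional here, unlike softmax):

  require "torch"

  input = Torch.randn(2, 3)
  probs = Torch::NN::Functional.softmax(input, dim: 1)
  probs.to_a.map { |row| row.sum }  # each row sums to ~1.0
  logp = Torch::NN::Functional.log_softmax(input, 1)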

.margin_ranking_loss(input1, input2, target, margin: 0, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 429

def margin_ranking_loss(input1, input2, target, margin: 0, reduction: "mean")
  Torch.margin_ranking_loss(input1, input2, target, margin, reduction)
end

.max_pool1d(*args, **options) ⇒ Object

pooling layers



# File 'lib/torch/nn/functional.rb', line 39

def max_pool1d(*args, **options)
  return_indices = args.pop if args.size == 7
  if return_indices
    Torch.max_pool1d_with_indices(*args, **options)
  else
    Torch.max_pool1d(*args, **options)
  end
end
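
When seven positional arguments are given, the last is treated as return_indices and the _with_indices variant returns both values and indices. Example (a minimal sketch):

  require "torch"

  input = Torch.randn(1, 1, 8)
  out = Torch::NN::Functional.max_pool1d(input, 2)
  p out.size  # => [1, 1, 4] (stride defaults to the kernel size)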

.max_pool2d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 48

def max_pool2d(*args, **options)
  return_indices = args.pop if args.size == 7
  if return_indices
    NN.max_pool2d_with_indices(*args, **options)
  else
    Torch.max_pool2d(*args, **options)
  end
end

.max_pool3d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 57

def max_pool3d(*args, **options)
  return_indices = args.pop if args.size == 7
  if return_indices
    NN.max_pool3d_with_indices(*args, **options)
  else
    Torch.max_pool3d(*args, **options)
  end
end

.max_unpool1d(input, indices, kernel_size, stride: nil, padding: 0, output_size: nil) ⇒ Object

Raises:

  • (NotImplementedYet)


# File 'lib/torch/nn/functional.rb', line 66

def max_unpool1d(input, indices, kernel_size, stride: nil, padding: 0, output_size: nil)
  raise NotImplementedYet
  kernel_size = _single(kernel_size)
  if !stride.nil?
    _stride = _single(stride)
  else
    _stride = kernel_size
  end
  padding = _single(padding)
  output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)
  output_size = output_size + [1]
  NN.max_unpool2d(input.unsqueeze(3), indices.unsqueeze(3), output_size).squeeze(3)
end

.max_unpool2d(*args, **options) ⇒ Object

Raises:

  • (NotImplementedYet)


# File 'lib/torch/nn/functional.rb', line 80

def max_unpool2d(*args, **options)
  raise NotImplementedYet
  NN.max_unpool2d(*args, **options)
end

.max_unpool3d(*args, **options) ⇒ Object

Raises:

  • (NotImplementedYet)


# File 'lib/torch/nn/functional.rb', line 85

def max_unpool3d(*args, **options)
  raise NotImplementedYet
  NN.max_unpool3d(*args, **options)
end

.mse_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 433

def mse_loss(input, target, reduction: "mean")
  if target.size != input.size
    warn "Using a target size (#{target.size}) that is different to the input size (#{input.size}). This will likely lead to incorrect results due to broadcasting. Please ensure they have the same size."
  end
  NN.mse_loss(input, target, reduction)
end
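
Example (a minimal sketch; target must match input's size to avoid the broadcasting warning above):

  require "torch"

  input = Torch.randn(3, 5)
  target = Torch.randn(3, 5)
  loss = Torch::NN::Functional.mse_loss(input, target)
  loss.item  # scalar Float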

.multi_margin_loss(input, target, p: 1, margin: 1.0, weight: nil, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 448

def multi_margin_loss(input, target, p: 1, margin: 1.0, weight: nil, reduction: "mean")
  NN.multi_margin_loss(input, target, p, margin, weight, reduction)
end

.multilabel_margin_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 440

def multilabel_margin_loss(input, target, reduction: "mean")
  NN.multilabel_margin_loss(input, target, reduction)
end

.multilabel_soft_margin_loss(input, target, weight: nil) ⇒ Object

Raises:

  • (NotImplementedYet)


# File 'lib/torch/nn/functional.rb', line 444

def multilabel_soft_margin_loss(input, target, weight: nil)
  raise NotImplementedYet
end

.nll_loss(input, target, weight: nil, ignore_index: -100, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 452

def nll_loss(input, target, weight: nil, ignore_index: -100, reduction: "mean")
  NN.nll_loss(input, target, weight, reduction, ignore_index)
end

.pad(input, pad, mode: "constant", value: 0) ⇒ Object

padding layers

Raises:

  • (ArgumentError)


# File 'lib/torch/nn/functional.rb', line 132

def pad(input, pad, mode: "constant", value: 0)
  raise ArgumentError, "Padding length must be divisible by 2" unless pad.size % 2 == 0
  raise ArgumentError, "Padding length too large" unless pad.size / 2 <= input.dim

  if mode == "constant"
    return Torch.constant_pad_nd(input, pad, value)
  else
    raise ArgumentError, "Padding mode doesn't take in value argument" unless value == 0

    if input.dim == 3
      raise ArgumentError, "3D tensors expect 2 values for padding" unless pad.size == 2
      case mode
      when "reflect"
        NN.reflection_pad1d(input, pad)
      when "replicate"
        NN.replication_pad1d(input, pad)
      else
        raise NotImplementedYet
      end
    elsif input.dim == 4
      raise ArgumentError, "4D tensors expect 4 values for padding" unless pad.size == 4
      case mode
      when "reflect"
        NN.reflection_pad2d(input, pad)
      when "replicate"
        NN.replication_pad2d(input, pad)
      else
        raise NotImplementedYet
      end
    elsif input.dim == 5
      raise ArgumentError, "5D tensors expect 6 values for padding" unless pad.size == 6
      case mode
      when "replicate"
        NN.replication_pad3d(input, pad)
      else
        raise NotImplementedYet
      end
    else
      raise ArgumentError, "Only 3D, 4D, 5D padding with non-constant padding are supported for now"
    end
  end
end
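
Pad values are given in pairs starting from the last dimension; "constant" (the default, zero-filled) works for any dimensionality, while "reflect" and "replicate" dispatch to the dimension-specific kernels above. Example (a minimal sketch):

  require "torch"

  t = Torch.randn(1, 1, 3, 3)
  p Torch::NN::Functional.pad(t, [1, 1]).size                         # => [1, 1, 3, 5]
  p Torch::NN::Functional.pad(t, [1, 1, 1, 1], mode: "reflect").size  # => [1, 1, 5, 5]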

.pairwise_distance(x1, x2, p: 2.0, eps: 1e-6, keepdim: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 390

def pairwise_distance(x1, x2, p: 2.0, eps: 1e-6, keepdim: false)
  Torch.pairwise_distance(x1, x2, p, eps, keepdim)
end

.poisson_nll_loss(input, target, log_input: true, full: false, eps: 1e-8, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 456

def poisson_nll_loss(input, target, log_input: true, full: false, eps: 1e-8, reduction: "mean")
  Torch.poisson_nll_loss(input, target, log_input, full, eps, reduction)
end

.prelu(input, weight) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 193

def prelu(input, weight)
  Torch.prelu(input, weight)
end

.relu(input, inplace: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 197

def relu(input, inplace: false)
  if inplace
    input.relu!
  else
    input.relu
  end
end

.smooth_l1_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 464

def smooth_l1_loss(input, target, reduction: "mean")
  NN.smooth_l1_loss(input, target, reduction)
end

.soft_margin_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 460

def soft_margin_loss(input, target, reduction: "mean")
  NN.soft_margin_loss(input, target, reduction)
end

.softmax(input, dim: nil) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 228

def softmax(input, dim: nil)
  dim ||= softmax_dim(input.dim)
  input.softmax(dim)
end

.softmin(input, dim: nil) ⇒ Object

other activation layers



# File 'lib/torch/nn/functional.rb', line 223

def softmin(input, dim: nil)
  dim ||= softmax_dim(input.dim)
  (-input).softmax(dim)
end

.softplus(input, beta: 1, threshold: 20) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 205

def softplus(input, beta: 1, threshold: 20)
  NN.softplus(input, beta, threshold)
end

.softshrink(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 209

def softshrink(*args, **options)
  NN.softshrink(*args, **options)
end

.softsign(input) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 213

def softsign(input)
  input / (input.abs + 1)
end

.tanhshrink(input) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 217

def tanhshrink(input)
  input - input.tanh
end
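
Both softsign and tanhshrink are computed directly from their formulas above. Example (a quick numeric check; values rounded):

  require "torch"

  x = Torch.tensor([-2.0, 0.0, 2.0])
  Torch::NN::Functional.softsign(x)    # x / (1 + |x|) => ~[-0.667, 0.0, 0.667]
  Torch::NN::Functional.tanhshrink(x)  # x - tanh(x)   => ~[-1.036, 0.0, 1.036]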

.triplet_margin_loss(anchor, positive, negative, margin: 1.0, p: 2, eps: 1e-06, swap: false, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 468

def triplet_margin_loss(anchor, positive, negative, margin: 1.0, p: 2, eps: 1e-06, swap: false, reduction: "mean")
  Torch.triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction)
end

.unfold(input, kernel_size, dilation: 1, padding: 0, stride: 1) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 21

def unfold(input, kernel_size, dilation: 1, padding: 0, stride: 1)
  if input.dim == 4
    NN.im2col(input, _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride))
  else
    raise Error, "Input Error: Only 4D input Tensors are supported (got #{input.dim}D)"
  end
end
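
Example (a minimal sketch; unfold extracts sliding blocks, and fold, above, is its rough inverse):

  require "torch"

  input = Torch.randn(1, 3, 4, 5)  # (N, C, H, W)
  blocks = Torch::NN::Functional.unfold(input, [2, 2])
  p blocks.size  # => [1, 12, 12]: C * kH * kW rows, one column per block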