Class: Torch::NN::Functional

Inherits: Object
Extended by: Utils
Defined in: lib/torch/nn/functional.rb

Class Method Summary

Methods included from Utils

_ntuple, _pair, _quadrupal, _single, _triple

Class Method Details

.adaptive_avg_pool1d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 116

def adaptive_avg_pool1d(*args, **options)
  Torch.adaptive_avg_pool1d(*args, **options)
end

.adaptive_avg_pool2d(input, output_size) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 120

def adaptive_avg_pool2d(input, output_size)
  output_size = list_with_default(output_size, input.size)
  NN.adaptive_avg_pool2d(input, output_size)
end
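
A minimal usage sketch (shapes are illustrative): adaptive pooling sizes its windows so the output always has the requested spatial dimensions.

require "torch"

input = Torch.randn(1, 64, 8, 9)
output = Torch::NN::Functional.adaptive_avg_pool2d(input, [5, 7])
output.size # => [1, 64, 5, 7]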

.adaptive_avg_pool3d(input, output_size) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 125

def adaptive_avg_pool3d(input, output_size)
  output_size = list_with_default(output_size, input.size)
  NN.adaptive_avg_pool3d(input, output_size)
end

.adaptive_max_pool1d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 102

def adaptive_max_pool1d(*args, **options)
  Torch.adaptive_max_pool1d(*args, **options)
end

.adaptive_max_pool2d(input, output_size) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 106

def adaptive_max_pool2d(input, output_size)
  output_size = list_with_default(output_size, input.size)
  NN.adaptive_max_pool2d(input, output_size)
end

.adaptive_max_pool3d(input, output_size) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 111

def adaptive_max_pool3d(input, output_size)
  output_size = list_with_default(output_size, input.size)
  NN.adaptive_max_pool3d(input, output_size)
end

.alpha_dropout(input, p: 0.5, training: true, inplace: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 332

def alpha_dropout(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.alpha_dropout!(input, p, training)
  else
    Torch.alpha_dropout(input, p, training)
  end
end

.avg_pool1d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 90

def avg_pool1d(*args, **options)
  Torch.avg_pool1d(*args, **options)
end

.avg_pool2d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 94

def avg_pool2d(*args, **options)
  NN.avg_pool2d(*args, **options)
end

.avg_pool3d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 98

def avg_pool3d(*args, **options)
  NN.avg_pool3d(*args, **options)
end

.batch_norm(input, running_mean, running_var, weight: nil, bias: nil, training: false, momentum: 0.1, eps: 1e-5) ⇒ Object

normalization layers



# File 'lib/torch/nn/functional.rb', line 237

def batch_norm(input, running_mean, running_var, weight: nil, bias: nil,
  training: false, momentum: 0.1, eps: 1e-5)

  if training
    size = input.size
    size_prods = size[0]
    (size.length - 2).times do |i|
      size_prods *= size[i + 2]
    end
    if size_prods == 1
      raise ArgumentError, "Expected more than 1 value per channel when training, got input size #{size.inspect}"
    end
  end

  Torch.batch_norm(
    input, weight, bias, running_mean, running_var,
    training, momentum, eps, false
  )
end
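
A minimal sketch of calling this directly with running statistics (shapes illustrative). Per the guard above, training mode requires more than one value per channel:

require "torch"

input = Torch.randn(20, 100) # batch of 20, 100 channels
running_mean = Torch.zeros(100)
running_var = Torch.ones(100)
output = Torch::NN::Functional.batch_norm(input, running_mean, running_var, training: true)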

.bilinear(input1, input2, weight, bias) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 300

def bilinear(input1, input2, weight, bias)
  Torch.bilinear(input1, input2, weight, bias)
end

.binary_cross_entropy(input, target, weight: nil, reduction: "mean") ⇒ Object

loss functions



# File 'lib/torch/nn/functional.rb', line 391

def binary_cross_entropy(input, target, weight: nil, reduction: "mean")
  NN.binary_cross_entropy(input, target, weight, reduction)
end
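
An illustrative call; input must already contain probabilities in [0, 1] (for raw scores, use binary_cross_entropy_with_logits below):

require "torch"

input = Torch.rand(3)                  # probabilities
target = Torch.tensor([0.0, 1.0, 1.0])
loss = Torch::NN::Functional.binary_cross_entropy(input, target)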

.binary_cross_entropy_with_logits(input, target, weight: nil, reduction: "mean", pos_weight: nil) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 395

def binary_cross_entropy_with_logits(input, target, weight: nil, reduction: "mean", pos_weight: nil)
  Torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction)
end

.conv1d(*args, **options) ⇒ Object

convolution layers



# File 'lib/torch/nn/functional.rb', line 9

def conv1d(*args, **options)
  Torch.conv1d(*args, **options)
end

.conv2d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 13

def conv2d(*args, **options)
  Torch.conv2d(*args, **options)
end

.conv3d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 17

def conv3d(*args, **options)
  Torch.conv3d(*args, **options)
end

.cosine_embedding_loss(input1, input2, target, margin: 0, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 399

def cosine_embedding_loss(input1, input2, target, margin: 0, reduction: "mean")
  Torch.cosine_embedding_loss(input1, input2, target, margin, reduction)
end

.cosine_similarity(x1, x2, dim: 1, eps: 1e-8) ⇒ Object

distance functions



# File 'lib/torch/nn/functional.rb', line 381

def cosine_similarity(x1, x2, dim: 1, eps: 1e-8)
  Torch.cosine_similarity(x1, x2, dim, eps)
end

.cross_entropy(input, target, weight: nil, ignore_index: -100, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 403

def cross_entropy(input, target, weight: nil, ignore_index: -100, reduction: "mean")
  nll_loss(log_softmax(input, 1), target, weight: weight, ignore_index: ignore_index, reduction: reduction)
end
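
As the body shows, this composes log_softmax and nll_loss, so input holds raw unnormalized scores and target holds class indices. An illustrative call:

require "torch"

input = Torch.randn(3, 5)         # scores for 5 classes
target = Torch.tensor([1, 0, 4])  # class index per sample
loss = Torch::NN::Functional.cross_entropy(input, target)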

.ctc_loss(log_probs, targets, input_lengths, target_lengths, blank: 0, reduction: "mean", zero_infinity: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 407

def ctc_loss(log_probs, targets, input_lengths, target_lengths, blank: 0, reduction: "mean", zero_infinity: false)
  # call to_a on input_lengths and target_lengths for C++
  Torch.ctc_loss(log_probs, targets, input_lengths.to_a, target_lengths.to_a, blank, reduction, zero_infinity)
end

.dropout(input, p: 0.5, training: true, inplace: false) ⇒ Object

dropout layers



# File 'lib/torch/nn/functional.rb', line 306

def dropout(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.dropout!(input, p, training)
  else
    Torch.dropout(input, p, training)
  end
end
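
An illustrative call; with training: false the input passes through unchanged, and inplace: true mutates input instead of allocating a new tensor:

require "torch"

input = Torch.ones(2, 3)
Torch::NN::Functional.dropout(input, p: 0.5)          # zeroes elements with probability 0.5 (and rescales the rest)
Torch::NN::Functional.dropout(input, training: false) # identity at eval time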

.dropout2d(input, p: 0.5, training: true, inplace: false) ⇒ Object

Raises:

  • (ArgumentError)


# File 'lib/torch/nn/functional.rb', line 314

def dropout2d(input, p: 0.5, training: true, inplace: false)
  raise ArgumentError, "dropout probability has to be between 0 and 1, but got #{p}" if p < 0 || p > 1

  if inplace
    Torch.feature_dropout!(input, p, training)
  else
    Torch.feature_dropout(input, p, training)
  end
end

.dropout3d(input, p: 0.5, training: true, inplace: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 324

def dropout3d(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.feature_dropout!(input, p, training)
  else
    Torch.feature_dropout(input, p, training)
  end
end

.embedding(input, weight, padding_idx: nil, max_norm: nil, norm_type: 2.0, scale_grad_by_freq: false, sparse: false) ⇒ Object

sparse layers

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 350

def embedding(input, weight, padding_idx: nil, max_norm: nil, norm_type: 2.0, scale_grad_by_freq: false, sparse: false)
  # TODO handle max_norm and norm_type
  raise NotImplementedYet unless max_norm.nil? && norm_type == 2.0

  padding_idx ||= -1
  # weight and indices are swapped from Python interface
  Torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
end
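
An illustrative lookup (values arbitrary); as the comment in the body notes, weight and input are passed to the C++ function in the opposite order from this Ruby interface:

require "torch"

weight = Torch.randn(10, 3) # vocabulary of 10 entries, 3-dim vectors
input = Torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
Torch::NN::Functional.embedding(input, weight).size # => [2, 4, 3]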

.embedding_bag(input, weight, offsets: nil, max_norm: nil, norm_type: 2, scale_grad_by_freq: false, mode: "mean", sparse: false, per_sample_weights: nil) ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 359

def embedding_bag(input, weight, offsets: nil, max_norm: nil, norm_type: 2, scale_grad_by_freq: false, mode: "mean", sparse: false, per_sample_weights: nil)
  # TODO handle max_norm and norm_type
  raise NotImplementedYet unless max_norm.nil? && norm_type == 2.0

  mode_enum =
    case mode
    when "sum"
      0
    when "mean"
      1
    when "max"
      2
    else
      raise ArgumentError, "Unknown mode: #{mode}"
    end

  # weight and input swapped
  Torch.embedding_bag(weight, input, offsets, scale_grad_by_freq, mode_enum, sparse, per_sample_weights)
end

.feature_alpha_dropout(input, p: 0.5, training: true, inplace: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 340

def feature_alpha_dropout(input, p: 0.5, training: true, inplace: false)
  if inplace
    Torch.feature_alpha_dropout!(input, p, training)
  else
    Torch.feature_alpha_dropout(input, p, training)
  end
end

.fold(input, output_size, kernel_size, dilation: 1, padding: 0, stride: 1) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 29

def fold(input, output_size, kernel_size, dilation: 1, padding: 0, stride: 1)
  if input.dim == 3
    NN.col2im(input, _pair(output_size), _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride))
  else
    raise Error, "Input Error: Only 3D input Tensors are supported (got #{input.dim}D)"
  end
end
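
An illustrative call: with output_size [4, 5], a [2, 2] kernel, and the default stride of 1, there are 3 × 4 = 12 block positions, so the 3D input needs C × 2 × 2 rows and 12 columns per sample:

require "torch"

input = Torch.randn(1, 3 * 2 * 2, 12)
output = Torch::NN::Functional.fold(input, [4, 5], [2, 2])
output.size # => [1, 3, 4, 5]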

.group_norm(input, num_groups, weight: nil, bias: nil, eps: 1e-5) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 257

def group_norm(input, num_groups, weight: nil, bias: nil, eps: 1e-5)
  Torch.group_norm(input, num_groups, weight, bias, eps, false)
end

.hardshrink(input, lambd = 0.5) ⇒ Object

activation layers



# File 'lib/torch/nn/functional.rb', line 177

def hardshrink(input, lambd = 0.5)
  Torch.hardshrink(input, lambd)
end

.hinge_embedding_loss(input, target, margin: 1.0, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 412

def hinge_embedding_loss(input, target, margin: 1.0, reduction: "mean")
  Torch.hinge_embedding_loss(input, target, margin, reduction)
end

.instance_norm(input, running_mean: nil, running_var: nil, weight: nil, bias: nil, use_input_stats: true, momentum: 0.1, eps: 1e-5) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 261

def instance_norm(input, running_mean: nil, running_var: nil, weight: nil,
  bias: nil, use_input_stats: true, momentum: 0.1, eps: 1e-5)

  Torch.instance_norm(
      input, weight, bias, running_mean, running_var,
      use_input_stats, momentum, eps, false
  )
end

.kl_div(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 416

def kl_div(input, target, reduction: "mean")
  Torch.kl_div(input, target, reduction)
end

.l1_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 420

def l1_loss(input, target, reduction: "mean")
  NN.l1_loss(input, target, reduction)
end

.layer_norm(input, normalized_shape, weight: nil, bias: nil, eps: 1e-5) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 270

def layer_norm(input, normalized_shape, weight: nil, bias: nil, eps: 1e-5)
  Torch.layer_norm(input, normalized_shape, weight, bias, eps, false)
end

.leaky_relu(input, negative_slope = 0.01) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 181

def leaky_relu(input, negative_slope = 0.01)
  NN.leaky_relu(input, negative_slope)
end

.linear(input, weight, bias) ⇒ Object

linear layers



# File 'lib/torch/nn/functional.rb', line 296

def linear(input, weight, bias)
  NN.linear(input, weight, bias)
end

.local_response_norm(input, size, alpha: 1e-4, beta: 0.75, k: 1.0) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 274

def local_response_norm(input, size, alpha: 1e-4, beta: 0.75, k: 1.0)
  dim = input.dim
  if dim < 3
    raise ArgumentError, "Expected 3D or higher dimensionality input (got #{dim} dimensions)"
  end
  div = input.mul(input).unsqueeze(1)
  if dim == 3
    div = pad(div, [0, 0, size / 2, (size - 1) / 2])
    div = avg_pool2d(div, [size, 1], stride: 1).squeeze(1)
  else
    sizes = input.size
    div = div.view(sizes[0], 1, sizes[1], sizes[2], -1)
    div = pad(div, [0, 0, 0, 0, size / 2, (size - 1) / 2])
    div = avg_pool3d(div, [size, 1, 1], stride: 1).squeeze(1)
    div = div.view(sizes)
  end
  div = div.mul(alpha).add(k).pow(beta)
  input / div
end
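
An illustrative call on a 4D input (the 3D and higher-dimensional branches above differ only in how the averaging window is arranged):

require "torch"

input = Torch.randn(2, 4, 8, 8)
output = Torch::NN::Functional.local_response_norm(input, 2)
output.size # => [2, 4, 8, 8]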

.log_sigmoid(input) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 185

def log_sigmoid(input)
  NN.log_sigmoid(input)
end

.log_softmax(input, dim = nil) ⇒ Object

TODO make dim keyword argument and update examples



# File 'lib/torch/nn/functional.rb', line 230

def log_softmax(input, dim = nil)
  dim ||= softmax_dim(input.dim)
  input.log_softmax(dim)
end

.margin_ranking_loss(input1, input2, target, margin: 0, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 424

def margin_ranking_loss(input1, input2, target, margin: 0, reduction: "mean")
  Torch.margin_ranking_loss(input1, input2, target, margin, reduction)
end

.max_pool1d(*args, **options) ⇒ Object

pooling layers



# File 'lib/torch/nn/functional.rb', line 39

def max_pool1d(*args, **options)
  return_indices = args.pop if args.size == 7
  if return_indices
    Torch.max_pool1d_with_indices(*args, **options)
  else
    Torch.max_pool1d(*args, **options)
  end
end

.max_pool2d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 48

def max_pool2d(*args, **options)
  return_indices = args.pop if args.size == 7
  if return_indices
    NN.max_pool2d_with_indices(*args, **options)
  else
    Torch.max_pool2d(*args, **options)
  end
end
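
An illustrative call (shapes arbitrary); a [2, 2] window with the default stride halves each spatial dimension:

require "torch"

input = Torch.randn(1, 1, 4, 4)
output = Torch::NN::Functional.max_pool2d(input, [2, 2])
output.size # => [1, 1, 2, 2]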

.max_pool3d(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 57

def max_pool3d(*args, **options)
  return_indices = args.pop if args.size == 7
  if return_indices
    NN.max_pool3d_with_indices(*args, **options)
  else
    Torch.max_pool3d(*args, **options)
  end
end

.max_unpool1d(input, indices, kernel_size, stride: nil, padding: 0, output_size: nil) ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 66

def max_unpool1d(input, indices, kernel_size, stride: nil, padding: 0, output_size: nil)
  raise NotImplementedYet
  kernel_size = _single(kernel_size)
  if !stride.nil?
    _stride = _single(stride)
  else
    _stride = kernel_size
  end
  padding = _single(padding)
  output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)
  output_size = output_size + [1]
  NN.max_unpool2d(input.unsqueeze(3), indices.unsqueeze(3), output_size).squeeze(3)
end

.max_unpool2d(*args, **options) ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 80

def max_unpool2d(*args, **options)
  raise NotImplementedYet
  NN.max_unpool2d(*args, **options)
end

.max_unpool3d(*args, **options) ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 85

def max_unpool3d(*args, **options)
  raise NotImplementedYet
  NN.max_unpool3d(*args, **options)
end

.mse_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 428

def mse_loss(input, target, reduction: "mean")
  NN.mse_loss(input, target, reduction)
end

.multi_margin_loss(input, target, p: 1, margin: 1.0, weight: nil, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 440

def multi_margin_loss(input, target, p: 1, margin: 1.0, weight: nil, reduction: "mean")
  NN.multi_margin_loss(input, target, p, margin, weight, reduction)
end

.multilabel_margin_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 432

def multilabel_margin_loss(input, target, reduction: "mean")
  NN.multilabel_margin_loss(input, target, reduction)
end

.multilabel_soft_margin_loss(input, target, weight: nil) ⇒ Object

Raises:

  • (NotImplementedYet)

# File 'lib/torch/nn/functional.rb', line 436

def multilabel_soft_margin_loss(input, target, weight: nil)
  raise NotImplementedYet
end

.nll_loss(input, target, weight: nil, ignore_index: -100, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 444

def nll_loss(input, target, weight: nil, ignore_index: -100, reduction: "mean")
  NN.nll_loss(input, target, weight, reduction, ignore_index)
end

.pad(input, pad, mode: "constant", value: 0) ⇒ Object

padding layers

Raises:

  • (ArgumentError)


# File 'lib/torch/nn/functional.rb', line 132

def pad(input, pad, mode: "constant", value: 0)
  raise ArgumentError, "Padding length must be divisible by 2" unless pad.size % 2 == 0
  raise ArgumentError, "Padding length too large" unless pad.size / 2 <= input.dim

  if mode == "constant"
    return Torch.constant_pad_nd(input, pad, value)
  else
    raise ArgumentError, "Padding mode doesn't take in value argument" unless value == 0

    if input.dim == 3
      raise ArgumentError, "3D tensors expect 2 values for padding" unless pad.size == 2
      case mode
      when "reflect"
        NN.reflection_pad1d(input, pad)
      when "replicate"
        NN.replication_pad1d(input, pad)
      else
        raise NotImplementedYet
      end
    elsif input.dim == 4
      raise ArgumentError, "4D tensors expect 4 values for padding" unless pad.size == 4
      case mode
      when "reflect"
        NN.reflection_pad2d(input, pad)
      when "replicate"
        NN.replication_pad2d(input, pad)
      else
        raise NotImplementedYet
      end
    elsif input.dim == 5
      raise ArgumentError, "5D tensors expect 6 values for padding" unless pad.size == 6
      case mode
      when "replicate"
        NN.replication_pad3d(input, pad)
      else
        raise NotImplementedYet
      end
    else
      raise ArgumentError, "Only 3D, 4D, 5D padding with non-constant padding are supported for now"
    end
  end
end
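
Illustrative calls; the pad list is ordered [left, right, top, bottom] for the last two dimensions, and only "constant" mode accepts a nonzero value:

require "torch"

input = Torch.randn(1, 3, 4, 4)
Torch::NN::Functional.pad(input, [1, 1, 2, 2]).size             # => [1, 3, 8, 6]
Torch::NN::Functional.pad(input, [1, 1, 2, 2], mode: "reflect") # uses reflection_pad2d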

.pairwise_distance(x1, x2, p: 2.0, eps: 1e-6, keepdim: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 385

def pairwise_distance(x1, x2, p: 2.0, eps: 1e-6, keepdim: false)
  Torch.pairwise_distance(x1, x2, p, eps, keepdim)
end

.poisson_nll_loss(input, target, log_input: true, full: false, eps: 1e-8, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 448

def poisson_nll_loss(input, target, log_input: true, full: false, eps: 1e-8, reduction: "mean")
  Torch.poisson_nll_loss(input, target, log_input, full, eps, reduction)
end

.prelu(input, weight) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 189

def prelu(input, weight)
  Torch.prelu(input, weight)
end

.relu(input, inplace: false) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 193

def relu(input, inplace: false)
  if inplace
    input.relu!
  else
    input.relu
  end
end
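
An illustrative call showing both variants:

require "torch"

input = Torch.tensor([-1.0, 0.0, 2.0])
Torch::NN::Functional.relu(input)                # new tensor: [0.0, 0.0, 2.0]
Torch::NN::Functional.relu(input, inplace: true) # mutates input in place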

.smooth_l1_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 456

def smooth_l1_loss(input, target, reduction: "mean")
  NN.smooth_l1_loss(input, target, reduction)
end

.soft_margin_loss(input, target, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 452

def soft_margin_loss(input, target, reduction: "mean")
  NN.soft_margin_loss(input, target, reduction)
end

.softmax(input, dim: nil) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 224

def softmax(input, dim: nil)
  dim ||= softmax_dim(input.dim)
  input.softmax(dim)
end
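
An illustrative call; each slice along dim sums to 1:

require "torch"

input = Torch.randn(2, 3)
probs = Torch::NN::Functional.softmax(input, dim: 1)
probs.sum(1) # each row sums to 1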

.softmin(input, dim: nil) ⇒ Object

other activation layers



# File 'lib/torch/nn/functional.rb', line 219

def softmin(input, dim: nil)
  dim ||= softmax_dim(input.dim)
  (-input).softmax(dim)
end

.softplus(input, beta: 1, threshold: 20) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 201

def softplus(input, beta: 1, threshold: 20)
  NN.softplus(input, beta, threshold)
end

.softshrink(*args, **options) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 205

def softshrink(*args, **options)
  NN.softshrink(*args, **options)
end

.softsign(input) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 209

def softsign(input)
  input / (input.abs + 1)
end

.tanhshrink(input) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 213

def tanhshrink(input)
  input - input.tanh
end

.triplet_margin_loss(anchor, positive, negative, margin: 1.0, p: 2, eps: 1e-06, swap: false, reduction: "mean") ⇒ Object



# File 'lib/torch/nn/functional.rb', line 460

def triplet_margin_loss(anchor, positive, negative, margin: 1.0, p: 2, eps: 1e-06, swap: false, reduction: "mean")
  Torch.triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction)
end

.unfold(input, kernel_size, dilation: 1, padding: 0, stride: 1) ⇒ Object



# File 'lib/torch/nn/functional.rb', line 21

def unfold(input, kernel_size, dilation: 1, padding: 0, stride: 1)
  if input.dim == 4
    NN.im2col(input, _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride))
  else
    raise Error, "Input Error: Only 4D input Tensors are supported (got #{input.dim}D)"
  end
end
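
An illustrative call: a [2, 2] kernel over a 4 × 5 input gives 3 × 4 = 12 patch positions, each flattened to C × 2 × 2 = 12 values:

require "torch"

input = Torch.randn(1, 3, 4, 5)
blocks = Torch::NN::Functional.unfold(input, [2, 2])
blocks.size # => [1, 12, 12]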