Module: Torch

Defined in:
lib/torch/optim/sgd.rb,
lib/torch.rb,
lib/torch/hub.rb,
lib/torch/nn/gru.rb,
lib/torch/nn/rnn.rb,
lib/torch/random.rb,
lib/torch/tensor.rb,
lib/torch/nn/fold.rb,
lib/torch/nn/init.rb,
lib/torch/nn/loss.rb,
lib/torch/nn/lstm.rb,
lib/torch/nn/relu.rb,
lib/torch/nn/tanh.rb,
lib/torch/version.rb,
lib/torch/nn/prelu.rb,
lib/torch/nn/utils.rb,
lib/torch/inspector.rb,
lib/torch/nn/conv1d.rb,
lib/torch/nn/conv2d.rb,
lib/torch/nn/conv3d.rb,
lib/torch/nn/convnd.rb,
lib/torch/nn/linear.rb,
lib/torch/nn/module.rb,
lib/torch/nn/unfold.rb,
lib/torch/nn/dropout.rb,
lib/torch/nn/l1_loss.rb,
lib/torch/nn/sigmoid.rb,
lib/torch/nn/softmax.rb,
lib/torch/nn/softmin.rb,
lib/torch/optim/adam.rb,
lib/torch/optim/asgd.rb,
lib/torch/nn/bce_loss.rb,
lib/torch/nn/bilinear.rb,
lib/torch/nn/ctc_loss.rb,
lib/torch/nn/identity.rb,
lib/torch/nn/mse_loss.rb,
lib/torch/nn/nll_loss.rb,
lib/torch/nn/rnn_base.rb,
lib/torch/nn/softplus.rb,
lib/torch/nn/softsign.rb,
lib/torch/optim/adamw.rb,
lib/torch/optim/rprop.rb,
lib/torch/nn/dropout2d.rb,
lib/torch/nn/dropout3d.rb,
lib/torch/nn/dropoutnd.rb,
lib/torch/nn/embedding.rb,
lib/torch/nn/lp_pool1d.rb,
lib/torch/nn/lp_pool2d.rb,
lib/torch/nn/lp_poolnd.rb,
lib/torch/nn/parameter.rb,
lib/torch/nn/softmax2d.rb,
lib/torch/optim/adamax.rb,
lib/torch/native/parser.rb,
lib/torch/nn/avg_pool1d.rb,
lib/torch/nn/avg_pool2d.rb,
lib/torch/nn/avg_pool3d.rb,
lib/torch/nn/avg_poolnd.rb,
lib/torch/nn/batch_norm.rb,
lib/torch/nn/functional.rb,
lib/torch/nn/group_norm.rb,
lib/torch/nn/hardshrink.rb,
lib/torch/nn/layer_norm.rb,
lib/torch/nn/leaky_relu.rb,
lib/torch/nn/max_pool1d.rb,
lib/torch/nn/max_pool2d.rb,
lib/torch/nn/max_pool3d.rb,
lib/torch/nn/max_poolnd.rb,
lib/torch/nn/sequential.rb,
lib/torch/nn/softshrink.rb,
lib/torch/nn/tanhshrink.rb,
lib/torch/nn/zero_pad2d.rb,
lib/torch/optim/adagrad.rb,
lib/torch/optim/rmsprop.rb,
lib/torch/nn/kl_div_loss.rb,
lib/torch/nn/log_sigmoid.rb,
lib/torch/nn/log_softmax.rb,
lib/torch/optim/adadelta.rb,
lib/torch/native/function.rb,
lib/torch/nn/batch_norm1d.rb,
lib/torch/nn/batch_norm2d.rb,
lib/torch/nn/batch_norm3d.rb,
lib/torch/nn/max_unpool1d.rb,
lib/torch/nn/max_unpool2d.rb,
lib/torch/nn/max_unpool3d.rb,
lib/torch/nn/max_unpoolnd.rb,
lib/torch/optim/optimizer.rb,
lib/torch/native/generator.rb,
lib/torch/nn/alpha_dropout.rb,
lib/torch/nn/embedding_bag.rb,
lib/torch/nn/instance_norm.rb,
lib/torch/nn/weighted_loss.rb,
lib/torch/native/dispatcher.rb,
lib/torch/nn/constant_pad1d.rb,
lib/torch/nn/constant_pad2d.rb,
lib/torch/nn/constant_pad3d.rb,
lib/torch/nn/constant_padnd.rb,
lib/torch/nn/smooth_l1_loss.rb,
lib/torch/nn/instance_norm1d.rb,
lib/torch/nn/instance_norm2d.rb,
lib/torch/nn/instance_norm3d.rb,
lib/torch/nn/poisson_nll_loss.rb,
lib/torch/nn/reflection_pad1d.rb,
lib/torch/nn/reflection_pad2d.rb,
lib/torch/nn/reflection_padnd.rb,
lib/torch/nn/soft_margin_loss.rb,
lib/torch/nn/cosine_similarity.rb,
lib/torch/nn/multi_margin_loss.rb,
lib/torch/nn/pairwise_distance.rb,
lib/torch/nn/replication_pad1d.rb,
lib/torch/nn/replication_pad2d.rb,
lib/torch/nn/replication_pad3d.rb,
lib/torch/nn/replication_padnd.rb,
lib/torch/nn/cross_entropy_loss.rb,
lib/torch/nn/adaptive_avg_pool1d.rb,
lib/torch/nn/adaptive_avg_pool2d.rb,
lib/torch/nn/adaptive_avg_pool3d.rb,
lib/torch/nn/adaptive_avg_poolnd.rb,
lib/torch/nn/adaptive_max_pool1d.rb,
lib/torch/nn/adaptive_max_pool2d.rb,
lib/torch/nn/adaptive_max_pool3d.rb,
lib/torch/nn/adaptive_max_poolnd.rb,
lib/torch/nn/local_response_norm.rb,
lib/torch/nn/margin_ranking_loss.rb,
lib/torch/nn/triplet_margin_loss.rb,
lib/torch/utils/data/data_loader.rb,
lib/torch/nn/bce_with_logits_loss.rb,
lib/torch/nn/hinge_embedding_loss.rb,
lib/torch/nn/cosine_embedding_loss.rb,
lib/torch/nn/feature_alpha_dropout.rb,
lib/torch/utils/data/tensor_dataset.rb,
lib/torch/nn/multi_label_margin_loss.rb,
lib/torch/optim/lr_scheduler/step_lr.rb,
lib/torch/optim/lr_scheduler/lambda_lr.rb,
lib/torch/nn/multi_label_soft_margin_loss.rb,
lib/torch/optim/lr_scheduler/lr_scheduler.rb,
lib/torch/optim/lr_scheduler/multi_step_lr.rb,
lib/torch/optim/lr_scheduler/exponential_lr.rb,
lib/torch/optim/lr_scheduler/multiplicative_lr.rb,
lib/torch/optim/lr_scheduler/cosine_annealing_lr.rb

Overview

We use a generic interface for methods (*args, **options) and this class to determine the C++ method to call.

This is needed since LibTorch uses function overloading, which isn’t available in Ruby or Python.

PyTorch uses the same approach, but its parser/dispatcher is written in C++.

We could generate Ruby methods directly, but an advantage of this approach is that positional and keyword arguments can be used interchangeably, as in Python, making it easier to port code.
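
A minimal sketch of what this interchangeability looks like in practice, assuming a natively dispatched method such as unsqueeze (whose dim argument the parser can accept either positionally or as a keyword):

require "torch"

x = Torch.ones(2, 3)

# positional form
a = Torch.unsqueeze(x, 0)

# equivalent keyword form, resolved by the same parser
b = Torch.unsqueeze(x, dim: 0)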

Defined Under Namespace

Modules: Hub, Inspector, NN, Native, Optim, Random, Utils
Classes: Error, NotImplementedYet, Tensor

Constant Summary

DTYPE_TO_ENUM =
{
  uint8: 0,
  int8: 1,
  short: 2,
  int16: 2,
  int: 3,
  int32: 3,
  long: 4,
  int64: 4,
  half: 5,
  float16: 5,
  float: 6,
  float32: 6,
  double: 7,
  float64: 7,
  complex_half: 8,
  complex_float: 9,
  complex_double: 10,
  bool: 11,
  qint8: 12,
  quint8: 13,
  qint32: 14,
  bfloat16: 15
}
ENUM_TO_DTYPE =
DTYPE_TO_ENUM.map(&:reverse).to_h
FloatTensor =
_make_tensor_class(:float32)
DoubleTensor =
_make_tensor_class(:float64)
HalfTensor =
_make_tensor_class(:float16)
ByteTensor =
_make_tensor_class(:uint8)
CharTensor =
_make_tensor_class(:int8)
ShortTensor =
_make_tensor_class(:int16)
IntTensor =
_make_tensor_class(:int32)
LongTensor =
_make_tensor_class(:int64)
BoolTensor =
_make_tensor_class(:bool)
VERSION =
"0.2.1"

Class Method Summary

Class Method Details

._dtype_to_numo ⇒ Object

Private method for cases where Numo is not available, or becomes available only after Torch is loaded.

Raises:

  • (Torch::Error)

# File 'lib/torch.rb', line 291

def _dtype_to_numo
  raise Error, "Numo not found" unless defined?(Numo::NArray)

  {
    uint8: Numo::UInt8,
    int8: Numo::Int8,
    int16: Numo::Int16,
    int32: Numo::Int32,
    int64: Numo::Int64,
    float32: Numo::SFloat,
    float64: Numo::DFloat
  }
end

._make_tensor_class(dtype, cuda = false) ⇒ Object



# File 'lib/torch.rb', line 221

def self._make_tensor_class(dtype, cuda = false)
  cls = Class.new
  device = cuda ? "cuda" : "cpu"
  cls.define_singleton_method("new") do |*args|
    if args.size == 1 && args.first.is_a?(Tensor)
      args.first.send(dtype).to(device)
    elsif args.size == 1 && args.first.is_a?(Array)
      Torch.tensor(args.first, dtype: dtype, device: device)
    else
      Torch.empty(*args, dtype: dtype, device: device)
    end
  end
  cls
end
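
For illustration, the tensor class constants above (FloatTensor, LongTensor, and so on) are built with this method, so construction follows the three branches shown; the comments below are a sketch of the resulting behavior, not captured output:

Torch::FloatTensor.new([1, 2, 3])   # Array argument -> Torch.tensor(..., dtype: :float32)
Torch::FloatTensor.new(2, 3)        # size arguments -> Torch.empty(2, 3, dtype: :float32)

t = Torch.tensor([1, 2, 3])
Torch::FloatTensor.new(t)           # Tensor argument -> cast to float32 on the CPU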

.arange(start, finish = nil, step = 1, **options) ⇒ Object

— begin tensor creation methods; see pytorch.org/cppdocs/notes/tensor_creation.html



# File 'lib/torch.rb', line 329

def arange(start, finish = nil, step = 1, **options)
  # ruby doesn't support start = 0, finish, step = 1, ...
  if finish.nil?
    finish = start
    start = 0
  end
  _arange(start, finish, step, tensor_options(**options))
end
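
A quick usage sketch of the single-argument and full forms:

Torch.arange(5)         # start defaults to 0 -> 0, 1, 2, 3, 4
Torch.arange(1, 10, 3)  # explicit start, finish, step -> 1, 4, 7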

.device(str) ⇒ Object



# File 'lib/torch.rb', line 315

def device(str)
  Device.new(str)
end

.empty(*size, **options) ⇒ Object



# File 'lib/torch.rb', line 338

def empty(*size, **options)
  _empty(tensor_size(size), tensor_options(**options))
end

.empty_like(input, **options) ⇒ Object



# File 'lib/torch.rb', line 418

def empty_like(input, **options)
  empty(input.size, **like_options(input, options))
end

.eye(n, m = nil, **options) ⇒ Object



# File 'lib/torch.rb', line 342

def eye(n, m = nil, **options)
  _eye(n, m || n, tensor_options(**options))
end

.from_numo(ndarray) ⇒ Object

Raises:

  • (Torch::Error)

# File 'lib/torch.rb', line 274

def from_numo(ndarray)
  dtype = _dtype_to_numo.find { |k, v| ndarray.is_a?(v) }
  raise Error, "Cannot convert #{ndarray.class.name} to tensor" unless dtype
  options = tensor_options(device: "cpu", dtype: dtype[0])
  # TODO pass pointer to array instead of creating string
  str = ndarray.to_string
  tensor = _from_blob(str, ndarray.shape, options)
  # from_blob does not own the data, so we need to keep
  # a reference to it for duration of tensor
  # can remove when passing pointer directly
  tensor.instance_variable_set("@_numo_str", str)
  tensor
end
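
A minimal round-trip sketch, assuming the numo-narray gem is installed:

require "torch"
require "numo/narray"

ndarray = Numo::SFloat.new(2, 3).seq   # 2x3 single-precision array filled with 0.0..5.0
tensor  = Torch.from_numo(ndarray)     # float32 tensor on the CPU, same shape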

.full(size, fill_value, **options) ⇒ Object



# File 'lib/torch.rb', line 346

def full(size, fill_value, **options)
  _full(size, fill_value, tensor_options(**options))
end

.full_like(input, fill_value, **options) ⇒ Object



# File 'lib/torch.rb', line 422

def full_like(input, fill_value, **options)
  full(input.size, fill_value, **like_options(input, options))
end

.linspace(start, finish, steps = 100, **options) ⇒ Object



# File 'lib/torch.rb', line 350

def linspace(start, finish, steps = 100, **options)
  _linspace(start, finish, steps, tensor_options(**options))
end

.load(f) ⇒ Object



# File 'lib/torch.rb', line 323

def load(f)
  to_ruby(_load(File.binread(f)))
end

.logspace(start, finish, steps = 100, base = 10.0, **options) ⇒ Object



# File 'lib/torch.rb', line 354

def logspace(start, finish, steps = 100, base = 10.0, **options)
  _logspace(start, finish, steps, base, tensor_options(**options))
end
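
A short usage sketch covering linspace and logspace:

Torch.linspace(0, 1, 5)   # 5 evenly spaced values from 0 to 1
Torch.logspace(0, 2, 3)   # 3 values spaced on a log scale: 10**0, 10**1, 10**2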

.no_grad ⇒ Object



# File 'lib/torch.rb', line 305

def no_grad
  previous_value = grad_enabled?
  begin
    _set_grad_enabled(false)
    yield
  ensure
    _set_grad_enabled(previous_value)
  end
end
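
For example, gradient tracking is turned off only inside the block and the previous state is restored afterwards, even if the block raises:

Torch.no_grad do
  # operations here are not tracked for autograd
  y = Torch.ones(3) * 2
end
# gradient tracking is back to its previous state here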

.ones(*size, **options) ⇒ Object



# File 'lib/torch.rb', line 358

def ones(*size, **options)
  _ones(tensor_size(size), tensor_options(**options))
end

.ones_like(input, **options) ⇒ Object

— begin *_like methods —



# File 'lib/torch.rb', line 414

def ones_like(input, **options)
  ones(input.size, **like_options(input, options))
end
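
A quick sketch of the *_like family: the shape comes from the input, and dtype/device are taken from it via like_options (the explicit dtype override below is an assumption based on the corresponding PyTorch behavior):

x = Torch.rand(2, 3)                # float32 by default
Torch.ones_like(x)                  # 2x3 ones with x's dtype and device
Torch.ones_like(x, dtype: :int64)   # assumed to override the inherited dtype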

.rand(*size, **options) ⇒ Object



# File 'lib/torch.rb', line 362

def rand(*size, **options)
  _rand(tensor_size(size), tensor_options(**options))
end

.rand_like(input, **options) ⇒ Object



# File 'lib/torch.rb', line 426

def rand_like(input, **options)
  rand(input.size, **like_options(input, options))
end

.randint(low = 0, high, size, **options) ⇒ Object



# File 'lib/torch.rb', line 366

def randint(low = 0, high, size, **options)
  _randint(low, high, size, tensor_options(**options))
end
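
Because low has a default before the required high and size parameters, Ruby fills the required arguments first; a quick sketch:

Torch.randint(10, [2, 3])   # low defaults to 0, high is 10, shape 2x3
Torch.randint(5, 10, [4])   # explicit low and high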

.randint_like(input, low, high = nil, **options) ⇒ Object



# File 'lib/torch.rb', line 430

def randint_like(input, low, high = nil, **options)
  # ruby doesn't support input, low = 0, high, ...
  if high.nil?
    high = low
    low = 0
  end
  randint(low, high, input.size, **like_options(input, options))
end

.randn(*size, **options) ⇒ Object



# File 'lib/torch.rb', line 370

def randn(*size, **options)
  _randn(tensor_size(size), tensor_options(**options))
end

.randn_like(input, **options) ⇒ Object



# File 'lib/torch.rb', line 439

def randn_like(input, **options)
  randn(input.size, **like_options(input, options))
end

.randperm(n, **options) ⇒ Object



# File 'lib/torch.rb', line 374

def randperm(n, **options)
  _randperm(n, tensor_options(**options))
end

.save(obj, f) ⇒ Object



# File 'lib/torch.rb', line 319

def save(obj, f)
  File.binwrite(f, _save(to_ivalue(obj)))
end
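
A minimal save/load round trip using the load method documented above (the file name is illustrative):

t = Torch.tensor([1, 2, 3])
Torch.save(t, "tensor.pt")           # serialize to disk
restored = Torch.load("tensor.pt")   # read it back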

.tensor(data, **options) ⇒ Object



# File 'lib/torch.rb', line 382

def tensor(data, **options)
  if options[:dtype].nil? && defined?(Numo::NArray) && data.is_a?(Numo::NArray)
    numo_to_dtype = _dtype_to_numo.map(&:reverse).to_h
    options[:dtype] = numo_to_dtype[data.class]
  end

  size = []
  if data.respond_to?(:to_a)
    data = data.to_a
    d = data
    while d.is_a?(Array)
      size << d.size
      d = d.first
    end
    data = data.flatten
  else
    data = [data].compact
  end

  if options[:dtype].nil?
    if data.all? { |v| v.is_a?(Integer) }
      options[:dtype] = :int64
    elsif data.all? { |v| v == true || v == false }
      options[:dtype] = :bool
    end
  end

  _tensor(data, size, tensor_options(**options))
end
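
A few examples of the shape and dtype inference shown above:

Torch.tensor([[1, 2], [3, 4]])   # shape inferred as 2x2, all Integers -> dtype :int64
Torch.tensor([true, false])      # all booleans -> dtype :bool
Torch.tensor([1.5, 2.5])         # falls through to the default floating-point dtype
Torch.tensor(3)                  # scalar input -> zero-dimensional tensor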

.tensor?(obj) ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/torch.rb', line 270

def tensor?(obj)
  obj.is_a?(Tensor)
end

.zeros(*size, **options) ⇒ Object



# File 'lib/torch.rb', line 378

def zeros(*size, **options)
  _zeros(tensor_size(size), tensor_options(**options))
end

.zeros_like(input, **options) ⇒ Object



# File 'lib/torch.rb', line 443

def zeros_like(input, **options)
  zeros(input.size, **like_options(input, options))
end