Module: Torch
- Defined in:
- lib/torch/inspector.rb,
lib/torch.rb,
lib/torch/hub.rb,
lib/torch/nn/gru.rb,
lib/torch/nn/rnn.rb,
lib/torch/tensor.rb,
lib/torch/nn/fold.rb,
lib/torch/nn/init.rb,
lib/torch/nn/loss.rb,
lib/torch/nn/lstm.rb,
lib/torch/nn/relu.rb,
lib/torch/nn/tanh.rb,
lib/torch/version.rb,
lib/torch/nn/prelu.rb,
lib/torch/nn/utils.rb,
lib/torch/nn/conv1d.rb,
lib/torch/nn/conv2d.rb,
lib/torch/nn/conv3d.rb,
lib/torch/nn/convnd.rb,
lib/torch/nn/linear.rb,
lib/torch/nn/module.rb,
lib/torch/nn/unfold.rb,
lib/torch/optim/sgd.rb,
lib/torch/nn/dropout.rb,
lib/torch/nn/l1_loss.rb,
lib/torch/nn/sigmoid.rb,
lib/torch/nn/softmax.rb,
lib/torch/nn/softmin.rb,
lib/torch/optim/adam.rb,
lib/torch/optim/asgd.rb,
lib/torch/utils/data.rb,
lib/torch/nn/bce_loss.rb,
lib/torch/nn/bilinear.rb,
lib/torch/nn/ctc_loss.rb,
lib/torch/nn/identity.rb,
lib/torch/nn/mse_loss.rb,
lib/torch/nn/nll_loss.rb,
lib/torch/nn/rnn_base.rb,
lib/torch/nn/softplus.rb,
lib/torch/nn/softsign.rb,
lib/torch/nn/upsample.rb,
lib/torch/optim/adamw.rb,
lib/torch/optim/rprop.rb,
lib/torch/nn/dropout2d.rb,
lib/torch/nn/dropout3d.rb,
lib/torch/nn/dropoutnd.rb,
lib/torch/nn/embedding.rb,
lib/torch/nn/lp_pool1d.rb,
lib/torch/nn/lp_pool2d.rb,
lib/torch/nn/lp_poolnd.rb,
lib/torch/nn/parameter.rb,
lib/torch/nn/softmax2d.rb,
lib/torch/optim/adamax.rb,
lib/torch/nn/avg_pool1d.rb,
lib/torch/nn/avg_pool2d.rb,
lib/torch/nn/avg_pool3d.rb,
lib/torch/nn/avg_poolnd.rb,
lib/torch/nn/batch_norm.rb,
lib/torch/nn/functional.rb,
lib/torch/nn/group_norm.rb,
lib/torch/nn/hardshrink.rb,
lib/torch/nn/layer_norm.rb,
lib/torch/nn/leaky_relu.rb,
lib/torch/nn/max_pool1d.rb,
lib/torch/nn/max_pool2d.rb,
lib/torch/nn/max_pool3d.rb,
lib/torch/nn/max_poolnd.rb,
lib/torch/nn/sequential.rb,
lib/torch/nn/softshrink.rb,
lib/torch/nn/tanhshrink.rb,
lib/torch/nn/zero_pad2d.rb,
lib/torch/optim/adagrad.rb,
lib/torch/optim/rmsprop.rb,
lib/torch/nn/kl_div_loss.rb,
lib/torch/nn/log_sigmoid.rb,
lib/torch/nn/log_softmax.rb,
lib/torch/optim/adadelta.rb,
lib/torch/nn/batch_norm1d.rb,
lib/torch/nn/batch_norm2d.rb,
lib/torch/nn/batch_norm3d.rb,
lib/torch/nn/max_unpool1d.rb,
lib/torch/nn/max_unpool2d.rb,
lib/torch/nn/max_unpool3d.rb,
lib/torch/nn/max_unpoolnd.rb,
lib/torch/optim/optimizer.rb,
lib/torch/nn/alpha_dropout.rb,
lib/torch/nn/embedding_bag.rb,
lib/torch/nn/instance_norm.rb,
lib/torch/nn/weighted_loss.rb,
lib/torch/nn/constant_pad1d.rb,
lib/torch/nn/constant_pad2d.rb,
lib/torch/nn/constant_pad3d.rb,
lib/torch/nn/constant_padnd.rb,
lib/torch/nn/smooth_l1_loss.rb,
lib/torch/utils/data/subset.rb,
lib/torch/nn/instance_norm1d.rb,
lib/torch/nn/instance_norm2d.rb,
lib/torch/nn/instance_norm3d.rb,
lib/torch/utils/data/dataset.rb,
lib/torch/nn/poisson_nll_loss.rb,
lib/torch/nn/reflection_pad1d.rb,
lib/torch/nn/reflection_pad2d.rb,
lib/torch/nn/reflection_padnd.rb,
lib/torch/nn/soft_margin_loss.rb,
lib/torch/nn/cosine_similarity.rb,
lib/torch/nn/multi_margin_loss.rb,
lib/torch/nn/pairwise_distance.rb,
lib/torch/nn/replication_pad1d.rb,
lib/torch/nn/replication_pad2d.rb,
lib/torch/nn/replication_pad3d.rb,
lib/torch/nn/replication_padnd.rb,
lib/torch/nn/cross_entropy_loss.rb,
lib/torch/nn/adaptive_avg_pool1d.rb,
lib/torch/nn/adaptive_avg_pool2d.rb,
lib/torch/nn/adaptive_avg_pool3d.rb,
lib/torch/nn/adaptive_avg_poolnd.rb,
lib/torch/nn/adaptive_max_pool1d.rb,
lib/torch/nn/adaptive_max_pool2d.rb,
lib/torch/nn/adaptive_max_pool3d.rb,
lib/torch/nn/adaptive_max_poolnd.rb,
lib/torch/nn/local_response_norm.rb,
lib/torch/nn/margin_ranking_loss.rb,
lib/torch/nn/triplet_margin_loss.rb,
lib/torch/utils/data/data_loader.rb,
lib/torch/nn/bce_with_logits_loss.rb,
lib/torch/nn/hinge_embedding_loss.rb,
lib/torch/nn/cosine_embedding_loss.rb,
lib/torch/nn/feature_alpha_dropout.rb,
lib/torch/utils/data/tensor_dataset.rb,
lib/torch/nn/multi_label_margin_loss.rb,
lib/torch/optim/lr_scheduler/step_lr.rb,
lib/torch/optim/lr_scheduler/lambda_lr.rb,
lib/torch/nn/multi_label_soft_margin_loss.rb,
lib/torch/optim/lr_scheduler/lr_scheduler.rb,
lib/torch/optim/lr_scheduler/multi_step_lr.rb,
lib/torch/optim/lr_scheduler/exponential_lr.rb,
lib/torch/optim/lr_scheduler/multiplicative_lr.rb,
lib/torch/optim/lr_scheduler/cosine_annealing_lr.rb
Overview
Defined Under Namespace
Modules: Autograd, Hub, Inspector, NN, Optim, Utils
Classes: ByteStorage, Error, NotImplementedYet, Tensor
Constant Summary
- DTYPE_TO_ENUM =

```ruby
{
  uint8: 0,
  int8: 1,
  short: 2, int16: 2,
  int: 3, int32: 3,
  long: 4, int64: 4,
  half: 5, float16: 5,
  float: 6, float32: 6,
  double: 7, float64: 7,
  complex_half: 8,
  complex_float: 9,
  complex_double: 10,
  bool: 11,
  qint8: 12,
  quint8: 13,
  qint32: 14,
  bfloat16: 15
}
```

- ENUM_TO_DTYPE = DTYPE_TO_ENUM.map(&:reverse).to_h
- TENSOR_TYPE_CLASSES = []
- DTYPE_TO_CLASS =

```ruby
{
  float32: "FloatTensor",
  float64: "DoubleTensor",
  float16: "HalfTensor",
  uint8: "ByteTensor",
  int8: "CharTensor",
  int16: "ShortTensor",
  int32: "IntTensor",
  int64: "LongTensor",
  bool: "BoolTensor"
}
```

- VERSION = "0.5.3"
Class Method Summary
- ._dtype_to_numo ⇒ Object — private helper; implemented as a method so it works when Numo is unavailable or is loaded after Torch.
- ._make_tensor_class(dtype, cuda = false) ⇒ Object
- .device(str) ⇒ Object
- .empty_like(input, **options) ⇒ Object
- .enable_grad ⇒ Object
- .from_numo(ndarray) ⇒ Object
- .full_like(input, fill_value, **options) ⇒ Object
- .load(f) ⇒ Object
- .no_grad ⇒ Object
- .ones_like(input, **options) ⇒ Object — first of the *_like creation helpers.
- .rand_like(input, **options) ⇒ Object
- .randint_like(input, low, high = nil, **options) ⇒ Object
- .randn_like(input, **options) ⇒ Object
- .save(obj, f) ⇒ Object
- .stft(input, n_fft, hop_length: nil, win_length: nil, window: nil, center: true, pad_mode: "reflect", normalized: false, onesided: true, return_complex: nil) ⇒ Object — supports the center option by padding the input before transforming.
- .tensor(data, **options) ⇒ Object — creates a tensor from data (see pytorch.org/cppdocs/notes/tensor_creation.html).
- .tensor?(obj) ⇒ Boolean
- .zeros_like(input, **options) ⇒ Object
Class Method Details
._dtype_to_numo ⇒ Object
Private helper; implemented as a method (rather than a constant) so it works when Numo is unavailable or is loaded after Torch.
```ruby
# File 'lib/torch.rb', line 326

def _dtype_to_numo
  raise Error, "Numo not found" unless defined?(Numo::NArray)

  {
    uint8: Numo::UInt8,
    int8: Numo::Int8,
    int16: Numo::Int16,
    int32: Numo::Int32,
    int64: Numo::Int64,
    float32: Numo::SFloat,
    float64: Numo::DFloat
  }
end
```
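Usage sketch (assumes the numo-narray gem is installed, since the method raises otherwise):

```ruby
require "numo/narray"
require "torch"

# maps Torch dtype symbols to Numo array classes
Torch._dtype_to_numo[:float32]  # => Numo::SFloat
Torch._dtype_to_numo[:int64]    # => Numo::Int64
```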
._make_tensor_class(dtype, cuda = false) ⇒ Object
```ruby
# File 'lib/torch.rb', line 253

def self._make_tensor_class(dtype, cuda = false)
  cls = Class.new
  device = cuda ? "cuda" : "cpu"
  cls.define_singleton_method("new") do |*args|
    if args.size == 1 && args.first.is_a?(Tensor)
      args.first.send(dtype).to(device)
    elsif args.size == 1 && args.first.is_a?(ByteStorage) && dtype == :uint8
      bytes = args.first.bytes
      Torch._from_blob(bytes, [bytes.bytesize], TensorOptions.new.dtype(DTYPE_TO_ENUM[dtype]))
    elsif args.size == 1 && args.first.is_a?(Array)
      Torch.tensor(args.first, dtype: dtype, device: device)
    elsif args.size == 0
      Torch.empty(0, dtype: dtype, device: device)
    else
      Torch.empty(*args, dtype: dtype, device: device)
    end
  end
  TENSOR_TYPE_CLASSES << cls
  cls
end
```
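These generated classes appear to back the legacy tensor types named in DTYPE_TO_CLASS (Torch::FloatTensor and friends). A usage sketch under that assumption:

```ruby
require "torch"

t = Torch::FloatTensor.new([1, 2, 3])  # from an array
t.dtype                                # => :float32

Torch::LongTensor.new(2, 3)            # uninitialized 2x3 tensor
Torch::DoubleTensor.new(t)             # casts an existing tensor
```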
.device(str) ⇒ Object
```ruby
# File 'lib/torch.rb', line 360

def device(str)
  Device.new(str)
end
```
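For example:

```ruby
require "torch"

device = Torch.device("cpu")
# a CUDA device string works the same way on a CUDA-enabled build:
# Torch.device("cuda:0")
```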
.empty_like(input, **options) ⇒ Object
```ruby
# File 'lib/torch.rb', line 410

def empty_like(input, **options)
  empty(input.size, **like_options(input, options))
end
```
.enable_grad ⇒ Object
```ruby
# File 'lib/torch.rb', line 350

def enable_grad
  previous_value = grad_enabled?
  begin
    _set_grad_enabled(true)
    yield
  ensure
    _set_grad_enabled(previous_value)
  end
end
```
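A sketch pairing enable_grad with no_grad (below); creating a tensor with requires_grad: true follows the torch-rb README:

```ruby
require "torch"

x = Torch.ones(2, 2, requires_grad: true)

Torch.no_grad do
  y = x * 2
  y.requires_grad        # => false

  Torch.enable_grad do
    z = x * 2
    z.requires_grad      # => true
  end
end
```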
.from_numo(ndarray) ⇒ Object
```ruby
# File 'lib/torch.rb', line 309

def from_numo(ndarray)
  dtype = _dtype_to_numo.find { |k, v| ndarray.is_a?(v) }
  raise Error, "Cannot convert #{ndarray.class.name} to tensor" unless dtype
  options = tensor_options(device: "cpu", dtype: dtype[0])
  # TODO: pass a pointer to the array instead of creating a string
  str = ndarray.to_string
  tensor = _from_blob(str, ndarray.shape, options)
  # _from_blob does not own the data, so keep a reference to it for
  # the lifetime of the tensor (can be removed once a pointer is
  # passed directly)
  tensor.instance_variable_set("@_numo_str", str)
  tensor
end
```
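Usage sketch (requires the numo-narray gem):

```ruby
require "numo/narray"
require "torch"

ndarray = Numo::SFloat.new(2, 3).seq  # 2x3 float32 array: 0.0..5.0
t = Torch.from_numo(ndarray)
t.shape  # => [2, 3]
t.dtype  # => :float32
```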
.full_like(input, fill_value, **options) ⇒ Object
```ruby
# File 'lib/torch.rb', line 414

def full_like(input, fill_value, **options)
  full(input.size, fill_value, **like_options(input, options))
end
```
.load(f) ⇒ Object
```ruby
# File 'lib/torch.rb', line 368

def load(f)
  to_ruby(_load(File.binread(f)))
end
```
.no_grad ⇒ Object
```ruby
# File 'lib/torch.rb', line 340

def no_grad
  previous_value = grad_enabled?
  begin
    _set_grad_enabled(false)
    yield
  ensure
    _set_grad_enabled(previous_value)
  end
end
```
.ones_like(input, **options) ⇒ Object
First of the *_like creation helpers; returns a tensor of ones with the same size as input.
```ruby
# File 'lib/torch.rb', line 406

def ones_like(input, **options)
  ones(input.size, **like_options(input, options))
end
```
.rand_like(input, **options) ⇒ Object
```ruby
# File 'lib/torch.rb', line 418

def rand_like(input, **options)
  rand(input.size, **like_options(input, options))
end
```
.randint_like(input, low, high = nil, **options) ⇒ Object
```ruby
# File 'lib/torch.rb', line 422

def randint_like(input, low, high = nil, **options)
  # Ruby doesn't support an optional argument in the middle
  # (input, low = 0, high, ...), so shift the arguments manually
  if high.nil?
    high = low
    low = 0
  end
  randint(low, high, input.size, **like_options(input, options))
end
```
.randn_like(input, **options) ⇒ Object
```ruby
# File 'lib/torch.rb', line 431

def randn_like(input, **options)
  randn(input.size, **like_options(input, options))
end
```
.save(obj, f) ⇒ Object
```ruby
# File 'lib/torch.rb', line 364

def save(obj, f)
  File.binwrite(f, _save(to_ivalue(obj)))
end
```
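A save/load round trip, sketched for a plain tensor (anything to_ivalue can serialize should behave the same):

```ruby
require "torch"

t = Torch.tensor([1, 2, 3])
Torch.save(t, "tensor.pth")

reloaded = Torch.load("tensor.pth")
reloaded.to_a  # => [1, 2, 3]
```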
.stft(input, n_fft, hop_length: nil, win_length: nil, window: nil, center: true, pad_mode: "reflect", normalized: false, onesided: true, return_complex: nil) ⇒ Object
Supports the center option: when center is true, the input is padded on both sides with pad_mode before the transform.
```ruby
# File 'lib/torch.rb', line 440

def stft(input, n_fft, hop_length: nil, win_length: nil, window: nil, center: true, pad_mode: "reflect", normalized: false, onesided: true, return_complex: nil)
  if center
    signal_dim = input.dim
    extended_shape = [1] * (3 - signal_dim) + input.size
    pad = n_fft.div(2).to_i
    input = NN::F.pad(input.view(extended_shape), [pad, pad], mode: pad_mode)
    input = input.view(input.shape[-signal_dim..-1])
  end
  _stft(input, n_fft, hop_length, win_length, window, normalized, onesided, return_complex)
end
```
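Usage sketch, assuming Torch.hann_window is exposed by the native bindings (as it is in PyTorch):

```ruby
require "torch"

signal = Torch.randn(400)
window = Torch.hann_window(64)

# with return_complex: false, the last dimension holds the
# real and imaginary parts of each frequency bin
spec = Torch.stft(signal, 64, hop_length: 16, window: window, return_complex: false)
```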
.tensor(data, **options) ⇒ Object
Creates a tensor from data (see pytorch.org/cppdocs/notes/tensor_creation.html).
```ruby
# File 'lib/torch.rb', line 374

def tensor(data, **options)
  if options[:dtype].nil? && defined?(Numo::NArray) && data.is_a?(Numo::NArray)
    numo_to_dtype = _dtype_to_numo.map(&:reverse).to_h
    options[:dtype] = numo_to_dtype[data.class]
  end

  size = []
  if data.respond_to?(:to_a)
    data = data.to_a
    d = data
    while d.is_a?(Array)
      size << d.size
      d = d.first
    end
    data = data.flatten
  else
    data = [data].compact
  end

  if options[:dtype].nil?
    if data.all? { |v| v.is_a?(Integer) }
      options[:dtype] = :int64
    elsif data.all? { |v| v == true || v == false }
      options[:dtype] = :bool
    end
  end

  _tensor(data, size, tensor_options(**options))
end
```
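For example, dtype inference follows the rules in the code above:

```ruby
require "torch"

Torch.tensor([[1, 2], [3, 4]]).dtype   # => :int64 (all-integer data)
Torch.tensor([true, false]).dtype      # => :bool
Torch.tensor([1, 2], dtype: :float32)  # explicit dtype wins
```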
.tensor?(obj) ⇒ Boolean
```ruby
# File 'lib/torch.rb', line 305

def tensor?(obj)
  obj.is_a?(Tensor)
end
```
.zeros_like(input, **options) ⇒ Object
```ruby
# File 'lib/torch.rb', line 435

def zeros_like(input, **options)
  zeros(input.size, **like_options(input, options))
end
```
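A sketch of the *_like family, each of which reads the size (and, unless overridden, the creation options) from an existing tensor:

```ruby
require "torch"

base = Torch.rand(2, 3)

Torch.zeros_like(base).size           # => [2, 3]
Torch.ones_like(base, dtype: :int64)  # override the inferred dtype
Torch.full_like(base, 7.0)            # same shape, filled with 7.0
Torch.randint_like(base, 10)          # low defaults to 0 when high is omitted
```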