Class: DNN::Models::Model

Inherits:
Chain < Object
Defined in:
lib/dnn/core/models.rb

Overview

This class represents a neural network model and provides the common interface for training, evaluating, and running it.

Direct Known Subclasses

FixedModel, Sequential
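
A minimal end-to-end sketch (a hedged example: it assumes the Sequential subclass listed above and this library's standard layer, optimizer, and loss classes; x_train, y_train, x_test, and y_test are hypothetical Numo::SFloat arrays you provide):

require "dnn"

model = DNN::Models::Sequential.new
model << DNN::Layers::InputLayer.new(784)
model << DNN::Layers::Dense.new(128)
model << DNN::Layers::ReLU.new
model << DNN::Layers::Dense.new(10)
model.setup(DNN::Optimizers::Adam.new, DNN::Losses::SoftmaxCrossEntropy.new)
model.train(x_train, y_train, 10, batch_size: 128, test: [x_test, y_test])
accuracy, loss = model.evaluate(x_test, y_test)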

Instance Attribute Summary

Class Method Summary

Instance Method Summary

Methods inherited from Chain

#forward, #layers, #load_hash, #to_hash

Constructor Details

#initialize ⇒ Model

Returns a new instance of Model.



# File 'lib/dnn/core/models.rb', line 125

def initialize
  super
  @optimizer = nil
  @loss_func = nil
  @built = false
  @loss_weights = nil
  @callbacks = []
  @last_log = {}
end

Instance Attribute Details

#last_log ⇒ Object (readonly)

Returns the log of the most recent operation (e.g. :train_loss, :test_loss, :test_accuracy, :epoch, :step).



# File 'lib/dnn/core/models.rb', line 113

def last_log
  @last_log
end

#loss_weights ⇒ Object

Returns the value of attribute loss_weights.



# File 'lib/dnn/core/models.rb', line 112

def loss_weights
  @loss_weights
end

#optimizer ⇒ Object

Returns the value of attribute optimizer.



# File 'lib/dnn/core/models.rb', line 111

def optimizer
  @optimizer
end

Class Method Details

.load(file_name) ⇒ DNN::Models::Model

Load a model saved in marshal format.

Parameters:

  • file_name (String)

    File name of the marshal model to load.

Returns:

  • (DNN::Models::Model)

    The loaded model.

# File 'lib/dnn/core/models.rb', line 118

def self.load(file_name)
  model = self.allocate
  loader = Loaders::MarshalLoader.new(model)
  loader.load(file_name)
  model
end
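
For example (the file name is hypothetical and must point to a file written by #save):

model = DNN::Models::Model.load("trained_model.marshal")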

Instance Method Details

#add_callback(callback) ⇒ Object

Add a callback to the model.

Parameters:

  • callback (Callback)

    Callback object.



# File 'lib/dnn/core/models.rb', line 473

def add_callback(callback)
  callback.model = self
  @callbacks << callback
end
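
A sketch using the LambdaCallback class that #add_lambda_callback below builds internally (the message is illustrative):

cb = DNN::Callbacks::LambdaCallback.new(:before_epoch) { puts "epoch starting" }
model.add_callback(cb)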

#add_lambda_callback(event) { ... } ⇒ Object

Add a lambda callback.

Parameters:

  • event (Symbol)

    Event on which to execute the callback.

Yields:

  • The block to register as the body of the callback.



# File 'lib/dnn/core/models.rb', line 481

def add_lambda_callback(event, &block)
  callback = Callbacks::LambdaCallback.new(event, &block)
  callback.model = self
  @callbacks << callback
end
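
For example, to print the test loss after every epoch (:after_epoch is one of the events fired during training, as seen in the #train_by_iterator source):

model.add_lambda_callback(:after_epoch) do
  puts "test loss: #{model.last_log[:test_loss]}"
end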

#built? ⇒ Boolean

Returns true if the model has already been built.

Returns:

  • (Boolean)

    True if the model has already been built.



# File 'lib/dnn/core/models.rb', line 534

def built?
  @built
end

#call(input_tensors) ⇒ Object

Run a forward pass on the given input tensors, marking the model as built.


# File 'lib/dnn/core/models.rb', line 135

def call(input_tensors)
  output_tensors = forward(input_tensors)
  @built = true unless @built
  output_tensors
end

#clean_layers ⇒ Object

Clean all layers.



# File 'lib/dnn/core/models.rb', line 539

def clean_layers
  layers.each(&:clean)
  if @loss_func.is_a?(Array)
    @loss_func.each do |lf|
      lf.clean
    end
  elsif @loss_func.is_a?(Losses::Loss)
    @loss_func.clean
  end
  @layers_cache = nil
end

#clear_callbacks ⇒ Object

Clear all callbacks registered for every event.



# File 'lib/dnn/core/models.rb', line 488

def clear_callbacks
  @callbacks = []
end

#copy ⇒ DNN::Models::Model

Return a copy of this model.

Returns:

  • (DNN::Models::Model)

    A copy of this model.

# File 'lib/dnn/core/models.rb', line 514

def copy
  Marshal.load(Marshal.dump(self))
end

#evaluate(x, y, batch_size: 100, accuracy: true) ⇒ Array

Evaluate the model and get the accuracy and loss on test data.

Parameters:

  • x (Numo::SFloat)

    Input test data.

  • y (Numo::SFloat)

    Output test data.

  • batch_size (Integer) (defaults to: 100)

    Batch size used for one test.

Returns:

  • (Array)

    Returns the test data accuracy and mean loss in the form [accuracy, mean_loss]. If accuracy is not needed, returns [nil, mean_loss].



# File 'lib/dnn/core/models.rb', line 330

def evaluate(x, y, batch_size: 100, accuracy: true)
  check_xy_type(x, y)
  evaluate_by_iterator(Iterator.new(x, y, random: false), batch_size: batch_size, accuracy: accuracy)
end
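
For example (x_test and y_test are hypothetical Numo::SFloat test arrays):

accuracy, mean_loss = model.evaluate(x_test, y_test, batch_size: 128)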

#evaluate_by_iterator(test_iterator, batch_size: 100, accuracy: true) ⇒ Array

Evaluate the model using an iterator.

Parameters:

  • test_iterator (DNN::Iterator)

    Iterator used for testing.

  • batch_size (Integer) (defaults to: 100)

    Batch size used for one test.

Returns:

  • (Array)

    Returns the test data accuracy and mean loss in the form [accuracy, mean_loss]. If accuracy is not needed, returns [nil, mean_loss].



# File 'lib/dnn/core/models.rb', line 340

def evaluate_by_iterator(test_iterator, batch_size: 100, accuracy: true)
  num_test_datas = test_iterator.num_datas
  batch_size = batch_size >= num_test_datas ? num_test_datas : batch_size
  if @loss_func.is_a?(Array)
    total_correct = Array.new(@loss_func.length, 0)
    sum_loss = Array.new(@loss_func.length, 0)
  else
    total_correct = 0
    sum_loss = 0
  end
  max_steps = (num_test_datas.to_f / batch_size).ceil
  test_iterator.foreach(batch_size) do |x_batch, y_batch|
    correct, loss_value = test_on_batch(x_batch, y_batch, accuracy: accuracy)
    if @loss_func.is_a?(Array)
      @loss_func.each_index do |i|
        total_correct[i] += correct[i] if accuracy
        sum_loss[i] += loss_value[i]
      end
    else
      total_correct += correct if accuracy
      sum_loss += loss_value
    end
  end
  acc = nil
  if @loss_func.is_a?(Array)
    mean_loss = Array.new(@loss_func.length, 0)
    acc = Array.new(@loss_func.length, 0) if accuracy
    @loss_func.each_index do |i|
      mean_loss[i] += sum_loss[i] / max_steps
      acc[i] += total_correct[i].to_f / num_test_datas if accuracy
    end
  else
    mean_loss = sum_loss / max_steps
    acc = total_correct.to_f / num_test_datas if accuracy
  end
  @last_log[:test_loss] = mean_loss
  @last_log[:test_accuracy] = acc
  [acc, mean_loss]
end
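
For example, using the library's Iterator with shuffling disabled, mirroring what #evaluate does internally:

test_iterator = DNN::Iterator.new(x_test, y_test, random: false)
accuracy, mean_loss = model.evaluate_by_iterator(test_iterator, batch_size: 128)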

#get_all_params_data ⇒ Array

Get parameter data of all layers.

Returns:

  • (Array)

    Parameter data.



# File 'lib/dnn/core/models.rb', line 553

def get_all_params_data
  trainable_layers.map do |layer|
    layer.get_params.to_h do |key, param|
      [key, param.data]
    end
  end
end

#get_layer(name) ⇒ DNN::Layers::Layer

Get a layer held by the model, looked up by name.

Parameters:

  • name (Symbol)

    The name of the layer to get.

Returns:

  • (DNN::Layers::Layer)

    The layer with the given name, or nil if none is found.

# File 'lib/dnn/core/models.rb', line 527

def get_layer(name)
  layer = instance_variable_get("@#{name}")
  return layer if layer.is_a?(Layers::Layer) || layer.is_a?(Chain) || layer.is_a?(LayersList)
  nil
end
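
Because the lookup reads the instance variable of the same name, this is mainly useful for Model subclasses that store layers in instance variables. A hedged sketch (MyModel and @dense1 are hypothetical):

class MyModel < DNN::Models::Model
  def initialize
    super
    @dense1 = DNN::Layers::Dense.new(10)
  end

  def forward(x)
    @dense1.(x)
  end
end

model = MyModel.new
model.get_layer(:dense1) # => the Dense layer, or nil if no such variable exists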

#load_params(file_name) ⇒ Object

Load parameters saved in marshal format.

Parameters:

  • file_name (String)

    File name of the marshal params to load.



# File 'lib/dnn/core/models.rb', line 494

def load_params(file_name)
  loader = Loaders::MarshalLoader.new(self)
  loader.load(file_name)
end

#loss_func ⇒ Object



# File 'lib/dnn/core/models.rb', line 157

def loss_func
  @loss_func
end

#loss_func=(lfs) ⇒ Object



# File 'lib/dnn/core/models.rb', line 161

def loss_func=(lfs)
  if lfs.is_a?(Array)
    @loss_func = []
    lfs.each.with_index do |lf, i|
      unless lf.is_a?(Losses::Loss)
        raise TypeError, "loss_func[#{i}]:#{lf.class} is not an instance of DNN::Losses::Loss class."
      end
      @loss_func << lf
    end
  else
    @loss_func = lfs
  end
end
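
For a model with multiple outputs, an Array of losses can be assigned, one per output (a hedged sketch using this library's standard loss classes):

model.loss_func = [DNN::Losses::SoftmaxCrossEntropy.new, DNN::Losses::MeanSquaredError.new]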

#predict(x, use_loss_activation: true) ⇒ Object

Predict output for the given input data.

Parameters:

  • x (Numo::SFloat)

    Input data.

  • use_loss_activation (Boolean) (defaults to: true)

    Apply the loss function's activation when the loss has one.



# File 'lib/dnn/core/models.rb', line 431

def predict(x, use_loss_activation: true)
  check_xy_type(x)
  DNN.learning_phase = false
  output_tensors = call(Tensor.convert(x))
  if output_tensors.is_a?(Array)
    lfs = @loss_func
    ary_output_tensors = output_tensors
  else
    lfs = [@loss_func]
    ary_output_tensors = [output_tensors]
  end
  ys = []
  ary_output_tensors.each.with_index do |out, i|
    y = out.data
    lf = lfs[i]
    if use_loss_activation && lf && lf.class.respond_to?(:activation)
      y = lf.class.activation(y)
    end
    ys << y
  end
  output_tensors.is_a?(Array) ? ys : ys.first
end
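
For example, to obtain raw outputs without the activation a loss function may attach (such as the softmax of a softmax cross-entropy loss):

y = model.predict(x_test)                                # with loss activation
raw = model.predict(x_test, use_loss_activation: false)  # without it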

#predict1(x, use_loss_activation: true) ⇒ Object

Predict a single sample.

Parameters:

  • x (Numo::SFloat)

    Input data for a single sample, without the batch dimension.



# File 'lib/dnn/core/models.rb', line 456

def predict1(x, use_loss_activation: true)
  check_xy_type(x)
  input = if x.is_a?(Array)
            x.map { |v| v.reshape(1, *v.shape) }
          else
            x.reshape(1, *x.shape)
          end
  y = predict(input, use_loss_activation: use_loss_activation)
  if y.is_a?(Array)
    y.map { |v| v[0, false] }
  else
    y[0, false]
  end
end
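
For example, predicting the first sample of a test set without manually adding the batch dimension (x_test is hypothetical; x_test[0, false] selects one sample):

y = model.predict1(x_test[0, false])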

#save(file_name) ⇒ Object

Save the model in marshal format.

Parameters:

  • file_name (String)

    File name to save the model to.



# File 'lib/dnn/core/models.rb', line 501

def save(file_name)
  saver = Savers::MarshalSaver.new(self, include_model: true)
  saver.save(file_name)
end

#save_params(file_name) ⇒ Object

Save the params in marshal format.

Parameters:

  • file_name (String)

    File name to save the params to.



# File 'lib/dnn/core/models.rb', line 508

def save_params(file_name)
  saver = Savers::MarshalSaver.new(self, include_model: false)
  saver.save(file_name)
end
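
A save/restore sketch (file names are hypothetical): #save together with .load restores the whole model, while #save_params together with #load_params restores only the parameters into an already constructed model of the same structure.

model.save("model.marshal")
restored = DNN::Models::Model.load("model.marshal")

model.save_params("params.marshal")
model.load_params("params.marshal")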

#set_all_params_data(params_data) ⇒ Object

Set parameter data of all layers.

Parameters:

  • params_data (Array)

    Parameter data obtained by get_all_params_data.



# File 'lib/dnn/core/models.rb', line 563

def set_all_params_data(params_data)
  trainable_layers.each.with_index do |layer, i|
    params_data[i].each do |(key, data)|
      layer.get_params[key].data = data
    end
  end
end
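
A round-trip sketch, e.g. for transferring weights between two models of identical structure (model_a and model_b are hypothetical):

params_data = model_a.get_all_params_data
model_b.set_all_params_data(params_data)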

#setup(optimizer, loss_func, loss_weights: nil) ⇒ Object

Set the optimizer and loss function on the model.

Parameters:

  • optimizer (DNN::Optimizers::Optimizer)

    Optimizer to use for learning.

  • loss_func (DNN::Losses::Loss)

    Loss function to use for learning.

  • loss_weights (Array | NilClass) (defaults to: nil)

    Weights applied to each loss function's contribution when multiple losses are used.



# File 'lib/dnn/core/models.rb', line 145

def setup(optimizer, loss_func, loss_weights: nil)
  unless optimizer.is_a?(Optimizers::Optimizer)
    raise TypeError, "optimizer:#{optimizer.class} is not an instance of DNN::Optimizers::Optimizer class."
  end
  unless loss_func.is_a?(Losses::Loss) || loss_func.is_a?(Array)
    raise TypeError, "loss_func:#{loss_func.class} is not an instance of DNN::Losses::Loss or Array class."
  end
  @optimizer = optimizer
  self.loss_func = loss_func
  @loss_weights = loss_weights
end
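
For example (standard optimizer and loss classes from this library; loss_weights is only meaningful when loss_func is an Array):

model.setup(DNN::Optimizers::Adam.new, DNN::Losses::SoftmaxCrossEntropy.new)

# A hedged multi-output sketch with weighted losses:
model.setup(DNN::Optimizers::Adam.new,
            [DNN::Losses::SoftmaxCrossEntropy.new, DNN::Losses::MeanSquaredError.new],
            loss_weights: [1.0, 0.5])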

#test_on_batch(x, y, accuracy: true) ⇒ Array

Evaluate the model on a single batch.

Parameters:

  • x (Numo::SFloat | Array)

    Input test data.

  • y (Numo::SFloat | Array)

    Output test data.

Returns:

  • (Array)

    Returns the test data accuracy and loss in the form [accuracy, loss]. If accuracy is not needed, returns [nil, loss].



# File 'lib/dnn/core/models.rb', line 385

def test_on_batch(x, y, accuracy: true)
  call_callbacks(:before_test_on_batch)
  DNN.learning_phase = false
  output_tensors = call(Tensor.convert(x))
  correct = nil
  if output_tensors.is_a?(Array)
    correct = [] if accuracy
    loss_data = []
    output_tensors.each.with_index do |out, i|
      correct << accuracy(out.data, y[i]) if accuracy
      loss = @loss_func[i].(out, Tensor.convert(y[i]))
      loss_data << Utils.to_f(loss.data)
    end
  else
    out = output_tensors
    correct = accuracy(out.data, y) if accuracy
    loss = @loss_func.(out, Tensor.convert(y))
    loss_data = Utils.to_f(loss.data)
  end
  call_callbacks(:after_test_on_batch)
  [correct, loss_data]
end

#to_cpu ⇒ DNN::Models::Model

Convert the parameters of the model and optimizer to CPU (Numo) arrays.

Returns:

  • (DNN::Models::Model)

    Return self.

# File 'lib/dnn/core/models.rb', line 573

def to_cpu
  params_data = get_all_params_data
  clean_layers
  set_all_params_data(params_data)
  trainable_layers.each do |layer|
    layer.get_params.each do |key, param|
      data = param.data
      if DNN.use_cumo? && data.is_a?(Cumo::NArray)
        param.data = Utils.cumo2numo(data)
      end
    end
  end
  @optimizer.status.each do |key, state|
    next unless state
    state.each do |param, data|
      if DNN.use_cumo? && data.is_a?(Cumo::NArray)
        state[param] = Utils.cumo2numo(data)
      end
    end
  end
  self
end

#to_gpu ⇒ DNN::Models::Model

Convert the parameters of the model and optimizer to GPU (Cumo) arrays.

Returns:

  • (DNN::Models::Model)

    Return self.

# File 'lib/dnn/core/models.rb', line 598

def to_gpu
  params_data = get_all_params_data
  clean_layers
  set_all_params_data(params_data)
  trainable_layers.each do |layer|
    layer.get_params.each do |(key, param)|
      data = param.data
      if DNN.use_cumo? && data.is_a?(Numo::NArray)
        param.data = Utils.numo2cumo(data)
      end
    end
  end
  @optimizer.status.each do |(key, state)|
    next unless state
    state.each do |(param, data)|
      if DNN.use_cumo? && data.is_a?(Numo::NArray)
        state[param] = Utils.numo2cumo(data)
      end
    end
  end
  self
end
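
A sketch of switching backends (DNN.use_cumo? is the check used in the source above):

model.to_gpu if DNN.use_cumo?
# ... train ...
model.to_cpu # bring parameters back to Numo arrays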

#train(x, y, epochs, batch_size: 1, initial_epoch: 1, test: nil, verbose: true, accuracy: true) ⇒ Object Also known as: fit

Start training. Set up the model with #setup before using this method.

Parameters:

  • x (Numo::SFloat)

    Input training data.

  • y (Numo::SFloat)

    Output training data.

  • epochs (Integer)

    Number of training epochs.

  • batch_size (Integer) (defaults to: 1)

    Batch size used for one training step.

  • initial_epoch (Integer) (defaults to: 1)

    Initial epoch.

  • test (Array | NilClass) (defaults to: nil)

    To test the model after every epoch, specify [x_test, y_test]. To skip testing, specify nil.

  • verbose (Boolean) (defaults to: true)

    Set true to display the training log.

  • accuracy (Boolean) (defaults to: true)

    Set true to compute the accuracy.



# File 'lib/dnn/core/models.rb', line 186

def train(x, y, epochs,
          batch_size: 1,
          initial_epoch: 1,
          test: nil,
          verbose: true,
          accuracy: true)
  check_xy_type(x, y)
  train_iterator = Iterator.new(x, y)
  train_by_iterator(train_iterator, epochs,
                    batch_size: batch_size,
                    initial_epoch: initial_epoch,
                    test: test,
                    verbose: verbose,
                    accuracy: accuracy)
end
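
For example (hypothetical Numo::SFloat arrays; fit is an alias of this method):

model.train(x_train, y_train, 20, batch_size: 128, test: [x_test, y_test])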

#train_by_iterator(train_iterator, epochs, batch_size: 1, initial_epoch: 1, test: nil, verbose: true, accuracy: true) ⇒ Object Also known as: fit_by_iterator

Start training from an iterator. Set up the model with #setup before using this method.

Parameters:

  • train_iterator (DNN::Iterator)

    Iterator used for training.

  • epochs (Integer)

    Number of training epochs.

  • batch_size (Integer) (defaults to: 1)

    Batch size used for one training step.

  • initial_epoch (Integer) (defaults to: 1)

    Initial epoch.

  • test (Array | NilClass) (defaults to: nil)

    To test the model after every epoch, specify [x_test, y_test]. To skip testing, specify nil.

  • verbose (Boolean) (defaults to: true)

    Set true to display the training log.

  • accuracy (Boolean) (defaults to: true)

    Set true to compute the accuracy.

Raises:

  • (DNNError)

    If the optimizer or loss function has not been set up.

# File 'lib/dnn/core/models.rb', line 214

def train_by_iterator(train_iterator, epochs,
                      batch_size: 1,
                      initial_epoch: 1,
                      test: nil,
                      verbose: true,
                      accuracy: true)
  raise DNNError, "The model is not optimizer setup complete." unless @optimizer
  raise DNNError, "The model is not loss_func setup complete." unless @loss_func

  num_train_datas = train_iterator.num_datas
  num_train_datas = num_train_datas / batch_size * batch_size if train_iterator.last_round_down

  stopped = catch(:stop) do
    (initial_epoch..epochs).each do |epoch|
      @last_log[:epoch] = epoch
      call_callbacks(:before_epoch)
      puts "【 epoch #{epoch}/#{epochs}" if verbose

      train_iterator.foreach(batch_size) do |x_batch, y_batch, index|
        @last_log[:step] = index
        train_step_met = train_step(x_batch, y_batch)
        num_trained_datas = (index + 1) * batch_size
        num_trained_datas = num_trained_datas > num_train_datas ? num_train_datas : num_trained_datas
        log = "\r"
        40.times do |i|
          if i < num_trained_datas * 40 / num_train_datas
            log << "="
          elsif i == num_trained_datas * 40 / num_train_datas
            log << ">"
          else
            log << "_"
          end
        end

        log << "  #{num_trained_datas}/#{num_train_datas} "
        log << metrics_to_str(train_step_met)
        print log if verbose
      end

      if test
        acc, loss = if test.is_a?(Array)
                      evaluate(test[0], test[1], batch_size: batch_size, accuracy: accuracy)
                    else
                      evaluate_by_iterator(test, batch_size: batch_size, accuracy: accuracy)
                    end
        if verbose
          metrics = if accuracy
                      { accuracy: acc, test_loss: loss }
                    else
                      { test_loss: loss }
                    end
          print "  " + metrics_to_str(metrics)
        end
      end
      puts "" if verbose
      call_callbacks(:after_epoch)
    end
    nil
  end

  if stopped
    puts "\n#{stopped}" if verbose
  end
end

#train_on_batch(x, y) ⇒ Float | Numo::SFloat

Perform a single training step. Set up the model with #setup before using this method.

Parameters:

  • x (Numo::SFloat)

    Input training data.

  • y (Numo::SFloat)

    Output training data.

Returns:

  • (Float | Numo::SFloat)

    The loss value, as a Float or Numo::SFloat.

Raises:

  • (DNNError)

    If the optimizer or loss function has not been set up.

# File 'lib/dnn/core/models.rb', line 295

def train_on_batch(x, y)
  raise DNNError, "The model is not optimizer setup complete." unless @optimizer
  raise DNNError, "The model is not loss_func setup complete." unless @loss_func
  check_xy_type(x, y)
  call_callbacks(:before_train_on_batch)
  DNN.learning_phase = true
  output_tensors = call(Tensor.convert(x))
  if output_tensors.is_a?(Array)
    loss_data = []
    output_tensors.each.with_index do |out, i|
      loss_opt = {}
      loss_opt[:layers] = layers if i == 0
      loss_opt[:loss_weight] = @loss_weights[i] if @loss_weights
      loss = @loss_func[i].loss(out, Tensor.convert(y[i]), **loss_opt)
      loss_data << Utils.to_f(loss.data)
      loss.link.backward(Xumo::SFloat.ones(y[i][0...1, false].shape[0], 1))
    end
  else
    out = output_tensors
    loss = @loss_func.loss(out, Tensor.convert(y), layers: layers)
    loss_data = Utils.to_f(loss.data)
    loss.link.backward(Xumo::SFloat.ones(y[0...1, false].shape[0], 1))
  end
  @optimizer.update(get_all_trainable_params)
  @last_log[:train_loss] = loss_data
  call_callbacks(:after_train_on_batch)
  loss_data
end
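
A sketch of one manual pass over the training data built on train_on_batch, using the library's Iterator as #train_by_iterator does internally:

train_iterator = DNN::Iterator.new(x_train, y_train)
train_iterator.foreach(128) do |x_batch, y_batch|
  loss = model.train_on_batch(x_batch, y_batch)
  puts "loss: #{loss}"
end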

#trainable_layers ⇒ Array

Get all trainable layers.

Returns:

  • (Array)

    Array of all layers that have parameters.



# File 'lib/dnn/core/models.rb', line 520

def trainable_layers
  layers.select { |layer| layer.is_a?(Layers::TrainableLayer) }
end