Class: DNN::Models::Model

Inherits:
Chain
  • Object
Defined in:
lib/dnn/core/models.rb

Overview

This class represents the model of a neural network.

Direct Known Subclasses

Sequential

Instance Attribute Summary

Class Method Summary

Instance Method Summary

Methods inherited from Chain

#forward, #layers, #load_hash, #to_hash

Constructor Details

#initialize ⇒ Model

Returns a new instance of Model.



# File 'lib/dnn/core/models.rb', line 120

def initialize
  @optimizer = nil
  @loss_func = nil
  @built = false
  @callbacks = []
  @layers_cache = nil
  @last_log = {}
end

Instance Attribute Details

#last_log ⇒ Object (readonly)

Returns the value of attribute last_log.



# File 'lib/dnn/core/models.rb', line 108

def last_log
  @last_log
end

#loss_func ⇒ Object

Returns the value of attribute loss_func.



# File 'lib/dnn/core/models.rb', line 107

def loss_func
  @loss_func
end

#optimizer ⇒ Object

Returns the value of attribute optimizer.



# File 'lib/dnn/core/models.rb', line 106

def optimizer
  @optimizer
end

Class Method Details

.load(file_name) ⇒ DNN::Models::Model

Load a model saved in marshal format.

Parameters:

  • file_name (String)

    File name of marshal model to load.

Returns:

  • (DNN::Models::Model)

    The loaded model.

# File 'lib/dnn/core/models.rb', line 113

def self.load(file_name)
  model = self.allocate
  loader = Loaders::MarshalLoader.new(model)
  loader.load(file_name)
  model
end
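
For example, a model saved with #save can be restored like this (the file name is hypothetical):

model = DNN::Models::Model.load("trained_model.marshal")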

Instance Method Details

#add_callback(callback) ⇒ Object

Add a callback function.

Parameters:

  • callback (Callback)

    Callback object.



# File 'lib/dnn/core/models.rb', line 361

def add_callback(callback)
  callback.model = self
  @callbacks << callback
end
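
A minimal sketch of registering a custom callback. The LoggingCallback class and the after_epoch hook name are assumptions for illustration, as is the DNN::Callbacks::Callback base class path; see the Callback class for the actual hook names.

class LoggingCallback < DNN::Callbacks::Callback
  def after_epoch
    # model is set by #add_callback, so last_log is accessible here.
    puts "finished epoch #{model.last_log[:epoch]}"
  end
end

model.add_callback(LoggingCallback.new)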

#built? ⇒ Boolean

Returns whether the model has already been built.

Returns:

  • (Boolean)

    true if the model has already been built.



# File 'lib/dnn/core/models.rb', line 413

def built?
  @built
end

#call(inputs) ⇒ Object



# File 'lib/dnn/core/models.rb', line 129

def call(inputs)
  @layers_cache = nil
  output_tensor = forward(inputs)
  @built = true unless @built
  output_tensor
end

#clean_layers ⇒ Object



# File 'lib/dnn/core/models.rb', line 417

def clean_layers
  layers.each(&:clean)
  @loss_func.clean
  @layers_cache = nil
end

#clear_callbacks ⇒ Object

Clear the callback functions registered for each event.



# File 'lib/dnn/core/models.rb', line 367

def clear_callbacks
  @callbacks = []
end

#copy ⇒ DNN::Models::Model

Returns a copy of this model.

Returns:

  • (DNN::Models::Model)

    A copy of this model.

# File 'lib/dnn/core/models.rb', line 393

def copy
  Marshal.load(Marshal.dump(self))
end
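
Because #copy round-trips through Marshal, the result is a deep copy that shares no parameter data with the original. A quick sketch:

copied = model.copy
copied.equal?(model) # => false; training copied will not affect model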

#evaluate(x, y, batch_size: 100) ⇒ Array

Evaluate the model and get the accuracy and mean loss on the test data.

Parameters:

  • x (Numo::SFloat)

    Input test data.

  • y (Numo::SFloat)

    Output test data.

  • batch_size (Integer) (defaults to: 100)

    Batch size used for one test.

Returns:

  • (Array)

    Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].



# File 'lib/dnn/core/models.rb', line 277

def evaluate(x, y, batch_size: 100)
  check_xy_type(x, y)
  evaluate_by_iterator(Iterator.new(x, y, random: false), batch_size: batch_size)
end
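
A minimal usage sketch, assuming x_test and y_test are Numo::SFloat arrays prepared elsewhere:

accuracy, mean_loss = model.evaluate(x_test, y_test, batch_size: 128)
puts "accuracy: #{accuracy}, loss: #{mean_loss}"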

#evaluate_by_iterator(test_iterator, batch_size: 100) ⇒ Array

Evaluate the model using an iterator.

Parameters:

  • test_iterator (DNN::Iterator)

    Iterator used for testing.

  • batch_size (Integer) (defaults to: 100)

    Batch size used for one test.

Returns:

  • (Array)

    Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].



# File 'lib/dnn/core/models.rb', line 286

def evaluate_by_iterator(test_iterator, batch_size: 100)
  num_test_datas = test_iterator.num_datas
  batch_size = batch_size >= num_test_datas ? num_test_datas : batch_size
  total_correct = 0
  sum_loss = 0
  max_steps = (num_test_datas.to_f / batch_size).ceil
  test_iterator.foreach(batch_size) do |x_batch, y_batch|
    correct, loss_value = test_on_batch(x_batch, y_batch)
    total_correct += correct
    sum_loss += loss_value
  end
  mean_loss = sum_loss / max_steps
  acc = total_correct.to_f / num_test_datas
  @last_log[:test_loss] = mean_loss
  @last_log[:test_accuracy] = acc
  [acc, mean_loss]
end

#get_all_params_data ⇒ Object



# File 'lib/dnn/core/models.rb', line 423

def get_all_params_data
  trainable_layers.map do |layer|
    layer.get_params.to_h do |key, param|
      [key, param.data]
    end
  end
end

#get_layer(name) ⇒ DNN::Layers::Layer

Get a layer the model holds by its name.

Parameters:

  • name (Symbol)

    The name of the layer to get.

Returns:

  • (DNN::Layers::Layer)

    The layer with the given name, or nil if no such layer exists.

# File 'lib/dnn/core/models.rb', line 406

def get_layer(name)
  layer = instance_variable_get("@#{name}")
  return layer if layer.is_a?(Layers::Layer) || layer.is_a?(Chain) || layer.is_a?(LayersList)
  nil
end
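
Because lookup is by instance variable, a layer stored in @dense1 (a hypothetical name) is fetched with the matching symbol:

dense = model.get_layer(:dense1) # => the layer held in @dense1, or nil if absent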

#load_params(file_name) ⇒ Object

Load marshaled parameters.

Parameters:

  • file_name (String)

File name of the marshal params to load.



# File 'lib/dnn/core/models.rb', line 373

def load_params(file_name)
  loader = Loaders::MarshalLoader.new(self)
  loader.load(file_name)
end

#predict(x, use_loss_activation: true) ⇒ Object

Predict data.

Parameters:

  • x (Numo::SFloat)

    Input data.

  • use_loss_activation (Boolean) (defaults to: true)

Apply the loss function's activation when the loss function has one.



# File 'lib/dnn/core/models.rb', line 341

def predict(x, use_loss_activation: true)
  check_xy_type(x)
  DNN.learning_phase = false
  out = call(Tensor.convert(x))
  y = out.data
  if use_loss_activation && @loss_func.class.respond_to?(:activation)
    y = @loss_func.class.activation(y)
  end
  y
end
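
A minimal sketch, assuming x_test is a Numo::SFloat batch:

y = model.predict(x_test)                               # with loss activation applied
raw = model.predict(x_test, use_loss_activation: false) # raw network output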

#predict1(x, use_loss_activation: true) ⇒ Object

Predict a single piece of data.

Parameters:

  • x (Numo::SFloat)

Input data for a single sample (without the batch dimension).



# File 'lib/dnn/core/models.rb', line 354

def predict1(x, use_loss_activation: true)
  check_xy_type(x)
  predict(x.reshape(1, *x.shape), use_loss_activation: use_loss_activation)[0, false]
end

#save(file_name) ⇒ Object

Save the model in marshal format.

Parameters:

  • file_name (String)

File name to save the model to.



# File 'lib/dnn/core/models.rb', line 380

def save(file_name)
  saver = Savers::MarshalSaver.new(self, include_model: true)
  saver.save(file_name)
end

#save_params(file_name) ⇒ Object

Save the params in marshal format.

Parameters:

  • file_name (String)

File name to save the params to.



# File 'lib/dnn/core/models.rb', line 387

def save_params(file_name)
  saver = Savers::MarshalSaver.new(self, include_model: false)
  saver.save(file_name)
end
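
A sketch contrasting the two formats: #save stores the whole model for Model.load, while #save_params stores only the weights, to be restored with #load_params on an already-constructed model. The file names and other_model are hypothetical.

model.save("model.marshal")          # whole model; restore with Model.load
model.save_params("weights.marshal") # params only; restore with #load_params
other_model.load_params("weights.marshal")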

#set_all_params_data(params_data) ⇒ Object



# File 'lib/dnn/core/models.rb', line 431

def set_all_params_data(params_data)
  trainable_layers.each.with_index do |layer, i|
    params_data[i].each do |(key, data)|
      layer.get_params[key].data = data
    end
  end
end
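
Paired with #get_all_params_data, this allows copying weights between two models with identical layer structure. A minimal sketch (source_model and target_model are hypothetical names):

params = source_model.get_all_params_data
target_model.set_all_params_data(params) # layer structures must match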

#setup(optimizer, loss_func) ⇒ Object

Set the optimizer and loss function for the model.

Parameters:

  • optimizer (Optimizers::Optimizer)

    Optimizer used for training.

  • loss_func (Losses::Loss)

    Loss function used for training.


# File 'lib/dnn/core/models.rb', line 139

def setup(optimizer, loss_func)
  unless optimizer.is_a?(Optimizers::Optimizer)
    raise TypeError, "optimizer:#{optimizer.class} is not an instance of DNN::Optimizers::Optimizer class."
  end
  unless loss_func.is_a?(Losses::Loss)
    raise TypeError, "loss_func:#{loss_func.class} is not an instance of DNN::Losses::Loss class."
  end
  @optimizer = optimizer
  @loss_func = loss_func
end
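
A minimal sketch; SGD and MeanSquaredError are concrete subclasses assumed to be available under DNN::Optimizers and DNN::Losses:

model.setup(DNN::Optimizers::SGD.new, DNN::Losses::MeanSquaredError.new)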

#test_on_batch(x, y) ⇒ Array

Evaluate the model on a single batch.

Parameters:

  • x (Numo::SFloat)

    Input test data.

  • y (Numo::SFloat)

    Output test data.

Returns:

  • (Array)

Returns the number of correct predictions and the loss for the batch in the form [correct, loss].



# File 'lib/dnn/core/models.rb', line 308

def test_on_batch(x, y)
  call_callbacks(:before_test_on_batch)
  DNN.learning_phase = false
  out = call(Tensor.convert(x))
  correct = accuracy(out.data, y)
  loss = @loss_func.(out, Tensor.convert(y))
  call_callbacks(:after_test_on_batch)
  [correct, loss.data]
end

#train(x, y, epochs, batch_size: 1, initial_epoch: 1, test: nil, verbose: true) ⇒ Object Also known as: fit

Start training. Set up the model before using this method.

Parameters:

  • x (Numo::SFloat)

    Input training data.

  • y (Numo::SFloat)

    Output training data.

  • epochs (Integer)

Number of epochs to train.

  • batch_size (Integer) (defaults to: 1)

    Batch size used for one training.

  • initial_epoch (Integer) (defaults to: 1)

    Initial epoch.

  • test (Array | NilClass) (defaults to: nil)

To test the model after every epoch, specify [x_test, y_test]. To skip testing, specify nil.

  • verbose (Boolean) (defaults to: true)

Set true to display the training log; set false to suppress it.



# File 'lib/dnn/core/models.rb', line 160

def train(x, y, epochs,
          batch_size: 1,
          initial_epoch: 1,
          test: nil,
          verbose: true)
  check_xy_type(x, y)
  train_iterator = Iterator.new(x, y)
  train_by_iterator(train_iterator, epochs,
                    batch_size: batch_size,
                    initial_epoch: initial_epoch,
                    test: test,
                    verbose: verbose)
end
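
A typical call, assuming x_train, y_train, x_test, and y_test are Numo::SFloat arrays:

model.train(x_train, y_train, 10,
            batch_size: 32,
            test: [x_test, y_test],
            verbose: true)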

#train_by_iterator(train_iterator, epochs, batch_size: 1, initial_epoch: 1, test: nil, verbose: true) ⇒ Object Also known as: fit_by_iterator

Start training using an iterator. Set up the model before using this method.

Parameters:

  • train_iterator (DNN::Iterator)

    Iterator used for training.

  • epochs (Integer)

Number of epochs to train.

  • batch_size (Integer) (defaults to: 1)

    Batch size used for one training.

  • initial_epoch (Integer) (defaults to: 1)

    Initial epoch.

  • test (Array | NilClass) (defaults to: nil)

To test the model after every epoch, specify [x_test, y_test]. To skip testing, specify nil.

  • verbose (Boolean) (defaults to: true)

Set true to display the training log; set false to suppress it.

Raises:

  • (DNN_Error)

    If the optimizer or loss function has not been set up.

# File 'lib/dnn/core/models.rb', line 185

def train_by_iterator(train_iterator, epochs,
                      batch_size: 1,
                      initial_epoch: 1,
                      test: nil,
                      verbose: true)
  raise DNN_Error, "The model is not optimizer setup complete." unless @optimizer
  raise DNN_Error, "The model is not loss_func setup complete." unless @loss_func

  num_train_datas = train_iterator.num_datas
  num_train_datas = num_train_datas / batch_size * batch_size if train_iterator.last_round_down

  stopped = catch(:stop) do
    (initial_epoch..epochs).each do |epoch|
      @last_log[:epoch] = epoch
      call_callbacks(:before_epoch)
      puts "【 epoch #{epoch}/#{epochs} 】" if verbose

      train_iterator.foreach(batch_size) do |x_batch, y_batch, index|
        train_step_met = train_step(x_batch, y_batch)
        num_trained_datas = (index + 1) * batch_size
        num_trained_datas = num_trained_datas > num_train_datas ? num_train_datas : num_trained_datas
        log = "\r"
        40.times do |i|
          if i < num_trained_datas * 40 / num_train_datas
            log << "="
          elsif i == num_trained_datas * 40 / num_train_datas
            log << ">"
          else
            log << "_"
          end
        end

        log << "  #{num_trained_datas}/#{num_train_datas} "
        log << metrics_to_str(train_step_met)
        print log if verbose
      end

      if test
        acc, loss = if test.is_a?(Array)
                      evaluate(test[0], test[1], batch_size: batch_size)
                    else
                      evaluate_by_iterator(test, batch_size: batch_size)
                    end
        print "  " + metrics_to_str({ accuracy: acc, test_loss: loss }) if verbose
      end
      puts "" if verbose
      call_callbacks(:after_epoch)
    end
    nil
  end

  if stopped
    puts "\n#{stopped}" if verbose
  end
end

#train_on_batch(x, y) ⇒ Float | Numo::SFloat

Train on a single batch. Set up the model before using this method.

Parameters:

  • x (Numo::SFloat)

    Input training data.

  • y (Numo::SFloat)

    Output training data.

Returns:

  • (Float | Numo::SFloat)

Returns the loss value as a Float or Numo::SFloat.

Raises:

  • (DNN_Error)

    If the optimizer or loss function has not been set up.

# File 'lib/dnn/core/models.rb', line 257

def train_on_batch(x, y)
  raise DNN_Error, "The model is not optimizer setup complete." unless @optimizer
  raise DNN_Error, "The model is not loss_func setup complete." unless @loss_func
  check_xy_type(x, y)
  call_callbacks(:before_train_on_batch)
  DNN.learning_phase = true
  out = call(Tensor.convert(x))
  loss = @loss_func.loss(out, Tensor.convert(y), layers)
  loss.link.backward(Xumo::SFloat.zeros(y[0...1, false].shape))
  @optimizer.update(get_all_trainable_params)
  @last_log[:train_loss] = loss.data
  call_callbacks(:after_train_on_batch)
  loss.data
end
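
This method is the building block for custom training loops. A minimal sketch using DNN::Iterator, assuming (as in #train_by_iterator above) that each foreach call makes one pass over the data, with x_train and y_train prepared elsewhere:

iterator = DNN::Iterator.new(x_train, y_train)
10.times do
  iterator.foreach(32) do |x_batch, y_batch|
    loss = model.train_on_batch(x_batch, y_batch) # returns the batch loss
  end
end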

#trainable_layers ⇒ Array

Get all the trainable layers.

Returns:

  • (Array)

Array of all layers that have parameters.



# File 'lib/dnn/core/models.rb', line 399

def trainable_layers
  layers.select { |layer| layer.is_a?(Layers::TrainableLayer) }
end