Class: DNN::Models::Model

Inherits:
Chain
  • Object
Defined in:
lib/dnn/core/models.rb

Overview

This class represents a neural network model and handles training, evaluation, prediction, and serialization.
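
A typical workflow builds a model (usually via the Sequential subclass), attaches an optimizer and a loss function with #setup, then trains and evaluates it. A minimal sketch, assuming the Dense and ReLU layers, Adam optimizer, and SoftmaxCrossEntropy loss that ship with the library; x_train, y_train, x_test, and y_test are Numo::SFloat arrays prepared elsewhere:

require "dnn"

# Build a small classifier with the Sequential subclass.
model = DNN::Models::Sequential.new
model << DNN::Layers::InputLayer.new(784)
model << DNN::Layers::Dense.new(128)
model << DNN::Layers::ReLU.new
model << DNN::Layers::Dense.new(10)

# Attach optimizer and loss, train, then evaluate.
model.setup(DNN::Optimizers::Adam.new, DNN::Losses::SoftmaxCrossEntropy.new)
model.train(x_train, y_train, 10, batch_size: 128, test: [x_test, y_test])
accuracy, loss = model.evaluate(x_test, y_test)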

Direct Known Subclasses

Sequential

Instance Attribute Summary

Class Method Summary

Instance Method Summary

Methods inherited from Chain

#forward, #layers

Constructor Details

#initialize ⇒ Model

Returns a new instance of Model.



# File 'lib/dnn/core/models.rb', line 93

def initialize
  super
  @optimizer = nil
  @loss_func = nil
  @built = false
  @callbacks = []
  @last_log = {}
end

Instance Attribute Details

#last_log ⇒ Object (readonly)

Returns the value of attribute last_log.



# File 'lib/dnn/core/models.rb', line 81

def last_log
  @last_log
end

#loss_func ⇒ Object

Returns the value of attribute loss_func.



# File 'lib/dnn/core/models.rb', line 80

def loss_func
  @loss_func
end

#optimizer ⇒ Object

Returns the value of attribute optimizer.



# File 'lib/dnn/core/models.rb', line 79

def optimizer
  @optimizer
end

Class Method Details

.load(file_name) ⇒ DNN::Models::Model

Load a marshaled model.

Parameters:

  • file_name (String)

    File name of the marshaled model to load.

Returns:

  • (DNN::Models::Model)

    The loaded model.


# File 'lib/dnn/core/models.rb', line 86

def self.load(file_name)
  model = self.allocate
  loader = Loaders::MarshalLoader.new(model)
  loader.load(file_name)
  model
end
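
For example, a model saved with #save can be restored later (a sketch; "model.marshal" is a hypothetical file name):

model = DNN::Models::Model.load("model.marshal")
accuracy, loss = model.evaluate(x_test, y_test)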

Instance Method Details

#add_callback(callback) ⇒ Object

Add a callback function.

Parameters:

  • callback (Callback)

    Callback object.



# File 'lib/dnn/core/models.rb', line 338

def add_callback(callback)
  callback.model = self
  @callbacks << callback
end
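
The model fires hooks such as before_epoch, after_epoch, and after_train_on_batch on registered callbacks (see the call_callbacks calls in the methods below). A minimal sketch, assuming the library's Callbacks::Callback base class (the declared parameter type) provides no-op defaults for each hook:

# Hypothetical callback that logs after every epoch.
# The model accessor is assigned by Model#add_callback.
class EpochLogger < DNN::Callbacks::Callback
  def after_epoch
    puts "epoch #{model.last_log[:epoch]} done, train loss: #{model.last_log[:train_loss]}"
  end
end

model.add_callback(EpochLogger.new)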

#built? ⇒ Boolean

Returns whether the model has already been built.

Returns:

  • (Boolean)

    true if the model has already been built.



# File 'lib/dnn/core/models.rb', line 390

def built?
  @built
end

#call(input_tensors) ⇒ Object

Call the model. Runs the forward pass on the given input tensors and marks the model as built.



# File 'lib/dnn/core/models.rb', line 102

def call(input_tensors)
  output_tensor = forward(input_tensors)
  @built = true unless @built
  output_tensor
end

#clean_layers ⇒ Object

Clean all layers.



# File 'lib/dnn/core/models.rb', line 395

def clean_layers
  layers.each(&:clean)
  @loss_func.clean
  @layers_cache = nil
end

#clear_callbacks ⇒ Object

Clear all callbacks registered for each event.



# File 'lib/dnn/core/models.rb', line 344

def clear_callbacks
  @callbacks = []
end

#copy ⇒ DNN::Models::Model

Returns a copy of this model.

Returns:

  • (DNN::Models::Model)

    A deep copy of this model, created via a Marshal round trip.


# File 'lib/dnn/core/models.rb', line 370

def copy
  Marshal.load(Marshal.dump(self))
end

#evaluate(x, y, batch_size: 100) ⇒ Array

Evaluate the model and get the accuracy and mean loss on the test data.

Parameters:

  • x (Numo::SFloat)

    Input test data.

  • y (Numo::SFloat)

    Output test data.

  • batch_size (Integer) (defaults to: 100)

    Batch size used for one test.

Returns:

  • (Array)

    Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].



# File 'lib/dnn/core/models.rb', line 249

def evaluate(x, y, batch_size: 100)
  check_xy_type(x, y)
  evaluate_by_iterator(Iterator.new(x, y, random: false), batch_size: batch_size)
end
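
For example (a sketch; x_test and y_test are Numo::SFloat arrays):

accuracy, mean_loss = model.evaluate(x_test, y_test, batch_size: 200)
puts "accuracy: #{accuracy}, mean loss: #{mean_loss}"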

#evaluate_by_iterator(test_iterator, batch_size: 100) ⇒ Array

Evaluate the model using an iterator.

Parameters:

  • test_iterator (DNN::Iterator)

    Iterator used for testing.

  • batch_size (Integer) (defaults to: 100)

    Batch size used for one test.

Returns:

  • (Array)

    Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].



# File 'lib/dnn/core/models.rb', line 258

def evaluate_by_iterator(test_iterator, batch_size: 100)
  num_test_datas = test_iterator.num_datas
  batch_size = batch_size >= num_test_datas ? num_test_datas : batch_size
  total_correct = 0
  sum_loss = 0
  max_steps = (num_test_datas.to_f / batch_size).ceil
  test_iterator.foreach(batch_size) do |x_batch, y_batch|
    correct, loss_value = test_on_batch(x_batch, y_batch)
    total_correct += correct
    sum_loss += loss_value
  end
  mean_loss = sum_loss / max_steps
  acc = total_correct.to_f / num_test_datas
  @last_log[:test_loss] = mean_loss
  @last_log[:test_accuracy] = acc
  [acc, mean_loss]
end

#get_all_params_data ⇒ Array

Get parameter data of all layers.

Returns:

  • (Array)

    Parameter data.



# File 'lib/dnn/core/models.rb', line 403

def get_all_params_data
  trainable_layers.map do |layer|
    layer.get_params.to_h do |key, param|
      [key, param.data]
    end
  end
end

#get_layer(name) ⇒ DNN::Layers::Layer

Get a layer held by the model by its instance variable name.

Parameters:

  • name (Symbol)

    The name of the layer to get.

Returns:

  • (DNN::Layers::Layer)

    The layer with the given name, or nil if none is found.


# File 'lib/dnn/core/models.rb', line 383

def get_layer(name)
  layer = instance_variable_get("@#{name}")
  return layer if layer.is_a?(Layers::Layer) || layer.is_a?(Chain) || layer.is_a?(LayersList)
  nil
end

#load_params(file_name) ⇒ Object

Load marshaled params.

Parameters:

  • file_name (String)

    File name of the marshaled params to load.



# File 'lib/dnn/core/models.rb', line 350

def load_params(file_name)
  loader = Loaders::MarshalLoader.new(self)
  loader.load(file_name)
end

#predict(x, use_loss_activation: true) ⇒ Object

Predict data.

Parameters:

  • x (Numo::SFloat)

    Input data.

  • use_loss_activation (Boolean) (defaults to: true)

    Use loss activation when loss has an activation.



# File 'lib/dnn/core/models.rb', line 313

def predict(x, use_loss_activation: true)
  check_xy_type(x)
  DNN.learning_phase = false
  out = call(Tensor.convert(x))
  y = out.data
  if use_loss_activation && @loss_func.class.respond_to?(:activation)
    y = @loss_func.class.activation(y)
  end
  y
end
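
For example, use_loss_activation switches between the activated output and the raw network output (a sketch for a classifier trained with a softmax-style loss):

probs  = model.predict(x_test)                              # activation applied
scores = model.predict(x_test, use_loss_activation: false)  # raw network output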

#predict1(x, use_loss_activation: true) ⇒ Object

Predict a single datum.

Parameters:

  • x (Numo::SFloat)

    Input data, given as a single datum without the batch dimension.



# File 'lib/dnn/core/models.rb', line 326

def predict1(x, use_loss_activation: true)
  check_xy_type(x)
  input = if x.is_a?(Array)
            x.map { |v| v.reshape(1, *v.shape) }
          else
            x.reshape(1, *x.shape)
          end
  predict(input, use_loss_activation: use_loss_activation)[0, false]
end
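
For example, a single sample can be predicted without manually adding a batch dimension (a sketch; x_test[0, false] selects the first sample):

y = model.predict1(x_test[0, false])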

#save(file_name) ⇒ Object

Save the model in marshal format.

Parameters:

  • file_name (String)

File name to save the model to.



# File 'lib/dnn/core/models.rb', line 357

def save(file_name)
  saver = Savers::MarshalSaver.new(self, include_model: true)
  saver.save(file_name)
end

#save_params(file_name) ⇒ Object

Save the params in marshal format.

Parameters:

  • file_name (String)

File name to save the params to.



# File 'lib/dnn/core/models.rb', line 364

def save_params(file_name)
  saver = Savers::MarshalSaver.new(self, include_model: false)
  saver.save(file_name)
end
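
For example, #save and #save_params differ only in whether the model structure is included (a sketch; the file names are arbitrary):

model.save("model.marshal")          # structure + params, restore with Model.load
model.save_params("params.marshal")  # params only, restore with #load_params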

#set_all_params_data(params_data) ⇒ Object

Set parameter data of all layers.

Parameters:

  • params_data (Array)

    Parameter data obtained by get_all_params_data.



# File 'lib/dnn/core/models.rb', line 413

def set_all_params_data(params_data)
  trainable_layers.each.with_index do |layer, i|
    params_data[i].each do |(key, data)|
      layer.get_params[key].data = data
    end
  end
end
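
Together with #get_all_params_data, this can transfer weights between two models of identical architecture (a sketch; trained_model and fresh_model are assumed to share the same layer structure):

params = trained_model.get_all_params_data
fresh_model.set_all_params_data(params)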

#setup(optimizer, loss_func) ⇒ Object

Set the optimizer and loss function for the model.

Parameters:

  • optimizer (Optimizers::Optimizer)

    Optimizer used for training.

  • loss_func (Losses::Loss)

    Loss function used for training.


# File 'lib/dnn/core/models.rb', line 111

def setup(optimizer, loss_func)
  unless optimizer.is_a?(Optimizers::Optimizer)
    raise TypeError, "optimizer:#{optimizer.class} is not an instance of DNN::Optimizers::Optimizer class."
  end
  unless loss_func.is_a?(Losses::Loss)
    raise TypeError, "loss_func:#{loss_func.class} is not an instance of DNN::Losses::Loss class."
  end
  @optimizer = optimizer
  @loss_func = loss_func
end
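
For example (a sketch using the SGD optimizer and mean-squared-error loss that ship with the library):

# Anything other than an Optimizer and a Loss raises TypeError.
model.setup(DNN::Optimizers::SGD.new, DNN::Losses::MeanSquaredError.new)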

#test_on_batch(x, y) ⇒ Array

Evaluate the model on a single batch.

Parameters:

  • x (Numo::SFloat)

    Input test data.

  • y (Numo::SFloat)

    Output test data.

Returns:

  • (Array)

    Returns the number of correct predictions for the batch and the loss value, in the form [correct, loss].



# File 'lib/dnn/core/models.rb', line 280

def test_on_batch(x, y)
  call_callbacks(:before_test_on_batch)
  DNN.learning_phase = false
  out = call(Tensor.convert(x))
  correct = accuracy(out.data, y)
  loss = @loss_func.(out, Tensor.convert(y))
  call_callbacks(:after_test_on_batch)
  [correct, loss.data]
end

#train(x, y, epochs, batch_size: 1, initial_epoch: 1, test: nil, verbose: true) ⇒ Object Also known as: fit

Start training. Set up the model before using this method.

Parameters:

  • x (Numo::SFloat)

    Input training data.

  • y (Numo::SFloat)

    Output training data.

  • epochs (Integer)

Number of training epochs.

  • batch_size (Integer) (defaults to: 1)

    Batch size used for one training.

  • initial_epoch (Integer) (defaults to: 1)

    Initial epoch.

  • test (Array | NilClass) (defaults to: nil)

To test the model after every epoch, specify [x_test, y_test]. To skip testing, specify nil.

  • verbose (Boolean) (defaults to: true)

Set to true to display the training log; set to false to suppress it.



# File 'lib/dnn/core/models.rb', line 132

def train(x, y, epochs,
          batch_size: 1,
          initial_epoch: 1,
          test: nil,
          verbose: true)
  check_xy_type(x, y)
  train_iterator = Iterator.new(x, y)
  train_by_iterator(train_iterator, epochs,
                    batch_size: batch_size,
                    initial_epoch: initial_epoch,
                    test: test,
                    verbose: verbose)
end
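
For example (a sketch; the data are Numo::SFloat arrays):

model.train(x_train, y_train, 20,
            batch_size: 64,
            test: [x_test, y_test])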

#train_by_iterator(train_iterator, epochs, batch_size: 1, initial_epoch: 1, test: nil, verbose: true) ⇒ Object Also known as: fit_by_iterator

Start training with an iterator. Set up the model before using this method.

Parameters:

  • train_iterator (DNN::Iterator)

    Iterator used for training.

  • epochs (Integer)

Number of training epochs.

  • batch_size (Integer) (defaults to: 1)

    Batch size used for one training.

  • initial_epoch (Integer) (defaults to: 1)

    Initial epoch.

  • test (Array | NilClass) (defaults to: nil)

To test the model after every epoch, specify [x_test, y_test]. To skip testing, specify nil.

  • verbose (Boolean) (defaults to: true)

Set to true to display the training log; set to false to suppress it.

Raises:

  • (DNNError)

    If the optimizer or loss function has not been set up.


# File 'lib/dnn/core/models.rb', line 157

def train_by_iterator(train_iterator, epochs,
                      batch_size: 1,
                      initial_epoch: 1,
                      test: nil,
                      verbose: true)
  raise DNNError, "The model is not optimizer setup complete." unless @optimizer
  raise DNNError, "The model is not loss_func setup complete." unless @loss_func

  num_train_datas = train_iterator.num_datas
  num_train_datas = num_train_datas / batch_size * batch_size if train_iterator.last_round_down

  stopped = catch(:stop) do
    (initial_epoch..epochs).each do |epoch|
      @last_log[:epoch] = epoch
      call_callbacks(:before_epoch)
      puts "【 epoch #{epoch}/#{epochs}" if verbose

      train_iterator.foreach(batch_size) do |x_batch, y_batch, index|
        train_step_met = train_step(x_batch, y_batch)
        num_trained_datas = (index + 1) * batch_size
        num_trained_datas = num_trained_datas > num_train_datas ? num_train_datas : num_trained_datas
        log = "\r"
        40.times do |i|
          if i < num_trained_datas * 40 / num_train_datas
            log << "="
          elsif i == num_trained_datas * 40 / num_train_datas
            log << ">"
          else
            log << "_"
          end
        end

        log << "  #{num_trained_datas}/#{num_train_datas} "
        log << metrics_to_str(train_step_met)
        print log if verbose
      end

      if test
        acc, loss = if test.is_a?(Array)
                      evaluate(test[0], test[1], batch_size: batch_size)
                    else
                      evaluate_by_iterator(test, batch_size: batch_size)
                    end
        print "  " + metrics_to_str({ accuracy: acc, test_loss: loss }) if verbose
      end
      puts "" if verbose
      call_callbacks(:after_epoch)
    end
    nil
  end

  if stopped
    puts "\n#{stopped}" if verbose
  end
end
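
For example, training can be driven by an explicit iterator, which is exactly how #train is implemented (a sketch):

iterator = DNN::Iterator.new(x_train, y_train)
model.train_by_iterator(iterator, 20, batch_size: 64)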

#train_on_batch(x, y) ⇒ Float | Numo::SFloat

Train the model on a single batch. Set up the model before using this method.

Parameters:

  • x (Numo::SFloat)

    Input training data.

  • y (Numo::SFloat)

    Output training data.

Returns:

  • (Float | Numo::SFloat)

Returns the loss value as a Float or Numo::SFloat.

Raises:

  • (DNNError)

    If the optimizer or loss function has not been set up.


# File 'lib/dnn/core/models.rb', line 229

def train_on_batch(x, y)
  raise DNNError, "The model is not optimizer setup complete." unless @optimizer
  raise DNNError, "The model is not loss_func setup complete." unless @loss_func
  check_xy_type(x, y)
  call_callbacks(:before_train_on_batch)
  DNN.learning_phase = true
  out = call(Tensor.convert(x))
  loss = @loss_func.loss(out, Tensor.convert(y), layers)
  loss.link.backward(Xumo::SFloat.ones(y[0...1, false].shape[0], 1))
  @optimizer.update(get_all_trainable_params)
  @last_log[:train_loss] = loss.data
  call_callbacks(:after_train_on_batch)
  loss.data
end
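
For example, a hand-rolled training loop over mini-batches (a sketch using the DNN::Iterator seen in #train, and assuming the iterator can be re-run each epoch):

iterator = DNN::Iterator.new(x_train, y_train)
10.times do |epoch|
  iterator.foreach(32) do |x_batch, y_batch|
    model.train_on_batch(x_batch, y_batch)
  end
  puts "epoch #{epoch + 1}: loss #{model.last_log[:train_loss]}"
end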

#trainable_layers ⇒ Array

Get all trainable layers.

Returns:

  • (Array)

    Array of all layers that have parameters.



# File 'lib/dnn/core/models.rb', line 376

def trainable_layers
  layers.select { |layer| layer.is_a?(Layers::TrainableLayer) }
end