Class: DNN::Models::Model

Inherits:
Object
Defined in:
lib/dnn/core/models.rb

Overview

This class represents the model of a neural network.

Direct Known Subclasses

Sequential

Instance Attribute Summary

Class Method Summary

Instance Method Summary

Constructor Details

#initialize ⇒ Model

Returns a new instance of Model.



# File 'lib/dnn/core/models.rb', line 19

def initialize
  @optimizer = nil
  @loss_func = nil
  @last_link = nil
  @built = false
  @callbacks = []
  @layers_cache = nil
  @last_log = {}
end

Instance Attribute Details

#last_log ⇒ Object (readonly)

Returns the value of attribute last_log.



# File 'lib/dnn/core/models.rb', line 7

def last_log
  @last_log
end

#loss_func ⇒ Object

Returns the value of attribute loss_func.



# File 'lib/dnn/core/models.rb', line 6

def loss_func
  @loss_func
end

#optimizer ⇒ Object

Returns the value of attribute optimizer.



# File 'lib/dnn/core/models.rb', line 5

def optimizer
  @optimizer
end

Class Method Details

.load(file_name) ⇒ DNN::Models::Model

Load a model saved in marshal format.

Parameters:

  • file_name (String)

File name of the marshal model to load.

Returns:

  • (DNN::Models::Model)

    Loaded model.


# File 'lib/dnn/core/models.rb', line 12

def self.load(file_name)
  model = new
  loader = Loaders::MarshalLoader.new(model)
  loader.load(file_name)
  model
end
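
A minimal usage sketch (the file name is hypothetical):

# Restore a model previously saved with #save.
model = DNN::Models::Model.load("trained_model.marshal")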

Instance Method Details

#accuracy(x, y, batch_size: 100) ⇒ Array

Evaluate the model and get the accuracy and mean loss of the test data.

Parameters:

  • x (Numo::SFloat)

    Input test data.

  • y (Numo::SFloat)

    Output test data.

  • batch_size (Integer) (defaults to: 100)

    Batch size used for one test.

Returns:

  • (Array)

    Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].



# File 'lib/dnn/core/models.rb', line 177

def accuracy(x, y, batch_size: 100)
  check_xy_type(x, y)
  num_test_datas = x.is_a?(Array) ? x[0].shape[0] : x.shape[0]
  # Clamp the batch size to the number of test samples.
  batch_size = batch_size >= num_test_datas ? num_test_datas : batch_size
  iter = Iterator.new(x, y, random: false)
  total_correct = 0
  sum_loss = Xumo::SFloat[0]
  max_steps = (num_test_datas.to_f / batch_size).ceil
  iter.foreach(batch_size) do |x_batch, y_batch|
    correct, loss_value = test_on_batch(x_batch, y_batch)
    total_correct += correct
    sum_loss += loss_value
  end
  mean_loss = sum_loss / max_steps
  acc = total_correct.to_f / num_test_datas
  @last_log[:test_loss] = mean_loss
  @last_log[:test_accuracy] = acc
  [acc, mean_loss]
end
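
A usage sketch, assuming a set-up, trained model and hypothetical Numo::SFloat test arrays x_test and y_test:

acc, mean_loss = model.accuracy(x_test, y_test, batch_size: 100)
puts "test accuracy: #{acc}"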

#add_callback(callback) ⇒ Object

Add a callback object.

Parameters:

  • callback (Callback)

    Callback object.



# File 'lib/dnn/core/models.rb', line 250

def add_callback(callback)
  callback.model = self
  @callbacks << callback
end
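
A sketch of a custom callback; the class name is hypothetical, and it assumes that only the event methods a callback defines are invoked (the events visible in this source are before_epoch, after_epoch, before/after_train_on_batch, and before/after_test_on_batch):

class EpochLogger
  attr_accessor :model # add_callback assigns the model here

  # Invoked for the :after_epoch event.
  def after_epoch
    puts "finished epoch #{model.last_log[:epoch]}"
  end
end

model.add_callback(EpochLogger.new)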

#built? ⇒ Boolean

Returns true if the model has already been built.

Returns:

  • (Boolean)

True if the model has already been built.



# File 'lib/dnn/core/models.rb', line 307

def built?
  @built
end

#clear_callbacks ⇒ Object

Clear the callbacks registered for each event.



# File 'lib/dnn/core/models.rb', line 256

def clear_callbacks
  @callbacks = []
end

#copy ⇒ DNN::Models::Model

Returns a copy of this model.

Returns:

  • (DNN::Models::Model)

    A copy of this model.


# File 'lib/dnn/core/models.rb', line 269

def copy
  Marshal.load(Marshal.dump(self))
end

#get_layer(name) ⇒ DNN::Layers::Layer

Get a layer of the model by its name.

Parameters:

  • name (Symbol)

    The name of the layer to get.

Returns:

  • (DNN::Layers::Layer)

    The layer with the given name, or nil if no layer matches.


# File 'lib/dnn/core/models.rb', line 302

def get_layer(name)
  layers.find { |layer| layer.name == name }
end
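
A usage sketch (:dense1 is a hypothetical layer name):

layer = model.get_layer(:dense1)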

#has_param_layers ⇒ Array

Get all layers that have parameters.

Returns:

  • (Array)

Array of all layers that have parameters.



# File 'lib/dnn/core/models.rb', line 295

def has_param_layers
  layers.select { |layer| layer.is_a?(Layers::HasParamLayer) }
end

#layers ⇒ Array

Get all layers of the model.

Returns:

  • (Array)

Array of all layers.

Raises:

  • (DNN_Error)

    If the model is not built.


# File 'lib/dnn/core/models.rb', line 275

def layers
  raise DNN_Error, "This model is not built. You need to build this model using predict or train." unless built?
  return @layers_cache if @layers_cache
  layers = []
  get_layers = -> link do
    return unless link
    layers.unshift(link.layer)
    if link.is_a?(TwoInputLink)
      get_layers.(link.prev1)
      get_layers.(link.prev2)
    else
      get_layers.(link.prev)
    end
  end
  get_layers.(@last_link)
  @layers_cache = layers.uniq
end
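
A usage sketch; the model must already be built (for example by predict or train):

# List the class of every layer in the model.
model.layers.each { |layer| puts layer.class }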

#predict(x, use_loss_activation: true) ⇒ Object

Predict data.

Parameters:

  • x (Numo::SFloat)

    Input data.

  • use_loss_activation (Boolean) (defaults to: true)

    Use loss activation when loss has an activation.



# File 'lib/dnn/core/models.rb', line 232

def predict(x, use_loss_activation: true)
  check_xy_type(x)
  y = forward(x, false)
  if use_loss_activation && @loss_func.class.respond_to?(:activation)
    y = @loss_func.class.activation(y)
  end
  y
end
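
A usage sketch with a hypothetical input batch x_test:

y_pred = model.predict(x_test)
# Pass use_loss_activation: false to get the raw output without the
# loss function's activation.
y_raw = model.predict(x_test, use_loss_activation: false)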

#predict1(x, use_loss_activation: true) ⇒ Object

Predict a single piece of data.

Parameters:

  • x (Numo::SFloat)

Input data. However, x is a single piece of data without a batch dimension.



# File 'lib/dnn/core/models.rb', line 243

def predict1(x, use_loss_activation: true)
  check_xy_type(x)
  predict(x.reshape(1, *x.shape), use_loss_activation: use_loss_activation)[0, false]
end
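
A usage sketch; x_test[0, false] takes the first sample of a hypothetical batch without its batch dimension (Numo indexing):

y = model.predict1(x_test[0, false])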

#save(file_name, include_optimizer: true) ⇒ Object

Save the model in marshal format.

Parameters:

  • file_name (String)

File name to save the model to.

  • include_optimizer (Boolean) (defaults to: true)

Set true to include the optimizer status in the saved data.



# File 'lib/dnn/core/models.rb', line 263

def save(file_name, include_optimizer: true)
  saver = Savers::MarshalSaver.new(self, include_optimizer: include_optimizer)
  saver.save(file_name)
end
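
A usage sketch (file names are hypothetical):

model.save("trained_model.marshal")
# Omit the optimizer status for a smaller file:
model.save("trained_model_small.marshal", include_optimizer: false)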

#setup(optimizer, loss_func) ⇒ Object

Set the optimizer and loss function of the model.

Parameters:

  • optimizer (Optimizers::Optimizer)

    Optimizer to use for training.

  • loss_func (Losses::Loss)

    Loss function to use.


# File 'lib/dnn/core/models.rb', line 32

def setup(optimizer, loss_func)
  unless optimizer.is_a?(Optimizers::Optimizer)
    raise TypeError, "optimizer:#{optimizer.class} is not an instance of DNN::Optimizers::Optimizer class."
  end
  unless loss_func.is_a?(Losses::Loss)
    raise TypeError, "loss_func:#{loss_func.class} is not an instance of DNN::Losses::Loss class."
  end
  @optimizer = optimizer
  @loss_func = loss_func
end
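
A usage sketch; Adam and SoftmaxCrossEntropy are assumed to be among the library's bundled optimizers and losses:

model.setup(DNN::Optimizers::Adam.new, DNN::Losses::SoftmaxCrossEntropy.new)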

#test_on_batch(x, y) ⇒ Array

Evaluate the model on one batch.

Parameters:

  • x (Numo::SFloat)

    Input test data.

  • y (Numo::SFloat)

    Output test data.

Returns:

  • (Array)

Returns the number of correct predictions and the loss value in the form [correct, loss_value].



# File 'lib/dnn/core/models.rb', line 201

def test_on_batch(x, y)
  call_callbacks(:before_test_on_batch)
  x = forward(x, false)
  correct = evaluate(x, y)
  loss_value = @loss_func.loss(x, y, layers)
  call_callbacks(:after_test_on_batch)
  [correct, loss_value]
end
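
A usage sketch with hypothetical batch arrays:

correct, loss = model.test_on_batch(x_batch, y_batch)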

#train(x, y, epochs, batch_size: 1, initial_epoch: 1, test: nil, verbose: true) ⇒ Object Also known as: fit

Start training. Set up the model before using this method.

Parameters:

  • x (Numo::SFloat)

    Input training data.

  • y (Numo::SFloat)

    Output training data.

  • epochs (Integer)

Number of training epochs.

  • batch_size (Integer) (defaults to: 1)

    Batch size used for one training.

  • initial_epoch (Integer) (defaults to: 1)

    Initial epoch.

  • test (Array | NilClass) (defaults to: nil)

To test the model after every epoch, specify [x_test, y_test]. To skip testing, specify nil.

  • verbose (Boolean) (defaults to: true)

Set true to display the training log. If false, the log is not displayed.



# File 'lib/dnn/core/models.rb', line 53

def train(x, y, epochs,
          batch_size: 1,
          initial_epoch: 1,
          test: nil,
          verbose: true)
  check_xy_type(x, y)
  train_iterator = Iterator.new(x, y)
  train_by_iterator(train_iterator, epochs,
                    batch_size: batch_size,
                    initial_epoch: initial_epoch,
                    test: test,
                    verbose: verbose)
end
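
An end-to-end sketch with hypothetical random data; the shapes assume a model that takes 784 inputs and outputs 10 classes:

x_train = Numo::SFloat.new(1000, 784).rand
y_train = Numo::SFloat.new(1000, 10).rand # placeholder labels for the sketch
model.train(x_train, y_train, 10, batch_size: 128)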

#train_by_iterator(train_iterator, epochs, batch_size: 1, initial_epoch: 1, test: nil, verbose: true) ⇒ Object Also known as: fit_by_iterator

Start training by iterator. Set up the model before using this method.

Parameters:

  • train_iterator (Iterator)

    Iterator used for training.

  • epochs (Integer)

Number of training epochs.

  • batch_size (Integer) (defaults to: 1)

    Batch size used for one training.

  • initial_epoch (Integer) (defaults to: 1)

    Initial epoch.

  • test (Array | NilClass) (defaults to: nil)

To test the model after every epoch, specify [x_test, y_test]. To skip testing, specify nil.

  • verbose (Boolean) (defaults to: true)

Set true to display the training log. If false, the log is not displayed.

Raises:

  • (DNN_Error)

    If the optimizer or loss function has not been set by setup.


# File 'lib/dnn/core/models.rb', line 78

def train_by_iterator(train_iterator, epochs,
                      batch_size: 1,
                      initial_epoch: 1,
                      test: nil,
                      verbose: true)
  raise DNN_Error, "The model's optimizer is not set. Please use the setup method." unless @optimizer
  raise DNN_Error, "The model's loss function is not set. Please use the setup method." unless @loss_func

  num_train_datas = train_iterator.num_datas
  num_train_datas = num_train_datas / batch_size * batch_size if train_iterator.last_round_down

  stopped = catch(:stop) do
    (initial_epoch..epochs).each do |epoch|
      @last_log[:epoch] = epoch
      call_callbacks(:before_epoch)
      puts "【 epoch #{epoch}/#{epochs}" if verbose

      train_iterator.foreach(batch_size) do |x_batch, y_batch, index|
        train_step_met = train_step(x_batch, y_batch)
        num_trained_datas = (index + 1) * batch_size
        num_trained_datas = num_trained_datas > num_train_datas ? num_train_datas : num_trained_datas
        log = "\r"
        40.times do |i|
          if i < num_trained_datas * 40 / num_train_datas
            log << "="
          elsif i == num_trained_datas * 40 / num_train_datas
            log << ">"
          else
            log << "_"
          end
        end

        log << "  #{num_trained_datas}/#{num_train_datas} "
        log << metrics_to_str(train_step_met)
        print log if verbose
      end

      if test
        test_met = test(test[0], test[1], batch_size: batch_size)
        print "  " + metrics_to_str(test_met) if verbose
      end
      puts "" if verbose
      call_callbacks(:after_epoch)
    end
    nil
  end

  if stopped
    puts "\n#{stopped}" if verbose
  end
end
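
A sketch with an explicit iterator, assuming the Iterator class used by train above is DNN::Iterator:

iter = DNN::Iterator.new(x_train, y_train)
model.train_by_iterator(iter, 10, batch_size: 128)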

#train_on_batch(x, y) ⇒ Float | Numo::SFloat

Train on a single batch. Set up the model before using this method.

Parameters:

  • x (Numo::SFloat)

    Input training data.

  • y (Numo::SFloat)

    Output training data.

Returns:

  • (Float | Numo::SFloat)

Returns the loss value as a Float or Numo::SFloat.

Raises:

  • (DNN_Error)

    If the optimizer or loss function has not been set by setup.


# File 'lib/dnn/core/models.rb', line 156

def train_on_batch(x, y)
  raise DNN_Error, "The model's optimizer is not set. Please use the setup method." unless @optimizer
  raise DNN_Error, "The model's loss function is not set. Please use the setup method." unless @loss_func
  check_xy_type(x, y)
  call_callbacks(:before_train_on_batch)
  x = forward(x, true)
  loss_value = @loss_func.loss(x, y, layers)
  dy = @loss_func.backward(x, y)
  backward(dy)
  @optimizer.update(layers)
  @loss_func.regularizers_backward(layers)
  @last_log[:train_loss] = loss_value
  call_callbacks(:after_train_on_batch)
  loss_value
end
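
A custom training-loop sketch built on train_on_batch, again assuming DNN::Iterator and hypothetical training arrays:

iter = DNN::Iterator.new(x_train, y_train)
iter.foreach(128) do |x_batch, y_batch|
  loss = model.train_on_batch(x_batch, y_batch)
  puts "batch loss: #{loss.mean}" if loss.is_a?(Numo::SFloat)
end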