Module: Qoa::Training

Includes:
Err::Validations, MatrixHelpers
Included in:
NeuralNetwork
Defined in:
lib/qoa/training.rb

Instance Method Summary

#backward_pass(inputs, targets, layer_outputs) ⇒ Object
#calculate_regularization_penalty(weights, l1_lambda, l2_lambda) ⇒ Object
#conv_weight_delta(layer, gradients, inputs) ⇒ Object
#convolution(layer, inputs) ⇒ Object
#forward_pass(inputs) ⇒ Object
#pool_weight_delta(layer, gradients, inputs) ⇒ Object
#pooling(layer, inputs) ⇒ Object
#train(inputs, targets) ⇒ Object
#train_batch(batch) ⇒ Object
#train_with_early_stopping(inputs, targets, validation_inputs, validation_targets, max_epochs, patience) ⇒ Object

Methods included from Err::Validations

#validate_calculate_loss_args, #validate_constructor_args, #validate_query_args, #validate_train_args

Methods included from MatrixHelpers

#apply_dropout, #apply_function, #matrix_add, #matrix_multiply, #matrix_multiply_element_wise, #matrix_pow, #matrix_subtract, #mean, #normalize, #scalar_add, #scalar_multiply, #scale_and_shift, #transpose, #update_beta, #update_gamma, #variance

Instance Method Details

#backward_pass(inputs, targets, layer_outputs) ⇒ Object



# File 'lib/qoa/training.rb', line 101

def backward_pass(inputs, targets, layer_outputs)
  derivative_func = "#{@activation_func}_derivative"
  inputs = inputs.map { |x| [x] } # Convert to column vector
  targets = targets.map { |x| [x] } # Convert to column vector

  # Compute errors, starting at the output layer and walking back to the input
  errors = [matrix_subtract(targets, layer_outputs.last)]
  (@layers.size - 2).downto(0) do |i|
    errors << matrix_multiply(transpose(@layers[i + 1].weights), errors.last)
  end
  errors.reverse! # accumulated output-to-input; reorder so errors[i] lines up with @layers[i]

  # Compute weight deltas
  weight_deltas = []
  @layers.each_with_index do |layer, i|
    gradients = matrix_multiply_element_wise(errors[i], apply_function(layer_outputs[i + 1], ActivationFunctions.method(derivative_func)))
    if layer.is_a?(Qoa::Layers::ConvolutionalLayer)
      w_delta = conv_weight_delta(layer, gradients, layer_outputs[i])
    elsif layer.is_a?(Qoa::Layers::PoolingLayer)
      w_delta = pool_weight_delta(layer, gradients, layer_outputs[i])
    else
      w_delta = matrix_multiply(gradients, transpose(layer_outputs[i]))
    end
    weight_deltas << w_delta
  end

  weight_deltas
end
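
A minimal sketch of how backward_pass pairs with forward_pass for a single sample. The network, the sample data, and the assumption that these mixed-in methods are callable on a NeuralNetwork instance are all hypothetical:

  net = Qoa::NeuralNetwork.new # hypothetical; constructor arguments omitted
  outputs = net.forward_pass([0.1, 0.9])                 # activations for every layer
  deltas = net.backward_pass([0.1, 0.9], [1.0], outputs)
  # deltas[i] is the weight-delta matrix accumulated for @layers[i]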

#calculate_regularization_penalty(weights, l1_lambda, l2_lambda) ⇒ Object



# File 'lib/qoa/training.rb', line 129

def calculate_regularization_penalty(weights, l1_lambda, l2_lambda)
  # L1 term: l1_lambda * sign(w); use 0 as the subgradient at w == 0
  l1_penalty = weights.map do |row|
    row.nil? ? nil : row.map { |x| x.nil? ? nil : (x < 0 ? -1 : (x > 0 ? 1 : 0)) }
  end
  l1_penalty = scalar_multiply(l1_lambda, l1_penalty)

  # L2 term: l2_lambda * w
  l2_penalty = scalar_multiply(l2_lambda, weights)

  matrix_add(l1_penalty, l2_penalty)
end
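
For a single weight w the returned penalty is l1_lambda * sign(w) + l2_lambda * w (an elastic-net term). A hand-worked example with hypothetical lambdas:

  weights = [[0.5, -2.0]]
  # L1 terms: signs [[1, -1]] scaled by l1_lambda = 0.01 -> [[0.01, -0.01]]
  # L2 terms: [[0.5, -2.0]] scaled by l2_lambda = 0.1    -> [[0.05, -0.2]]
  calculate_regularization_penalty(weights, 0.01, 0.1)
  # => [[0.06, -0.21]] (up to floating-point rounding)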

#conv_weight_delta(layer, gradients, inputs) ⇒ Object



# File 'lib/qoa/training.rb', line 179

def conv_weight_delta(layer, gradients, inputs)
  kernel_size = layer.kernel_size
  stride = layer.stride # read but not applied; each_cons always slides by one element

  # NOTE: gradients does not enter this computation as written; each kernel
  # row is simply dotted with every length-kernel_size window of the inputs.
  deltas = layer.weights.map do |row|
    inputs.each_cons(kernel_size).map do |input_slice|
      row.zip(input_slice).map { |a, b| a * b }.reduce(:+)
    end
  end

  deltas
end
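
A hand-worked delta row for one kernel, with hypothetical values:

  # kernel row [1, -1], inputs [3, 5, 2], kernel_size 2
  # windows: [3, 5] and [5, 2]
  # delta row: [3*1 + 5*(-1), 5*1 + 2*(-1)] = [-2, 3]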

#convolution(layer, inputs) ⇒ Object



# File 'lib/qoa/training.rb', line 140

def convolution(layer, inputs)
  output_size = layer.output_size
  kernel_size = layer.kernel_size
  stride = layer.stride # read but not applied; each_cons always slides by one element

  output = Array.new(output_size) { Array.new(inputs.length - kernel_size + 1) }
  layer.weights.each_with_index do |row, i|
    inputs.each_cons(kernel_size).each_with_index do |input_slice, j|
      output[i][j] = row.zip(input_slice).map { |a, b| a * b }.reduce(:+)
    end
  end

  output
end
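
A hand-worked example of the sliding dot product for one kernel row, with hypothetical values:

  # kernel row [1, 0, -1], inputs [3, 5, 2, 8], kernel_size 3
  # windows: [3, 5, 2] and [5, 2, 8]
  # output row: [3*1 + 5*0 + 2*(-1), 5*1 + 2*0 + 8*(-1)] = [1, -3]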

#forward_pass(inputs) ⇒ Object



# File 'lib/qoa/training.rb', line 79

def forward_pass(inputs)
  inputs = inputs.map { |x| [x] } # Convert to column vector

  layer_outputs = [inputs]
  @layers.each_with_index do |layer, i|
    if layer.is_a?(Qoa::Layers::ConvolutionalLayer)
      layer_inputs = convolution(layer, layer_outputs[-1])
    elsif layer.is_a?(Qoa::Layers::PoolingLayer)
      layer_inputs = pooling(layer, layer_outputs[-1])
    else
      layer_inputs = matrix_multiply(layer.weights, layer_outputs[-1])
    end

    layer_outputs << apply_function(layer_inputs, ActivationFunctions.method(@activation_func))

    # Apply dropout to hidden layers
    layer_outputs[-1] = apply_dropout(layer_outputs[-1], @dropout_rate) if i < @layers.size - 2
  end

  layer_outputs
end
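
The returned array has one entry per layer boundary: index 0 holds the inputs as a column vector and each subsequent entry holds a layer's activations. A hypothetical call:

  outputs = net.forward_pass([0.2, 0.7]) # net is hypothetical
  outputs.size  # => @layers.size + 1
  outputs.first # => [[0.2], [0.7]], the inputs as a column vector
  outputs.last  # the network's prediction, also a column vector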

#pool_weight_delta(layer, gradients, inputs) ⇒ Object



# File 'lib/qoa/training.rb', line 192

def pool_weight_delta(layer, gradients, inputs)
  pool_size = layer.pool_size
  stride = layer.stride

  # Route the gradient only to the max element of each pooling window;
  # note that gradients is indexed by the position inside the window as written
  deltas = inputs.each_slice(stride).map do |input_slice|
    input_slice.each_cons(pool_size).map do |pool_slice|
      max_index = pool_slice.each_with_index.max[1]
      pool_slice.map.with_index { |v, i| (i == max_index) ? v * gradients[i] : 0 }
    end
  end

  deltas
end
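
Within each window only the max position keeps a gradient contribution; every other position is zeroed. A hand-worked window with hypothetical values:

  # pool_slice [0.2, 0.9, 0.4] -> max_index 1
  # result: [0, 0.9 * gradients[1], 0]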

#pooling(layer, inputs) ⇒ Object



# File 'lib/qoa/training.rb', line 155

def pooling(layer, inputs)
  output_size = layer.output_size
  pool_size = layer.pool_size
  stride = layer.stride || 1

  # Number of pooling windows that fit in each input row
  output_columns = inputs[0].length - pool_size
  output_columns = output_columns <= 0 ? 1 : (output_columns / stride.to_f).ceil + 1

  output = Array.new(output_size) { Array.new(output_columns) }

  (0...output_size).each do |i|
    (0...output_columns).each do |j|
      start_idx = j * stride
      end_idx = start_idx + pool_size - 1
      next if inputs[i].nil? || inputs[i].length < end_idx + 1 # skip windows that would run past the end of this row
      pool_slice = inputs[i].slice(start_idx..end_idx)
      output[i][j] = pool_slice.max
    end
  end

  output
end
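
A hand-worked max-pooling row with hypothetical values:

  # input row [3, 1, 4, 1, 5], pool_size 2, stride 1
  # windows: [3, 1], [1, 4], [4, 1], [1, 5]
  # output row: [3, 4, 4, 5]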

#train(inputs, targets) ⇒ Object



# File 'lib/qoa/training.rb', line 10

def train(inputs, targets)
  validate_train_args(inputs, targets)

  inputs.zip(targets).each_slice(@batch_size) do |batch|
    train_batch(batch)
  end
end
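
A hypothetical end-to-end call; the constructor arguments depend on NeuralNetwork's API and are omitted here:

  net = Qoa::NeuralNetwork.new # hypothetical; see NeuralNetwork for arguments
  inputs  = [[0, 0], [0, 1], [1, 0], [1, 1]] # XOR-style toy data
  targets = [[0], [1], [1], [0]]
  net.train(inputs, targets) # processed in mini-batches of @batch_size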

#train_batch(batch) ⇒ Object



# File 'lib/qoa/training.rb', line 18

def train_batch(batch)
  batch_inputs = batch.map { |x, _| x }

  # Forward pass
  layer_outputs = batch_inputs.map { |inputs| forward_pass(inputs) }

  # Backward pass
  # Using thread pool to parallelize the backward pass for each input in the batch
  pool = Concurrent::FixedThreadPool.new(4) # from the concurrent-ruby gem
  weight_deltas = Array.new(@layers.size) { |i| Array.new(@layers[i].output_size) { Array.new(@layers[i].input_size, 0) } }
  mutex = Mutex.new

  batch.zip(layer_outputs).each do |(inputs, targets), outputs|
    pool.post do
      deltas = backward_pass(inputs, targets, outputs)
      mutex.synchronize do
        @layers.each_with_index do |_, i|
          weight_deltas[i] = matrix_add(weight_deltas[i], deltas[i])
        end
      end
    end
  end

  pool.shutdown
  pool.wait_for_termination

  # Update weights
  @layers.each_with_index do |layer, i|
    regularization_penalty = calculate_regularization_penalty(layer.weights, @l1_lambda, @l2_lambda)
    layer.weights = matrix_add(layer.weights, scalar_multiply(@learning_rate / batch.size, matrix_add(weight_deltas[i], regularization_penalty)))
  end
end
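
The FixedThreadPool comes from the concurrent-ruby gem: each sample's backward pass runs as its own task, and the mutex serializes the accumulation into weight_deltas so that concurrent matrix_add calls cannot interleave. For example, with @batch_size = 2, train hands this method slices of the form [[inputs1, targets1], [inputs2, targets2]].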

#train_with_early_stopping(inputs, targets, validation_inputs, validation_targets, max_epochs, patience) ⇒ Object



# File 'lib/qoa/training.rb', line 54

def train_with_early_stopping(inputs, targets, validation_inputs, validation_targets, max_epochs, patience)
  best_validation_loss = Float::INFINITY
  patience_left = patience
  epoch = 0

  while epoch < max_epochs && patience_left > 0
    train(inputs, targets)
    validation_loss = calculate_loss(validation_inputs, validation_targets)
    puts "Epoch #{epoch + 1}: Validation loss = #{validation_loss}"

    if validation_loss < best_validation_loss
      best_validation_loss = validation_loss
      save_model('best_model.json')
      patience_left = patience
    else
      patience_left -= 1
    end

    epoch += 1
  end

  puts "Training stopped. Best validation loss = #{best_validation_loss}"
  load_model('best_model.json')
end
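
A hypothetical call; calculate_loss, save_model, and load_model are assumed to be provided elsewhere in NeuralNetwork:

  # Run for at most 100 epochs, stopping once validation loss has not
  # improved for 5 consecutive epochs; the best weights are then reloaded.
  net.train_with_early_stopping(train_x, train_y, val_x, val_y, 100, 5)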