Class: Secryst::Trainer
- Inherits: Object
- Defined in: lib/secryst/trainer.rb
Instance Method Summary
- #initialize(model:, batch_size:, lr:, data_input:, data_target:, hyperparameters:, max_epochs: nil, log_interval: 1, checkpoint_every:, checkpoint_dir:, scheduler_step_size:, gamma:) ⇒ Trainer (constructor)
A new instance of Trainer.
- #train ⇒ Object
Runs the training loop: optimizes on the training split, evaluates after each epoch, and saves periodic checkpoints.
Constructor Details
#initialize(model:, batch_size:, lr:, data_input:, data_target:, hyperparameters:, max_epochs: nil, log_interval: 1, checkpoint_every:, checkpoint_dir:, scheduler_step_size:, gamma:) ⇒ Trainer
Returns a new instance of Trainer.
# File 'lib/secryst/trainer.rb', line 4

def initialize(
  model:,
  batch_size:,
  lr:,
  data_input:,
  data_target:,
  hyperparameters:,
  max_epochs: nil,
  log_interval: 1,
  checkpoint_every:,
  checkpoint_dir:,
  scheduler_step_size:,
  gamma:
)
  @data_input = File.readlines(data_input, chomp: true)
  @data_target = File.readlines(data_target, chomp: true)
  @device = "cpu"
  @lr = lr
  @scheduler_step_size = scheduler_step_size
  @gamma = gamma
  @batch_size = batch_size
  @model_name = model
  @max_epochs = max_epochs
  @log_interval = log_interval
  @checkpoint_every = checkpoint_every
  @checkpoint_dir = checkpoint_dir
  FileUtils.mkdir_p(@checkpoint_dir)

  generate_vocabs_and_data
  save_vocabs

  case model
  when 'transformer'
    @model = Secryst::Transformer.new(hyperparameters.merge({
      input_vocab_size: @input_vocab.length,
      target_vocab_size: @target_vocab.length,
    }))
  else
    raise ArgumentError, 'Only transformer model is currently supported'
  end
end
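For orientation, the sketch below shows a plausible construction call. The file paths and all hyperparameter values are illustrative assumptions rather than shipped defaults; the hyperparameters hash is forwarded to Secryst::Transformer.new (with input_vocab_size and target_vocab_size merged in), so its keys must match what that class accepts (the PyTorch-style names below are an assumption):

trainer = Secryst::Trainer.new(
  model: 'transformer',            # the only model currently supported
  batch_size: 32,
  lr: 0.1,
  data_input: 'data/input.txt',    # hypothetical file: one source sequence per line
  data_target: 'data/target.txt',  # hypothetical file: aligned target sequences, one per line
  hyperparameters: {               # assumed keys, in PyTorch Transformer naming style
    d_model: 64,
    nhead: 8,
    num_encoder_layers: 4,
    num_decoder_layers: 4,
    dim_feedforward: 256,
    dropout: 0.05,
    activation: 'relu',
  },
  max_epochs: 50,                  # nil would train until interrupted
  log_interval: 10,                # print progress every 10 batches
  checkpoint_every: 5,             # save a checkpoint every 5 epochs
  checkpoint_dir: 'checkpoints',   # created with FileUtils.mkdir_p if missing
  scheduler_step_size: 1,          # StepLR decays lr by gamma every epoch
  gamma: 0.9
)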
Instance Method Details
#train ⇒ Object
# File 'lib/secryst/trainer.rb', line 46

def train
  best_model = nil
  best_val_loss = 1.0 / 0.0 # infinity

  return unless @model_name == 'transformer'

  criterion = Torch::NN::CrossEntropyLoss.new(ignore_index: index_of('<pad>')).to(@device)
  optimizer = Torch::Optim::SGD.new(@model.parameters, lr: @lr)
  scheduler = Torch::Optim::LRScheduler::StepLR.new(optimizer, step_size: @scheduler_step_size, gamma: @gamma)
  total_loss = 0.0
  start_time = Time.now
  ntokens = @target_vocab.length
  epoch = 0

  loop do
    epoch_start_time = Time.now
    @model.train
    @train_data.each.with_index do |batch, i|
      inputs, targets, decoder_inputs, src_mask, tgt_mask, memory_mask = batch
      inputs = Torch.tensor(inputs).t
      decoder_inputs = Torch.tensor(decoder_inputs).t
      targets = Torch.tensor(targets).t
      src_key_padding_mask = inputs.t.eq(1)
      tgt_key_padding_mask = decoder_inputs.t.eq(1)
      optimizer.zero_grad
      opts = {
        # src_mask: src_mask,
        tgt_mask: tgt_mask,
        # memory_mask: memory_mask,
        src_key_padding_mask: src_key_padding_mask,
        tgt_key_padding_mask: tgt_key_padding_mask,
        memory_key_padding_mask: src_key_padding_mask,
      }
      output = @model.call(inputs, decoder_inputs, **opts)
      loss = criterion.call(output.transpose(0, 1).reshape(-1, ntokens), targets.t.view(-1))
      loss.backward
      ClipGradNorm.clip_grad_norm(@model.parameters, max_norm: 0.5)
      optimizer.step
      # puts "i[#{i}] loss: #{loss}"
      total_loss += loss.item

      if (i + 1) % @log_interval == 0
        cur_loss = total_loss / @log_interval
        elapsed = Time.now - start_time
        puts "| epoch #{epoch} | #{i + 1}/#{@train_data.length} batches | "\
             "lr #{scheduler.get_lr()[0].round(4)} | ms/batch #{(1000 * elapsed.to_f / @log_interval).round} | "\
             "loss #{cur_loss.round(5)} | ppl #{Math.exp(cur_loss).round(5)}"
        total_loss = 0
        start_time = Time.now
      end
    end

    if epoch > 0 && epoch % @checkpoint_every == 0
      puts ">> Saving checkpoint '#{@checkpoint_dir}/checkpoint-#{epoch}.pth'"
      Torch.save(@model.state_dict, "#{@checkpoint_dir}/checkpoint-#{epoch}.pth")
    end

    # Evaluate
    @model.eval
    total_loss = 0.0
    Torch.no_grad do
      @eval_data.each.with_index do |batch, i|
        inputs, targets, decoder_inputs, src_mask, tgt_mask, memory_mask = batch
        inputs = Torch.tensor(inputs).t
        decoder_inputs = Torch.tensor(decoder_inputs).t
        targets = Torch.tensor(targets).t
        src_key_padding_mask = inputs.t.eq(1)
        tgt_key_padding_mask = decoder_inputs.t.eq(1)
        opts = {
          # src_mask: src_mask,
          tgt_mask: tgt_mask,
          # memory_mask: memory_mask,
          src_key_padding_mask: src_key_padding_mask,
          tgt_key_padding_mask: tgt_key_padding_mask,
          memory_key_padding_mask: src_key_padding_mask,
        }
        output = @model.call(inputs, decoder_inputs, **opts)
        output_flat = output.transpose(0, 1).reshape(-1, ntokens)
        total_loss += criterion.call(output_flat, targets.t.view(-1)).item
      end
      total_loss = total_loss / @eval_data.length
      puts('-' * 89)
      puts "| end of epoch #{epoch} | time: #{(Time.now - epoch_start_time).round(3)}s | "\
           "valid loss #{total_loss.round(5)} | valid ppl #{Math.exp(total_loss).round(5)}"
      puts('-' * 89)
      if total_loss < best_val_loss
        best_model = @model
        best_val_loss = total_loss
      end
    end
    scheduler.step
    epoch += 1
    break if @max_epochs && @max_epochs < epoch
  end
end
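For context, here is a minimal usage sketch continuing the hypothetical construction example above; the commented log line is a template of the format produced by the puts calls, not real output:

trainer.train
# Prints running progress every log_interval batches, in the form:
#   | epoch E | i/N batches | lr L | ms/batch M | loss X | ppl Y
# After each epoch, evaluates on @eval_data and prints validation loss and
# perplexity; every checkpoint_every epochs, saves the model's state dict to
# "#{checkpoint_dir}/checkpoint-#{epoch}.pth". The loop ends once epoch
# exceeds max_epochs (it runs indefinitely when max_epochs is nil).

Two conventions the loop relies on: the <pad> token is expected at index 1 in both vocabularies, so inputs.t.eq(1) marks padded positions for the key-padding masks, and the source padding mask is reused as memory_key_padding_mask so attention over encoder memory also skips padded positions.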