Method List
- #[] NanoGPT::TrainConfig
- #[] NanoGPT::SampleConfig
- #[]= NanoGPT::TrainConfig
- #[]= NanoGPT::SampleConfig
- auto NanoGPT::Device
- #batch_size NanoGPT::DataLoader
- #best_val_loss NanoGPT::Trainer
- #bias NanoGPT::Layers::LayerNorm
- #bias NanoGPT::GPTConfig
- #block_size NanoGPT::DataLoader
- #block_size NanoGPT::GPTConfig
- #config NanoGPT::Trainer
- #config NanoGPT::GPT
- #configure_optimizers NanoGPT::GPT
- #crop_block_size NanoGPT::GPT
- cuda_available? NanoGPT::Device
- #decode NanoGPT::CharTokenizer
- #decode NanoGPT::GPT2Tokenizer
- #decode NanoGPT::Tokenizer
- #default_config NanoGPT::Trainer
- #dropout NanoGPT::GPTConfig
- #encode NanoGPT::Tokenizer
- #encode NanoGPT::GPT2Tokenizer
- #encode NanoGPT::CharTokenizer
- #eot_token NanoGPT::GPT2Tokenizer
- #estimate_loss NanoGPT::Trainer
- #estimate_mfu NanoGPT::GPT
- for_dataset NanoGPT::Tokenizer
- #forward NanoGPT::Layers::CausalSelfAttention
- #forward NanoGPT::Layers::LayerNorm
- #forward NanoGPT::Layers::Block
- #forward NanoGPT::GPT
- #forward NanoGPT::Layers::MLP
- from_file NanoGPT::CharTokenizer
- from_text NanoGPT::CharTokenizer
- #generate NanoGPT::GPT
- #get_batch NanoGPT::DataLoader
- #get_lr NanoGPT::LRScheduler
- gpu? NanoGPT::Device
- #head_size NanoGPT::GPTConfig
- info NanoGPT::Device
- #initialize NanoGPT::TrainConfig
- #initialize NanoGPT::Layers::LayerNorm
- #initialize NanoGPT::Trainer
- #initialize NanoGPT::SampleConfig
- #initialize NanoGPT::LRScheduler
- #initialize NanoGPT::Layers::Block
- #initialize NanoGPT::GPTConfig
- #initialize NanoGPT::Layers::CausalSelfAttention
- #initialize NanoGPT::GPT
- #initialize NanoGPT::DataLoader
- #initialize NanoGPT::CharTokenizer
- #initialize NanoGPT::GPT2Tokenizer
- #initialize NanoGPT::Layers::MLP
- #iter_num NanoGPT::Trainer
- #itos NanoGPT::CharTokenizer
- #learning_rate NanoGPT::LRScheduler
- load NanoGPT::SampleConfig
- load NanoGPT::TrainConfig
- #load_checkpoint NanoGPT::Trainer
- #load_json NanoGPT::SampleConfig
- #load_json NanoGPT::TrainConfig
- #lr_decay_iters NanoGPT::LRScheduler
- #min_lr NanoGPT::LRScheduler
- #model NanoGPT::Trainer
- mps_available? NanoGPT::Device
- #n_embd NanoGPT::GPTConfig
- #n_head NanoGPT::GPTConfig
- #n_layer NanoGPT::GPTConfig
- #num_params NanoGPT::GPT
- #optimizer NanoGPT::Trainer
- parse_value NanoGPT::TrainConfig
- parse_value NanoGPT::SampleConfig
- #save NanoGPT::CharTokenizer
- #save_checkpoint NanoGPT::Trainer
- #save_json NanoGPT::TrainConfig
- #step NanoGPT::LRScheduler
- #stoi NanoGPT::CharTokenizer
- #to_h NanoGPT::SampleConfig
- #to_h NanoGPT::GPTConfig
- #to_h NanoGPT::TrainConfig
- #train NanoGPT::Trainer
- #train_size NanoGPT::DataLoader
- type NanoGPT::Device
- #val_size NanoGPT::DataLoader
- #values NanoGPT::SampleConfig
- #values NanoGPT::TrainConfig
- #vocab_size NanoGPT::Tokenizer
- #vocab_size NanoGPT::GPTConfig
- #warmup_iters NanoGPT::LRScheduler
- #weight NanoGPT::Layers::LayerNorm
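
The index above only names the public methods. For orientation, here is a minimal usage sketch that strings some of them together. The class and method names (NanoGPT::TrainConfig.load, NanoGPT::Trainer#train, NanoGPT::Tokenizer.for_dataset, NanoGPT::GPT#generate, and so on) are taken from the list, but every argument, keyword, file path, and the require name are assumptions, since signatures are not documented on this page.

```ruby
# Hypothetical end-to-end sketch built only from the method names in the index.
# Argument names, paths, and the gem's require name are assumptions.
require "nano_gpt"

# Training: load a config, build a trainer, run the loop, save a checkpoint.
config  = NanoGPT::TrainConfig.load("train_config.json") # assumed: JSON config path
trainer = NanoGPT::Trainer.new(config)                   # assumed: takes the config object
trainer.train                                            # assumed: runs the training loop
trainer.save_checkpoint("ckpt.pt")                       # assumed: checkpoint path argument

# Sampling: pick a tokenizer for the dataset and generate from the trained model.
tokenizer = NanoGPT::Tokenizer.for_dataset("shakespeare_char") # assumed dataset name
ids       = tokenizer.encode("ROMEO:")                         # assumed: returns token ids
out       = trainer.model.generate(ids, max_new_tokens: 200)   # assumed keyword argument
puts tokenizer.decode(out)
```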