Method List
- #char_to_token (Tokenizers::Encoding)
- #char_to_word (Tokenizers::Encoding)
- #decode (Tokenizers::Tokenizer)
- #decode (Tokenizers::CharBPETokenizer)
- #decode_batch (Tokenizers::Tokenizer)
- #enable_padding (Tokenizers::Tokenizer)
- #enable_truncation (Tokenizers::Tokenizer)
- #encode (Tokenizers::CharBPETokenizer)
- #encode (Tokenizers::Tokenizer)
- #encode_batch (Tokenizers::Tokenizer)
- from_file (Tokenizers)
- from_file (Tokenizers::Models::WordLevel)
- from_pretrained (Tokenizers)
- #from_pretrained (Tokenizers::FromPretrained)
- #initialize (Tokenizers::CharBPETokenizer)
- new (Tokenizers::Normalizers::BertNormalizer)
- new (Tokenizers::Processors::TemplateProcessing)
- new (Tokenizers::Models::BPE)
- new (Tokenizers::Decoders::Strip)
- new (Tokenizers::Decoders::CTC)
- new (Tokenizers::Models::Unigram)
- new (Tokenizers::Models::WordPiece)
- new (Tokenizers::Models::WordLevel)
- new (Tokenizers::Processors::RobertaProcessing)
- new (Tokenizers::Normalizers::Strip)
- new (Tokenizers::Trainers::WordLevelTrainer)
- new (Tokenizers::Trainers::WordPieceTrainer)
- new (Tokenizers::PreTokenizers::Punctuation)
- new (Tokenizers::Decoders::Metaspace)
- new (Tokenizers::Decoders::WordPiece)
- new (Tokenizers::Normalizers::Prepend)
- new (Tokenizers::Decoders::BPEDecoder)
- new (Tokenizers::PreTokenizers::Split)
- new (Tokenizers::Trainers::BpeTrainer)
- new (Tokenizers::PreTokenizers::Digits)
- new (Tokenizers::Processors::ByteLevel)
- new (Tokenizers::PreTokenizers::Metaspace)
- new (Tokenizers::Trainers::UnigramTrainer)
- new (Tokenizers::PreTokenizers::ByteLevel)
- #save (Tokenizers::Tokenizer)
- #to_s (Tokenizers::Tokenizer)
- #vocab (Tokenizers::Tokenizer)
- #vocab_size (Tokenizers::Tokenizer)
- #word_to_chars (Tokenizers::Encoding)
- #word_to_tokens (Tokenizers::Encoding)
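
For orientation, a minimal usage sketch exercising several of the methods indexed above (from_pretrained, #encode, #encode_batch, #decode, #vocab_size, and #save). The model name "bert-base-cased" and the Encoding accessors ids and tokens are illustrative assumptions drawn from the gem's README rather than entries in this index; argument shapes follow the upstream tokenizers API.

    require "tokenizers"

    # Load a pretrained tokenizer from the Hugging Face Hub
    # ("bert-base-cased" is an illustrative choice).
    tokenizer = Tokenizers.from_pretrained("bert-base-cased")

    # Encode a single string into an Encoding.
    encoding = tokenizer.encode("I can feel the magic, can you?")
    p encoding.tokens   # => ["[CLS]", "I", "can", "feel", ...]
    p encoding.ids      # token ids as integers

    # Encode several strings in one call.
    encodings = tokenizer.encode_batch(["First sentence.", "Second sentence."])

    # Decode ids back into text, and inspect the vocabulary size.
    p tokenizer.decode(encoding.ids)
    p tokenizer.vocab_size

    # Persist the tokenizer definition to a JSON file.
    tokenizer.save("tokenizer.json")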