Method List

Method names prefixed with # are instance methods; unprefixed names are class or module methods. The owning class or module is given in parentheses.
- #[] (Segment::Index)
- #__define_regexp_hook (RegExpNER)
- #add (Dictionary::TF_IDF)
- #add (Dictionary::KL)
- #add (Dictionary)
- #add (DocumentRepo)
- #add_document (Corpus)
- #add_instance (Finder)
- #add_pmid (Corpus)
- #add_pubmed_query (Corpus)
- #add_regexp (RegExpNER)
- #advance (TokenTrieNER::EnumeratedArray)
- align (Segment)
- all_annotations (Token)
- #appearence_order (Normalizer)
- #arabic (String)
- #back (TokenTrieNER::EnumeratedArray)
- #best (Dictionary::TF_IDF)
- #best (Dictionary::KL)
- #bigrams (String)
- bigrams (BagOfWords)
- #case_insensitive (NGramPrefixDictionary)
- clean (TokenTrieNER)
- #clean (CueIndex)
- clean_sort (Segment)
- #code (TokenTrieNER::Code)
- #config (CueIndex)
- #corpora_path (Corpus)
- corpus (Document)
- count (BagOfWords)
- #cues (CueIndex)
- #data (Segment::Index)
- #define (CueIndex)
- define (Document)
- #define_comparisons (Tokenizer)
- #define_regexp (RegExpNER)
- #define_tokens (Tokenizer)
- #df (Dictionary::TF_IDF)
- #docid (Document)
- #docid (Corpus)
- #docid (DocumentRepo)
- #docid2fields (DocumentRepo)
- #docs (Dictionary::TF_IDF)
- #document (Corpus)
- #document_repo (Corpus)
- #downcase_first (String)
- #end (Token)
- #end (Segment)
- #entities (NER)
- #entity (NamedEntity)
- #entrez_score (Normalizer)
- #eval (Tokenizer::Operation)
- #eval (Tokenizer::Custom)
- #evaluate (Tokenizer)
- #evaluate_tokens (Tokenizer)
- event_extraction (NLP)
- #exists? (Corpus)
- extended (TokenTrieNER::EnumeratedArray)
- features (BagOfWords)
- #fields2docid (DocumentRepo)
- find (TokenTrieNER)
- #find (Finder::Instance)
- #find (Finder)
- #find (DocumentRepo)
- #find (Corpus)
- #find_docid (Corpus)
- #find_docid (DocumentRepo)
- find_fail (TokenTrieNER)
- #fix_segment (Transformed)
- follow (TokenTrieNER)
- #format (Finder::Instance)
- gdep_chunks (NLP)
- get_best (Normalizer)
- #global_annotations (Corpus)
- #global_persistence (Document)
- #hash (Document)
- #html (Relationship)
- #html (NamedEntity)
- #html_with_entities (Relationship)
- #id (Token)
- #id (Document)
- #idf (Dictionary::TF_IDF)
- ignore_case (Tokenizer)
- #ignore_case (Tokenizer::Custom)
- #ignore_case (Tokenizer::Operation)
- #includes? (Segment)
- #index (Segment::Index)
- index (Segment)
- #index (TokenTrieNER)
- #index (NGramPrefixDictionary)
- index_for_tokens (TokenTrieNER)
- #info (Token)
- init (OSCAR4)
- init (Banner)
- init (Abner)
- init (OSCAR3)
- init (ChemicalTagger)
- init (Linnaeus)
- #initialize (Corpus)
- #initialize (RegExpNER)
- #initialize (Dictionary)
- #initialize (Tokenizer)
- #initialize (PatternRelExt)
- #initialize (TokenTrieNER)
- #initialize (Segment::Index)
- #initialize (TokenTrieNER::Code)
- #initialize (Tokenizer::Custom)
- #initialize (Finder)
- #initialize (Finder::Instance)
- #initialize (Tokenizer::Transform)
- #initialize (Banner)
- #initialize (Abner)
- #initialize (NGramPrefixDictionary)
- #initialize (CueIndex)
- #initialize (Normalizer)
- #initialize (Tokenizer::Operation)
- #initialize (Document)
- #initialize (Dictionary::KL)
- #initialize (Dictionary::TF_IDF)
- #instances (Finder)
- #is_special? (String)
- #kl (Dictionary::KL)
- #last? (TokenTrieNER::EnumeratedArray)
- #left? (TokenTrieNER::EnumeratedArray)
- #load (CueIndex)
- #load_into (Document)
- load_tsv (Segment)
- load_tsv_values (Segment)
- #longest_match (TokenTrieNER)
- #main (Tokenizer)
- make_match (TokenTrieNER)
- #make_relative (Segment)
- #mask (SegmentWithDocid)
- #masked? (SegmentWithDocid)
- #masked_length (SegmentWithDocid)
- #match (RegExpNER)
- match (NGramPrefixDictionary)
- match (Linnaeus)
- #match (CueIndex)
- #match (OSCAR4)
- match (OSCAR4)
- #match (OSCAR3)
- match (OSCAR3)
- #match (Banner)
- match (ChemicalTagger)
- #match (Normalizer)
- #match (Abner)
- #match (TokenTrieNER)
- #match (ChemicalTagger)
- #match (NGramPrefixDictionary)
- #match_chunks (PatternRelExt)
- match_regexp (RegExpNER)
- match_regexp_hash (RegExpNER)
- match_regexp_list (RegExpNER)
- #match_sentences (PatternRelExt)
- merge (TokenTrieNER)
- #merge (TokenTrieNER)
- merge_vp_chunks (NLP)
- #method_missing (Tokenizer::Operation)
- #method_missing (Tokenizer::Custom)
- #method_missing (Tokenizer::Transform)
- #namespace (Finder::Instance)
- #namespace (Document)
- #neg_dict (Dictionary::KL)
- #new_token_trie (PatternRelExt)
- #next (TokenTrieNER::EnumeratedArray)
- #no_clean (TokenTrieNER)
- #normalizer (Finder::Instance)
- #num_docs (Dictionary::TF_IDF)
- #offset (Token)
- #offset= (Segment)
- open_tokyocabinet (DocumentRepo)
- #original (Token)
- overlaps (Segment)
- #peek (TokenTrieNER::EnumeratedArray)
- persist (Document)
- #persist_dir (Document)
- persist_in_global_tsv (Document)
- persist_in_tsv (Document)
- #persistence_dir (Corpus)
- #persistence_for (Corpus)
- #pos (TokenTrieNER::EnumeratedArray)
- #pos_dict (Dictionary::KL)
- prepare_chunk_patterns (PatternRelExt)
- prepare_token (TokenTrieNER)
- process (TokenTrieNER)
- process_hash (NGramPrefixDictionary)
- process_labels (NLP)
- process_stream (NGramPrefixDictionary)
- #pull (Segment)
- #push (Segment)
- #range (Segment)
- #range (Token)
- #range_in (Segment)
- #regexps (RegExpNER)
- #replace (Transformed)
- #report (NamedEntity)
- #resolve (Normalizer)
- #restore (Transformed)
- returnFeatures (NLP)
- #segment_indeces (Document)
- #segment_index (Document)
- #segment_length (SegmentWithDocid)
- #segment_length (Segment)
- #segments (Document)
- #select (Normalizer)
- sentence_split_detector (OpenNLP)
- sentence_splitter (OpenNLP)
- set_tsv_fields (Segment)
- setup (Token)
- #shift (Transformed)
- simple_pattern (PatternRelExt)
- #slack (PatternRelExt)
- #slack (TokenTrieNER)
- sort (Transformed)
- sort (Segment)
- split (Segment)
- #split_at (TokenTrieNER)
- #split_segments (Segmented)
- #str_length (SegmentWithDocid)
- tagger (OSCAR4)
- #terms (Dictionary::KL)
- #terms (Dictionary)
- #terms (Dictionary::TF_IDF)
- terms (BagOfWords)
- #text (Document)
- #tf (Dictionary::TF_IDF)
- #tf_idf (Dictionary::TF_IDF)
- #to_s (TokenTrieNER::Code)
- #token_score (Normalizer)
- #token_trie (PatternRelExt)
- #token_types (Tokenizer)
- #tokenize (Tokenizer)
- tokenize (Token)
- tokenize (TokenTrieNER)
- #total_terms (Dictionary::TF_IDF)
- #transform (Tokenizer::Transform)
- transform (Transformed)
- transform_index (PatternRelExt)
- transform_key (PatternRelExt)
- #transformation_stack (Transformed)
- #transformed_segments (Transformed)
- tsv (Segment)
- tsv_values_for_segment (Segment)
- #type (Tokenizer)
- #type (Document)
- #type (PatternRelExt)
- #type (NGramPrefixDictionary)
- #type (TokenTrieNER)
- #type (TokenTrieNER::Code)
- #unmask (SegmentWithDocid)
- #unmasked_text (SegmentWithDocid)
- #update_docid (Document)
- #weights (Dictionary::KL)
- #weights (Dictionary::TF_IDF)
- with_transform (Transformed)
- #words (String)
- words (BagOfWords)