StefanH committed on
Commit
3e1f999
1 Parent(s): ca90852

add tokenizer

added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"[eot]": 50258, "<|question|>": 50259, "[QUES]": 50263, "<|text|>": 50260, "<|answer|>": 50261, "[ANSW]": 50265, "[PAD]": 50257, "[TEXT]": 50264, "<|answer_sep|>": 50262}
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>", "pad_token": "[PAD]", "additional_special_tokens": ["[eot]"]}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "/Users/stefanhg/Documents/UMich/Research/Zeroshot Text Classification/models-upload/2022-06-13_19-09-32_NVIDIA-GPT2-explicit-aspect-norm/trained", "use_fast": true, "tokenizer_class": "ZsGPT2Tokenizer"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff