crumb committed
Commit f071f2c
Parent: 5008042

Upload tokenizer

added_tokens.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "<causal>": 50260,
+   "<fitm_result>": 50259,
+   "<fitm_start>": 50257,
+   "<mlm_end>": 50263,
+   "<mlm_start>": 50261,
+   "<multiple_tok_mask>": 50258,
+   "<single_tok_mask>": 50262
+ }
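
The seven added tokens take IDs 50257-50263, the first slots after GPT-2's base vocabulary of 50257 tokens (IDs 0-50256). A minimal sketch of verifying the mapping after downloading the files, assuming a hypothetical local directory "./tokenizer" in place of the actual Hub repo id (not shown on this page):

from transformers import GPT2Tokenizer

# "./tokenizer" is a hypothetical local checkout of these files;
# substitute the real repo id when loading from the Hub.
tok = GPT2Tokenizer.from_pretrained("./tokenizer")

# added_tokens.json fixes the string-to-ID mapping for each new token.
assert tok.convert_tokens_to_ids("<fitm_start>") == 50257
assert tok.convert_tokens_to_ids("<causal>") == 50260
assert len(tok) == 50264  # 50257 base tokens + 7 added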
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "additional_special_tokens": [
+     "<fitm_start>",
+     "<multiple_tok_mask>",
+     "<fitm_result>",
+     "<causal>",
+     "<mlm_start>",
+     "<single_tok_mask>",
+     "<mlm_end>"
+   ],
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
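
Registering the task tokens under "additional_special_tokens" makes the tokenizer treat them as atomic: they are never split into BPE pieces, and decode() can strip them on request. Note also that "pad_token" is set to "<|endoftext|>", which the stock GPT-2 tokenizer leaves unset, so batched padding works without further setup. A quick round-trip check, under the same hypothetical "./tokenizer" path as above:

from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained("./tokenizer")  # hypothetical path

# The markers should appear whole alongside ordinary BPE pieces.
ids = tok.encode("<mlm_start> hello <mlm_end>")
print(tok.convert_ids_to_tokens(ids))

# skip_special_tokens drops the markers from the decoded text.
print(tok.decode(ids, skip_special_tokens=True))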
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "special_tokens_map_file": null,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
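
"tokenizer_class": "GPT2Tokenizer" lets AutoTokenizer resolve the right class, and "model_max_length": 1024 matches GPT-2's context window, so encodings can be capped at the model's limit. A sketch, again using the hypothetical "./tokenizer" path:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./tokenizer")  # hypothetical path

# With truncation=True, output length is capped at model_max_length (1024).
enc = tok("lorem ipsum " * 1000, truncation=True)
assert len(enc["input_ids"]) <= 1024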
vocab.json ADDED
The diff for this file is too large to render. See raw diff