Chakita committed
Commit
c484ae0
1 Parent(s): 10058fc

Upload tokenizer

added_tokens.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "<BRG>": 50257,
+   "<|keywordtext|>": 50258,
+   "<|padtext|>": 50260,
+   "<|septext|>": 50261,
+   "<|startoftext|>": 50259
+ }
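The five entries above sit directly on top of GPT-2's base vocabulary of 50,257 tokens (IDs 0-50256), so the new tokens occupy IDs 50257-50261. Below is a minimal sketch of how an equivalent mapping could be produced with the standard transformers `add_special_tokens` API; the single call shown, and the ordering that yields exactly these IDs, is an assumption rather than a record of how this tokenizer was actually built.

```python
# Sketch: reproduce the ID assignments in added_tokens.json.
# The add_special_tokens call and its ordering are assumptions,
# chosen so that the assigned IDs match the uploaded file.
from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")  # base vocab: IDs 0-50256

tokenizer.add_special_tokens({
    "additional_special_tokens": ["<BRG>", "<|keywordtext|>"],  # -> 50257, 50258
    "bos_token": "<|startoftext|>",                             # -> 50259
    "pad_token": "<|padtext|>",                                 # -> 50260
    "sep_token": "<|septext|>",                                 # -> 50261
})

print(tokenizer.convert_tokens_to_ids(["<BRG>", "<|startoftext|>"]))  # [50257, 50259]
# tokenizer.save_pretrained(<output dir>) would then write an added_tokens.json
# with this mapping, alongside the other files in this commit.
```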
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "additional_special_tokens": [
+     "<BRG>",
+     "<|keywordtext|>"
+   ],
+   "bos_token": "<|startoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|padtext|>",
+   "sep_token": "<|septext|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "additional_special_tokens": [
+     "<BRG>",
+     "<|keywordtext|>"
+   ],
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 1024,
+   "name_or_path": "gpt2",
+   "pad_token": null,
+   "special_tokens_map_file": null,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff
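Together, added_tokens.json, merges.txt, special_tokens_map.json, tokenizer_config.json and vocab.json are everything AutoTokenizer needs to rebuild this tokenizer. Note that tokenizer_config.json leaves pad_token as null; the <|padtext|> pad token is supplied by special_tokens_map.json at load time. A minimal loading sketch follows; the local path "./tokenizer" is a hypothetical stand-in for this repository, and the expected values in the comments assume the usual transformers loading behavior.

```python
# Sketch: load the uploaded tokenizer files and prepare a GPT-2 model for
# the extended vocabulary. "./tokenizer" is a hypothetical local directory
# containing the five files from this commit; use the actual Hub repo id
# when loading remotely.
from transformers import AutoTokenizer, GPT2LMHeadModel

tokenizer = AutoTokenizer.from_pretrained("./tokenizer")

print(len(tokenizer))        # expected: 50262 (50257 base + 5 added tokens)
print(tokenizer.pad_token)   # expected: "<|padtext|>" (from special_tokens_map.json)
print(tokenizer.bos_token)   # expected: "<|startoftext|>"

# The stock GPT-2 embedding matrix only covers IDs 0-50256, so it must be
# resized before the new IDs 50257-50261 can be used in a model.
model = GPT2LMHeadModel.from_pretrained("gpt2")
model.resize_token_embeddings(len(tokenizer))
```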