abacaj committed on
Commit
36a4249
1 Parent(s): 8d48e9a

Upload tokenizer

added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<|pad|>": 49152
+}
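For context, added_tokens.json registers a single extra token, <|pad|>, at id 49152, one past the base vocabulary of 49152 tokens (ids 0-49151) declared in tokenizer_config.json below. A minimal Python sketch for checking the mapping after loading the uploaded files with the transformers library; the local path is a placeholder, not part of this commit:

from transformers import AutoTokenizer

# Placeholder path: point this at a local clone of this repo (assumption, not in the commit).
tokenizer = AutoTokenizer.from_pretrained("./")

# The pad token registered in added_tokens.json should map to id 49152.
print(tokenizer.convert_tokens_to_ids("<|pad|>"))  # expected: 49152
print(tokenizer.pad_token)                         # expected: <|pad|>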
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
+{
+  "additional_special_tokens": [
+    "<|endoftext|>",
+    "<fim_prefix>",
+    "<fim_middle>",
+    "<fim_suffix>",
+    "<fim_pad>",
+    "<filename>",
+    "<gh_stars>",
+    "<issue_start>",
+    "<issue_comment>",
+    "<issue_closed>",
+    "<jupyter_start>",
+    "<jupyter_text>",
+    "<jupyter_code>",
+    "<jupyter_output>",
+    "<empty_output>",
+    "<commit_before>",
+    "<commit_msg>",
+    "<commit_after>",
+    "<reponame>"
+  ],
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": {
+    "content": "<|pad|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": "<|endoftext|>"
+}
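In this map, bos, eos, and unk all alias <|endoftext|>, while padding uses the newly added <|pad|> token. A hedged sketch of one plausible way such a map gets produced with the transformers API; the base checkpoint name, output directory, and exact AddedToken flags are assumptions, not taken from this commit:

from transformers import AddedToken, AutoTokenizer

# "base-tokenizer" is a placeholder; the source checkpoint is not named in this commit.
tokenizer = AutoTokenizer.from_pretrained("base-tokenizer")

# Register <|pad|> as the pad token; bos/eos/unk already alias <|endoftext|>.
tokenizer.add_special_tokens(
    {"pad_token": AddedToken("<|pad|>", lstrip=False, rstrip=False, normalized=True)}
)

# save_pretrained writes added_tokens.json, special_tokens_map.json,
# tokenizer_config.json, tokenizer.json, vocab.json and merges.txt.
tokenizer.save_pretrained("output_dir")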
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,63 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "additional_special_tokens": [
+    "<|endoftext|>",
+    "<fim_prefix>",
+    "<fim_middle>",
+    "<fim_suffix>",
+    "<fim_pad>",
+    "<filename>",
+    "<gh_stars>",
+    "<issue_start>",
+    "<issue_comment>",
+    "<issue_closed>",
+    "<jupyter_start>",
+    "<jupyter_text>",
+    "<jupyter_code>",
+    "<jupyter_output>",
+    "<empty_output>",
+    "<commit_before>",
+    "<commit_msg>",
+    "<commit_after>",
+    "<reponame>"
+  ],
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": true,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "errors": "replace",
+  "model_max_length": 2048,
+  "pad_token": {
+    "__type": "AddedToken",
+    "content": "<|pad|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "padding_side": "left",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "vocab_size": 49152
+}
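Two settings in tokenizer_config.json matter most at inference time: padding_side "left" keeps the final position of every sequence aligned for decoder-only generation, and model_max_length caps inputs at 2048 tokens. A small usage sketch under those settings; the path and prompts below are illustrative placeholders, not part of this commit:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./")  # placeholder path to this repo's files

prompts = ["def add(a, b):", "print('hello')"]  # illustrative inputs
batch = tokenizer(
    prompts,
    padding=True,     # pads on the left, per padding_side in tokenizer_config.json
    truncation=True,  # truncates to model_max_length (2048)
    return_tensors="pt",
)
print(batch["input_ids"].shape)       # (2, longest prompt length)
print(batch["attention_mask"][0])     # zeros on the left mark the <|pad|> positions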
vocab.json ADDED
The diff for this file is too large to render. See raw diff