litagin committed
Commit dc6a71c
1 Parent(s): acfb457

Upload tokenizer

Files changed (5)
  1. merges.txt +41 -0
  2. special_tokens_map.json +30 -0
  3. tokenizer.json +217 -0
  4. tokenizer_config.json +44 -0
  5. vocab.json +1 -0
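
Together these five files form a standard Hugging Face tokenizer checkpoint: a BPE model over digit tokens and two-digit merges, with [UNK]/[BOS]/[EOS]/[PAD] as special tokens. A minimal loading sketch, assuming the files are downloaded locally (the path below is a placeholder, not confirmed by this commit):

# Minimal sketch; "path/to/this/repo" is a placeholder for a local clone
# (or the hub repo id) of this repository.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/repo")

# With the default fast (tokenizer.json-backed) tokenizer, the Whitespace
# pre-tokenizer plus the digit-pair merges map each space-separated
# two-digit code to a single id, and the template post-processor wraps
# the sequence in [BOS] ... [EOS].
ids = tok("43 21 10")["input_ids"]
print(ids)
print(tok.convert_ids_to_tokens(ids))
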
merges.txt ADDED
@@ -0,0 +1,41 @@
+ #version: 0.2
+ 4 3
+ 3 2
+ 2 5
+ 3 3
+ 2 3
+ 2 0
+ 3 0
+ 1 4
+ 3 9
+ 1 3
+ 2 7
+ 2 2
+ 2 9
+ 1 5
+ 3 8
+ 1 6
+ 4 9
+ 4 8
+ 1 7
+ 3 4
+ 4 7
+ 4 2
+ 1 2
+ 4 5
+ 4 6
+ 3 7
+ 3 5
+ 1 8
+ 4 1
+ 4 0
+ 4 4
+ 2 8
+ 3 6
+ 2 4
+ 2 1
+ 2 6
+ 1 1
+ 1 9
+ 1 0
+ 3 1
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "[BOS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "[EOS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,217 @@
+ {
+   "version": "1.0",
+   "truncation": {
+     "direction": "Right",
+     "max_length": 2048,
+     "strategy": "LongestFirst",
+     "stride": 0
+   },
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "[UNK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "[BOS]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "[EOS]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "[PAD]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": null,
+   "pre_tokenizer": {
+     "type": "Whitespace"
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "[BOS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[EOS]",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "[BOS]": {
+         "id": "[BOS]",
+         "ids": [
+           1
+         ],
+         "tokens": [
+           "[BOS]"
+         ]
+       },
+       "[EOS]": {
+         "id": "[EOS]",
+         "ids": [
+           2
+         ],
+         "tokens": [
+           "[EOS]"
+         ]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "BPE",
+     "dropout": null,
+     "unk_token": "[UNK]",
+     "continuing_subword_prefix": null,
+     "end_of_word_suffix": null,
+     "fuse_unk": false,
+     "byte_fallback": false,
+     "vocab": {
+       "[UNK]": 0,
+       "[BOS]": 1,
+       "[EOS]": 2,
+       "[PAD]": 3,
+       "0": 4,
+       "1": 5,
+       "2": 6,
+       "3": 7,
+       "4": 8,
+       "5": 9,
+       "6": 10,
+       "7": 11,
+       "8": 12,
+       "9": 13,
+       "43": 14,
+       "32": 15,
+       "25": 16,
+       "33": 17,
+       "23": 18,
+       "20": 19,
+       "30": 20,
+       "14": 21,
+       "39": 22,
+       "13": 23,
+       "27": 24,
+       "22": 25,
+       "29": 26,
+       "15": 27,
+       "38": 28,
+       "16": 29,
+       "49": 30,
+       "48": 31,
+       "17": 32,
+       "34": 33,
+       "47": 34,
+       "42": 35,
+       "12": 36,
+       "45": 37,
+       "46": 38,
+       "37": 39,
+       "35": 40,
+       "18": 41,
+       "41": 42,
+       "40": 43,
+       "44": 44,
+       "28": 45,
+       "36": 46,
+       "24": 47,
+       "21": 48,
+       "26": 49,
+       "11": 50,
+       "19": 51,
+       "10": 52,
+       "31": 53
+     },
+     "merges": [
+       "4 3",
+       "3 2",
+       "2 5",
+       "3 3",
+       "2 3",
+       "2 0",
+       "3 0",
+       "1 4",
+       "3 9",
+       "1 3",
+       "2 7",
+       "2 2",
+       "2 9",
+       "1 5",
+       "3 8",
+       "1 6",
+       "4 9",
+       "4 8",
+       "1 7",
+       "3 4",
+       "4 7",
+       "4 2",
+       "1 2",
+       "4 5",
+       "4 6",
+       "3 7",
+       "3 5",
+       "1 8",
+       "4 1",
+       "4 0",
+       "4 4",
+       "2 8",
+       "3 6",
+       "2 4",
+       "2 1",
+       "2 6",
+       "1 1",
+       "1 9",
+       "1 0",
+       "3 1"
+     ]
+   }
+ }
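
The tokenizer.json above is self-contained, so it can also be exercised directly with the tokenizers library. A minimal sketch, assuming the file has been downloaded locally:

# Minimal sketch using the tokenizers library on the file added above;
# "tokenizer.json" is assumed to be a local copy of that file.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

enc = tok.encode("43 21 10")
# Per the vocab and TemplateProcessing post-processor above, this should yield
# tokens ['[BOS]', '43', '21', '10', '[EOS]'] and ids [1, 14, 48, 52, 2].
print(enc.tokens)
print(enc.ids)
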
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[BOS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[EOS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "[BOS]",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "[EOS]",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "[UNK]"
+ }
vocab.json ADDED
@@ -0,0 +1 @@
+ {"[UNK]":0,"[BOS]":1,"[EOS]":2,"[PAD]":3,"0":4,"1":5,"2":6,"3":7,"4":8,"5":9,"6":10,"7":11,"8":12,"9":13,"43":14,"32":15,"25":16,"33":17,"23":18,"20":19,"30":20,"14":21,"39":22,"13":23,"27":24,"22":25,"29":26,"15":27,"38":28,"16":29,"49":30,"48":31,"17":32,"34":33,"47":34,"42":35,"12":36,"45":37,"46":38,"37":39,"35":40,"18":41,"41":42,"40":43,"44":44,"28":45,"36":46,"24":47,"21":48,"26":49,"11":50,"19":51,"10":52,"31":53}