Upload tokenizer

- special_tokens_map.json +3 -3
- tokenizer.json +2 -41
- tokenizer_config.json +4 -35
special_tokens_map.json
CHANGED
@@ -35,14 +35,14 @@
     "<|extra0|>"
   ],
   "bos_token": {
-    "content": "<s>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
-    "content": "</s>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -56,7 +56,7 @@
     "single_word": false
   },
   "unk_token": {
-    "content": "<unk>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
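With this commit, bos_token, eos_token and unk_token all resolve to the single <|endoftext|> token. A minimal sketch of how that could be verified after pulling this revision, assuming a hypothetical repository id "org/model" (the actual repository name is not part of this diff):

from transformers import AutoTokenizer

# "org/model" is a placeholder; substitute the repository this commit belongs to.
tok = AutoTokenizer.from_pretrained("org/model")

# After this change all three special tokens should print as <|endoftext|>.
print(tok.bos_token, tok.eos_token, tok.unk_token)

# The padding block removed below in tokenizer.json mapped <|endoftext|> to id 100257,
# so the token's vocabulary id can be cross-checked as well.
print(tok.convert_tokens_to_ids("<|endoftext|>"))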
tokenizer.json
CHANGED
@@ -1,19 +1,7 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 2048,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
-  "padding": {
-    "strategy": "BatchLongest",
-    "direction": "Right",
-    "pad_to_multiple_of": null,
-    "pad_id": 100257,
-    "pad_type_id": 0,
-    "pad_token": "<|endoftext|>"
-  },
+  "truncation": null,
+  "padding": null,
   "added_tokens": [
     {
       "id": 100256,
@@ -311,33 +299,6 @@
       "rstrip": false,
       "normalized": false,
       "special": true
-    },
-    {
-      "id": 100289,
-      "content": "<s>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": false,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 100290,
-      "content": "</s>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": false,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 100291,
-      "content": "<unk>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": false,
-      "normalized": false,
-      "special": true
     }
   ],
   "normalizer": null,
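Setting "truncation" and "padding" to null means the serialized tokenizer no longer forces 2048-token longest-first truncation or batch-longest padding; callers now choose this per call. A short sketch of how the previous behaviour could be requested explicitly, assuming the same placeholder repository id as above:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("org/model")  # placeholder repo id

# Reproduce the settings that used to be baked into tokenizer.json:
# longest-first truncation at 2048 tokens, padding to the longest item in the batch.
batch = tok(
    ["a short example", "a somewhat longer second example"],
    truncation=True,
    max_length=2048,
    padding="longest",
)
print(len(batch["input_ids"][0]), len(batch["input_ids"][1]))  # equal after padding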
tokenizer_config.json
CHANGED
@@ -264,30 +264,6 @@
       "rstrip": false,
       "single_word": false,
       "special": true
-    },
-    "100289": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "100290": {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "100291": {
-      "content": "<unk>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
     }
   },
   "additional_special_tokens": [
@@ -325,19 +301,12 @@
     "<|reg7|>",
     "<|extra0|>"
   ],
-  "bos_token": "<s>",
-  "chat_template": "{%
+  "bos_token": "<|endoftext|>",
+  "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|system|>\\n' + system_message + '<|endoftext|>' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|user|>\\n' + content + '<|endoftext|>' + '\\n<|assistant|>\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|endoftext|>' + '\\n' }}{% endif %}{% endfor %}",
   "clean_up_tokenization_spaces": true,
-  "eos_token": "</s>",
-  "max_length": 2048,
+  "eos_token": "<|endoftext|>",
   "model_max_length": 2048,
-  "pad_to_multiple_of": null,
   "pad_token": "<|endoftext|>",
-  "pad_token_type_id": 0,
-  "padding_side": "right",
-  "stride": 0,
   "tokenizer_class": "GPT2Tokenizer",
-  "
-  "truncation_strategy": "longest_first",
-  "unk_token": "<unk>"
+  "unk_token": "<|endoftext|>"
 }
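The new chat_template wraps conversations with <|system|>, <|user|> and <|assistant|> markers and terminates each turn with <|endoftext|>. A sketch of how the template could be exercised, again using the hypothetical "org/model" repository id:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("org/model")  # placeholder repo id

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# Expected rendering under the template added in this commit:
# <|system|>\nYou are a helpful assistant.<|endoftext|><|user|>\nHello!<|endoftext|>\n<|assistant|>\n
prompt = tok.apply_chat_template(messages, tokenize=False)
print(prompt)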