whizzzzkid committed
Commit 0ee7bf8
1 Parent(s): e85be3f

Upload tokenizer

added_tokens.json CHANGED
@@ -1,5 +1,3 @@
 {
-  "</s>": 100290,
-  "<s>": 100289,
-  "<unk>": 100291
+  "<|end_of_text|>": 100289
 }
special_tokens_map.json CHANGED
@@ -35,28 +35,28 @@
     "<|extra0|>"
   ],
   "bos_token": {
-    "content": "<s>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
-    "content": "</s>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "pad_token": {
-    "content": "<|endoftext|>",
+    "content": "<|end_of_text|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "unk_token": {
-    "content": "<unk>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json CHANGED
@@ -1,19 +1,7 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 2048,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
-  "padding": {
-    "strategy": "BatchLongest",
-    "direction": "Right",
-    "pad_to_multiple_of": null,
-    "pad_id": 100257,
-    "pad_type_id": 0,
-    "pad_token": "<|endoftext|>"
-  },
+  "truncation": null,
+  "padding": null,
   "added_tokens": [
     {
       "id": 100256,
@@ -314,25 +302,7 @@
     },
     {
       "id": 100289,
-      "content": "<s>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": false,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 100290,
-      "content": "</s>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": false,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 100291,
-      "content": "<unk>",
+      "content": "<|end_of_text|>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
tokenizer_config.json CHANGED
@@ -266,23 +266,7 @@
       "special": true
     },
     "100289": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "100290": {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "100291": {
-      "content": "<unk>",
+      "content": "<|end_of_text|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -325,19 +309,12 @@
     "<|reg7|>",
     "<|extra0|>"
   ],
-  "bos_token": "<s>",
+  "bos_token": "<|endoftext|>",
   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
   "clean_up_tokenization_spaces": true,
-  "eos_token": "</s>",
-  "max_length": 2048,
+  "eos_token": "<|endoftext|>",
   "model_max_length": 2048,
-  "pad_to_multiple_of": null,
-  "pad_token": "<|endoftext|>",
-  "pad_token_type_id": 0,
-  "padding_side": "right",
-  "stride": 0,
+  "pad_token": "<|end_of_text|>",
   "tokenizer_class": "GPT2Tokenizer",
-  "truncation_side": "right",
-  "truncation_strategy": "longest_first",
-  "unk_token": "<unk>"
+  "unk_token": "<|endoftext|>"
 }
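
A quick way to confirm the remapped special tokens and the chat template's use of the new eos_token, again as a minimal sketch with a placeholder repo id:

from transformers import AutoTokenizer

# Placeholder repo id; substitute the actual model repository.
tok = AutoTokenizer.from_pretrained("whizzzzkid/example-model")

# After this commit, bos/eos/unk all resolve to <|endoftext|>, pad resolves to
# <|end_of_text|>, and id 100289 (formerly <s>) now belongs to <|end_of_text|>.
print(tok.bos_token, tok.eos_token, tok.unk_token, tok.pad_token)
print(tok.convert_tokens_to_ids("<|end_of_text|>"))  # expected: 100289

# The chat template appends eos_token after each turn, so rendered
# conversations now terminate turns with <|endoftext|> rather than </s>.
messages = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi there."},
]
print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
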