Upload tokenizer
- tokenizer.json +2 -2
- tokenizer_config.json +4 -0
tokenizer.json
CHANGED
@@ -2,7 +2,7 @@
   "version": "1.0",
   "truncation": {
     "direction": "Right",
-    "max_length":
+    "max_length": 600,
     "strategy": "LongestFirst",
     "stride": 0
   },
@@ -32,7 +32,7 @@
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized":
+      "normalized": false,
       "special": true
     },
     {
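For context, a minimal sketch of how the new truncation block behaves once this file is loaded (the file path and input string are placeholders): the tokenizers library restores the truncation settings saved in tokenizer.json, so fast-tokenizer encodings should now be clipped to 600 tokens.

# Sketch only: assumes the tokenizers library and a local copy of the
# updated tokenizer.json; the input string is a stand-in.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

# The saved truncation settings are exposed on the loaded tokenizer
# (the `truncation` property in recent tokenizers releases).
print(tok.truncation)  # e.g. {'max_length': 600, 'strategy': 'longest_first', ...}

# Encodings are clipped to at most 600 tokens, dropping from the right.
ids = tok.encode("lorem ipsum " * 2000).ids
assert len(ids) <= 600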
tokenizer_config.json
CHANGED
@@ -66,10 +66,14 @@
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
   "legacy": false,
+  "max_length": 300,
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "</s>",
   "sp_model_kwargs": {},
+  "stride": 0,
   "tokenizer_class": "LlamaTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
   "unk_token": "<unk>",
   "use_default_system_prompt": true
 }
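The tokenizer_config.json additions are loading-time kwargs; a hedged sketch of how they surface in transformers (the repo id is a placeholder, and since automatic use of a saved max_length varies by version, it is passed explicitly here):

# Sketch only: "user/model" stands in for this repo's id.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/model")

# truncation_side is a standard tokenizer attribute and now comes back
# as configured, controlling which end of long inputs gets cut.
print(tok.truncation_side)  # "right"

# max_length / stride / truncation_strategy act as saved hints; truncation
# still has to be requested at call time, so the configured 300 is
# passed explicitly to be safe.
enc = tok("lorem ipsum " * 500, truncation=True, max_length=300)
assert len(enc["input_ids"]) <= 300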