Commit 56dd2ca by xiamengzhou (parent: 9fc6fe1)

    update

Files changed:
- special_tokens_map.json +7 -0
- tokenizer.json +4 -64
- tokenizer_config.json +3 -1
special_tokens_map.json
CHANGED
@@ -12,5 +12,12 @@
     "normalized": false,
     "rstrip": false,
     "single_word": false
+  },
+  "pad_token": {
+    "content": "<|end_of_text|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
   }
 }
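The net effect is that the tokenizer now declares `<|end_of_text|>` as its padding token. A minimal sketch of what this enables, assuming the repo is loaded with `transformers` (the repo path below is a placeholder, not the actual model id):

```python
from transformers import AutoTokenizer

# Placeholder path; substitute the actual Hub repo id.
tok = AutoTokenizer.from_pretrained("./path/to/this/repo")

# Before this commit pad_token was unset, so padding a batch raised
# "Asking to pad but the tokenizer does not have a padding token.";
# now <|end_of_text|> is used for padding.
batch = tok(
    ["short prompt", "a noticeably longer prompt in the same batch"],
    padding=True,
    return_tensors="pt",
)
print(tok.pad_token)             # <|end_of_text|>
print(batch["input_ids"].shape)  # both rows padded to the same length
```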
tokenizer.json
CHANGED
@@ -2329,69 +2329,10 @@
     ]
   },
   "post_processor": {
-    "type": "Sequence",
-    "processors": [
-      {
-        "type": "ByteLevel",
-        "add_prefix_space": true,
-        "trim_offsets": false,
-        "use_regex": true
-      },
-      {
-        "type": "TemplateProcessing",
-        "single": [
-          {
-            "SpecialToken": {
-              "id": "<|begin_of_text|>",
-              "type_id": 0
-            }
-          },
-          {
-            "Sequence": {
-              "id": "A",
-              "type_id": 0
-            }
-          }
-        ],
-        "pair": [
-          {
-            "SpecialToken": {
-              "id": "<|begin_of_text|>",
-              "type_id": 0
-            }
-          },
-          {
-            "Sequence": {
-              "id": "A",
-              "type_id": 0
-            }
-          },
-          {
-            "SpecialToken": {
-              "id": "<|begin_of_text|>",
-              "type_id": 1
-            }
-          },
-          {
-            "Sequence": {
-              "id": "B",
-              "type_id": 1
-            }
-          }
-        ],
-        "special_tokens": {
-          "<|begin_of_text|>": {
-            "id": "<|begin_of_text|>",
-            "ids": [
-              128000
-            ],
-            "tokens": [
-              "<|begin_of_text|>"
-            ]
-          }
-        }
-      }
-    ]
+    "type": "ByteLevel",
+    "add_prefix_space": true,
+    "trim_offsets": false,
+    "use_regex": true
   },
   "decoder": {
     "type": "ByteLevel",
@@ -2407,7 +2348,6 @@
     "end_of_word_suffix": null,
     "fuse_unk": false,
     "byte_fallback": false,
-    "ignore_merges": true,
     "vocab": {
       "!": 0,
       "\"": 1,
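Two behavioral notes on this file. Dropping the `TemplateProcessing` step means the fast tokenizer no longer prepends `<|begin_of_text|>` (id 128000) automatically at encode time, so callers must add BOS themselves. Removing `ignore_merges` (a BPE field added in newer `tokenizers` releases) presumably keeps the file loadable by older library versions. A quick check of the first point, assuming the updated file is loaded directly with the `tokenizers` library (path is illustrative):

```python
from tokenizers import Tokenizer

# Load the updated tokenizer.json directly.
tok = Tokenizer.from_file("tokenizer.json")

ids = tok.encode("Hello world").ids
# With only the ByteLevel post-processor left, no BOS token is inserted:
assert 128000 not in ids
print(ids)
```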
tokenizer_config.json
CHANGED
@@ -2050,12 +2050,14 @@
     }
   },
   "bos_token": "<|begin_of_text|>",
+  "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|end_of_text|>",
   "model_input_names": [
     "input_ids",
     "attention_mask"
   ],
-  "model_max_length":
+  "model_max_length": 2048,
+  "pad_token": "<|end_of_text|>",
   "tokenizer_class": "PreTrainedTokenizerFast"
 }
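The added `chat_template` wraps each message as `<|start_header_id|>{role}<|end_header_id|>\n\n{content}<|eot_id|>`, prepends `bos_token` to the first message, and optionally appends an assistant header. A short usage sketch with the standard `transformers` API (repo path is again a placeholder):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./path/to/this/repo")  # placeholder

messages = [{"role": "user", "content": "What is 2 + 2?"}]
text = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(text)
# <|begin_of_text|><|start_header_id|>user<|end_header_id|>
#
# What is 2 + 2?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
#
```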