Snoopy04 committed on
Commit
c443608
1 Parent(s): 2821883

Update tokenizer

Browse files
Files changed (3) hide show
  1. README.md +1 -1
  2. special_tokens_map.json +1 -1
  3. tokenizer_config.json +1 -1
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
- datasets: wikitext
3
  license: other
 
4
  license_link: https://llama.meta.com/llama3/license/
5
  ---
6
  This is a quantized model of [Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) using GPTQ developed by [IST Austria](https://ist.ac.at/en/research/alistarh-group/)
 
1
  ---
 
2
  license: other
3
+ datasets: wikitext
4
  license_link: https://llama.meta.com/llama3/license/
5
  ---
6
  This is a quantized model of [Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) using GPTQ developed by [IST Austria](https://ist.ac.at/en/research/alistarh-group/)
special_tokens_map.json CHANGED
@@ -7,7 +7,7 @@
7
  "single_word": false
8
  },
9
  "eos_token": {
10
- "content": "<|end_of_text|>",
11
  "lstrip": false,
12
  "normalized": false,
13
  "rstrip": false,
 
7
  "single_word": false
8
  },
9
  "eos_token": {
10
+ "content": "<|eot_id|>",
11
  "lstrip": false,
12
  "normalized": false,
13
  "rstrip": false,
tokenizer_config.json CHANGED
@@ -2052,7 +2052,7 @@
2052
  "bos_token": "<|begin_of_text|>",
2053
  "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
2054
  "clean_up_tokenization_spaces": true,
2055
- "eos_token": "<|end_of_text|>",
2056
  "model_input_names": [
2057
  "input_ids",
2058
  "attention_mask"
 
2052
  "bos_token": "<|begin_of_text|>",
2053
  "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
2054
  "clean_up_tokenization_spaces": true,
2055
+ "eos_token": "<|eot_id|>",
2056
  "model_input_names": [
2057
  "input_ids",
2058
  "attention_mask"