Compilade committed
Commit 9e2f067
1 Parent(s): a5ba4ce

Use `<|im_end|>` as EOS token in `tokenizer_config.json`


This is more consistent with `config.json` and `generation_config.json`.
Moreover, this fixes endless outputs in llama.cpp (since their
conversion script gets the EOS token from `transformers.AutoTokenizer`
which reads `tokenizer_config.json`).
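
A minimal sketch (not part of this commit) of how the value in tokenizer_config.json surfaces downstream; "user/model" is a placeholder for this repository's id:

    from transformers import AutoTokenizer

    # AutoTokenizer picks up eos_token from tokenizer_config.json; llama.cpp's
    # conversion script queries the tokenizer for the EOS token, so it sees the
    # same value.
    tok = AutoTokenizer.from_pretrained("user/model")  # placeholder repo id
    print(tok.eos_token)  # with this change: "<|im_end|>" instead of "<|endoftext|>"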

Files changed (1)
tokenizer_config.json +1 -1
tokenizer_config.json CHANGED
@@ -2,7 +2,7 @@
   "add_prefix_space": false,
   "bos_token": "<|endoftext|>",
   "clean_up_tokenization_spaces": true,
-  "eos_token": "<|endoftext|>",
+  "eos_token": "<|im_end|>",
   "model_max_length": 1000000000000000019884624838656,
   "tokenizer_class": "GPTNeoXTokenizer",
   "unk_token": "<|endoftext|>"