tokenizer_config.json seems broken

#3 opened by MagiaSN

I got nearly random output when using the tokenizer_config.json shipped with Chinese-LLaMA-2-13B-hf. After replacing it with the tokenizer_config.json from Chinese-LLaMA-2-7B-hf, the output became readable.
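For anyone who wants to try the same workaround: it is just a file copy between local clones of the two repos (a minimal sketch; the relative paths below are placeholders, not a required layout):

```python
import shutil

# Placeholder paths: adjust to wherever the two repos are cloned locally.
shutil.copy(
    "Chinese-LLaMA-2-7B-hf/tokenizer_config.json",
    "Chinese-LLaMA-2-13B-hf/tokenizer_config.json",
)
```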

tokenizer_config.json in Chinese-LLaMA-2-13B-hf:

{"bos_token": "", "eos_token": "", "model_max_length": 1000000000000000019884624838656, "tokenizer_class": "LlamaTokenizer", "unk_token": ""}

tokenizer_config.json in Chinese-LLaMA-2-7B-hf:

```json
{
  "add_bos_token": true,
  "add_eos_token": false,
  "bos_token": {
    "__type": "AddedToken",
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "clean_up_tokenization_spaces": false,
  "eos_token": {
    "__type": "AddedToken",
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "legacy": false,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": null,
  "padding_side": "right",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": {
    "__type": "AddedToken",
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
```
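
The visible differences are the add_bos_token / legacy flags and the AddedToken wrappers around the special tokens. As a quick sanity check (a minimal sketch assuming local clones of both repos; the paths are placeholders), loading the tokenizer from each directory and encoding the same string shows whether the BOS handling differs:

```python
from transformers import AutoTokenizer

# Placeholder paths: local clones of the two repos.
for path in ["Chinese-LLaMA-2-13B-hf", "Chinese-LLaMA-2-7B-hf"]:
    tok = AutoTokenizer.from_pretrained(path)
    enc = tok("你好，世界")  # "Hello, world"
    print(path)
    print("  input_ids :", enc.input_ids[:8])
    print("  bos_token :", tok.bos_token,
          "| add_bos_token:", getattr(tok, "add_bos_token", None))
```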
