{ "add_bos_token": true, "add_eos_token": false, "add_prefix_space": null, "added_tokens_decoder": { "0": { "content": "<unk>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "1": { "content": "<s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "2": { "content": "</s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "32000": { "content": "<image>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "32001": { "content": "<pad>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true } }, "bos_token": "<s>", "chat_template": "\n{\n \"id\": \"{{chat_id}}\",\n \"conversations\": [\n {% for message in messages %}\n {\n \"from\": \"{% if message['role'] == 'user' %}human{% else %}gpt{% endif %}\",\n \"value\": \"{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}<image>{% endif %}{% endfor %}\"\n }{% if not loop.last %},{% endif %}\n {% endfor %}\n ]\n}\n", "clean_up_tokenization_spaces": false, "eos_token": "</s>", "legacy": false, "model_max_length": 1000000000000000019884624838656, "pad_token": "<pad>", "padding_side": "left", "processor_class": "LlavaProcessor", "sp_model_kwargs": {}, "tokenizer_class": "LlamaTokenizer", "trust_remote_code": false, "unk_token": "<unk>", "use_default_system_prompt": false }