Text Generation · Transformers · Safetensors · llama · conversational · Inference Endpoints · text-generation-inference
Commit 1c93cce (1 parent: f58023f)
Locutusque committed

Upload tokenizer

Files changed (1):
  tokenizer_config.json (+8 -1)
tokenizer_config.json CHANGED

@@ -2053,11 +2053,18 @@
   "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|end_of_text|>",
+  "max_length": 512,
   "model_input_names": [
     "input_ids",
     "attention_mask"
   ],
   "model_max_length": 1000000000000000019884624838656,
+  "pad_to_multiple_of": null,
   "pad_token": "<|end_of_text|>",
-  "tokenizer_class": "PreTrainedTokenizerFast"
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "stride": 0,
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first"
   }
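
For reference, a minimal sketch of how the settings touched by this commit surface through the transformers API. It is illustrative only: the repository id below is a placeholder (the commit page does not name one), and the rendered prompt in the comments assumes the usual Llama 3 bos_token, <|begin_of_text|>.

from transformers import AutoTokenizer

# Placeholder repo id; substitute the actual model repository.
tokenizer = AutoTokenizer.from_pretrained("Locutusque/<repo-name>")

# The chat_template wraps each turn in Llama-3-style headers and always
# appends an open assistant header (it has no add_generation_prompt
# branch), so the rendered string ends ready for generation.
messages = [{"role": "user", "content": "Hello!"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)
# <|begin_of_text|><|start_header_id|>user<|end_header_id|>
#
# Hello!<|eot_id|><|start_header_id|>assistant<|end_header_id|>
#

# The added defaults (padding_side "right", truncation_side "right",
# alongside the existing pad_token "<|end_of_text|>") govern batched
# encoding when padding/truncation are requested:
batch = tokenizer(
    ["short prompt", "a somewhat longer prompt for comparison"],
    padding=True,        # right-pads the shorter sequence with the pad token
    truncation=True,     # drops overflow tokens from the right
    max_length=512,      # matches the "max_length" value added here
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["attention_mask"].shape)

The tokenizer_class entry is only rewritten to gain a trailing comma, so the substantive change is the added padding and truncation fields, most of which make the library's existing defaults explicit in the config.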