Files changed (3)
  1. config.json +1 -0
  2. generation_config.json +1 -1
  3. tokenizer_config.json +2 -2
config.json CHANGED
@@ -6,6 +6,7 @@
  "attention_bias": false,
  "bos_token_id": 1,
  "eos_token_id": 2,
+ "pad_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,

generation_config.json CHANGED
@@ -2,7 +2,7 @@
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
+ "pad_token_id": 2,
  "transformers_version": "4.35.2",
  "use_cache": false
  }
-
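Both config.json and generation_config.json now declare `"pad_token_id": 2`, reusing the EOS id rather than adding a new token, which is the usual workaround for Llama-family checkpoints that ship without a dedicated pad token. A minimal sketch of what this enables for batched generation, assuming a local copy of this repository at the placeholder path `./model`:

```python
# Sketch only: "./model" is a placeholder for a local clone of this repository.
# With pad_token_id set to the EOS id (2), shorter prompts in a batch can be padded
# instead of transformers having to guess a pad id (and warn) at generation time.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./model")
model = AutoModelForCausalLM.from_pretrained("./model")

tokenizer.padding_side = "left"  # usual choice for decoder-only batched generation

prompts = [
    "### User:\nHello!\n\n### Assistant:\n",
    "### User:\nSummarize this commit in one line.\n\n### Assistant:\n",
]
batch = tokenizer(prompts, return_tensors="pt", padding=True)

out = model.generate(**batch, max_new_tokens=64, pad_token_id=tokenizer.pad_token_id)
print(tokenizer.batch_decode(out, skip_special_tokens=True))
```

Padded positions are excluded via the attention mask, so reusing the EOS id does not change the vocabulary or the embedding matrix.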
 
tokenizer_config.json CHANGED
@@ -28,13 +28,13 @@
  }
  },
  "additional_special_tokens": [],
- "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+ "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{% if message['content']%}{{'### System:\n' + message['content']+'\n\n'}}{% endif %}{% elif message['role'] == 'user' %}{{'### User:\n' + message['content']+'\n\n'}}{% elif message['role'] == 'assistant' %}{{'### Assistant:\n' + message['content']}}{% endif %}{% if loop.last and add_generation_prompt %}{{ '### Assistant:\n' }}{% endif %}{% endfor %}",
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": true,
  "model_max_length": 1000000000000000019884624838656,
- "pad_token": null,
+ "pad_token": "</s>",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",