build: AutoGPTQ for meta-llama/Llama-2-7b-chat-hf: 4bits, gr128, desc_act=False
generation_config.json  CHANGED  (+2 -1)
@@ -2,7 +2,8 @@
   "_from_model_config": true,
   "bos_token_id": 1,
   "eos_token_id": 2,
-  "
+  "max_length": 4096,
+  "pad_token_id": 0,
   "temperature": 0.9,
   "top_p": 0.6,
   "transformers_version": "4.30.2"
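For context, a minimal sketch (not part of the commit) of how transformers consumes this file at load time. The repo id below is the base model named in the commit title and is an assumption; substitute the id of the quantized repo this commit actually belongs to, and note that meta-llama repos are access-gated.

# Sketch: inspect the generation defaults committed in generation_config.json.
# Repo id is an assumption taken from the commit title, not the quantized repo.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("meta-llama/Llama-2-7b-chat-hf")

# After this commit the file carries the two added keys alongside the
# existing sampling defaults:
#   max_length=4096, pad_token_id=0, temperature=0.9, top_p=0.6
print(gen_config.max_length, gen_config.pad_token_id)
print(gen_config.temperature, gen_config.top_p)

model.generate() picks these values up as call defaults; temperature and top_p only take effect when sampling is enabled (do_sample=True), and any keyword arguments passed per call override them.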