Update config.json
Browse files
Updating config.json to use `modules_to_not_convert` to standardize its use across all quantizers in transformers: https://github.com/huggingface/transformers/pull/35161
- config.json +1 -1
config.json
CHANGED
@@ -19,7 +19,7 @@
|
|
19 |
"pretraining_tp": 1,
|
20 |
"quantization_config": {
|
21 |
"in_group_size": 8,
|
22 |
-
"linear_weights_not_to_quantize": [
|
23 |
"model.embed_tokens.weight",
|
24 |
"lm_head.weight"
|
25 |
],
|
|
|
19 |
"pretraining_tp": 1,
|
20 |
"quantization_config": {
|
21 |
"in_group_size": 8,
|
22 |
+
"modules_to_not_convert": [
|
23 |
"model.embed_tokens.weight",
|
24 |
"lm_head.weight"
|
25 |
],
|