medmekk (HF staff) committed on
Commit
9e5e9ed
1 Parent(s): 9746e51

Update config.json


Updating config.json to use `modules_to_not_convert`, standardizing its use across all quantizers in transformers: https://github.com/huggingface/transformers/pull/35161
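
As a rough sketch of what this change means downstream (the repository id below is a placeholder, not this model's actual repo), the renamed key can be inspected through the transformers config API:

```python
from transformers import AutoConfig

# Load the configuration from the Hub; "<org>/<repo>" is a placeholder
# for the repository this config.json belongs to.
config = AutoConfig.from_pretrained("<org>/<repo>")

# When loaded via AutoConfig, quantization_config is kept as a plain dict,
# so the standardized key can be read directly after this commit.
quant_cfg = config.quantization_config
print(quant_cfg.get("modules_to_not_convert"))
# expected: ['model.embed_tokens.weight', 'lm_head.weight']
```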

Files changed (1)
  1. config.json +1 -1
config.json CHANGED
@@ -19,7 +19,7 @@
   "pretraining_tp": 1,
   "quantization_config": {
     "in_group_size": 8,
-    "linear_weights_not_to_quantize": [
+    "modules_to_not_convert": [
       "model.embed_tokens.weight",
       "lm_head.weight"
     ],