winglian committed on
Commit
a94f9cb
1 Parent(s): c1921c9

fix for quant config from model (#540)

Browse files
Files changed (1) hide show
  1. src/axolotl/utils/models.py +1 -1
src/axolotl/utils/models.py CHANGED
@@ -160,7 +160,7 @@ def load_model(
160
  model_kwargs["revision"] = cfg.model_revision
161
  if cfg.gptq:
162
  model_config = load_model_config(cfg)
163
- if hasattr(model_config, "quantization_config"):
164
  LOG.warning("model config does not contain quantization_config information")
165
  else:
166
  model_kwargs["quantization_config"] = GPTQConfig(
 
160
  model_kwargs["revision"] = cfg.model_revision
161
  if cfg.gptq:
162
  model_config = load_model_config(cfg)
163
+ if not hasattr(model_config, "quantization_config"):
164
  LOG.warning("model config does not contain quantization_config information")
165
  else:
166
  model_kwargs["quantization_config"] = GPTQConfig(