{
  "bits": 4,
  "group_size": -1,
  "damp_percent": 0.01,
  "desc_act": true,
  "static_groups": false,
  "sym": true,
  "true_sequential": true,
  "model_name_or_path": "meta-llama/Llama-2-7b-hf-4bit-nuq",
  "model_file_base_name": "gptq_model-4bit--1g",
  "is_marlin_format": false,
  "quant_method": "gptq"
}
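
For reference, a config with these fields is typically produced by the AutoGPTQ tooling rather than written by hand. The sketch below is a minimal example, assuming the `auto_gptq` library is installed; the base model name and output directory are placeholders, and the field values simply mirror the config above.

```python
# Minimal sketch using AutoGPTQ; values mirror the quantize_config above.
# Model and output paths are placeholders, not taken from the original document.
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig

quantize_config = BaseQuantizeConfig(
    bits=4,                 # 4-bit weight quantization
    group_size=-1,          # -1: no sub-grouping, one set of scales per output channel
    damp_percent=0.01,      # Hessian dampening factor used by GPTQ
    desc_act=True,          # process columns in order of decreasing activation importance
    static_groups=False,
    sym=True,               # symmetric quantization
    true_sequential=True,   # quantize sub-layers sequentially within each block
)

# Load the full-precision model with the quantization settings attached.
model = AutoGPTQForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf", quantize_config
)

# model.quantize(examples)                    # examples: tokenized calibration samples
# model.save_quantized("Llama-2-7b-hf-4bit")  # writes quantize_config.json next to the weights
```

With `group_size` set to -1 there is a single quantization group per output channel, which matches the `-1g` suffix in `model_file_base_name`.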