{
  "bits": 4,
  "group_size": 128,
  "desc_act": true,
  "static_groups": false,
  "sym": true,
  "lm_head": false,
  "damp_percent": 0.01,
  "true_sequential": true,
  "model_name_or_path": "",
  "model_file_base_name": "",
  "quant_method": "gptq",
  "checkpoint_format": "gptq",
  "meta": {
    "quantizer": "gptqmodel:0.9.9-dev0"
  }
}
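
For context, a config like this is typically emitted as a side effect of quantizing a model with the GPTQModel library named in meta.quantizer. The sketch below is hypothetical: it assumes the GPTQModel Python API around the 0.9.x series (QuantizeConfig, GPTQModel.from_pretrained, quantize, save_quantized), whose names may differ between versions, and the model id, calibration text, and output path are illustrative placeholders, not values from this repository.

# Hypothetical sketch: producing a quantize_config.json with these values
# via the GPTQModel library (per meta.quantizer). API names assume
# GPTQModel ~0.9.x and may differ between versions.
from gptqmodel import GPTQModel, QuantizeConfig
from transformers import AutoTokenizer

quant_config = QuantizeConfig(
    bits=4,               # 4-bit weight quantization
    group_size=128,       # one scale/zero-point per group of 128 weights
    desc_act=True,        # quantize columns in descending activation order (act-order)
    static_groups=False,
    sym=True,             # symmetric quantization grid
    lm_head=False,        # leave the LM head in full precision
    damp_percent=0.01,    # Hessian dampening used by the GPTQ solver
    true_sequential=True, # quantize sub-modules sequentially within each block
)

model_id = "facebook/opt-125m"  # illustrative model id
tokenizer = AutoTokenizer.from_pretrained(model_id)
# A real run would use a few hundred calibration samples; one is shown here.
calibration = [
    tokenizer("GPTQ calibrates quantization on a small text sample.", return_tensors="pt")
]

model = GPTQModel.from_pretrained(model_id, quant_config)
model.quantize(calibration)                 # run the GPTQ algorithm layer by layer
model.save_quantized("opt-125m-gptq-4bit")  # writes the quantized weights plus this config file

At load time, consumers such as GPTQModel or Transformers read this file back to reconstruct the dequantization parameters and select matching kernels.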