OPEA/cogvlm2-llama3-chat-19B-int4-sym-inc / quantize_config.json
{
  "bits": 4,
  "group_size": 128,
  "sym": true,
  "data_type": "int",
  "enable_quanted_input": true,
  "enable_minmax_tuning": true,
  "seqlen": 512,
  "batch_size": 8,
  "scale_dtype": "torch.float16",
  "lr": 1.0,
  "minmax_lr": 1.0,
  "gradient_accumulate_steps": 1,
  "iters": 1,
  "amp": true,
  "nsamples": 512,
  "low_gpu_mem_usage": false,
  "to_quant_block_names": [
    [
      "model.layers.0",
      "model.layers.1",
      "model.layers.2",
      "model.layers.3",
      "model.layers.4",
      "model.layers.5",
      "model.layers.6",
      "model.layers.7",
      "model.layers.8",
      "model.layers.9",
      "model.layers.10",
      "model.layers.11",
      "model.layers.12",
      "model.layers.13",
      "model.layers.14",
      "model.layers.15",
      "model.layers.16",
      "model.layers.17",
      "model.layers.18",
      "model.layers.19",
      "model.layers.20",
      "model.layers.21",
      "model.layers.22",
      "model.layers.23",
      "model.layers.24",
      "model.layers.25",
      "model.layers.26",
      "model.layers.27",
      "model.layers.28",
      "model.layers.29",
      "model.layers.30",
      "model.layers.31"
    ]
  ],
  "enable_norm_bias_tuning": false,
  "autoround_version": "0.4.2.dev",
  "quant_method": "gptq",
  "desc_act": false,
  "true_sequential": false,
  "damp_percent": 0.01
}
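
This file records the AutoRound (0.4.2.dev) settings behind this 4-bit, symmetric, GPTQ-format export: weight-only int4 with group size 128, 512 calibration samples of sequence length 512, and only the 32 model.layers blocks quantized. The following is a minimal sketch of how these settings would map onto the auto-round API, not the exact script used to produce this checkpoint; the source model name, tokenizer handling, and keyword/argument names are assumptions based on the auto-round 0.4.x interface.

# Minimal sketch (assumptions noted above), not the original export recipe.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from auto_round import AutoRound

model_name = "THUDM/cogvlm2-llama3-chat-19B"  # assumed source checkpoint
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.float16, trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

autoround = AutoRound(
    model,
    tokenizer,
    bits=4,               # "bits": 4
    group_size=128,       # "group_size": 128
    sym=True,             # "sym": true
    seqlen=512,           # "seqlen": 512
    batch_size=8,         # "batch_size": 8
    iters=1,              # "iters": 1
    nsamples=512,         # "nsamples": 512
    low_gpu_mem_usage=False,
    # restrict quantization to the language-model blocks listed in the config
    to_quant_block_names=[[f"model.layers.{i}" for i in range(32)]],
)
autoround.quantize()
# export in a GPTQ-compatible layout, matching "quant_method": "gptq"
autoround.save_quantized("cogvlm2-llama3-chat-19B-int4-sym-inc", format="auto_gptq")

The resulting directory can then be loaded through transformers (with a GPTQ backend installed) the same way as this repository's checkpoint, using trust_remote_code=True since CogVLM2 ships custom modeling code.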