test_stage:
  obcq_modifiers:
    # Smooth large activation outliers by folding scales from each layernorm
    # output into the projections that consume it (log-based variant of SmoothQuant)
    LogarithmicEqualizationModifier:
      mappings: [
        [["re:.*q_proj", "re:.*k_proj", "re:.*v_proj"], "re:.*input_layernorm"],
        [["re:.*gate_proj", "re:.*up_proj"], "re:.*post_attention_layernorm"],
      ]
    QuantizationModifier:
      ignore:
        # These operations don't make sense to quantize
        - LlamaRotaryEmbedding
        - LlamaRMSNorm
        - SiLUActivation
        - MatMulOutput_QK
        - MatMulOutput_PV
        # Skip quantizing the layers with the most sensitive activations
        # - model.layers.2.mlp.down_proj
        # - model.layers.59.mlp.down_proj
        # - model.layers.58.mlp.down_proj
        # - model.layers.22.mlp.down_proj
        # - model.layers.20.mlp.down_proj
      post_oneshot_calibration: true
      scheme_overrides:
        # Enable channelwise quantization for better accuracy
        Linear:
          weights:
            num_bits: 8
            symmetric: true
            strategy: channel
        MatMulLeftInput_QK:
          input_activations:
            num_bits: 8
            symmetric: true
        MatMulLeftInput_PV:
          input_activations:
            num_bits: 8
            symmetric: true
        # For the embeddings, only weight-quantization makes sense
        Embedding:
          input_activations: null
          weights:
            num_bits: 8
            symmetric: false
    SparseGPTModifier:
      sparsity: 0.5                      # prune 50% of the weights in each targeted layer
      block_size: 128                    # column block size used by the SparseGPT solver
      sequential_update: true            # compress layers one at a time to limit memory use
      quantize: true                     # fold the quantization config above into the one-shot pass
      percdamp: 0.01                     # dampening fraction added to the Hessian diagonal for stability
      mask_structure: "0:0"              # unstructured sparsity (e.g. "2:4" for semi-structured)
      targets: ["re:model.layers.\\d*$"] # apply to every decoder layer