mwitiderrick committed
Commit: d383b7f
1 Parent(s): 152919e

Create recipe.yaml

Files changed (1):
  recipe.yaml  +35 -0
recipe.yaml ADDED
@@ -0,0 +1,35 @@
+test_stage:
+  obcq_modifiers:
+    SmoothQuantModifier:
+      smoothing_strength: 0.5
+      mappings: [
+        [["re:.*q_proj", "re:.*k_proj", "re:.*v_proj"], "re:.*input_layernorm"],
+        [["re:.*gate_proj", "re:.*up_proj"], "re:.*post_attention_layernorm"]
+      ]
+    QuantizationModifier:
+      ignore:
+        # These operations don't make sense to quantize
+        - MistralRotaryEmbedding
+        - MistralRMSNorm
+        - SiLUActivation
+        # Skip quantizing the layers with the most sensitive activations
+        - model.layers.1.mlp.down_proj
+        - model.layers.31.mlp.down_proj
+        - model.layers.30.mlp.down_proj
+        - model.layers.30.mlp.gate_proj
+        - model.layers.30.mlp.up_proj
+      post_oneshot_calibration: true
+      scheme_overrides:
+        Embedding:
+          input_activations: null
+          weights:
+            num_bits: 8
+            symmetric: false
+    SparseGPTModifier:
+      sparsity: 0.5
+      block_size: 128
+      sequential_update: true
+      quantize: true
+      percdamp: 0.01
+      mask_structure: "0:0"
+      targets: ["re:model.layers.\\d*$"]