n1ck-guo committed
Commit 12cbcc0
1 Parent(s): 177a72a

upload auto_gptq format


Signed-off-by: n1ck-guo <heng.guo@intel.com>
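For context, the AutoGPTQ-format export is typically produced by the intel/auto-round toolkit itself. A minimal sketch, assuming the auto_round Python API (AutoRound plus save_quantized(..., format="auto_gptq")) around version 0.4.2.dev; the base model name and output directory are placeholders:

    from transformers import AutoModelForCausalLM, AutoTokenizer
    from auto_round import AutoRound

    model_name = "meta-llama/Llama-3.1-8B"  # placeholder; substitute the actual base model
    model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto")
    tokenizer = AutoTokenizer.from_pretrained(model_name)

    # Key settings matching quantize_config.json below.
    autoround = AutoRound(model, tokenizer, bits=4, group_size=128, sym=True)
    autoround.quantize()

    # Export in AutoGPTQ layout; this is what writes quantize_config.json and
    # switches "quant_method" to "gptq" in config.json.
    autoround.save_quantized("./qmodel", format="auto_gptq", inplace=True)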

Files changed (2)
  1. config.json +5 -4
  2. quantize_config.json +25 -0
config.json CHANGED
@@ -26,11 +26,11 @@
   "quantization_config": {
     "amp": true,
     "autoround_version": "0.4.2.dev",
-    "backend": "auto_round:gptq:exllamav2",
     "batch_size": 8,
     "bits": 4,
+    "damp_percent": 0.01,
     "data_type": "int",
-    "dataset": "NeelNanda/pile-10k",
+    "desc_act": false,
     "enable_minmax_tuning": true,
     "enable_norm_bias_tuning": false,
     "enable_quanted_input": true,
@@ -41,11 +41,12 @@
     "lr": 0.001,
     "minmax_lr": 0.001,
     "nsamples": 512,
-    "quant_method": "intel/auto-round",
+    "quant_method": "gptq",
     "scale_dtype": "torch.float16",
     "seqlen": 2048,
     "sym": true,
-    "to_quant_block_names": null
+    "to_quant_block_names": null,
+    "true_sequential": false
   },
   "rms_norm_eps": 1e-05,
   "rope_scaling": {
quantize_config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "bits": 4,
+  "group_size": 128,
+  "sym": true,
+  "data_type": "int",
+  "enable_quanted_input": true,
+  "enable_minmax_tuning": true,
+  "seqlen": 2048,
+  "batch_size": 8,
+  "scale_dtype": "torch.float16",
+  "lr": 0.001,
+  "minmax_lr": 0.001,
+  "gradient_accumulate_steps": 1,
+  "iters": 1000,
+  "amp": true,
+  "nsamples": 512,
+  "low_gpu_mem_usage": true,
+  "to_quant_block_names": null,
+  "enable_norm_bias_tuning": false,
+  "autoround_version": "0.4.2.dev",
+  "quant_method": "gptq",
+  "desc_act": false,
+  "true_sequential": false,
+  "damp_percent": 0.01
+}
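For reference, "bits": 4, "group_size": 128 and "sym": true mean that each group of 128 weights shares one float16 scale and is stored as a signed 4-bit integer. The sketch below illustrates that per-group (de)quantization only; it is not the packed on-disk layout, and AutoRound additionally tunes the rounding and clipping over "iters": 1000 steps (the lr/minmax_lr settings) rather than using the plain round-to-nearest shown here:

    import torch

    bits, group_size = 4, 128
    qmax = 2 ** (bits - 1) - 1            # 7: largest positive 4-bit code

    w = torch.randn(group_size)           # one group of original weights
    scale = w.abs().max() / qmax          # per-group scale ("scale_dtype": torch.float16)
    q = torch.round(w / scale).clamp(-qmax - 1, qmax)   # int4 codes in [-8, 7]
    w_hat = q * scale                     # dequantized weights used at inference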