ChenMnZ committed
Commit
3f5d0d7
1 Parent(s): acb8078

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,49 @@
+ # Block-AP (EfficientQAT w/o E2E-QP)
+
+ [EfficientQAT](https://arxiv.org/abs/2407.11062) involves two consecutive training phases: Block-wise training of all parameters (Block-AP) and end-to-end training of quantization parameters (E2E-QP).
+
+ In this repo, we provide the quantized checkpoints produced by Block-AP. Anyone can use them to reproduce our results or to carry out follow-up research.
+
+ ## Performance
+
+ In the table below, wXgY denotes X-bit weight-only quantization with a group size of Y (e.g., w2g64 means 2-bit weights quantized in 64-element groups).
+
+ | Model | Quantization | WikiText2 PPL | Avg. Accuracy (%) | Model Size (GB) | Hub link |
+ |-------|--------------|---------------|-------------------|-----------------|----------|
+ | Llama-2-7B | fp16 | 5.47 | 64.86 | 13.2 | - |
+ | Llama-2-7B | w4g128 | 5.56 | 64.07 | 3.7 | [Link](https://huggingface.co/ChenMnZ/Llama-2-7b-BlockAd-w4g128) |
+ | Llama-2-7B | w3g128 | 5.89 | 63.96 | 3.1 | [Link](https://huggingface.co/ChenMnZ/Llama-2-7b-BlockAP-w3g128) |
+ | Llama-2-7B | w2g64 | 7.65 | 59.54 | 2.3 | [Link](https://huggingface.co/ChenMnZ/Llama-2-7b-BlockAP-w2g64) |
+ | Llama-2-7B | w2g128 | 7.94 | 58.72 | 2.2 | [Link](https://huggingface.co/ChenMnZ/Llama-2-7b-BlockAP-w2g128) |
+ | Llama-2-13B | fp16 | 4.88 | 67.81 | 25.4 | - |
+ | Llama-2-13B | w4g128 | 4.96 | 67.27 | 6.8 | [Link](https://huggingface.co/ChenMnZ/Llama-2-13b-BlockAP-w4g128) |
+ | Llama-2-13B | w3g128 | 5.20 | 67.30 | 5.6 | [Link](https://huggingface.co/ChenMnZ/Llama-2-13b-BlockAP-w3g128) |
+ | Llama-2-13B | w2g64 | 6.55 | 63.10 | 4.0 | [Link](https://huggingface.co/ChenMnZ/Llama-2-13b-BlockAP-w2g64) |
+ | Llama-2-13B | w2g128 | 6.68 | 63.49 | 3.8 | [Link](https://huggingface.co/ChenMnZ/Llama-2-13b-BlockAP-w2g128) |
+ | Llama-2-70B | fp16 | 3.32 | 72.41 | 131.6 | - |
+ | Llama-2-70B | w4g128 | 3.41 | 72.54 | 35.8 | [Link](https://huggingface.co/ChenMnZ/Llama-2-70b-BlockAP-w4g128) |
+ | Llama-2-70B | w3g128 | 3.65 | 71.88 | 29.1 | [Link](https://huggingface.co/ChenMnZ/Llama-2-70b-BlockAP-w3g128) |
+ | Llama-2-70B | w2g64 | 4.96 | 69.44 | 20.1 | [Link](https://huggingface.co/ChenMnZ/Llama-2-70b-BlockAP-w2g64) |
+ | Llama-2-70B | w2g128 | 5.26 | 68.73 | 18.9 | [Link](https://huggingface.co/ChenMnZ/Llama-2-70b-BlockAP-w2g128) |
+ | Llama-3-8B | fp16 | 6.14 | 68.58 | 13.0 | - |
+ | Llama-3-8B | w4g128 | 6.50 | 68.43 | 5.4 | [Link](https://huggingface.co/ChenMnZ/Llama-3-8b-BlockAP-w4g128) |
+ | Llama-3-8B | w3g128 | 7.34 | 66.72 | 4.7 | [Link](https://huggingface.co/ChenMnZ/Llama-3-8b-BlockAP-w3g128) |
+ | Llama-3-8B | w2g64 | 12.47 | 58.65 | 3.9 | [Link](https://huggingface.co/ChenMnZ/Llama-3-8b-BlockAP-w2g64) |
+ | Llama-3-8B | w2g128 | 13.25 | 58.23 | 3.8 | [Link](https://huggingface.co/ChenMnZ/Llama-3-8b-BlockAP-w2g128) |
+ | Llama-3-70B | fp16 | 2.85 | 75.33 | 137.8 | - |
+ | Llama-3-70B | w4g128 | 3.18 | 74.50 | 38.9 | [Link](https://huggingface.co/ChenMnZ/Llama-3-70b-BlockAP-w4g128) |
+ | Llama-3-70B | w3g128 | 4.88 | 71.90 | 32.2 | [Link](https://huggingface.co/ChenMnZ/Llama-3-70b-BlockAP-w3g128) |
+ | Llama-3-70B | w2g64 | 13.75 | 66.70 | 23.2 | [Link](https://huggingface.co/ChenMnZ/Llama-3-70b-BlockAP-w2g64) |
+ | Llama-3-70B | w2g128 | 16.79 | 65.06 | 22.0 | [Link](https://huggingface.co/ChenMnZ/Llama-3-70b-BlockAP-w2g128) |
+ | Llama-3-8B-Instruct | fp16 | 8.29 | 68.43 | 13.0 | - |
+ | Llama-3-8B-Instruct | w4g128 | 8.76 | 67.80 | 5.4 | [Link](https://huggingface.co/ChenMnZ/Llama-3-8b-instruct-BlockAP-w4g128) |
+ | Llama-3-8B-Instruct | w3g128 | 9.83 | 66.54 | 4.7 | [Link](https://huggingface.co/ChenMnZ/Llama-3-8b-instruct-BlockAP-w3g128) |
+ | Llama-3-8B-Instruct | w2g64 | 16.77 | 58.62 | 3.9 | [Link](https://huggingface.co/ChenMnZ/Llama-3-8b-instruct-BlockAP-w2g64) |
+ | Llama-3-8B-Instruct | w2g128 | 18.02 | 57.19 | 3.8 | [Link](https://huggingface.co/ChenMnZ/Llama-3-8b-instruct-BlockAP-w2g128) |
+ | Llama-3-70B-Instruct | fp16 | 5.33 | 73.78 | 137.8 | - |
+ | Llama-3-70B-Instruct | w4g128 | 5.77 | 73.52 | 38.9 | [Link](https://huggingface.co/ChenMnZ/Llama-3-70b-instruct-BlockAP-w4g128) |
+ | Llama-3-70B-Instruct | w3g128 | 7.25 | 69.80 | 32.2 | [Link](https://huggingface.co/ChenMnZ/Llama-3-70b-instruct-BlockAP-w3g128) |
+ | Llama-3-70B-Instruct | w2g64 | 12.48 | 65.60 | 23.2 | [Link](https://huggingface.co/ChenMnZ/Llama-3-70b-instruct-BlockAP-w2g64) |
+ | Llama-3-70B-Instruct | w2g128 | 13.48 | 61.75 | 22.0 | [Link](https://huggingface.co/ChenMnZ/Llama-3-70b-instruct-BlockAP-w2g128) |
+
+ ## Usage
+ Please refer to [https://github.com/OpenGVLab/EfficientQAT](https://github.com/OpenGVLab/EfficientQAT) for details. These checkpoints can be used for the [subsequent E2E-QP training](https://github.com/OpenGVLab/EfficientQAT?tab=readme-ov-file#training), or loaded directly for [inference](https://github.com/OpenGVLab/EfficientQAT?tab=readme-ov-file#inference). A minimal download sketch follows.
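
For example, a checkpoint can be fetched with `huggingface_hub`'s `snapshot_download` (a minimal sketch; the repo id is one of the hub links from the table above, and the destination directory is an arbitrary choice, not something mandated by EfficientQAT):

```python
from huggingface_hub import snapshot_download

# Download all files of a Block-AP checkpoint (config, tokenizer, safetensors shards).
path = snapshot_download(
    repo_id="ChenMnZ/Llama-2-70b-BlockAP-w2g128",  # any hub link from the table above
    local_dir="./Llama-2-70b-BlockAP-w2g128",      # assumed local destination
)
print(path)  # local directory holding the checkpoint
```

The downloaded directory can then be passed to the EfficientQAT scripts linked above for E2E-QP training or inference.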
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "/cpfs01/user/chenmengzhao/llama_quantization/llama2-hf/Llama-2-70b",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 8192,
+   "initializer_range": 0.02,
+   "intermediate_size": 28672,
+   "max_position_embeddings": 4096,
+   "model_type": "llama",
+   "num_attention_heads": 64,
+   "num_hidden_layers": 80,
+   "num_key_value_heads": 8,
+   "pad_token_id": 0,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.40.0",
+   "use_cache": true,
+   "vocab_size": 32000
+ }
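
This is the stock Llama-2-70B architecture config (80 transformer layers, hidden size 8192, 64 attention heads with 8 KV heads, i.e. grouped-query attention). A quick way to inspect it (a minimal sketch using `transformers.AutoConfig`; `path/to/checkpoint` is a placeholder for the downloaded repo directory):

```python
from transformers import AutoConfig

# Read the config.json shipped with the checkpoint and print a few fields.
config = AutoConfig.from_pretrained("path/to/checkpoint")
print(config.num_hidden_layers)    # 80
print(config.hidden_size)          # 8192
print(config.num_key_value_heads)  # 8 (grouped-query attention)
```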
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.40.0"
+ }
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1499493829a9f7c7f33e82935f3e7dc84927839ad029d7772080c347c9c39707
+ size 4988027856
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:877e559c6d448b542d9aa1b6e0d6749f4bd8b26df8c0bbf4d5b5d891baadf5cd
+ size 4981662032
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:362413e9585e1f1e685e89d8310adfb09195d2855df14aefc8322e662dfcc06b
+ size 4999731664
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc02e65b8790e94f0792ea53b4880b8b10835e127c2104a285933de69e6c9dab
+ size 4422968112
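
The four entries above are git-lfs pointers: each records the SHA-256 digest (`oid`) and byte size of a safetensors shard stored in LFS. After downloading, a shard can be verified against its published digest (a minimal sketch; the path is a placeholder, and the expected hash is the `oid` from the first pointer above):

```python
import hashlib
from pathlib import Path

# Recompute a shard's SHA-256 and compare it with the oid in its LFS pointer.
expected = "1499493829a9f7c7f33e82935f3e7dc84927839ad029d7772080c347c9c39707"
shard = Path("path/to/checkpoint/model-00001-of-00004.safetensors")

digest = hashlib.sha256()
with shard.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        digest.update(chunk)

assert digest.hexdigest() == expected, "shard is corrupted or incomplete"
print("ok:", shard.name)
```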
model.safetensors.index.json ADDED
The diff for this file is too large to render.
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
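
This is the standard Llama SentencePiece tokenizer configuration (BOS prepended, no EOS appended; the huge `model_max_length` is the `transformers` sentinel meaning no limit was set). Loading it is a one-liner (a minimal sketch; `path/to/checkpoint` is a placeholder for the downloaded repo directory):

```python
from transformers import AutoTokenizer

# Load the tokenizer shipped with the checkpoint and sanity-check its special tokens.
tok = AutoTokenizer.from_pretrained("path/to/checkpoint")
print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s> </s> <unk>
print(tok("Hello, world!").input_ids[0])            # 1 -> BOS id ("add_bos_token": true)
```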