wenhuach committed
Commit 1888e1e
Parent: 4a3503e

upload model file


Signed-off-by: wenhuach <wenhuach87@gmail.com>

README.md CHANGED
@@ -1,3 +1,108 @@
- ---
- license: apache-2.0
- ---

## Model Details

This model is an int4 model with group_size 128 of [google/gemma-2b](https://huggingface.co/google/gemma-2b), generated by [intel/auto-round](https://github.com/intel/auto-round).

### Use the model

#### INT4 Inference with AutoGPTQ's kernel

Install the latest [AutoGPTQ](https://github.com/AutoGPTQ/AutoGPTQ) from source first.

```python
## pip install auto-gptq[triton]
## pip install triton==2.2.0
from transformers import AutoModelForCausalLM, AutoTokenizer

quantized_model_dir = "Intel/gemma-2b-int4-inc"
tokenizer = AutoTokenizer.from_pretrained(quantized_model_dir, use_fast=True)
# The quantization_config in config.json tells transformers to load the
# int4 checkpoint through AutoGPTQ's kernels.
model = AutoModelForCausalLM.from_pretrained(quantized_model_dir,
                                             device_map="auto",
                                             trust_remote_code=False,
                                             )
text = "There is a girl who likes adventure,"
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# do_sample=True makes the completion non-deterministic; set do_sample=False
# for a reproducible greedy completion.
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=50, do_sample=True)[0]))
```

### Evaluate the model

Install [lm-eval-harness](https://github.com/EleutherAI/lm-evaluation-harness.git) from source; the commit id we used is 96d185fa6232a5ab685ba7c43e45d1dbb3bb906d.

```bash
pip install auto-gptq[triton]
pip install triton==2.2.0
```

Please note that there is a discrepancy between our baseline results and the officially reported numbers; this is a known issue discussed in the official model card's community tab.

```bash
lm_eval --model hf --model_args pretrained="Intel/gemma-2b-int4-inc",autogptq=True,gptq_use_triton=True --device cuda:0 --tasks lambada_openai,hellaswag,piqa,winogrande,truthfulqa_mc1,openbookqa,boolq,rte,arc_easy,arc_challenge,mmlu --batch_size 16
```

| Metric         | FP16   | int4   |
| -------------- | ------ | ------ |
| Avg.           | 0.5383 | 0.5338 |
| mmlu           | 0.3337 | 0.3276 |
| lambada_openai | 0.6398 | 0.6319 |
| hellaswag      | 0.5271 | 0.5161 |
| winogrande     | 0.6472 | 0.6472 |
| piqa           | 0.7699 | 0.7622 |
| truthfulqa_mc1 | 0.2203 | 0.2191 |
| openbookqa     | 0.3020 | 0.2980 |
| boolq          | 0.6939 | 0.6939 |
| rte            | 0.6426 | 0.6498 |
| arc_easy       | 0.7424 | 0.7348 |
| arc_challenge  | 0.4019 | 0.3908 |

### Reproduce the model

Here is the sample command to reproduce the model.

```bash
git clone https://github.com/intel/auto-round
cd auto-round/examples/language-modeling
pip install -r requirements.txt
python3 main.py \
    --model_name google/gemma-2b \
    --device 0 \
    --group_size 128 \
    --bits 4 \
    --iters 400 \
    --use_quant_input \
    --deployment_device 'gpu' \
    --output_dir "./tmp_autoround"
```

## Ethical Considerations and Limitations

The model can produce factually incorrect output and should not be relied on to produce factually accurate information. Because of the limitations of the pretrained model and the finetuning datasets, it is possible that this model could generate lewd, biased, or otherwise offensive outputs.

Therefore, before deploying any applications of the model, developers should perform safety testing.

## Caveats and Recommendations

Users (both direct and downstream) should be made aware of the risks, biases, and limitations of the model.

Here are a couple of useful links to learn more about Intel's AI software:

* [Intel Neural Compressor](https://github.com/intel/neural-compressor)
* [Intel Extension for Transformers](https://github.com/intel/intel-extension-for-transformers)

## Disclaimer

The license on this model does not constitute legal advice. We are not responsible for the actions of third parties who use this model. Please consult an attorney before using this model for commercial purposes.

config.json ADDED
@@ -0,0 +1,48 @@
{
  "_name_or_path": "/models/gemma-2b",
  "architectures": [
    "GemmaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 2,
  "eos_token_id": 1,
  "head_dim": 256,
  "hidden_act": "gelu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 16384,
  "max_position_embeddings": 8192,
  "model_type": "gemma",
  "num_attention_heads": 8,
  "num_hidden_layers": 18,
  "num_key_value_heads": 1,
  "pad_token_id": 0,
  "quantization_config": {
    "autoround_version": "0.1",
    "bits": 4,
    "damp_percent": 0.01,
    "desc_act": false,
    "enable_minmax_tuning": true,
    "group_size": 128,
    "is_marlin_format": false,
    "iters": 400,
    "lr": 0.0025,
    "minmax_lr": 0.0025,
    "model_file_base_name": "model",
    "model_name_or_path": null,
    "quant_method": "gptq",
    "scale_dtype": "torch.float32",
    "static_groups": false,
    "sym": false,
    "true_sequential": false,
    "use_quant_input": true
  },
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.38.2",
  "use_cache": true,
  "vocab_size": 256000
}

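The `quantization_config` block above is what tells `transformers` how the checkpoint was quantized. As a quick reference, here is a minimal sketch of reading those settings back from the Hub; the repo id is taken from the README above, and the field names come straight from this file:

```python
import json

from huggingface_hub import hf_hub_download

# Fetch config.json for the quantized repo (repo id from the README above).
config_path = hf_hub_download("Intel/gemma-2b-int4-inc", "config.json")
with open(config_path) as f:
    config = json.load(f)

# 4-bit asymmetric GPTQ-format weights with group size 128, tuned by
# auto-round for 400 iterations (see the fields above).
qcfg = config["quantization_config"]
print(qcfg["bits"], qcfg["group_size"], qcfg["sym"], qcfg["quant_method"])
```
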
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0ee3302033aa9b45890be54a999acad6b7531ff900408b325eafd6a21bc20399
size 3130472776
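
The three lines above are a Git LFS pointer: the 3.1 GB weights file is stored out of band and identified by its SHA-256. A minimal sketch for verifying a downloaded copy against the pointer (the local path is an assumption):

```python
import hashlib

# Values copied from the LFS pointer above.
EXPECTED_OID = "0ee3302033aa9b45890be54a999acad6b7531ff900408b325eafd6a21bc20399"
EXPECTED_SIZE = 3130472776

def verify_lfs_object(path: str) -> bool:
    """Stream the file, comparing its SHA-256 and byte size to the pointer."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == EXPECTED_OID and size == EXPECTED_SIZE

print(verify_lfs_object("model.safetensors"))  # hypothetical local path
```
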
quantize_config.json ADDED
@@ -0,0 +1,20 @@
{
  "bits": 4,
  "group_size": 128,
  "damp_percent": 0.01,
  "desc_act": false,
  "static_groups": false,
  "sym": false,
  "true_sequential": false,
  "model_name_or_path": null,
  "model_file_base_name": "model",
  "is_marlin_format": false,
  "quant_method": "intel/auto-round",
  "autoround_version": "0.1",
  "iters": 400,
  "lr": 0.0025,
  "minmax_lr": 0.0025,
  "enable_minmax_tuning": true,
  "use_quant_input": true,
  "scale_dtype": "torch.float32"
}
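
To make `bits`, `group_size`, and `sym` concrete: with `sym: false`, each group of 128 weights gets its own scale and zero point, and weights are rounded to 4-bit codes in [0, 15]. The sketch below only illustrates that arithmetic; it is not auto-round's tuning procedure or the packed storage format of the real checkpoint:

```python
import torch

def fake_quantize(w: torch.Tensor, bits: int = 4, group_size: int = 128) -> torch.Tensor:
    """Asymmetric per-group quantize-dequantize along the last dimension."""
    qmax = 2**bits - 1                                  # 15 levels above zero for int4
    groups = w.reshape(-1, group_size)                  # one row per group of 128
    wmin = groups.min(dim=1, keepdim=True).values
    wmax = groups.max(dim=1, keepdim=True).values
    scale = (wmax - wmin).clamp(min=1e-9) / qmax        # sym=false: span [wmin, wmax]
    zero = torch.round(-wmin / scale)                   # per-group zero point
    q = torch.clamp(torch.round(groups / scale) + zero, 0, qmax)
    return ((q - zero) * scale).reshape(w.shape)

w = torch.randn(2048, 2048)
print(f"mean abs error: {(w - fake_quantize(w)).abs().mean():.5f}")
```
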
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<bos>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<eos>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}

tokenizer.model ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:61a7b147390c64585d6c3543dd6fc636906c9af3865a5548f27f31aee1d4c8e2
size 4241003

tokenizer_config.json ADDED
@@ -0,0 +1,49 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<eos>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "<bos>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<bos>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<eos>",
  "legacy": null,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<pad>",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "GemmaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
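
A quick sanity check of the tokenizer settings above (`add_bos_token: true`, `add_eos_token: false`, and the special-token ids in `added_tokens_decoder`); the repo id is taken from the README:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Intel/gemma-2b-int4-inc")

# add_bos_token=true: <bos> (id 2) is prepended to every encoding;
# add_eos_token=false: no <eos> (id 1) is appended.
ids = tokenizer("There is a girl who likes adventure,")["input_ids"]
print(ids[0] == tokenizer.bos_token_id)          # expected: True
print(tokenizer.convert_ids_to_tokens(ids[:3]))  # starts with '<bos>'
print(tokenizer.special_tokens_map)              # mirrors special_tokens_map.json
```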