Commit 9d986f6 by cicdatopea
Parent(s): 3d49c61
Update README.md
README.md CHANGED
@@ -13,7 +13,7 @@ This model is an int4 model with group_size 128 and symmetric quantization of [t
 from auto_round import AutoRoundConfig ##must import for auto_round format
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-quantized_model_dir = "OPEA/
+quantized_model_dir = "OPEA/Falcon3-3B-Base-int4-sym-inc"
 tokenizer = AutoTokenizer.from_pretrained(quantized_model_dir)
 model = AutoModelForCausalLM.from_pretrained(
     quantized_model_dir,
@@ -71,7 +71,7 @@ text = "There is a girl who likes adventure,"
 pip3 install lm-eval==0.4.5
 
 ```bash
-auto-round --model "OPEA/
+auto-round --model "OPEA/Falcon3-3B-Base-int4-sym-inc" --eval --eval_bs 16 --tasks lambada_openai,hellaswag,piqa,winogrande,truthfulqa_mc1,openbookqa,boolq,arc_easy,arc_challenge,mmlu
 ```
 
 | Metric | BF16 | INT4 |
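
For reference, the lines this commit touches belong to the README's quick-start loading snippet, which the diff only shows in part. Below is a minimal sketch of that snippet completed end to end; the `device_map` and generation arguments are assumptions for illustration and are not visible in the diff hunks above.

```python
from auto_round import AutoRoundConfig  ## must import for auto_round format
from transformers import AutoModelForCausalLM, AutoTokenizer

# Model id set by this commit.
quantized_model_dir = "OPEA/Falcon3-3B-Base-int4-sym-inc"

tokenizer = AutoTokenizer.from_pretrained(quantized_model_dir)
model = AutoModelForCausalLM.from_pretrained(
    quantized_model_dir,
    device_map="auto",  # assumption: this argument is outside the shown hunk
)

# Prompt taken from the second hunk's header context.
text = "There is a girl who likes adventure,"
inputs = tokenizer(text, return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=50)[0]))
```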