cicdatopea committed
Commit 162764a · verified · 1 Parent(s): cedf6a8

Update README.md

Files changed (1)
  1. README.md +2 -2
README.md CHANGED
@@ -20,7 +20,7 @@ This model is an int4 model with group_size 128 and symmetric quantization of [t
 from auto_round import AutoRoundConfig ##must import for auto_round format
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-quantized_model_dir = "OPEA/falcon3-10B-int4-sym-inc"
+quantized_model_dir = "OPEA/Falcon3-10B-Base-int4-sym-inc"
 tokenizer = AutoTokenizer.from_pretrained(quantized_model_dir)
 model = AutoModelForCausalLM.from_pretrained(
     quantized_model_dir,
@@ -96,7 +96,7 @@ text = "There is a girl who likes adventure,"
 pip3 install lm-eval==0.4.5
 
 ```bash
-auto-round --model "OPEA/falcon3-10B-int4-sym-inc" --eval --eval_bs 16 --tasks lambada_openai,hellaswag,piqa,winogrande,truthfulqa_mc1,openbookqa,boolq,arc_easy,arc_challenge,mmlu
+auto-round --model "OPEA/Falcon3-10B-Base-int4-sym-inc" --eval --eval_bs 16 --tasks lambada_openai,hellaswag,piqa,winogrande,truthfulqa_mc1,openbookqa,boolq,arc_easy,arc_challenge,mmlu
 ```
 
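For reference, a self-contained version of the updated loading snippet might look like the sketch below. The repository id and imports come from the diff; the `device_map` argument and the generation call are illustrative assumptions, since the hunk truncates the remaining `from_pretrained` keyword arguments (the prompt text is taken from the README context shown in the second hunk header).

```python
# Sketch only: load the renamed int4 repo with transformers + auto_round.
from auto_round import AutoRoundConfig  ##must import for auto_round format
from transformers import AutoModelForCausalLM, AutoTokenizer

quantized_model_dir = "OPEA/Falcon3-10B-Base-int4-sym-inc"

tokenizer = AutoTokenizer.from_pretrained(quantized_model_dir)
model = AutoModelForCausalLM.from_pretrained(
    quantized_model_dir,
    device_map="auto",  # assumption: the diff cuts off the remaining kwargs here
)

# Prompt from the README context in the second hunk.
text = "There is a girl who likes adventure,"
inputs = tokenizer(text, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=50)  # generation settings are assumptions
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```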