LoftQ committed on
Commit
5c9a217
1 Parent(s): 01e28d6

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -27,14 +27,14 @@ from transformers import AutoModelForCausalLM
27
 
28
  MODEL_ID = "LoftQ/Llama-2-7b-hf-fp16-64rank-gsm8k"
29
 
30
- base_model = AutoModelForCausalLM.from_pretrained(
31
  MODEL_ID,
32
  torch_dtype=torch.bfloat16, # you may change it with different models
33
  token=YOUR_HF_TOKEN,
34
 
35
  )
36
 
37
- # Do inference with peft_model ...
38
  ```
39
 
40
  See full evaluation on GSM8K on [Github](https://github.com/yxli2123/LoftQ/blob/main/test_gsm8k.py).
 
27
 
28
  MODEL_ID = "LoftQ/Llama-2-7b-hf-fp16-64rank-gsm8k"
29
 
30
+ model = AutoModelForCausalLM.from_pretrained(
31
  MODEL_ID,
32
  torch_dtype=torch.bfloat16, # you may change it with different models
33
  token=YOUR_HF_TOKEN,
34
 
35
  )
36
 
37
+ # Do inference with `model` ...
38
  ```
39
 
40
  See full evaluation on GSM8K on [Github](https://github.com/yxli2123/LoftQ/blob/main/test_gsm8k.py).