Update README.md
README.md (changed):

````diff
@@ -33,14 +33,14 @@ Now, let's start to download the model.
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-
-
-
+model_id = "Mike0307/Phi-3-mini-4k-instruct-chinese-lora"
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
     device_map="mps", # FIX mps if not MacOS
     torch_dtype=torch.float32,
     trust_remote_code=True,
 )
-tokenizer = AutoTokenizer.from_pretrained(
+tokenizer = AutoTokenizer.from_pretrained(model_id)
 ```
 
 ## Example of inference
@@ -49,7 +49,7 @@ tokenizer = AutoTokenizer.from_pretrained(model_path)
 input_text = "<|user|>將這五種動物分成兩組。\n老虎、鯊魚、大象、鯨魚、袋鼠 <|end|>\n<|assistant|>"
 inputs = tokenizer(input_text, return_tensors="pt").to(torch.device("mps")) # FIX mps if not MacOS
 
-outputs =
+outputs = model.generate(
     **inputs,
     temperature = 0.0,
     max_length = 500,
````
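For context, here is how the updated snippet reads end to end once this change is applied. The hunks above stop at `max_length = 500`, so the closing of the `model.generate(...)` call and the decoding step are not part of the diff; they are sketched below as assumptions to make the example self-contained.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Download the model and tokenizer (as in the updated README section).
model_id = "Mike0307/Phi-3-mini-4k-instruct-chinese-lora"
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="mps",          # FIX mps if not MacOS
    torch_dtype=torch.float32,
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Prompt (Chinese): "Divide these five animals into two groups.
# Tiger, shark, elephant, whale, kangaroo"
input_text = "<|user|>將這五種動物分成兩組。\n老虎、鯊魚、大象、鯨魚、袋鼠 <|end|>\n<|assistant|>"
inputs = tokenizer(input_text, return_tensors="pt").to(torch.device("mps"))  # FIX mps if not MacOS

outputs = model.generate(
    **inputs,
    temperature=0.0,
    max_length=500,
)  # assumption: the closing of this call lies outside the hunks shown above

# Assumption: decode and print the completion; not part of this commit.
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

Note that `temperature` only takes effect when sampling is enabled (`do_sample=True`); with the default greedy decoding it is ignored, so this call is effectively deterministic.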