Commit 6e1d240 by runningSnail (parent: fc231d4): update
README.md CHANGED
@@ -21,23 +21,20 @@ Below is a code snippet to use Octopus Planner:
 
 Run below code to use Octopus Planner for a given question:
 ```python
+import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_id = "
-tokenizer_id = "microsoft/Phi-3-mini-128k-instruct"
+model_id = "NexaAIDev/octopus-planning"
 model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)
-tokenizer = AutoTokenizer.from_pretrained(
+tokenizer = AutoTokenizer.from_pretrained(model_id)
 
 question = "Find my presentation for tomorrow's meeting, connect to the conference room projector via Bluetooth, increase the screen brightness, take a screenshot of the final summary slide, and email it to all participants"
-
 inputs = f"<|user|>{question}<|end|><|assistant|>"
 input_ids = tokenizer(inputs, return_tensors="pt").to(model.device)
-
 outputs = model.generate(
     input_ids=input_ids["input_ids"],
     max_length=1024,
     do_sample=False)
-
 res = tokenizer.decode(outputs.tolist()[0])
 print(f"=== inference result ===\n{res}")
 ```
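
For reference, here is the snippet as it stands after this commit, assembled from the new side of the diff into a single runnable block. The code is unchanged apart from added comments; running it assumes `torch`, `transformers`, and `accelerate` (needed for `device_map="auto"`) are installed:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the planner model and its tokenizer from the Hugging Face Hub.
model_id = "NexaAIDev/octopus-planning"
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

question = "Find my presentation for tomorrow's meeting, connect to the conference room projector via Bluetooth, increase the screen brightness, take a screenshot of the final summary slide, and email it to all participants"

# Wrap the query in the chat markers used by the snippet (Phi-3-style format).
inputs = f"<|user|>{question}<|end|><|assistant|>"
input_ids = tokenizer(inputs, return_tensors="pt").to(model.device)

# Greedy decoding; max_length caps prompt plus generated tokens combined.
outputs = model.generate(
    input_ids=input_ids["input_ids"],
    max_length=1024,
    do_sample=False,
)

res = tokenizer.decode(outputs.tolist()[0])
print(f"=== inference result ===\n{res}")
```

Note that `max_length=1024` bounds prompt and generated tokens together; `max_new_tokens` is the usual alternative when you want to bound only the new output. Since `generate` echoes the prompt, printing just the completion can be done by slicing it off, e.g. `tokenizer.decode(outputs[0][input_ids["input_ids"].shape[1]:])`.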