Update README.md
Browse files
README.md
CHANGED
@@ -19,11 +19,12 @@ This model is released under the Apache License 2.0.
|
|
19 |
## Usage
|
20 |
Install the required libraries as follows:
|
21 |
```sh
|
22 |
-
>>> python -m pip install numpy sentencepiece torch transformers
|
23 |
```
|
24 |
|
25 |
Execute the following Python code:
|
26 |
```python
|
|
|
27 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
28 |
tokenizer = AutoTokenizer.from_pretrained(
|
29 |
"pfnet/plamo-13b-instruct",
|
@@ -35,7 +36,9 @@ model = AutoModelForCausalLM.from_pretrained(
|
|
35 |
torch_dtype=torch.bfloat16,
|
36 |
device_map="auto",
|
37 |
)
|
|
|
38 |
|
|
|
39 |
def completion(prompt: str, max_new_tokens: int = 128) -> str:
|
40 |
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
|
41 |
generated_ids = model.generate(
|
@@ -58,7 +61,7 @@ def generate_prompt(messages: list) -> str:
|
|
58 |
]
|
59 |
roles = {"instruction": "指示", "response": "応答", "input": "入力"}
|
60 |
for msg in messages:
|
61 |
-
prompt.append(sep + roles[msg["role"]] + ":\n" + msg[
|
62 |
prompt.append(sep + roles["response"] + ":\n")
|
63 |
return "".join(prompt)
|
64 |
```
|
|
|
19 |
## Usage
|
20 |
Install the required libraries as follows:
|
21 |
```sh
|
22 |
+
>>> python -m pip install numpy sentencepiece torch transformers accelerate
|
23 |
```
|
24 |
|
25 |
Execute the following Python code:
|
26 |
```python
|
27 |
+
import torch
|
28 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
29 |
tokenizer = AutoTokenizer.from_pretrained(
|
30 |
"pfnet/plamo-13b-instruct",
|
|
|
36 |
torch_dtype=torch.bfloat16,
|
37 |
device_map="auto",
|
38 |
)
|
39 |
+
```
|
40 |
|
41 |
+
```python
|
42 |
def completion(prompt: str, max_new_tokens: int = 128) -> str:
|
43 |
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
|
44 |
generated_ids = model.generate(
|
|
|
61 |
]
|
62 |
roles = {"instruction": "指示", "response": "応答", "input": "入力"}
|
63 |
for msg in messages:
|
64 |
+
prompt.append(sep + roles[msg["role"]] + ":\n" + msg["content"])
|
65 |
prompt.append(sep + roles["response"] + ":\n")
|
66 |
return "".join(prompt)
|
67 |
```
|