AkimfromParis
committed on
Commit
•
64f1af2
1
Parent(s):
a904d68
Update README.md
Browse files
README.md
CHANGED
@@ -43,22 +43,24 @@ dtype: bfloat16
|
|
43 |
## 🤗 Usage for HuggingFace
|
44 |
|
45 |
```python
|
46 |
-
!pip install -qU transformers accelerate
|
47 |
|
48 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
|
49 |
import torch
|
50 |
|
51 |
-
|
52 |
-
model = AutoModelForCausalLM.from_pretrained("AkimfromParis/Heliotrope-Ely-Swa-slerp-7B")
|
53 |
|
54 |
-
|
|
|
55 |
|
56 |
-
|
57 |
-
messages.append({"role": "user", "content": "大谷翔平選手について教えてください"})
|
58 |
|
59 |
-
|
60 |
-
|
61 |
-
|
|
|
|
|
62 |
```
|
63 |
|
64 |
# 🔖 Citation
|
|
|
43 |
## 🤗 Usage for HuggingFace
|
44 |
|
45 |
```python
|
46 |
+
# !pip install -qU transformers accelerate
|
47 |
|
48 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
49 |
+
from transformers import pipeline
|
50 |
import torch
|
51 |
|
52 |
+
model_name = "AkimfromParis/Heliotrope-Ely-Swa-slerp-7B"
|
|
|
53 |
|
54 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
55 |
+
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16)
|
56 |
|
57 |
+
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id)
|
|
|
58 |
|
59 |
+
messages = [
|
60 |
+
{"role": "system", "content": "あなたは誠実で優秀な日本人のアシスタントです。以下のトピックに関する詳細な情報を提供してください。"},
|
61 |
+
{"role": "user", "content": "大谷翔平選手は誰ですか?"},
|
62 |
+
]
|
63 |
+
print(pipe(messages, max_new_tokens=256)[0]['generated_text'][-1])
|
64 |
```
|
65 |
|
66 |
# 🔖 Citation
|