AkimfromParis committed on
Commit
64f1af2
1 Parent(s): a904d68

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +11 -9
README.md CHANGED
@@ -43,22 +43,24 @@ dtype: bfloat16
43
  ## 🤗 Usage for HuggingFace
44
 
45
  ```python
46
- !pip install -qU transformers accelerate
47
 
48
  from transformers import AutoTokenizer, AutoModelForCausalLM
 
49
  import torch
50
 
51
- tokenizer = AutoTokenizer.from_pretrained(model)
52
- model = AutoModelForCausalLM.from_pretrained("AkimfromParis/Heliotrope-Ely-Swa-slerp-7B")
53
 
54
- pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True)
 
55
 
56
- messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]
57
- messages.append({"role": "user", "content": "大谷翔平選手について教えてください"})
58
 
59
- prompt = tokenizer.apply_chat_template(conversation=messages, add_generation_prompt=True, tokenize=False)
60
-
61
- pipeline(prompt, max_new_tokens=256, do_sample=False, temperature=0.0, return_full_text=False)
 
 
62
  ```
63
 
64
  # 🔖 Citation
 
43
  ## 🤗 Usage for HuggingFace
44
 
45
  ```python
46
+ # !pip install -qU transformers accelerate
47
 
48
  from transformers import AutoTokenizer, AutoModelForCausalLM
49
+ from transformers import pipeline
50
  import torch
51
 
52
+ model_name = "AkimfromParis/Heliotrope-Ely-Swa-slerp-7B"
 
53
 
54
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
55
+ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16)
56
 
57
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id)
 
58
 
59
+ messages = [
60
+ {"role": "system", "content": "あなたは誠実で優秀な日本人のアシスタントです。以下のトピックに関する詳細な情報を提供してください。"},
61
+ {"role": "user", "content": "大谷翔平選手は誰ですか?"},
62
+ ]
63
+ print(pipe(messages, max_new_tokens=256)[0]['generated_text'][-1])
64
  ```
65
 
66
  # 🔖 Citation