|
--- |
|
license: apache-2.0 |
|
datasets: |
|
- teknium/OpenHermes-2.5 |
|
language: |
|
- en |
|
--- |
|
## Prompt Format
|
``` |
|
<|im_start|>system\n{message}<|im_end|>\n<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n |
|
``` |
|
## Usage |
|
```python |
|
from transformers import AutoModelForCausalLM, AutoTokenizer


# Load the tokenizer and model from the Hugging Face Hub.
# trust_remote_code=True lets the repository's custom model code run locally.
tokenizer = AutoTokenizer.from_pretrained("serpdotai/sparsetral-16x7B-v2", trust_remote_code=True)

# device_map="auto" spreads the weights across available devices;
# .eval() switches the model to inference mode (disables dropout, etc.).
model = AutoModelForCausalLM.from_pretrained("serpdotai/sparsetral-16x7B-v2", device_map="auto", trust_remote_code=True).eval()
|
|
|
system_str = "<|im_start|>system\n{message}<|im_end|>\n"
user_str = "<|im_start|>user\n{message}<|im_end|>\n"
assistant_str = "<|im_start|>assistant\n{message}<|im_end|>\n"


def construct_prompt(messages):
    """Assemble a ChatML prompt from a list of chat messages.

    Each message is a dict with a "from" role and a "value" text. The
    returned string ends with an open assistant turn so the model can
    continue generating from it.

    Raises ValueError for any role outside the known aliases.
    """
    # Map each accepted role alias to its ChatML turn template.
    templates = {
        "human": user_str,
        "user": user_str,
        "gpt": assistant_str,
        "assistant": assistant_str,
        "system": system_str,
        "instruction": system_str,
    }
    rendered = []
    for message in messages:
        role = message["from"]
        if role not in templates:
            raise ValueError(f"Unknown message type: {message['from']}")
        rendered.append(templates[role].format(message=message["value"]))
    # Open the assistant turn for generation.
    return "".join(rendered) + "<|im_start|>assistant\n"
|
|
|
# Demo conversation: a system prompt plus one user turn.
system = "You are a helpful assistant who will help the user to the best of their ability. If you don't know something, say \"I don't know\""

user = "Are you sentient?"


# Messages in the {"from": ..., "value": ...} shape that construct_prompt expects.
messages = [

    {"from": "system", "value": system},

    {"from": "user", "value": user},

]


# Build the ChatML prompt, tokenize it, and move the tensors to the model's device.
prompt = construct_prompt(messages)

inputs = tokenizer(prompt, return_tensors="pt")

inputs = inputs.to(model.device)

# Sampled decoding: max_length caps total tokens (prompt + completion);
# top_k / top_p / temperature shape the sampling distribution;
# a single sequence is returned.
pred = model.generate(**inputs, max_length=4096, do_sample=True, top_k=50, top_p=0.99, temperature=0.9, num_return_sequences=1)

# Decode the first (only) generated sequence, dropping the ChatML special tokens.
print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
|
``` |