Update README.md
README.md
CHANGED
@@ -53,4 +53,41 @@ experts:
      - "solve"
      - "count"
tokenizer_source: union
```
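
The block above is the tail of a mergekit MoE ("experts") recipe. As a rough sketch not taken from the original card, a merge like this could be produced with mergekit's `mergekit-moe` entry point; the config filename and output directory below are placeholders, and exact flags vary by mergekit version:

```python
# Hypothetical reproduction sketch: `config.yaml` holds the expert recipe above,
# and `merge` is the output directory. Flags differ across mergekit versions.
!pip install -qU mergekit
!mergekit-moe config.yaml merge
```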
|
57 |
+
|
58 |
+
## 💻 Usage
|
59 |
+
```python
|
60 |
+
!pip install -qU transformers bitsandbytes accelerate
|
61 |
+
|
62 |
+
from transformers import AutoTokenizer
|
63 |
+
import transformers
|
64 |
+
import torch
|
65 |
+
|
66 |
+
model = "mychen76/openmixtral-6x7b-v2"
|
67 |
+
|
68 |
+
tokenizer = AutoTokenizer.from_pretrained(model)
|
69 |
+
pipeline = transformers.pipeline(
|
70 |
+
"text-generation",
|
71 |
+
model=model,
|
72 |
+
model_kwargs={"torch_dtype": torch.float16, "load_in_4bit": True},
|
73 |
+
)
|
74 |
+
|
75 |
+
messages = [{"role": "user", "content": "Why the sky is blue"}]
|
76 |
+
prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
77 |
+
outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
|
78 |
+
print(outputs[0]["generated_text"])
|
79 |
+
```
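
Recent transformers releases prefer an explicit `BitsAndBytesConfig` over passing `load_in_4bit` through `model_kwargs`. A minimal equivalent sketch, not from the original card:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "mychen76/openmixtral-6x7b-v2"

# Explicit 4-bit quantization config instead of model_kwargs={"load_in_4bit": True}
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
model = AutoModelForCausalLM.from_pretrained(
    model_id, quantization_config=bnb_config, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# apply_chat_template can tokenize directly and return input tensors
inputs = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Why is the sky blue?"}],
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)
outputs = model.generate(inputs, max_new_tokens=256, do_sample=True, temperature=0.7)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```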

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_mychen76__openmixtral-6x7b-v2).

| Metric                            | Value |
|-----------------------------------|------:|
| Avg.                              | 72.33 |
| AI2 Reasoning Challenge (25-Shot) | 68.52 |
| HellaSwag (10-Shot)               | 86.75 |
| MMLU (5-Shot)                     | 65.11 |
| TruthfulQA (0-shot)               | 65.13 |
| Winogrande (5-shot)               | 79.87 |
| GSM8k (5-shot)                    | 68.61 |
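
These scores come from the automated Open LLM Leaderboard run. As a hedged sketch, not part of the original card, a single task can be re-run locally with EleutherAI's lm-evaluation-harness; the flag names below follow harness v0.4.x and may differ in other versions, and full-precision weights need a large GPU:

```python
# Hypothetical local re-run of one leaderboard task (ARC-Challenge, 25-shot).
!pip install -qU lm-eval
!lm_eval --model hf \
    --model_args pretrained=mychen76/openmixtral-6x7b-v2 \
    --tasks arc_challenge \
    --num_fewshot 25
```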