Adding Evaluation Results
#1 by leaderboard-pr-bot - opened

README.md CHANGED
@@ -1,15 +1,22 @@
 1 |   ---
 2 | - 
 3 | - 
 4 | - model_name: Luminia 13B v3
 5 | - pretty_name: Luminia
 6 | - model_type: llama2
 7 | - prompt_template: >-
 8 | -   Below is an instruction that describes a task. Write a response that
 9 | -   appropriately completes the request. ### Instruction: {Instruction} {summary} ### input: {category} ### Response: {prompt}
10 | - base_model: meta-llama/Llama-2-13b-chat-hf
11 | - library_name: peft
12 |   license: apache-2.0
13 |   datasets:
14 |   - Nekochu/discord-unstable-diffusion-SD-prompts
15 |   - glaiveai/glaive-function-calling-v2
@@ -18,36 +25,40 @@ datasets:
18 |   - GAIR/lima
19 |   - sahil2801/CodeAlpaca-20k
20 |   - garage-bAInd/Open-Platypus
21 | - 
22 | - 
23 |   pipeline_tag: text-generation
24 |   task_categories:
25 |   - question-answering
26 |   - text2text-generation
27 |   - conversational
28 | - inference:
29 |   widget:
30 |   - example_title: prompt assistant
31 |     messages:
32 | - 
33 | - 
34 | - 
35 | - 
36 |     output:
37 |       text: Luminia, 1girl, solo, blonde hair, long hair,
38 | - tags:
39 | - - llama-factory
40 | - - lora
41 | - - generated_from_trainer
42 | - - llama2
43 | - - llama
44 | - - instruct
45 | - - finetune
46 | - - gpt4
47 | - - synthetic data
48 | - - stable diffusion
49 | - - alpaca
50 | - - llm
51 |   model-index:
52 |   - name: Luminia-13B-v3
53 |     results: []
@@ -161,4 +172,17 @@ The following hyperparameters were used during training:
161 |   - Datasets 2.14.5
162 |   - Tokenizers 0.15.0
163 |   
164 | - </details>

 1 |   ---
 2 | + language:
 3 | + - en
 4 |   license: apache-2.0
 5 | + library_name: peft
 6 | + tags:
 7 | + - llama-factory
 8 | + - lora
 9 | + - generated_from_trainer
10 | + - llama2
11 | + - llama
12 | + - instruct
13 | + - finetune
14 | + - gpt4
15 | + - synthetic data
16 | + - stable diffusion
17 | + - alpaca
18 | + - llm
19 | + base_model: meta-llama/Llama-2-13b-chat-hf
20 |   datasets:
21 |   - Nekochu/discord-unstable-diffusion-SD-prompts
22 |   - glaiveai/glaive-function-calling-v2
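The reorganized metadata above keeps `library_name: peft`, the `lora` tag, and `base_model: meta-llama/Llama-2-13b-chat-hf`, i.e. the card describes a PEFT fine-tune of Llama-2-13b-chat. Below is a minimal loading sketch, assuming the `Nekochu/Luminia-13B-v3` repository exposes the LoRA adapter weights directly (the diff itself does not confirm this).

```python
# Sketch only: assumes Nekochu/Luminia-13B-v3 ships LoRA adapter weights for
# meta-llama/Llama-2-13b-chat-hf, as library_name: peft and base_model suggest.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_ID = "meta-llama/Llama-2-13b-chat-hf"  # base_model from the card metadata
ADAPTER_ID = "Nekochu/Luminia-13B-v3"       # this repository

tokenizer = AutoTokenizer.from_pretrained(BASE_ID)
base = AutoModelForCausalLM.from_pretrained(
    BASE_ID, torch_dtype=torch.float16, device_map="auto"
)
model = PeftModel.from_pretrained(base, ADAPTER_ID)  # attach the LoRA weights
model = model.merge_and_unload()  # optional: fold the adapter into the base model
```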
25 |   - GAIR/lima
26 |   - sahil2801/CodeAlpaca-20k
27 |   - garage-bAInd/Open-Platypus
28 | + model_creator: Nekochu
29 | + quantized_by: Nekochu
30 | + pretty_name: Luminia
31 | + model_type: llama2
32 | + prompt_template: 'Below is an instruction that describes a task. Write a response
33 | +   that appropriately completes the request. ### Instruction: {Instruction} {summary}
34 | +   ### input: {category} ### Response: {prompt}'
35 |   pipeline_tag: text-generation
36 |   task_categories:
37 |   - question-answering
38 |   - text2text-generation
39 |   - conversational
40 | + inference: true
41 |   widget:
42 |   - example_title: prompt assistant
43 |     messages:
44 | +   - role: system
45 | +     content: Below is an instruction that describes a task, paired with an input that
46 | +       provides further context. Write a response that appropriately completes the
47 | +       request.
48 | +   - role: user
49 | +     content: '### Instruction:
50 | + 
51 | +       Create stable diffusion metadata based on the given english description. Luminia
52 | + 
53 | +       ### Input:
54 | + 
55 | +       favorites and popular SFW
56 | + 
57 | +       ### Response:
58 | + 
59 | +       '
60 |     output:
61 |       text: Luminia, 1girl, solo, blonde hair, long hair,
62 |   model-index:
63 |   - name: Luminia-13B-v3
64 |     results: []
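The normalized `prompt_template` and the new widget messages spell out the Alpaca-style format the card expects (`### Instruction:` / `### Input:` / `### Response:`). The sketch below fills that template and generates with a `transformers` pipeline; the mapping of the template's `{Instruction} {summary}` / `{category}` / `{prompt}` slots onto instruction, input, and generated response, as well as the decoding settings, are assumptions rather than anything stated in this diff.

```python
# Sketch: fill the card's Alpaca-style template and generate with a
# text-generation pipeline. Decoding settings are illustrative only.
from transformers import pipeline

TEMPLATE = (
    "Below is an instruction that describes a task. Write a response that "
    "appropriately completes the request. "
    "### Instruction: {instruction} ### Input: {input} ### Response: "
)

prompt = TEMPLATE.format(
    instruction="Create stable diffusion metadata based on the given english description. Luminia",
    input="favorites and popular SFW",
)

generator = pipeline("text-generation", model="Nekochu/Luminia-13B-v3", device_map="auto")
result = generator(prompt, max_new_tokens=128, do_sample=True, temperature=0.7)
print(result[0]["generated_text"])
# The widget's expected completion style: "Luminia, 1girl, solo, blonde hair, long hair,"
```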
172 |   - Datasets 2.14.5
173 |   - Tokenizers 0.15.0
174 |   
175 | + </details>
176 | + # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)
177 | + Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Nekochu__Luminia-13B-v3)
178 | + 
179 | + | Metric            |Value|
180 | + |-------------------|----:|
181 | + |Avg.               |11.55|
182 | + |IFEval (0-Shot)    |25.23|
183 | + |BBH (3-Shot)       |17.69|
184 | + |MATH Lvl 5 (4-Shot)| 1.28|
185 | + |GPQA (0-shot)      | 2.68|
186 | + |MuSR (0-shot)      | 8.89|
187 | + |MMLU-PRO (5-shot)  |13.50|
188 | + 
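As a quick sanity check, the reported `Avg.` is consistent with the unweighted mean of the six benchmark scores in the table (assuming no extra weighting, which the numbers bear out):

```python
# Verify that Avg. in the table is the plain mean of the six benchmark scores.
from decimal import Decimal

scores = {
    "IFEval (0-Shot)": Decimal("25.23"),
    "BBH (3-Shot)": Decimal("17.69"),
    "MATH Lvl 5 (4-Shot)": Decimal("1.28"),
    "GPQA (0-shot)": Decimal("2.68"),
    "MuSR (0-shot)": Decimal("8.89"),
    "MMLU-PRO (5-shot)": Decimal("13.50"),
}

avg = sum(scores.values()) / len(scores)
print(avg)  # 11.545, which the leaderboard reports rounded to 11.55
```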