Files changed (1)
  1. README.md +113 -5
README.md CHANGED
@@ -1,15 +1,109 @@
 ---
+library_name: transformers
+tags:
+- mergekit
+- merge
 base_model:
 - lemon07r/Gemma-2-Ataraxy-9B
 - wzhouad/gemma-2-9b-it-WPO-HB
 - rtzr/ko-gemma-2-9b-it
 - ghost613/gemma9_on_korean_summary_events
 - rtzr/ko-gemma-2-9b-it
-library_name: transformers
-tags:
-- mergekit
-- merge
-
+model-index:
+- name: Gemma-Ko-Merge
+  results:
+  - task:
+      type: text-generation
+      name: Text Generation
+    dataset:
+      name: IFEval (0-Shot)
+      type: HuggingFaceH4/ifeval
+      args:
+        num_few_shot: 0
+    metrics:
+    - type: inst_level_strict_acc and prompt_level_strict_acc
+      value: 64.16
+      name: strict accuracy
+    source:
+      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Gunulhona/Gemma-Ko-Merge
+      name: Open LLM Leaderboard
+  - task:
+      type: text-generation
+      name: Text Generation
+    dataset:
+      name: BBH (3-Shot)
+      type: BBH
+      args:
+        num_few_shot: 3
+    metrics:
+    - type: acc_norm
+      value: 38.79
+      name: normalized accuracy
+    source:
+      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Gunulhona/Gemma-Ko-Merge
+      name: Open LLM Leaderboard
+  - task:
+      type: text-generation
+      name: Text Generation
+    dataset:
+      name: MATH Lvl 5 (4-Shot)
+      type: hendrycks/competition_math
+      args:
+        num_few_shot: 4
+    metrics:
+    - type: exact_match
+      value: 0.15
+      name: exact match
+    source:
+      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Gunulhona/Gemma-Ko-Merge
+      name: Open LLM Leaderboard
+  - task:
+      type: text-generation
+      name: Text Generation
+    dataset:
+      name: GPQA (0-shot)
+      type: Idavidrein/gpqa
+      args:
+        num_few_shot: 0
+    metrics:
+    - type: acc_norm
+      value: 11.41
+      name: acc_norm
+    source:
+      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Gunulhona/Gemma-Ko-Merge
+      name: Open LLM Leaderboard
+  - task:
+      type: text-generation
+      name: Text Generation
+    dataset:
+      name: MuSR (0-shot)
+      type: TAUR-Lab/MuSR
+      args:
+        num_few_shot: 0
+    metrics:
+    - type: acc_norm
+      value: 9.12
+      name: acc_norm
+    source:
+      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Gunulhona/Gemma-Ko-Merge
+      name: Open LLM Leaderboard
+  - task:
+      type: text-generation
+      name: Text Generation
+    dataset:
+      name: MMLU-PRO (5-shot)
+      type: TIGER-Lab/MMLU-Pro
+      config: main
+      split: test
+      args:
+        num_few_shot: 5
+    metrics:
+    - type: acc
+      value: 31.99
+      name: accuracy
+    source:
+      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Gunulhona/Gemma-Ko-Merge
+      name: Open LLM Leaderboard
 ---
 # merge
 
@@ -62,3 +156,17 @@ merge_method: breadcrumbs_ties
 base_model: lemon07r/Gemma-2-Ataraxy-9B
 dtype: bfloat16
 ```
+
+# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)
+Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Gunulhona__Gemma-Ko-Merge)
+
+| Metric              | Value |
+|---------------------|------:|
+| Avg.                | 25.94 |
+| IFEval (0-Shot)     | 64.16 |
+| BBH (3-Shot)        | 38.79 |
+| MATH Lvl 5 (4-Shot) |  0.15 |
+| GPQA (0-shot)       | 11.41 |
+| MuSR (0-shot)       |  9.12 |
+| MMLU-PRO (5-shot)   | 31.99 |
+
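Since the updated card declares `library_name: transformers`, a minimal loading sketch may be useful alongside the new metadata. It assumes the merged checkpoint is published as `Gunulhona/Gemma-Ko-Merge` (the repo id used in the leaderboard query URLs above), that it ships a standard Gemma-2 chat template, and that `bfloat16` is the right dtype per the merge config; treat it as an illustration under those assumptions, not as an instruction from the card itself.

```python
# Minimal loading sketch. Assumptions: the merged model is hosted at
# "Gunulhona/Gemma-Ko-Merge" (repo id taken from the leaderboard URLs),
# it exposes a Gemma-2 chat template, and bfloat16 matches the merge dtype.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Gunulhona/Gemma-Ko-Merge"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches `dtype: bfloat16` in the merge config
    device_map="auto",
)

# Example Korean prompt, since several of the source models target Korean.
messages = [{"role": "user", "content": "한국어로 간단히 자기소개를 해 주세요."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```

To rebuild the merge itself rather than just load it, the full config shown in the card (the block ending at `dtype: bfloat16`) would be passed to mergekit's `mergekit-yaml` entry point; that step is independent of the loading sketch above.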