Files changed (1) hide show
  1. README.md +106 -0
README.md CHANGED
@@ -110,6 +110,98 @@ model-index:
110
  source:
111
  url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=altomek/YiSM-34B-0rn
112
  name: Open LLM Leaderboard
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
113
  ---
114
 
115
  #
@@ -225,3 +317,17 @@ Detailed results can be found [here](https://huggingface.co/datasets/open-llm-le
225
 
226
  5th in 34B size range excluding "Private or deleted" or 8th with all models included as of 2024-06-10 ;P
227
  <img src="https://huggingface.co/altomek/YiSM-34B-0rn/resolve/main/5thIn34B.png">
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
110
  source:
111
  url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=altomek/YiSM-34B-0rn
112
  name: Open LLM Leaderboard
113
+ - task:
114
+ type: text-generation
115
+ name: Text Generation
116
+ dataset:
117
+ name: IFEval (0-Shot)
118
+ type: HuggingFaceH4/ifeval
119
+ args:
120
+ num_few_shot: 0
121
+ metrics:
122
+ - type: inst_level_strict_acc and prompt_level_strict_acc
123
+ value: 42.84
124
+ name: strict accuracy
125
+ source:
126
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=altomek/YiSM-34B-0rn
127
+ name: Open LLM Leaderboard
128
+ - task:
129
+ type: text-generation
130
+ name: Text Generation
131
+ dataset:
132
+ name: BBH (3-Shot)
133
+ type: BBH
134
+ args:
135
+ num_few_shot: 3
136
+ metrics:
137
+ - type: acc_norm
138
+ value: 45.38
139
+ name: normalized accuracy
140
+ source:
141
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=altomek/YiSM-34B-0rn
142
+ name: Open LLM Leaderboard
143
+ - task:
144
+ type: text-generation
145
+ name: Text Generation
146
+ dataset:
147
+ name: MATH Lvl 5 (4-Shot)
148
+ type: hendrycks/competition_math
149
+ args:
150
+ num_few_shot: 4
151
+ metrics:
152
+ - type: exact_match
153
+ value: 20.62
154
+ name: exact match
155
+ source:
156
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=altomek/YiSM-34B-0rn
157
+ name: Open LLM Leaderboard
158
+ - task:
159
+ type: text-generation
160
+ name: Text Generation
161
+ dataset:
162
+ name: GPQA (0-shot)
163
+ type: Idavidrein/gpqa
164
+ args:
165
+ num_few_shot: 0
166
+ metrics:
167
+ - type: acc_norm
168
+ value: 16.22
169
+ name: acc_norm
170
+ source:
171
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=altomek/YiSM-34B-0rn
172
+ name: Open LLM Leaderboard
173
+ - task:
174
+ type: text-generation
175
+ name: Text Generation
176
+ dataset:
177
+ name: MuSR (0-shot)
178
+ type: TAUR-Lab/MuSR
179
+ args:
180
+ num_few_shot: 0
181
+ metrics:
182
+ - type: acc_norm
183
+ value: 14.76
184
+ name: acc_norm
185
+ source:
186
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=altomek/YiSM-34B-0rn
187
+ name: Open LLM Leaderboard
188
+ - task:
189
+ type: text-generation
190
+ name: Text Generation
191
+ dataset:
192
+ name: MMLU-PRO (5-shot)
193
+ type: TIGER-Lab/MMLU-Pro
194
+ config: main
195
+ split: test
196
+ args:
197
+ num_few_shot: 5
198
+ metrics:
199
+ - type: acc
200
+ value: 41.06
201
+ name: accuracy
202
+ source:
203
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=altomek/YiSM-34B-0rn
204
+ name: Open LLM Leaderboard
205
  ---
206
 
207
  #
 
317
 
318
  5th in 34B size range excluding "Private or deleted" or 8th with all models included as of 2024-06-10 ;P
319
  <img src="https://huggingface.co/altomek/YiSM-34B-0rn/resolve/main/5thIn34B.png">
320
+
321
+ # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)
322
+ Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_altomek__YiSM-34B-0rn)
323
+
324
+ | Metric |Value|
325
+ |-------------------|----:|
326
+ |Avg. |30.15|
327
+ |IFEval (0-Shot) |42.84|
328
+ |BBH (3-Shot) |45.38|
329
+ |MATH Lvl 5 (4-Shot)|20.62|
330
+ |GPQA (0-shot) |16.22|
331
+ |MuSR (0-shot) |14.76|
332
+ |MMLU-PRO (5-shot) |41.06|
333
+