ggbetz committed on
Commit
a5f082c
1 Parent(s): 96b53e1

Upload results for model openchat/openchat-3.5-0106 (#13)

- Upload results for model openchat/openchat-3.5-0106 (727ccde26a69dae6062de93052195e66a203b276)

data/openchat/openchat-3.5-0106/base/24-02-07-18:33:32.json ADDED
@@ -0,0 +1,1162 @@
+ {
+ "results": {
+ "molestiae-aperiam_lsat-rc_base": {
+ "acc,none": 0.4349442379182156,
+ "acc_stderr,none": 0.030282731632881112,
+ "alias": "molestiae-aperiam_lsat-rc_base"
+ },
+ "molestiae-aperiam_lsat-lr_base": {
+ "acc,none": 0.3176470588235294,
+ "acc_stderr,none": 0.0206356456645464,
+ "alias": "molestiae-aperiam_lsat-lr_base"
+ },
+ "molestiae-aperiam_lsat-ar_base": {
+ "acc,none": 0.1956521739130435,
+ "acc_stderr,none": 0.026214799709819592,
+ "alias": "molestiae-aperiam_lsat-ar_base"
+ },
+ "molestiae-aperiam_logiqa_base": {
+ "acc,none": 0.329073482428115,
+ "acc_stderr,none": 0.018795068527281092,
+ "alias": "molestiae-aperiam_logiqa_base"
+ },
+ "molestiae-aperiam_logiqa2_base": {
+ "acc,none": 0.3816793893129771,
+ "acc_stderr,none": 0.012256546675202993,
+ "alias": "molestiae-aperiam_logiqa2_base"
+ },
+ "iure-at_lsat-rc_base": {
+ "acc,none": 0.4349442379182156,
+ "acc_stderr,none": 0.030282731632881112,
+ "alias": "iure-at_lsat-rc_base"
+ },
+ "iure-at_lsat-lr_base": {
+ "acc,none": 0.3176470588235294,
+ "acc_stderr,none": 0.0206356456645464,
+ "alias": "iure-at_lsat-lr_base"
+ },
+ "iure-at_lsat-ar_base": {
+ "acc,none": 0.1956521739130435,
+ "acc_stderr,none": 0.026214799709819592,
+ "alias": "iure-at_lsat-ar_base"
+ },
+ "iure-at_logiqa_base": {
+ "acc,none": 0.329073482428115,
+ "acc_stderr,none": 0.018795068527281092,
+ "alias": "iure-at_logiqa_base"
+ },
+ "iure-at_logiqa2_base": {
+ "acc,none": 0.3816793893129771,
+ "acc_stderr,none": 0.012256546675202993,
+ "alias": "iure-at_logiqa2_base"
+ },
+ "facere-optio_lsat-rc_base": {
+ "acc,none": 0.4349442379182156,
+ "acc_stderr,none": 0.030282731632881112,
+ "alias": "facere-optio_lsat-rc_base"
+ },
+ "facere-optio_lsat-lr_base": {
+ "acc,none": 0.3176470588235294,
+ "acc_stderr,none": 0.0206356456645464,
+ "alias": "facere-optio_lsat-lr_base"
+ },
+ "facere-optio_lsat-ar_base": {
+ "acc,none": 0.1956521739130435,
+ "acc_stderr,none": 0.026214799709819592,
+ "alias": "facere-optio_lsat-ar_base"
+ },
+ "facere-optio_logiqa_base": {
+ "acc,none": 0.329073482428115,
+ "acc_stderr,none": 0.018795068527281092,
+ "alias": "facere-optio_logiqa_base"
+ },
+ "facere-optio_logiqa2_base": {
+ "acc,none": 0.3816793893129771,
+ "acc_stderr,none": 0.012256546675202993,
+ "alias": "facere-optio_logiqa2_base"
+ },
+ "et-praesentium_lsat-rc_base": {
+ "acc,none": 0.4349442379182156,
+ "acc_stderr,none": 0.030282731632881112,
+ "alias": "et-praesentium_lsat-rc_base"
+ },
+ "et-praesentium_lsat-lr_base": {
+ "acc,none": 0.3176470588235294,
+ "acc_stderr,none": 0.0206356456645464,
+ "alias": "et-praesentium_lsat-lr_base"
+ },
+ "et-praesentium_lsat-ar_base": {
+ "acc,none": 0.1956521739130435,
+ "acc_stderr,none": 0.026214799709819592,
+ "alias": "et-praesentium_lsat-ar_base"
+ },
+ "et-praesentium_logiqa_base": {
+ "acc,none": 0.329073482428115,
+ "acc_stderr,none": 0.018795068527281092,
+ "alias": "et-praesentium_logiqa_base"
+ },
+ "et-praesentium_logiqa2_base": {
+ "acc,none": 0.3816793893129771,
+ "acc_stderr,none": 0.012256546675202993,
+ "alias": "et-praesentium_logiqa2_base"
+ },
+ "eligendi-commodi_lsat-rc_base": {
+ "acc,none": 0.4349442379182156,
+ "acc_stderr,none": 0.030282731632881112,
+ "alias": "eligendi-commodi_lsat-rc_base"
+ },
+ "eligendi-commodi_lsat-lr_base": {
+ "acc,none": 0.3176470588235294,
+ "acc_stderr,none": 0.0206356456645464,
+ "alias": "eligendi-commodi_lsat-lr_base"
+ },
+ "eligendi-commodi_lsat-ar_base": {
+ "acc,none": 0.1956521739130435,
+ "acc_stderr,none": 0.026214799709819592,
+ "alias": "eligendi-commodi_lsat-ar_base"
+ },
+ "eligendi-commodi_logiqa_base": {
+ "acc,none": 0.329073482428115,
+ "acc_stderr,none": 0.018795068527281092,
+ "alias": "eligendi-commodi_logiqa_base"
+ },
+ "eligendi-commodi_logiqa2_base": {
+ "acc,none": 0.3816793893129771,
+ "acc_stderr,none": 0.012256546675202993,
+ "alias": "eligendi-commodi_logiqa2_base"
+ },
+ "doloremque-rem_lsat-rc_base": {
+ "acc,none": 0.4349442379182156,
+ "acc_stderr,none": 0.030282731632881112,
+ "alias": "doloremque-rem_lsat-rc_base"
+ },
+ "doloremque-rem_lsat-lr_base": {
+ "acc,none": 0.3176470588235294,
+ "acc_stderr,none": 0.0206356456645464,
+ "alias": "doloremque-rem_lsat-lr_base"
+ },
+ "doloremque-rem_lsat-ar_base": {
+ "acc,none": 0.1956521739130435,
+ "acc_stderr,none": 0.026214799709819592,
+ "alias": "doloremque-rem_lsat-ar_base"
+ },
+ "doloremque-rem_logiqa_base": {
+ "acc,none": 0.329073482428115,
+ "acc_stderr,none": 0.018795068527281092,
+ "alias": "doloremque-rem_logiqa_base"
+ },
+ "doloremque-rem_logiqa2_base": {
+ "acc,none": 0.3816793893129771,
+ "acc_stderr,none": 0.012256546675202993,
+ "alias": "doloremque-rem_logiqa2_base"
+ }
+ },
+ "configs": {
+ "doloremque-rem_logiqa2_base": {
+ "task": "doloremque-rem_logiqa2_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "doloremque-rem-logiqa2/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "doloremque-rem_logiqa_base": {
+ "task": "doloremque-rem_logiqa_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "doloremque-rem-logiqa/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "doloremque-rem_lsat-ar_base": {
+ "task": "doloremque-rem_lsat-ar_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "doloremque-rem-lsat-ar/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "doloremque-rem_lsat-lr_base": {
+ "task": "doloremque-rem_lsat-lr_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "doloremque-rem-lsat-lr/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "doloremque-rem_lsat-rc_base": {
+ "task": "doloremque-rem_lsat-rc_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "doloremque-rem-lsat-rc/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "eligendi-commodi_logiqa2_base": {
+ "task": "eligendi-commodi_logiqa2_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "eligendi-commodi-logiqa2/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "eligendi-commodi_logiqa_base": {
+ "task": "eligendi-commodi_logiqa_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "eligendi-commodi-logiqa/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "eligendi-commodi_lsat-ar_base": {
+ "task": "eligendi-commodi_lsat-ar_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "eligendi-commodi-lsat-ar/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "eligendi-commodi_lsat-lr_base": {
+ "task": "eligendi-commodi_lsat-lr_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "eligendi-commodi-lsat-lr/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "eligendi-commodi_lsat-rc_base": {
+ "task": "eligendi-commodi_lsat-rc_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "eligendi-commodi-lsat-rc/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "et-praesentium_logiqa2_base": {
+ "task": "et-praesentium_logiqa2_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "et-praesentium-logiqa2/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "et-praesentium_logiqa_base": {
+ "task": "et-praesentium_logiqa_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "et-praesentium-logiqa/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "et-praesentium_lsat-ar_base": {
+ "task": "et-praesentium_lsat-ar_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "et-praesentium-lsat-ar/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "et-praesentium_lsat-lr_base": {
+ "task": "et-praesentium_lsat-lr_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "et-praesentium-lsat-lr/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "et-praesentium_lsat-rc_base": {
+ "task": "et-praesentium_lsat-rc_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "et-praesentium-lsat-rc/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "facere-optio_logiqa2_base": {
+ "task": "facere-optio_logiqa2_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "facere-optio-logiqa2/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "facere-optio_logiqa_base": {
+ "task": "facere-optio_logiqa_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "facere-optio-logiqa/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "facere-optio_lsat-ar_base": {
+ "task": "facere-optio_lsat-ar_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "facere-optio-lsat-ar/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "facere-optio_lsat-lr_base": {
+ "task": "facere-optio_lsat-lr_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "facere-optio-lsat-lr/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "facere-optio_lsat-rc_base": {
+ "task": "facere-optio_lsat-rc_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "facere-optio-lsat-rc/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "iure-at_logiqa2_base": {
+ "task": "iure-at_logiqa2_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "iure-at-logiqa2/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "iure-at_logiqa_base": {
+ "task": "iure-at_logiqa_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "iure-at-logiqa/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "iure-at_lsat-ar_base": {
+ "task": "iure-at_lsat-ar_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "iure-at-lsat-ar/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "iure-at_lsat-lr_base": {
+ "task": "iure-at_lsat-lr_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "iure-at-lsat-lr/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "iure-at_lsat-rc_base": {
+ "task": "iure-at_lsat-rc_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "iure-at-lsat-rc/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "molestiae-aperiam_logiqa2_base": {
+ "task": "molestiae-aperiam_logiqa2_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "molestiae-aperiam-logiqa2/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "molestiae-aperiam_logiqa_base": {
+ "task": "molestiae-aperiam_logiqa_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "molestiae-aperiam-logiqa/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "molestiae-aperiam_lsat-ar_base": {
+ "task": "molestiae-aperiam_lsat-ar_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "molestiae-aperiam-lsat-ar/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "molestiae-aperiam_lsat-lr_base": {
+ "task": "molestiae-aperiam_lsat-lr_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "molestiae-aperiam-lsat-lr/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "molestiae-aperiam_lsat-rc_base": {
+ "task": "molestiae-aperiam_lsat-rc_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "molestiae-aperiam-lsat-rc/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ }
+ },
+ "versions": {
+ "doloremque-rem_logiqa2_base": 0.0,
+ "doloremque-rem_logiqa_base": 0.0,
+ "doloremque-rem_lsat-ar_base": 0.0,
+ "doloremque-rem_lsat-lr_base": 0.0,
+ "doloremque-rem_lsat-rc_base": 0.0,
+ "eligendi-commodi_logiqa2_base": 0.0,
+ "eligendi-commodi_logiqa_base": 0.0,
+ "eligendi-commodi_lsat-ar_base": 0.0,
+ "eligendi-commodi_lsat-lr_base": 0.0,
+ "eligendi-commodi_lsat-rc_base": 0.0,
+ "et-praesentium_logiqa2_base": 0.0,
+ "et-praesentium_logiqa_base": 0.0,
+ "et-praesentium_lsat-ar_base": 0.0,
+ "et-praesentium_lsat-lr_base": 0.0,
+ "et-praesentium_lsat-rc_base": 0.0,
+ "facere-optio_logiqa2_base": 0.0,
+ "facere-optio_logiqa_base": 0.0,
+ "facere-optio_lsat-ar_base": 0.0,
+ "facere-optio_lsat-lr_base": 0.0,
+ "facere-optio_lsat-rc_base": 0.0,
+ "iure-at_logiqa2_base": 0.0,
+ "iure-at_logiqa_base": 0.0,
+ "iure-at_lsat-ar_base": 0.0,
+ "iure-at_lsat-lr_base": 0.0,
+ "iure-at_lsat-rc_base": 0.0,
+ "molestiae-aperiam_logiqa2_base": 0.0,
+ "molestiae-aperiam_logiqa_base": 0.0,
+ "molestiae-aperiam_lsat-ar_base": 0.0,
+ "molestiae-aperiam_lsat-lr_base": 0.0,
+ "molestiae-aperiam_lsat-rc_base": 0.0
+ },
+ "n-shot": {
+ "doloremque-rem_logiqa2_base": 0,
+ "doloremque-rem_logiqa_base": 0,
+ "doloremque-rem_lsat-ar_base": 0,
+ "doloremque-rem_lsat-lr_base": 0,
+ "doloremque-rem_lsat-rc_base": 0,
+ "eligendi-commodi_logiqa2_base": 0,
+ "eligendi-commodi_logiqa_base": 0,
+ "eligendi-commodi_lsat-ar_base": 0,
+ "eligendi-commodi_lsat-lr_base": 0,
+ "eligendi-commodi_lsat-rc_base": 0,
+ "et-praesentium_logiqa2_base": 0,
+ "et-praesentium_logiqa_base": 0,
+ "et-praesentium_lsat-ar_base": 0,
+ "et-praesentium_lsat-lr_base": 0,
+ "et-praesentium_lsat-rc_base": 0,
+ "facere-optio_logiqa2_base": 0,
+ "facere-optio_logiqa_base": 0,
+ "facere-optio_lsat-ar_base": 0,
+ "facere-optio_lsat-lr_base": 0,
+ "facere-optio_lsat-rc_base": 0,
+ "iure-at_logiqa2_base": 0,
+ "iure-at_logiqa_base": 0,
+ "iure-at_lsat-ar_base": 0,
+ "iure-at_lsat-lr_base": 0,
+ "iure-at_lsat-rc_base": 0,
+ "molestiae-aperiam_logiqa2_base": 0,
+ "molestiae-aperiam_logiqa_base": 0,
+ "molestiae-aperiam_lsat-ar_base": 0,
+ "molestiae-aperiam_lsat-lr_base": 0,
+ "molestiae-aperiam_lsat-rc_base": 0
+ },
+ "config": {
+ "model": "vllm",
+ "model_args": "pretrained=openchat/openchat-3.5-0106,revision=main,dtype=auto,tensor_parallel_size=1,gpu_memory_utilization=0.9,trust_remote_code=true,max_length=4096",
+ "batch_size": "auto",
+ "batch_sizes": [],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "a1d6b70"
+ }
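
For downstream users, here is a minimal sketch (an illustration, not part of the committed file) of how this report could be loaded and summarized after checkout. It assumes the file sits at the relative path from the diff header above and relies only on the "results", "acc,none", and "acc_stderr,none" keys visible in the upload.

import json

# Path as given in this commit's diff header; adjust for your local checkout.
path = "data/openchat/openchat-3.5-0106/base/24-02-07-18:33:32.json"

with open(path) as f:
    report = json.load(f)

# Each entry under "results" records a mean accuracy and its standard error.
for task, scores in sorted(report["results"].items()):
    acc = scores["acc,none"]
    stderr = scores["acc_stderr,none"]
    print(f"{task:40s} acc = {acc:.4f} ± {stderr:.4f}")

This prints one line per task, which makes it easy to confirm that, for a given dataset, every task-name prefix in this upload carries identical base scores.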