ggbetz committed on
Commit c0a232e
1 Parent(s): e6409b6

Upload results for model microsoft/phi-2 (#11)


- Upload results for model microsoft/phi-2 (d6834a91504cd347af40e7fd4c5397f5830667ce)
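For quick programmatic inspection: the uploaded file follows the lm-evaluation-harness results layout — a `results` map keyed by task name, plus `configs`, `versions`, `n-shot`, and a run-level `config` block. A minimal reading sketch, assuming a local checkout of this repo (the path below is simply the file added in this commit):

```python
import json

# Path of the file added in this commit, relative to the repo root.
RESULTS_PATH = "data/microsoft/phi-2/cot/24-02-05-18:00:45.json"

with open(RESULTS_PATH, encoding="utf-8") as f:
    report = json.load(f)

# Metric keys follow lm-evaluation-harness conventions:
# "acc,none" is accuracy, "acc_stderr,none" its standard error.
for task, metrics in sorted(report["results"].items()):
    acc = metrics["acc,none"]
    stderr = metrics["acc_stderr,none"]
    print(f"{task:45s} acc={acc:.4f} +/- {stderr:.4f}")
```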

data/microsoft/phi-2/cot/24-02-05-18:00:45.json ADDED
@@ -0,0 +1,1162 @@
+ {
+   "results": {
+     "repellendus-laborum_lsat-rc_cot": {
+       "acc,none": 0.39776951672862454,
+       "acc_stderr,none": 0.02989714509220832,
+       "alias": "repellendus-laborum_lsat-rc_cot"
+     },
+     "repellendus-laborum_lsat-lr_cot": {
+       "acc,none": 0.3215686274509804,
+       "acc_stderr,none": 0.020702886736741085,
+       "alias": "repellendus-laborum_lsat-lr_cot"
+     },
+     "repellendus-laborum_lsat-ar_cot": {
+       "acc,none": 0.1956521739130435,
+       "acc_stderr,none": 0.026214799709819596,
+       "alias": "repellendus-laborum_lsat-ar_cot"
+     },
+     "repellendus-laborum_logiqa_cot": {
+       "acc,none": 0.35303514376996803,
+       "acc_stderr,none": 0.019116540734485793,
+       "alias": "repellendus-laborum_logiqa_cot"
+     },
+     "repellendus-laborum_logiqa2_cot": {
+       "acc,none": 0.38040712468193383,
+       "acc_stderr,none": 0.01224868415939611,
+       "alias": "repellendus-laborum_logiqa2_cot"
+     },
+     "possimus-voluptate_lsat-rc_cot": {
+       "acc,none": 0.3048327137546468,
+       "acc_stderr,none": 0.02811952967561346,
+       "alias": "possimus-voluptate_lsat-rc_cot"
+     },
+     "possimus-voluptate_lsat-lr_cot": {
+       "acc,none": 0.2901960784313726,
+       "acc_stderr,none": 0.020116669259866344,
+       "alias": "possimus-voluptate_lsat-lr_cot"
+     },
+     "possimus-voluptate_lsat-ar_cot": {
+       "acc,none": 0.21304347826086956,
+       "acc_stderr,none": 0.027057754389936194,
+       "alias": "possimus-voluptate_lsat-ar_cot"
+     },
+     "possimus-voluptate_logiqa_cot": {
+       "acc,none": 0.31309904153354634,
+       "acc_stderr,none": 0.018550171178695694,
+       "alias": "possimus-voluptate_logiqa_cot"
+     },
+     "possimus-voluptate_logiqa2_cot": {
+       "acc,none": 0.34478371501272265,
+       "acc_stderr,none": 0.011991613472848751,
+       "alias": "possimus-voluptate_logiqa2_cot"
+     },
+     "maxime-expedita_lsat-rc_cot": {
+       "acc,none": 0.3382899628252788,
+       "acc_stderr,none": 0.028900876908980185,
+       "alias": "maxime-expedita_lsat-rc_cot"
+     },
+     "maxime-expedita_lsat-lr_cot": {
+       "acc,none": 0.2568627450980392,
+       "acc_stderr,none": 0.019365387229579173,
+       "alias": "maxime-expedita_lsat-lr_cot"
+     },
+     "maxime-expedita_lsat-ar_cot": {
+       "acc,none": 0.24782608695652175,
+       "acc_stderr,none": 0.02853086259541007,
+       "alias": "maxime-expedita_lsat-ar_cot"
+     },
+     "maxime-expedita_logiqa_cot": {
+       "acc,none": 0.3083067092651757,
+       "acc_stderr,none": 0.018471759300608265,
+       "alias": "maxime-expedita_logiqa_cot"
+     },
+     "maxime-expedita_logiqa2_cot": {
+       "acc,none": 0.3237913486005089,
+       "acc_stderr,none": 0.01180551369127738,
+       "alias": "maxime-expedita_logiqa2_cot"
+     },
+     "eveniet-ea_lsat-rc_cot": {
+       "acc,none": 0.35315985130111527,
+       "acc_stderr,none": 0.029195555959749025,
+       "alias": "eveniet-ea_lsat-rc_cot"
+     },
+     "eveniet-ea_lsat-lr_cot": {
+       "acc,none": 0.2823529411764706,
+       "acc_stderr,none": 0.01995228875819785,
+       "alias": "eveniet-ea_lsat-lr_cot"
+     },
+     "eveniet-ea_lsat-ar_cot": {
+       "acc,none": 0.2565217391304348,
+       "acc_stderr,none": 0.028858814315305643,
+       "alias": "eveniet-ea_lsat-ar_cot"
+     },
+     "eveniet-ea_logiqa_cot": {
+       "acc,none": 0.3226837060702875,
+       "acc_stderr,none": 0.01870011473363866,
+       "alias": "eveniet-ea_logiqa_cot"
+     },
+     "eveniet-ea_logiqa2_cot": {
+       "acc,none": 0.36323155216284986,
+       "acc_stderr,none": 0.012133733683836153,
+       "alias": "eveniet-ea_logiqa2_cot"
+     },
+     "distinctio-unde_lsat-rc_cot": {
+       "acc,none": 0.34572490706319703,
+       "acc_stderr,none": 0.029052140190085934,
+       "alias": "distinctio-unde_lsat-rc_cot"
+     },
+     "distinctio-unde_lsat-lr_cot": {
+       "acc,none": 0.2803921568627451,
+       "acc_stderr,none": 0.01991003317147411,
+       "alias": "distinctio-unde_lsat-lr_cot"
+     },
+     "distinctio-unde_lsat-ar_cot": {
+       "acc,none": 0.23043478260869565,
+       "acc_stderr,none": 0.027827807522276156,
+       "alias": "distinctio-unde_lsat-ar_cot"
+     },
+     "distinctio-unde_logiqa_cot": {
+       "acc,none": 0.329073482428115,
+       "acc_stderr,none": 0.018795068527281106,
+       "alias": "distinctio-unde_logiqa_cot"
+     },
+     "distinctio-unde_logiqa2_cot": {
+       "acc,none": 0.361323155216285,
+       "acc_stderr,none": 0.012119937772570024,
+       "alias": "distinctio-unde_logiqa2_cot"
+     },
+     "aspernatur-sint_lsat-rc_cot": {
+       "acc,none": 0.32342007434944237,
+       "acc_stderr,none": 0.02857430284450382,
+       "alias": "aspernatur-sint_lsat-rc_cot"
+     },
+     "aspernatur-sint_lsat-lr_cot": {
+       "acc,none": 0.2901960784313726,
+       "acc_stderr,none": 0.020116669259866347,
+       "alias": "aspernatur-sint_lsat-lr_cot"
+     },
+     "aspernatur-sint_lsat-ar_cot": {
+       "acc,none": 0.22608695652173913,
+       "acc_stderr,none": 0.02764178570724133,
+       "alias": "aspernatur-sint_lsat-ar_cot"
+     },
+     "aspernatur-sint_logiqa_cot": {
+       "acc,none": 0.31150159744408945,
+       "acc_stderr,none": 0.01852429117602582,
+       "alias": "aspernatur-sint_logiqa_cot"
+     },
+     "aspernatur-sint_logiqa2_cot": {
+       "acc,none": 0.35814249363867684,
+       "acc_stderr,none": 0.012096483748969475,
+       "alias": "aspernatur-sint_logiqa2_cot"
+     }
+   },
+   "configs": {
+     "aspernatur-sint_logiqa2_cot": {
+       "task": "aspernatur-sint_logiqa2_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "aspernatur-sint-logiqa2/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "aspernatur-sint_logiqa_cot": {
+       "task": "aspernatur-sint_logiqa_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "aspernatur-sint-logiqa/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "aspernatur-sint_lsat-ar_cot": {
+       "task": "aspernatur-sint_lsat-ar_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "aspernatur-sint-lsat-ar/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "aspernatur-sint_lsat-lr_cot": {
+       "task": "aspernatur-sint_lsat-lr_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "aspernatur-sint-lsat-lr/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "aspernatur-sint_lsat-rc_cot": {
+       "task": "aspernatur-sint_lsat-rc_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "aspernatur-sint-lsat-rc/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "distinctio-unde_logiqa2_cot": {
+       "task": "distinctio-unde_logiqa2_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "distinctio-unde-logiqa2/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "distinctio-unde_logiqa_cot": {
+       "task": "distinctio-unde_logiqa_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "distinctio-unde-logiqa/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "distinctio-unde_lsat-ar_cot": {
+       "task": "distinctio-unde_lsat-ar_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "distinctio-unde-lsat-ar/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "distinctio-unde_lsat-lr_cot": {
+       "task": "distinctio-unde_lsat-lr_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "distinctio-unde-lsat-lr/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "distinctio-unde_lsat-rc_cot": {
+       "task": "distinctio-unde_lsat-rc_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "distinctio-unde-lsat-rc/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "eveniet-ea_logiqa2_cot": {
+       "task": "eveniet-ea_logiqa2_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "eveniet-ea-logiqa2/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "eveniet-ea_logiqa_cot": {
+       "task": "eveniet-ea_logiqa_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "eveniet-ea-logiqa/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "eveniet-ea_lsat-ar_cot": {
+       "task": "eveniet-ea_lsat-ar_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "eveniet-ea-lsat-ar/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "eveniet-ea_lsat-lr_cot": {
+       "task": "eveniet-ea_lsat-lr_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "eveniet-ea-lsat-lr/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "eveniet-ea_lsat-rc_cot": {
+       "task": "eveniet-ea_lsat-rc_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "eveniet-ea-lsat-rc/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "maxime-expedita_logiqa2_cot": {
+       "task": "maxime-expedita_logiqa2_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "maxime-expedita-logiqa2/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "maxime-expedita_logiqa_cot": {
+       "task": "maxime-expedita_logiqa_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "maxime-expedita-logiqa/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "maxime-expedita_lsat-ar_cot": {
+       "task": "maxime-expedita_lsat-ar_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "maxime-expedita-lsat-ar/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "maxime-expedita_lsat-lr_cot": {
+       "task": "maxime-expedita_lsat-lr_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "maxime-expedita-lsat-lr/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "maxime-expedita_lsat-rc_cot": {
+       "task": "maxime-expedita_lsat-rc_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "maxime-expedita-lsat-rc/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "possimus-voluptate_logiqa2_cot": {
+       "task": "possimus-voluptate_logiqa2_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "possimus-voluptate-logiqa2/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "possimus-voluptate_logiqa_cot": {
+       "task": "possimus-voluptate_logiqa_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "possimus-voluptate-logiqa/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "possimus-voluptate_lsat-ar_cot": {
+       "task": "possimus-voluptate_lsat-ar_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "possimus-voluptate-lsat-ar/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "possimus-voluptate_lsat-lr_cot": {
+       "task": "possimus-voluptate_lsat-lr_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "possimus-voluptate-lsat-lr/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "possimus-voluptate_lsat-rc_cot": {
+       "task": "possimus-voluptate_lsat-rc_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "possimus-voluptate-lsat-rc/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "repellendus-laborum_logiqa2_cot": {
+       "task": "repellendus-laborum_logiqa2_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "repellendus-laborum-logiqa2/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "repellendus-laborum_logiqa_cot": {
+       "task": "repellendus-laborum_logiqa_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "repellendus-laborum-logiqa/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "repellendus-laborum_lsat-ar_cot": {
+       "task": "repellendus-laborum_lsat-ar_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "repellendus-laborum-lsat-ar/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "repellendus-laborum_lsat-lr_cot": {
+       "task": "repellendus-laborum_lsat-lr_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "repellendus-laborum-lsat-lr/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "repellendus-laborum_lsat-rc_cot": {
+       "task": "repellendus-laborum_lsat-rc_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "repellendus-laborum-lsat-rc/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     }
+   },
+   "versions": {
+     "aspernatur-sint_logiqa2_cot": 0.0,
+     "aspernatur-sint_logiqa_cot": 0.0,
+     "aspernatur-sint_lsat-ar_cot": 0.0,
+     "aspernatur-sint_lsat-lr_cot": 0.0,
+     "aspernatur-sint_lsat-rc_cot": 0.0,
+     "distinctio-unde_logiqa2_cot": 0.0,
+     "distinctio-unde_logiqa_cot": 0.0,
+     "distinctio-unde_lsat-ar_cot": 0.0,
+     "distinctio-unde_lsat-lr_cot": 0.0,
+     "distinctio-unde_lsat-rc_cot": 0.0,
+     "eveniet-ea_logiqa2_cot": 0.0,
+     "eveniet-ea_logiqa_cot": 0.0,
+     "eveniet-ea_lsat-ar_cot": 0.0,
+     "eveniet-ea_lsat-lr_cot": 0.0,
+     "eveniet-ea_lsat-rc_cot": 0.0,
+     "maxime-expedita_logiqa2_cot": 0.0,
+     "maxime-expedita_logiqa_cot": 0.0,
+     "maxime-expedita_lsat-ar_cot": 0.0,
+     "maxime-expedita_lsat-lr_cot": 0.0,
+     "maxime-expedita_lsat-rc_cot": 0.0,
+     "possimus-voluptate_logiqa2_cot": 0.0,
+     "possimus-voluptate_logiqa_cot": 0.0,
+     "possimus-voluptate_lsat-ar_cot": 0.0,
+     "possimus-voluptate_lsat-lr_cot": 0.0,
+     "possimus-voluptate_lsat-rc_cot": 0.0,
+     "repellendus-laborum_logiqa2_cot": 0.0,
+     "repellendus-laborum_logiqa_cot": 0.0,
+     "repellendus-laborum_lsat-ar_cot": 0.0,
+     "repellendus-laborum_lsat-lr_cot": 0.0,
+     "repellendus-laborum_lsat-rc_cot": 0.0
+   },
+   "n-shot": {
+     "aspernatur-sint_logiqa2_cot": 0,
+     "aspernatur-sint_logiqa_cot": 0,
+     "aspernatur-sint_lsat-ar_cot": 0,
+     "aspernatur-sint_lsat-lr_cot": 0,
+     "aspernatur-sint_lsat-rc_cot": 0,
+     "distinctio-unde_logiqa2_cot": 0,
+     "distinctio-unde_logiqa_cot": 0,
+     "distinctio-unde_lsat-ar_cot": 0,
+     "distinctio-unde_lsat-lr_cot": 0,
+     "distinctio-unde_lsat-rc_cot": 0,
+     "eveniet-ea_logiqa2_cot": 0,
+     "eveniet-ea_logiqa_cot": 0,
+     "eveniet-ea_lsat-ar_cot": 0,
+     "eveniet-ea_lsat-lr_cot": 0,
+     "eveniet-ea_lsat-rc_cot": 0,
+     "maxime-expedita_logiqa2_cot": 0,
+     "maxime-expedita_logiqa_cot": 0,
+     "maxime-expedita_lsat-ar_cot": 0,
+     "maxime-expedita_lsat-lr_cot": 0,
+     "maxime-expedita_lsat-rc_cot": 0,
+     "possimus-voluptate_logiqa2_cot": 0,
+     "possimus-voluptate_logiqa_cot": 0,
+     "possimus-voluptate_lsat-ar_cot": 0,
+     "possimus-voluptate_lsat-lr_cot": 0,
+     "possimus-voluptate_lsat-rc_cot": 0,
+     "repellendus-laborum_logiqa2_cot": 0,
+     "repellendus-laborum_logiqa_cot": 0,
+     "repellendus-laborum_lsat-ar_cot": 0,
+     "repellendus-laborum_lsat-lr_cot": 0,
+     "repellendus-laborum_lsat-rc_cot": 0
+   },
+   "config": {
+     "model": "vllm",
+     "model_args": "pretrained=microsoft/phi-2,revision=main,dtype=auto,tensor_parallel_size=1,gpu_memory_utilization=0.9,trust_remote_code=true,max_length=2048",
+     "batch_size": "auto",
+     "batch_sizes": [],
+     "device": null,
+     "use_cache": null,
+     "limit": null,
+     "bootstrap_iters": 100000,
+     "gen_kwargs": null
+   },
+   "git_hash": "3d5b980"
+ }
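
Since each benchmark (logiqa, logiqa2, lsat-ar, lsat-lr, lsat-rc) is evaluated once per prompt/trace variant, per-benchmark averages across the five variants are one grouping step away. A minimal sketch, reusing the `report` object loaded above:

```python
from collections import defaultdict
from statistics import mean

# Task names look like "<variant>_<benchmark>_cot"; the variant and benchmark
# parts contain hyphens but no underscores, so splitting on "_" is safe.
by_benchmark = defaultdict(list)
for task, metrics in report["results"].items():
    benchmark = task.split("_")[1]
    by_benchmark[benchmark].append(metrics["acc,none"])

for benchmark, accs in sorted(by_benchmark.items()):
    print(f"{benchmark:8s} mean acc over {len(accs)} variants: {mean(accs):.4f}")
```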