ggbetz committed on
Commit bbe84ed
1 Parent(s): 1de5b71

Upload results for model Deci/DeciLM-7B

data/Deci/DeciLM-7B/cot/24-02-03-00:47:02.json ADDED
@@ -0,0 +1,1162 @@
+ {
+   "results": {
+     "veritatis-velit_lsat-rc_cot": {
+       "acc,none": 0.4163568773234201,
+       "acc_stderr,none": 0.03011196940753653,
+       "alias": "veritatis-velit_lsat-rc_cot"
+     },
+     "veritatis-velit_lsat-lr_cot": {
+       "acc,none": 0.3137254901960784,
+       "acc_stderr,none": 0.02056671577177923,
+       "alias": "veritatis-velit_lsat-lr_cot"
+     },
+     "veritatis-velit_lsat-ar_cot": {
+       "acc,none": 0.23043478260869565,
+       "acc_stderr,none": 0.027827807522276156,
+       "alias": "veritatis-velit_lsat-ar_cot"
+     },
+     "veritatis-velit_logiqa_cot": {
+       "acc,none": 0.2987220447284345,
+       "acc_stderr,none": 0.01830790800596066,
+       "alias": "veritatis-velit_logiqa_cot"
+     },
+     "veritatis-velit_logiqa2_cot": {
+       "acc,none": 0.3530534351145038,
+       "acc_stderr,none": 0.012057751628201937,
+       "alias": "veritatis-velit_logiqa2_cot"
+     },
+     "saepe-fuga_lsat-rc_cot": {
+       "acc,none": 0.4312267657992565,
+       "acc_stderr,none": 0.030252065567665472,
+       "alias": "saepe-fuga_lsat-rc_cot"
+     },
+     "saepe-fuga_lsat-lr_cot": {
+       "acc,none": 0.296078431372549,
+       "acc_stderr,none": 0.020235159438512108,
+       "alias": "saepe-fuga_lsat-lr_cot"
+     },
+     "saepe-fuga_lsat-ar_cot": {
+       "acc,none": 0.18695652173913044,
+       "acc_stderr,none": 0.02576377239851234,
+       "alias": "saepe-fuga_lsat-ar_cot"
+     },
+     "saepe-fuga_logiqa_cot": {
+       "acc,none": 0.28913738019169327,
+       "acc_stderr,none": 0.018134473494097365,
+       "alias": "saepe-fuga_logiqa_cot"
+     },
+     "saepe-fuga_logiqa2_cot": {
+       "acc,none": 0.356234096692112,
+       "acc_stderr,none": 0.012082133651061318,
+       "alias": "saepe-fuga_logiqa2_cot"
+     },
+     "nisi-sunt_lsat-rc_cot": {
+       "acc,none": 0.34572490706319703,
+       "acc_stderr,none": 0.029052140190085934,
+       "alias": "nisi-sunt_lsat-rc_cot"
+     },
+     "nisi-sunt_lsat-lr_cot": {
+       "acc,none": 0.27647058823529413,
+       "acc_stderr,none": 0.019824108780753004,
+       "alias": "nisi-sunt_lsat-lr_cot"
+     },
+     "nisi-sunt_lsat-ar_cot": {
+       "acc,none": 0.27391304347826084,
+       "acc_stderr,none": 0.029470189815005897,
+       "alias": "nisi-sunt_lsat-ar_cot"
+     },
+     "nisi-sunt_logiqa_cot": {
+       "acc,none": 0.31629392971246006,
+       "acc_stderr,none": 0.018601164683514252,
+       "alias": "nisi-sunt_logiqa_cot"
+     },
+     "nisi-sunt_logiqa2_cot": {
+       "acc,none": 0.3708651399491094,
+       "acc_stderr,none": 0.012186859070473788,
+       "alias": "nisi-sunt_logiqa2_cot"
+     },
+     "laboriosam-molestiae_lsat-rc_cot": {
+       "acc,none": 0.3680297397769517,
+       "acc_stderr,none": 0.029459297142360178,
+       "alias": "laboriosam-molestiae_lsat-rc_cot"
+     },
+     "laboriosam-molestiae_lsat-lr_cot": {
+       "acc,none": 0.2823529411764706,
+       "acc_stderr,none": 0.019952288758197854,
+       "alias": "laboriosam-molestiae_lsat-lr_cot"
+     },
+     "laboriosam-molestiae_lsat-ar_cot": {
+       "acc,none": 0.2217391304347826,
+       "acc_stderr,none": 0.027451496604058916,
+       "alias": "laboriosam-molestiae_lsat-ar_cot"
+     },
+     "laboriosam-molestiae_logiqa_cot": {
+       "acc,none": 0.3083067092651757,
+       "acc_stderr,none": 0.018471759300608265,
+       "alias": "laboriosam-molestiae_logiqa_cot"
+     },
+     "laboriosam-molestiae_logiqa2_cot": {
+       "acc,none": 0.36895674300254455,
+       "acc_stderr,none": 0.012173885104839207,
+       "alias": "laboriosam-molestiae_logiqa2_cot"
+     },
+     "iste-molestias_lsat-rc_cot": {
+       "acc,none": 0.4275092936802974,
+       "acc_stderr,none": 0.030219662071838058,
+       "alias": "iste-molestias_lsat-rc_cot"
+     },
+     "iste-molestias_lsat-lr_cot": {
+       "acc,none": 0.2647058823529412,
+       "acc_stderr,none": 0.01955480325785009,
+       "alias": "iste-molestias_lsat-lr_cot"
+     },
+     "iste-molestias_lsat-ar_cot": {
+       "acc,none": 0.20434782608695654,
+       "acc_stderr,none": 0.02664580815001135,
+       "alias": "iste-molestias_lsat-ar_cot"
+     },
+     "iste-molestias_logiqa_cot": {
+       "acc,none": 0.3003194888178914,
+       "acc_stderr,none": 0.018335874932123606,
+       "alias": "iste-molestias_logiqa_cot"
+     },
+     "iste-molestias_logiqa2_cot": {
+       "acc,none": 0.3505089058524173,
+       "acc_stderr,none": 0.012037825298569541,
+       "alias": "iste-molestias_logiqa2_cot"
+     },
+     "eum-saepe_lsat-rc_cot": {
+       "acc,none": 0.44609665427509293,
+       "acc_stderr,none": 0.030364356394504122,
+       "alias": "eum-saepe_lsat-rc_cot"
+     },
+     "eum-saepe_lsat-lr_cot": {
+       "acc,none": 0.2803921568627451,
+       "acc_stderr,none": 0.019910033171474082,
+       "alias": "eum-saepe_lsat-lr_cot"
+     },
+     "eum-saepe_lsat-ar_cot": {
+       "acc,none": 0.23478260869565218,
+       "acc_stderr,none": 0.028009647070930125,
+       "alias": "eum-saepe_lsat-ar_cot"
+     },
+     "eum-saepe_logiqa_cot": {
+       "acc,none": 0.30670926517571884,
+       "acc_stderr,none": 0.018445105229565346,
+       "alias": "eum-saepe_logiqa_cot"
+     },
+     "eum-saepe_logiqa2_cot": {
+       "acc,none": 0.36323155216284986,
+       "acc_stderr,none": 0.012133733683836157,
+       "alias": "eum-saepe_logiqa2_cot"
+     }
+   },
+   "configs": {
+     "eum-saepe_logiqa2_cot": {
+       "task": "eum-saepe_logiqa2_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "eum-saepe-logiqa2/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "eum-saepe_logiqa_cot": {
+       "task": "eum-saepe_logiqa_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "eum-saepe-logiqa/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "eum-saepe_lsat-ar_cot": {
+       "task": "eum-saepe_lsat-ar_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "eum-saepe-lsat-ar/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "eum-saepe_lsat-lr_cot": {
+       "task": "eum-saepe_lsat-lr_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "eum-saepe-lsat-lr/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "eum-saepe_lsat-rc_cot": {
+       "task": "eum-saepe_lsat-rc_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "eum-saepe-lsat-rc/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "iste-molestias_logiqa2_cot": {
+       "task": "iste-molestias_logiqa2_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "iste-molestias-logiqa2/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "iste-molestias_logiqa_cot": {
+       "task": "iste-molestias_logiqa_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "iste-molestias-logiqa/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "iste-molestias_lsat-ar_cot": {
+       "task": "iste-molestias_lsat-ar_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "iste-molestias-lsat-ar/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "iste-molestias_lsat-lr_cot": {
+       "task": "iste-molestias_lsat-lr_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "iste-molestias-lsat-lr/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "iste-molestias_lsat-rc_cot": {
+       "task": "iste-molestias_lsat-rc_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "iste-molestias-lsat-rc/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "laboriosam-molestiae_logiqa2_cot": {
+       "task": "laboriosam-molestiae_logiqa2_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "laboriosam-molestiae-logiqa2/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "laboriosam-molestiae_logiqa_cot": {
+       "task": "laboriosam-molestiae_logiqa_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "laboriosam-molestiae-logiqa/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "laboriosam-molestiae_lsat-ar_cot": {
+       "task": "laboriosam-molestiae_lsat-ar_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "laboriosam-molestiae-lsat-ar/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "laboriosam-molestiae_lsat-lr_cot": {
+       "task": "laboriosam-molestiae_lsat-lr_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "laboriosam-molestiae-lsat-lr/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "laboriosam-molestiae_lsat-rc_cot": {
+       "task": "laboriosam-molestiae_lsat-rc_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "laboriosam-molestiae-lsat-rc/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "nisi-sunt_logiqa2_cot": {
+       "task": "nisi-sunt_logiqa2_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "nisi-sunt-logiqa2/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "nisi-sunt_logiqa_cot": {
+       "task": "nisi-sunt_logiqa_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "nisi-sunt-logiqa/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "nisi-sunt_lsat-ar_cot": {
+       "task": "nisi-sunt_lsat-ar_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "nisi-sunt-lsat-ar/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "nisi-sunt_lsat-lr_cot": {
+       "task": "nisi-sunt_lsat-lr_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "nisi-sunt-lsat-lr/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "nisi-sunt_lsat-rc_cot": {
+       "task": "nisi-sunt_lsat-rc_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "nisi-sunt-lsat-rc/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "saepe-fuga_logiqa2_cot": {
+       "task": "saepe-fuga_logiqa2_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "saepe-fuga-logiqa2/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "saepe-fuga_logiqa_cot": {
+       "task": "saepe-fuga_logiqa_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "saepe-fuga-logiqa/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "saepe-fuga_lsat-ar_cot": {
+       "task": "saepe-fuga_lsat-ar_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "saepe-fuga-lsat-ar/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "saepe-fuga_lsat-lr_cot": {
+       "task": "saepe-fuga_lsat-lr_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "saepe-fuga-lsat-lr/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "saepe-fuga_lsat-rc_cot": {
+       "task": "saepe-fuga_lsat-rc_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "saepe-fuga-lsat-rc/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "veritatis-velit_logiqa2_cot": {
+       "task": "veritatis-velit_logiqa2_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "veritatis-velit-logiqa2/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "veritatis-velit_logiqa_cot": {
+       "task": "veritatis-velit_logiqa_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "veritatis-velit-logiqa/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "veritatis-velit_lsat-ar_cot": {
+       "task": "veritatis-velit_lsat-ar_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "veritatis-velit-lsat-ar/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "veritatis-velit_lsat-lr_cot": {
+       "task": "veritatis-velit_lsat-lr_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "veritatis-velit-lsat-lr/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "veritatis-velit_lsat-rc_cot": {
+       "task": "veritatis-velit_lsat-rc_cot",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "veritatis-velit-lsat-rc/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n [Reasoning: <reasoning>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     }
+   },
+   "versions": {
+     "eum-saepe_logiqa2_cot": 0.0,
+     "eum-saepe_logiqa_cot": 0.0,
+     "eum-saepe_lsat-ar_cot": 0.0,
+     "eum-saepe_lsat-lr_cot": 0.0,
+     "eum-saepe_lsat-rc_cot": 0.0,
+     "iste-molestias_logiqa2_cot": 0.0,
+     "iste-molestias_logiqa_cot": 0.0,
+     "iste-molestias_lsat-ar_cot": 0.0,
+     "iste-molestias_lsat-lr_cot": 0.0,
+     "iste-molestias_lsat-rc_cot": 0.0,
+     "laboriosam-molestiae_logiqa2_cot": 0.0,
+     "laboriosam-molestiae_logiqa_cot": 0.0,
+     "laboriosam-molestiae_lsat-ar_cot": 0.0,
+     "laboriosam-molestiae_lsat-lr_cot": 0.0,
+     "laboriosam-molestiae_lsat-rc_cot": 0.0,
+     "nisi-sunt_logiqa2_cot": 0.0,
+     "nisi-sunt_logiqa_cot": 0.0,
+     "nisi-sunt_lsat-ar_cot": 0.0,
+     "nisi-sunt_lsat-lr_cot": 0.0,
+     "nisi-sunt_lsat-rc_cot": 0.0,
+     "saepe-fuga_logiqa2_cot": 0.0,
+     "saepe-fuga_logiqa_cot": 0.0,
+     "saepe-fuga_lsat-ar_cot": 0.0,
+     "saepe-fuga_lsat-lr_cot": 0.0,
+     "saepe-fuga_lsat-rc_cot": 0.0,
+     "veritatis-velit_logiqa2_cot": 0.0,
+     "veritatis-velit_logiqa_cot": 0.0,
+     "veritatis-velit_lsat-ar_cot": 0.0,
+     "veritatis-velit_lsat-lr_cot": 0.0,
+     "veritatis-velit_lsat-rc_cot": 0.0
+   },
+   "n-shot": {
+     "eum-saepe_logiqa2_cot": 0,
+     "eum-saepe_logiqa_cot": 0,
+     "eum-saepe_lsat-ar_cot": 0,
+     "eum-saepe_lsat-lr_cot": 0,
+     "eum-saepe_lsat-rc_cot": 0,
+     "iste-molestias_logiqa2_cot": 0,
+     "iste-molestias_logiqa_cot": 0,
+     "iste-molestias_lsat-ar_cot": 0,
+     "iste-molestias_lsat-lr_cot": 0,
+     "iste-molestias_lsat-rc_cot": 0,
+     "laboriosam-molestiae_logiqa2_cot": 0,
+     "laboriosam-molestiae_logiqa_cot": 0,
+     "laboriosam-molestiae_lsat-ar_cot": 0,
+     "laboriosam-molestiae_lsat-lr_cot": 0,
+     "laboriosam-molestiae_lsat-rc_cot": 0,
+     "nisi-sunt_logiqa2_cot": 0,
+     "nisi-sunt_logiqa_cot": 0,
+     "nisi-sunt_lsat-ar_cot": 0,
+     "nisi-sunt_lsat-lr_cot": 0,
+     "nisi-sunt_lsat-rc_cot": 0,
+     "saepe-fuga_logiqa2_cot": 0,
+     "saepe-fuga_logiqa_cot": 0,
+     "saepe-fuga_lsat-ar_cot": 0,
+     "saepe-fuga_lsat-lr_cot": 0,
+     "saepe-fuga_lsat-rc_cot": 0,
+     "veritatis-velit_logiqa2_cot": 0,
+     "veritatis-velit_logiqa_cot": 0,
+     "veritatis-velit_lsat-ar_cot": 0,
+     "veritatis-velit_lsat-lr_cot": 0,
+     "veritatis-velit_lsat-rc_cot": 0
+   },
+   "config": {
+     "model": "vllm",
+     "model_args": "pretrained=Deci/DeciLM-7B,revision=main,dtype=auto,tensor_parallel_size=1,gpu_memory_utilization=0.9,trust_remote_code=true,max_length=4096",
+     "batch_size": "auto",
+     "batch_sizes": [],
+     "device": null,
+     "use_cache": null,
+     "limit": null,
+     "bootstrap_iters": 100000,
+     "gen_kwargs": null
+   },
+   "git_hash": "5044cf9"
+ }
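
For readability, the doc_to_text_cot prompt builder that every task config above stores as an escaped one-line string is rendered below as plain Python. This is the same function unescaped from the config value, with indentation restored and explanatory comments added; note that the second assignment to prompt overwrites the instruction sentence built immediately before it, so the prompts actually sent to the model begin directly with the passage.

def doc_to_text_cot(doc) -> str:
    """
    Answer the following question about the given passage. [Base your answer on the reasoning below.]

    Passage: <passage>

    Question: <question>
    A. <choice1>
    B. <choice2>
    C. <choice3>
    D. <choice4>
    [E. <choice5>]

    [Reasoning: <reasoning>]

    Answer:
    """
    # Label the answer choices a-e, truncated to the number of options in this item.
    k = len(doc["options"])
    choices = ["a", "b", "c", "d", "e"][:k]
    prompt = "Answer the following question about the given passage. Base your answer on the reasoning below.\n\n"
    # As stored in the config, this reassignment (not "+=") discards the
    # instruction sentence above, so the final prompt starts at "Passage:".
    prompt = "Passage: " + doc["passage"] + "\n\n"
    prompt += "Question: " + doc["question"] + "\n"
    for choice, option in zip(choices, doc["options"]):
        prompt += f"{choice.upper()}. {option}\n"
    prompt += "\n"
    # Append the pre-generated chain-of-thought trace before eliciting the answer.
    prompt += "Reasoning: " + doc["reasoning_trace"] + "\n\n"
    prompt += "Answer:"
    return prompt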