lomahony committed on
Commit
8ca04ae
1 Parent(s): 6c2755b

Upload 8 files

Browse files
base-6.9b-eval-files/EleutherAI-pythia-6.9b-0shot-shelloutput.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ bootstrapping for stddev: perplexity
2
+ hf (pretrained=EleutherAI/pythia-6.9b), limit: None, num_fewshot: 0, batch_size: 4
3
+ | Task |Version|Filter| Metric | Value | |Stderr|
4
+ |--------------|-------|------|---------------|------:|---|-----:|
5
+ |arc_challenge |Yaml |none |acc | 0.3140|± |0.0136|
6
+ | | |none |acc_norm | 0.3515|± |0.0140|
7
+ |arc_easy |Yaml |none |acc | 0.6705|± |0.0096|
8
+ | | |none |acc_norm | 0.6128|± |0.0100|
9
+ |boolq |Yaml |none |acc | 0.6352|± |0.0084|
10
+ |hellaswag |Yaml |none |acc | 0.4798|± |0.0050|
11
+ | | |none |acc_norm | 0.6389|± |0.0048|
12
+ |lambada_openai|Yaml |none |perplexity | 4.4566|± |0.1000|
13
+ | | |none |acc | 0.6713|± |0.0065|
14
+ |openbookqa |Yaml |none |acc | 0.2560|± |0.0195|
15
+ | | |none |acc_norm | 0.3720|± |0.0216|
16
+ |piqa |Yaml |none |acc | 0.7524|± |0.0101|
17
+ | | |none |acc_norm | 0.7639|± |0.0099|
18
+ |sciq |Yaml |none |acc | 0.8930|± |0.0098|
19
+ | | |none |acc_norm | 0.8380|± |0.0117|
20
+ |wikitext |Yaml |none |word_perplexity|17.6830| | |
21
+ | | |none |byte_perplexity| 1.6057| | |
22
+ | | |none |bits_per_byte | 0.6832| | |
23
+ |winogrande |Yaml |none |acc | 0.6077|± |0.0137|
24
+
base-6.9b-eval-files/EleutherAI-pythia-6.9b-0shot/results.json ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "results": {
3
+ "arc_challenge": {
4
+ "acc,none": 0.31399317406143346,
5
+ "acc_stderr,none": 0.013562691224726305,
6
+ "acc_norm,none": 0.3515358361774744,
7
+ "acc_norm_stderr,none": 0.013952413699600938
8
+ },
9
+ "arc_easy": {
10
+ "acc,none": 0.6704545454545454,
11
+ "acc_stderr,none": 0.00964518419095386,
12
+ "acc_norm,none": 0.6127946127946128,
13
+ "acc_norm_stderr,none": 0.009995312065890353
14
+ },
15
+ "boolq": {
16
+ "acc,none": 0.6351681957186545,
17
+ "acc_stderr,none": 0.00841944098496366
18
+ },
19
+ "hellaswag": {
20
+ "acc,none": 0.4797849034056961,
21
+ "acc_stderr,none": 0.004985701593897999,
22
+ "acc_norm,none": 0.6389165504879506,
23
+ "acc_norm_stderr,none": 0.0047933305256562106
24
+ },
25
+ "lambada_openai": {
26
+ "perplexity,none": 4.456621453732154,
27
+ "perplexity_stderr,none": 0.10000881772967252,
28
+ "acc,none": 0.6712594605084417,
29
+ "acc_stderr,none": 0.006544612151352774
30
+ },
31
+ "openbookqa": {
32
+ "acc,none": 0.256,
33
+ "acc_stderr,none": 0.019536923574747605,
34
+ "acc_norm,none": 0.372,
35
+ "acc_norm_stderr,none": 0.0216371979857224
36
+ },
37
+ "piqa": {
38
+ "acc,none": 0.7524483133841132,
39
+ "acc_stderr,none": 0.010069703966857088,
40
+ "acc_norm,none": 0.763873775843308,
41
+ "acc_norm_stderr,none": 0.009908965890558214
42
+ },
43
+ "sciq": {
44
+ "acc,none": 0.893,
45
+ "acc_stderr,none": 0.009779910359847167,
46
+ "acc_norm,none": 0.838,
47
+ "acc_norm_stderr,none": 0.011657267771304427
48
+ },
49
+ "wikitext": {
50
+ "word_perplexity,none": 17.682958079421635,
51
+ "byte_perplexity,none": 1.6057045697141277,
52
+ "bits_per_byte,none": 0.6832064787735104
53
+ },
54
+ "winogrande": {
55
+ "acc,none": 0.6077348066298343,
56
+ "acc_stderr,none": 0.013722400462000888
57
+ }
58
+ },
59
+ "configs": {
60
+ "arc_challenge": {
61
+ "task": "arc_challenge",
62
+ "group": [
63
+ "ai2_arc",
64
+ "multiple_choice"
65
+ ],
66
+ "dataset_path": "ai2_arc",
67
+ "dataset_name": "ARC-Challenge",
68
+ "training_split": "train",
69
+ "validation_split": "validation",
70
+ "test_split": "test",
71
+ "doc_to_text": "Question: {{question}}\nAnswer:",
72
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
73
+ "doc_to_choice": "{{choices.text}}",
74
+ "description": "",
75
+ "target_delimiter": " ",
76
+ "fewshot_delimiter": "\n\n",
77
+ "num_fewshot": 0,
78
+ "metric_list": [
79
+ {
80
+ "metric": "acc",
81
+ "aggregation": "mean",
82
+ "higher_is_better": true
83
+ },
84
+ {
85
+ "metric": "acc_norm",
86
+ "aggregation": "mean",
87
+ "higher_is_better": true
88
+ }
89
+ ],
90
+ "output_type": "multiple_choice",
91
+ "repeats": 1,
92
+ "should_decontaminate": true,
93
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
94
+ },
95
+ "arc_easy": {
96
+ "task": "arc_easy",
97
+ "group": [
98
+ "ai2_arc",
99
+ "multiple_choice"
100
+ ],
101
+ "dataset_path": "ai2_arc",
102
+ "dataset_name": "ARC-Easy",
103
+ "training_split": "train",
104
+ "validation_split": "validation",
105
+ "test_split": "test",
106
+ "doc_to_text": "Question: {{question}}\nAnswer:",
107
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
108
+ "doc_to_choice": "{{choices.text}}",
109
+ "description": "",
110
+ "target_delimiter": " ",
111
+ "fewshot_delimiter": "\n\n",
112
+ "num_fewshot": 0,
113
+ "metric_list": [
114
+ {
115
+ "metric": "acc",
116
+ "aggregation": "mean",
117
+ "higher_is_better": true
118
+ },
119
+ {
120
+ "metric": "acc_norm",
121
+ "aggregation": "mean",
122
+ "higher_is_better": true
123
+ }
124
+ ],
125
+ "output_type": "multiple_choice",
126
+ "repeats": 1,
127
+ "should_decontaminate": true,
128
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
129
+ },
130
+ "boolq": {
131
+ "task": "boolq",
132
+ "group": [
133
+ "super-glue-lm-eval-v1"
134
+ ],
135
+ "dataset_path": "super_glue",
136
+ "dataset_name": "boolq",
137
+ "training_split": "train",
138
+ "validation_split": "validation",
139
+ "doc_to_text": "{{passage}}\nQuestion: {{question}}?\nAnswer:",
140
+ "doc_to_target": "label",
141
+ "doc_to_choice": [
142
+ "no",
143
+ "yes"
144
+ ],
145
+ "description": "",
146
+ "target_delimiter": " ",
147
+ "fewshot_delimiter": "\n\n",
148
+ "num_fewshot": 0,
149
+ "metric_list": [
150
+ {
151
+ "metric": "acc"
152
+ }
153
+ ],
154
+ "output_type": "multiple_choice",
155
+ "repeats": 1,
156
+ "should_decontaminate": true,
157
+ "doc_to_decontamination_query": "passage"
158
+ },
159
+ "hellaswag": {
160
+ "task": "hellaswag",
161
+ "group": [
162
+ "multiple_choice"
163
+ ],
164
+ "dataset_path": "hellaswag",
165
+ "training_split": "train",
166
+ "validation_split": "validation",
167
+ "doc_to_text": "{% set text = activity_label ~ ': ' ~ ctx_a ~ ' ' ~ ctx_b.capitalize() %}{{text|trim|replace(' [title]', '. ')|regex_replace('\\[.*?\\]', '')|replace(' ', ' ')}}",
168
+ "doc_to_target": "{{label}}",
169
+ "doc_to_choice": "{{endings|map('trim')|map('replace', ' [title]', '. ')|map('regex_replace', '\\[.*?\\]', '')|map('replace', ' ', ' ')|list}}",
170
+ "description": "",
171
+ "target_delimiter": " ",
172
+ "fewshot_delimiter": "\n\n",
173
+ "num_fewshot": 0,
174
+ "metric_list": [
175
+ {
176
+ "metric": "acc",
177
+ "aggregation": "mean",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc_norm",
182
+ "aggregation": "mean",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "multiple_choice",
187
+ "repeats": 1,
188
+ "should_decontaminate": false
189
+ },
190
+ "lambada_openai": {
191
+ "task": "lambada_openai",
192
+ "group": [
193
+ "lambada",
194
+ "loglikelihood",
195
+ "perplexity"
196
+ ],
197
+ "dataset_path": "EleutherAI/lambada_openai",
198
+ "dataset_name": "default",
199
+ "test_split": "test",
200
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
201
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
202
+ "description": "",
203
+ "target_delimiter": " ",
204
+ "fewshot_delimiter": "\n\n",
205
+ "num_fewshot": 0,
206
+ "metric_list": [
207
+ {
208
+ "metric": "perplexity",
209
+ "aggregation": "perplexity",
210
+ "higher_is_better": false
211
+ },
212
+ {
213
+ "metric": "acc",
214
+ "aggregation": "mean",
215
+ "higher_is_better": true
216
+ }
217
+ ],
218
+ "output_type": "loglikelihood",
219
+ "repeats": 1,
220
+ "should_decontaminate": true,
221
+ "doc_to_decontamination_query": "{{text}}"
222
+ },
223
+ "openbookqa": {
224
+ "task": "openbookqa",
225
+ "group": [
226
+ "multiple_choice"
227
+ ],
228
+ "dataset_path": "openbookqa",
229
+ "dataset_name": "main",
230
+ "training_split": "train",
231
+ "validation_split": "validation",
232
+ "test_split": "test",
233
+ "doc_to_text": "question_stem",
234
+ "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}",
235
+ "doc_to_choice": "{{choices.text}}",
236
+ "description": "",
237
+ "target_delimiter": " ",
238
+ "fewshot_delimiter": "\n\n",
239
+ "num_fewshot": 0,
240
+ "metric_list": [
241
+ {
242
+ "metric": "acc",
243
+ "aggregation": "mean",
244
+ "higher_is_better": true
245
+ },
246
+ {
247
+ "metric": "acc_norm",
248
+ "aggregation": "mean",
249
+ "higher_is_better": true
250
+ }
251
+ ],
252
+ "output_type": "multiple_choice",
253
+ "repeats": 1,
254
+ "should_decontaminate": true,
255
+ "doc_to_decontamination_query": "question_stem"
256
+ },
257
+ "piqa": {
258
+ "task": "piqa",
259
+ "group": [
260
+ "multiple_choice"
261
+ ],
262
+ "dataset_path": "piqa",
263
+ "training_split": "train",
264
+ "validation_split": "validation",
265
+ "doc_to_text": "Question: {{goal}}\nAnswer:",
266
+ "doc_to_target": "label",
267
+ "doc_to_choice": "{{[sol1, sol2]}}",
268
+ "description": "",
269
+ "target_delimiter": " ",
270
+ "fewshot_delimiter": "\n\n",
271
+ "num_fewshot": 0,
272
+ "metric_list": [
273
+ {
274
+ "metric": "acc",
275
+ "aggregation": "mean",
276
+ "higher_is_better": true
277
+ },
278
+ {
279
+ "metric": "acc_norm",
280
+ "aggregation": "mean",
281
+ "higher_is_better": true
282
+ }
283
+ ],
284
+ "output_type": "multiple_choice",
285
+ "repeats": 1,
286
+ "should_decontaminate": true,
287
+ "doc_to_decontamination_query": "goal"
288
+ },
289
+ "sciq": {
290
+ "task": "sciq",
291
+ "group": [
292
+ "multiple_choice"
293
+ ],
294
+ "dataset_path": "sciq",
295
+ "training_split": "train",
296
+ "validation_split": "validation",
297
+ "test_split": "test",
298
+ "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:",
299
+ "doc_to_target": 3,
300
+ "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}",
301
+ "description": "",
302
+ "target_delimiter": " ",
303
+ "fewshot_delimiter": "\n\n",
304
+ "num_fewshot": 0,
305
+ "metric_list": [
306
+ {
307
+ "metric": "acc",
308
+ "aggregation": "mean",
309
+ "higher_is_better": true
310
+ },
311
+ {
312
+ "metric": "acc_norm",
313
+ "aggregation": "mean",
314
+ "higher_is_better": true
315
+ }
316
+ ],
317
+ "output_type": "multiple_choice",
318
+ "repeats": 1,
319
+ "should_decontaminate": true,
320
+ "doc_to_decontamination_query": "{{support}} {{question}}"
321
+ },
322
+ "wikitext": {
323
+ "task": "wikitext",
324
+ "group": [
325
+ "perplexity",
326
+ "loglikelihood_rolling"
327
+ ],
328
+ "dataset_path": "EleutherAI/wikitext_document_level",
329
+ "dataset_name": "wikitext-2-raw-v1",
330
+ "training_split": "train",
331
+ "validation_split": "validation",
332
+ "test_split": "test",
333
+ "doc_to_text": "",
334
+ "doc_to_target": "<function wikitext_detokenizer at 0x7ff390401120>",
335
+ "description": "",
336
+ "target_delimiter": " ",
337
+ "fewshot_delimiter": "\n\n",
338
+ "num_fewshot": 0,
339
+ "metric_list": [
340
+ {
341
+ "metric": "word_perplexity"
342
+ },
343
+ {
344
+ "metric": "byte_perplexity"
345
+ },
346
+ {
347
+ "metric": "bits_per_byte"
348
+ }
349
+ ],
350
+ "output_type": "loglikelihood_rolling",
351
+ "repeats": 1,
352
+ "should_decontaminate": true,
353
+ "doc_to_decontamination_query": "{{page}}"
354
+ },
355
+ "winogrande": {
356
+ "task": "winogrande",
357
+ "dataset_path": "winogrande",
358
+ "dataset_name": "winogrande_xl",
359
+ "training_split": "train",
360
+ "validation_split": "validation",
361
+ "doc_to_text": "<function doc_to_text at 0x7ff390401360>",
362
+ "doc_to_target": "<function doc_to_target at 0x7ff3904016c0>",
363
+ "doc_to_choice": "<function doc_to_choice at 0x7ff390401a20>",
364
+ "description": "",
365
+ "target_delimiter": " ",
366
+ "fewshot_delimiter": "\n\n",
367
+ "num_fewshot": 0,
368
+ "metric_list": [
369
+ {
370
+ "metric": "acc",
371
+ "aggregation": "mean",
372
+ "higher_is_better": true
373
+ }
374
+ ],
375
+ "output_type": "multiple_choice",
376
+ "repeats": 1,
377
+ "should_decontaminate": true,
378
+ "doc_to_decontamination_query": "sentence"
379
+ }
380
+ },
381
+ "versions": {
382
+ "arc_challenge": "Yaml",
383
+ "arc_easy": "Yaml",
384
+ "boolq": "Yaml",
385
+ "hellaswag": "Yaml",
386
+ "lambada_openai": "Yaml",
387
+ "openbookqa": "Yaml",
388
+ "piqa": "Yaml",
389
+ "sciq": "Yaml",
390
+ "wikitext": "Yaml",
391
+ "winogrande": "Yaml"
392
+ },
393
+ "config": {
394
+ "model": "hf",
395
+ "model_args": "pretrained=EleutherAI/pythia-6.9b",
396
+ "batch_size": "4",
397
+ "batch_sizes": [],
398
+ "device": null,
399
+ "use_cache": null,
400
+ "limit": null,
401
+ "bootstrap_iters": 100000
402
+ },
403
+ "git_hash": "d1a44c8"
404
+ }
base-6.9b-eval-files/EleutherAI-pythia-6.9b-5shot-shelloutput.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ bootstrapping for stddev: perplexity
2
+ hf (pretrained=EleutherAI/pythia-6.9b), limit: None, num_fewshot: 5, batch_size: 4
3
+ | Task |Version|Filter| Metric | Value | |Stderr|
4
+ |--------------|-------|------|---------------|------:|---|-----:|
5
+ |arc_challenge |Yaml |none |acc | 0.3541|± |0.0140|
6
+ | | |none |acc_norm | 0.3891|± |0.0142|
7
+ |arc_easy |Yaml |none |acc | 0.6944|± |0.0095|
8
+ | | |none |acc_norm | 0.7045|± |0.0094|
9
+ |boolq |Yaml |none |acc | 0.6575|± |0.0083|
10
+ |hellaswag |Yaml |none |acc | 0.4804|± |0.0050|
11
+ | | |none |acc_norm | 0.6509|± |0.0048|
12
+ |lambada_openai|Yaml |none |perplexity | 5.6328|± |0.1331|
13
+ | | |none |acc | 0.6231|± |0.0068|
14
+ |openbookqa |Yaml |none |acc | 0.2800|± |0.0201|
15
+ | | |none |acc_norm | 0.3540|± |0.0214|
16
+ |piqa |Yaml |none |acc | 0.7644|± |0.0099|
17
+ | | |none |acc_norm | 0.7633|± |0.0099|
18
+ |sciq |Yaml |none |acc | 0.9470|± |0.0071|
19
+ | | |none |acc_norm | 0.9550|± |0.0066|
20
+ |wikitext |Yaml |none |word_perplexity|17.6830| | |
21
+ | | |none |byte_perplexity| 1.6057| | |
22
+ | | |none |bits_per_byte | 0.6832| | |
23
+ |winogrande |Yaml |none |acc | 0.6069|± |0.0137|
24
+
base-6.9b-eval-files/EleutherAI-pythia-6.9b-5shot/results.json ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "results": {
3
+ "arc_challenge": {
4
+ "acc,none": 0.35409556313993173,
5
+ "acc_stderr,none": 0.01397545412275655,
6
+ "acc_norm,none": 0.3890784982935154,
7
+ "acc_norm_stderr,none": 0.014247309976045609
8
+ },
9
+ "arc_easy": {
10
+ "acc,none": 0.6944444444444444,
11
+ "acc_stderr,none": 0.00945218121359346,
12
+ "acc_norm,none": 0.7045454545454546,
13
+ "acc_norm_stderr,none": 0.009361987126556446
14
+ },
15
+ "boolq": {
16
+ "acc,none": 0.6574923547400612,
17
+ "acc_stderr,none": 0.008299903219506771
18
+ },
19
+ "hellaswag": {
20
+ "acc,none": 0.4803823939454292,
21
+ "acc_stderr,none": 0.004985939292819588,
22
+ "acc_norm,none": 0.650866361282613,
23
+ "acc_norm_stderr,none": 0.004757220449283696
24
+ },
25
+ "lambada_openai": {
26
+ "perplexity,none": 5.632830092794682,
27
+ "perplexity_stderr,none": 0.13309255473880457,
28
+ "acc,none": 0.6231321560256161,
29
+ "acc_stderr,none": 0.006751444407117095
30
+ },
31
+ "openbookqa": {
32
+ "acc,none": 0.28,
33
+ "acc_stderr,none": 0.020099950647503233,
34
+ "acc_norm,none": 0.354,
35
+ "acc_norm_stderr,none": 0.021407582047916447
36
+ },
37
+ "piqa": {
38
+ "acc,none": 0.764417845484222,
39
+ "acc_stderr,none": 0.009901067586473907,
40
+ "acc_norm,none": 0.7633297062023939,
41
+ "acc_norm_stderr,none": 0.009916841655042809
42
+ },
43
+ "sciq": {
44
+ "acc,none": 0.947,
45
+ "acc_stderr,none": 0.007088105617246445,
46
+ "acc_norm,none": 0.955,
47
+ "acc_norm_stderr,none": 0.006558812241406067
48
+ },
49
+ "wikitext": {
50
+ "word_perplexity,none": 17.682958079421635,
51
+ "byte_perplexity,none": 1.6057045697141277,
52
+ "bits_per_byte,none": 0.6832064787735104
53
+ },
54
+ "winogrande": {
55
+ "acc,none": 0.6069455406471981,
56
+ "acc_stderr,none": 0.013727276249108442
57
+ }
58
+ },
59
+ "configs": {
60
+ "arc_challenge": {
61
+ "task": "arc_challenge",
62
+ "group": [
63
+ "ai2_arc",
64
+ "multiple_choice"
65
+ ],
66
+ "dataset_path": "ai2_arc",
67
+ "dataset_name": "ARC-Challenge",
68
+ "training_split": "train",
69
+ "validation_split": "validation",
70
+ "test_split": "test",
71
+ "doc_to_text": "Question: {{question}}\nAnswer:",
72
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
73
+ "doc_to_choice": "{{choices.text}}",
74
+ "description": "",
75
+ "target_delimiter": " ",
76
+ "fewshot_delimiter": "\n\n",
77
+ "num_fewshot": 5,
78
+ "metric_list": [
79
+ {
80
+ "metric": "acc",
81
+ "aggregation": "mean",
82
+ "higher_is_better": true
83
+ },
84
+ {
85
+ "metric": "acc_norm",
86
+ "aggregation": "mean",
87
+ "higher_is_better": true
88
+ }
89
+ ],
90
+ "output_type": "multiple_choice",
91
+ "repeats": 1,
92
+ "should_decontaminate": true,
93
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
94
+ },
95
+ "arc_easy": {
96
+ "task": "arc_easy",
97
+ "group": [
98
+ "ai2_arc",
99
+ "multiple_choice"
100
+ ],
101
+ "dataset_path": "ai2_arc",
102
+ "dataset_name": "ARC-Easy",
103
+ "training_split": "train",
104
+ "validation_split": "validation",
105
+ "test_split": "test",
106
+ "doc_to_text": "Question: {{question}}\nAnswer:",
107
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
108
+ "doc_to_choice": "{{choices.text}}",
109
+ "description": "",
110
+ "target_delimiter": " ",
111
+ "fewshot_delimiter": "\n\n",
112
+ "num_fewshot": 5,
113
+ "metric_list": [
114
+ {
115
+ "metric": "acc",
116
+ "aggregation": "mean",
117
+ "higher_is_better": true
118
+ },
119
+ {
120
+ "metric": "acc_norm",
121
+ "aggregation": "mean",
122
+ "higher_is_better": true
123
+ }
124
+ ],
125
+ "output_type": "multiple_choice",
126
+ "repeats": 1,
127
+ "should_decontaminate": true,
128
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
129
+ },
130
+ "boolq": {
131
+ "task": "boolq",
132
+ "group": [
133
+ "super-glue-lm-eval-v1"
134
+ ],
135
+ "dataset_path": "super_glue",
136
+ "dataset_name": "boolq",
137
+ "training_split": "train",
138
+ "validation_split": "validation",
139
+ "doc_to_text": "{{passage}}\nQuestion: {{question}}?\nAnswer:",
140
+ "doc_to_target": "label",
141
+ "doc_to_choice": [
142
+ "no",
143
+ "yes"
144
+ ],
145
+ "description": "",
146
+ "target_delimiter": " ",
147
+ "fewshot_delimiter": "\n\n",
148
+ "num_fewshot": 5,
149
+ "metric_list": [
150
+ {
151
+ "metric": "acc"
152
+ }
153
+ ],
154
+ "output_type": "multiple_choice",
155
+ "repeats": 1,
156
+ "should_decontaminate": true,
157
+ "doc_to_decontamination_query": "passage"
158
+ },
159
+ "hellaswag": {
160
+ "task": "hellaswag",
161
+ "group": [
162
+ "multiple_choice"
163
+ ],
164
+ "dataset_path": "hellaswag",
165
+ "training_split": "train",
166
+ "validation_split": "validation",
167
+ "doc_to_text": "{% set text = activity_label ~ ': ' ~ ctx_a ~ ' ' ~ ctx_b.capitalize() %}{{text|trim|replace(' [title]', '. ')|regex_replace('\\[.*?\\]', '')|replace(' ', ' ')}}",
168
+ "doc_to_target": "{{label}}",
169
+ "doc_to_choice": "{{endings|map('trim')|map('replace', ' [title]', '. ')|map('regex_replace', '\\[.*?\\]', '')|map('replace', ' ', ' ')|list}}",
170
+ "description": "",
171
+ "target_delimiter": " ",
172
+ "fewshot_delimiter": "\n\n",
173
+ "num_fewshot": 5,
174
+ "metric_list": [
175
+ {
176
+ "metric": "acc",
177
+ "aggregation": "mean",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc_norm",
182
+ "aggregation": "mean",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "multiple_choice",
187
+ "repeats": 1,
188
+ "should_decontaminate": false
189
+ },
190
+ "lambada_openai": {
191
+ "task": "lambada_openai",
192
+ "group": [
193
+ "lambada",
194
+ "loglikelihood",
195
+ "perplexity"
196
+ ],
197
+ "dataset_path": "EleutherAI/lambada_openai",
198
+ "dataset_name": "default",
199
+ "test_split": "test",
200
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
201
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
202
+ "description": "",
203
+ "target_delimiter": " ",
204
+ "fewshot_delimiter": "\n\n",
205
+ "num_fewshot": 5,
206
+ "metric_list": [
207
+ {
208
+ "metric": "perplexity",
209
+ "aggregation": "perplexity",
210
+ "higher_is_better": false
211
+ },
212
+ {
213
+ "metric": "acc",
214
+ "aggregation": "mean",
215
+ "higher_is_better": true
216
+ }
217
+ ],
218
+ "output_type": "loglikelihood",
219
+ "repeats": 1,
220
+ "should_decontaminate": true,
221
+ "doc_to_decontamination_query": "{{text}}"
222
+ },
223
+ "openbookqa": {
224
+ "task": "openbookqa",
225
+ "group": [
226
+ "multiple_choice"
227
+ ],
228
+ "dataset_path": "openbookqa",
229
+ "dataset_name": "main",
230
+ "training_split": "train",
231
+ "validation_split": "validation",
232
+ "test_split": "test",
233
+ "doc_to_text": "question_stem",
234
+ "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}",
235
+ "doc_to_choice": "{{choices.text}}",
236
+ "description": "",
237
+ "target_delimiter": " ",
238
+ "fewshot_delimiter": "\n\n",
239
+ "num_fewshot": 5,
240
+ "metric_list": [
241
+ {
242
+ "metric": "acc",
243
+ "aggregation": "mean",
244
+ "higher_is_better": true
245
+ },
246
+ {
247
+ "metric": "acc_norm",
248
+ "aggregation": "mean",
249
+ "higher_is_better": true
250
+ }
251
+ ],
252
+ "output_type": "multiple_choice",
253
+ "repeats": 1,
254
+ "should_decontaminate": true,
255
+ "doc_to_decontamination_query": "question_stem"
256
+ },
257
+ "piqa": {
258
+ "task": "piqa",
259
+ "group": [
260
+ "multiple_choice"
261
+ ],
262
+ "dataset_path": "piqa",
263
+ "training_split": "train",
264
+ "validation_split": "validation",
265
+ "doc_to_text": "Question: {{goal}}\nAnswer:",
266
+ "doc_to_target": "label",
267
+ "doc_to_choice": "{{[sol1, sol2]}}",
268
+ "description": "",
269
+ "target_delimiter": " ",
270
+ "fewshot_delimiter": "\n\n",
271
+ "num_fewshot": 5,
272
+ "metric_list": [
273
+ {
274
+ "metric": "acc",
275
+ "aggregation": "mean",
276
+ "higher_is_better": true
277
+ },
278
+ {
279
+ "metric": "acc_norm",
280
+ "aggregation": "mean",
281
+ "higher_is_better": true
282
+ }
283
+ ],
284
+ "output_type": "multiple_choice",
285
+ "repeats": 1,
286
+ "should_decontaminate": true,
287
+ "doc_to_decontamination_query": "goal"
288
+ },
289
+ "sciq": {
290
+ "task": "sciq",
291
+ "group": [
292
+ "multiple_choice"
293
+ ],
294
+ "dataset_path": "sciq",
295
+ "training_split": "train",
296
+ "validation_split": "validation",
297
+ "test_split": "test",
298
+ "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:",
299
+ "doc_to_target": 3,
300
+ "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}",
301
+ "description": "",
302
+ "target_delimiter": " ",
303
+ "fewshot_delimiter": "\n\n",
304
+ "num_fewshot": 5,
305
+ "metric_list": [
306
+ {
307
+ "metric": "acc",
308
+ "aggregation": "mean",
309
+ "higher_is_better": true
310
+ },
311
+ {
312
+ "metric": "acc_norm",
313
+ "aggregation": "mean",
314
+ "higher_is_better": true
315
+ }
316
+ ],
317
+ "output_type": "multiple_choice",
318
+ "repeats": 1,
319
+ "should_decontaminate": true,
320
+ "doc_to_decontamination_query": "{{support}} {{question}}"
321
+ },
322
+ "wikitext": {
323
+ "task": "wikitext",
324
+ "group": [
325
+ "perplexity",
326
+ "loglikelihood_rolling"
327
+ ],
328
+ "dataset_path": "EleutherAI/wikitext_document_level",
329
+ "dataset_name": "wikitext-2-raw-v1",
330
+ "training_split": "train",
331
+ "validation_split": "validation",
332
+ "test_split": "test",
333
+ "doc_to_text": "",
334
+ "doc_to_target": "<function wikitext_detokenizer at 0x7fd39a70d120>",
335
+ "description": "",
336
+ "target_delimiter": " ",
337
+ "fewshot_delimiter": "\n\n",
338
+ "num_fewshot": 5,
339
+ "metric_list": [
340
+ {
341
+ "metric": "word_perplexity"
342
+ },
343
+ {
344
+ "metric": "byte_perplexity"
345
+ },
346
+ {
347
+ "metric": "bits_per_byte"
348
+ }
349
+ ],
350
+ "output_type": "loglikelihood_rolling",
351
+ "repeats": 1,
352
+ "should_decontaminate": true,
353
+ "doc_to_decontamination_query": "{{page}}"
354
+ },
355
+ "winogrande": {
356
+ "task": "winogrande",
357
+ "dataset_path": "winogrande",
358
+ "dataset_name": "winogrande_xl",
359
+ "training_split": "train",
360
+ "validation_split": "validation",
361
+ "doc_to_text": "<function doc_to_text at 0x7fd39a70d360>",
362
+ "doc_to_target": "<function doc_to_target at 0x7fd39a70d6c0>",
363
+ "doc_to_choice": "<function doc_to_choice at 0x7fd39a70da20>",
364
+ "description": "",
365
+ "target_delimiter": " ",
366
+ "fewshot_delimiter": "\n\n",
367
+ "num_fewshot": 5,
368
+ "metric_list": [
369
+ {
370
+ "metric": "acc",
371
+ "aggregation": "mean",
372
+ "higher_is_better": true
373
+ }
374
+ ],
375
+ "output_type": "multiple_choice",
376
+ "repeats": 1,
377
+ "should_decontaminate": true,
378
+ "doc_to_decontamination_query": "sentence"
379
+ }
380
+ },
381
+ "versions": {
382
+ "arc_challenge": "Yaml",
383
+ "arc_easy": "Yaml",
384
+ "boolq": "Yaml",
385
+ "hellaswag": "Yaml",
386
+ "lambada_openai": "Yaml",
387
+ "openbookqa": "Yaml",
388
+ "piqa": "Yaml",
389
+ "sciq": "Yaml",
390
+ "wikitext": "Yaml",
391
+ "winogrande": "Yaml"
392
+ },
393
+ "config": {
394
+ "model": "hf",
395
+ "model_args": "pretrained=EleutherAI/pythia-6.9b",
396
+ "batch_size": "4",
397
+ "batch_sizes": [],
398
+ "device": null,
399
+ "use_cache": null,
400
+ "limit": null,
401
+ "bootstrap_iters": 100000
402
+ },
403
+ "git_hash": "d1a44c8"
404
+ }
sft-6.9b-eval-files/sft-pythia-6.9b-0shot-shelloutput.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ bootstrapping for stddev: perplexity
2
+ hf (pretrained=lomahony/eleuther-pythia6.9b-hh-sft), limit: None, num_fewshot: 0, batch_size: 4
3
+ | Task |Version|Filter| Metric | Value | |Stderr|
4
+ |--------------|-------|------|---------------|------:|---|-----:|
5
+ |arc_challenge |Yaml |none |acc | 0.3080|± |0.0135|
6
+ | | |none |acc_norm | 0.3379|± |0.0138|
7
+ |arc_easy |Yaml |none |acc | 0.6789|± |0.0096|
8
+ | | |none |acc_norm | 0.6048|± |0.0100|
9
+ |boolq |Yaml |none |acc | 0.6657|± |0.0083|
10
+ |hellaswag |Yaml |none |acc | 0.4847|± |0.0050|
11
+ | | |none |acc_norm | 0.6404|± |0.0048|
12
+ |lambada_openai|Yaml |none |perplexity | 3.9049|± |0.0904|
13
+ | | |none |acc | 0.6920|± |0.0064|
14
+ |openbookqa |Yaml |none |acc | 0.2640|± |0.0197|
15
+ | | |none |acc_norm | 0.3740|± |0.0217|
16
+ |piqa |Yaml |none |acc | 0.7601|± |0.0100|
17
+ | | |none |acc_norm | 0.7661|± |0.0099|
18
+ |sciq |Yaml |none |acc | 0.9010|± |0.0094|
19
+ | | |none |acc_norm | 0.8290|± |0.0119|
20
+ |wikitext |Yaml |none |word_perplexity|18.0722| | |
21
+ | | |none |byte_perplexity| 1.6115| | |
22
+ | | |none |bits_per_byte | 0.6884| | |
23
+ |winogrande |Yaml |none |acc | 0.6164|± |0.0137|
24
+
sft-6.9b-eval-files/sft-pythia-6.9b-0shot/results.json ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "results": {
3
+ "arc_challenge": {
4
+ "acc,none": 0.30802047781569963,
5
+ "acc_stderr,none": 0.013491429517292038,
6
+ "acc_norm,none": 0.3378839590443686,
7
+ "acc_norm_stderr,none": 0.013822047922283512
8
+ },
9
+ "arc_easy": {
10
+ "acc,none": 0.6788720538720538,
11
+ "acc_stderr,none": 0.009580787536986797,
12
+ "acc_norm,none": 0.6047979797979798,
13
+ "acc_norm_stderr,none": 0.010031894052790976
14
+ },
15
+ "boolq": {
16
+ "acc,none": 0.6657492354740061,
17
+ "acc_stderr,none": 0.00825057245508343
18
+ },
19
+ "hellaswag": {
20
+ "acc,none": 0.48466440948018324,
21
+ "acc_stderr,none": 0.004987433862274563,
22
+ "acc_norm,none": 0.6404102768372834,
23
+ "acc_norm_stderr,none": 0.004788994060654275
24
+ },
25
+ "lambada_openai": {
26
+ "perplexity,none": 3.9048816842205616,
27
+ "perplexity_stderr,none": 0.0904411518306548,
28
+ "acc,none": 0.6920240636522415,
29
+ "acc_stderr,none": 0.006431778256505186
30
+ },
31
+ "openbookqa": {
32
+ "acc,none": 0.264,
33
+ "acc_stderr,none": 0.019732885585922094,
34
+ "acc_norm,none": 0.374,
35
+ "acc_norm_stderr,none": 0.021660710347204484
36
+ },
37
+ "piqa": {
38
+ "acc,none": 0.7600652883569097,
39
+ "acc_stderr,none": 0.009963625892809545,
40
+ "acc_norm,none": 0.766050054406964,
41
+ "acc_norm_stderr,none": 0.009877236895137463
42
+ },
43
+ "sciq": {
44
+ "acc,none": 0.901,
45
+ "acc_stderr,none": 0.009449248027662744,
46
+ "acc_norm,none": 0.829,
47
+ "acc_norm_stderr,none": 0.011912216456264583
48
+ },
49
+ "wikitext": {
50
+ "word_perplexity,none": 18.072185827315998,
51
+ "byte_perplexity,none": 1.6114783437385283,
52
+ "bits_per_byte,none": 0.6883848005035924
53
+ },
54
+ "winogrande": {
55
+ "acc,none": 0.6164167324388319,
56
+ "acc_stderr,none": 0.01366627588953902
57
+ }
58
+ },
59
+ "configs": {
60
+ "arc_challenge": {
61
+ "task": "arc_challenge",
62
+ "group": [
63
+ "ai2_arc",
64
+ "multiple_choice"
65
+ ],
66
+ "dataset_path": "ai2_arc",
67
+ "dataset_name": "ARC-Challenge",
68
+ "training_split": "train",
69
+ "validation_split": "validation",
70
+ "test_split": "test",
71
+ "doc_to_text": "Question: {{question}}\nAnswer:",
72
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
73
+ "doc_to_choice": "{{choices.text}}",
74
+ "description": "",
75
+ "target_delimiter": " ",
76
+ "fewshot_delimiter": "\n\n",
77
+ "num_fewshot": 0,
78
+ "metric_list": [
79
+ {
80
+ "metric": "acc",
81
+ "aggregation": "mean",
82
+ "higher_is_better": true
83
+ },
84
+ {
85
+ "metric": "acc_norm",
86
+ "aggregation": "mean",
87
+ "higher_is_better": true
88
+ }
89
+ ],
90
+ "output_type": "multiple_choice",
91
+ "repeats": 1,
92
+ "should_decontaminate": true,
93
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
94
+ },
95
+ "arc_easy": {
96
+ "task": "arc_easy",
97
+ "group": [
98
+ "ai2_arc",
99
+ "multiple_choice"
100
+ ],
101
+ "dataset_path": "ai2_arc",
102
+ "dataset_name": "ARC-Easy",
103
+ "training_split": "train",
104
+ "validation_split": "validation",
105
+ "test_split": "test",
106
+ "doc_to_text": "Question: {{question}}\nAnswer:",
107
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
108
+ "doc_to_choice": "{{choices.text}}",
109
+ "description": "",
110
+ "target_delimiter": " ",
111
+ "fewshot_delimiter": "\n\n",
112
+ "num_fewshot": 0,
113
+ "metric_list": [
114
+ {
115
+ "metric": "acc",
116
+ "aggregation": "mean",
117
+ "higher_is_better": true
118
+ },
119
+ {
120
+ "metric": "acc_norm",
121
+ "aggregation": "mean",
122
+ "higher_is_better": true
123
+ }
124
+ ],
125
+ "output_type": "multiple_choice",
126
+ "repeats": 1,
127
+ "should_decontaminate": true,
128
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
129
+ },
130
+ "boolq": {
131
+ "task": "boolq",
132
+ "group": [
133
+ "super-glue-lm-eval-v1"
134
+ ],
135
+ "dataset_path": "super_glue",
136
+ "dataset_name": "boolq",
137
+ "training_split": "train",
138
+ "validation_split": "validation",
139
+ "doc_to_text": "{{passage}}\nQuestion: {{question}}?\nAnswer:",
140
+ "doc_to_target": "label",
141
+ "doc_to_choice": [
142
+ "no",
143
+ "yes"
144
+ ],
145
+ "description": "",
146
+ "target_delimiter": " ",
147
+ "fewshot_delimiter": "\n\n",
148
+ "num_fewshot": 0,
149
+ "metric_list": [
150
+ {
151
+ "metric": "acc"
152
+ }
153
+ ],
154
+ "output_type": "multiple_choice",
155
+ "repeats": 1,
156
+ "should_decontaminate": true,
157
+ "doc_to_decontamination_query": "passage"
158
+ },
159
+ "hellaswag": {
160
+ "task": "hellaswag",
161
+ "group": [
162
+ "multiple_choice"
163
+ ],
164
+ "dataset_path": "hellaswag",
165
+ "training_split": "train",
166
+ "validation_split": "validation",
167
+ "doc_to_text": "{% set text = activity_label ~ ': ' ~ ctx_a ~ ' ' ~ ctx_b.capitalize() %}{{text|trim|replace(' [title]', '. ')|regex_replace('\\[.*?\\]', '')|replace(' ', ' ')}}",
168
+ "doc_to_target": "{{label}}",
169
+ "doc_to_choice": "{{endings|map('trim')|map('replace', ' [title]', '. ')|map('regex_replace', '\\[.*?\\]', '')|map('replace', ' ', ' ')|list}}",
170
+ "description": "",
171
+ "target_delimiter": " ",
172
+ "fewshot_delimiter": "\n\n",
173
+ "num_fewshot": 0,
174
+ "metric_list": [
175
+ {
176
+ "metric": "acc",
177
+ "aggregation": "mean",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc_norm",
182
+ "aggregation": "mean",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "multiple_choice",
187
+ "repeats": 1,
188
+ "should_decontaminate": false
189
+ },
190
+ "lambada_openai": {
191
+ "task": "lambada_openai",
192
+ "group": [
193
+ "lambada",
194
+ "loglikelihood",
195
+ "perplexity"
196
+ ],
197
+ "dataset_path": "EleutherAI/lambada_openai",
198
+ "dataset_name": "default",
199
+ "test_split": "test",
200
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
201
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
202
+ "description": "",
203
+ "target_delimiter": " ",
204
+ "fewshot_delimiter": "\n\n",
205
+ "num_fewshot": 0,
206
+ "metric_list": [
207
+ {
208
+ "metric": "perplexity",
209
+ "aggregation": "perplexity",
210
+ "higher_is_better": false
211
+ },
212
+ {
213
+ "metric": "acc",
214
+ "aggregation": "mean",
215
+ "higher_is_better": true
216
+ }
217
+ ],
218
+ "output_type": "loglikelihood",
219
+ "repeats": 1,
220
+ "should_decontaminate": true,
221
+ "doc_to_decontamination_query": "{{text}}"
222
+ },
223
+ "openbookqa": {
224
+ "task": "openbookqa",
225
+ "group": [
226
+ "multiple_choice"
227
+ ],
228
+ "dataset_path": "openbookqa",
229
+ "dataset_name": "main",
230
+ "training_split": "train",
231
+ "validation_split": "validation",
232
+ "test_split": "test",
233
+ "doc_to_text": "question_stem",
234
+ "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}",
235
+ "doc_to_choice": "{{choices.text}}",
236
+ "description": "",
237
+ "target_delimiter": " ",
238
+ "fewshot_delimiter": "\n\n",
239
+ "num_fewshot": 0,
240
+ "metric_list": [
241
+ {
242
+ "metric": "acc",
243
+ "aggregation": "mean",
244
+ "higher_is_better": true
245
+ },
246
+ {
247
+ "metric": "acc_norm",
248
+ "aggregation": "mean",
249
+ "higher_is_better": true
250
+ }
251
+ ],
252
+ "output_type": "multiple_choice",
253
+ "repeats": 1,
254
+ "should_decontaminate": true,
255
+ "doc_to_decontamination_query": "question_stem"
256
+ },
257
+ "piqa": {
258
+ "task": "piqa",
259
+ "group": [
260
+ "multiple_choice"
261
+ ],
262
+ "dataset_path": "piqa",
263
+ "training_split": "train",
264
+ "validation_split": "validation",
265
+ "doc_to_text": "Question: {{goal}}\nAnswer:",
266
+ "doc_to_target": "label",
267
+ "doc_to_choice": "{{[sol1, sol2]}}",
268
+ "description": "",
269
+ "target_delimiter": " ",
270
+ "fewshot_delimiter": "\n\n",
271
+ "num_fewshot": 0,
272
+ "metric_list": [
273
+ {
274
+ "metric": "acc",
275
+ "aggregation": "mean",
276
+ "higher_is_better": true
277
+ },
278
+ {
279
+ "metric": "acc_norm",
280
+ "aggregation": "mean",
281
+ "higher_is_better": true
282
+ }
283
+ ],
284
+ "output_type": "multiple_choice",
285
+ "repeats": 1,
286
+ "should_decontaminate": true,
287
+ "doc_to_decontamination_query": "goal"
288
+ },
289
+ "sciq": {
290
+ "task": "sciq",
291
+ "group": [
292
+ "multiple_choice"
293
+ ],
294
+ "dataset_path": "sciq",
295
+ "training_split": "train",
296
+ "validation_split": "validation",
297
+ "test_split": "test",
298
+ "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:",
299
+ "doc_to_target": 3,
300
+ "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}",
301
+ "description": "",
302
+ "target_delimiter": " ",
303
+ "fewshot_delimiter": "\n\n",
304
+ "num_fewshot": 0,
305
+ "metric_list": [
306
+ {
307
+ "metric": "acc",
308
+ "aggregation": "mean",
309
+ "higher_is_better": true
310
+ },
311
+ {
312
+ "metric": "acc_norm",
313
+ "aggregation": "mean",
314
+ "higher_is_better": true
315
+ }
316
+ ],
317
+ "output_type": "multiple_choice",
318
+ "repeats": 1,
319
+ "should_decontaminate": true,
320
+ "doc_to_decontamination_query": "{{support}} {{question}}"
321
+ },
322
+ "wikitext": {
323
+ "task": "wikitext",
324
+ "group": [
325
+ "perplexity",
326
+ "loglikelihood_rolling"
327
+ ],
328
+ "dataset_path": "EleutherAI/wikitext_document_level",
329
+ "dataset_name": "wikitext-2-raw-v1",
330
+ "training_split": "train",
331
+ "validation_split": "validation",
332
+ "test_split": "test",
333
+ "doc_to_text": "",
334
+ "doc_to_target": "<function wikitext_detokenizer at 0x7f5ef3dc5120>",
335
+ "description": "",
336
+ "target_delimiter": " ",
337
+ "fewshot_delimiter": "\n\n",
338
+ "num_fewshot": 0,
339
+ "metric_list": [
340
+ {
341
+ "metric": "word_perplexity"
342
+ },
343
+ {
344
+ "metric": "byte_perplexity"
345
+ },
346
+ {
347
+ "metric": "bits_per_byte"
348
+ }
349
+ ],
350
+ "output_type": "loglikelihood_rolling",
351
+ "repeats": 1,
352
+ "should_decontaminate": true,
353
+ "doc_to_decontamination_query": "{{page}}"
354
+ },
355
+ "winogrande": {
356
+ "task": "winogrande",
357
+ "dataset_path": "winogrande",
358
+ "dataset_name": "winogrande_xl",
359
+ "training_split": "train",
360
+ "validation_split": "validation",
361
+ "doc_to_text": "<function doc_to_text at 0x7f5ef3dc5360>",
362
+ "doc_to_target": "<function doc_to_target at 0x7f5ef3dc56c0>",
363
+ "doc_to_choice": "<function doc_to_choice at 0x7f5ef3dc5a20>",
364
+ "description": "",
365
+ "target_delimiter": " ",
366
+ "fewshot_delimiter": "\n\n",
367
+ "num_fewshot": 0,
368
+ "metric_list": [
369
+ {
370
+ "metric": "acc",
371
+ "aggregation": "mean",
372
+ "higher_is_better": true
373
+ }
374
+ ],
375
+ "output_type": "multiple_choice",
376
+ "repeats": 1,
377
+ "should_decontaminate": true,
378
+ "doc_to_decontamination_query": "sentence"
379
+ }
380
+ },
381
+ "versions": {
382
+ "arc_challenge": "Yaml",
383
+ "arc_easy": "Yaml",
384
+ "boolq": "Yaml",
385
+ "hellaswag": "Yaml",
386
+ "lambada_openai": "Yaml",
387
+ "openbookqa": "Yaml",
388
+ "piqa": "Yaml",
389
+ "sciq": "Yaml",
390
+ "wikitext": "Yaml",
391
+ "winogrande": "Yaml"
392
+ },
393
+ "config": {
394
+ "model": "hf",
395
+ "model_args": "pretrained=lomahony/eleuther-pythia6.9b-hh-sft",
396
+ "batch_size": "4",
397
+ "batch_sizes": [],
398
+ "device": null,
399
+ "use_cache": null,
400
+ "limit": null,
401
+ "bootstrap_iters": 100000
402
+ },
403
+ "git_hash": "d1a44c8"
404
+ }
sft-6.9b-eval-files/sft-pythia-6.9b-5shot-shelloutput.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ bootstrapping for stddev: perplexity
2
+ hf (pretrained=lomahony/eleuther-pythia6.9b-hh-sft), limit: None, num_fewshot: 5, batch_size: 4
3
+ | Task |Version|Filter| Metric | Value | |Stderr|
4
+ |--------------|-------|------|---------------|------:|---|-----:|
5
+ |arc_challenge |Yaml |none |acc | 0.3584|± |0.0140|
6
+ | | |none |acc_norm | 0.3848|± |0.0142|
7
+ |arc_easy |Yaml |none |acc | 0.7104|± |0.0093|
8
+ | | |none |acc_norm | 0.7134|± |0.0093|
9
+ |boolq |Yaml |none |acc | 0.6664|± |0.0082|
10
+ |hellaswag |Yaml |none |acc | 0.4829|± |0.0050|
11
+ | | |none |acc_norm | 0.6446|± |0.0048|
12
+ |lambada_openai|Yaml |none |perplexity | 5.0215|± |0.1228|
13
+ | | |none |acc | 0.6385|± |0.0067|
14
+ |openbookqa |Yaml |none |acc | 0.2760|± |0.0200|
15
+ | | |none |acc_norm | 0.3700|± |0.0216|
16
+ |piqa |Yaml |none |acc | 0.7688|± |0.0098|
17
+ | | |none |acc_norm | 0.7709|± |0.0098|
18
+ |sciq |Yaml |none |acc | 0.9520|± |0.0068|
19
+ | | |none |acc_norm | 0.9540|± |0.0066|
20
+ |wikitext |Yaml |none |word_perplexity|18.0722| | |
21
+ | | |none |byte_perplexity| 1.6115| | |
22
+ | | |none |bits_per_byte | 0.6884| | |
23
+ |winogrande |Yaml |none |acc | 0.6156|± |0.0137|
24
+
sft-6.9b-eval-files/sft-pythia-6.9b-5shot/results.json ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "results": {
3
+ "arc_challenge": {
4
+ "acc,none": 0.3583617747440273,
5
+ "acc_stderr,none": 0.014012883334859871,
6
+ "acc_norm,none": 0.3848122866894198,
7
+ "acc_norm_stderr,none": 0.014218371065251107
8
+ },
9
+ "arc_easy": {
10
+ "acc,none": 0.7104377104377104,
11
+ "acc_stderr,none": 0.0093068389121739,
12
+ "acc_norm,none": 0.7133838383838383,
13
+ "acc_norm_stderr,none": 0.009278551100969293
14
+ },
15
+ "boolq": {
16
+ "acc,none": 0.6663608562691131,
17
+ "acc_stderr,none": 0.008246805985556873
18
+ },
19
+ "hellaswag": {
20
+ "acc,none": 0.48287193786098387,
21
+ "acc_stderr,none": 0.004986852842576715,
22
+ "acc_norm,none": 0.6445927106154152,
23
+ "acc_norm_stderr,none": 0.004776583530909558
24
+ },
25
+ "lambada_openai": {
26
+ "perplexity,none": 5.021473818972182,
27
+ "perplexity_stderr,none": 0.12279168525921821,
28
+ "acc,none": 0.6384630312439356,
29
+ "acc_stderr,none": 0.006693546839258681
30
+ },
31
+ "openbookqa": {
32
+ "acc,none": 0.276,
33
+ "acc_stderr,none": 0.020011219298073528,
34
+ "acc_norm,none": 0.37,
35
+ "acc_norm_stderr,none": 0.021613289165165785
36
+ },
37
+ "piqa": {
38
+ "acc,none": 0.7687704026115343,
39
+ "acc_stderr,none": 0.009837063180625329,
40
+ "acc_norm,none": 0.7709466811751904,
41
+ "acc_norm_stderr,none": 0.009804509865175504
42
+ },
43
+ "sciq": {
44
+ "acc,none": 0.952,
45
+ "acc_stderr,none": 0.006763264133666668,
46
+ "acc_norm,none": 0.954,
47
+ "acc_norm_stderr,none": 0.006627814717380702
48
+ },
49
+ "wikitext": {
50
+ "word_perplexity,none": 18.072185827315998,
51
+ "byte_perplexity,none": 1.6114783437385283,
52
+ "bits_per_byte,none": 0.6883848005035924
53
+ },
54
+ "winogrande": {
55
+ "acc,none": 0.6156274664561957,
56
+ "acc_stderr,none": 0.01367156760083619
57
+ }
58
+ },
59
+ "configs": {
60
+ "arc_challenge": {
61
+ "task": "arc_challenge",
62
+ "group": [
63
+ "ai2_arc",
64
+ "multiple_choice"
65
+ ],
66
+ "dataset_path": "ai2_arc",
67
+ "dataset_name": "ARC-Challenge",
68
+ "training_split": "train",
69
+ "validation_split": "validation",
70
+ "test_split": "test",
71
+ "doc_to_text": "Question: {{question}}\nAnswer:",
72
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
73
+ "doc_to_choice": "{{choices.text}}",
74
+ "description": "",
75
+ "target_delimiter": " ",
76
+ "fewshot_delimiter": "\n\n",
77
+ "num_fewshot": 5,
78
+ "metric_list": [
79
+ {
80
+ "metric": "acc",
81
+ "aggregation": "mean",
82
+ "higher_is_better": true
83
+ },
84
+ {
85
+ "metric": "acc_norm",
86
+ "aggregation": "mean",
87
+ "higher_is_better": true
88
+ }
89
+ ],
90
+ "output_type": "multiple_choice",
91
+ "repeats": 1,
92
+ "should_decontaminate": true,
93
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
94
+ },
95
+ "arc_easy": {
96
+ "task": "arc_easy",
97
+ "group": [
98
+ "ai2_arc",
99
+ "multiple_choice"
100
+ ],
101
+ "dataset_path": "ai2_arc",
102
+ "dataset_name": "ARC-Easy",
103
+ "training_split": "train",
104
+ "validation_split": "validation",
105
+ "test_split": "test",
106
+ "doc_to_text": "Question: {{question}}\nAnswer:",
107
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
108
+ "doc_to_choice": "{{choices.text}}",
109
+ "description": "",
110
+ "target_delimiter": " ",
111
+ "fewshot_delimiter": "\n\n",
112
+ "num_fewshot": 5,
113
+ "metric_list": [
114
+ {
115
+ "metric": "acc",
116
+ "aggregation": "mean",
117
+ "higher_is_better": true
118
+ },
119
+ {
120
+ "metric": "acc_norm",
121
+ "aggregation": "mean",
122
+ "higher_is_better": true
123
+ }
124
+ ],
125
+ "output_type": "multiple_choice",
126
+ "repeats": 1,
127
+ "should_decontaminate": true,
128
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
129
+ },
130
+ "boolq": {
131
+ "task": "boolq",
132
+ "group": [
133
+ "super-glue-lm-eval-v1"
134
+ ],
135
+ "dataset_path": "super_glue",
136
+ "dataset_name": "boolq",
137
+ "training_split": "train",
138
+ "validation_split": "validation",
139
+ "doc_to_text": "{{passage}}\nQuestion: {{question}}?\nAnswer:",
140
+ "doc_to_target": "label",
141
+ "doc_to_choice": [
142
+ "no",
143
+ "yes"
144
+ ],
145
+ "description": "",
146
+ "target_delimiter": " ",
147
+ "fewshot_delimiter": "\n\n",
148
+ "num_fewshot": 5,
149
+ "metric_list": [
150
+ {
151
+ "metric": "acc"
152
+ }
153
+ ],
154
+ "output_type": "multiple_choice",
155
+ "repeats": 1,
156
+ "should_decontaminate": true,
157
+ "doc_to_decontamination_query": "passage"
158
+ },
159
+ "hellaswag": {
160
+ "task": "hellaswag",
161
+ "group": [
162
+ "multiple_choice"
163
+ ],
164
+ "dataset_path": "hellaswag",
165
+ "training_split": "train",
166
+ "validation_split": "validation",
167
+ "doc_to_text": "{% set text = activity_label ~ ': ' ~ ctx_a ~ ' ' ~ ctx_b.capitalize() %}{{text|trim|replace(' [title]', '. ')|regex_replace('\\[.*?\\]', '')|replace(' ', ' ')}}",
168
+ "doc_to_target": "{{label}}",
169
+ "doc_to_choice": "{{endings|map('trim')|map('replace', ' [title]', '. ')|map('regex_replace', '\\[.*?\\]', '')|map('replace', ' ', ' ')|list}}",
170
+ "description": "",
171
+ "target_delimiter": " ",
172
+ "fewshot_delimiter": "\n\n",
173
+ "num_fewshot": 5,
174
+ "metric_list": [
175
+ {
176
+ "metric": "acc",
177
+ "aggregation": "mean",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc_norm",
182
+ "aggregation": "mean",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "multiple_choice",
187
+ "repeats": 1,
188
+ "should_decontaminate": false
189
+ },
190
+ "lambada_openai": {
191
+ "task": "lambada_openai",
192
+ "group": [
193
+ "lambada",
194
+ "loglikelihood",
195
+ "perplexity"
196
+ ],
197
+ "dataset_path": "EleutherAI/lambada_openai",
198
+ "dataset_name": "default",
199
+ "test_split": "test",
200
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
201
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
202
+ "description": "",
203
+ "target_delimiter": " ",
204
+ "fewshot_delimiter": "\n\n",
205
+ "num_fewshot": 5,
206
+ "metric_list": [
207
+ {
208
+ "metric": "perplexity",
209
+ "aggregation": "perplexity",
210
+ "higher_is_better": false
211
+ },
212
+ {
213
+ "metric": "acc",
214
+ "aggregation": "mean",
215
+ "higher_is_better": true
216
+ }
217
+ ],
218
+ "output_type": "loglikelihood",
219
+ "repeats": 1,
220
+ "should_decontaminate": true,
221
+ "doc_to_decontamination_query": "{{text}}"
222
+ },
223
+ "openbookqa": {
224
+ "task": "openbookqa",
225
+ "group": [
226
+ "multiple_choice"
227
+ ],
228
+ "dataset_path": "openbookqa",
229
+ "dataset_name": "main",
230
+ "training_split": "train",
231
+ "validation_split": "validation",
232
+ "test_split": "test",
233
+ "doc_to_text": "question_stem",
234
+ "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}",
235
+ "doc_to_choice": "{{choices.text}}",
236
+ "description": "",
237
+ "target_delimiter": " ",
238
+ "fewshot_delimiter": "\n\n",
239
+ "num_fewshot": 5,
240
+ "metric_list": [
241
+ {
242
+ "metric": "acc",
243
+ "aggregation": "mean",
244
+ "higher_is_better": true
245
+ },
246
+ {
247
+ "metric": "acc_norm",
248
+ "aggregation": "mean",
249
+ "higher_is_better": true
250
+ }
251
+ ],
252
+ "output_type": "multiple_choice",
253
+ "repeats": 1,
254
+ "should_decontaminate": true,
255
+ "doc_to_decontamination_query": "question_stem"
256
+ },
257
+ "piqa": {
258
+ "task": "piqa",
259
+ "group": [
260
+ "multiple_choice"
261
+ ],
262
+ "dataset_path": "piqa",
263
+ "training_split": "train",
264
+ "validation_split": "validation",
265
+ "doc_to_text": "Question: {{goal}}\nAnswer:",
266
+ "doc_to_target": "label",
267
+ "doc_to_choice": "{{[sol1, sol2]}}",
268
+ "description": "",
269
+ "target_delimiter": " ",
270
+ "fewshot_delimiter": "\n\n",
271
+ "num_fewshot": 5,
272
+ "metric_list": [
273
+ {
274
+ "metric": "acc",
275
+ "aggregation": "mean",
276
+ "higher_is_better": true
277
+ },
278
+ {
279
+ "metric": "acc_norm",
280
+ "aggregation": "mean",
281
+ "higher_is_better": true
282
+ }
283
+ ],
284
+ "output_type": "multiple_choice",
285
+ "repeats": 1,
286
+ "should_decontaminate": true,
287
+ "doc_to_decontamination_query": "goal"
288
+ },
289
+ "sciq": {
290
+ "task": "sciq",
291
+ "group": [
292
+ "multiple_choice"
293
+ ],
294
+ "dataset_path": "sciq",
295
+ "training_split": "train",
296
+ "validation_split": "validation",
297
+ "test_split": "test",
298
+ "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:",
299
+ "doc_to_target": 3,
300
+ "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}",
301
+ "description": "",
302
+ "target_delimiter": " ",
303
+ "fewshot_delimiter": "\n\n",
304
+ "num_fewshot": 5,
305
+ "metric_list": [
306
+ {
307
+ "metric": "acc",
308
+ "aggregation": "mean",
309
+ "higher_is_better": true
310
+ },
311
+ {
312
+ "metric": "acc_norm",
313
+ "aggregation": "mean",
314
+ "higher_is_better": true
315
+ }
316
+ ],
317
+ "output_type": "multiple_choice",
318
+ "repeats": 1,
319
+ "should_decontaminate": true,
320
+ "doc_to_decontamination_query": "{{support}} {{question}}"
321
+ },
322
+ "wikitext": {
323
+ "task": "wikitext",
324
+ "group": [
325
+ "perplexity",
326
+ "loglikelihood_rolling"
327
+ ],
328
+ "dataset_path": "EleutherAI/wikitext_document_level",
329
+ "dataset_name": "wikitext-2-raw-v1",
330
+ "training_split": "train",
331
+ "validation_split": "validation",
332
+ "test_split": "test",
333
+ "doc_to_text": "",
334
+ "doc_to_target": "<function wikitext_detokenizer at 0x7fed99361120>",
335
+ "description": "",
336
+ "target_delimiter": " ",
337
+ "fewshot_delimiter": "\n\n",
338
+ "num_fewshot": 5,
339
+ "metric_list": [
340
+ {
341
+ "metric": "word_perplexity"
342
+ },
343
+ {
344
+ "metric": "byte_perplexity"
345
+ },
346
+ {
347
+ "metric": "bits_per_byte"
348
+ }
349
+ ],
350
+ "output_type": "loglikelihood_rolling",
351
+ "repeats": 1,
352
+ "should_decontaminate": true,
353
+ "doc_to_decontamination_query": "{{page}}"
354
+ },
355
+ "winogrande": {
356
+ "task": "winogrande",
357
+ "dataset_path": "winogrande",
358
+ "dataset_name": "winogrande_xl",
359
+ "training_split": "train",
360
+ "validation_split": "validation",
361
+ "doc_to_text": "<function doc_to_text at 0x7fed99361360>",
362
+ "doc_to_target": "<function doc_to_target at 0x7fed993616c0>",
363
+ "doc_to_choice": "<function doc_to_choice at 0x7fed99361a20>",
364
+ "description": "",
365
+ "target_delimiter": " ",
366
+ "fewshot_delimiter": "\n\n",
367
+ "num_fewshot": 5,
368
+ "metric_list": [
369
+ {
370
+ "metric": "acc",
371
+ "aggregation": "mean",
372
+ "higher_is_better": true
373
+ }
374
+ ],
375
+ "output_type": "multiple_choice",
376
+ "repeats": 1,
377
+ "should_decontaminate": true,
378
+ "doc_to_decontamination_query": "sentence"
379
+ }
380
+ },
381
+ "versions": {
382
+ "arc_challenge": "Yaml",
383
+ "arc_easy": "Yaml",
384
+ "boolq": "Yaml",
385
+ "hellaswag": "Yaml",
386
+ "lambada_openai": "Yaml",
387
+ "openbookqa": "Yaml",
388
+ "piqa": "Yaml",
389
+ "sciq": "Yaml",
390
+ "wikitext": "Yaml",
391
+ "winogrande": "Yaml"
392
+ },
393
+ "config": {
394
+ "model": "hf",
395
+ "model_args": "pretrained=lomahony/eleuther-pythia6.9b-hh-sft",
396
+ "batch_size": "4",
397
+ "batch_sizes": [],
398
+ "device": null,
399
+ "use_cache": null,
400
+ "limit": null,
401
+ "bootstrap_iters": 100000
402
+ },
403
+ "git_hash": "d1a44c8"
404
+ }