---
license: cc-by-sa-4.0
configs:
- config_name: bbh_logical_deduction_three_objects
  data_files:
  - split: test
    path: bbh_logical_deduction_three_objects/test-*
- config_name: bbh_navigate
  data_files:
  - split: test
    path: bbh_navigate/test-*
- config_name: bbh_object_counting
  data_files:
  - split: test
    path: bbh_object_counting/test-*
- config_name: drop
  data_files:
  - split: test
    path: drop/test-*
- config_name: gsm8k
  data_files:
  - split: test
    path: gsm8k/test-*
- config_name: hotpotqa
  data_files:
  - split: test
    path: hotpotqa/test-*
- config_name: mmlu_math
  data_files:
  - split: test
    path: mmlu_math/test-*
- config_name: multiarith
  data_files:
  - split: test
    path: multiarith/test-*
- config_name: singleop
  data_files:
  - split: test
    path: singleop/test-*
- config_name: singleq
  data_files:
  - split: test
    path: singleq/test-*
- config_name: squad
  data_files:
  - split: test
    path: squad/test-*
- config_name: svamp
  data_files:
  - split: test
    path: svamp/test-*
- config_name: tab_fact
  data_files:
  - split: test
    path: tab_fact/test-*
- config_name: vqa
  data_files:
  - split: test
    path: vqa/test-*
- config_name: winograd_wsc
  data_files:
  - split: test
    path: winograd_wsc/test-*
dataset_info:
- config_name: bbh_logical_deduction_three_objects
  features:
  - name: cleaning_status
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: string
  - name: platinum_parsing_strategy
    dtype: string
  - name: input
    dtype: string
  - name: target
    dtype: string
  splits:
  - name: test
    num_bytes: 305159
    num_examples: 200
  download_size: 60084
  dataset_size: 305159
- config_name: bbh_navigate
  features:
  - name: cleaning_status
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: string
  - name: platinum_parsing_strategy
    dtype: string
  - name: input
    dtype: string
  - name: target
    dtype: string
  splits:
  - name: test
    num_bytes: 166521
    num_examples: 200
  download_size: 29525
  dataset_size: 166521
- config_name: bbh_object_counting
  features:
  - name: cleaning_status
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: string
  - name: platinum_parsing_strategy
    dtype: string
  - name: input
    dtype: string
  - name: target
    dtype: string
  splits:
  - name: test
    num_bytes: 128265
    num_examples: 200
  download_size: 31211
  dataset_size: 128265
- config_name: drop
  features:
  - name: cleaning_status
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: string
  - name: platinum_parsing_strategy
    dtype: string
  - name: section_id
    dtype: string
  - name: query_id
    dtype: string
  - name: passage
    dtype: string
  - name: question
    dtype: string
  - name: answers_spans
    struct:
    - name: spans
      sequence: string
    - name: types
      sequence: string
  splits:
  - name: test
    num_bytes: 957113
    num_examples: 250
  download_size: 469801
  dataset_size: 957113
- config_name: gsm8k
  features:
  - name: cleaning_status
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: string
  - name: platinum_parsing_strategy
    dtype: string
  - name: question
    dtype: string
  - name: answer
    dtype: string
  splits:
  - name: test
    num_bytes: 411558
    num_examples: 300
  download_size: 200727
  dataset_size: 411558
- config_name: hotpotqa
  features:
  - name: cleaning_status
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: string
  - name: platinum_parsing_strategy
    dtype: string
  - name: id
    dtype: string
  - name: question
    dtype: string
  - name: answer
    dtype: string
  - name: type
    dtype: string
  - name: level
    dtype: string
  - name: supporting_facts
    struct:
    - name: sent_id
      sequence: int64
    - name: title
      sequence: string
  - name: context
    struct:
    - name: sentences
      sequence:
        sequence: string
    - name: title
      sequence: string
  splits:
  - name: test
    num_bytes: 2163497
    num_examples: 250
  download_size: 1287407
  dataset_size: 2163497
- config_name: mmlu_math
  features:
  - name: cleaning_status
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: string
  - name: platinum_parsing_strategy
    dtype: string
  - name: question
    dtype: string
  - name: subject
    dtype: string
  - name: choices
    sequence: string
  - name: answer
    dtype: int64
  splits:
  - name: test
    num_bytes: 287231
    num_examples: 270
  download_size: 113739
  dataset_size: 287231
- config_name: multiarith
  features:
  - name: cleaning_status
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: string
  - name: platinum_parsing_strategy
    dtype: string
  - name: input
    dtype: string
  - name: output_program
    dtype: string
  - name: output_answer
    dtype: string
  - name: split
    dtype: string
  - name: dataset
    dtype: string
  splits:
  - name: test
    num_bytes: 157371
    num_examples: 174
  download_size: 54214
  dataset_size: 157371
- config_name: singleop
  features:
  - name: cleaning_status
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: string
  - name: platinum_parsing_strategy
    dtype: string
  - name: input
    dtype: string
  - name: output_program
    dtype: string
  - name: output_answer
    dtype: string
  - name: split
    dtype: string
  - name: dataset
    dtype: string
  splits:
  - name: test
    num_bytes: 118922
    num_examples: 159
  download_size: 45006
  dataset_size: 118922
- config_name: singleq
  features:
  - name: cleaning_status
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: string
  - name: platinum_parsing_strategy
    dtype: string
  - name: input
    dtype: string
  - name: output_program
    dtype: string
  - name: output_answer
    dtype: string
  - name: split
    dtype: string
  - name: dataset
    dtype: string
  splits:
  - name: test
    num_bytes: 96097
    num_examples: 109
  download_size: 39915
  dataset_size: 96097
- config_name: squad
  features:
  - name: cleaning_status
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: string
  - name: platinum_parsing_strategy
    dtype: string
  - name: id
    dtype: string
  - name: title
    dtype: string
  - name: context
    dtype: string
  - name: question
    dtype: string
  - name: answers
    struct:
    - name: answer_start
      sequence: int64
    - name: text
      sequence: string
  splits:
  - name: test
    num_bytes: 860040
    num_examples: 250
  download_size: 464857
  dataset_size: 860040
- config_name: svamp
  features:
  - name: cleaning_status
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: string
  - name: platinum_parsing_strategy
    dtype: string
  - name: ID
    dtype: string
  - name: Body
    dtype: string
  - name: Question
    dtype: string
  - name: Equation
    dtype: string
  - name: Answer
    dtype: string
  - name: Type
    dtype: string
  - name: question_concat
    dtype: string
  splits:
  - name: test
    num_bytes: 322658
    num_examples: 300
  download_size: 116772
  dataset_size: 322658
- config_name: tab_fact
  features:
  - name: cleaning_status
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: string
  - name: platinum_parsing_strategy
    dtype: string
  - name: id
    dtype: int64
  - name: table_id
    dtype: string
  - name: table_text
    dtype: string
  - name: table_caption
    dtype: string
  - name: statement
    dtype: string
  - name: label
    dtype: int64
  splits:
  - name: test
    num_bytes: 1137041
    num_examples: 200
  download_size: 475116
  dataset_size: 1137041
- config_name: vqa
  features:
  - name: cleaning_status
    dtype: string
  - name: image_path
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: 'null'
  - name: platinum_parsing_stratagy
    dtype: string
  - name: question_type
    dtype: string
  - name: multiple_choice_answer
    dtype: string
  - name: answers
    list:
    - name: answer
      dtype: string
    - name: answer_confidence
      dtype: string
    - name: answer_id
      dtype: int64
  - name: image_id
    dtype: int64
  - name: answer_type
    dtype: string
  - name: question_id
    dtype: int64
  - name: question
    dtype: string
  splits:
  - name: test
    num_bytes: 122801
    num_examples: 242
  download_size: 26070
  dataset_size: 122801
- config_name: winograd_wsc
  features:
  - name: cleaning_status
    dtype: string
  - name: platinum_prompt
    dtype: string
  - name: platinum_prompt_no_cot
    dtype: string
  - name: platinum_target
    sequence: string
  - name: original_target
    sequence: string
  - name: platinum_parsing_strategy
    dtype: string
  - name: text
    dtype: string
  - name: pronoun
    dtype: string
  - name: pronoun_loc
    dtype: int64
  - name: quote
    dtype: string
  - name: quote_loc
    dtype: int64
  - name: options
    sequence: string
  - name: label
    dtype: int64
  - name: source
    dtype: string
  splits:
  - name: test
    num_bytes: 198631
    num_examples: 200
  download_size: 54961
  dataset_size: 198631
task_categories:
- question-answering
language:
- en
---

# Dataset Card for PlatinumBench

[**🏆 Leaderboard**](http://platinum-bench.csail.mit.edu/)  |  [**🖥️ Code**](https://github.com/MadryLab/platinum-benchmarks/)  |  [**📖 Paper**](https://arxiv.org/abs/2502.03461)  |  [**🔍 Error Viewer**](http://platinum-bench.csail.mit.edu/inspect)

## Dataset Description

- **Homepage:** http://platinum-bench.csail.mit.edu/
- **Repository:** https://github.com/MadryLab/platinum-benchmarks/
- **Paper:** https://arxiv.org/abs/2502.03461
- **Leaderboard:** http://platinum-bench.csail.mit.edu/
- **Point of Contact:** [Joshua Vendrow](mailto:jvendrow@mit.edu), [Edward Vendrow](mailto:evendrow@mit.edu)

### Dataset Summary

_**Platinum Benchmarks**_ are benchmarks that have been carefully curated to minimize label errors and ambiguity, allowing us to measure the reliability of models.

This dataset contains fifteen platinum benchmarks created by manually revising questions from existing datasets (see the GitHub repo for details on accessing our revised subset of VQA). To revise each benchmark, we ran a variety of frontier models on individual examples and manually re-annotated any example for which at least one model made an error. See the paper for further details on the revision process.

### Load the Dataset

To load the dataset using HuggingFace `datasets`, you first need to `pip install datasets`, then run the following code:

```python
from datasets import load_dataset

ds = load_dataset("madrylab/platinum-bench", name="gsm8k", split="test") # or another subset
ds = ds.filter(lambda x: x['cleaning_status'] != 'rejected') # filter out rejected questions
```
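
As a quick sanity check, a minimal evaluation loop over the standardized prompts might look like the sketch below. Note that `answer_fn` is a placeholder for your own model call, and the single regex is only a stand-in for the per-dataset parsers in our code repository:

```python
import re

from datasets import load_dataset

ds = load_dataset("madrylab/platinum-bench", name="gsm8k", split="test")
ds = ds.filter(lambda x: x["cleaning_status"] != "rejected")

def answer_fn(prompt: str) -> str:
    # Placeholder: swap in a call to your model of choice.
    return "Answer: 0"

correct = 0
for example in ds:
    response = answer_fn(example["platinum_prompt"])
    # The prompts request a final line in the form "Answer: XXX".
    match = re.search(r"Answer:\s*(.+)", response)
    predicted = match.group(1).strip() if match else ""
    correct += predicted in example["platinum_target"]

print(f"Accuracy: {correct / len(ds):.3f}")
```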

## Dataset structure

### Dataset Subsets & Cleaning Statistics

Below we list each of the platinum benchmarks along with the number of examples in each that reached consensus, were revised, were verified, or were rejected. See "Data Fields" for a description of what each cleaning status means.

| Dataset | # Included | Consensus | Revised | Verified | Rejected (excluded) |
| ----- | ----- | ----- | ----- | ----- | ----- |
| SingleOp (Platinum) | **150** | 142 | 0 | 8 | 9 |
| SingleEq (Platinum) | **100** | 87 | 0 | 13 | 9 |
| MultiArith (Platinum) | **171** | 165 | 3 | 3 | 3 |
| SVAMP (Platinum) | **268** | 222 | 3 | 43 | 32 |
| GSM8K (Platinum) | **271** | 227 | 1 | 43 | 29 |
| MMLU High‑School Math (Platinum) | **268** | 106 | 0 | 162 | 2 |
| Logic. Ded. 3-Obj (Platinum) | **200** | 199 | 0 | 1 | 0 |
| Object Counting (Platinum) | **190** | 58 | 0 | 132 | 10 |
| Navigate (Platinum) | **200** | 134 | 0 | 66 | 0 |
| TabFact (Platinum) | **173** | 58 | 3 | 112 | 27 |
| HotPotQA (Platinum) | **183** | 48 | 89 | 46 | 67 |
| SQuAD2.0 (Platinum) | **164** | 78 | 43 | 43 | 86 |
| DROP (Platinum) | **209** | 30 | 177 | 2 | 41 |
| Winograd WSC (Platinum) | **195** | 77 | 0 | 118 | 5 |
| VQA (Platinum) | **242** | 0 | 242 | 0 | 358 |

### Data Instances

We accessed each of the fourteen original natural language benchmarks that we revised from their respective Hugging Face repositories, and each benchmark had its own per-instance data fields/columns. We have standardized these benchmarks by providing pre-constructed prompts for each dataset (under 'platinum_prompt'). Each prompt template automatically formats the relevant dataset columns into a consistent structure. You can use these standardized prompts directly, but we also include the original dataset columns for those interested in doing their own prompting, or in seamlessly substituting our revised benchmarks for the original versions.

For VQA, we source images and annotations from their [official website](https://visualqa.org/download.html), and reference images by their image path in the original downloaded directory format (see our GitHub repository for additional details).
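
For instance, assuming the VQA images have been downloaded and unpacked into a local directory (the `vqa_images/` root below is a hypothetical path), an image can be loaded roughly like this:

```python
from pathlib import Path

from PIL import Image
from datasets import load_dataset

ds = load_dataset("madrylab/platinum-bench", name="vqa", split="test")

IMAGE_ROOT = Path("vqa_images")  # hypothetical: wherever the official archives were unpacked
example = ds[0]
image = Image.open(IMAGE_ROOT / example["image_path"])
print(example["question"], image.size)
```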

An example from the PlatinumBench GSM8K subset looks as follows:
```
{'cleaning_status': 'consensus',
 'platinum_prompt': 'Solve the following math word problem.\n\nA robe takes 2 bolts of blue fiber and half that much white fiber.  How many bolts in total does it take?\n\nThink step-by-step. Then, provide the final answer as a single integer in the format "Answer: XXX" with no extra formatting.',
 'platinum_prompt_no_cot': 'Solve the following math word problem.\n\nA robe takes 2 bolts of blue fiber and half that much white fiber.  How many bolts in total does it take?\n\nThen, provide the final answer as a single integer in the format "Answer: XXX" with no extra formatting.',
 'platinum_target': ['3'],
 'platinum_parsing_strategy': 'math',
 'original_target': ['3'],
 'question': 'A robe takes 2 bolts of blue fiber and half that much white fiber.  How many bolts in total does it take?',
 'answer': 'It takes 2/2=<<2/2=1>>1 bolt of white fiber\nSo the total amount of fabric is 2+1=<<2+1=3>>3 bolts of fabric\n#### 3'}
```

### Data Fields
- **cleaning_status** (`str`): One of:
    1. *consensus*: all LLMs agreed with the label, so the example was not manually reviewed (`platinum_target` == `original_target` by default).
    2. *verified*: the original target was manually verified to be correct (`platinum_target` == `original_target`).
    3. *revised*: the label is updated from the original label (`platinum_target` != `original_target`).
    4. *rejected*: the example is removed due to issues such as ambiguity.
- **platinum_prompt** (`str`): A chain-of-thought question prompt that can be asked directly to a language model. This is constructed from fields in the original dataset.
- **platinum_prompt_no_cot** (`str`): The same prompt, but without explicit chain-of-thought instructions. This is used for models like `o1` that don't need chain-of-thought prompting.
- **platinum_target** (`List[str]`): The list of all correct answers for the question. In most cases there is just one correct answer.
- **original_target** (`List[str]`): The original target provided in the dataset. This can differ from the platinum target if the original is incorrect.
- **platinum_parsing_strategy** (`str`): The parser that should be used to parse the LLM answer. Refer to the provided code.
- **image_path** (`str`): Only included for VQA. The image path from which to source the relevant image, such as `'val2014/COCO_val2014_000000304481.jpg'`.
- We also include all of the original dataset columns after these.

> [!NOTE]
> This HuggingFace dataset includes rejected questions that are not used for evaluation. To use only questions that we include in our platinum benchmarks, make sure to filter these out:
> 
>`ds = ds.filter(lambda x: x['cleaning_status'] != 'rejected')`
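
For illustration, here is a quick sketch of how one might tally the cleaning statuses of a subset and inspect the examples whose labels changed during re-annotation, using the fields documented above:

```python
from collections import Counter

from datasets import load_dataset

ds = load_dataset("madrylab/platinum-bench", name="gsm8k", split="test")

# How many examples fall under each cleaning status.
print(Counter(ds["cleaning_status"]))

# Examples whose label was updated during manual re-annotation.
for ex in ds.filter(lambda x: x["cleaning_status"] == "revised"):
    print(ex["original_target"], "->", ex["platinum_target"])
```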

### Prompt Example

Here is an example of the standardized prompt we provide for a question from MultiArith:

```
Solve the following math word problem.

At the schools book fair Sam bought 13 adventure books and 17 mystery books. If 15 of the books were used, how many new books did he buy?

Think step-by-step. Then, provide the final answer as a single number in the format "Answer: XXX" with no extra formatting.
```

The specific prompt template and parsing strategy depend on the dataset, although many of them are shared across datasets.
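
The reference parsers live in our code repository; purely as an illustration (a simplified sketch, not the reference implementation), a `math`-style final answer could be extracted along these lines:

```python
import re
from typing import Optional

def parse_math_answer(response: str) -> Optional[str]:
    """Extract the final number from a response that follows the
    'Answer: XXX' convention used by the standardized prompts."""
    match = re.search(r"Answer:\s*\$?(-?[\d,]+(?:\.\d+)?)", response)
    if match is None:
        return None
    return match.group(1).replace(",", "")  # normalize "1,000" -> "1000"

assert parse_math_answer("... so he bought 15 new books.\nAnswer: 15") == "15"
```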

## Dataset Creation

### Curation Rationale

Many current LLM benchmarks are riddled with label noise such as mislabeled or ambiguous questions. Due to this label noise, progress on these benchmarks often stalls before models actually achieve reliable performance on them. As a result, the community often considers these benchmarks to be "saturated" and discards them too early, discouraging machine learning practitioners from ever striving to achieve proper reliability. As a first step towards addressing this gap in benchmarking practices, we revise samples from fifteen "saturated" benchmarks to minimize label noise.

### Source Data and Attribution
Each of the fifteen benchmarks that we revise was sourced from the following locations (Hugging Face repositories, except for VQA):


|                       |  Type   | URL | Subset | Split | License
| -----                 | ------ | ----- | ---- | ----| ----|
| SingleOp              | Math  | https://huggingface.co/datasets/allenai/lila          | singleop                        | test       | [CC&nbsp;BY&nbsp;4.0](https://github.com/allenai/Lila/blob/main/LICENSE.txt)
| SingleEq              | Math  | https://huggingface.co/datasets/allenai/lila          | singleeq                        | test       | [CC&nbsp;BY&nbsp;4.0](https://github.com/allenai/Lila/blob/main/LICENSE.txt)
| MultiArith            | Math  | https://huggingface.co/datasets/allenai/lila          | multiarith                      | test       | [CC&nbsp;BY&nbsp;4.0](https://github.com/allenai/Lila/blob/main/LICENSE.txt)
| SVAMP                 | Math  | https://huggingface.co/datasets/ChilleD/svamp         | default                         | test       | [MIT](https://github.com/arkilpatel/SVAMP/blob/main/LICENSE)
| GSM8K                 | Math  | https://huggingface.co/datasets/openai/gsm8k          | main                            | test       | [MIT](https://github.com/openai/grade-school-math/blob/master/LICENSE)
| MMLU&nbsp;High‑School&nbsp;Math | Math  | https://huggingface.co/datasets/cais/mmlu             | high_school_mathematics         | test       | [MIT](https://github.com/hendrycks/test/blob/master/LICENSE)
| Logic.&nbsp;Ded.&nbsp;3-Obj     | Logic | https://huggingface.co/datasets/maveriq/bigbenchhard  | logical_deduction_three_objects | train      | [MIT](https://github.com/suzgunmirac/BIG-Bench-Hard/blob/main/LICENSE)
| Object Counting       | Logic | https://huggingface.co/datasets/maveriq/bigbenchhard  | object_counting                 | train      | [MIT](https://github.com/suzgunmirac/BIG-Bench-Hard/blob/main/LICENSE)
| Navigate              | Logic | https://huggingface.co/datasets/maveriq/bigbenchhard  | navigate                        | train      | [MIT](https://github.com/suzgunmirac/BIG-Bench-Hard/blob/main/LICENSE)
| TabFact               | Table&nbsp;Understanding   | https://huggingface.co/datasets/wenhu/tab_fact        | tab_fact                        | test       | [CC&nbsp;BY&nbsp;4.0](https://creativecommons.org/licenses/by/4.0/legalcode)
| HotPotQA             | Reading&nbsp;Comp.    | https://huggingface.co/datasets/hotpotqa/hotpot_qa     | distractor                      | validation | [CC&nbsp;BY‑SA&nbsp;4.0](https://creativecommons.org/licenses/by-sa/4.0/legalcode)
| SQuAD2.0              | Reading&nbsp;Comp.    | https://huggingface.co/datasets/rajpurkar/squad_v2    | squad_v2                        | validation | [CC&nbsp;BY‑SA&nbsp;4.0](https://creativecommons.org/licenses/by-sa/4.0/legalcode)
| DROP                  | Reading&nbsp;Comp.    | https://huggingface.co/datasets/ucinlp/drop           | default                         | validation | [CC&nbsp;BY‑SA&nbsp;4.0](https://creativecommons.org/licenses/by-sa/4.0/legalcode)
| Winograd WSC         | Commonsense    | https://huggingface.co/datasets/ErnestSDavis/winograd_wsc  | wsc285 | test | [CC&nbsp;BY&nbsp;4.0](https://creativecommons.org/licenses/by/4.0/legalcode)
| VQA                            | Vision     | https://visualqa.org/download.html                                         | N/A       |    validation | [CC&nbsp;BY&nbsp;4.0](https://creativecommons.org/licenses/by/4.0/legalcode)

Please refer to the dataset cards of these benchmarks for further details on their collection and annotation processes.


## Additional Information

### Licensing Information

See the table above for the licensing information of the original datasets upon which our work is based. The further annotations we provide are licensed under the [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/legalcode) license.

### Citation Information
Cite this dataset and the source datasets (see [sources.bib](https://github.com/MadryLab/platinum-benchmarks/blob/main/sources.bib)).

```
@misc{vendrow2025largelanguagemodelbenchmarks,
      title={Do Large Language Model Benchmarks Test Reliability?}, 
      author={Joshua Vendrow and Edward Vendrow and Sara Beery and Aleksander Madry},
      year={2025},
      eprint={2502.03461},
      archivePrefix={arXiv},
      primaryClass={cs.LG},
      url={https://arxiv.org/abs/2502.03461}, 
}
```