This shouldn't be possible (yet)

#1
opened by pierric (HF staff)
Files changed (43)
  1. README.md +63 -640
  2. cola/train-00000-of-00001.parquet +0 -3
  3. dataset_infos.json +1 -0
  4. ax/test-00000-of-00001.parquet → dummy/ax/1.0.0/dummy_data.zip +2 -2
  5. cola/test-00000-of-00001.parquet → dummy/cola/1.0.0/dummy_data.zip +2 -2
  6. mrpc/validation-00000-of-00001.parquet → dummy/mnli/1.0.0/dummy_data.zip +2 -2
  7. cola/validation-00000-of-00001.parquet → dummy/mrpc/1.0.0/dummy_data.zip +2 -2
  8. dummy/qnli/1.0.0/dummy_data.zip +3 -0
  9. dummy/qqp/1.0.0/dummy_data.zip +3 -0
  10. dummy/rte/1.0.0/dummy_data.zip +3 -0
  11. dummy/sst2/1.0.0/dummy_data.zip +3 -0
  12. dummy/stsb/1.0.0/dummy_data.zip +3 -0
  13. dummy/wnli/1.0.0/dummy_data.zip +3 -0
  14. glue.py +628 -0
  15. mnli/test_matched-00000-of-00001.parquet +0 -3
  16. mnli/test_mismatched-00000-of-00001.parquet +0 -3
  17. mnli/train-00000-of-00001.parquet +0 -3
  18. mnli/validation_matched-00000-of-00001.parquet +0 -3
  19. mnli/validation_mismatched-00000-of-00001.parquet +0 -3
  20. mnli_matched/test-00000-of-00001.parquet +0 -3
  21. mnli_matched/validation-00000-of-00001.parquet +0 -3
  22. mnli_mismatched/test-00000-of-00001.parquet +0 -3
  23. mnli_mismatched/validation-00000-of-00001.parquet +0 -3
  24. mrpc/test-00000-of-00001.parquet +0 -3
  25. mrpc/train-00000-of-00001.parquet +0 -3
  26. qnli/test-00000-of-00001.parquet +0 -3
  27. qnli/train-00000-of-00001.parquet +0 -3
  28. qnli/validation-00000-of-00001.parquet +0 -3
  29. qqp/test-00000-of-00001.parquet +0 -3
  30. qqp/train-00000-of-00001.parquet +0 -3
  31. qqp/validation-00000-of-00001.parquet +0 -3
  32. rte/test-00000-of-00001.parquet +0 -3
  33. rte/train-00000-of-00001.parquet +0 -3
  34. rte/validation-00000-of-00001.parquet +0 -3
  35. sst2/test-00000-of-00001.parquet +0 -3
  36. sst2/train-00000-of-00001.parquet +0 -3
  37. sst2/validation-00000-of-00001.parquet +0 -3
  38. stsb/test-00000-of-00001.parquet +0 -3
  39. stsb/train-00000-of-00001.parquet +0 -3
  40. stsb/validation-00000-of-00001.parquet +0 -3
  41. wnli/test-00000-of-00001.parquet +0 -3
  42. wnli/train-00000-of-00001.parquet +0 -3
  43. wnli/validation-00000-of-00001.parquet +0 -3
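
The list above captures the direction of the change: the per-config Parquet shards and the auto-generated card metadata are removed, and the script-based layout (glue.py, the dummy_data.zip archives, and dataset_infos.json) is restored. For orientation, here is a minimal sketch, assuming only the standard `datasets` library API (it is not code from this PR): the user-facing call is the same regardless of which backing layout the repo uses.

```python
# Minimal sketch (not part of this PR): loading a GLUE config with the
# `datasets` library. Config names and splits follow the README metadata
# shown in the diff below (e.g. cola has train/validation/test splits).
from datasets import load_dataset

cola = load_dataset("glue", "cola")
print(cola)              # DatasetDict with train, validation and test splits
print(cola["train"][0])  # {'sentence': ..., 'label': 0 or 1, 'idx': ...}
```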
README.md CHANGED
@@ -6,7 +6,7 @@ language_creators:
6
  language:
7
  - en
8
  license:
9
- - other
10
  multilinguality:
11
  - monolingual
12
  size_categories:
@@ -20,422 +20,12 @@ task_ids:
20
  - natural-language-inference
21
  - semantic-similarity-scoring
22
  - sentiment-classification
23
  - text-scoring
24
  paperswithcode_id: glue
25
  pretty_name: GLUE (General Language Understanding Evaluation benchmark)
26
- config_names:
27
- - ax
28
- - cola
29
- - mnli
30
- - mnli_matched
31
- - mnli_mismatched
32
- - mrpc
33
- - qnli
34
- - qqp
35
- - rte
36
- - sst2
37
- - stsb
38
- - wnli
39
- tags:
40
- - qa-nli
41
- - coreference-nli
42
- - paraphrase-identification
43
- dataset_info:
44
- - config_name: ax
45
- features:
46
- - name: premise
47
- dtype: string
48
- - name: hypothesis
49
- dtype: string
50
- - name: label
51
- dtype:
52
- class_label:
53
- names:
54
- '0': entailment
55
- '1': neutral
56
- '2': contradiction
57
- - name: idx
58
- dtype: int32
59
- splits:
60
- - name: test
61
- num_bytes: 237694
62
- num_examples: 1104
63
- download_size: 80767
64
- dataset_size: 237694
65
- - config_name: cola
66
- features:
67
- - name: sentence
68
- dtype: string
69
- - name: label
70
- dtype:
71
- class_label:
72
- names:
73
- '0': unacceptable
74
- '1': acceptable
75
- - name: idx
76
- dtype: int32
77
- splits:
78
- - name: train
79
- num_bytes: 484869
80
- num_examples: 8551
81
- - name: validation
82
- num_bytes: 60322
83
- num_examples: 1043
84
- - name: test
85
- num_bytes: 60513
86
- num_examples: 1063
87
- download_size: 326394
88
- dataset_size: 605704
89
- - config_name: mnli
90
- features:
91
- - name: premise
92
- dtype: string
93
- - name: hypothesis
94
- dtype: string
95
- - name: label
96
- dtype:
97
- class_label:
98
- names:
99
- '0': entailment
100
- '1': neutral
101
- '2': contradiction
102
- - name: idx
103
- dtype: int32
104
- splits:
105
- - name: train
106
- num_bytes: 74619646
107
- num_examples: 392702
108
- - name: validation_matched
109
- num_bytes: 1833783
110
- num_examples: 9815
111
- - name: validation_mismatched
112
- num_bytes: 1949231
113
- num_examples: 9832
114
- - name: test_matched
115
- num_bytes: 1848654
116
- num_examples: 9796
117
- - name: test_mismatched
118
- num_bytes: 1950703
119
- num_examples: 9847
120
- download_size: 57168425
121
- dataset_size: 82202017
122
- - config_name: mnli_matched
123
- features:
124
- - name: premise
125
- dtype: string
126
- - name: hypothesis
127
- dtype: string
128
- - name: label
129
- dtype:
130
- class_label:
131
- names:
132
- '0': entailment
133
- '1': neutral
134
- '2': contradiction
135
- - name: idx
136
- dtype: int32
137
- splits:
138
- - name: validation
139
- num_bytes: 1833783
140
- num_examples: 9815
141
- - name: test
142
- num_bytes: 1848654
143
- num_examples: 9796
144
- download_size: 2435055
145
- dataset_size: 3682437
146
- - config_name: mnli_mismatched
147
- features:
148
- - name: premise
149
- dtype: string
150
- - name: hypothesis
151
- dtype: string
152
- - name: label
153
- dtype:
154
- class_label:
155
- names:
156
- '0': entailment
157
- '1': neutral
158
- '2': contradiction
159
- - name: idx
160
- dtype: int32
161
- splits:
162
- - name: validation
163
- num_bytes: 1949231
164
- num_examples: 9832
165
- - name: test
166
- num_bytes: 1950703
167
- num_examples: 9847
168
- download_size: 2509009
169
- dataset_size: 3899934
170
- - config_name: mrpc
171
- features:
172
- - name: sentence1
173
- dtype: string
174
- - name: sentence2
175
- dtype: string
176
- - name: label
177
- dtype:
178
- class_label:
179
- names:
180
- '0': not_equivalent
181
- '1': equivalent
182
- - name: idx
183
- dtype: int32
184
- splits:
185
- - name: train
186
- num_bytes: 943843
187
- num_examples: 3668
188
- - name: validation
189
- num_bytes: 105879
190
- num_examples: 408
191
- - name: test
192
- num_bytes: 442410
193
- num_examples: 1725
194
- download_size: 1033400
195
- dataset_size: 1492132
196
- - config_name: qnli
197
- features:
198
- - name: question
199
- dtype: string
200
- - name: sentence
201
- dtype: string
202
- - name: label
203
- dtype:
204
- class_label:
205
- names:
206
- '0': entailment
207
- '1': not_entailment
208
- - name: idx
209
- dtype: int32
210
- splits:
211
- - name: train
212
- num_bytes: 25612443
213
- num_examples: 104743
214
- - name: validation
215
- num_bytes: 1368304
216
- num_examples: 5463
217
- - name: test
218
- num_bytes: 1373093
219
- num_examples: 5463
220
- download_size: 19278324
221
- dataset_size: 28353840
222
- - config_name: qqp
223
- features:
224
- - name: question1
225
- dtype: string
226
- - name: question2
227
- dtype: string
228
- - name: label
229
- dtype:
230
- class_label:
231
- names:
232
- '0': not_duplicate
233
- '1': duplicate
234
- - name: idx
235
- dtype: int32
236
- splits:
237
- - name: train
238
- num_bytes: 50900820
239
- num_examples: 363846
240
- - name: validation
241
- num_bytes: 5653754
242
- num_examples: 40430
243
- - name: test
244
- num_bytes: 55171111
245
- num_examples: 390965
246
- download_size: 73982265
247
- dataset_size: 111725685
248
- - config_name: rte
249
- features:
250
- - name: sentence1
251
- dtype: string
252
- - name: sentence2
253
- dtype: string
254
- - name: label
255
- dtype:
256
- class_label:
257
- names:
258
- '0': entailment
259
- '1': not_entailment
260
- - name: idx
261
- dtype: int32
262
- splits:
263
- - name: train
264
- num_bytes: 847320
265
- num_examples: 2490
266
- - name: validation
267
- num_bytes: 90728
268
- num_examples: 277
269
- - name: test
270
- num_bytes: 974053
271
- num_examples: 3000
272
- download_size: 1274409
273
- dataset_size: 1912101
274
- - config_name: sst2
275
- features:
276
- - name: sentence
277
- dtype: string
278
- - name: label
279
- dtype:
280
- class_label:
281
- names:
282
- '0': negative
283
- '1': positive
284
- - name: idx
285
- dtype: int32
286
- splits:
287
- - name: train
288
- num_bytes: 4681603
289
- num_examples: 67349
290
- - name: validation
291
- num_bytes: 106252
292
- num_examples: 872
293
- - name: test
294
- num_bytes: 216640
295
- num_examples: 1821
296
- download_size: 3331080
297
- dataset_size: 5004495
298
- - config_name: stsb
299
- features:
300
- - name: sentence1
301
- dtype: string
302
- - name: sentence2
303
- dtype: string
304
- - name: label
305
- dtype: float32
306
- - name: idx
307
- dtype: int32
308
- splits:
309
- - name: train
310
- num_bytes: 754791
311
- num_examples: 5749
312
- - name: validation
313
- num_bytes: 216064
314
- num_examples: 1500
315
- - name: test
316
- num_bytes: 169974
317
- num_examples: 1379
318
- download_size: 766983
319
- dataset_size: 1140829
320
- - config_name: wnli
321
- features:
322
- - name: sentence1
323
- dtype: string
324
- - name: sentence2
325
- dtype: string
326
- - name: label
327
- dtype:
328
- class_label:
329
- names:
330
- '0': not_entailment
331
- '1': entailment
332
- - name: idx
333
- dtype: int32
334
- splits:
335
- - name: train
336
- num_bytes: 107109
337
- num_examples: 635
338
- - name: validation
339
- num_bytes: 12162
340
- num_examples: 71
341
- - name: test
342
- num_bytes: 37889
343
- num_examples: 146
344
- download_size: 63522
345
- dataset_size: 157160
346
- configs:
347
- - config_name: ax
348
- data_files:
349
- - split: test
350
- path: ax/test-*
351
- - config_name: cola
352
- data_files:
353
- - split: train
354
- path: cola/train-*
355
- - split: validation
356
- path: cola/validation-*
357
- - split: test
358
- path: cola/test-*
359
- - config_name: mnli
360
- data_files:
361
- - split: train
362
- path: mnli/train-*
363
- - split: validation_matched
364
- path: mnli/validation_matched-*
365
- - split: validation_mismatched
366
- path: mnli/validation_mismatched-*
367
- - split: test_matched
368
- path: mnli/test_matched-*
369
- - split: test_mismatched
370
- path: mnli/test_mismatched-*
371
- - config_name: mnli_matched
372
- data_files:
373
- - split: validation
374
- path: mnli_matched/validation-*
375
- - split: test
376
- path: mnli_matched/test-*
377
- - config_name: mnli_mismatched
378
- data_files:
379
- - split: validation
380
- path: mnli_mismatched/validation-*
381
- - split: test
382
- path: mnli_mismatched/test-*
383
- - config_name: mrpc
384
- data_files:
385
- - split: train
386
- path: mrpc/train-*
387
- - split: validation
388
- path: mrpc/validation-*
389
- - split: test
390
- path: mrpc/test-*
391
- - config_name: qnli
392
- data_files:
393
- - split: train
394
- path: qnli/train-*
395
- - split: validation
396
- path: qnli/validation-*
397
- - split: test
398
- path: qnli/test-*
399
- - config_name: qqp
400
- data_files:
401
- - split: train
402
- path: qqp/train-*
403
- - split: validation
404
- path: qqp/validation-*
405
- - split: test
406
- path: qqp/test-*
407
- - config_name: rte
408
- data_files:
409
- - split: train
410
- path: rte/train-*
411
- - split: validation
412
- path: rte/validation-*
413
- - split: test
414
- path: rte/test-*
415
- - config_name: sst2
416
- data_files:
417
- - split: train
418
- path: sst2/train-*
419
- - split: validation
420
- path: sst2/validation-*
421
- - split: test
422
- path: sst2/test-*
423
- - config_name: stsb
424
- data_files:
425
- - split: train
426
- path: stsb/train-*
427
- - split: validation
428
- path: stsb/validation-*
429
- - split: test
430
- path: stsb/test-*
431
- - config_name: wnli
432
- data_files:
433
- - split: train
434
- path: wnli/train-*
435
- - split: validation
436
- path: wnli/validation-*
437
- - split: test
438
- path: wnli/test-*
439
  train-eval-index:
440
  - config: cola
441
  task: text-classification
@@ -545,6 +135,19 @@ train-eval-index:
545
  sentence1: text1
546
  sentence2: text2
547
  label: target
548
  ---
549
 
550
  # Dataset Card for GLUE
@@ -629,14 +232,13 @@ train-eval-index:
629
 
630
  ## Dataset Description
631
 
632
- - **Homepage:** https://gluebenchmark.com/
633
- - **Repository:** https://github.com/nyu-mll/GLUE-baselines
634
- - **Paper:** https://arxiv.org/abs/1804.07461
635
- - **Leaderboard:** https://gluebenchmark.com/leaderboard
636
  - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
637
- - **Size of downloaded dataset files:** 1.00 GB
638
- - **Size of the generated dataset:** 240.84 MB
639
- - **Total amount of disk used:** 1.24 GB
640
 
641
  ### Dataset Summary
642
 
@@ -704,9 +306,9 @@ The language data in GLUE is in English (BCP-47 `en`)
704
 
705
  #### ax
706
 
707
- - **Size of downloaded dataset files:** 0.22 MB
708
- - **Size of the generated dataset:** 0.24 MB
709
- - **Total amount of disk used:** 0.46 MB
710
 
711
  An example of 'test' looks as follows.
712
  ```
@@ -720,9 +322,9 @@ An example of 'test' looks as follows.
720
 
721
  #### cola
722
 
723
- - **Size of downloaded dataset files:** 0.38 MB
724
- - **Size of the generated dataset:** 0.61 MB
725
- - **Total amount of disk used:** 0.99 MB
726
 
727
  An example of 'train' looks as follows.
728
  ```
@@ -735,9 +337,9 @@ An example of 'train' looks as follows.
735
 
736
  #### mnli
737
 
738
- - **Size of downloaded dataset files:** 312.78 MB
739
- - **Size of the generated dataset:** 82.47 MB
740
- - **Total amount of disk used:** 395.26 MB
741
 
742
  An example of 'train' looks as follows.
743
  ```
@@ -751,9 +353,9 @@ An example of 'train' looks as follows.
751
 
752
  #### mnli_matched
753
 
754
- - **Size of downloaded dataset files:** 312.78 MB
755
- - **Size of the generated dataset:** 3.69 MB
756
- - **Total amount of disk used:** 316.48 MB
757
 
758
  An example of 'test' looks as follows.
759
  ```
@@ -767,15 +369,15 @@ An example of 'test' looks as follows.
767
 
768
  #### mnli_mismatched
769
 
770
- - **Size of downloaded dataset files:** 312.78 MB
771
- - **Size of the generated dataset:** 3.91 MB
772
- - **Total amount of disk used:** 316.69 MB
773
 
774
  An example of 'test' looks as follows.
775
  ```
776
  {
777
  "premise": "What have you decided, what are you going to do?",
778
- "hypothesis": "So what's your decision?",
779
  "label": -1,
780
  "idx": 0
781
  }
@@ -783,114 +385,31 @@ An example of 'test' looks as follows.
783
 
784
  #### mrpc
785
 
786
- - **Size of downloaded dataset files:** ??
787
- - **Size of the generated dataset:** 1.5 MB
788
- - **Total amount of disk used:** ??
789
-
790
- An example of 'train' looks as follows.
791
- ```
792
- {
793
- "sentence1": "Amrozi accused his brother, whom he called "the witness", of deliberately distorting his evidence.",
794
- "sentence2": "Referring to him as only "the witness", Amrozi accused his brother of deliberately distorting his evidence.",
795
- "label": 1,
796
- "idx": 0
797
- }
798
- ```
799
 
800
  #### qnli
801
 
802
- - **Size of downloaded dataset files:** ??
803
- - **Size of the generated dataset:** 28 MB
804
- - **Total amount of disk used:** ??
805
-
806
- An example of 'train' looks as follows.
807
- ```
808
- {
809
- "question": "When did the third Digimon series begin?",
810
- "sentence": "Unlike the two seasons before it and most of the seasons that followed, Digimon Tamers takes a darker and more realistic approach to its story featuring Digimon who do not reincarnate after their deaths and more complex character development in the original Japanese.",
811
- "label": 1,
812
- "idx": 0
813
- }
814
- ```
815
 
816
  #### qqp
817
 
818
- - **Size of downloaded dataset files:** ??
819
- - **Size of the generated dataset:** 107 MB
820
- - **Total amount of disk used:** ??
821
-
822
- An example of 'train' looks as follows.
823
- ```
824
- {
825
- "question1": "How is the life of a math student? Could you describe your own experiences?",
826
- "question2": "Which level of prepration is enough for the exam jlpt5?",
827
- "label": 0,
828
- "idx": 0
829
- }
830
- ```
831
 
832
  #### rte
833
 
834
- - **Size of downloaded dataset files:** ??
835
- - **Size of the generated dataset:** 1.9 MB
836
- - **Total amount of disk used:** ??
837
-
838
- An example of 'train' looks as follows.
839
- ```
840
- {
841
- "sentence1": "No Weapons of Mass Destruction Found in Iraq Yet.",
842
- "sentence2": "Weapons of Mass Destruction Found in Iraq.",
843
- "label": 1,
844
- "idx": 0
845
- }
846
- ```
847
 
848
  #### sst2
849
 
850
- - **Size of downloaded dataset files:** ??
851
- - **Size of the generated dataset:** 4.9 MB
852
- - **Total amount of disk used:** ??
853
-
854
- An example of 'train' looks as follows.
855
- ```
856
- {
857
- "sentence": "hide new secretions from the parental units",
858
- "label": 0,
859
- "idx": 0
860
- }
861
- ```
862
 
863
  #### stsb
864
 
865
- - **Size of downloaded dataset files:** ??
866
- - **Size of the generated dataset:** 1.2 MB
867
- - **Total amount of disk used:** ??
868
-
869
- An example of 'train' looks as follows.
870
- ```
871
- {
872
- "sentence1": "A plane is taking off.",
873
- "sentence2": "An air plane is taking off.",
874
- "label": 5.0,
875
- "idx": 0
876
- }
877
- ```
878
 
879
  #### wnli
880
 
881
- - **Size of downloaded dataset files:** ??
882
- - **Size of the generated dataset:** 0.18 MB
883
- - **Total amount of disk used:** ??
884
-
885
- An example of 'train' looks as follows.
886
- ```
887
- {
888
- "sentence1": "I stuck a pin through a carrot. When I pulled the pin out, it had a hole.",
889
- "sentence2": "The carrot had a hole.",
890
- "label": 1,
891
- "idx": 0
892
- }
893
- ```
894
 
895
  ### Data Fields
896
 
@@ -927,51 +446,31 @@ The data fields are the same among all splits.
927
 
928
  #### mrpc
929
 
930
- - `sentence1`: a `string` feature.
931
- - `sentence2`: a `string` feature.
932
- - `label`: a classification label, with possible values including `not_equivalent` (0), `equivalent` (1).
933
- - `idx`: a `int32` feature.
934
 
935
  #### qnli
936
 
937
- - `question`: a `string` feature.
938
- - `sentence`: a `string` feature.
939
- - `label`: a classification label, with possible values including `entailment` (0), `not_entailment` (1).
940
- - `idx`: a `int32` feature.
941
 
942
  #### qqp
943
 
944
- - `question1`: a `string` feature.
945
- - `question2`: a `string` feature.
946
- - `label`: a classification label, with possible values including `not_duplicate` (0), `duplicate` (1).
947
- - `idx`: a `int32` feature.
948
 
949
  #### rte
950
 
951
- - `sentence1`: a `string` feature.
952
- - `sentence2`: a `string` feature.
953
- - `label`: a classification label, with possible values including `entailment` (0), `not_entailment` (1).
954
- - `idx`: a `int32` feature.
955
 
956
  #### sst2
957
 
958
- - `sentence`: a `string` feature.
959
- - `label`: a classification label, with possible values including `negative` (0), `positive` (1).
960
- - `idx`: a `int32` feature.
961
 
962
  #### stsb
963
 
964
- - `sentence1`: a `string` feature.
965
- - `sentence2`: a `string` feature.
966
- - `label`: a float32 regression label, with possible values from 0 to 5.
967
- - `idx`: a `int32` feature.
968
 
969
  #### wnli
970
 
971
- - `sentence1`: a `string` feature.
972
- - `sentence2`: a `string` feature.
973
- - `label`: a classification label, with possible values including `not_entailment` (0), `entailment` (1).
974
- - `idx`: a `int32` feature.
975
 
976
  ### Data Splits
977
 
@@ -1085,105 +584,29 @@ The data fields are the same among all splits.
1085
 
1086
  ### Licensing Information
1087
 
1088
- The primary GLUE tasks are built on and derived from existing datasets. We refer users to the original licenses accompanying each dataset.
1089
 
1090
  ### Citation Information
1091
 
1092
- If you use GLUE, please cite all the datasets you use.
1093
-
1094
- In addition, we encourage you to use the following BibTeX citation for GLUE itself:
1095
  ```
1096
  @inproceedings{wang2019glue,
1097
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
1098
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
1099
  note={In the Proceedings of ICLR.},
1100
  year={2019}
1101
  }
1102
- ```
1103
 
1104
- If you evaluate using GLUE, we also highly recommend citing the papers that originally introduced the nine GLUE tasks, both to give the original authors their due credit and because venues will expect papers to describe the data they evaluate on.
1105
- The following provides BibTeX for all of the GLUE tasks, except QQP, for which we recommend adding a footnote to this page: https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs
1106
- ```
1107
- @article{warstadt2018neural,
1108
- title={Neural Network Acceptability Judgments},
1109
- author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R.},
1110
- journal={arXiv preprint 1805.12471},
1111
- year={2018}
1112
- }
1113
- @inproceedings{socher2013recursive,
1114
- title={Recursive deep models for semantic compositionality over a sentiment treebank},
1115
- author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
1116
- booktitle={Proceedings of EMNLP},
1117
- pages={1631--1642},
1118
- year={2013}
1119
- }
1120
- @inproceedings{dolan2005automatically,
1121
- title={Automatically constructing a corpus of sentential paraphrases},
1122
- author={Dolan, William B and Brockett, Chris},
1123
- booktitle={Proceedings of the International Workshop on Paraphrasing},
1124
- year={2005}
1125
- }
1126
- @book{agirre2007semantic,
1127
- editor = {Agirre, Eneko and M\`arquez, Llu\'{i}s and Wicentowski, Richard},
1128
- title = {Proceedings of the Fourth International Workshop on Semantic Evaluations (SemEval-2007)},
1129
- month = {June},
1130
- year = {2007},
1131
- address = {Prague, Czech Republic},
1132
- publisher = {Association for Computational Linguistics},
1133
- }
1134
- @inproceedings{williams2018broad,
1135
- author = {Williams, Adina and Nangia, Nikita and Bowman, Samuel R.},
1136
- title = {A Broad-Coverage Challenge Corpus for Sentence Understanding through Inference},
1137
- booktitle = {Proceedings of NAACL-HLT},
1138
- year = 2018
1139
- }
1140
- @inproceedings{rajpurkar2016squad,
1141
- author = {Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy}
1142
- title = {{SQ}u{AD}: 100,000+ Questions for Machine Comprehension of Text},
1143
- booktitle = {Proceedings of EMNLP}
1144
- year = {2016},
1145
- publisher = {Association for Computational Linguistics},
1146
- pages = {2383--2392},
1147
- location = {Austin, Texas},
1148
- }
1149
- @incollection{dagan2006pascal,
1150
- title={The {PASCAL} recognising textual entailment challenge},
1151
- author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
1152
- booktitle={Machine learning challenges. evaluating predictive uncertainty, visual object classification, and recognising tectual entailment},
1153
- pages={177--190},
1154
- year={2006},
1155
- publisher={Springer}
1156
- }
1157
- @article{bar2006second,
1158
- title={The second {PASCAL} recognising textual entailment challenge},
1159
- author={Bar Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
1160
- year={2006}
1161
- }
1162
- @inproceedings{giampiccolo2007third,
1163
- title={The third {PASCAL} recognizing textual entailment challenge},
1164
- author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
1165
- booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
1166
- pages={1--9},
1167
- year={2007},
1168
- organization={Association for Computational Linguistics},
1169
- }
1170
- @article{bentivogli2009fifth,
1171
- title={The Fifth {PASCAL} Recognizing Textual Entailment Challenge},
1172
- author={Bentivogli, Luisa and Dagan, Ido and Dang, Hoa Trang and Giampiccolo, Danilo and Magnini, Bernardo},
1173
- booktitle={TAC},
1174
- year={2009}
1175
- }
1176
- @inproceedings{levesque2011winograd,
1177
- title={The {W}inograd schema challenge},
1178
- author={Levesque, Hector J and Davis, Ernest and Morgenstern, Leora},
1179
- booktitle={{AAAI} Spring Symposium: Logical Formalizations of Commonsense Reasoning},
1180
- volume={46},
1181
- pages={47},
1182
- year={2011}
1183
- }
1184
  ```
1185
 
1186
 
1187
  ### Contributions
1188
 
1189
- Thanks to [@patpizio](https://github.com/patpizio), [@jeswan](https://github.com/jeswan), [@thomwolf](https://github.com/thomwolf), [@patrickvonplaten](https://github.com/patrickvonplaten), [@mariamabarham](https://github.com/mariamabarham) for adding this dataset.
 
6
  language:
7
  - en
8
  license:
9
+ - cc-by-4.0
10
  multilinguality:
11
  - monolingual
12
  size_categories:
 
20
  - natural-language-inference
21
  - semantic-similarity-scoring
22
  - sentiment-classification
23
+ - text-classification-other-coreference-nli
24
+ - text-classification-other-paraphrase-identification
25
+ - text-classification-other-qa-nli
26
  - text-scoring
27
  paperswithcode_id: glue
28
  pretty_name: GLUE (General Language Understanding Evaluation benchmark)
 
29
  train-eval-index:
30
  - config: cola
31
  task: text-classification
 
135
  sentence1: text1
136
  sentence2: text2
137
  label: target
138
+ configs:
139
+ - ax
140
+ - cola
141
+ - mnli
142
+ - mnli_matched
143
+ - mnli_mismatched
144
+ - mrpc
145
+ - qnli
146
+ - qqp
147
+ - rte
148
+ - sst2
149
+ - stsb
150
+ - wnli
151
  ---
152
 
153
  # Dataset Card for GLUE
 
232
 
233
  ## Dataset Description
234
 
235
+ - **Homepage:** [https://nyu-mll.github.io/CoLA/](https://nyu-mll.github.io/CoLA/)
236
+ - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
237
+ - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
238
  - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
239
+ - **Size of downloaded dataset files:** 955.33 MB
240
+ - **Size of the generated dataset:** 229.68 MB
241
+ - **Total amount of disk used:** 1185.01 MB
242
 
243
  ### Dataset Summary
244
 
 
306
 
307
  #### ax
308
 
309
+ - **Size of downloaded dataset files:** 0.21 MB
310
+ - **Size of the generated dataset:** 0.23 MB
311
+ - **Total amount of disk used:** 0.44 MB
312
 
313
  An example of 'test' looks as follows.
314
  ```
 
322
 
323
  #### cola
324
 
325
+ - **Size of downloaded dataset files:** 0.36 MB
326
+ - **Size of the generated dataset:** 0.58 MB
327
+ - **Total amount of disk used:** 0.94 MB
328
 
329
  An example of 'train' looks as follows.
330
  ```
 
337
 
338
  #### mnli
339
 
340
+ - **Size of downloaded dataset files:** 298.29 MB
341
+ - **Size of the generated dataset:** 78.65 MB
342
+ - **Total amount of disk used:** 376.95 MB
343
 
344
  An example of 'train' looks as follows.
345
  ```
 
353
 
354
  #### mnli_matched
355
 
356
+ - **Size of downloaded dataset files:** 298.29 MB
357
+ - **Size of the generated dataset:** 3.52 MB
358
+ - **Total amount of disk used:** 301.82 MB
359
 
360
  An example of 'test' looks as follows.
361
  ```
 
369
 
370
  #### mnli_mismatched
371
 
372
+ - **Size of downloaded dataset files:** 298.29 MB
373
+ - **Size of the generated dataset:** 3.73 MB
374
+ - **Total amount of disk used:** 302.02 MB
375
 
376
  An example of 'test' looks as follows.
377
  ```
378
  {
379
  "premise": "What have you decided, what are you going to do?",
380
+ "hypothesis": "So what's your decision?,
381
  "label": -1,
382
  "idx": 0
383
  }
 
385
 
386
  #### mrpc
387
 
388
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
389
 
390
  #### qnli
391
 
392
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
393
 
394
  #### qqp
395
 
396
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
397
 
398
  #### rte
399
 
400
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
401
 
402
  #### sst2
403
 
404
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
405
 
406
  #### stsb
407
 
408
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
409
 
410
  #### wnli
411
 
412
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
413
 
414
  ### Data Fields
415
 
 
446
 
447
  #### mrpc
448
 
449
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
450
 
451
  #### qnli
452
 
453
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
454
 
455
  #### qqp
456
 
457
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
458
 
459
  #### rte
460
 
461
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
462
 
463
  #### sst2
464
 
465
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
466
 
467
  #### stsb
468
 
469
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
470
 
471
  #### wnli
472
 
473
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
474
 
475
  ### Data Splits
476
 
 
584
 
585
  ### Licensing Information
586
 
587
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
588
 
589
  ### Citation Information
590
 
591
  ```
592
+ @article{warstadt2018neural,
593
+ title={Neural Network Acceptability Judgments},
594
+ author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
595
+ journal={arXiv preprint arXiv:1805.12471},
596
+ year={2018}
597
+ }
598
  @inproceedings{wang2019glue,
599
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
600
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
601
  note={In the Proceedings of ICLR.},
602
  year={2019}
603
  }
 
604
 
605
+ Note that each GLUE dataset has its own citation. Please see the source to see
606
+ the correct citation for each contained dataset.
607
  ```
608
 
609
 
610
  ### Contributions
611
 
612
+ Thanks to [@patpizio](https://github.com/patpizio), [@jeswan](https://github.com/jeswan), [@thomwolf](https://github.com/thomwolf), [@patrickvonplaten](https://github.com/patrickvonplaten), [@mariamabarham](https://github.com/mariamabarham) for adding this dataset.
cola/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:2e7538afa2000e63f5343f16a758d75c452661a384208399d2035cd2fce45c33
3
- size 251124
dataset_infos.json ADDED
@@ -0,0 +1 @@
1
+ {"cola": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{warstadt2018neural,\n title={Neural Network Acceptability Judgments},\n author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1805.12471},\n year={2018}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://nyu-mll.github.io/CoLA/", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["unacceptable", "acceptable"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "cola", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 61049, "num_examples": 1063, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 489149, "num_examples": 8551, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 60850, "num_examples": 1043, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/CoLA.zip": {"num_bytes": 376971, "checksum": "f212fcd832b8f7b435fb991f101abf89f96b933ab400603bf198960dfc32cbff"}}, "download_size": 376971, "post_processing_size": null, "dataset_size": 611048, "size_in_bytes": 988019}, "sst2": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{socher2013recursive,\n title={Recursive deep models for semantic compositionality over a sentiment treebank},\n author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},\n booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},\n pages={1631--1642},\n year={2013}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://nlp.stanford.edu/sentiment/index.html", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["negative", "positive"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "sst2", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 217556, "num_examples": 1821, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 4715283, "num_examples": 67349, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 106692, "num_examples": 872, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/SST-2.zip": {"num_bytes": 7439277, "checksum": "d67e16fb55739c1b32cdce9877596db1c127dc322d93c082281f64057c16deaa"}}, "download_size": 7439277, "post_processing_size": null, "dataset_size": 5039531, "size_in_bytes": 12478808}, "mrpc": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{dolan2005automatically,\n title={Automatically constructing a corpus of sentential paraphrases},\n author={Dolan, William B and Brockett, Chris},\n booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},\n year={2005}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://www.microsoft.com/en-us/download/details.aspx?id=52398", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_equivalent", "equivalent"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mrpc", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 443498, "num_examples": 1725, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 946146, "num_examples": 3668, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 106142, "num_examples": 408, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/mrpc_dev_ids.tsv": {"num_bytes": 6222, "checksum": "971d7767d81b997fd9060ade0ec23c4fc31cbb226a55d1bd4a1bac474eb81dc7"}, "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt": {"num_bytes": 1047044, "checksum": "60a9b09084528f0673eedee2b69cb941920f0b8cd0eeccefc464a98768457f89"}, "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt": {"num_bytes": 441275, "checksum": "a04e271090879aaba6423d65b94950c089298587d9c084bf9cd7439bd785f784"}}, "download_size": 1494541, "post_processing_size": null, "dataset_size": 1495786, "size_in_bytes": 2990327}, "qqp": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@online{WinNT,\n author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},\n title = {First Quora Dataset Release: Question Pairs},\n year = {2017},\n url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},\n urldate = {2019-04-03}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n", "homepage": "https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs", "license": "", "features": {"question1": {"dtype": "string", "id": null, "_type": "Value"}, "question2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_duplicate", "duplicate"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "qqp", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 50901116, "num_examples": 363846, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 5653794, "num_examples": 40430, "dataset_name": "glue"}, "test": {"name": "test", "num_bytes": 55171431, "num_examples": 390965, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip": {"num_bytes": 41696084, "checksum": 
"40e7c862c04eb26ee04b67fd900e76c45c6ba8e6d8fab4f8f1f8072a1a3fbae0"}}, "download_size": 41696084, "post_processing_size": null, "dataset_size": 111726341, "size_in_bytes": 153422425}, "stsb": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{cer2017semeval,\n title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},\n author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},\n journal={arXiv preprint arXiv:1708.00055},\n year={2017}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "float32", "id": null, "_type": "Value"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "stsb", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 170847, "num_examples": 1379, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 758394, "num_examples": 5749, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 217012, "num_examples": 1500, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/STS-B.zip": {"num_bytes": 802872, "checksum": "e60a6393de5a8b5b9bac5020a1554b54e3691f9d600b775bd131e613ac179c85"}}, "download_size": 802872, "post_processing_size": null, "dataset_size": 1146253, "size_in_bytes": 1949125}, "mnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and 
Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test_matched": {"name": "test_matched", "num_bytes": 1854787, "num_examples": 9796, "dataset_name": "glue"}, "test_mismatched": {"name": "test_mismatched", "num_bytes": 1956866, "num_examples": 9847, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 74865118, "num_examples": 392702, "dataset_name": "glue"}, "validation_matched": {"name": "validation_matched", "num_bytes": 1839926, "num_examples": 9815, "dataset_name": "glue"}, "validation_mismatched": {"name": "validation_mismatched", "num_bytes": 1955384, "num_examples": 9832, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "post_processing_size": null, "dataset_size": 82472081, "size_in_bytes": 395255588}, "mnli_mismatched": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli_mismatched", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1956866, "num_examples": 9847, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1955384, "num_examples": 9832, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "post_processing_size": null, "dataset_size": 3912250, "size_in_bytes": 316695757}, "mnli_matched": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli_matched", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1854787, "num_examples": 9796, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1839926, "num_examples": 9815, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "post_processing_size": null, "dataset_size": 3694713, "size_in_bytes": 316478220}, "qnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{rajpurkar2016squad,\n title={Squad: 100,000+ questions for machine comprehension of text},\n author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},\n journal={arXiv preprint arXiv:1606.05250},\n year={2016}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://rajpurkar.github.io/SQuAD-explorer/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "qnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1376516, "num_examples": 5463, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 25677924, "num_examples": 104743, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1371727, "num_examples": 5463, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip": {"num_bytes": 10627589, "checksum": "e634e78627a29adaecd4f955359b22bf5e70f2cbd93b493f2d624138a0c0e5f5"}}, "download_size": 10627589, "post_processing_size": null, "dataset_size": 28426167, "size_in_bytes": 39053756}, "rte": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{dagan2005pascal,\n title={The PASCAL recognising textual entailment challenge},\n author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},\n booktitle={Machine Learning Challenges Workshop},\n pages={177--190},\n year={2005},\n organization={Springer}\n}\n@inproceedings{bar2006second,\n title={The second pascal recognising textual entailment challenge},\n author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},\n booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},\n volume={6},\n number={1},\n pages={6--4},\n year={2006},\n organization={Venice}\n}\n@inproceedings{giampiccolo2007third,\n title={The third pascal recognizing textual entailment challenge},\n author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},\n booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},\n pages={1--9},\n year={2007},\n organization={Association for Computational Linguistics}\n}\n@inproceedings{bentivogli2009fifth,\n title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},\n author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},\n booktitle={TAC},\n year={2009}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://aclweb.org/aclwiki/Recognizing_Textual_Entailment", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "rte", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 975936, "num_examples": 3000, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 848888, "num_examples": 2490, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 90911, "num_examples": 277, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/RTE.zip": {"num_bytes": 697150, "checksum": "6bf86de103ecd335f3441bd43574d23fef87ecc695977a63b82d5efb206556ee"}}, "download_size": 697150, "post_processing_size": null, "dataset_size": 1915735, "size_in_bytes": 2612885}, "wnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{levesque2012winograd,\n title={The winograd schema challenge},\n author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},\n booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},\n year={2012}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_entailment", "entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "wnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 37992, "num_examples": 146, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 107517, "num_examples": 635, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 12215, "num_examples": 71, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/WNLI.zip": {"num_bytes": 28999, "checksum": "ae0e8e4d16f4d46d4a0a566ec7ecceccfd3fbfaa4a7a4b4e02848c0f2561ac46"}}, "download_size": 28999, "post_processing_size": null, "dataset_size": 157724, "size_in_bytes": 186723}, "ax": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://gluebenchmark.com/diagnostics", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "ax", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 238392, "num_examples": 1104, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/AX.tsv": {"num_bytes": 222257, "checksum": "0e13510b1bb14436ff7e2ee82338f0efb0133ecf2e73507a697dc210db3f05fd"}}, "download_size": 222257, "post_processing_size": null, "dataset_size": 238392, "size_in_bytes": 460649}}
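As a quick sanity check, the configs and splits recorded in the dataset_infos.json entry above can be loaded with the standard `datasets` API; a minimal sketch (the config name, split names and row counts are taken from the metadata above, nothing else is assumed):

from datasets import load_dataset

# Load the "qnli" config described above; per the metadata it should expose
# train (104743 rows), validation (5463 rows) and test (5463 rows) splits.
qnli = load_dataset("glue", "qnli")
print(qnli)                 # shows the three splits and their sizes
print(qnli["train"][0])     # {'question': ..., 'sentence': ..., 'label': ..., 'idx': ...}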
ax/test-00000-of-00001.parquet → dummy/ax/1.0.0/dummy_data.zip RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a07b802fe2d4968a1f7ccce9406826dc77e0d1dc53fea9491664bd8ebba8571a
- size 80767
+ oid sha256:d2a34cfe9a95b80530887f488eb04e3514b322e0fa65c64f425ddb7aea449f69
+ size 509
cola/test-00000-of-00001.parquet → dummy/cola/1.0.0/dummy_data.zip RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3c4d526b6f49f432621de43569f9ecf6af41f639baaf4a9d821b95d745def61d
- size 37719
+ oid sha256:d678797c6eb84d3436868f8b5ac506f88f12bd51633245bd1a20af6021ac48d4
+ size 1116
mrpc/validation-00000-of-00001.parquet → dummy/mnli/1.0.0/dummy_data.zip RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:33c007dbf5bfa8463d87a13e6226df8c0fcf2596c2cd39d0f3bb79754e00f50f
- size 75678
+ oid sha256:ef46936124ebde31577df53b2ae6e381aa9c66e95a2cf50f42ba68478ec3896e
+ size 5438
cola/validation-00000-of-00001.parquet → dummy/mrpc/1.0.0/dummy_data.zip RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c14b7219a7d9f9fe3dd291fd000f6623ee413805eb108c9c49578ed50873e4ba
- size 37551
+ oid sha256:0bfe41b0047215524032750c1faf32c84c41566279fca9df1c35482640537aa6
+ size 4539
dummy/qnli/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a771b312be26048e7d921ff4bf01ac7de224641cd51977629bb54b9839637fb0
+ size 1859
dummy/qqp/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a1f6bf7c3ae0587a99d4ecfc2c4ab900efbd23dc1c68e2556426da9feab0163
+ size 1588
dummy/rte/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7bec2e7562503a3b7ef577986b4cd10b075818b66fb03df8d4dec79d28a5bf5f
+ size 1613
dummy/sst2/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ff05ebd2679fd60f174cd19415e8dd0c2f701f49f8f9dbb63f7b30707d9b06e
+ size 1143
dummy/stsb/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8027e1188e092ea53eede8a2b2bd245f4c98f2b37132ea5d7dd173bac36e025e
+ size 1353
dummy/wnli/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e52960c15224df1f7202371029b3a5fad3b4dfec72132d3c8b996ff03db92755
+ size 1407
glue.py ADDED
@@ -0,0 +1,628 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """The General Language Understanding Evaluation (GLUE) benchmark."""
18
+
19
+
20
+ import csv
21
+ import os
22
+ import textwrap
23
+
24
+ import numpy as np
25
+
26
+ import datasets
27
+
28
+
29
+ _GLUE_CITATION = """\
30
+ @inproceedings{wang2019glue,
31
+ title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
32
+ author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
33
+ note={In the Proceedings of ICLR.},
34
+ year={2019}
35
+ }
36
+ """
37
+
38
+ _GLUE_DESCRIPTION = """\
39
+ GLUE, the General Language Understanding Evaluation benchmark
40
+ (https://gluebenchmark.com/) is a collection of resources for training,
41
+ evaluating, and analyzing natural language understanding systems.
42
+
43
+ """
44
+
45
+ _MRPC_DEV_IDS = "https://dl.fbaipublicfiles.com/glue/data/mrpc_dev_ids.tsv"
46
+ _MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt"
47
+ _MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt"
48
+
49
+ _MNLI_BASE_KWARGS = dict(
50
+ text_features={
51
+ "premise": "sentence1",
52
+ "hypothesis": "sentence2",
53
+ },
54
+ label_classes=["entailment", "neutral", "contradiction"],
55
+ label_column="gold_label",
56
+ data_url="https://dl.fbaipublicfiles.com/glue/data/MNLI.zip",
57
+ data_dir="MNLI",
58
+ citation=textwrap.dedent(
59
+ """\
60
+ @InProceedings{N18-1101,
61
+ author = "Williams, Adina
62
+ and Nangia, Nikita
63
+ and Bowman, Samuel",
64
+ title = "A Broad-Coverage Challenge Corpus for
65
+ Sentence Understanding through Inference",
66
+ booktitle = "Proceedings of the 2018 Conference of
67
+ the North American Chapter of the
68
+ Association for Computational Linguistics:
69
+ Human Language Technologies, Volume 1 (Long
70
+ Papers)",
71
+ year = "2018",
72
+ publisher = "Association for Computational Linguistics",
73
+ pages = "1112--1122",
74
+ location = "New Orleans, Louisiana",
75
+ url = "http://aclweb.org/anthology/N18-1101"
76
+ }
77
+ @article{bowman2015large,
78
+ title={A large annotated corpus for learning natural language inference},
79
+ author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
80
+ journal={arXiv preprint arXiv:1508.05326},
81
+ year={2015}
82
+ }"""
83
+ ),
84
+ url="http://www.nyu.edu/projects/bowman/multinli/",
85
+ )
86
+
87
+
88
+ class GlueConfig(datasets.BuilderConfig):
89
+ """BuilderConfig for GLUE."""
90
+
91
+ def __init__(
92
+ self,
93
+ text_features,
94
+ label_column,
95
+ data_url,
96
+ data_dir,
97
+ citation,
98
+ url,
99
+ label_classes=None,
100
+ process_label=lambda x: x,
101
+ **kwargs,
102
+ ):
103
+ """BuilderConfig for GLUE.
104
+
105
+ Args:
106
+ text_features: `dict[string, string]`, map from the name of the feature
107
+ dict for each text field to the name of the column in the tsv file
108
+ label_column: `string`, name of the column in the tsv file corresponding
109
+ to the label
110
+ data_url: `string`, url to download the zip file from
111
+ data_dir: `string`, the path to the folder containing the tsv files in the
112
+ downloaded zip
113
+ citation: `string`, citation for the data set
114
+ url: `string`, url for information about the data set
115
+ label_classes: `list[string]`, the list of classes if the label is
116
+ categorical. If not provided, then the label will be of type
117
+ `datasets.Value('float32')`.
118
+ process_label: `Function[string, any]`, function taking in the raw value
119
+ of the label and processing it to the form required by the label feature
120
+ **kwargs: keyword arguments forwarded to super.
121
+ """
122
+ super(GlueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
123
+ self.text_features = text_features
124
+ self.label_column = label_column
125
+ self.label_classes = label_classes
126
+ self.data_url = data_url
127
+ self.data_dir = data_dir
128
+ self.citation = citation
129
+ self.url = url
130
+ self.process_label = process_label
131
+
132
+
133
+ class Glue(datasets.GeneratorBasedBuilder):
134
+ """The General Language Understanding Evaluation (GLUE) benchmark."""
135
+
136
+ BUILDER_CONFIGS = [
137
+ GlueConfig(
138
+ name="cola",
139
+ description=textwrap.dedent(
140
+ """\
141
+ The Corpus of Linguistic Acceptability consists of English
142
+ acceptability judgments drawn from books and journal articles on
143
+ linguistic theory. Each example is a sequence of words annotated
144
+ with whether it is a grammatical English sentence."""
145
+ ),
146
+ text_features={"sentence": "sentence"},
147
+ label_classes=["unacceptable", "acceptable"],
148
+ label_column="is_acceptable",
149
+ data_url="https://dl.fbaipublicfiles.com/glue/data/CoLA.zip",
150
+ data_dir="CoLA",
151
+ citation=textwrap.dedent(
152
+ """\
153
+ @article{warstadt2018neural,
154
+ title={Neural Network Acceptability Judgments},
155
+ author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
156
+ journal={arXiv preprint arXiv:1805.12471},
157
+ year={2018}
158
+ }"""
159
+ ),
160
+ url="https://nyu-mll.github.io/CoLA/",
161
+ ),
162
+ GlueConfig(
163
+ name="sst2",
164
+ description=textwrap.dedent(
165
+ """\
166
+ The Stanford Sentiment Treebank consists of sentences from movie reviews and
167
+ human annotations of their sentiment. The task is to predict the sentiment of a
168
+ given sentence. We use the two-way (positive/negative) class split, and use only
169
+ sentence-level labels."""
170
+ ),
171
+ text_features={"sentence": "sentence"},
172
+ label_classes=["negative", "positive"],
173
+ label_column="label",
174
+ data_url="https://dl.fbaipublicfiles.com/glue/data/SST-2.zip",
175
+ data_dir="SST-2",
176
+ citation=textwrap.dedent(
177
+ """\
178
+ @inproceedings{socher2013recursive,
179
+ title={Recursive deep models for semantic compositionality over a sentiment treebank},
180
+ author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
181
+ booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
182
+ pages={1631--1642},
183
+ year={2013}
184
+ }"""
185
+ ),
186
+ url="https://datasets.stanford.edu/sentiment/index.html",
187
+ ),
188
+ GlueConfig(
189
+ name="mrpc",
190
+ description=textwrap.dedent(
191
+ """\
192
+ The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of
193
+ sentence pairs automatically extracted from online news sources, with human annotations
194
+ for whether the sentences in the pair are semantically equivalent."""
195
+ ), # pylint: disable=line-too-long
196
+ text_features={"sentence1": "", "sentence2": ""},
197
+ label_classes=["not_equivalent", "equivalent"],
198
+ label_column="Quality",
199
+ data_url="", # MRPC isn't hosted by GLUE.
200
+ data_dir="MRPC",
201
+ citation=textwrap.dedent(
202
+ """\
203
+ @inproceedings{dolan2005automatically,
204
+ title={Automatically constructing a corpus of sentential paraphrases},
205
+ author={Dolan, William B and Brockett, Chris},
206
+ booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},
207
+ year={2005}
208
+ }"""
209
+ ),
210
+ url="https://www.microsoft.com/en-us/download/details.aspx?id=52398",
211
+ ),
212
+ GlueConfig(
213
+ name="qqp",
214
+ description=textwrap.dedent(
215
+ """\
216
+ The Quora Question Pairs2 dataset is a collection of question pairs from the
217
+ community question-answering website Quora. The task is to determine whether a
218
+ pair of questions are semantically equivalent."""
219
+ ),
220
+ text_features={
221
+ "question1": "question1",
222
+ "question2": "question2",
223
+ },
224
+ label_classes=["not_duplicate", "duplicate"],
225
+ label_column="is_duplicate",
226
+ data_url="https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip",
227
+ data_dir="QQP",
228
+ citation=textwrap.dedent(
229
+ """\
230
+ @online{WinNT,
231
+ author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
232
+ title = {First Quora Dataset Release: Question Pairs},
233
+ year = {2017},
234
+ url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
235
+ urldate = {2019-04-03}
236
+ }"""
237
+ ),
238
+ url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
239
+ ),
240
+ GlueConfig(
241
+ name="stsb",
242
+ description=textwrap.dedent(
243
+ """\
244
+ The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
245
+ sentence pairs drawn from news headlines, video and image captions, and natural
246
+ language inference data. Each pair is human-annotated with a similarity score
247
+ from 1 to 5."""
248
+ ),
249
+ text_features={
250
+ "sentence1": "sentence1",
251
+ "sentence2": "sentence2",
252
+ },
253
+ label_column="score",
254
+ data_url="https://dl.fbaipublicfiles.com/glue/data/STS-B.zip",
255
+ data_dir="STS-B",
256
+ citation=textwrap.dedent(
257
+ """\
258
+ @article{cer2017semeval,
259
+ title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},
260
+ author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},
261
+ journal={arXiv preprint arXiv:1708.00055},
262
+ year={2017}
263
+ }"""
264
+ ),
265
+ url="http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
266
+ process_label=np.float32,
267
+ ),
268
+ GlueConfig(
269
+ name="mnli",
270
+ description=textwrap.dedent(
271
+ """\
272
+ The Multi-Genre Natural Language Inference Corpus is a crowdsourced
273
+ collection of sentence pairs with textual entailment annotations. Given a premise sentence
274
+ and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
275
+ (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
276
+ gathered from ten different sources, including transcribed speech, fiction, and government reports.
277
+ We use the standard test set, for which we obtained private labels from the authors, and evaluate
278
+ on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
279
+ the SNLI corpus as 550k examples of auxiliary training data."""
280
+ ),
281
+ **_MNLI_BASE_KWARGS,
282
+ ),
283
+ GlueConfig(
284
+ name="mnli_mismatched",
285
+ description=textwrap.dedent(
286
+ """\
287
+ The mismatched validation and test splits from MNLI.
288
+ See the "mnli" BuilderConfig for additional information."""
289
+ ),
290
+ **_MNLI_BASE_KWARGS,
291
+ ),
292
+ GlueConfig(
293
+ name="mnli_matched",
294
+ description=textwrap.dedent(
295
+ """\
296
+ The matched validation and test splits from MNLI.
297
+ See the "mnli" BuilderConfig for additional information."""
298
+ ),
299
+ **_MNLI_BASE_KWARGS,
300
+ ),
301
+ GlueConfig(
302
+ name="qnli",
303
+ description=textwrap.dedent(
304
+ """\
305
+ The Stanford Question Answering Dataset is a question-answering
306
+ dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
307
+ from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
308
+ convert the task into sentence pair classification by forming a pair between each question and each
309
+ sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
310
+ question and the context sentence. The task is to determine whether the context sentence contains
311
+ the answer to the question. This modified version of the original task removes the requirement that
312
+ the model select the exact answer, but also removes the simplifying assumptions that the answer
313
+ is always present in the input and that lexical overlap is a reliable cue."""
314
+ ), # pylint: disable=line-too-long
315
+ text_features={
316
+ "question": "question",
317
+ "sentence": "sentence",
318
+ },
319
+ label_classes=["entailment", "not_entailment"],
320
+ label_column="label",
321
+ data_url="https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip",
322
+ data_dir="QNLI",
323
+ citation=textwrap.dedent(
324
+ """\
325
+ @article{rajpurkar2016squad,
326
+ title={Squad: 100,000+ questions for machine comprehension of text},
327
+ author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
328
+ journal={arXiv preprint arXiv:1606.05250},
329
+ year={2016}
330
+ }"""
331
+ ),
332
+ url="https://rajpurkar.github.io/SQuAD-explorer/",
333
+ ),
334
+ GlueConfig(
335
+ name="rte",
336
+ description=textwrap.dedent(
337
+ """\
338
+ The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
339
+ entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
340
+ et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009).4 Examples are
341
+ constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
342
+ for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""
343
+ ), # pylint: disable=line-too-long
344
+ text_features={
345
+ "sentence1": "sentence1",
346
+ "sentence2": "sentence2",
347
+ },
348
+ label_classes=["entailment", "not_entailment"],
349
+ label_column="label",
350
+ data_url="https://dl.fbaipublicfiles.com/glue/data/RTE.zip",
351
+ data_dir="RTE",
352
+ citation=textwrap.dedent(
353
+ """\
354
+ @inproceedings{dagan2005pascal,
355
+ title={The PASCAL recognising textual entailment challenge},
356
+ author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
357
+ booktitle={Machine Learning Challenges Workshop},
358
+ pages={177--190},
359
+ year={2005},
360
+ organization={Springer}
361
+ }
362
+ @inproceedings{bar2006second,
363
+ title={The second pascal recognising textual entailment challenge},
364
+ author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
365
+ booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
366
+ volume={6},
367
+ number={1},
368
+ pages={6--4},
369
+ year={2006},
370
+ organization={Venice}
371
+ }
372
+ @inproceedings{giampiccolo2007third,
373
+ title={The third pascal recognizing textual entailment challenge},
374
+ author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
375
+ booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
376
+ pages={1--9},
377
+ year={2007},
378
+ organization={Association for Computational Linguistics}
379
+ }
380
+ @inproceedings{bentivogli2009fifth,
381
+ title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
382
+ author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
383
+ booktitle={TAC},
384
+ year={2009}
385
+ }"""
386
+ ),
387
+ url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
388
+ ),
389
+ GlueConfig(
390
+ name="wnli",
391
+ description=textwrap.dedent(
392
+ """\
393
+ The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
394
+ in which a system must read a sentence with a pronoun and select the referent of that pronoun from
395
+ a list of choices. The examples are manually constructed to foil simple statistical methods: Each
396
+ one is contingent on contextual information provided by a single word or phrase in the sentence.
397
+ To convert the problem into sentence pair classification, we construct sentence pairs by replacing
398
+ the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
399
+ pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
400
+ new examples derived from fiction books that was shared privately by the authors of the original
401
+ corpus. While the included training set is balanced between two classes, the test set is imbalanced
402
+ between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
403
+ hypotheses are sometimes shared between training and development examples, so if a model memorizes the
404
+ training examples, they will predict the wrong label on corresponding development set
405
+ example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
406
+ between a model's score on this task and its score on the unconverted original task. We
407
+ call converted dataset WNLI (Winograd NLI)."""
408
+ ),
409
+ text_features={
410
+ "sentence1": "sentence1",
411
+ "sentence2": "sentence2",
412
+ },
413
+ label_classes=["not_entailment", "entailment"],
414
+ label_column="label",
415
+ data_url="https://dl.fbaipublicfiles.com/glue/data/WNLI.zip",
416
+ data_dir="WNLI",
417
+ citation=textwrap.dedent(
418
+ """\
419
+ @inproceedings{levesque2012winograd,
420
+ title={The winograd schema challenge},
421
+ author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
422
+ booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
423
+ year={2012}
424
+ }"""
425
+ ),
426
+ url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
427
+ ),
428
+ GlueConfig(
429
+ name="ax",
430
+ description=textwrap.dedent(
431
+ """\
432
+ A manually-curated evaluation dataset for fine-grained analysis of
433
+ system performance on a broad range of linguistic phenomena. This
434
+ dataset evaluates sentence understanding through Natural Language
435
+ Inference (NLI) problems. Use a model trained on MultiNLI to produce
436
+ predictions for this dataset."""
437
+ ),
438
+ text_features={
439
+ "premise": "sentence1",
440
+ "hypothesis": "sentence2",
441
+ },
442
+ label_classes=["entailment", "neutral", "contradiction"],
443
+ label_column="", # No label since we only have test set.
444
+ # We must use a URL shortener since the URL from GLUE is very long and
445
+ # causes issues in TFDS.
446
+ data_url="https://dl.fbaipublicfiles.com/glue/data/AX.tsv",
447
+ data_dir="", # We are downloading a tsv.
448
+ citation="", # The GLUE citation is sufficient.
449
+ url="https://gluebenchmark.com/diagnostics",
450
+ ),
451
+ ]
452
+
453
+ def _info(self):
454
+ features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
455
+ if self.config.label_classes:
456
+ features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
457
+ else:
458
+ features["label"] = datasets.Value("float32")
459
+ features["idx"] = datasets.Value("int32")
460
+ return datasets.DatasetInfo(
461
+ description=_GLUE_DESCRIPTION,
462
+ features=datasets.Features(features),
463
+ homepage=self.config.url,
464
+ citation=self.config.citation + "\n" + _GLUE_CITATION,
465
+ )
466
+
467
+ def _split_generators(self, dl_manager):
468
+ if self.config.name == "ax":
469
+ data_file = dl_manager.download(self.config.data_url)
470
+ return [
471
+ datasets.SplitGenerator(
472
+ name=datasets.Split.TEST,
473
+ gen_kwargs={
474
+ "data_file": data_file,
475
+ "split": "test",
476
+ },
477
+ )
478
+ ]
479
+
480
+ if self.config.name == "mrpc":
481
+ data_dir = None
482
+ mrpc_files = dl_manager.download(
483
+ {
484
+ "dev_ids": _MRPC_DEV_IDS,
485
+ "train": _MRPC_TRAIN,
486
+ "test": _MRPC_TEST,
487
+ }
488
+ )
489
+ else:
490
+ dl_dir = dl_manager.download_and_extract(self.config.data_url)
491
+ data_dir = os.path.join(dl_dir, self.config.data_dir)
492
+ mrpc_files = None
493
+ train_split = datasets.SplitGenerator(
494
+ name=datasets.Split.TRAIN,
495
+ gen_kwargs={
496
+ "data_file": os.path.join(data_dir or "", "train.tsv"),
497
+ "split": "train",
498
+ "mrpc_files": mrpc_files,
499
+ },
500
+ )
501
+ if self.config.name == "mnli":
502
+ return [
503
+ train_split,
504
+ _mnli_split_generator("validation_matched", data_dir, "dev", matched=True),
505
+ _mnli_split_generator("validation_mismatched", data_dir, "dev", matched=False),
506
+ _mnli_split_generator("test_matched", data_dir, "test", matched=True),
507
+ _mnli_split_generator("test_mismatched", data_dir, "test", matched=False),
508
+ ]
509
+ elif self.config.name == "mnli_matched":
510
+ return [
511
+ _mnli_split_generator("validation", data_dir, "dev", matched=True),
512
+ _mnli_split_generator("test", data_dir, "test", matched=True),
513
+ ]
514
+ elif self.config.name == "mnli_mismatched":
515
+ return [
516
+ _mnli_split_generator("validation", data_dir, "dev", matched=False),
517
+ _mnli_split_generator("test", data_dir, "test", matched=False),
518
+ ]
519
+ else:
520
+ return [
521
+ train_split,
522
+ datasets.SplitGenerator(
523
+ name=datasets.Split.VALIDATION,
524
+ gen_kwargs={
525
+ "data_file": os.path.join(data_dir or "", "dev.tsv"),
526
+ "split": "dev",
527
+ "mrpc_files": mrpc_files,
528
+ },
529
+ ),
530
+ datasets.SplitGenerator(
531
+ name=datasets.Split.TEST,
532
+ gen_kwargs={
533
+ "data_file": os.path.join(data_dir or "", "test.tsv"),
534
+ "split": "test",
535
+ "mrpc_files": mrpc_files,
536
+ },
537
+ ),
538
+ ]
539
+
540
+ def _generate_examples(self, data_file, split, mrpc_files=None):
541
+ if self.config.name == "mrpc":
542
+ # We have to prepare the MRPC dataset from the original sources ourselves.
543
+ examples = self._generate_example_mrpc_files(mrpc_files=mrpc_files, split=split)
544
+ for example in examples:
545
+ yield example["idx"], example
546
+ else:
547
+ process_label = self.config.process_label
548
+ label_classes = self.config.label_classes
549
+
550
+ # The train and dev files for CoLA are the only tsv files without a
551
+ # header.
552
+ is_cola_non_test = self.config.name == "cola" and split != "test"
553
+
554
+ with open(data_file, encoding="utf8") as f:
555
+ reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
556
+ if is_cola_non_test:
557
+ reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
558
+
559
+ for n, row in enumerate(reader):
560
+ if is_cola_non_test:
561
+ row = {
562
+ "sentence": row[3],
563
+ "is_acceptable": row[1],
564
+ }
565
+
566
+ example = {feat: row[col] for feat, col in self.config.text_features.items()}
567
+ example["idx"] = n
568
+
569
+ if self.config.label_column in row:
570
+ label = row[self.config.label_column]
571
+ # For some tasks, the label is represented as 0 and 1 in the tsv
572
+ # files and needs to be cast to integer to work with the feature.
573
+ if label_classes and label not in label_classes:
574
+ label = int(label) if label else None
575
+ example["label"] = process_label(label)
576
+ else:
577
+ example["label"] = process_label(-1)
578
+
579
+ # Filter out corrupted rows.
580
+ for value in example.values():
581
+ if value is None:
582
+ break
583
+ else:
584
+ yield example["idx"], example
585
+
586
+ def _generate_example_mrpc_files(self, mrpc_files, split):
587
+ if split == "test":
588
+ with open(mrpc_files["test"], encoding="utf8") as f:
589
+ # The first 3 bytes are the utf-8 BOM \xef\xbb\xbf, which messes with
590
+ # the Quality key.
591
+ f.seek(3)
592
+ reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
593
+ for n, row in enumerate(reader):
594
+ yield {
595
+ "sentence1": row["#1 String"],
596
+ "sentence2": row["#2 String"],
597
+ "label": int(row["Quality"]),
598
+ "idx": n,
599
+ }
600
+ else:
601
+ with open(mrpc_files["dev_ids"], encoding="utf8") as f:
602
+ reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
603
+ dev_ids = [[row[0], row[1]] for row in reader]
604
+ with open(mrpc_files["train"], encoding="utf8") as f:
605
+ # The first 3 bytes are the utf-8 BOM \xef\xbb\xbf, which messes with
606
+ # the Quality key.
607
+ f.seek(3)
608
+ reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
609
+ for n, row in enumerate(reader):
610
+ is_row_in_dev = [row["#1 ID"], row["#2 ID"]] in dev_ids
611
+ if is_row_in_dev == (split == "dev"):
612
+ yield {
613
+ "sentence1": row["#1 String"],
614
+ "sentence2": row["#2 String"],
615
+ "label": int(row["Quality"]),
616
+ "idx": n,
617
+ }
618
+
619
+
620
+ def _mnli_split_generator(name, data_dir, split, matched):
621
+ return datasets.SplitGenerator(
622
+ name=name,
623
+ gen_kwargs={
624
+ "data_file": os.path.join(data_dir, "%s_%s.tsv" % (split, "matched" if matched else "mismatched")),
625
+ "split": split,
626
+ "mrpc_files": None,
627
+ },
628
+ )
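A hedged usage sketch for the script added above (not part of this diff): `datasets` can load a builder from a local script path, so glue.py can be exercised directly. Only the local path "./glue.py" is assumed here; the config names come from BUILDER_CONFIGS in the script.

from datasets import load_dataset

# "cola" defines label_classes, so its label is a ClassLabel over
# ["unacceptable", "acceptable"]; "stsb" sets process_label=np.float32,
# so its label is a float32 similarity score.
cola = load_dataset("./glue.py", "cola")
stsb = load_dataset("./glue.py", "stsb")
print(cola["train"].features["label"])
print(stsb["train"][0]["label"])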
mnli/test_matched-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a330c4f2aeb0bc92f1b4b133fbbaf51bf9c7d0f5cac3d06f49ef63af47dbb822
- size 1220119
mnli/test_mismatched-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e5078398d5c83d183578b1bdafe94e4491ed28ad1cf8d98ee8846afcec651f16
- size 1257857
mnli/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:49a4a5508b89b8fed2c6e81d2c47d00f4759050a7048c6cc5d95d31122ced3c1
- size 52224361
mnli/validation_matched-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7f918c09d9c35446b8e8f06a5672f8ab704e2897fecbf52e2e154141f3d7c421
- size 1214936
mnli/validation_mismatched-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:04aba92823a954be36fe1b69b61eed334c9eb1009daba0dd79f69d77b87c535c
- size 1251152
mnli_matched/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a330c4f2aeb0bc92f1b4b133fbbaf51bf9c7d0f5cac3d06f49ef63af47dbb822
- size 1220119
mnli_matched/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7f918c09d9c35446b8e8f06a5672f8ab704e2897fecbf52e2e154141f3d7c421
- size 1214936
mnli_mismatched/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e5078398d5c83d183578b1bdafe94e4491ed28ad1cf8d98ee8846afcec651f16
- size 1257857
mnli_mismatched/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:04aba92823a954be36fe1b69b61eed334c9eb1009daba0dd79f69d77b87c535c
- size 1251152
mrpc/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a623ed1cbdf445b11f8e249acbf649d7d3a5ee58c918554c40cbd8307e488693
- size 308441
mrpc/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:61fd41301e0e244b0420c4350a170c8e7cf64740335fc875a4af2d79af0df0af
- size 649281
qnli/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:f39520cd07925c9784e4a7f1f7aed8f17f136039b8498f7ad07c7bf13d65ba83
- size 877345
qnli/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ebc7cb70a5bbde0b0336c3d51f31bb4df4673e908e8874b090b52169b1365c6c
- size 17528917
qnli/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e69311b81dc65589286091d9905a27617a90436dd215c7a59832fa8f4f336169
- size 872062
qqp/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:95d5d1efcfa3ff7e090565e98085770b3497aad8dbcf12996412b23d2fb669e8
- size 36694152
qqp/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:4d6f02e643f7c36e9a4f7d4971a5ee9bd74063a319452fe6c87850c739774cd7
- size 33558839
qqp/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:efd86a539c412d74874ee451573d7bd142f56c47fe36de033b9f367d8bb0fa71
- size 3729274
rte/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3f44aadbfb8bbb7a64ba0674bd26ff77b66e88fdf7a6d64255a5ba6ae9057383
- size 621413
rte/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a6252ab17015d718f6de1effe0980f7b158df63e3d16207cd8bd396b608e5147
- size 583976
rte/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:fb2aa2e04f551133ba663617a15ae133dc22b0f6a969bc0629b5ea6003ee9cf8
- size 69020
sst2/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e9d23cf0067211d2baf018328b507f5153fb6704d75117295a8bda47c7adccb1
- size 147793
sst2/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:66a253e67968acfabcbe49dbe9da964b42ac1c851c40ab760e8c8942efdb3229
- size 3110468
sst2/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a1371f3b3a7b0bcefa8388799a9359dc3ce76c349cc0079507a7991364fd2a9b
- size 72819
stsb/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:04fa2561f1ff3c395cf8980e3eed5d133c194abf636d5e1870d765c861087bd9
- size 114296
stsb/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:bbd93bbb988fd18437e02185fe3b2bd9a18350376c392e7820de9df1b247ed1f
- size 502065
stsb/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:152de7cf1fa34ee4df1c243bd209b02ade21a1d5c4fb3b7da5240f78e4000aa9
- size 150622
wnli/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:766d3754c46a80f3275cb81a32ee6b7b49176fa8c1ef85ea92a4a3676510b902
- size 13620
wnli/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:40f4c0c60db68addeda8e9cbe25e6344cd99d5bbb80125535994a9a3141ee0a9
- size 38835
wnli/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:880037e45e03df868d5799ca21dc03f3a6378f0adf3c01c7bfc46b94fa61f1cb
- size 11067