albertvillanova (HF staff) committed
Commit a3f3ab7
Parent: cb2099c

Convert dataset to Parquet (#16)


- Convert dataset to Parquet (c8b4538eb38d9e94325f5033a85ee6773f3b3e35)
- Add sst2 data files (a17903a5822db5c0f065b813544eff839645333c)
- Add mrpc data files (85520db5c4bb7e37c8ba1e1a38fc96dae5568c5d)
- Add qqp data files (169a67d6d185e00f8e46409cc182dd6e741b54b7)
- Add stsb data files (bed045762055580cd139bb533c9b6b67ee9556e7)
- Add mnli data files (4c50f6d2c71ecad30a90f5da22d5392673f3ab05)
- Add mnli_mismatched data files (b04c7fe0968b4ff35a57fc723b7a9b4c713b245c)
- Add mnli_matched data files (c234b36c1609cbe2b00dc63c26e1b196145b07d1)
- Add qnli data files (ea7dceb5f3c5354644b3aaf78c1c6869df26aa6b)
- Add rte data files (26b471247b6f6c6e83711b6683fd87a3cf607118)
- Add wnli data files (68d3d763fc36f89a8d047f4b8e51b98ea36b98d0)
- Add ax data files (9236b360ddd813bb7e9d58ac12eb358a8f81bee1)
- Delete loading script (11762d92f088f525c9548bf8a16ff55e0514a851)
- Delete legacy dataset_infos.json (0de23925db68e9cf6287d3776a0f67f5267a42eb)
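
With the Parquet shards in place, the dataset loads without executing any repository code. A minimal sketch of the effect, assuming a `datasets` release recent enough to resolve Hub Parquet data files (the exact version requirement is an assumption, not stated by this commit):

from datasets import load_dataset

# "sst2" is one of the twelve configs declared in the README metadata below.
sst2 = load_dataset("glue", "sst2")   # resolves sst2/*.parquet instead of running glue.py
print(sst2)                           # DatasetDict with train/validation/test splits
print(sst2["train"][0])               # e.g. {'sentence': ..., 'label': 0 or 1, 'idx': 0}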

Files changed (37)
  1. README.md +268 -175
  2. ax/test-00000-of-00001.parquet +3 -0
  3. cola/test-00000-of-00001.parquet +3 -0
  4. cola/train-00000-of-00001.parquet +3 -0
  5. cola/validation-00000-of-00001.parquet +3 -0
  6. dataset_infos.json +0 -1
  7. glue.py +0 -628
  8. mnli/test_matched-00000-of-00001.parquet +3 -0
  9. mnli/test_mismatched-00000-of-00001.parquet +3 -0
  10. mnli/train-00000-of-00001.parquet +3 -0
  11. mnli/validation_matched-00000-of-00001.parquet +3 -0
  12. mnli/validation_mismatched-00000-of-00001.parquet +3 -0
  13. mnli_matched/test-00000-of-00001.parquet +3 -0
  14. mnli_matched/validation-00000-of-00001.parquet +3 -0
  15. mnli_mismatched/test-00000-of-00001.parquet +3 -0
  16. mnli_mismatched/validation-00000-of-00001.parquet +3 -0
  17. mrpc/test-00000-of-00001.parquet +3 -0
  18. mrpc/train-00000-of-00001.parquet +3 -0
  19. mrpc/validation-00000-of-00001.parquet +3 -0
  20. qnli/test-00000-of-00001.parquet +3 -0
  21. qnli/train-00000-of-00001.parquet +3 -0
  22. qnli/validation-00000-of-00001.parquet +3 -0
  23. qqp/test-00000-of-00001.parquet +3 -0
  24. qqp/train-00000-of-00001.parquet +3 -0
  25. qqp/validation-00000-of-00001.parquet +3 -0
  26. rte/test-00000-of-00001.parquet +3 -0
  27. rte/train-00000-of-00001.parquet +3 -0
  28. rte/validation-00000-of-00001.parquet +3 -0
  29. sst2/test-00000-of-00001.parquet +3 -0
  30. sst2/train-00000-of-00001.parquet +3 -0
  31. sst2/validation-00000-of-00001.parquet +3 -0
  32. stsb/test-00000-of-00001.parquet +3 -0
  33. stsb/train-00000-of-00001.parquet +3 -0
  34. stsb/validation-00000-of-00001.parquet +3 -0
  35. wnli/test-00000-of-00001.parquet +3 -0
  36. wnli/train-00000-of-00001.parquet +3 -0
  37. wnli/validation-00000-of-00001.parquet +3 -0
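
Each added shard above is a standalone Parquet file, so the row counts recorded in the README can be checked directly. A sketch under assumptions (the repo id "glue" was the canonical id at the time, and the `huggingface_hub`/`pyarrow` calls are illustrative, not part of this commit):

from huggingface_hub import hf_hub_download
import pyarrow.parquet as pq

# Fetch one shard from the list above and read it as a plain Parquet table.
path = hf_hub_download(
    repo_id="glue",
    repo_type="dataset",
    filename="cola/train-00000-of-00001.parquet",
)
table = pq.read_table(path)
print(table.num_rows)      # expected 8551, the cola train num_examples in the README
print(table.column_names)  # ['sentence', 'label', 'idx']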
README.md CHANGED
@@ -23,36 +23,46 @@ task_ids:
 - text-scoring
 paperswithcode_id: glue
 pretty_name: GLUE (General Language Understanding Evaluation benchmark)
+config_names:
+- ax
+- cola
+- mnli
+- mnli_matched
+- mnli_mismatched
+- mrpc
+- qnli
+- qqp
+- rte
+- sst2
+- stsb
+- wnli
 tags:
 - qa-nli
 - coreference-nli
 - paraphrase-identification
 dataset_info:
-- config_name: cola
+- config_name: ax
   features:
-  - name: sentence
+  - name: premise
+    dtype: string
+  - name: hypothesis
     dtype: string
   - name: label
     dtype:
       class_label:
         names:
-          '0': unacceptable
-          '1': acceptable
+          '0': entailment
+          '1': neutral
+          '2': contradiction
   - name: idx
     dtype: int32
   splits:
   - name: test
-    num_bytes: 61049
-    num_examples: 1063
-  - name: train
-    num_bytes: 489149
-    num_examples: 8551
-  - name: validation
-    num_bytes: 60850
-    num_examples: 1043
-  download_size: 376971
-  dataset_size: 611048
-- config_name: sst2
+    num_bytes: 237694
+    num_examples: 1104
+  download_size: 80767
+  dataset_size: 237694
+- config_name: cola
   features:
   - name: sentence
     dtype: string
@@ -60,97 +70,56 @@ dataset_info:
     dtype:
       class_label:
         names:
-          '0': negative
-          '1': positive
+          '0': unacceptable
+          '1': acceptable
   - name: idx
     dtype: int32
   splits:
-  - name: test
-    num_bytes: 217556
-    num_examples: 1821
   - name: train
-    num_bytes: 4715283
-    num_examples: 67349
+    num_bytes: 484869
+    num_examples: 8551
   - name: validation
-    num_bytes: 106692
-    num_examples: 872
-  download_size: 7439277
-  dataset_size: 5039531
-- config_name: mrpc
-  features:
-  - name: sentence1
-    dtype: string
-  - name: sentence2
-    dtype: string
-  - name: label
-    dtype:
-      class_label:
-        names:
-          '0': not_equivalent
-          '1': equivalent
-  - name: idx
-    dtype: int32
-  splits:
+    num_bytes: 60322
+    num_examples: 1043
   - name: test
-    num_bytes: 443498
-    num_examples: 1725
-  - name: train
-    num_bytes: 946146
-    num_examples: 3668
-  - name: validation
-    num_bytes: 106142
-    num_examples: 408
-  download_size: 1494541
-  dataset_size: 1495786
-- config_name: qqp
+    num_bytes: 60513
+    num_examples: 1063
+  download_size: 326394
+  dataset_size: 605704
+- config_name: mnli
   features:
-  - name: question1
+  - name: premise
     dtype: string
-  - name: question2
+  - name: hypothesis
     dtype: string
   - name: label
     dtype:
       class_label:
         names:
-          '0': not_duplicate
-          '1': duplicate
-  - name: idx
-    dtype: int32
-  splits:
-  - name: train
-    num_bytes: 50901116
-    num_examples: 363846
-  - name: validation
-    num_bytes: 5653794
-    num_examples: 40430
-  - name: test
-    num_bytes: 55171431
-    num_examples: 390965
-  download_size: 41696084
-  dataset_size: 111726341
-- config_name: stsb
-  features:
-  - name: sentence1
-    dtype: string
-  - name: sentence2
-    dtype: string
-  - name: label
-    dtype: float32
+          '0': entailment
+          '1': neutral
+          '2': contradiction
   - name: idx
     dtype: int32
   splits:
-  - name: test
-    num_bytes: 170847
-    num_examples: 1379
   - name: train
-    num_bytes: 758394
-    num_examples: 5749
-  - name: validation
-    num_bytes: 217012
-    num_examples: 1500
-  download_size: 802872
-  dataset_size: 1146253
-- config_name: mnli
+    num_bytes: 74619646
+    num_examples: 392702
+  - name: validation_matched
+    num_bytes: 1833783
+    num_examples: 9815
+  - name: validation_mismatched
+    num_bytes: 1949231
+    num_examples: 9832
+  - name: test_matched
+    num_bytes: 1848654
+    num_examples: 9796
+  - name: test_mismatched
+    num_bytes: 1950703
+    num_examples: 9847
+  download_size: 57168425
+  dataset_size: 82202017
+- config_name: mnli_matched
   features:
   - name: premise
     dtype: string
@@ -166,23 +135,14 @@ dataset_info:
   - name: idx
     dtype: int32
   splits:
-  - name: test_matched
-    num_bytes: 1854787
-    num_examples: 9796
-  - name: test_mismatched
-    num_bytes: 1956866
-    num_examples: 9847
-  - name: train
-    num_bytes: 74865118
-    num_examples: 392702
-  - name: validation_matched
-    num_bytes: 1839926
+  - name: validation
+    num_bytes: 1833783
     num_examples: 9815
-  - name: validation_mismatched
-    num_bytes: 1955384
-    num_examples: 9832
-  download_size: 312783507
-  dataset_size: 82472081
+  - name: test
+    num_bytes: 1848654
+    num_examples: 9796
+  download_size: 2435055
+  dataset_size: 3682437
 - config_name: mnli_mismatched
   features:
   - name: premise
@@ -199,38 +159,40 @@ dataset_info:
   - name: idx
     dtype: int32
   splits:
-  - name: test
-    num_bytes: 1956866
-    num_examples: 9847
   - name: validation
-    num_bytes: 1955384
+    num_bytes: 1949231
     num_examples: 9832
-  download_size: 312783507
-  dataset_size: 3912250
-- config_name: mnli_matched
+  - name: test
+    num_bytes: 1950703
+    num_examples: 9847
+  download_size: 2509009
+  dataset_size: 3899934
+- config_name: mrpc
   features:
-  - name: premise
+  - name: sentence1
     dtype: string
-  - name: hypothesis
+  - name: sentence2
     dtype: string
   - name: label
     dtype:
       class_label:
         names:
-          '0': entailment
-          '1': neutral
-          '2': contradiction
+          '0': not_equivalent
+          '1': equivalent
   - name: idx
     dtype: int32
   splits:
-  - name: test
-    num_bytes: 1854787
-    num_examples: 9796
+  - name: train
+    num_bytes: 943843
+    num_examples: 3668
   - name: validation
-    num_bytes: 1839926
-    num_examples: 9815
-  download_size: 312783507
-  dataset_size: 3694713
+    num_bytes: 105879
+    num_examples: 408
+  - name: test
+    num_bytes: 442410
+    num_examples: 1725
+  download_size: 1033400
+  dataset_size: 1492132
 - config_name: qnli
   features:
   - name: question
@@ -246,17 +208,43 @@ dataset_info:
   - name: idx
     dtype: int32
   splits:
-  - name: test
-    num_bytes: 1376516
-    num_examples: 5463
   - name: train
-    num_bytes: 25677924
+    num_bytes: 25612443
     num_examples: 104743
   - name: validation
-    num_bytes: 1371727
+    num_bytes: 1368304
+    num_examples: 5463
+  - name: test
+    num_bytes: 1373093
     num_examples: 5463
-  download_size: 10627589
-  dataset_size: 28426167
+  download_size: 19278324
+  dataset_size: 28353840
+- config_name: qqp
+  features:
+  - name: question1
+    dtype: string
+  - name: question2
+    dtype: string
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': not_duplicate
+          '1': duplicate
+  - name: idx
+    dtype: int32
+  splits:
+  - name: train
+    num_bytes: 50900820
+    num_examples: 363846
+  - name: validation
+    num_bytes: 5653754
+    num_examples: 40430
+  - name: test
+    num_bytes: 55171111
+    num_examples: 390965
+  download_size: 73982265
+  dataset_size: 111725685
 - config_name: rte
   features:
   - name: sentence1
@@ -272,64 +260,182 @@ dataset_info:
   - name: idx
     dtype: int32
   splits:
-  - name: test
-    num_bytes: 975936
-    num_examples: 3000
   - name: train
-    num_bytes: 848888
+    num_bytes: 847320
     num_examples: 2490
   - name: validation
-    num_bytes: 90911
+    num_bytes: 90728
     num_examples: 277
-  download_size: 697150
-  dataset_size: 1915735
-- config_name: wnli
+  - name: test
+    num_bytes: 974053
+    num_examples: 3000
+  download_size: 1274409
+  dataset_size: 1912101
+- config_name: sst2
   features:
-  - name: sentence1
-    dtype: string
-  - name: sentence2
+  - name: sentence
     dtype: string
   - name: label
     dtype:
       class_label:
         names:
-          '0': not_entailment
-          '1': entailment
+          '0': negative
+          '1': positive
   - name: idx
     dtype: int32
   splits:
+  - name: train
+    num_bytes: 4681603
+    num_examples: 67349
+  - name: validation
+    num_bytes: 106252
+    num_examples: 872
   - name: test
-    num_bytes: 37992
-    num_examples: 146
+    num_bytes: 216640
+    num_examples: 1821
+  download_size: 3331080
+  dataset_size: 5004495
+- config_name: stsb
+  features:
+  - name: sentence1
+    dtype: string
+  - name: sentence2
+    dtype: string
+  - name: label
+    dtype: float32
+  - name: idx
+    dtype: int32
+  splits:
   - name: train
-    num_bytes: 107517
-    num_examples: 635
+    num_bytes: 754791
+    num_examples: 5749
   - name: validation
-    num_bytes: 12215
-    num_examples: 71
-  download_size: 28999
-  dataset_size: 157724
-- config_name: ax
+    num_bytes: 216064
+    num_examples: 1500
+  - name: test
+    num_bytes: 169974
+    num_examples: 1379
+  download_size: 766983
+  dataset_size: 1140829
+- config_name: wnli
   features:
-  - name: premise
+  - name: sentence1
     dtype: string
-  - name: hypothesis
+  - name: sentence2
     dtype: string
   - name: label
     dtype:
       class_label:
         names:
-          '0': entailment
-          '1': neutral
-          '2': contradiction
+          '0': not_entailment
+          '1': entailment
   - name: idx
     dtype: int32
   splits:
+  - name: train
+    num_bytes: 107109
+    num_examples: 635
+  - name: validation
+    num_bytes: 12162
+    num_examples: 71
   - name: test
-    num_bytes: 238392
-    num_examples: 1104
-  download_size: 222257
-  dataset_size: 238392
+    num_bytes: 37889
+    num_examples: 146
+  download_size: 63522
+  dataset_size: 157160
+configs:
+- config_name: ax
+  data_files:
+  - split: test
+    path: ax/test-*
+- config_name: cola
+  data_files:
+  - split: train
+    path: cola/train-*
+  - split: validation
+    path: cola/validation-*
+  - split: test
+    path: cola/test-*
+- config_name: mnli
+  data_files:
+  - split: train
+    path: mnli/train-*
+  - split: validation_matched
+    path: mnli/validation_matched-*
+  - split: validation_mismatched
+    path: mnli/validation_mismatched-*
+  - split: test_matched
+    path: mnli/test_matched-*
+  - split: test_mismatched
+    path: mnli/test_mismatched-*
+- config_name: mnli_matched
+  data_files:
+  - split: validation
+    path: mnli_matched/validation-*
+  - split: test
+    path: mnli_matched/test-*
+- config_name: mnli_mismatched
+  data_files:
+  - split: validation
+    path: mnli_mismatched/validation-*
+  - split: test
+    path: mnli_mismatched/test-*
+- config_name: mrpc
+  data_files:
+  - split: train
+    path: mrpc/train-*
+  - split: validation
+    path: mrpc/validation-*
+  - split: test
+    path: mrpc/test-*
+- config_name: qnli
+  data_files:
+  - split: train
+    path: qnli/train-*
+  - split: validation
+    path: qnli/validation-*
+  - split: test
+    path: qnli/test-*
+- config_name: qqp
+  data_files:
+  - split: train
+    path: qqp/train-*
+  - split: validation
+    path: qqp/validation-*
+  - split: test
+    path: qqp/test-*
+- config_name: rte
+  data_files:
+  - split: train
+    path: rte/train-*
+  - split: validation
+    path: rte/validation-*
+  - split: test
+    path: rte/test-*
+- config_name: sst2
+  data_files:
+  - split: train
+    path: sst2/train-*
+  - split: validation
+    path: sst2/validation-*
+  - split: test
+    path: sst2/test-*
+- config_name: stsb
+  data_files:
+  - split: train
+    path: stsb/train-*
+  - split: validation
+    path: stsb/validation-*
+  - split: test
+    path: stsb/test-*
+- config_name: wnli
+  data_files:
+  - split: train
+    path: wnli/train-*
+  - split: validation
+    path: wnli/validation-*
+  - split: test
+    path: wnli/test-*
 train-eval-index:
 - config: cola
   task: text-classification
@@ -439,19 +545,6 @@ train-eval-index:
     sentence1: text1
     sentence2: text2
     label: target
-config_names:
-- ax
-- cola
-- mnli
-- mnli_matched
-- mnli_mismatched
-- mrpc
-- qnli
-- qqp
-- rte
-- sst2
-- stsb
-- wnli
 ---
 
 # Dataset Card for GLUE
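
The new `configs:` block above is what ties each config name and split to glob patterns over the Parquet files, so split selection becomes a path match rather than loading-script logic. A minimal sketch of the effect:

from datasets import load_dataset

# mnli declares five splits under `config_name: mnli`, each backed by
# mnli/<split>-*.parquet per the data_files patterns above.
val = load_dataset("glue", "mnli", split="validation_matched")
print(val.num_rows)  # 9815, matching that split's num_examples in the metadata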
ax/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a07b802fe2d4968a1f7ccce9406826dc77e0d1dc53fea9491664bd8ebba8571a
+size 80767

cola/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c4d526b6f49f432621de43569f9ecf6af41f639baaf4a9d821b95d745def61d
+size 37719

cola/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e7538afa2000e63f5343f16a758d75c452661a384208399d2035cd2fce45c33
+size 251124

cola/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c14b7219a7d9f9fe3dd291fd000f6623ee413805eb108c9c49578ed50873e4ba
+size 37551
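
Only these three-line Git LFS pointers are versioned in the repository; the Parquet bytes themselves live in LFS storage. A sketch parsing the pointer format (the pointer text is copied from the ax shard added above):

# Each pointer is three "key value" lines; a one-line parser is enough.
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:a07b802fe2d4968a1f7ccce9406826dc77e0d1dc53fea9491664bd8ebba8571a
size 80767"""

meta = dict(line.split(" ", 1) for line in pointer.splitlines())
print(meta["oid"])   # sha256 digest of the real file content
print(meta["size"])  # 80767 bytes -- equals the ax config's download_size above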
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"cola": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{warstadt2018neural,\n title={Neural Network Acceptability Judgments},\n author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1805.12471},\n year={2018}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://nyu-mll.github.io/CoLA/", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["unacceptable", "acceptable"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "cola", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 61049, "num_examples": 1063, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 489149, "num_examples": 8551, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 60850, "num_examples": 1043, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/CoLA.zip": {"num_bytes": 376971, "checksum": "f212fcd832b8f7b435fb991f101abf89f96b933ab400603bf198960dfc32cbff"}}, "download_size": 376971, "post_processing_size": null, "dataset_size": 611048, "size_in_bytes": 988019}, "sst2": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{socher2013recursive,\n title={Recursive deep models for semantic compositionality over a sentiment treebank},\n author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},\n booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},\n pages={1631--1642},\n year={2013}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://nlp.stanford.edu/sentiment/index.html", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["negative", "positive"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "sst2", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 217556, "num_examples": 1821, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 4715283, "num_examples": 67349, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 106692, "num_examples": 872, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/SST-2.zip": {"num_bytes": 7439277, "checksum": "d67e16fb55739c1b32cdce9877596db1c127dc322d93c082281f64057c16deaa"}}, "download_size": 7439277, "post_processing_size": null, "dataset_size": 5039531, "size_in_bytes": 12478808}, "mrpc": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{dolan2005automatically,\n title={Automatically constructing a corpus of sentential paraphrases},\n author={Dolan, William B and Brockett, Chris},\n booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},\n year={2005}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://www.microsoft.com/en-us/download/details.aspx?id=52398", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_equivalent", "equivalent"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mrpc", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 443498, "num_examples": 1725, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 946146, "num_examples": 3668, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 106142, "num_examples": 408, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/mrpc_dev_ids.tsv": {"num_bytes": 6222, "checksum": "971d7767d81b997fd9060ade0ec23c4fc31cbb226a55d1bd4a1bac474eb81dc7"}, "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt": {"num_bytes": 1047044, "checksum": "60a9b09084528f0673eedee2b69cb941920f0b8cd0eeccefc464a98768457f89"}, "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt": {"num_bytes": 441275, "checksum": "a04e271090879aaba6423d65b94950c089298587d9c084bf9cd7439bd785f784"}}, "download_size": 1494541, "post_processing_size": null, "dataset_size": 1495786, "size_in_bytes": 2990327}, "qqp": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@online{WinNT,\n author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},\n title = {First Quora Dataset Release: Question Pairs},\n year = {2017},\n url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},\n urldate = {2019-04-03}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n", "homepage": "https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs", "license": "", "features": {"question1": {"dtype": "string", "id": null, "_type": "Value"}, "question2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_duplicate", "duplicate"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "qqp", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 50901116, "num_examples": 363846, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 5653794, "num_examples": 40430, "dataset_name": "glue"}, "test": {"name": "test", "num_bytes": 55171431, "num_examples": 390965, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip": {"num_bytes": 41696084, "checksum": 
"40e7c862c04eb26ee04b67fd900e76c45c6ba8e6d8fab4f8f1f8072a1a3fbae0"}}, "download_size": 41696084, "post_processing_size": null, "dataset_size": 111726341, "size_in_bytes": 153422425}, "stsb": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{cer2017semeval,\n title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},\n author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},\n journal={arXiv preprint arXiv:1708.00055},\n year={2017}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "float32", "id": null, "_type": "Value"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "stsb", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 170847, "num_examples": 1379, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 758394, "num_examples": 5749, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 217012, "num_examples": 1500, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/STS-B.zip": {"num_bytes": 802872, "checksum": "e60a6393de5a8b5b9bac5020a1554b54e3691f9d600b775bd131e613ac179c85"}}, "download_size": 802872, "post_processing_size": null, "dataset_size": 1146253, "size_in_bytes": 1949125}, "mnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and 
Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test_matched": {"name": "test_matched", "num_bytes": 1854787, "num_examples": 9796, "dataset_name": "glue"}, "test_mismatched": {"name": "test_mismatched", "num_bytes": 1956866, "num_examples": 9847, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 74865118, "num_examples": 392702, "dataset_name": "glue"}, "validation_matched": {"name": "validation_matched", "num_bytes": 1839926, "num_examples": 9815, "dataset_name": "glue"}, "validation_mismatched": {"name": "validation_mismatched", "num_bytes": 1955384, "num_examples": 9832, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "post_processing_size": null, "dataset_size": 82472081, "size_in_bytes": 395255588}, "mnli_mismatched": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli_mismatched", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1956866, "num_examples": 9847, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1955384, "num_examples": 9832, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "post_processing_size": null, "dataset_size": 3912250, "size_in_bytes": 316695757}, "mnli_matched": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli_matched", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1854787, "num_examples": 9796, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1839926, "num_examples": 9815, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "post_processing_size": null, "dataset_size": 3694713, "size_in_bytes": 316478220}, "qnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{rajpurkar2016squad,\n title={Squad: 100,000+ questions for machine comprehension of text},\n author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},\n journal={arXiv preprint arXiv:1606.05250},\n year={2016}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://rajpurkar.github.io/SQuAD-explorer/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "qnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1376516, "num_examples": 5463, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 25677924, "num_examples": 104743, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1371727, "num_examples": 5463, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip": {"num_bytes": 10627589, "checksum": "e634e78627a29adaecd4f955359b22bf5e70f2cbd93b493f2d624138a0c0e5f5"}}, "download_size": 10627589, "post_processing_size": null, "dataset_size": 28426167, "size_in_bytes": 39053756}, "rte": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{dagan2005pascal,\n title={The PASCAL recognising textual entailment challenge},\n author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},\n booktitle={Machine Learning Challenges Workshop},\n pages={177--190},\n year={2005},\n organization={Springer}\n}\n@inproceedings{bar2006second,\n title={The second pascal recognising textual entailment challenge},\n author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},\n booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},\n volume={6},\n number={1},\n pages={6--4},\n year={2006},\n organization={Venice}\n}\n@inproceedings{giampiccolo2007third,\n title={The third pascal recognizing textual entailment challenge},\n author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},\n booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},\n pages={1--9},\n year={2007},\n organization={Association for Computational Linguistics}\n}\n@inproceedings{bentivogli2009fifth,\n title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},\n author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},\n booktitle={TAC},\n year={2009}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://aclweb.org/aclwiki/Recognizing_Textual_Entailment", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "rte", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 975936, "num_examples": 3000, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 848888, "num_examples": 2490, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 90911, "num_examples": 277, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/RTE.zip": {"num_bytes": 697150, "checksum": "6bf86de103ecd335f3441bd43574d23fef87ecc695977a63b82d5efb206556ee"}}, "download_size": 697150, "post_processing_size": null, "dataset_size": 1915735, "size_in_bytes": 2612885}, "wnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{levesque2012winograd,\n title={The winograd schema challenge},\n author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},\n booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},\n year={2012}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_entailment", "entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "wnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 37992, "num_examples": 146, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 107517, "num_examples": 635, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 12215, "num_examples": 71, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/WNLI.zip": {"num_bytes": 28999, "checksum": "ae0e8e4d16f4d46d4a0a566ec7ecceccfd3fbfaa4a7a4b4e02848c0f2561ac46"}}, "download_size": 28999, "post_processing_size": null, "dataset_size": 157724, "size_in_bytes": 186723}, "ax": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://gluebenchmark.com/diagnostics", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "ax", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 238392, "num_examples": 1104, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/AX.tsv": {"num_bytes": 222257, "checksum": "0e13510b1bb14436ff7e2ee82338f0efb0133ecf2e73507a697dc210db3f05fd"}}, "download_size": 222257, "post_processing_size": null, "dataset_size": 238392, "size_in_bytes": 460649}}
 
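With dataset_infos.json gone, the same split and feature metadata is read from the README's `dataset_info:` YAML block instead. A sketch of inspecting it (assuming a `datasets` version that parses dataset-card metadata):

from datasets import load_dataset_builder

builder = load_dataset_builder("glue", "cola")
print(builder.info.splits["train"].num_examples)  # 8551
print(builder.info.features["label"].names)       # ['unacceptable', 'acceptable']
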
glue.py DELETED
@@ -1,628 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """The General Language Understanding Evaluation (GLUE) benchmark."""
18
-
19
-
20
- import csv
21
- import os
22
- import textwrap
23
-
24
- import numpy as np
25
-
26
- import datasets
27
-
28
-
29
- _GLUE_CITATION = """\
30
- @inproceedings{wang2019glue,
31
- title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
32
- author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
33
- note={In the Proceedings of ICLR.},
34
- year={2019}
35
- }
36
- """
37
-
38
- _GLUE_DESCRIPTION = """\
39
- GLUE, the General Language Understanding Evaluation benchmark
40
- (https://gluebenchmark.com/) is a collection of resources for training,
41
- evaluating, and analyzing natural language understanding systems.
42
-
43
- """
44
-
45
- _MRPC_DEV_IDS = "https://dl.fbaipublicfiles.com/glue/data/mrpc_dev_ids.tsv"
46
- _MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt"
47
- _MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt"
48
-
49
- _MNLI_BASE_KWARGS = dict(
50
- text_features={
51
- "premise": "sentence1",
52
- "hypothesis": "sentence2",
53
- },
54
- label_classes=["entailment", "neutral", "contradiction"],
55
- label_column="gold_label",
56
- data_url="https://dl.fbaipublicfiles.com/glue/data/MNLI.zip",
57
- data_dir="MNLI",
58
- citation=textwrap.dedent(
59
- """\
60
- @InProceedings{N18-1101,
61
- author = "Williams, Adina
62
- and Nangia, Nikita
63
- and Bowman, Samuel",
64
- title = "A Broad-Coverage Challenge Corpus for
65
- Sentence Understanding through Inference",
66
- booktitle = "Proceedings of the 2018 Conference of
67
- the North American Chapter of the
68
- Association for Computational Linguistics:
69
- Human Language Technologies, Volume 1 (Long
70
- Papers)",
71
- year = "2018",
72
- publisher = "Association for Computational Linguistics",
73
- pages = "1112--1122",
74
- location = "New Orleans, Louisiana",
75
- url = "http://aclweb.org/anthology/N18-1101"
76
- }
77
- @article{bowman2015large,
78
- title={A large annotated corpus for learning natural language inference},
79
- author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
80
- journal={arXiv preprint arXiv:1508.05326},
81
- year={2015}
82
- }"""
83
- ),
84
- url="http://www.nyu.edu/projects/bowman/multinli/",
85
- )
86
-
87
-
88
- class GlueConfig(datasets.BuilderConfig):
89
- """BuilderConfig for GLUE."""
90
-
91
- def __init__(
92
- self,
93
- text_features,
94
- label_column,
95
- data_url,
96
- data_dir,
97
- citation,
98
- url,
99
- label_classes=None,
100
- process_label=lambda x: x,
101
- **kwargs,
102
- ):
103
- """BuilderConfig for GLUE.
104
-
105
- Args:
106
- text_features: `dict[string, string]`, map from the name of the feature
107
- dict for each text field to the name of the column in the tsv file
108
- label_column: `string`, name of the column in the tsv file corresponding
109
- to the label
110
- data_url: `string`, url to download the zip file from
111
- data_dir: `string`, the path to the folder containing the tsv files in the
112
- downloaded zip
113
- citation: `string`, citation for the data set
114
- url: `string`, url for information about the data set
115
- label_classes: `list[string]`, the list of classes if the label is
116
- categorical. If not provided, then the label will be of type
117
- `datasets.Value('float32')`.
118
- process_label: `Function[string, any]`, function taking in the raw value
119
- of the label and processing it to the form required by the label feature
120
- **kwargs: keyword arguments forwarded to super.
121
- """
122
- super(GlueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
123
- self.text_features = text_features
124
- self.label_column = label_column
125
- self.label_classes = label_classes
126
- self.data_url = data_url
127
- self.data_dir = data_dir
128
- self.citation = citation
129
- self.url = url
130
- self.process_label = process_label
131
-
132
-
133
- class Glue(datasets.GeneratorBasedBuilder):
134
- """The General Language Understanding Evaluation (GLUE) benchmark."""
135
-
136
- BUILDER_CONFIGS = [
137
- GlueConfig(
138
- name="cola",
139
- description=textwrap.dedent(
140
- """\
141
- The Corpus of Linguistic Acceptability consists of English
142
- acceptability judgments drawn from books and journal articles on
143
- linguistic theory. Each example is a sequence of words annotated
144
- with whether it is a grammatical English sentence."""
145
- ),
146
- text_features={"sentence": "sentence"},
147
- label_classes=["unacceptable", "acceptable"],
148
- label_column="is_acceptable",
149
- data_url="https://dl.fbaipublicfiles.com/glue/data/CoLA.zip",
150
- data_dir="CoLA",
151
- citation=textwrap.dedent(
152
- """\
153
- @article{warstadt2018neural,
154
- title={Neural Network Acceptability Judgments},
155
- author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
156
- journal={arXiv preprint arXiv:1805.12471},
157
- year={2018}
158
- }"""
159
- ),
160
- url="https://nyu-mll.github.io/CoLA/",
161
- ),
162
- GlueConfig(
163
- name="sst2",
164
- description=textwrap.dedent(
165
- """\
166
- The Stanford Sentiment Treebank consists of sentences from movie reviews and
167
- human annotations of their sentiment. The task is to predict the sentiment of a
168
- given sentence. We use the two-way (positive/negative) class split, and use only
169
- sentence-level labels."""
170
- ),
171
- text_features={"sentence": "sentence"},
172
- label_classes=["negative", "positive"],
173
- label_column="label",
174
- data_url="https://dl.fbaipublicfiles.com/glue/data/SST-2.zip",
175
- data_dir="SST-2",
176
- citation=textwrap.dedent(
177
- """\
178
- @inproceedings{socher2013recursive,
179
- title={Recursive deep models for semantic compositionality over a sentiment treebank},
180
- author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
181
- booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
182
- pages={1631--1642},
183
- year={2013}
184
- }"""
185
- ),
186
- url="https://datasets.stanford.edu/sentiment/index.html",
187
- ),
188
- GlueConfig(
189
- name="mrpc",
190
- description=textwrap.dedent(
191
- """\
192
- The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of
193
- sentence pairs automatically extracted from online news sources, with human annotations
194
- for whether the sentences in the pair are semantically equivalent."""
195
- ), # pylint: disable=line-too-long
196
- text_features={"sentence1": "", "sentence2": ""},
197
- label_classes=["not_equivalent", "equivalent"],
198
- label_column="Quality",
199
- data_url="", # MRPC isn't hosted by GLUE.
200
- data_dir="MRPC",
201
- citation=textwrap.dedent(
202
- """\
203
- @inproceedings{dolan2005automatically,
204
- title={Automatically constructing a corpus of sentential paraphrases},
205
- author={Dolan, William B and Brockett, Chris},
206
- booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},
207
- year={2005}
208
- }"""
209
- ),
210
- url="https://www.microsoft.com/en-us/download/details.aspx?id=52398",
211
- ),
212
- GlueConfig(
-     name="qqp",
-     description=textwrap.dedent(
-         """\
-         The Quora Question Pairs dataset is a collection of question pairs from the
-         community question-answering website Quora. The task is to determine whether a
-         pair of questions are semantically equivalent."""
-     ),
-     text_features={
-         "question1": "question1",
-         "question2": "question2",
-     },
-     label_classes=["not_duplicate", "duplicate"],
-     label_column="is_duplicate",
-     data_url="https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip",
-     data_dir="QQP",
-     citation=textwrap.dedent(
-         """\
-         @online{WinNT,
-           author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
-           title = {First Quora Dataset Release: Question Pairs},
-           year = {2017},
-           url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
-           urldate = {2019-04-03}
-         }"""
-     ),
-     url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
- ),
- GlueConfig(
-     name="stsb",
-     description=textwrap.dedent(
-         """\
-         The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
-         sentence pairs drawn from news headlines, video and image captions, and natural
-         language inference data. Each pair is human-annotated with a similarity score
-         from 0 to 5."""
-     ),
-     text_features={
-         "sentence1": "sentence1",
-         "sentence2": "sentence2",
-     },
-     label_column="score",
-     data_url="https://dl.fbaipublicfiles.com/glue/data/STS-B.zip",
-     data_dir="STS-B",
-     citation=textwrap.dedent(
-         """\
-         @article{cer2017semeval,
-           title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},
-           author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},
-           journal={arXiv preprint arXiv:1708.00055},
-           year={2017}
-         }"""
-     ),
-     url="http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
-     process_label=np.float32,
- ),
- GlueConfig(
-     name="mnli",
-     description=textwrap.dedent(
-         """\
-         The Multi-Genre Natural Language Inference Corpus is a crowdsourced
-         collection of sentence pairs with textual entailment annotations. Given a premise sentence
-         and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
-         (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
-         gathered from ten different sources, including transcribed speech, fiction, and government reports.
-         We use the standard test set, for which we obtained private labels from the authors, and evaluate
-         on both the matched (in-domain) and mismatched (cross-domain) sections. We also use and recommend
-         the SNLI corpus as 550k examples of auxiliary training data."""
-     ),
-     **_MNLI_BASE_KWARGS,
- ),
- GlueConfig(
-     name="mnli_mismatched",
-     description=textwrap.dedent(
-         """\
-         The mismatched validation and test splits from MNLI.
-         See the "mnli" BuilderConfig for additional information."""
-     ),
-     **_MNLI_BASE_KWARGS,
- ),
- GlueConfig(
-     name="mnli_matched",
-     description=textwrap.dedent(
-         """\
-         The matched validation and test splits from MNLI.
-         See the "mnli" BuilderConfig for additional information."""
-     ),
-     **_MNLI_BASE_KWARGS,
- ),
- GlueConfig(
-     name="qnli",
-     description=textwrap.dedent(
-         """\
-         The Stanford Question Answering Dataset is a question-answering
-         dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
-         from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
-         convert the task into sentence pair classification by forming a pair between each question and each
-         sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
-         question and the context sentence. The task is to determine whether the context sentence contains
-         the answer to the question. This modified version of the original task removes the requirement that
-         the model select the exact answer, but also removes the simplifying assumptions that the answer
-         is always present in the input and that lexical overlap is a reliable cue."""
-     ),  # pylint: disable=line-too-long
-     text_features={
-         "question": "question",
-         "sentence": "sentence",
-     },
-     label_classes=["entailment", "not_entailment"],
-     label_column="label",
-     data_url="https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip",
-     data_dir="QNLI",
-     citation=textwrap.dedent(
-         """\
-         @article{rajpurkar2016squad,
-           title={Squad: 100,000+ questions for machine comprehension of text},
-           author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
-           journal={arXiv preprint arXiv:1606.05250},
-           year={2016}
-         }"""
-     ),
-     url="https://rajpurkar.github.io/SQuAD-explorer/",
- ),
- GlueConfig(
-     name="rte",
-     description=textwrap.dedent(
-         """\
-         The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
-         entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
-         et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009). Examples are
-         constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
-         for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""
-     ),  # pylint: disable=line-too-long
-     text_features={
-         "sentence1": "sentence1",
-         "sentence2": "sentence2",
-     },
-     label_classes=["entailment", "not_entailment"],
-     label_column="label",
-     data_url="https://dl.fbaipublicfiles.com/glue/data/RTE.zip",
-     data_dir="RTE",
-     citation=textwrap.dedent(
-         """\
-         @inproceedings{dagan2005pascal,
-           title={The PASCAL recognising textual entailment challenge},
-           author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
-           booktitle={Machine Learning Challenges Workshop},
-           pages={177--190},
-           year={2005},
-           organization={Springer}
-         }
-         @inproceedings{bar2006second,
-           title={The second pascal recognising textual entailment challenge},
-           author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
-           booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
-           volume={6},
-           number={1},
-           pages={6--4},
-           year={2006},
-           organization={Venice}
-         }
-         @inproceedings{giampiccolo2007third,
-           title={The third pascal recognizing textual entailment challenge},
-           author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
-           booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
-           pages={1--9},
-           year={2007},
-           organization={Association for Computational Linguistics}
-         }
-         @inproceedings{bentivogli2009fifth,
-           title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
-           author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
-           booktitle={TAC},
-           year={2009}
-         }"""
-     ),
-     url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
- ),
- GlueConfig(
-     name="wnli",
-     description=textwrap.dedent(
-         """\
-         The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
-         in which a system must read a sentence with a pronoun and select the referent of that pronoun from
-         a list of choices. The examples are manually constructed to foil simple statistical methods: Each
-         one is contingent on contextual information provided by a single word or phrase in the sentence.
-         To convert the problem into sentence pair classification, we construct sentence pairs by replacing
-         the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
-         pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
-         new examples derived from fiction books that was shared privately by the authors of the original
-         corpus. While the included training set is balanced between two classes, the test set is imbalanced
-         between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
-         hypotheses are sometimes shared between training and development examples, so if a model memorizes the
-         training examples, it will predict the wrong label on the corresponding development set
-         example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
-         between a model's score on this task and its score on the unconverted original task. We
-         call the converted dataset WNLI (Winograd NLI)."""
-     ),
-     text_features={
-         "sentence1": "sentence1",
-         "sentence2": "sentence2",
-     },
-     label_classes=["not_entailment", "entailment"],
-     label_column="label",
-     data_url="https://dl.fbaipublicfiles.com/glue/data/WNLI.zip",
-     data_dir="WNLI",
-     citation=textwrap.dedent(
-         """\
-         @inproceedings{levesque2012winograd,
-           title={The winograd schema challenge},
-           author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
-           booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
-           year={2012}
-         }"""
-     ),
-     url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
- ),
- GlueConfig(
-     name="ax",
-     description=textwrap.dedent(
-         """\
-         A manually-curated evaluation dataset for fine-grained analysis of
-         system performance on a broad range of linguistic phenomena. This
-         dataset evaluates sentence understanding through Natural Language
-         Inference (NLI) problems. Use a model trained on MultiNLI to produce
-         predictions for this dataset."""
-     ),
-     text_features={
-         "premise": "sentence1",
-         "hypothesis": "sentence2",
-     },
-     label_classes=["entailment", "neutral", "contradiction"],
-     label_column="",  # No label since we only have the test set.
-     data_url="https://dl.fbaipublicfiles.com/glue/data/AX.tsv",  # Direct link to the single diagnostic TSV.
-     data_dir="",  # We are downloading a tsv.
-     citation="",  # The GLUE citation is sufficient.
-     url="https://gluebenchmark.com/diagnostics",
- ),
- ]
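The config names above are what `load_dataset` dispatches on; with the loading script deleted in this commit, the same call is served from the Parquet files added below. A minimal usage sketch (the printed shapes are illustrative):

```python
from datasets import load_dataset

# Load one GLUE config by name; the data now comes from the Parquet
# shards in this repo rather than from the deleted glue.py script.
ds = load_dataset("glue", "sst2")
print(ds)              # DatasetDict with train / validation / test splits
print(ds["train"][0])  # e.g. {'sentence': '...', 'label': 0, 'idx': 0}
```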
-
- def _info(self):
-     features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
-     if self.config.label_classes:
-         features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
-     else:
-         features["label"] = datasets.Value("float32")
-     features["idx"] = datasets.Value("int32")
-     return datasets.DatasetInfo(
-         description=_GLUE_DESCRIPTION,
-         features=datasets.Features(features),
-         homepage=self.config.url,
-         citation=self.config.citation + "\n" + _GLUE_CITATION,
-     )
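The features `_info` builds differ only in the label: configs with `label_classes` get a `ClassLabel`, while stsb (which has none) falls back to a float32 regression target. A sketch of the two resulting schemas:

```python
import datasets

# Classification config (sst2): label_classes -> ClassLabel.
sst2_features = datasets.Features({
    "sentence": datasets.Value("string"),
    "label": datasets.features.ClassLabel(names=["negative", "positive"]),
    "idx": datasets.Value("int32"),
})

# Regression config (stsb): no label_classes -> float32 similarity score.
stsb_features = datasets.Features({
    "sentence1": datasets.Value("string"),
    "sentence2": datasets.Value("string"),
    "label": datasets.Value("float32"),
    "idx": datasets.Value("int32"),
})
```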
-
- def _split_generators(self, dl_manager):
-     if self.config.name == "ax":
-         data_file = dl_manager.download(self.config.data_url)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "data_file": data_file,
-                     "split": "test",
-                 },
-             )
-         ]
-
-     if self.config.name == "mrpc":
-         data_dir = None
-         mrpc_files = dl_manager.download(
-             {
-                 "dev_ids": _MRPC_DEV_IDS,
-                 "train": _MRPC_TRAIN,
-                 "test": _MRPC_TEST,
-             }
-         )
-     else:
-         dl_dir = dl_manager.download_and_extract(self.config.data_url)
-         data_dir = os.path.join(dl_dir, self.config.data_dir)
-         mrpc_files = None
-     train_split = datasets.SplitGenerator(
-         name=datasets.Split.TRAIN,
-         gen_kwargs={
-             "data_file": os.path.join(data_dir or "", "train.tsv"),
-             "split": "train",
-             "mrpc_files": mrpc_files,
-         },
-     )
-     if self.config.name == "mnli":
-         return [
-             train_split,
-             _mnli_split_generator("validation_matched", data_dir, "dev", matched=True),
-             _mnli_split_generator("validation_mismatched", data_dir, "dev", matched=False),
-             _mnli_split_generator("test_matched", data_dir, "test", matched=True),
-             _mnli_split_generator("test_mismatched", data_dir, "test", matched=False),
-         ]
-     elif self.config.name == "mnli_matched":
-         return [
-             _mnli_split_generator("validation", data_dir, "dev", matched=True),
-             _mnli_split_generator("test", data_dir, "test", matched=True),
-         ]
-     elif self.config.name == "mnli_mismatched":
-         return [
-             _mnli_split_generator("validation", data_dir, "dev", matched=False),
-             _mnli_split_generator("test", data_dir, "test", matched=False),
-         ]
-     else:
-         return [
-             train_split,
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "data_file": os.path.join(data_dir or "", "dev.tsv"),
-                     "split": "dev",
-                     "mrpc_files": mrpc_files,
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "data_file": os.path.join(data_dir or "", "test.tsv"),
-                     "split": "test",
-                     "mrpc_files": mrpc_files,
-                 },
-             ),
-         ]
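The branching above is why the MNLI configs expose different split names than the other tasks. A quick check against the Hub (a sketch; requires a network connection):

```python
from datasets import get_dataset_split_names

print(get_dataset_split_names("glue", "mnli"))
# ['train', 'validation_matched', 'validation_mismatched',
#  'test_matched', 'test_mismatched']
print(get_dataset_split_names("glue", "mnli_matched"))
# ['validation', 'test']
```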
-
- def _generate_examples(self, data_file, split, mrpc_files=None):
-     if self.config.name == "mrpc":
-         # We have to prepare the MRPC dataset from the original sources ourselves.
-         examples = self._generate_example_mrpc_files(mrpc_files=mrpc_files, split=split)
-         for example in examples:
-             yield example["idx"], example
-     else:
-         process_label = self.config.process_label
-         label_classes = self.config.label_classes
-
-         # The train and dev files for CoLA are the only tsv files without a
-         # header.
-         is_cola_non_test = self.config.name == "cola" and split != "test"
-
-         with open(data_file, encoding="utf8") as f:
-             reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-             if is_cola_non_test:
-                 reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-
-             for n, row in enumerate(reader):
-                 if is_cola_non_test:
-                     row = {
-                         "sentence": row[3],
-                         "is_acceptable": row[1],
-                     }
-
-                 example = {feat: row[col] for feat, col in self.config.text_features.items()}
-                 example["idx"] = n
-
-                 if self.config.label_column in row:
-                     label = row[self.config.label_column]
-                     # For some tasks, the label is represented as 0 and 1 in the tsv
-                     # files and needs to be cast to an integer to work with the feature.
-                     if label_classes and label not in label_classes:
-                         label = int(label) if label else None
-                     example["label"] = process_label(label)
-                 else:
-                     example["label"] = process_label(-1)
-
-                 # Filter out corrupted rows.
-                 for value in example.values():
-                     if value is None:
-                         break
-                 else:
-                     yield example["idx"], example
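The corrupted-row filter relies on Python's `for`/`else`: the `else` branch runs only when the loop completes without hitting `break`, i.e. when no field is None. A standalone illustration:

```python
rows = [{"a": 1, "b": 2}, {"a": None, "b": 3}]
for row in rows:
    for value in row.values():
        if value is None:
            break  # a None field skips the row entirely
    else:
        print("keep", row)  # reached only when no break fired
# prints: keep {'a': 1, 'b': 2}
```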
-
- def _generate_example_mrpc_files(self, mrpc_files, split):
-     if split == "test":
-         with open(mrpc_files["test"], encoding="utf8") as f:
-             # The first 3 bytes are the utf-8 BOM \xef\xbb\xbf, which messes with
-             # the Quality key.
-             f.seek(3)
-             reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-             for n, row in enumerate(reader):
-                 yield {
-                     "sentence1": row["#1 String"],
-                     "sentence2": row["#2 String"],
-                     "label": int(row["Quality"]),
-                     "idx": n,
-                 }
-     else:
-         with open(mrpc_files["dev_ids"], encoding="utf8") as f:
-             reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-             dev_ids = [[row[0], row[1]] for row in reader]
-         with open(mrpc_files["train"], encoding="utf8") as f:
-             # The first 3 bytes are the utf-8 BOM \xef\xbb\xbf, which messes with
-             # the Quality key.
-             f.seek(3)
-             reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-             for n, row in enumerate(reader):
-                 is_row_in_dev = [row["#1 ID"], row["#2 ID"]] in dev_ids
-                 if is_row_in_dev == (split == "dev"):
-                     yield {
-                         "sentence1": row["#1 String"],
-                         "sentence2": row["#2 String"],
-                         "label": int(row["Quality"]),
-                         "idx": n,
-                     }
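The `f.seek(3)` calls skip the three-byte UTF-8 BOM so that `csv.DictReader` sees `Quality` rather than `\ufeffQuality` as the first header key. Opening with the `utf-8-sig` codec achieves the same thing without the magic number (a sketch; the local file path is hypothetical):

```python
import csv

# "utf-8-sig" strips a leading UTF-8 BOM automatically, so no manual
# f.seek(3) is required before reading the header row.
with open("msr_paraphrase_train.txt", encoding="utf-8-sig") as f:
    reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
    first_row = next(reader)
    assert "Quality" in first_row  # the key no longer carries the BOM
```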
-
-
- def _mnli_split_generator(name, data_dir, split, matched):
-     return datasets.SplitGenerator(
-         name=name,
-         gen_kwargs={
-             "data_file": os.path.join(data_dir, "%s_%s.tsv" % (split, "matched" if matched else "mismatched")),
-             "split": split,
-             "mrpc_files": None,
-         },
-     )
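For clarity, the helper resolves to four TSV paths for the full `mnli` config (a quick illustration, assuming `data_dir="MNLI"`):

```python
import os

for split in ("dev", "test"):
    for section in ("matched", "mismatched"):
        print(os.path.join("MNLI", "%s_%s.tsv" % (split, section)))
# MNLI/dev_matched.tsv
# MNLI/dev_mismatched.tsv
# MNLI/test_matched.tsv
# MNLI/test_mismatched.tsv
```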
mnli/test_matched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a330c4f2aeb0bc92f1b4b133fbbaf51bf9c7d0f5cac3d06f49ef63af47dbb822
+ size 1220119
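Each `ADDED` entry here is a Git LFS pointer file, not the Parquet data itself: three `key value` lines giving the spec version, the SHA-256 of the real file, and its size in bytes. A minimal parser for the format, using the pointer above as input:

```python
def parse_lfs_pointer(text: str) -> dict:
    # Each line of an LFS pointer is "key value"; split on the first space.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"oid": fields["oid"], "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:a330c4f2aeb0bc92f1b4b133fbbaf51bf9c7d0f5cac3d06f49ef63af47dbb822
size 1220119"""
print(parse_lfs_pointer(pointer))
# {'oid': 'sha256:a330c4f2...', 'size': 1220119}
```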
mnli/test_mismatched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5078398d5c83d183578b1bdafe94e4491ed28ad1cf8d98ee8846afcec651f16
+ size 1257857
mnli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49a4a5508b89b8fed2c6e81d2c47d00f4759050a7048c6cc5d95d31122ced3c1
+ size 52224361
mnli/validation_matched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f918c09d9c35446b8e8f06a5672f8ab704e2897fecbf52e2e154141f3d7c421
+ size 1214936
mnli/validation_mismatched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04aba92823a954be36fe1b69b61eed334c9eb1009daba0dd79f69d77b87c535c
+ size 1251152
mnli_matched/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a330c4f2aeb0bc92f1b4b133fbbaf51bf9c7d0f5cac3d06f49ef63af47dbb822
+ size 1220119
mnli_matched/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f918c09d9c35446b8e8f06a5672f8ab704e2897fecbf52e2e154141f3d7c421
+ size 1214936
mnli_mismatched/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5078398d5c83d183578b1bdafe94e4491ed28ad1cf8d98ee8846afcec651f16
+ size 1257857
mnli_mismatched/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04aba92823a954be36fe1b69b61eed334c9eb1009daba0dd79f69d77b87c535c
+ size 1251152
mrpc/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a623ed1cbdf445b11f8e249acbf649d7d3a5ee58c918554c40cbd8307e488693
+ size 308441
mrpc/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61fd41301e0e244b0420c4350a170c8e7cf64740335fc875a4af2d79af0df0af
+ size 649281
mrpc/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33c007dbf5bfa8463d87a13e6226df8c0fcf2596c2cd39d0f3bb79754e00f50f
+ size 75678
qnli/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f39520cd07925c9784e4a7f1f7aed8f17f136039b8498f7ad07c7bf13d65ba83
+ size 877345
qnli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ebc7cb70a5bbde0b0336c3d51f31bb4df4673e908e8874b090b52169b1365c6c
+ size 17528917
qnli/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e69311b81dc65589286091d9905a27617a90436dd215c7a59832fa8f4f336169
+ size 872062
qqp/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95d5d1efcfa3ff7e090565e98085770b3497aad8dbcf12996412b23d2fb669e8
+ size 36694152
qqp/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d6f02e643f7c36e9a4f7d4971a5ee9bd74063a319452fe6c87850c739774cd7
+ size 33558839
qqp/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efd86a539c412d74874ee451573d7bd142f56c47fe36de033b9f367d8bb0fa71
+ size 3729274
rte/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f44aadbfb8bbb7a64ba0674bd26ff77b66e88fdf7a6d64255a5ba6ae9057383
+ size 621413
rte/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6252ab17015d718f6de1effe0980f7b158df63e3d16207cd8bd396b608e5147
+ size 583976
rte/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb2aa2e04f551133ba663617a15ae133dc22b0f6a969bc0629b5ea6003ee9cf8
+ size 69020
sst2/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9d23cf0067211d2baf018328b507f5153fb6704d75117295a8bda47c7adccb1
+ size 147793
sst2/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66a253e67968acfabcbe49dbe9da964b42ac1c851c40ab760e8c8942efdb3229
+ size 3110468
sst2/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1371f3b3a7b0bcefa8388799a9359dc3ce76c349cc0079507a7991364fd2a9b
+ size 72819
stsb/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04fa2561f1ff3c395cf8980e3eed5d133c194abf636d5e1870d765c861087bd9
+ size 114296
stsb/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbd93bbb988fd18437e02185fe3b2bd9a18350376c392e7820de9df1b247ed1f
+ size 502065
stsb/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:152de7cf1fa34ee4df1c243bd209b02ade21a1d5c4fb3b7da5240f78e4000aa9
+ size 150622
wnli/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:766d3754c46a80f3275cb81a32ee6b7b49176fa8c1ef85ea92a4a3676510b902
+ size 13620
wnli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40f4c0c60db68addeda8e9cbe25e6344cd99d5bbb80125535994a9a3141ee0a9
+ size 38835
wnli/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:880037e45e03df868d5799ca21dc03f3a6378f0adf3c01c7bfc46b94fa61f1cb
+ size 11067
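With the conversion done, the Parquet shards can also be read directly, bypassing the datasets library entirely (a sketch; `hf://` paths assume pandas with the `huggingface_hub` fsspec integration installed, and the repo path is assumed to be `datasets/glue`):

```python
import pandas as pd

# Read one shard straight from the Hub; columns follow the feature order
# built by _info: text features, then label, then idx.
df = pd.read_parquet("hf://datasets/glue/wnli/validation-00000-of-00001.parquet")
print(df.columns.tolist())  # ['sentence1', 'sentence2', 'label', 'idx']
print(len(df))              # 71 examples in the WNLI validation split
```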