albertvillanova (HF staff) committed
Commit c8b4538
1 Parent(s): cb2099c

Convert dataset to Parquet


Convert dataset to Parquet.
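
As a quick sanity check that the converted data loads, a minimal sketch (assuming the `datasets` library is installed; `load_dataset` is its standard entry point):

```python
from datasets import load_dataset

# Load the cola config; after this conversion the Hub can serve the
# train/validation/test splits from the committed Parquet shards.
cola = load_dataset("glue", "cola")
print(cola["train"][0])  # {'sentence': ..., 'label': ..., 'idx': ...}
```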

README.md CHANGED
@@ -23,133 +23,69 @@ task_ids:
  - text-scoring
  paperswithcode_id: glue
  pretty_name: GLUE (General Language Understanding Evaluation benchmark)
+ config_names:
+ - ax
+ - cola
+ - mnli
+ - mnli_matched
+ - mnli_mismatched
+ - mrpc
+ - qnli
+ - qqp
+ - rte
+ - sst2
+ - stsb
+ - wnli
  tags:
  - qa-nli
  - coreference-nli
  - paraphrase-identification
  dataset_info:
- - config_name: cola
- features:
- - name: sentence
- dtype: string
- - name: label
- dtype:
- class_label:
- names:
- '0': unacceptable
- '1': acceptable
- - name: idx
- dtype: int32
- splits:
- - name: test
- num_bytes: 61049
- num_examples: 1063
- - name: train
- num_bytes: 489149
- num_examples: 8551
- - name: validation
- num_bytes: 60850
- num_examples: 1043
- download_size: 376971
- dataset_size: 611048
- - config_name: sst2
- features:
- - name: sentence
- dtype: string
- - name: label
- dtype:
- class_label:
- names:
- '0': negative
- '1': positive
- - name: idx
- dtype: int32
- splits:
- - name: test
- num_bytes: 217556
- num_examples: 1821
- - name: train
- num_bytes: 4715283
- num_examples: 67349
- - name: validation
- num_bytes: 106692
- num_examples: 872
- download_size: 7439277
- dataset_size: 5039531
- - config_name: mrpc
+ - config_name: ax
  features:
- - name: sentence1
+ - name: premise
  dtype: string
- - name: sentence2
+ - name: hypothesis
  dtype: string
  - name: label
  dtype:
  class_label:
  names:
- '0': not_equivalent
- '1': equivalent
+ '0': entailment
+ '1': neutral
+ '2': contradiction
  - name: idx
  dtype: int32
  splits:
  - name: test
- num_bytes: 443498
- num_examples: 1725
- - name: train
- num_bytes: 946146
- num_examples: 3668
- - name: validation
- num_bytes: 106142
- num_examples: 408
- download_size: 1494541
- dataset_size: 1495786
- - config_name: qqp
+ num_bytes: 238392
+ num_examples: 1104
+ download_size: 222257
+ dataset_size: 238392
+ - config_name: cola
  features:
- - name: question1
- dtype: string
- - name: question2
+ - name: sentence
  dtype: string
  - name: label
  dtype:
  class_label:
  names:
- '0': not_duplicate
- '1': duplicate
+ '0': unacceptable
+ '1': acceptable
  - name: idx
  dtype: int32
  splits:
  - name: train
- num_bytes: 50901116
- num_examples: 363846
+ num_bytes: 484869
+ num_examples: 8551
  - name: validation
- num_bytes: 5653794
- num_examples: 40430
- - name: test
- num_bytes: 55171431
- num_examples: 390965
- download_size: 41696084
- dataset_size: 111726341
- - config_name: stsb
- features:
- - name: sentence1
- dtype: string
- - name: sentence2
- dtype: string
- - name: label
- dtype: float32
- - name: idx
- dtype: int32
- splits:
+ num_bytes: 60322
+ num_examples: 1043
  - name: test
- num_bytes: 170847
- num_examples: 1379
- - name: train
- num_bytes: 758394
- num_examples: 5749
- - name: validation
- num_bytes: 217012
- num_examples: 1500
- download_size: 802872
- dataset_size: 1146253
+ num_bytes: 60513
+ num_examples: 1063
+ download_size: 326394
+ dataset_size: 605704
  - config_name: mnli
  features:
  - name: premise
@@ -183,7 +119,7 @@ dataset_info:
  num_examples: 9832
  download_size: 312783507
  dataset_size: 82472081
- - config_name: mnli_mismatched
+ - config_name: mnli_matched
  features:
  - name: premise
  dtype: string
@@ -200,14 +136,14 @@ dataset_info:
  dtype: int32
  splits:
  - name: test
- num_bytes: 1956866
- num_examples: 9847
+ num_bytes: 1854787
+ num_examples: 9796
  - name: validation
- num_bytes: 1955384
- num_examples: 9832
+ num_bytes: 1839926
+ num_examples: 9815
  download_size: 312783507
- dataset_size: 3912250
- - config_name: mnli_matched
+ dataset_size: 3694713
+ - config_name: mnli_mismatched
  features:
  - name: premise
  dtype: string
@@ -224,13 +160,39 @@ dataset_info:
  dtype: int32
  splits:
  - name: test
- num_bytes: 1854787
- num_examples: 9796
+ num_bytes: 1956866
+ num_examples: 9847
  - name: validation
- num_bytes: 1839926
- num_examples: 9815
+ num_bytes: 1955384
+ num_examples: 9832
  download_size: 312783507
- dataset_size: 3694713
+ dataset_size: 3912250
+ - config_name: mrpc
+ features:
+ - name: sentence1
+ dtype: string
+ - name: sentence2
+ dtype: string
+ - name: label
+ dtype:
+ class_label:
+ names:
+ '0': not_equivalent
+ '1': equivalent
+ - name: idx
+ dtype: int32
+ splits:
+ - name: test
+ num_bytes: 443498
+ num_examples: 1725
+ - name: train
+ num_bytes: 946146
+ num_examples: 3668
+ - name: validation
+ num_bytes: 106142
+ num_examples: 408
+ download_size: 1494541
+ dataset_size: 1495786
  - config_name: qnli
  features:
  - name: question
@@ -257,6 +219,32 @@ dataset_info:
  num_examples: 5463
  download_size: 10627589
  dataset_size: 28426167
+ - config_name: qqp
+ features:
+ - name: question1
+ dtype: string
+ - name: question2
+ dtype: string
+ - name: label
+ dtype:
+ class_label:
+ names:
+ '0': not_duplicate
+ '1': duplicate
+ - name: idx
+ dtype: int32
+ splits:
+ - name: train
+ num_bytes: 50901116
+ num_examples: 363846
+ - name: validation
+ num_bytes: 5653794
+ num_examples: 40430
+ - name: test
+ num_bytes: 55171431
+ num_examples: 390965
+ download_size: 41696084
+ dataset_size: 111726341
  - config_name: rte
  features:
  - name: sentence1
@@ -283,6 +271,52 @@ dataset_info:
  num_examples: 277
  download_size: 697150
  dataset_size: 1915735
+ - config_name: sst2
+ features:
+ - name: sentence
+ dtype: string
+ - name: label
+ dtype:
+ class_label:
+ names:
+ '0': negative
+ '1': positive
+ - name: idx
+ dtype: int32
+ splits:
+ - name: test
+ num_bytes: 217556
+ num_examples: 1821
+ - name: train
+ num_bytes: 4715283
+ num_examples: 67349
+ - name: validation
+ num_bytes: 106692
+ num_examples: 872
+ download_size: 7439277
+ dataset_size: 5039531
+ - config_name: stsb
+ features:
+ - name: sentence1
+ dtype: string
+ - name: sentence2
+ dtype: string
+ - name: label
+ dtype: float32
+ - name: idx
+ dtype: int32
+ splits:
+ - name: test
+ num_bytes: 170847
+ num_examples: 1379
+ - name: train
+ num_bytes: 758394
+ num_examples: 5749
+ - name: validation
+ num_bytes: 217012
+ num_examples: 1500
+ download_size: 802872
+ dataset_size: 1146253
  - config_name: wnli
  features:
  - name: sentence1
@@ -309,27 +343,15 @@ dataset_info:
  num_examples: 71
  download_size: 28999
  dataset_size: 157724
- - config_name: ax
- features:
- - name: premise
- dtype: string
- - name: hypothesis
- dtype: string
- - name: label
- dtype:
- class_label:
- names:
- '0': entailment
- '1': neutral
- '2': contradiction
- - name: idx
- dtype: int32
- splits:
- - name: test
- num_bytes: 238392
- num_examples: 1104
- download_size: 222257
- dataset_size: 238392
+ configs:
+ - config_name: cola
+ data_files:
+ - split: train
+ path: cola/train-*
+ - split: validation
+ path: cola/validation-*
+ - split: test
+ path: cola/test-*
  train-eval-index:
  - config: cola
  task: text-classification
@@ -439,19 +461,6 @@ train-eval-index:
  sentence1: text1
  sentence2: text2
  label: target
- config_names:
- - ax
- - cola
- - mnli
- - mnli_matched
- - mnli_mismatched
- - mrpc
- - qnli
- - qqp
- - rte
- - sst2
- - stsb
- - wnli
  ---

  # Dataset Card for GLUE
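
The new `configs:` block above maps each cola split to its Parquet shards by glob pattern. A sketch of the same mapping used with the generic `parquet` builder (assuming the shards have been downloaded so the `cola/` paths resolve locally):

```python
from datasets import load_dataset

# Mirrors the split -> path patterns from the `configs:` section.
data_files = {
    "train": "cola/train-*",
    "validation": "cola/validation-*",
    "test": "cola/test-*",
}
cola = load_dataset("parquet", data_files=data_files)
```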
cola/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c4d526b6f49f432621de43569f9ecf6af41f639baaf4a9d821b95d745def61d
+ size 37719
cola/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e7538afa2000e63f5343f16a758d75c452661a384208399d2035cd2fce45c33
+ size 251124
cola/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c14b7219a7d9f9fe3dd291fd000f6623ee413805eb108c9c49578ed50873e4ba
+ size 37551
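
Each `*.parquet` entry above is stored as a Git LFS pointer (spec version, sha256 oid, byte size) rather than the data itself. An illustrative helper (not part of this repo) for checking a downloaded shard against its pointer fields:

```python
import hashlib

def matches_lfs_pointer(path: str, oid_sha256: str, size: int) -> bool:
    """Compare a local file against the oid/size fields of an LFS pointer."""
    digest = hashlib.sha256()
    total = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            total += len(chunk)
    return digest.hexdigest() == oid_sha256 and total == size

# Example with the cola/test pointer above:
# matches_lfs_pointer("cola/test-00000-of-00001.parquet",
#                     "3c4d526b6f49f432621de43569f9ecf6af41f639baaf4a9d821b95d745def61d",
#                     37719)
```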
dataset_infos.json CHANGED
@@ -1 +1,862 @@
- {"cola": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{warstadt2018neural,\n title={Neural Network Acceptability Judgments},\n author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1805.12471},\n year={2018}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://nyu-mll.github.io/CoLA/", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["unacceptable", "acceptable"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "cola", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 61049, "num_examples": 1063, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 489149, "num_examples": 8551, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 60850, "num_examples": 1043, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/CoLA.zip": {"num_bytes": 376971, "checksum": "f212fcd832b8f7b435fb991f101abf89f96b933ab400603bf198960dfc32cbff"}}, "download_size": 376971, "post_processing_size": null, "dataset_size": 611048, "size_in_bytes": 988019}, "sst2": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{socher2013recursive,\n title={Recursive deep models for semantic compositionality over a sentiment treebank},\n author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},\n booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},\n pages={1631--1642},\n year={2013}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://nlp.stanford.edu/sentiment/index.html", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["negative", "positive"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "sst2", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 217556, "num_examples": 1821, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 4715283, "num_examples": 67349, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 106692, "num_examples": 872, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/SST-2.zip": {"num_bytes": 7439277, "checksum": "d67e16fb55739c1b32cdce9877596db1c127dc322d93c082281f64057c16deaa"}}, "download_size": 7439277, "post_processing_size": null, "dataset_size": 5039531, "size_in_bytes": 12478808}, "mrpc": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{dolan2005automatically,\n title={Automatically constructing a corpus of sentential paraphrases},\n author={Dolan, William B and Brockett, Chris},\n booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},\n year={2005}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://www.microsoft.com/en-us/download/details.aspx?id=52398", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_equivalent", "equivalent"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mrpc", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 443498, "num_examples": 1725, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 946146, "num_examples": 3668, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 106142, "num_examples": 408, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/mrpc_dev_ids.tsv": {"num_bytes": 6222, "checksum": "971d7767d81b997fd9060ade0ec23c4fc31cbb226a55d1bd4a1bac474eb81dc7"}, "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt": {"num_bytes": 1047044, "checksum": "60a9b09084528f0673eedee2b69cb941920f0b8cd0eeccefc464a98768457f89"}, "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt": {"num_bytes": 441275, "checksum": "a04e271090879aaba6423d65b94950c089298587d9c084bf9cd7439bd785f784"}}, "download_size": 1494541, "post_processing_size": null, "dataset_size": 1495786, "size_in_bytes": 2990327}, "qqp": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@online{WinNT,\n author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},\n title = {First Quora Dataset Release: Question Pairs},\n year = {2017},\n url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},\n urldate = {2019-04-03}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n", "homepage": "https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs", "license": "", "features": {"question1": {"dtype": "string", "id": null, "_type": "Value"}, "question2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_duplicate", "duplicate"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "qqp", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 50901116, "num_examples": 363846, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 5653794, "num_examples": 40430, "dataset_name": "glue"}, "test": {"name": "test", "num_bytes": 55171431, "num_examples": 390965, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip": {"num_bytes": 41696084, "checksum": 
"40e7c862c04eb26ee04b67fd900e76c45c6ba8e6d8fab4f8f1f8072a1a3fbae0"}}, "download_size": 41696084, "post_processing_size": null, "dataset_size": 111726341, "size_in_bytes": 153422425}, "stsb": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{cer2017semeval,\n title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},\n author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},\n journal={arXiv preprint arXiv:1708.00055},\n year={2017}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "float32", "id": null, "_type": "Value"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "stsb", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 170847, "num_examples": 1379, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 758394, "num_examples": 5749, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 217012, "num_examples": 1500, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/STS-B.zip": {"num_bytes": 802872, "checksum": "e60a6393de5a8b5b9bac5020a1554b54e3691f9d600b775bd131e613ac179c85"}}, "download_size": 802872, "post_processing_size": null, "dataset_size": 1146253, "size_in_bytes": 1949125}, "mnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and 
Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test_matched": {"name": "test_matched", "num_bytes": 1854787, "num_examples": 9796, "dataset_name": "glue"}, "test_mismatched": {"name": "test_mismatched", "num_bytes": 1956866, "num_examples": 9847, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 74865118, "num_examples": 392702, "dataset_name": "glue"}, "validation_matched": {"name": "validation_matched", "num_bytes": 1839926, "num_examples": 9815, "dataset_name": "glue"}, "validation_mismatched": {"name": "validation_mismatched", "num_bytes": 1955384, "num_examples": 9832, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "post_processing_size": null, "dataset_size": 82472081, "size_in_bytes": 395255588}, "mnli_mismatched": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli_mismatched", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1956866, "num_examples": 9847, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1955384, "num_examples": 9832, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "post_processing_size": null, "dataset_size": 3912250, "size_in_bytes": 316695757}, "mnli_matched": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli_matched", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1854787, "num_examples": 9796, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1839926, "num_examples": 9815, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "post_processing_size": null, "dataset_size": 3694713, "size_in_bytes": 316478220}, "qnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{rajpurkar2016squad,\n title={Squad: 100,000+ questions for machine comprehension of text},\n author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},\n journal={arXiv preprint arXiv:1606.05250},\n year={2016}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://rajpurkar.github.io/SQuAD-explorer/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "qnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1376516, "num_examples": 5463, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 25677924, "num_examples": 104743, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1371727, "num_examples": 5463, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip": {"num_bytes": 10627589, "checksum": "e634e78627a29adaecd4f955359b22bf5e70f2cbd93b493f2d624138a0c0e5f5"}}, "download_size": 10627589, "post_processing_size": null, "dataset_size": 28426167, "size_in_bytes": 39053756}, "rte": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{dagan2005pascal,\n title={The PASCAL recognising textual entailment challenge},\n author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},\n booktitle={Machine Learning Challenges Workshop},\n pages={177--190},\n year={2005},\n organization={Springer}\n}\n@inproceedings{bar2006second,\n title={The second pascal recognising textual entailment challenge},\n author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},\n booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},\n volume={6},\n number={1},\n pages={6--4},\n year={2006},\n organization={Venice}\n}\n@inproceedings{giampiccolo2007third,\n title={The third pascal recognizing textual entailment challenge},\n author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},\n booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},\n pages={1--9},\n year={2007},\n organization={Association for Computational Linguistics}\n}\n@inproceedings{bentivogli2009fifth,\n title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},\n author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},\n booktitle={TAC},\n year={2009}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://aclweb.org/aclwiki/Recognizing_Textual_Entailment", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "rte", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 975936, "num_examples": 3000, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 848888, "num_examples": 2490, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 90911, "num_examples": 277, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/RTE.zip": {"num_bytes": 697150, "checksum": "6bf86de103ecd335f3441bd43574d23fef87ecc695977a63b82d5efb206556ee"}}, "download_size": 697150, "post_processing_size": null, "dataset_size": 1915735, "size_in_bytes": 2612885}, "wnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{levesque2012winograd,\n title={The winograd schema challenge},\n author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},\n booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},\n year={2012}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_entailment", "entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "wnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 37992, "num_examples": 146, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 107517, "num_examples": 635, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 12215, "num_examples": 71, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/WNLI.zip": {"num_bytes": 28999, "checksum": "ae0e8e4d16f4d46d4a0a566ec7ecceccfd3fbfaa4a7a4b4e02848c0f2561ac46"}}, "download_size": 28999, "post_processing_size": null, "dataset_size": 157724, "size_in_bytes": 186723}, "ax": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://gluebenchmark.com/diagnostics", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "ax", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 238392, "num_examples": 1104, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/AX.tsv": {"num_bytes": 222257, "checksum": "0e13510b1bb14436ff7e2ee82338f0efb0133ecf2e73507a697dc210db3f05fd"}}, "download_size": 222257, "post_processing_size": null, "dataset_size": 238392, "size_in_bytes": 460649}}
1
+ {
2
+ "cola": {
3
+ "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
4
+ "citation": "@article{warstadt2018neural,\n title={Neural Network Acceptability Judgments},\n author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1805.12471},\n year={2018}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
5
+ "homepage": "https://nyu-mll.github.io/CoLA/",
6
+ "license": "",
7
+ "features": {
8
+ "sentence": {
9
+ "dtype": "string",
10
+ "_type": "Value"
11
+ },
12
+ "label": {
13
+ "names": [
14
+ "unacceptable",
15
+ "acceptable"
16
+ ],
17
+ "_type": "ClassLabel"
18
+ },
19
+ "idx": {
20
+ "dtype": "int32",
21
+ "_type": "Value"
22
+ }
23
+ },
24
+ "builder_name": "glue",
25
+ "dataset_name": "glue",
26
+ "config_name": "cola",
27
+ "version": {
28
+ "version_str": "1.0.0",
29
+ "description": "",
30
+ "major": 1,
31
+ "minor": 0,
32
+ "patch": 0
33
+ },
34
+ "splits": {
35
+ "train": {
36
+ "name": "train",
37
+ "num_bytes": 484869,
38
+ "num_examples": 8551,
39
+ "dataset_name": null
40
+ },
41
+ "validation": {
42
+ "name": "validation",
43
+ "num_bytes": 60322,
44
+ "num_examples": 1043,
45
+ "dataset_name": null
46
+ },
47
+ "test": {
48
+ "name": "test",
49
+ "num_bytes": 60513,
50
+ "num_examples": 1063,
51
+ "dataset_name": null
52
+ }
53
+ },
54
+ "download_size": 326394,
55
+ "dataset_size": 605704,
56
+ "size_in_bytes": 932098
57
+ },
58
+ "sst2": {
59
+ "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
60
+ "citation": "@inproceedings{socher2013recursive,\n title={Recursive deep models for semantic compositionality over a sentiment treebank},\n author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},\n booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},\n pages={1631--1642},\n year={2013}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
61
+ "homepage": "https://nlp.stanford.edu/sentiment/index.html",
62
+ "license": "",
63
+ "features": {
64
+ "sentence": {
65
+ "dtype": "string",
66
+ "id": null,
67
+ "_type": "Value"
68
+ },
69
+ "label": {
70
+ "num_classes": 2,
71
+ "names": [
72
+ "negative",
73
+ "positive"
74
+ ],
75
+ "names_file": null,
76
+ "id": null,
77
+ "_type": "ClassLabel"
78
+ },
79
+ "idx": {
80
+ "dtype": "int32",
81
+ "id": null,
82
+ "_type": "Value"
83
+ }
84
+ },
85
+ "post_processed": null,
86
+ "supervised_keys": null,
87
+ "builder_name": "glue",
88
+ "config_name": "sst2",
89
+ "version": {
90
+ "version_str": "1.0.0",
91
+ "description": "",
92
+ "major": 1,
93
+ "minor": 0,
94
+ "patch": 0
95
+ },
96
+ "splits": {
97
+ "test": {
98
+ "name": "test",
99
+ "num_bytes": 217556,
100
+ "num_examples": 1821,
101
+ "dataset_name": "glue"
102
+ },
103
+ "train": {
104
+ "name": "train",
105
+ "num_bytes": 4715283,
106
+ "num_examples": 67349,
107
+ "dataset_name": "glue"
108
+ },
109
+ "validation": {
110
+ "name": "validation",
111
+ "num_bytes": 106692,
112
+ "num_examples": 872,
113
+ "dataset_name": "glue"
114
+ }
115
+ },
116
+ "download_checksums": {
117
+ "https://dl.fbaipublicfiles.com/glue/data/SST-2.zip": {
118
+ "num_bytes": 7439277,
119
+ "checksum": "d67e16fb55739c1b32cdce9877596db1c127dc322d93c082281f64057c16deaa"
120
+ }
121
+ },
122
+ "download_size": 7439277,
123
+ "post_processing_size": null,
124
+ "dataset_size": 5039531,
125
+ "size_in_bytes": 12478808
126
+ },
127
+ "mrpc": {
128
+ "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
129
+ "citation": "@inproceedings{dolan2005automatically,\n title={Automatically constructing a corpus of sentential paraphrases},\n author={Dolan, William B and Brockett, Chris},\n booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},\n year={2005}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
130
+ "homepage": "https://www.microsoft.com/en-us/download/details.aspx?id=52398",
131
+ "license": "",
132
+ "features": {
133
+ "sentence1": {
134
+ "dtype": "string",
135
+ "id": null,
136
+ "_type": "Value"
137
+ },
138
+ "sentence2": {
139
+ "dtype": "string",
140
+ "id": null,
141
+ "_type": "Value"
142
+ },
143
+ "label": {
144
+ "num_classes": 2,
145
+ "names": [
146
+ "not_equivalent",
147
+ "equivalent"
148
+ ],
149
+ "names_file": null,
150
+ "id": null,
151
+ "_type": "ClassLabel"
152
+ },
153
+ "idx": {
154
+ "dtype": "int32",
155
+ "id": null,
156
+ "_type": "Value"
157
+ }
158
+ },
159
+ "post_processed": null,
160
+ "supervised_keys": null,
161
+ "builder_name": "glue",
162
+ "config_name": "mrpc",
163
+ "version": {
164
+ "version_str": "1.0.0",
165
+ "description": "",
166
+ "major": 1,
167
+ "minor": 0,
168
+ "patch": 0
169
+ },
170
+ "splits": {
171
+ "test": {
172
+ "name": "test",
173
+ "num_bytes": 443498,
174
+ "num_examples": 1725,
175
+ "dataset_name": "glue"
176
+ },
177
+ "train": {
178
+ "name": "train",
179
+ "num_bytes": 946146,
180
+ "num_examples": 3668,
181
+ "dataset_name": "glue"
182
+ },
183
+ "validation": {
184
+ "name": "validation",
185
+ "num_bytes": 106142,
186
+ "num_examples": 408,
187
+ "dataset_name": "glue"
188
+ }
189
+ },
190
+ "download_checksums": {
191
+ "https://dl.fbaipublicfiles.com/glue/data/mrpc_dev_ids.tsv": {
192
+ "num_bytes": 6222,
193
+ "checksum": "971d7767d81b997fd9060ade0ec23c4fc31cbb226a55d1bd4a1bac474eb81dc7"
194
+ },
195
+ "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt": {
196
+ "num_bytes": 1047044,
197
+ "checksum": "60a9b09084528f0673eedee2b69cb941920f0b8cd0eeccefc464a98768457f89"
198
+ },
199
+ "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt": {
200
+ "num_bytes": 441275,
201
+ "checksum": "a04e271090879aaba6423d65b94950c089298587d9c084bf9cd7439bd785f784"
202
+ }
203
+ },
204
+ "download_size": 1494541,
205
+ "post_processing_size": null,
206
+ "dataset_size": 1495786,
207
+ "size_in_bytes": 2990327
208
+ },
209
+ "qqp": {
210
+ "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
211
+ "citation": "@online{WinNT,\n author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},\n title = {First Quora Dataset Release: Question Pairs},\n year = {2017},\n url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},\n urldate = {2019-04-03}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
212
+ "homepage": "https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
213
+ "license": "",
214
+ "features": {
215
+ "question1": {
216
+ "dtype": "string",
217
+ "id": null,
218
+ "_type": "Value"
219
+ },
220
+ "question2": {
221
+ "dtype": "string",
222
+ "id": null,
223
+ "_type": "Value"
224
+ },
225
+ "label": {
226
+ "num_classes": 2,
227
+ "names": [
228
+ "not_duplicate",
229
+ "duplicate"
230
+ ],
231
+ "names_file": null,
232
+ "id": null,
233
+ "_type": "ClassLabel"
234
+ },
235
+ "idx": {
236
+ "dtype": "int32",
237
+ "id": null,
238
+ "_type": "Value"
239
+ }
240
+ },
241
+ "post_processed": null,
242
+ "supervised_keys": null,
243
+ "builder_name": "glue",
244
+ "config_name": "qqp",
245
+ "version": {
246
+ "version_str": "1.0.0",
247
+ "description": "",
248
+ "major": 1,
249
+ "minor": 0,
250
+ "patch": 0
251
+ },
252
+ "splits": {
253
+ "train": {
254
+ "name": "train",
255
+ "num_bytes": 50901116,
256
+ "num_examples": 363846,
257
+ "dataset_name": "glue"
258
+ },
259
+ "validation": {
260
+ "name": "validation",
261
+ "num_bytes": 5653794,
262
+ "num_examples": 40430,
263
+ "dataset_name": "glue"
264
+ },
265
+ "test": {
266
+ "name": "test",
267
+ "num_bytes": 55171431,
268
+ "num_examples": 390965,
269
+ "dataset_name": "glue"
270
+ }
271
+ },
272
+ "download_checksums": {
273
+ "https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip": {
274
+ "num_bytes": 41696084,
275
+ "checksum": "40e7c862c04eb26ee04b67fd900e76c45c6ba8e6d8fab4f8f1f8072a1a3fbae0"
276
+ }
277
+ },
278
+ "download_size": 41696084,
279
+ "post_processing_size": null,
280
+ "dataset_size": 111726341,
281
+ "size_in_bytes": 153422425
282
+ },
283
+ "stsb": {
284
+ "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
285
+ "citation": "@article{cer2017semeval,\n title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},\n author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},\n journal={arXiv preprint arXiv:1708.00055},\n year={2017}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
286
+ "homepage": "http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
287
+ "license": "",
288
+ "features": {
289
+ "sentence1": {
290
+ "dtype": "string",
291
+ "id": null,
292
+ "_type": "Value"
293
+ },
294
+ "sentence2": {
295
+ "dtype": "string",
296
+ "id": null,
297
+ "_type": "Value"
298
+ },
299
+ "label": {
300
+ "dtype": "float32",
301
+ "id": null,
302
+ "_type": "Value"
303
+ },
304
+ "idx": {
305
+ "dtype": "int32",
306
+ "id": null,
307
+ "_type": "Value"
308
+ }
309
+ },
310
+ "post_processed": null,
311
+ "supervised_keys": null,
312
+ "builder_name": "glue",
313
+ "config_name": "stsb",
314
+ "version": {
315
+ "version_str": "1.0.0",
316
+ "description": "",
317
+ "major": 1,
318
+ "minor": 0,
319
+ "patch": 0
320
+ },
321
+ "splits": {
322
+ "test": {
323
+ "name": "test",
324
+ "num_bytes": 170847,
325
+ "num_examples": 1379,
326
+ "dataset_name": "glue"
327
+ },
328
+ "train": {
329
+ "name": "train",
330
+ "num_bytes": 758394,
331
+ "num_examples": 5749,
332
+ "dataset_name": "glue"
333
+ },
334
+ "validation": {
335
+ "name": "validation",
336
+ "num_bytes": 217012,
337
+ "num_examples": 1500,
338
+ "dataset_name": "glue"
339
+ }
340
+ },
341
+ "download_checksums": {
342
+ "https://dl.fbaipublicfiles.com/glue/data/STS-B.zip": {
343
+ "num_bytes": 802872,
344
+ "checksum": "e60a6393de5a8b5b9bac5020a1554b54e3691f9d600b775bd131e613ac179c85"
345
+ }
346
+ },
347
+ "download_size": 802872,
348
+ "post_processing_size": null,
349
+ "dataset_size": 1146253,
350
+ "size_in_bytes": 1949125
351
+ },
+   "mnli": {
+     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
+     "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
+     "homepage": "http://www.nyu.edu/projects/bowman/multinli/",
+     "license": "",
+     "features": {
+       "premise": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "hypothesis": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "label": {
+         "num_classes": 3,
+         "names": [
+           "entailment",
+           "neutral",
+           "contradiction"
+         ],
+         "names_file": null,
+         "id": null,
+         "_type": "ClassLabel"
+       },
+       "idx": {
+         "dtype": "int32",
+         "id": null,
+         "_type": "Value"
+       }
+     },
+     "post_processed": null,
+     "supervised_keys": null,
+     "builder_name": "glue",
+     "config_name": "mnli",
+     "version": {
+       "version_str": "1.0.0",
+       "description": "",
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "test_matched": {
+         "name": "test_matched",
+         "num_bytes": 1854787,
+         "num_examples": 9796,
+         "dataset_name": "glue"
+       },
+       "test_mismatched": {
+         "name": "test_mismatched",
+         "num_bytes": 1956866,
+         "num_examples": 9847,
+         "dataset_name": "glue"
+       },
+       "train": {
+         "name": "train",
+         "num_bytes": 74865118,
+         "num_examples": 392702,
+         "dataset_name": "glue"
+       },
+       "validation_matched": {
+         "name": "validation_matched",
+         "num_bytes": 1839926,
+         "num_examples": 9815,
+         "dataset_name": "glue"
+       },
+       "validation_mismatched": {
+         "name": "validation_mismatched",
+         "num_bytes": 1955384,
+         "num_examples": 9832,
+         "dataset_name": "glue"
+       }
+     },
+     "download_checksums": {
+       "https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {
+         "num_bytes": 312783507,
+         "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"
+       }
+     },
+     "download_size": 312783507,
+     "post_processing_size": null,
+     "dataset_size": 82472081,
+     "size_in_bytes": 395255588
+   },
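Unlike the single-genre configs, mnli ships matched and mismatched evaluation splits instead of plain validation/test. A short sketch of loading it with the `datasets` library and checking splits against the `num_examples` recorded above:

```python
from datasets import load_dataset

ds = load_dataset("glue", "mnli")

# Split names mirror the "splits" mapping in the metadata.
assert ds["train"].num_rows == 392702
assert ds["validation_matched"].num_rows == 9815
assert ds["validation_mismatched"].num_rows == 9832

print(ds["validation_matched"][0])  # keys: premise, hypothesis, label, idx
```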
+   "mnli_mismatched": {
+     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
+     "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
+     "homepage": "http://www.nyu.edu/projects/bowman/multinli/",
+     "license": "",
+     "features": {
+       "premise": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "hypothesis": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "label": {
+         "num_classes": 3,
+         "names": [
+           "entailment",
+           "neutral",
+           "contradiction"
+         ],
+         "names_file": null,
+         "id": null,
+         "_type": "ClassLabel"
+       },
+       "idx": {
+         "dtype": "int32",
+         "id": null,
+         "_type": "Value"
+       }
+     },
+     "post_processed": null,
+     "supervised_keys": null,
+     "builder_name": "glue",
+     "config_name": "mnli_mismatched",
+     "version": {
+       "version_str": "1.0.0",
+       "description": "",
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "test": {
+         "name": "test",
+         "num_bytes": 1956866,
+         "num_examples": 9847,
+         "dataset_name": "glue"
+       },
+       "validation": {
+         "name": "validation",
+         "num_bytes": 1955384,
+         "num_examples": 9832,
+         "dataset_name": "glue"
+       }
+     },
+     "download_checksums": {
+       "https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {
+         "num_bytes": 312783507,
+         "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"
+       }
+     },
+     "download_size": 312783507,
+     "post_processing_size": null,
+     "dataset_size": 3912250,
+     "size_in_bytes": 316695757
+   },
+   "mnli_matched": {
+     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
+     "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
+     "homepage": "http://www.nyu.edu/projects/bowman/multinli/",
+     "license": "",
+     "features": {
+       "premise": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "hypothesis": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "label": {
+         "num_classes": 3,
+         "names": [
+           "entailment",
+           "neutral",
+           "contradiction"
+         ],
+         "names_file": null,
+         "id": null,
+         "_type": "ClassLabel"
+       },
+       "idx": {
+         "dtype": "int32",
+         "id": null,
+         "_type": "Value"
+       }
+     },
+     "post_processed": null,
+     "supervised_keys": null,
+     "builder_name": "glue",
+     "config_name": "mnli_matched",
+     "version": {
+       "version_str": "1.0.0",
+       "description": "",
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "test": {
+         "name": "test",
+         "num_bytes": 1854787,
+         "num_examples": 9796,
+         "dataset_name": "glue"
+       },
+       "validation": {
+         "name": "validation",
+         "num_bytes": 1839926,
+         "num_examples": 9815,
+         "dataset_name": "glue"
+       }
+     },
+     "download_checksums": {
+       "https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {
+         "num_bytes": 312783507,
+         "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"
+       }
+     },
+     "download_size": 312783507,
+     "post_processing_size": null,
+     "dataset_size": 3694713,
+     "size_in_bytes": 316478220
+   },
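mnli_mismatched and mnli_matched are thin views that expose only the corresponding validation and test splits, with the same three-way `ClassLabel` as mnli. A minimal sketch of mapping the stored integer labels back to the names listed in the features block:

```python
from datasets import load_dataset

ds = load_dataset("glue", "mnli_matched", split="validation")
label = ds.features["label"]

# ClassLabel stores ints; int2str recovers the string names.
print(label.names)                    # ['entailment', 'neutral', 'contradiction']
print(label.int2str(ds[0]["label"]))  # e.g. 'neutral'
```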
+   "qnli": {
+     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
+     "citation": "@article{rajpurkar2016squad,\n title={Squad: 100,000+ questions for machine comprehension of text},\n author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},\n journal={arXiv preprint arXiv:1606.05250},\n year={2016}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
+     "homepage": "https://rajpurkar.github.io/SQuAD-explorer/",
+     "license": "",
+     "features": {
+       "question": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "sentence": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "label": {
+         "num_classes": 2,
+         "names": [
+           "entailment",
+           "not_entailment"
+         ],
+         "names_file": null,
+         "id": null,
+         "_type": "ClassLabel"
+       },
+       "idx": {
+         "dtype": "int32",
+         "id": null,
+         "_type": "Value"
+       }
+     },
+     "post_processed": null,
+     "supervised_keys": null,
+     "builder_name": "glue",
+     "config_name": "qnli",
+     "version": {
+       "version_str": "1.0.0",
+       "description": "",
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "test": {
+         "name": "test",
+         "num_bytes": 1376516,
+         "num_examples": 5463,
+         "dataset_name": "glue"
+       },
+       "train": {
+         "name": "train",
+         "num_bytes": 25677924,
+         "num_examples": 104743,
+         "dataset_name": "glue"
+       },
+       "validation": {
+         "name": "validation",
+         "num_bytes": 1371727,
+         "num_examples": 5463,
+         "dataset_name": "glue"
+       }
+     },
+     "download_checksums": {
+       "https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip": {
+         "num_bytes": 10627589,
+         "checksum": "e634e78627a29adaecd4f955359b22bf5e70f2cbd93b493f2d624138a0c0e5f5"
+       }
+     },
+     "download_size": 10627589,
+     "post_processing_size": null,
+     "dataset_size": 28426167,
+     "size_in_bytes": 39053756
+   },
+   "rte": {
+     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
+     "citation": "@inproceedings{dagan2005pascal,\n title={The PASCAL recognising textual entailment challenge},\n author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},\n booktitle={Machine Learning Challenges Workshop},\n pages={177--190},\n year={2005},\n organization={Springer}\n}\n@inproceedings{bar2006second,\n title={The second pascal recognising textual entailment challenge},\n author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},\n booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},\n volume={6},\n number={1},\n pages={6--4},\n year={2006},\n organization={Venice}\n}\n@inproceedings{giampiccolo2007third,\n title={The third pascal recognizing textual entailment challenge},\n author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},\n booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},\n pages={1--9},\n year={2007},\n organization={Association for Computational Linguistics}\n}\n@inproceedings{bentivogli2009fifth,\n title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},\n author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},\n booktitle={TAC},\n year={2009}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
+     "homepage": "https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
+     "license": "",
+     "features": {
+       "sentence1": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "sentence2": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "label": {
+         "num_classes": 2,
+         "names": [
+           "entailment",
+           "not_entailment"
+         ],
+         "names_file": null,
+         "id": null,
+         "_type": "ClassLabel"
+       },
+       "idx": {
+         "dtype": "int32",
+         "id": null,
+         "_type": "Value"
+       }
+     },
+     "post_processed": null,
+     "supervised_keys": null,
+     "builder_name": "glue",
+     "config_name": "rte",
+     "version": {
+       "version_str": "1.0.0",
+       "description": "",
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "test": {
+         "name": "test",
+         "num_bytes": 975936,
+         "num_examples": 3000,
+         "dataset_name": "glue"
+       },
+       "train": {
+         "name": "train",
+         "num_bytes": 848888,
+         "num_examples": 2490,
+         "dataset_name": "glue"
+       },
+       "validation": {
+         "name": "validation",
+         "num_bytes": 90911,
+         "num_examples": 277,
+         "dataset_name": "glue"
+       }
+     },
+     "download_checksums": {
+       "https://dl.fbaipublicfiles.com/glue/data/RTE.zip": {
+         "num_bytes": 697150,
+         "checksum": "6bf86de103ecd335f3441bd43574d23fef87ecc695977a63b82d5efb206556ee"
+       }
+     },
+     "download_size": 697150,
+     "post_processing_size": null,
+     "dataset_size": 1915735,
+     "size_in_bytes": 2612885
+   },
+   "wnli": {
+     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
+     "citation": "@inproceedings{levesque2012winograd,\n title={The winograd schema challenge},\n author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},\n booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},\n year={2012}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
+     "homepage": "https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
+     "license": "",
+     "features": {
+       "sentence1": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "sentence2": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "label": {
+         "num_classes": 2,
+         "names": [
+           "not_entailment",
+           "entailment"
+         ],
+         "names_file": null,
+         "id": null,
+         "_type": "ClassLabel"
+       },
+       "idx": {
+         "dtype": "int32",
+         "id": null,
+         "_type": "Value"
+       }
+     },
+     "post_processed": null,
+     "supervised_keys": null,
+     "builder_name": "glue",
+     "config_name": "wnli",
+     "version": {
+       "version_str": "1.0.0",
+       "description": "",
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "test": {
+         "name": "test",
+         "num_bytes": 37992,
+         "num_examples": 146,
+         "dataset_name": "glue"
+       },
+       "train": {
+         "name": "train",
+         "num_bytes": 107517,
+         "num_examples": 635,
+         "dataset_name": "glue"
+       },
+       "validation": {
+         "name": "validation",
+         "num_bytes": 12215,
+         "num_examples": 71,
+         "dataset_name": "glue"
+       }
+     },
+     "download_checksums": {
+       "https://dl.fbaipublicfiles.com/glue/data/WNLI.zip": {
+         "num_bytes": 28999,
+         "checksum": "ae0e8e4d16f4d46d4a0a566ec7ecceccfd3fbfaa4a7a4b4e02848c0f2561ac46"
+       }
+     },
+     "download_size": 28999,
+     "post_processing_size": null,
+     "dataset_size": 157724,
+     "size_in_bytes": 186723
+   },
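Note that wnli is the only entailment config above whose `ClassLabel` lists `not_entailment` first, so its integer encoding is inverted relative to qnli and rte. A small sketch that makes the orderings explicit before comparing labels across configs:

```python
from datasets import load_dataset

# Print each config's label ordering; integer 0 means a different
# class for wnli than for the other two.
for config in ("qnli", "rte", "wnli"):
    names = load_dataset("glue", config, split="validation").features["label"].names
    print(config, names)

# qnli ['entailment', 'not_entailment']
# rte ['entailment', 'not_entailment']
# wnli ['not_entailment', 'entailment']
```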
+   "ax": {
+     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
+     "citation": "\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
+     "homepage": "https://gluebenchmark.com/diagnostics",
+     "license": "",
+     "features": {
+       "premise": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "hypothesis": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "label": {
+         "num_classes": 3,
+         "names": [
+           "entailment",
+           "neutral",
+           "contradiction"
+         ],
+         "names_file": null,
+         "id": null,
+         "_type": "ClassLabel"
+       },
+       "idx": {
+         "dtype": "int32",
+         "id": null,
+         "_type": "Value"
+       }
+     },
+     "post_processed": null,
+     "supervised_keys": null,
+     "builder_name": "glue",
+     "config_name": "ax",
+     "version": {
+       "version_str": "1.0.0",
+       "description": "",
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "test": {
+         "name": "test",
+         "num_bytes": 238392,
+         "num_examples": 1104,
+         "dataset_name": "glue"
+       }
+     },
+     "download_checksums": {
+       "https://dl.fbaipublicfiles.com/glue/data/AX.tsv": {
+         "num_bytes": 222257,
+         "checksum": "0e13510b1bb14436ff7e2ee82338f0efb0133ecf2e73507a697dc210db3f05fd"
+       }
+     },
+     "download_size": 222257,
+     "post_processing_size": null,
+     "dataset_size": 238392,
+     "size_in_bytes": 460649
+   }
+ }
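Taken as a whole, `dataset_infos.json` is a flat mapping from config name to its metadata, so the recorded sizes can be tallied directly. A minimal sketch, assuming a local copy of the file under its usual name:

```python
import json

with open("dataset_infos.json") as f:
    infos = json.load(f)

# Sum num_examples across splits and report each config's on-disk size.
for name, info in sorted(infos.items()):
    rows = sum(split["num_examples"] for split in info["splits"].values())
    print(f"{name:16s} {rows:>7d} examples  {info['dataset_size']:>11d} bytes")
```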