polinaeterna committed on
Commit
d53a805
·
1 Parent(s): b1dd333

Delete README.md

Browse files
Files changed (1) hide show
  1. README.md +0 -916
README.md DELETED
@@ -1,916 +0,0 @@
1
- ---
2
- annotations_creators:
3
- - other
4
- language_creators:
5
- - other
6
- language:
7
- - en
8
- license:
9
- - cc-by-4.0
10
- multilinguality:
11
- - monolingual
12
- size_categories:
13
- - 10K<n<100K
14
- source_datasets:
15
- - original
16
- task_categories:
17
- - text-classification
18
- task_ids:
19
- - acceptability-classification
20
- - natural-language-inference
21
- - semantic-similarity-scoring
22
- - sentiment-classification
23
- - text-scoring
24
- paperswithcode_id: glue
25
- pretty_name: GLUE (General Language Understanding Evaluation benchmark)
26
- tags:
27
- - qa-nli
28
- - coreference-nli
29
- - paraphrase-identification
30
- dataset_info:
31
- - config_name: cola
32
- features:
33
- - name: sentence
34
- dtype: string
35
- - name: label
36
- dtype:
37
- class_label:
38
- names:
39
- '0': unacceptable
40
- '1': acceptable
41
- - name: idx
42
- dtype: int32
43
- splits:
44
- - name: test
45
- num_bytes: 61049
46
- num_examples: 1063
47
- - name: train
48
- num_bytes: 489149
49
- num_examples: 8551
50
- - name: validation
51
- num_bytes: 60850
52
- num_examples: 1043
53
- download_size: 376971
54
- dataset_size: 611048
55
- - config_name: sst2
56
- features:
57
- - name: sentence
58
- dtype: string
59
- - name: label
60
- dtype:
61
- class_label:
62
- names:
63
- '0': negative
64
- '1': positive
65
- - name: idx
66
- dtype: int32
67
- splits:
68
- - name: test
69
- num_bytes: 217556
70
- num_examples: 1821
71
- - name: train
72
- num_bytes: 4715283
73
- num_examples: 67349
74
- - name: validation
75
- num_bytes: 106692
76
- num_examples: 872
77
- download_size: 7439277
78
- dataset_size: 5039531
79
- - config_name: mrpc
80
- features:
81
- - name: sentence1
82
- dtype: string
83
- - name: sentence2
84
- dtype: string
85
- - name: label
86
- dtype:
87
- class_label:
88
- names:
89
- '0': not_equivalent
90
- '1': equivalent
91
- - name: idx
92
- dtype: int32
93
- splits:
94
- - name: test
95
- num_bytes: 443498
96
- num_examples: 1725
97
- - name: train
98
- num_bytes: 946146
99
- num_examples: 3668
100
- - name: validation
101
- num_bytes: 106142
102
- num_examples: 408
103
- download_size: 1494541
104
- dataset_size: 1495786
105
- - config_name: qqp
106
- features:
107
- - name: question1
108
- dtype: string
109
- - name: question2
110
- dtype: string
111
- - name: label
112
- dtype:
113
- class_label:
114
- names:
115
- '0': not_duplicate
116
- '1': duplicate
117
- - name: idx
118
- dtype: int32
119
- splits:
120
- - name: train
121
- num_bytes: 50901116
122
- num_examples: 363846
123
- - name: validation
124
- num_bytes: 5653794
125
- num_examples: 40430
126
- - name: test
127
- num_bytes: 55171431
128
- num_examples: 390965
129
- download_size: 41696084
130
- dataset_size: 111726341
131
- - config_name: stsb
132
- features:
133
- - name: sentence1
134
- dtype: string
135
- - name: sentence2
136
- dtype: string
137
- - name: label
138
- dtype: float32
139
- - name: idx
140
- dtype: int32
141
- splits:
142
- - name: test
143
- num_bytes: 170847
144
- num_examples: 1379
145
- - name: train
146
- num_bytes: 758394
147
- num_examples: 5749
148
- - name: validation
149
- num_bytes: 217012
150
- num_examples: 1500
151
- download_size: 802872
152
- dataset_size: 1146253
153
- - config_name: mnli
154
- features:
155
- - name: premise
156
- dtype: string
157
- - name: hypothesis
158
- dtype: string
159
- - name: label
160
- dtype:
161
- class_label:
162
- names:
163
- '0': entailment
164
- '1': neutral
165
- '2': contradiction
166
- - name: idx
167
- dtype: int32
168
- splits:
169
- - name: test_matched
170
- num_bytes: 1854787
171
- num_examples: 9796
172
- - name: test_mismatched
173
- num_bytes: 1956866
174
- num_examples: 9847
175
- - name: train
176
- num_bytes: 74865118
177
- num_examples: 392702
178
- - name: validation_matched
179
- num_bytes: 1839926
180
- num_examples: 9815
181
- - name: validation_mismatched
182
- num_bytes: 1955384
183
- num_examples: 9832
184
- download_size: 312783507
185
- dataset_size: 82472081
186
- - config_name: mnli_mismatched
187
- features:
188
- - name: premise
189
- dtype: string
190
- - name: hypothesis
191
- dtype: string
192
- - name: label
193
- dtype:
194
- class_label:
195
- names:
196
- '0': entailment
197
- '1': neutral
198
- '2': contradiction
199
- - name: idx
200
- dtype: int32
201
- splits:
202
- - name: test
203
- num_bytes: 1956866
204
- num_examples: 9847
205
- - name: validation
206
- num_bytes: 1955384
207
- num_examples: 9832
208
- download_size: 312783507
209
- dataset_size: 3912250
210
- - config_name: mnli_matched
211
- features:
212
- - name: premise
213
- dtype: string
214
- - name: hypothesis
215
- dtype: string
216
- - name: label
217
- dtype:
218
- class_label:
219
- names:
220
- '0': entailment
221
- '1': neutral
222
- '2': contradiction
223
- - name: idx
224
- dtype: int32
225
- splits:
226
- - name: test
227
- num_bytes: 1854787
228
- num_examples: 9796
229
- - name: validation
230
- num_bytes: 1839926
231
- num_examples: 9815
232
- download_size: 312783507
233
- dataset_size: 3694713
234
- - config_name: qnli
235
- features:
236
- - name: question
237
- dtype: string
238
- - name: sentence
239
- dtype: string
240
- - name: label
241
- dtype:
242
- class_label:
243
- names:
244
- '0': entailment
245
- '1': not_entailment
246
- - name: idx
247
- dtype: int32
248
- splits:
249
- - name: test
250
- num_bytes: 1376516
251
- num_examples: 5463
252
- - name: train
253
- num_bytes: 25677924
254
- num_examples: 104743
255
- - name: validation
256
- num_bytes: 1371727
257
- num_examples: 5463
258
- download_size: 10627589
259
- dataset_size: 28426167
260
- - config_name: rte
261
- features:
262
- - name: sentence1
263
- dtype: string
264
- - name: sentence2
265
- dtype: string
266
- - name: label
267
- dtype:
268
- class_label:
269
- names:
270
- '0': entailment
271
- '1': not_entailment
272
- - name: idx
273
- dtype: int32
274
- splits:
275
- - name: test
276
- num_bytes: 975936
277
- num_examples: 3000
278
- - name: train
279
- num_bytes: 848888
280
- num_examples: 2490
281
- - name: validation
282
- num_bytes: 90911
283
- num_examples: 277
284
- download_size: 697150
285
- dataset_size: 1915735
286
- - config_name: wnli
287
- features:
288
- - name: sentence1
289
- dtype: string
290
- - name: sentence2
291
- dtype: string
292
- - name: label
293
- dtype:
294
- class_label:
295
- names:
296
- '0': not_entailment
297
- '1': entailment
298
- - name: idx
299
- dtype: int32
300
- splits:
301
- - name: test
302
- num_bytes: 37992
303
- num_examples: 146
304
- - name: train
305
- num_bytes: 107517
306
- num_examples: 635
307
- - name: validation
308
- num_bytes: 12215
309
- num_examples: 71
310
- download_size: 28999
311
- dataset_size: 157724
312
- - config_name: ax
313
- features:
314
- - name: premise
315
- dtype: string
316
- - name: hypothesis
317
- dtype: string
318
- - name: label
319
- dtype:
320
- class_label:
321
- names:
322
- '0': entailment
323
- '1': neutral
324
- '2': contradiction
325
- - name: idx
326
- dtype: int32
327
- splits:
328
- - name: test
329
- num_bytes: 238392
330
- num_examples: 1104
331
- download_size: 222257
332
- dataset_size: 238392
333
- train-eval-index:
334
- - config: cola
335
- task: text-classification
336
- task_id: binary_classification
337
- splits:
338
- train_split: train
339
- eval_split: validation
340
- col_mapping:
341
- sentence: text
342
- label: target
343
- - config: sst2
344
- task: text-classification
345
- task_id: binary_classification
346
- splits:
347
- train_split: train
348
- eval_split: validation
349
- col_mapping:
350
- sentence: text
351
- label: target
352
- - config: mrpc
353
- task: text-classification
354
- task_id: natural_language_inference
355
- splits:
356
- train_split: train
357
- eval_split: validation
358
- col_mapping:
359
- sentence1: text1
360
- sentence2: text2
361
- label: target
362
- - config: qqp
363
- task: text-classification
364
- task_id: natural_language_inference
365
- splits:
366
- train_split: train
367
- eval_split: validation
368
- col_mapping:
369
- question1: text1
370
- question2: text2
371
- label: target
372
- - config: stsb
373
- task: text-classification
374
- task_id: natural_language_inference
375
- splits:
376
- train_split: train
377
- eval_split: validation
378
- col_mapping:
379
- sentence1: text1
380
- sentence2: text2
381
- label: target
382
- - config: mnli
383
- task: text-classification
384
- task_id: natural_language_inference
385
- splits:
386
- train_split: train
387
- eval_split: validation_matched
388
- col_mapping:
389
- premise: text1
390
- hypothesis: text2
391
- label: target
392
- - config: mnli_mismatched
393
- task: text-classification
394
- task_id: natural_language_inference
395
- splits:
396
- train_split: train
397
- eval_split: validation
398
- col_mapping:
399
- premise: text1
400
- hypothesis: text2
401
- label: target
402
- - config: mnli_matched
403
- task: text-classification
404
- task_id: natural_language_inference
405
- splits:
406
- train_split: train
407
- eval_split: validation
408
- col_mapping:
409
- premise: text1
410
- hypothesis: text2
411
- label: target
412
- - config: qnli
413
- task: text-classification
414
- task_id: natural_language_inference
415
- splits:
416
- train_split: train
417
- eval_split: validation
418
- col_mapping:
419
- question: text1
420
- sentence: text2
421
- label: target
422
- - config: rte
423
- task: text-classification
424
- task_id: natural_language_inference
425
- splits:
426
- train_split: train
427
- eval_split: validation
428
- col_mapping:
429
- sentence1: text1
430
- sentence2: text2
431
- label: target
432
- - config: wnli
433
- task: text-classification
434
- task_id: natural_language_inference
435
- splits:
436
- train_split: train
437
- eval_split: validation
438
- col_mapping:
439
- sentence1: text1
440
- sentence2: text2
441
- label: target
442
- config_names:
443
- - ax
444
- - cola
445
- - mnli
446
- - mnli_matched
447
- - mnli_mismatched
448
- - mrpc
449
- - qnli
450
- - qqp
451
- - rte
452
- - sst2
453
- - stsb
454
- - wnli
455
- ---
456
-
457
- # Dataset Card for GLUE
458
-
459
- ## Table of Contents
460
- - [Dataset Card for GLUE](#dataset-card-for-glue)
461
- - [Table of Contents](#table-of-contents)
462
- - [Dataset Description](#dataset-description)
463
- - [Dataset Summary](#dataset-summary)
464
- - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
465
- - [ax](#ax)
466
- - [cola](#cola)
467
- - [mnli](#mnli)
468
- - [mnli_matched](#mnli_matched)
469
- - [mnli_mismatched](#mnli_mismatched)
470
- - [mrpc](#mrpc)
471
- - [qnli](#qnli)
472
- - [qqp](#qqp)
473
- - [rte](#rte)
474
- - [sst2](#sst2)
475
- - [stsb](#stsb)
476
- - [wnli](#wnli)
477
- - [Languages](#languages)
478
- - [Dataset Structure](#dataset-structure)
479
- - [Data Instances](#data-instances)
480
- - [ax](#ax-1)
481
- - [cola](#cola-1)
482
- - [mnli](#mnli-1)
483
- - [mnli_matched](#mnli_matched-1)
484
- - [mnli_mismatched](#mnli_mismatched-1)
485
- - [mrpc](#mrpc-1)
486
- - [qnli](#qnli-1)
487
- - [qqp](#qqp-1)
488
- - [rte](#rte-1)
489
- - [sst2](#sst2-1)
490
- - [stsb](#stsb-1)
491
- - [wnli](#wnli-1)
492
- - [Data Fields](#data-fields)
493
- - [ax](#ax-2)
494
- - [cola](#cola-2)
495
- - [mnli](#mnli-2)
496
- - [mnli_matched](#mnli_matched-2)
497
- - [mnli_mismatched](#mnli_mismatched-2)
498
- - [mrpc](#mrpc-2)
499
- - [qnli](#qnli-2)
500
- - [qqp](#qqp-2)
501
- - [rte](#rte-2)
502
- - [sst2](#sst2-2)
503
- - [stsb](#stsb-2)
504
- - [wnli](#wnli-2)
505
- - [Data Splits](#data-splits)
506
- - [ax](#ax-3)
507
- - [cola](#cola-3)
508
- - [mnli](#mnli-3)
509
- - [mnli_matched](#mnli_matched-3)
510
- - [mnli_mismatched](#mnli_mismatched-3)
511
- - [mrpc](#mrpc-3)
512
- - [qnli](#qnli-3)
513
- - [qqp](#qqp-3)
514
- - [rte](#rte-3)
515
- - [sst2](#sst2-3)
516
- - [stsb](#stsb-3)
517
- - [wnli](#wnli-3)
518
- - [Dataset Creation](#dataset-creation)
519
- - [Curation Rationale](#curation-rationale)
520
- - [Source Data](#source-data)
521
- - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
522
- - [Who are the source language producers?](#who-are-the-source-language-producers)
523
- - [Annotations](#annotations)
524
- - [Annotation process](#annotation-process)
525
- - [Who are the annotators?](#who-are-the-annotators)
526
- - [Personal and Sensitive Information](#personal-and-sensitive-information)
527
- - [Considerations for Using the Data](#considerations-for-using-the-data)
528
- - [Social Impact of Dataset](#social-impact-of-dataset)
529
- - [Discussion of Biases](#discussion-of-biases)
530
- - [Other Known Limitations](#other-known-limitations)
531
- - [Additional Information](#additional-information)
532
- - [Dataset Curators](#dataset-curators)
533
- - [Licensing Information](#licensing-information)
534
- - [Citation Information](#citation-information)
535
- - [Contributions](#contributions)
536
-
537
- ## Dataset Description
538
-
539
- - **Homepage:** [https://nyu-mll.github.io/CoLA/](https://nyu-mll.github.io/CoLA/)
540
- - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
541
- - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
542
- - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
543
- - **Size of downloaded dataset files:** 1.00 GB
544
- - **Size of the generated dataset:** 240.84 MB
545
- - **Total amount of disk used:** 1.24 GB
546
-
547
- ### Dataset Summary
548
-
549
- GLUE, the General Language Understanding Evaluation benchmark (https://gluebenchmark.com/) is a collection of resources for training, evaluating, and analyzing natural language understanding systems.
550
-
551
- ### Supported Tasks and Leaderboards
552
-
553
- The leaderboard for the GLUE benchmark can be found [at this address](https://gluebenchmark.com/). It comprises the following tasks:
554
-
555
- #### ax
556
-
557
- A manually-curated evaluation dataset for fine-grained analysis of system performance on a broad range of linguistic phenomena. This dataset evaluates sentence understanding through Natural Language Inference (NLI) problems. Use a model trained on MultiNLI to produce predictions for this dataset.
558
-
559
- #### cola
560
-
561
- The Corpus of Linguistic Acceptability consists of English acceptability judgments drawn from books and journal articles on linguistic theory. Each example is a sequence of words annotated with whether it is a grammatical English sentence.
562
-
563
- #### mnli
564
-
565
- The Multi-Genre Natural Language Inference Corpus is a crowdsourced collection of sentence pairs with textual entailment annotations. Given a premise sentence and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are gathered from ten different sources, including transcribed speech, fiction, and government reports. The authors of the benchmark use the standard test set, for which they obtained private labels from the RTE authors, and evaluate on both the matched (in-domain) and mismatched (cross-domain) section. They also use and recommend the SNLI corpus as 550k examples of auxiliary training data.
566
-
567
- #### mnli_matched
568
-
569
- The matched validation and test splits from MNLI. See the "mnli" BuilderConfig for additional information.
570
-
571
- #### mnli_mismatched
572
-
573
- The mismatched validation and test splits from MNLI. See the "mnli" BuilderConfig for additional information.
574
-
575
- #### mrpc
576
-
577
- The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of sentence pairs automatically extracted from online news sources, with human annotations for whether the sentences in the pair are semantically equivalent.
578
-
579
- #### qnli
580
-
581
- The Stanford Question Answering Dataset is a question-answering dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn from Wikipedia) contains the answer to the corresponding question (written by an annotator). The authors of the benchmark convert the task into sentence pair classification by forming a pair between each question and each sentence in the corresponding context, and filtering out pairs with low lexical overlap between the question and the context sentence. The task is to determine whether the context sentence contains the answer to the question. This modified version of the original task removes the requirement that the model select the exact answer, but also removes the simplifying assumptions that the answer is always present in the input and that lexical overlap is a reliable cue.
582
-
583
- #### qqp
584
-
585
- The Quora Question Pairs2 dataset is a collection of question pairs from the community question-answering website Quora. The task is to determine whether a pair of questions are semantically equivalent.
586
-
587
- #### rte
588
-
589
- The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual entailment challenges. The authors of the benchmark combined the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009). Examples are constructed based on news and Wikipedia text. The authors of the benchmark convert all datasets to a two-class split, where for three-class datasets they collapse neutral and contradiction into not entailment, for consistency.
590
-
591
- #### sst2
592
-
593
- The Stanford Sentiment Treebank consists of sentences from movie reviews and human annotations of their sentiment. The task is to predict the sentiment of a given sentence. It uses the two-way (positive/negative) class split, with only sentence-level labels.
594
-
595
- #### stsb
596
-
597
- The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of sentence pairs drawn from news headlines, video and image captions, and natural language inference data. Each pair is human-annotated with a similarity score from 1 to 5.
598
-
599
- #### wnli
600
-
601
- The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task in which a system must read a sentence with a pronoun and select the referent of that pronoun from a list of choices. The examples are manually constructed to foil simple statistical methods: Each one is contingent on contextual information provided by a single word or phrase in the sentence. To convert the problem into sentence pair classification, the authors of the benchmark construct sentence pairs by replacing the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the pronoun substituted is entailed by the original sentence. They use a small evaluation set consisting of new examples derived from fiction books that was shared privately by the authors of the original corpus. While the included training set is balanced between two classes, the test set is imbalanced between them (65% not entailment). Also, due to a data quirk, the development set is adversarial: hypotheses are sometimes shared between training and development examples, so if a model memorizes the training examples, they will predict the wrong label on the corresponding development set example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence between a model's score on this task and its score on the unconverted original task. The authors of the benchmark call the converted dataset WNLI (Winograd NLI).
602
-
603
- ### Languages
604
-
605
- The language data in GLUE is in English (BCP-47 `en`).
606
-
607
- ## Dataset Structure
608
-
609
- ### Data Instances
610
-
611
- #### ax
612
-
613
- - **Size of downloaded dataset files:** 0.22 MB
614
- - **Size of the generated dataset:** 0.24 MB
615
- - **Total amount of disk used:** 0.46 MB
616
-
617
- An example of 'test' looks as follows.
618
- ```
619
- {
620
- "premise": "The cat sat on the mat.",
621
- "hypothesis": "The cat did not sit on the mat.",
622
- "label": -1,
623
- "idx": 0
624
- }
625
- ```
626
-
627
- #### cola
628
-
629
- - **Size of downloaded dataset files:** 0.38 MB
630
- - **Size of the generated dataset:** 0.61 MB
631
- - **Total amount of disk used:** 0.99 MB
632
-
633
- An example of 'train' looks as follows.
634
- ```
635
- {
636
- "sentence": "Our friends won't buy this analysis, let alone the next one we propose.",
637
- "label": 1,
638
- "idx": 0
639
- }
640
- ```
641
-
642
- #### mnli
643
-
644
- - **Size of downloaded dataset files:** 312.78 MB
645
- - **Size of the generated dataset:** 82.47 MB
646
- - **Total amount of disk used:** 395.26 MB
647
-
648
- An example of 'train' looks as follows.
649
- ```
650
- {
651
- "premise": "Conceptually cream skimming has two basic dimensions - product and geography.",
652
- "hypothesis": "Product and geography are what make cream skimming work.",
653
- "label": 1,
654
- "idx": 0
655
- }
656
- ```
657
-
658
- #### mnli_matched
659
-
660
- - **Size of downloaded dataset files:** 312.78 MB
661
- - **Size of the generated dataset:** 3.69 MB
662
- - **Total amount of disk used:** 316.48 MB
663
-
664
- An example of 'test' looks as follows.
665
- ```
666
- {
667
- "premise": "Hierbas, ans seco, ans dulce, and frigola are just a few names worth keeping a look-out for.",
668
- "hypothesis": "Hierbas is a name worth looking out for.",
669
- "label": -1,
670
- "idx": 0
671
- }
672
- ```
673
-
674
- #### mnli_mismatched
675
-
676
- - **Size of downloaded dataset files:** 312.78 MB
677
- - **Size of the generated dataset:** 3.91 MB
678
- - **Total amount of disk used:** 316.69 MB
679
-
680
- An example of 'test' looks as follows.
681
- ```
682
- {
683
- "premise": "What have you decided, what are you going to do?",
684
- "hypothesis": "So what's your decision?",
685
- "label": -1,
686
- "idx": 0
687
- }
688
- ```
689
-
690
- #### mrpc
691
-
692
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
693
-
694
- #### qnli
695
-
696
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
697
-
698
- #### qqp
699
-
700
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
701
-
702
- #### rte
703
-
704
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
705
-
706
- #### sst2
707
-
708
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
709
-
710
- #### stsb
711
-
712
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
713
-
714
- #### wnli
715
-
716
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
717
-
718
- ### Data Fields
719
-
720
- The data fields are the same among all splits.
721
-
722
- #### ax
723
- - `premise`: a `string` feature.
724
- - `hypothesis`: a `string` feature.
725
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
726
- - `idx`: a `int32` feature.
727
-
728
- #### cola
729
- - `sentence`: a `string` feature.
730
- - `label`: a classification label, with possible values including `unacceptable` (0), `acceptable` (1).
731
- - `idx`: a `int32` feature.
732
-
733
- #### mnli
734
- - `premise`: a `string` feature.
735
- - `hypothesis`: a `string` feature.
736
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
737
- - `idx`: a `int32` feature.
738
-
739
- #### mnli_matched
740
- - `premise`: a `string` feature.
741
- - `hypothesis`: a `string` feature.
742
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
743
- - `idx`: a `int32` feature.
744
-
745
- #### mnli_mismatched
746
- - `premise`: a `string` feature.
747
- - `hypothesis`: a `string` feature.
748
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
749
- - `idx`: a `int32` feature.
750
-
751
- #### mrpc
752
-
753
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
754
-
755
- #### qnli
756
-
757
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
758
-
759
- #### qqp
760
-
761
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
762
-
763
- #### rte
764
-
765
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
766
-
767
- #### sst2
768
-
769
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
770
-
771
- #### stsb
772
-
773
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
774
-
775
- #### wnli
776
-
777
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
778
-
779
- ### Data Splits
780
-
781
- #### ax
782
-
783
- | |test|
784
- |---|---:|
785
- |ax |1104|
786
-
787
- #### cola
788
-
789
- | |train|validation|test|
790
- |----|----:|---------:|---:|
791
- |cola| 8551| 1043|1063|
792
-
793
- #### mnli
794
-
795
- | |train |validation_matched|validation_mismatched|test_matched|test_mismatched|
796
- |----|-----:|-----------------:|--------------------:|-----------:|--------------:|
797
- |mnli|392702| 9815| 9832| 9796| 9847|
798
-
799
- #### mnli_matched
800
-
801
- | |validation|test|
802
- |------------|---------:|---:|
803
- |mnli_matched| 9815|9796|
804
-
805
- #### mnli_mismatched
806
-
807
- | |validation|test|
808
- |---------------|---------:|---:|
809
- |mnli_mismatched| 9832|9847|
810
-
811
- #### mrpc
812
-
813
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
814
-
815
- #### qnli
816
-
817
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
818
-
819
- #### qqp
820
-
821
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
822
-
823
- #### rte
824
-
825
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
826
-
827
- #### sst2
828
-
829
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
830
-
831
- #### stsb
832
-
833
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
834
-
835
- #### wnli
836
-
837
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
838
-
839
- ## Dataset Creation
840
-
841
- ### Curation Rationale
842
-
843
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
844
-
845
- ### Source Data
846
-
847
- #### Initial Data Collection and Normalization
848
-
849
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
850
-
851
- #### Who are the source language producers?
852
-
853
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
854
-
855
- ### Annotations
856
-
857
- #### Annotation process
858
-
859
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
860
-
861
- #### Who are the annotators?
862
-
863
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
864
-
865
- ### Personal and Sensitive Information
866
-
867
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
868
-
869
- ## Considerations for Using the Data
870
-
871
- ### Social Impact of Dataset
872
-
873
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
874
-
875
- ### Discussion of Biases
876
-
877
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
878
-
879
- ### Other Known Limitations
880
-
881
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
882
-
883
- ## Additional Information
884
-
885
- ### Dataset Curators
886
-
887
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
888
-
889
- ### Licensing Information
890
-
891
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
892
-
893
- ### Citation Information
894
-
895
- ```
896
- @article{warstadt2018neural,
897
- title={Neural Network Acceptability Judgments},
898
- author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
899
- journal={arXiv preprint arXiv:1805.12471},
900
- year={2018}
901
- }
902
- @inproceedings{wang2019glue,
903
- title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
904
- author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
905
- note={In the Proceedings of ICLR.},
906
- year={2019}
907
- }
908
-
909
- Note that each GLUE dataset has its own citation. Please see the source to see
910
- the correct citation for each contained dataset.
911
- ```
912
-
913
-
914
- ### Contributions
915
-
916
- Thanks to [@patpizio](https://github.com/patpizio), [@jeswan](https://github.com/jeswan), [@thomwolf](https://github.com/thomwolf), [@patrickvonplaten](https://github.com/patrickvonplaten), [@mariamabarham](https://github.com/mariamabarham) for adding this dataset.