mishig (HF staff) committed
Commit ec3e5e6 · 1 Parent(s): b3cda02

Update README.md

Files changed (1): README.md (+1, −916)
README.md CHANGED
@@ -1,916 +1 @@
- ---
- annotations_creators:
- - other
- language_creators:
- - other
- language:
- - en
- license:
- - cc-by-4.0
- multilinguality:
- - monolingual
- size_categories:
- - 10K<n<100K
- source_datasets:
- - original
- task_categories:
- - text-classification
- task_ids:
- - acceptability-classification
- - natural-language-inference
- - semantic-similarity-scoring
- - sentiment-classification
- - text-scoring
- paperswithcode_id: glue
- pretty_name: GLUE (General Language Understanding Evaluation benchmark)
- configs:
- - ax
- - cola
- - mnli
- - mnli_matched
- - mnli_mismatched
- - mrpc
- - qnli
- - qqp
- - rte
- - sst2
- - stsb
- - wnli
- tags:
- - qa-nli
- - coreference-nli
- - paraphrase-identification
- dataset_info:
- - config_name: cola
-   features:
-   - name: sentence
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           '0': unacceptable
-           '1': acceptable
-   - name: idx
-     dtype: int32
-   splits:
-   - name: test
-     num_bytes: 61049
-     num_examples: 1063
-   - name: train
-     num_bytes: 489149
-     num_examples: 8551
-   - name: validation
-     num_bytes: 60850
-     num_examples: 1043
-   download_size: 376971
-   dataset_size: 611048
- - config_name: sst2
-   features:
-   - name: sentence
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           '0': negative
-           '1': positive
-   - name: idx
-     dtype: int32
-   splits:
-   - name: test
-     num_bytes: 217556
-     num_examples: 1821
-   - name: train
-     num_bytes: 4715283
-     num_examples: 67349
-   - name: validation
-     num_bytes: 106692
-     num_examples: 872
-   download_size: 7439277
-   dataset_size: 5039531
- - config_name: mrpc
-   features:
-   - name: sentence1
-     dtype: string
-   - name: sentence2
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           '0': not_equivalent
-           '1': equivalent
-   - name: idx
-     dtype: int32
-   splits:
-   - name: test
-     num_bytes: 443498
-     num_examples: 1725
-   - name: train
-     num_bytes: 946146
-     num_examples: 3668
-   - name: validation
-     num_bytes: 106142
-     num_examples: 408
-   download_size: 1494541
-   dataset_size: 1495786
- - config_name: qqp
-   features:
-   - name: question1
-     dtype: string
-   - name: question2
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           '0': not_duplicate
-           '1': duplicate
-   - name: idx
-     dtype: int32
-   splits:
-   - name: train
-     num_bytes: 50901116
-     num_examples: 363846
-   - name: validation
-     num_bytes: 5653794
-     num_examples: 40430
-   - name: test
-     num_bytes: 55171431
-     num_examples: 390965
-   download_size: 41696084
-   dataset_size: 111726341
- - config_name: stsb
-   features:
-   - name: sentence1
-     dtype: string
-   - name: sentence2
-     dtype: string
-   - name: label
-     dtype: float32
-   - name: idx
-     dtype: int32
-   splits:
-   - name: test
-     num_bytes: 170847
-     num_examples: 1379
-   - name: train
-     num_bytes: 758394
-     num_examples: 5749
-   - name: validation
-     num_bytes: 217012
-     num_examples: 1500
-   download_size: 802872
-   dataset_size: 1146253
- - config_name: mnli
-   features:
-   - name: premise
-     dtype: string
-   - name: hypothesis
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           '0': entailment
-           '1': neutral
-           '2': contradiction
-   - name: idx
-     dtype: int32
-   splits:
-   - name: test_matched
-     num_bytes: 1854787
-     num_examples: 9796
-   - name: test_mismatched
-     num_bytes: 1956866
-     num_examples: 9847
-   - name: train
-     num_bytes: 74865118
-     num_examples: 392702
-   - name: validation_matched
-     num_bytes: 1839926
-     num_examples: 9815
-   - name: validation_mismatched
-     num_bytes: 1955384
-     num_examples: 9832
-   download_size: 312783507
-   dataset_size: 82472081
- - config_name: mnli_mismatched
-   features:
-   - name: premise
-     dtype: string
-   - name: hypothesis
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           '0': entailment
-           '1': neutral
-           '2': contradiction
-   - name: idx
-     dtype: int32
-   splits:
-   - name: test
-     num_bytes: 1956866
-     num_examples: 9847
-   - name: validation
-     num_bytes: 1955384
-     num_examples: 9832
-   download_size: 312783507
-   dataset_size: 3912250
- - config_name: mnli_matched
-   features:
-   - name: premise
-     dtype: string
-   - name: hypothesis
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           '0': entailment
-           '1': neutral
-           '2': contradiction
-   - name: idx
-     dtype: int32
-   splits:
-   - name: test
-     num_bytes: 1854787
-     num_examples: 9796
-   - name: validation
-     num_bytes: 1839926
-     num_examples: 9815
-   download_size: 312783507
-   dataset_size: 3694713
- - config_name: qnli
-   features:
-   - name: question
-     dtype: string
-   - name: sentence
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           '0': entailment
-           '1': not_entailment
-   - name: idx
-     dtype: int32
-   splits:
-   - name: test
-     num_bytes: 1376516
-     num_examples: 5463
-   - name: train
-     num_bytes: 25677924
-     num_examples: 104743
-   - name: validation
-     num_bytes: 1371727
-     num_examples: 5463
-   download_size: 10627589
-   dataset_size: 28426167
- - config_name: rte
-   features:
-   - name: sentence1
-     dtype: string
-   - name: sentence2
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           '0': entailment
-           '1': not_entailment
-   - name: idx
-     dtype: int32
-   splits:
-   - name: test
-     num_bytes: 975936
-     num_examples: 3000
-   - name: train
-     num_bytes: 848888
-     num_examples: 2490
-   - name: validation
-     num_bytes: 90911
-     num_examples: 277
-   download_size: 697150
-   dataset_size: 1915735
- - config_name: wnli
-   features:
-   - name: sentence1
-     dtype: string
-   - name: sentence2
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           '0': not_entailment
-           '1': entailment
-   - name: idx
-     dtype: int32
-   splits:
-   - name: test
-     num_bytes: 37992
-     num_examples: 146
-   - name: train
-     num_bytes: 107517
-     num_examples: 635
-   - name: validation
-     num_bytes: 12215
-     num_examples: 71
-   download_size: 28999
-   dataset_size: 157724
- - config_name: ax
-   features:
-   - name: premise
-     dtype: string
-   - name: hypothesis
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           '0': entailment
-           '1': neutral
-           '2': contradiction
-   - name: idx
-     dtype: int32
-   splits:
-   - name: test
-     num_bytes: 238392
-     num_examples: 1104
-   download_size: 222257
-   dataset_size: 238392
- train-eval-index:
- - config: cola
-   task: text-classification
-   task_id: binary_classification
-   splits:
-     train_split: train
-     eval_split: validation
-   col_mapping:
-     sentence: text
-     label: target
- - config: sst2
-   task: text-classification
-   task_id: binary_classification
-   splits:
-     train_split: train
-     eval_split: validation
-   col_mapping:
-     sentence: text
-     label: target
- - config: mrpc
-   task: text-classification
-   task_id: natural_language_inference
-   splits:
-     train_split: train
-     eval_split: validation
-   col_mapping:
-     sentence1: text1
-     sentence2: text2
-     label: target
- - config: qqp
-   task: text-classification
-   task_id: natural_language_inference
-   splits:
-     train_split: train
-     eval_split: validation
-   col_mapping:
-     question1: text1
-     question2: text2
-     label: target
- - config: stsb
-   task: text-classification
-   task_id: natural_language_inference
-   splits:
-     train_split: train
-     eval_split: validation
-   col_mapping:
-     sentence1: text1
-     sentence2: text2
-     label: target
- - config: mnli
-   task: text-classification
-   task_id: natural_language_inference
-   splits:
-     train_split: train
-     eval_split: validation_matched
-   col_mapping:
-     premise: text1
-     hypothesis: text2
-     label: target
- - config: mnli_mismatched
-   task: text-classification
-   task_id: natural_language_inference
-   splits:
-     train_split: train
-     eval_split: validation
-   col_mapping:
-     premise: text1
-     hypothesis: text2
-     label: target
- - config: mnli_matched
-   task: text-classification
-   task_id: natural_language_inference
-   splits:
-     train_split: train
-     eval_split: validation
-   col_mapping:
-     premise: text1
-     hypothesis: text2
-     label: target
- - config: qnli
-   task: text-classification
-   task_id: natural_language_inference
-   splits:
-     train_split: train
-     eval_split: validation
-   col_mapping:
-     question: text1
-     sentence: text2
-     label: target
- - config: rte
-   task: text-classification
-   task_id: natural_language_inference
-   splits:
-     train_split: train
-     eval_split: validation
-   col_mapping:
-     sentence1: text1
-     sentence2: text2
-     label: target
- - config: wnli
-   task: text-classification
-   task_id: natural_language_inference
-   splits:
-     train_split: train
-     eval_split: validation
-   col_mapping:
-     sentence1: text1
-     sentence2: text2
-     label: target
- ---
- 
- # Dataset Card for GLUE
- 
- ## Table of Contents
- - [Dataset Card for GLUE](#dataset-card-for-glue)
-   - [Table of Contents](#table-of-contents)
-   - [Dataset Description](#dataset-description)
-     - [Dataset Summary](#dataset-summary)
-     - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
-       - [ax](#ax)
-       - [cola](#cola)
-       - [mnli](#mnli)
-       - [mnli_matched](#mnli_matched)
-       - [mnli_mismatched](#mnli_mismatched)
-       - [mrpc](#mrpc)
-       - [qnli](#qnli)
-       - [qqp](#qqp)
-       - [rte](#rte)
-       - [sst2](#sst2)
-       - [stsb](#stsb)
-       - [wnli](#wnli)
-     - [Languages](#languages)
-   - [Dataset Structure](#dataset-structure)
-     - [Data Instances](#data-instances)
-       - [ax](#ax-1)
-       - [cola](#cola-1)
-       - [mnli](#mnli-1)
-       - [mnli_matched](#mnli_matched-1)
-       - [mnli_mismatched](#mnli_mismatched-1)
-       - [mrpc](#mrpc-1)
-       - [qnli](#qnli-1)
-       - [qqp](#qqp-1)
-       - [rte](#rte-1)
-       - [sst2](#sst2-1)
-       - [stsb](#stsb-1)
-       - [wnli](#wnli-1)
-     - [Data Fields](#data-fields)
-       - [ax](#ax-2)
-       - [cola](#cola-2)
-       - [mnli](#mnli-2)
-       - [mnli_matched](#mnli_matched-2)
-       - [mnli_mismatched](#mnli_mismatched-2)
-       - [mrpc](#mrpc-2)
-       - [qnli](#qnli-2)
-       - [qqp](#qqp-2)
-       - [rte](#rte-2)
-       - [sst2](#sst2-2)
-       - [stsb](#stsb-2)
-       - [wnli](#wnli-2)
-     - [Data Splits](#data-splits)
-       - [ax](#ax-3)
-       - [cola](#cola-3)
-       - [mnli](#mnli-3)
-       - [mnli_matched](#mnli_matched-3)
-       - [mnli_mismatched](#mnli_mismatched-3)
-       - [mrpc](#mrpc-3)
-       - [qnli](#qnli-3)
-       - [qqp](#qqp-3)
-       - [rte](#rte-3)
-       - [sst2](#sst2-3)
-       - [stsb](#stsb-3)
-       - [wnli](#wnli-3)
-   - [Dataset Creation](#dataset-creation)
-     - [Curation Rationale](#curation-rationale)
-     - [Source Data](#source-data)
-       - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
-       - [Who are the source language producers?](#who-are-the-source-language-producers)
-     - [Annotations](#annotations)
-       - [Annotation process](#annotation-process)
-       - [Who are the annotators?](#who-are-the-annotators)
-     - [Personal and Sensitive Information](#personal-and-sensitive-information)
-   - [Considerations for Using the Data](#considerations-for-using-the-data)
-     - [Social Impact of Dataset](#social-impact-of-dataset)
-     - [Discussion of Biases](#discussion-of-biases)
-     - [Other Known Limitations](#other-known-limitations)
-   - [Additional Information](#additional-information)
-     - [Dataset Curators](#dataset-curators)
-     - [Licensing Information](#licensing-information)
-     - [Citation Information](#citation-information)
-     - [Contributions](#contributions)
- 
- ## Dataset Description
- 
- - **Homepage:** [https://nyu-mll.github.io/CoLA/](https://nyu-mll.github.io/CoLA/)
- - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- - **Size of downloaded dataset files:** 955.33 MB
- - **Size of the generated dataset:** 229.68 MB
- - **Total amount of disk used:** 1185.01 MB
- 
- ### Dataset Summary
- 
- GLUE, the General Language Understanding Evaluation benchmark (https://gluebenchmark.com/), is a collection of resources for training, evaluating, and analyzing natural language understanding systems.
- 
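- All GLUE tasks are configs of a single loader, so each one loads the same way. A minimal sketch using the `datasets` library (any config name from the list below can be substituted):
- 
- ```python
- from datasets import load_dataset
- 
- # The second argument selects the GLUE task (config), e.g. "cola", "sst2", "mnli".
- cola = load_dataset("glue", "cola")
- 
- print(cola)              # DatasetDict with train/validation/test splits
- print(cola["train"][0])  # one example: {'sentence': ..., 'label': ..., 'idx': ...}
- ```
- 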
- ### Supported Tasks and Leaderboards
- 
- The leaderboard for the GLUE benchmark can be found [at this address](https://gluebenchmark.com/). It comprises the following tasks:
- 
- #### ax
- 
- A manually-curated evaluation dataset for fine-grained analysis of system performance on a broad range of linguistic phenomena. This dataset evaluates sentence understanding through Natural Language Inference (NLI) problems. Use a model trained on MultiNLI to produce predictions for this dataset.
- 
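- Since `ax` ships only an unlabeled test split (`label` is `-1`), predictions must come from a model trained elsewhere. A hedged sketch with `transformers`, assuming the publicly available MNLI checkpoint `roberta-large-mnli` (any MNLI-trained classifier works):
- 
- ```python
- from datasets import load_dataset
- from transformers import pipeline
- 
- ax = load_dataset("glue", "ax", split="test")
- nli = pipeline("text-classification", model="roberta-large-mnli")
- 
- # Score one premise/hypothesis pair; the model returns an NLI label and score.
- example = ax[0]
- print(nli({"text": example["premise"], "text_pair": example["hypothesis"]}))
- ```
- 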
- #### cola
- 
- The Corpus of Linguistic Acceptability consists of English acceptability judgments drawn from books and journal articles on linguistic theory. Each example is a sequence of words annotated with whether it is a grammatical English sentence.
- 
- #### mnli
- 
- The Multi-Genre Natural Language Inference Corpus is a crowdsourced collection of sentence pairs with textual entailment annotations. Given a premise sentence and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are gathered from ten different sources, including transcribed speech, fiction, and government reports. The authors of the benchmark use the standard test set, for which they obtained private labels from the authors, and evaluate on both the matched (in-domain) and mismatched (cross-domain) sections. They also use and recommend the SNLI corpus as 550k examples of auxiliary training data.
- 
- #### mnli_matched
- 
- The matched validation and test splits from MNLI. See the "mnli" BuilderConfig for additional information.
- 
- #### mnli_mismatched
- 
- The mismatched validation and test splits from MNLI. See the "mnli" BuilderConfig for additional information.
- 
- #### mrpc
- 
- The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of sentence pairs automatically extracted from online news sources, with human annotations for whether the sentences in the pair are semantically equivalent.
- 
- #### qnli
- 
- The Stanford Question Answering Dataset is a question-answering dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn from Wikipedia) contains the answer to the corresponding question (written by an annotator). The authors of the benchmark convert the task into sentence pair classification by forming a pair between each question and each sentence in the corresponding context, and filtering out pairs with low lexical overlap between the question and the context sentence. The task is to determine whether the context sentence contains the answer to the question. This modified version of the original task removes the requirement that the model select the exact answer, but also removes the simplifying assumptions that the answer is always present in the input and that lexical overlap is a reliable cue.
- 
- #### qqp
- 
- The Quora Question Pairs dataset is a collection of question pairs from the community question-answering website Quora. The task is to determine whether a pair of questions are semantically equivalent.
- 
- #### rte
- 
- The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual entailment challenges. The authors of the benchmark combined the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009). Examples are constructed based on news and Wikipedia text. For consistency, the authors of the benchmark convert all datasets to a two-class split, collapsing neutral and contradiction into not_entailment for the three-class datasets.
- 
- #### sst2
- 
- The Stanford Sentiment Treebank consists of sentences from movie reviews and human annotations of their sentiment. The task is to predict the sentiment of a given sentence. It uses the two-way (positive/negative) class split, with only sentence-level labels.
- 
- #### stsb
- 
- The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of sentence pairs drawn from news headlines, video and image captions, and natural language inference data. Each pair is human-annotated with a similarity score from 1 to 5.
- 
- #### wnli
- 
- The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task in which a system must read a sentence with a pronoun and select the referent of that pronoun from a list of choices. The examples are manually constructed to foil simple statistical methods: each one is contingent on contextual information provided by a single word or phrase in the sentence. To convert the problem into sentence pair classification, the authors of the benchmark construct sentence pairs by replacing the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the pronoun substituted is entailed by the original sentence. They use a small evaluation set consisting of new examples derived from fiction books that was shared privately by the authors of the original corpus. While the included training set is balanced between two classes, the test set is imbalanced between them (65% not entailment). Also, due to a data quirk, the development set is adversarial: hypotheses are sometimes shared between training and development examples, so if a model memorizes the training examples, it will predict the wrong label on the corresponding development set example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence between a model's score on this task and its score on the unconverted original task. The authors of the benchmark call the converted dataset WNLI (Winograd NLI).
- 
- ### Languages
- 
- The language data in GLUE is in English (BCP-47 `en`).
- 
- ## Dataset Structure
- 
- ### Data Instances
- 
- #### ax
- 
- - **Size of downloaded dataset files:** 0.21 MB
- - **Size of the generated dataset:** 0.23 MB
- - **Total amount of disk used:** 0.44 MB
- 
- An example of 'test' looks as follows.
- ```
- {
-   "premise": "The cat sat on the mat.",
-   "hypothesis": "The cat did not sit on the mat.",
-   "label": -1,
-   "idx": 0
- }
- ```
- 
- #### cola
- 
- - **Size of downloaded dataset files:** 0.36 MB
- - **Size of the generated dataset:** 0.58 MB
- - **Total amount of disk used:** 0.94 MB
- 
- An example of 'train' looks as follows.
- ```
- {
-   "sentence": "Our friends won't buy this analysis, let alone the next one we propose.",
-   "label": 1,
-   "idx": 0
- }
- ```
- 
- #### mnli
- 
- - **Size of downloaded dataset files:** 298.29 MB
- - **Size of the generated dataset:** 78.65 MB
- - **Total amount of disk used:** 376.95 MB
- 
- An example of 'train' looks as follows.
- ```
- {
-   "premise": "Conceptually cream skimming has two basic dimensions - product and geography.",
-   "hypothesis": "Product and geography are what make cream skimming work.",
-   "label": 1,
-   "idx": 0
- }
- ```
- 
- #### mnli_matched
- 
- - **Size of downloaded dataset files:** 298.29 MB
- - **Size of the generated dataset:** 3.52 MB
- - **Total amount of disk used:** 301.82 MB
- 
- An example of 'test' looks as follows.
- ```
- {
-   "premise": "Hierbas, ans seco, ans dulce, and frigola are just a few names worth keeping a look-out for.",
-   "hypothesis": "Hierbas is a name worth looking out for.",
-   "label": -1,
-   "idx": 0
- }
- ```
- 
- #### mnli_mismatched
- 
- - **Size of downloaded dataset files:** 298.29 MB
- - **Size of the generated dataset:** 3.73 MB
- - **Total amount of disk used:** 302.02 MB
- 
- An example of 'test' looks as follows.
- ```
- {
-   "premise": "What have you decided, what are you going to do?",
-   "hypothesis": "So what's your decision?",
-   "label": -1,
-   "idx": 0
- }
- ```
- 
- #### mrpc
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### qnli
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### qqp
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### rte
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### sst2
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### stsb
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### wnli
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- ### Data Fields
- 
- The data fields are the same among all splits.
- 
- #### ax
- - `premise`: a `string` feature.
- - `hypothesis`: a `string` feature.
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
- - `idx`: an `int32` feature.
- 
- #### cola
- - `sentence`: a `string` feature.
- - `label`: a classification label, with possible values including `unacceptable` (0), `acceptable` (1).
- - `idx`: an `int32` feature.
- 
- #### mnli
- - `premise`: a `string` feature.
- - `hypothesis`: a `string` feature.
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
- - `idx`: an `int32` feature.
- 
- #### mnli_matched
- - `premise`: a `string` feature.
- - `hypothesis`: a `string` feature.
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
- - `idx`: an `int32` feature.
- 
- #### mnli_mismatched
- - `premise`: a `string` feature.
- - `hypothesis`: a `string` feature.
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
- - `idx`: an `int32` feature.
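- 
- The integer `label` values map to the class names listed above via the `ClassLabel` feature; a quick sketch for inspecting the mapping:
- 
- ```python
- from datasets import load_dataset
- 
- mnli = load_dataset("glue", "mnli", split="validation_matched")
- 
- label = mnli.features["label"]
- print(label.names)               # ['entailment', 'neutral', 'contradiction']
- print(label.int2str(0))          # 'entailment'
- print(label.str2int("neutral"))  # 1
- ```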
- 
- #### mrpc
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### qnli
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### qqp
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### rte
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### sst2
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### stsb
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### wnli
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- ### Data Splits
- 
- #### ax
- 
- | |test|
- |---|---:|
- |ax |1104|
- 
- #### cola
- 
- | |train|validation|test|
- |----|----:|---------:|---:|
- |cola| 8551| 1043|1063|
- 
- #### mnli
- 
- | |train |validation_matched|validation_mismatched|test_matched|test_mismatched|
- |----|-----:|-----------------:|--------------------:|-----------:|--------------:|
- |mnli|392702| 9815| 9832| 9796| 9847|
- 
- #### mnli_matched
- 
- | |validation|test|
- |------------|---------:|---:|
- |mnli_matched| 9815|9796|
- 
- #### mnli_mismatched
- 
- | |validation|test|
- |---------------|---------:|---:|
- |mnli_mismatched| 9832|9847|
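- 
- Splits are addressed by these names when loading, e.g. `split="validation_mismatched"` for MNLI. Each config also has a matching GLUE metric; a hedged sketch using the `evaluate` library (assumed installed alongside `datasets`):
- 
- ```python
- import evaluate
- from datasets import load_dataset
- 
- val = load_dataset("glue", "mnli", split="validation_mismatched")
- metric = evaluate.load("glue", "mnli")  # accuracy for MNLI
- 
- # Dummy baseline: always predict class 0 ("entailment").
- predictions = [0] * len(val)
- print(metric.compute(predictions=predictions, references=val["label"]))
- ```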
- 
- #### mrpc
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### qnli
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### qqp
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### rte
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### sst2
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### stsb
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### wnli
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- ## Dataset Creation
- 
- ### Curation Rationale
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- ### Source Data
- 
- #### Initial Data Collection and Normalization
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### Who are the source language producers?
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- ### Annotations
- 
- #### Annotation process
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- #### Who are the annotators?
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- ### Personal and Sensitive Information
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- ## Considerations for Using the Data
- 
- ### Social Impact of Dataset
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- ### Discussion of Biases
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- ### Other Known Limitations
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- ## Additional Information
- 
- ### Dataset Curators
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- ### Licensing Information
- 
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- 
- ### Citation Information
- 
- ```
- @article{warstadt2018neural,
-   title={Neural Network Acceptability Judgments},
-   author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
-   journal={arXiv preprint arXiv:1805.12471},
-   year={2018}
- }
- @inproceedings{wang2019glue,
-   title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
-   author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
-   note={In the Proceedings of ICLR.},
-   year={2019}
- }
- ```
- 
- Note that each GLUE dataset has its own citation. Please see the source for the correct citation for each contained dataset.
- 
- ### Contributions
- 
- Thanks to [@patpizio](https://github.com/patpizio), [@jeswan](https://github.com/jeswan), [@thomwolf](https://github.com/thomwolf), [@patrickvonplaten](https://github.com/patrickvonplaten), [@mariamabarham](https://github.com/mariamabarham) for adding this dataset.
 
+ <video controls loop autoplay src="https://huggingface.co/araffin/ppo-LunarLander-v2/resolve/main/replay.mp4"/>