system HF staff committed on
Commit
ad17642
0 Parent(s):

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
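The rules above route binary and archive artifacts through Git LFS, which is why the dummy_data.zip files further down appear as LFS pointers rather than raw zip content. As a rough sketch (plain fnmatch on the basename, which only approximates real gitattributes pattern matching, applied to a hand-picked subset of the patterns above), one can check which of this commit's files fall under those rules:

import fnmatch

# Rough check: does a path's basename match one of the LFS patterns above?
# (fnmatch on the basename is an approximation of gitattributes matching.)
lfs_patterns = ["*.zip", "*.arrow", "*.bin", "*.gz", "*.parquet"]  # subset of the list above
for path in ["dummy/ax/1.0.0/dummy_data.zip", "dataset_infos.json", "glue.py"]:
    basename = path.rsplit("/", 1)[-1]
    tracked = any(fnmatch.fnmatch(basename, pattern) for pattern in lfs_patterns)
    print(f"{path}: {'LFS pointer' if tracked else 'stored as plain git text'}")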
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"cola": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{warstadt2018neural,\n title={Neural Network Acceptability Judgments},\n author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1805.12471},\n year={2018}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://nyu-mll.github.io/CoLA/", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["unacceptable", "acceptable"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "glue", "config_name": "cola", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 61049, "num_examples": 1063, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 489149, "num_examples": 8551, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 60850, "num_examples": 1043, "dataset_name": "glue"}}, "download_checksums": {"https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4": {"num_bytes": 376971, "checksum": "f212fcd832b8f7b435fb991f101abf89f96b933ab400603bf198960dfc32cbff"}}, "download_size": 376971, "dataset_size": 611048, "size_in_bytes": 988019}, "sst2": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{socher2013recursive,\n title={Recursive deep models for semantic compositionality over a sentiment treebank},\n author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},\n booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},\n pages={1631--1642},\n year={2013}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://nlp.stanford.edu/sentiment/index.html", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["negative", "positive"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "glue", "config_name": "sst2", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 217556, "num_examples": 1821, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 4715283, "num_examples": 67349, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 106692, "num_examples": 872, "dataset_name": "glue"}}, "download_checksums": {"https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8": {"num_bytes": 7439277, "checksum": "d67e16fb55739c1b32cdce9877596db1c127dc322d93c082281f64057c16deaa"}}, "download_size": 7439277, "dataset_size": 5039531, "size_in_bytes": 12478808}, "mrpc": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{dolan2005automatically,\n title={Automatically constructing a corpus of sentential paraphrases},\n author={Dolan, William B and Brockett, Chris},\n booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},\n year={2005}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://www.microsoft.com/en-us/download/details.aspx?id=52398", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_equivalent", "equivalent"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "glue", "config_name": "mrpc", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 443498, "num_examples": 1725, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 946146, "num_examples": 3668, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 106142, "num_examples": 408, "dataset_name": "glue"}}, "download_checksums": {"https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc": {"num_bytes": 6222, "checksum": "971d7767d81b997fd9060ade0ec23c4fc31cbb226a55d1bd4a1bac474eb81dc7"}, "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt": {"num_bytes": 1047044, "checksum": "60a9b09084528f0673eedee2b69cb941920f0b8cd0eeccefc464a98768457f89"}, "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt": {"num_bytes": 441275, "checksum": "a04e271090879aaba6423d65b94950c089298587d9c084bf9cd7439bd785f784"}}, "download_size": 1494541, "dataset_size": 1495786, "size_in_bytes": 2990327}, "qqp": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@online{WinNT,\n author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},\n title = {First Quora Dataset Release: Question Pairs},\n year = 2017,\n url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},\n urldate = {2019-04-03}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs", "license": "", "features": {"question1": {"dtype": "string", "id": null, "_type": "Value"}, "question2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_duplicate", "duplicate"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "glue", "config_name": "qqp", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 55415498, "num_examples": 390965, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 51128510, "num_examples": 363849, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 5679032, "num_examples": 40430, "dataset_name": "glue"}}, "download_checksums": {"https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5": {"num_bytes": 60534884, "checksum": "67cb8f5fe66c90a0bc1bf5792e3924f63008b064ab7a473736c919d20bb140ad"}}, "download_size": 60534884, "dataset_size": 112223040, "size_in_bytes": 172757924}, "stsb": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{cer2017semeval,\n title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},\n author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},\n journal={arXiv preprint arXiv:1708.00055},\n year={2017}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "float32", "id": null, "_type": "Value"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "glue", "config_name": "stsb", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 170847, "num_examples": 1379, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 758394, "num_examples": 5749, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 217012, "num_examples": 1500, "dataset_name": "glue"}}, "download_checksums": {"https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5": {"num_bytes": 802872, "checksum": "e60a6393de5a8b5b9bac5020a1554b54e3691f9d600b775bd131e613ac179c85"}}, "download_size": 802872, "dataset_size": 1146253, "size_in_bytes": 1949125}, "mnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test_matched": {"name": "test_matched", "num_bytes": 1854787, "num_examples": 9796, "dataset_name": "glue"}, "test_mismatched": {"name": "test_mismatched", "num_bytes": 1956866, "num_examples": 9847, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 74865118, "num_examples": 392702, "dataset_name": "glue"}, "validation_matched": {"name": "validation_matched", "num_bytes": 1839926, "num_examples": 9815, "dataset_name": "glue"}, "validation_mismatched": {"name": "validation_mismatched", "num_bytes": 1955384, "num_examples": 9832, "dataset_name": "glue"}}, "download_checksums": {"https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "dataset_size": 82472081, "size_in_bytes": 395255588}, "mnli_mismatched": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli_mismatched", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1956866, "num_examples": 9847, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1955384, "num_examples": 9832, "dataset_name": "glue"}}, "download_checksums": {"https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "dataset_size": 3912250, "size_in_bytes": 316695757}, "mnli_matched": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli_matched", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1854787, "num_examples": 9796, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1839926, "num_examples": 9815, "dataset_name": "glue"}}, "download_checksums": {"https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "dataset_size": 3694713, "size_in_bytes": 316478220}, "qnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{rajpurkar2016squad,\n title={Squad: 100,000+ questions for machine comprehension of text},\n author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},\n journal={arXiv preprint arXiv:1606.05250},\n year={2016}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://rajpurkar.github.io/SQuAD-explorer/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "glue", "config_name": "qnli", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1376516, "num_examples": 5463, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 25677924, "num_examples": 104743, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1371727, "num_examples": 5463, "dataset_name": "glue"}}, "download_checksums": {"https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601": {"num_bytes": 10627589, "checksum": "e634e78627a29adaecd4f955359b22bf5e70f2cbd93b493f2d624138a0c0e5f5"}}, "download_size": 10627589, "dataset_size": 28426167, "size_in_bytes": 39053756}, "rte": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{dagan2005pascal,\n title={The PASCAL recognising textual entailment challenge},\n author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},\n booktitle={Machine Learning Challenges Workshop},\n pages={177--190},\n year={2005},\n organization={Springer}\n}\n@inproceedings{bar2006second,\n title={The second pascal recognising textual entailment challenge},\n author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},\n booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},\n volume={6},\n number={1},\n pages={6--4},\n year={2006},\n organization={Venice}\n}\n@inproceedings{giampiccolo2007third,\n title={The third pascal recognizing textual entailment challenge},\n author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},\n booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},\n pages={1--9},\n year={2007},\n organization={Association for Computational Linguistics}\n}\n@inproceedings{bentivogli2009fifth,\n title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},\n author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},\n booktitle={TAC},\n year={2009}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://aclweb.org/aclwiki/Recognizing_Textual_Entailment", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "glue", "config_name": "rte", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 975936, "num_examples": 3000, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 848888, "num_examples": 2490, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 90911, "num_examples": 277, "dataset_name": "glue"}}, "download_checksums": {"https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb": {"num_bytes": 697150, "checksum": "6bf86de103ecd335f3441bd43574d23fef87ecc695977a63b82d5efb206556ee"}}, "download_size": 697150, "dataset_size": 1915735, "size_in_bytes": 2612885}, "wnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{levesque2012winograd,\n title={The winograd schema challenge},\n author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},\n booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},\n year={2012}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_entailment", "entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "glue", "config_name": "wnli", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 37992, "num_examples": 146, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 107517, "num_examples": 635, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 12215, "num_examples": 71, "dataset_name": "glue"}}, "download_checksums": {"https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf": {"num_bytes": 28999, "checksum": "ae0e8e4d16f4d46d4a0a566ec7ecceccfd3fbfaa4a7a4b4e02848c0f2561ac46"}}, "download_size": 28999, "dataset_size": 157724, "size_in_bytes": 186723}, "ax": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://gluebenchmark.com/diagnostics", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "glue", "config_name": "ax", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 238392, "num_examples": 1104, "dataset_name": "glue"}}, "download_checksums": {"https://bit.ly/2BOtOJ7": {"num_bytes": 222257, "checksum": "0e13510b1bb14436ff7e2ee82338f0efb0133ecf2e73507a697dc210db3f05fd"}}, "download_size": 222257, "dataset_size": 238392, "size_in_bytes": 460649}}
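dataset_infos.json records, for every GLUE config, the feature schema, the split sizes, and the checksums and sizes of the upstream downloads that the library verifies against. A minimal sketch (assuming the file has been fetched to the working directory) of pulling the split sizes back out of it:

import json

# Summarize per-config split sizes from the metadata file above.
with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

for config_name, info in infos.items():
    splits = ", ".join(f"{name}: {split['num_examples']}" for name, split in info["splits"].items())
    print(f"{config_name} ({info['download_size']} bytes to download) -> {splits}")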
dummy/ax/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2a34cfe9a95b80530887f488eb04e3514b322e0fa65c64f425ddb7aea449f69
+ size 509
dummy/cola/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d678797c6eb84d3436868f8b5ac506f88f12bd51633245bd1a20af6021ac48d4
+ size 1116
dummy/mnli/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef46936124ebde31577df53b2ae6e381aa9c66e95a2cf50f42ba68478ec3896e
+ size 5438
dummy/mrpc/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bfe41b0047215524032750c1faf32c84c41566279fca9df1c35482640537aa6
+ size 4539
dummy/qnli/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a771b312be26048e7d921ff4bf01ac7de224641cd51977629bb54b9839637fb0
+ size 1859
dummy/qqp/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a1f6bf7c3ae0587a99d4ecfc2c4ab900efbd23dc1c68e2556426da9feab0163
+ size 1588
dummy/rte/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7bec2e7562503a3b7ef577986b4cd10b075818b66fb03df8d4dec79d28a5bf5f
+ size 1613
dummy/sst2/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ff05ebd2679fd60f174cd19415e8dd0c2f701f49f8f9dbb63f7b30707d9b06e
+ size 1143
dummy/stsb/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8027e1188e092ea53eede8a2b2bd245f4c98f2b37132ea5d7dd173bac36e025e
+ size 1353
dummy/wnli/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e52960c15224df1f7202371029b3a5fad3b4dfec72132d3c8b996ff03db92755
+ size 1407
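Each dummy_data.zip above is stored as a Git LFS pointer file: a version line, the blob's sha256 oid, and its size in bytes. A small sketch (file paths are illustrative) of verifying a locally fetched blob against such a pointer:

import hashlib

# Verify a locally fetched dummy_data.zip against its Git LFS pointer
# (the pointer is the three-line text shown above; paths are placeholders).
def verify_lfs_blob(pointer_path, blob_path):
    fields = {}
    with open(pointer_path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])
    with open(blob_path, "rb") as f:
        data = f.read()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# e.g. verify_lfs_blob("dummy/ax/1.0.0/dummy_data.zip", "downloaded_dummy_data.zip")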
glue.py ADDED
@@ -0,0 +1,627 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """The General Language Understanding Evaluation (GLUE) benchmark."""
18
+
19
+ from __future__ import absolute_import, division, print_function
20
+
21
+ import csv
22
+ import os
23
+ import textwrap
24
+
25
+ import numpy as np
26
+ import six
27
+
28
+ import datasets
29
+
30
+
31
+ _GLUE_CITATION = """\
32
+ @inproceedings{wang2019glue,
33
+ title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
34
+ author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
35
+ note={In the Proceedings of ICLR.},
36
+ year={2019}
37
+ }
38
+ """
39
+
40
+ _GLUE_DESCRIPTION = """\
41
+ GLUE, the General Language Understanding Evaluation benchmark
42
+ (https://gluebenchmark.com/) is a collection of resources for training,
43
+ evaluating, and analyzing natural language understanding systems.
44
+
45
+ """
46
+
47
+ _MRPC_DEV_IDS = "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc"
48
+ _MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt"
49
+ _MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt"
50
+
51
+ _MNLI_BASE_KWARGS = dict(
52
+ text_features={
53
+ "premise": "sentence1",
54
+ "hypothesis": "sentence2",
55
+ },
56
+ label_classes=["entailment", "neutral", "contradiction"],
57
+ label_column="gold_label",
58
+ data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce",
59
+ data_dir="MNLI",
60
+ citation=textwrap.dedent(
61
+ """\
62
+ @InProceedings{N18-1101,
63
+ author = "Williams, Adina
64
+ and Nangia, Nikita
65
+ and Bowman, Samuel",
66
+ title = "A Broad-Coverage Challenge Corpus for
67
+ Sentence Understanding through Inference",
68
+ booktitle = "Proceedings of the 2018 Conference of
69
+ the North American Chapter of the
70
+ Association for Computational Linguistics:
71
+ Human Language Technologies, Volume 1 (Long
72
+ Papers)",
73
+ year = "2018",
74
+ publisher = "Association for Computational Linguistics",
75
+ pages = "1112--1122",
76
+ location = "New Orleans, Louisiana",
77
+ url = "http://aclweb.org/anthology/N18-1101"
78
+ }
79
+ @article{bowman2015large,
80
+ title={A large annotated corpus for learning natural language inference},
81
+ author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
82
+ journal={arXiv preprint arXiv:1508.05326},
83
+ year={2015}
84
+ }"""
85
+ ),
86
+ url="http://www.nyu.edu/projects/bowman/multinli/",
87
+ )
88
+
89
+
90
+ class GlueConfig(datasets.BuilderConfig):
91
+ """BuilderConfig for GLUE."""
92
+
93
+ def __init__(
94
+ self,
95
+ text_features,
96
+ label_column,
97
+ data_url,
98
+ data_dir,
99
+ citation,
100
+ url,
101
+ label_classes=None,
102
+ process_label=lambda x: x,
103
+ **kwargs,
104
+ ):
105
+ """BuilderConfig for GLUE.
106
+
107
+ Args:
108
+ text_features: `dict[string, string]`, map from the name of the feature
109
+ dict for each text field to the name of the column in the tsv file
110
+ label_column: `string`, name of the column in the tsv file corresponding
111
+ to the label
112
+ data_url: `string`, url to download the zip file from
113
+ data_dir: `string`, the path to the folder containing the tsv files in the
114
+ downloaded zip
115
+ citation: `string`, citation for the data set
116
+ url: `string`, url for information about the data set
117
+ label_classes: `list[string]`, the list of classes if the label is
118
+ categorical. If not provided, then the label will be of type
119
+ `datasets.Value('float32')`.
120
+ process_label: `Function[string, any]`, function taking in the raw value
121
+ of the label and processing it to the form required by the label feature
122
+ **kwargs: keyword arguments forwarded to super.
123
+ """
124
+ super(GlueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
125
+ self.text_features = text_features
126
+ self.label_column = label_column
127
+ self.label_classes = label_classes
128
+ self.data_url = data_url
129
+ self.data_dir = data_dir
130
+ self.citation = citation
131
+ self.url = url
132
+ self.process_label = process_label
133
+
134
+
135
+ class Glue(datasets.GeneratorBasedBuilder):
136
+ """The General Language Understanding Evaluation (GLUE) benchmark."""
137
+
138
+ BUILDER_CONFIGS = [
139
+ GlueConfig(
140
+ name="cola",
141
+ description=textwrap.dedent(
142
+ """\
143
+ The Corpus of Linguistic Acceptability consists of English
144
+ acceptability judgments drawn from books and journal articles on
145
+ linguistic theory. Each example is a sequence of words annotated
146
+ with whether it is a grammatical English sentence."""
147
+ ),
148
+ text_features={"sentence": "sentence"},
149
+ label_classes=["unacceptable", "acceptable"],
150
+ label_column="is_acceptable",
151
+ data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4",
152
+ data_dir="CoLA",
153
+ citation=textwrap.dedent(
154
+ """\
155
+ @article{warstadt2018neural,
156
+ title={Neural Network Acceptability Judgments},
157
+ author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
158
+ journal={arXiv preprint arXiv:1805.12471},
159
+ year={2018}
160
+ }"""
161
+ ),
162
+ url="https://nyu-mll.github.io/CoLA/",
163
+ ),
164
+ GlueConfig(
165
+ name="sst2",
166
+ description=textwrap.dedent(
167
+ """\
168
+ The Stanford Sentiment Treebank consists of sentences from movie reviews and
169
+ human annotations of their sentiment. The task is to predict the sentiment of a
170
+ given sentence. We use the two-way (positive/negative) class split, and use only
171
+ sentence-level labels."""
172
+ ),
173
+ text_features={"sentence": "sentence"},
174
+ label_classes=["negative", "positive"],
175
+ label_column="label",
176
+ data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8",
177
+ data_dir="SST-2",
178
+ citation=textwrap.dedent(
179
+ """\
180
+ @inproceedings{socher2013recursive,
181
+ title={Recursive deep models for semantic compositionality over a sentiment treebank},
182
+ author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
183
+ booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
184
+ pages={1631--1642},
185
+ year={2013}
186
+ }"""
187
+ ),
188
+ url="https://datasets.stanford.edu/sentiment/index.html",
189
+ ),
190
+ GlueConfig(
191
+ name="mrpc",
192
+ description=textwrap.dedent(
193
+ """\
194
+ The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of
195
+ sentence pairs automatically extracted from online news sources, with human annotations
196
+ for whether the sentences in the pair are semantically equivalent."""
197
+ ), # pylint: disable=line-too-long
198
+ text_features={"sentence1": "", "sentence2": ""},
199
+ label_classes=["not_equivalent", "equivalent"],
200
+ label_column="Quality",
201
+ data_url="", # MRPC isn't hosted by GLUE.
202
+ data_dir="MRPC",
203
+ citation=textwrap.dedent(
204
+ """\
205
+ @inproceedings{dolan2005automatically,
206
+ title={Automatically constructing a corpus of sentential paraphrases},
207
+ author={Dolan, William B and Brockett, Chris},
208
+ booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},
209
+ year={2005}
210
+ }"""
211
+ ),
212
+ url="https://www.microsoft.com/en-us/download/details.aspx?id=52398",
213
+ ),
214
+ GlueConfig(
215
+ name="qqp",
216
+ description=textwrap.dedent(
217
+ """\
218
+ The Quora Question Pairs dataset is a collection of question pairs from the
219
+ community question-answering website Quora. The task is to determine whether a
220
+ pair of questions are semantically equivalent."""
221
+ ),
222
+ text_features={
223
+ "question1": "question1",
224
+ "question2": "question2",
225
+ },
226
+ label_classes=["not_duplicate", "duplicate"],
227
+ label_column="is_duplicate",
228
+ data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5",
229
+ data_dir="QQP",
230
+ citation=textwrap.dedent(
231
+ """\
232
+ @online{WinNT,
233
+ author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
234
+ title = {First Quora Dataset Release: Question Pairs},
235
+ year = {2017},
236
+ url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
237
+ urldate = {2019-04-03}
238
+ }"""
239
+ ),
240
+ url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
241
+ ),
242
+ GlueConfig(
243
+ name="stsb",
244
+ description=textwrap.dedent(
245
+ """\
246
+ The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
247
+ sentence pairs drawn from news headlines, video and image captions, and natural
248
+ language inference data. Each pair is human-annotated with a similarity score
249
+ from 1 to 5."""
250
+ ),
251
+ text_features={
252
+ "sentence1": "sentence1",
253
+ "sentence2": "sentence2",
254
+ },
255
+ label_column="score",
256
+ data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5",
257
+ data_dir="STS-B",
258
+ citation=textwrap.dedent(
259
+ """\
260
+ @article{cer2017semeval,
261
+ title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},
262
+ author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},
263
+ journal={arXiv preprint arXiv:1708.00055},
264
+ year={2017}
265
+ }"""
266
+ ),
267
+ url="http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
268
+ process_label=np.float32,
269
+ ),
270
+ GlueConfig(
271
+ name="mnli",
272
+ description=textwrap.dedent(
273
+ """\
274
+ The Multi-Genre Natural Language Inference Corpus is a crowdsourced
275
+ collection of sentence pairs with textual entailment annotations. Given a premise sentence
276
+ and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
277
+ (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
278
+ gathered from ten different sources, including transcribed speech, fiction, and government reports.
279
+ We use the standard test set, for which we obtained private labels from the authors, and evaluate
280
+ on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
281
+ the SNLI corpus as 550k examples of auxiliary training data."""
282
+ ),
283
+ **_MNLI_BASE_KWARGS,
284
+ ),
285
+ GlueConfig(
286
+ name="mnli_mismatched",
287
+ description=textwrap.dedent(
288
+ """\
289
+ The mismatched validation and test splits from MNLI.
290
+ See the "mnli" BuilderConfig for additional information."""
291
+ ),
292
+ **_MNLI_BASE_KWARGS,
293
+ ),
294
+ GlueConfig(
295
+ name="mnli_matched",
296
+ description=textwrap.dedent(
297
+ """\
298
+ The matched validation and test splits from MNLI.
299
+ See the "mnli" BuilderConfig for additional information."""
300
+ ),
301
+ **_MNLI_BASE_KWARGS,
302
+ ),
303
+ GlueConfig(
304
+ name="qnli",
305
+ description=textwrap.dedent(
306
+ """\
307
+ The Stanford Question Answering Dataset is a question-answering
308
+ dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
309
+ from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
310
+ convert the task into sentence pair classification by forming a pair between each question and each
311
+ sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
312
+ question and the context sentence. The task is to determine whether the context sentence contains
313
+ the answer to the question. This modified version of the original task removes the requirement that
314
+ the model select the exact answer, but also removes the simplifying assumptions that the answer
315
+ is always present in the input and that lexical overlap is a reliable cue."""
316
+ ), # pylint: disable=line-too-long
317
+ text_features={
318
+ "question": "question",
319
+ "sentence": "sentence",
320
+ },
321
+ label_classes=["entailment", "not_entailment"],
322
+ label_column="label",
323
+ data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601",
324
+ data_dir="QNLI",
325
+ citation=textwrap.dedent(
326
+ """\
327
+ @article{rajpurkar2016squad,
328
+ title={Squad: 100,000+ questions for machine comprehension of text},
329
+ author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
330
+ journal={arXiv preprint arXiv:1606.05250},
331
+ year={2016}
332
+ }"""
333
+ ),
334
+ url="https://rajpurkar.github.io/SQuAD-explorer/",
335
+ ),
336
+ GlueConfig(
337
+ name="rte",
338
+ description=textwrap.dedent(
339
+ """\
340
+ The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
341
+ entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
342
+ et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009).4 Examples are
343
+ constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
344
+ for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""
345
+ ), # pylint: disable=line-too-long
346
+ text_features={
347
+ "sentence1": "sentence1",
348
+ "sentence2": "sentence2",
349
+ },
350
+ label_classes=["entailment", "not_entailment"],
351
+ label_column="label",
352
+ data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb",
353
+ data_dir="RTE",
354
+ citation=textwrap.dedent(
355
+ """\
356
+ @inproceedings{dagan2005pascal,
357
+ title={The PASCAL recognising textual entailment challenge},
358
+ author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
359
+ booktitle={Machine Learning Challenges Workshop},
360
+ pages={177--190},
361
+ year={2005},
362
+ organization={Springer}
363
+ }
364
+ @inproceedings{bar2006second,
365
+ title={The second pascal recognising textual entailment challenge},
366
+ author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
367
+ booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
368
+ volume={6},
369
+ number={1},
370
+ pages={6--4},
371
+ year={2006},
372
+ organization={Venice}
373
+ }
374
+ @inproceedings{giampiccolo2007third,
375
+ title={The third pascal recognizing textual entailment challenge},
376
+ author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
377
+ booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
378
+ pages={1--9},
379
+ year={2007},
380
+ organization={Association for Computational Linguistics}
381
+ }
382
+ @inproceedings{bentivogli2009fifth,
383
+ title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
384
+ author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
385
+ booktitle={TAC},
386
+ year={2009}
387
+ }"""
388
+ ),
389
+ url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
390
+ ),
391
+ GlueConfig(
392
+ name="wnli",
393
+ description=textwrap.dedent(
394
+ """\
395
+ The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
396
+ in which a system must read a sentence with a pronoun and select the referent of that pronoun from
397
+ a list of choices. The examples are manually constructed to foil simple statistical methods: Each
398
+ one is contingent on contextual information provided by a single word or phrase in the sentence.
399
+ To convert the problem into sentence pair classification, we construct sentence pairs by replacing
400
+ the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
401
+ pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
402
+ new examples derived from fiction books that was shared privately by the authors of the original
403
+ corpus. While the included training set is balanced between two classes, the test set is imbalanced
404
+ between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
405
+ hypotheses are sometimes shared between training and development examples, so if a model memorizes the
406
+ training examples, it will predict the wrong label on the corresponding development set
407
+ example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
408
+ between a model's score on this task and its score on the unconverted original task. We
409
+ call the converted dataset WNLI (Winograd NLI)."""
410
+ ),
411
+ text_features={
412
+ "sentence1": "sentence1",
413
+ "sentence2": "sentence2",
414
+ },
415
+ label_classes=["not_entailment", "entailment"],
416
+ label_column="label",
417
+ data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf",
418
+ data_dir="WNLI",
419
+ citation=textwrap.dedent(
420
+ """\
421
+ @inproceedings{levesque2012winograd,
422
+ title={The winograd schema challenge},
423
+ author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
424
+ booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
425
+ year={2012}
426
+ }"""
427
+ ),
428
+ url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
429
+ ),
430
+ GlueConfig(
431
+ name="ax",
432
+ description=textwrap.dedent(
433
+ """\
434
+ A manually-curated evaluation dataset for fine-grained analysis of
435
+ system performance on a broad range of linguistic phenomena. This
436
+ dataset evaluates sentence understanding through Natural Language
437
+ Inference (NLI) problems. Use a model trained on MultiNLI to produce
438
+ predictions for this dataset."""
439
+ ),
440
+ text_features={
441
+ "premise": "sentence1",
442
+ "hypothesis": "sentence2",
443
+ },
444
+ label_classes=["entailment", "neutral", "contradiction"],
445
+ label_column="", # No label since we only have test set.
446
+ # We must use a URL shortener since the URL from GLUE is very long and
447
+ # causes issues in TFDS.
448
+ data_url="https://bit.ly/2BOtOJ7",
449
+ data_dir="", # We are downloading a tsv.
450
+ citation="", # The GLUE citation is sufficient.
451
+ url="https://gluebenchmark.com/diagnostics",
452
+ ),
453
+ ]
454
+
455
+ def _info(self):
456
+ features = {text_feature: datasets.Value("string") for text_feature in six.iterkeys(self.config.text_features)}
457
+ if self.config.label_classes:
458
+ features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
459
+ else:
460
+ features["label"] = datasets.Value("float32")
461
+ features["idx"] = datasets.Value("int32")
462
+ return datasets.DatasetInfo(
463
+ description=_GLUE_DESCRIPTION,
464
+ features=datasets.Features(features),
465
+ homepage=self.config.url,
466
+ citation=self.config.citation + "\n" + _GLUE_CITATION,
467
+ )
468
+
469
+ def _split_generators(self, dl_manager):
470
+ if self.config.name == "ax":
471
+ data_file = dl_manager.download(self.config.data_url)
472
+ return [
473
+ datasets.SplitGenerator(
474
+ name=datasets.Split.TEST,
475
+ gen_kwargs={
476
+ "data_file": data_file,
477
+ "split": "test",
478
+ },
479
+ )
480
+ ]
481
+
482
+ if self.config.name == "mrpc":
483
+ data_dir = None
484
+ mrpc_files = dl_manager.download(
485
+ {
486
+ "dev_ids": _MRPC_DEV_IDS,
487
+ "train": _MRPC_TRAIN,
488
+ "test": _MRPC_TEST,
489
+ }
490
+ )
491
+ else:
492
+ dl_dir = dl_manager.download_and_extract(self.config.data_url)
493
+ data_dir = os.path.join(dl_dir, self.config.data_dir)
494
+ mrpc_files = None
495
+ train_split = datasets.SplitGenerator(
496
+ name=datasets.Split.TRAIN,
497
+ gen_kwargs={
498
+ "data_file": os.path.join(data_dir or "", "train.tsv"),
499
+ "split": "train",
500
+ "mrpc_files": mrpc_files,
501
+ },
502
+ )
503
+ if self.config.name == "mnli":
504
+ return [
505
+ train_split,
506
+ _mnli_split_generator("validation_matched", data_dir, "dev", matched=True),
507
+ _mnli_split_generator("validation_mismatched", data_dir, "dev", matched=False),
508
+ _mnli_split_generator("test_matched", data_dir, "test", matched=True),
509
+ _mnli_split_generator("test_mismatched", data_dir, "test", matched=False),
510
+ ]
511
+ elif self.config.name == "mnli_matched":
512
+ return [
513
+ _mnli_split_generator("validation", data_dir, "dev", matched=True),
514
+ _mnli_split_generator("test", data_dir, "test", matched=True),
515
+ ]
516
+ elif self.config.name == "mnli_mismatched":
517
+ return [
518
+ _mnli_split_generator("validation", data_dir, "dev", matched=False),
519
+ _mnli_split_generator("test", data_dir, "test", matched=False),
520
+ ]
521
+ else:
522
+ return [
523
+ train_split,
524
+ datasets.SplitGenerator(
525
+ name=datasets.Split.VALIDATION,
526
+ gen_kwargs={
527
+ "data_file": os.path.join(data_dir or "", "dev.tsv"),
528
+ "split": "dev",
529
+ "mrpc_files": mrpc_files,
530
+ },
531
+ ),
532
+ datasets.SplitGenerator(
533
+ name=datasets.Split.TEST,
534
+ gen_kwargs={
535
+ "data_file": os.path.join(data_dir or "", "test.tsv"),
536
+ "split": "test",
537
+ "mrpc_files": mrpc_files,
538
+ },
539
+ ),
540
+ ]
541
+
542
+ def _generate_examples(self, data_file, split, mrpc_files=None):
543
+ if self.config.name == "mrpc":
544
+ # We have to prepare the MRPC dataset from the original sources ourselves.
545
+ examples = self._generate_example_mrpc_files(mrpc_files=mrpc_files, split=split)
546
+ for example in examples:
547
+ yield example["idx"], example
548
+ else:
549
+ process_label = self.config.process_label
550
+ label_classes = self.config.label_classes
551
+
552
+ # The train and dev files for CoLA are the only tsv files without a
553
+ # header.
554
+ is_cola_non_test = self.config.name == "cola" and split != "test"
555
+
556
+ with open(data_file, encoding="utf8") as f:
557
+ reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
558
+ if is_cola_non_test:
559
+ reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
560
+
561
+ for n, row in enumerate(reader):
562
+ if is_cola_non_test:
563
+ row = {
564
+ "sentence": row[3],
565
+ "is_acceptable": row[1],
566
+ }
567
+
568
+ example = {feat: row[col] for feat, col in six.iteritems(self.config.text_features)}
569
+ example["idx"] = n
570
+
571
+ if self.config.label_column in row:
572
+ label = row[self.config.label_column]
573
+ # For some tasks, the label is represented as 0 and 1 in the tsv
574
+ # files and needs to be cast to integer to work with the feature.
575
+ if label_classes and label not in label_classes:
576
+ label = int(label) if label else None
577
+ example["label"] = process_label(label)
578
+ else:
579
+ example["label"] = process_label(-1)
580
+
581
+ # Filter out corrupted rows.
582
+ for value in six.itervalues(example):
583
+ if value is None:
584
+ break
585
+ else:
586
+ yield example["idx"], example
587
+
588
+ def _generate_example_mrpc_files(self, mrpc_files, split):
589
+ if split == "test":
590
+ with open(mrpc_files["test"], encoding="utf8") as f:
591
+ reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
592
+ for n, row in enumerate(reader):
593
+ yield {
594
+ "sentence1": row["#1 String"],
595
+ "sentence2": row["#2 String"],
596
+ "label": -1,
597
+ "idx": n,
598
+ }
599
+ else:
600
+ with open(mrpc_files["dev_ids"], encoding="utf8") as f:
601
+ reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
602
+ dev_ids = [[row[0], row[1]] for row in reader]
603
+ with open(mrpc_files["train"], encoding="utf8") as f:
604
+ # The first 3 bytes are the utf-8 BOM \xef\xbb\xbf, which messes with
605
+ # the Quality key.
606
+ f.seek(3)
607
+ reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
608
+ for n, row in enumerate(reader):
609
+ is_row_in_dev = [row["#1 ID"], row["#2 ID"]] in dev_ids
610
+ if is_row_in_dev == (split == "dev"):
611
+ yield {
612
+ "sentence1": row["#1 String"],
613
+ "sentence2": row["#2 String"],
614
+ "label": int(row["Quality"]),
615
+ "idx": n,
616
+ }
617
+
618
+
619
+ def _mnli_split_generator(name, data_dir, split, matched):
620
+ return datasets.SplitGenerator(
621
+ name=name,
622
+ gen_kwargs={
623
+ "data_file": os.path.join(data_dir, "%s_%s.tsv" % (split, "matched" if matched else "mismatched")),
624
+ "split": split,
625
+ "mrpc_files": None,
626
+ },
627
+ )
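Once this script ships with the library, each BuilderConfig defined above becomes a loadable configuration of the glue dataset. A minimal usage sketch (assuming datasets >= 1.0.0 is installed and downloads are permitted):

from datasets import load_dataset

# Load a single GLUE task through the builder defined above.
cola = load_dataset("glue", "cola")      # DatasetDict with train / validation / test splits
print(cola["train"].features)            # sentence (string), label (ClassLabel), idx (int32)
print(cola["train"][0])                  # one acceptability-judgment example

# MNLI exposes matched/mismatched validation and test splits instead of plain ones.
mnli_val = load_dataset("glue", "mnli", split="validation_matched")
print(mnli_val.num_rows)                 # 9815 examples, per dataset_infos.json above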