{
  "cola": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@article{warstadt2018neural,\n title={Neural Network Acceptability Judgments},\n author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1805.12471},\n year={2018}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
    "homepage": "https://nyu-mll.github.io/CoLA/",
    "license": "",
    "features": {
      "sentence": {
        "dtype": "string",
        "_type": "Value"
      },
      "label": {
        "names": [
          "unacceptable",
          "acceptable"
        ],
        "_type": "ClassLabel"
      },
      "idx": {
        "dtype": "int32",
        "_type": "Value"
      }
    },
    "builder_name": "glue",
    "dataset_name": "glue",
    "config_name": "cola",
    "version": {
      "version_str": "1.0.0",
      "description": "",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "train": {
        "name": "train",
        "num_bytes": 484869,
        "num_examples": 8551,
        "dataset_name": null
      },
      "validation": {
        "name": "validation",
        "num_bytes": 60322,
        "num_examples": 1043,
        "dataset_name": null
      },
      "test": {
        "name": "test",
        "num_bytes": 60513,
        "num_examples": 1063,
        "dataset_name": null
      }
    },
    "download_size": 326394,
    "dataset_size": 605704,
    "size_in_bytes": 932098
  },
  "sst2": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@inproceedings{socher2013recursive,\n title={Recursive deep models for semantic compositionality over a sentiment treebank},\n author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},\n booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},\n pages={1631--1642},\n year={2013}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
    "homepage": "https://datasets.stanford.edu/sentiment/index.html",
    "license": "",
    "features": {
      "sentence": {
        "dtype": "string",
        "_type": "Value"
      },
      "label": {
        "names": [
          "negative",
          "positive"
        ],
        "_type": "ClassLabel"
      },
      "idx": {
        "dtype": "int32",
        "_type": "Value"
      }
    },
    "builder_name": "glue",
    "dataset_name": "glue",
    "config_name": "sst2",
    "version": {
      "version_str": "1.0.0",
      "description": "",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "train": {
        "name": "train",
        "num_bytes": 4681603,
        "num_examples": 67349,
        "dataset_name": null
      },
      "validation": {
        "name": "validation",
        "num_bytes": 106252,
        "num_examples": 872,
        "dataset_name": null
      },
      "test": {
        "name": "test",
        "num_bytes": 216640,
        "num_examples": 1821,
        "dataset_name": null
      }
    },
    "download_size": 3331080,
    "dataset_size": 5004495,
    "size_in_bytes": 8335575
  },
  "mrpc": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@inproceedings{dolan2005automatically,\n title={Automatically constructing a corpus of sentential paraphrases},\n author={Dolan, William B and Brockett, Chris},\n booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},\n year={2005}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
    "homepage": "https://www.microsoft.com/en-us/download/details.aspx?id=52398",
    "license": "",
    "features": {
      "sentence1": {
        "dtype": "string",
        "_type": "Value"
      },
      "sentence2": {
        "dtype": "string",
        "_type": "Value"
      },
      "label": {
        "names": [
          "not_equivalent",
          "equivalent"
        ],
        "_type": "ClassLabel"
      },
      "idx": {
        "dtype": "int32",
        "_type": "Value"
      }
    },
    "builder_name": "glue",
    "dataset_name": "glue",
    "config_name": "mrpc",
    "version": {
      "version_str": "1.0.0",
      "description": "",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "train": {
        "name": "train",
        "num_bytes": 943843,
        "num_examples": 3668,
        "dataset_name": null
      },
      "validation": {
        "name": "validation",
        "num_bytes": 105879,
        "num_examples": 408,
        "dataset_name": null
      },
      "test": {
        "name": "test",
        "num_bytes": 442410,
        "num_examples": 1725,
        "dataset_name": null
      }
    },
    "download_size": 1033400,
    "dataset_size": 1492132,
    "size_in_bytes": 2525532
  },
  "qqp": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@online{WinNT,\n author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},\n title = {First Quora Dataset Release: Question Pairs},\n year = {2017},\n url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},\n urldate = {2019-04-03}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
    "homepage": "https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
    "license": "",
    "features": {
      "question1": {
        "dtype": "string",
        "_type": "Value"
      },
      "question2": {
        "dtype": "string",
        "_type": "Value"
      },
      "label": {
        "names": [
          "not_duplicate",
          "duplicate"
        ],
        "_type": "ClassLabel"
      },
      "idx": {
        "dtype": "int32",
        "_type": "Value"
      }
    },
    "builder_name": "glue",
    "dataset_name": "glue",
    "config_name": "qqp",
    "version": {
      "version_str": "1.0.0",
      "description": "",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "train": {
        "name": "train",
        "num_bytes": 50900820,
        "num_examples": 363846,
        "dataset_name": null
      },
      "validation": {
        "name": "validation",
        "num_bytes": 5653754,
        "num_examples": 40430,
        "dataset_name": null
      },
      "test": {
        "name": "test",
        "num_bytes": 55171111,
        "num_examples": 390965,
        "dataset_name": null
      }
    },
    "download_size": 73982265,
    "dataset_size": 111725685,
    "size_in_bytes": 185707950
  },
  "stsb": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@article{cer2017semeval,\n title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},\n author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},\n journal={arXiv preprint arXiv:1708.00055},\n year={2017}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
    "homepage": "http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
    "license": "",
    "features": {
      "sentence1": {
        "dtype": "string",
        "_type": "Value"
      },
      "sentence2": {
        "dtype": "string",
        "_type": "Value"
      },
      "label": {
        "dtype": "float32",
        "_type": "Value"
      },
      "idx": {
        "dtype": "int32",
        "_type": "Value"
      }
    },
    "builder_name": "glue",
    "dataset_name": "glue",
    "config_name": "stsb",
    "version": {
      "version_str": "1.0.0",
      "description": "",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "train": {
        "name": "train",
        "num_bytes": 754791,
        "num_examples": 5749,
        "dataset_name": null
      },
      "validation": {
        "name": "validation",
        "num_bytes": 216064,
        "num_examples": 1500,
        "dataset_name": null
      },
      "test": {
        "name": "test",
        "num_bytes": 169974,
        "num_examples": 1379,
        "dataset_name": null
      }
    },
    "download_size": 766983,
    "dataset_size": 1140829,
    "size_in_bytes": 1907812
  },
  "mnli": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
    "homepage": "http://www.nyu.edu/projects/bowman/multinli/",
    "license": "",
    "features": {
      "premise": {
        "dtype": "string",
        "_type": "Value"
      },
      "hypothesis": {
        "dtype": "string",
        "_type": "Value"
      },
      "label": {
        "names": [
          "entailment",
          "neutral",
          "contradiction"
        ],
        "_type": "ClassLabel"
      },
      "idx": {
        "dtype": "int32",
        "_type": "Value"
      }
    },
    "builder_name": "glue",
    "dataset_name": "glue",
    "config_name": "mnli",
    "version": {
      "version_str": "1.0.0",
      "description": "",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "train": {
        "name": "train",
        "num_bytes": 74619646,
        "num_examples": 392702,
        "dataset_name": null
      },
      "validation_matched": {
        "name": "validation_matched",
        "num_bytes": 1833783,
        "num_examples": 9815,
        "dataset_name": null
      },
      "validation_mismatched": {
        "name": "validation_mismatched",
        "num_bytes": 1949231,
        "num_examples": 9832,
        "dataset_name": null
      },
      "test_matched": {
        "name": "test_matched",
        "num_bytes": 1848654,
        "num_examples": 9796,
        "dataset_name": null
      },
      "test_mismatched": {
        "name": "test_mismatched",
        "num_bytes": 1950703,
        "num_examples": 9847,
        "dataset_name": null
      }
    },
    "download_size": 57168425,
    "dataset_size": 82202017,
    "size_in_bytes": 139370442
  },
  "mnli_mismatched": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
    "homepage": "http://www.nyu.edu/projects/bowman/multinli/",
    "license": "",
    "features": {
      "premise": {
        "dtype": "string",
        "_type": "Value"
      },
      "hypothesis": {
        "dtype": "string",
        "_type": "Value"
      },
      "label": {
        "names": [
          "entailment",
          "neutral",
          "contradiction"
        ],
        "_type": "ClassLabel"
      },
      "idx": {
        "dtype": "int32",
        "_type": "Value"
      }
    },
    "builder_name": "glue",
    "dataset_name": "glue",
    "config_name": "mnli_mismatched",
    "version": {
      "version_str": "1.0.0",
      "description": "",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "validation": {
        "name": "validation",
        "num_bytes": 1949231,
        "num_examples": 9832,
        "dataset_name": null
      },
      "test": {
        "name": "test",
        "num_bytes": 1950703,
        "num_examples": 9847,
        "dataset_name": null
      }
    },
    "download_size": 2509009,
    "dataset_size": 3899934,
    "size_in_bytes": 6408943
  },
  "mnli_matched": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
    "homepage": "http://www.nyu.edu/projects/bowman/multinli/",
    "license": "",
    "features": {
      "premise": {
        "dtype": "string",
        "_type": "Value"
      },
      "hypothesis": {
        "dtype": "string",
        "_type": "Value"
      },
      "label": {
        "names": [
          "entailment",
          "neutral",
          "contradiction"
        ],
        "_type": "ClassLabel"
      },
      "idx": {
        "dtype": "int32",
        "_type": "Value"
      }
    },
    "builder_name": "glue",
    "dataset_name": "glue",
    "config_name": "mnli_matched",
    "version": {
      "version_str": "1.0.0",
      "description": "",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "validation": {
        "name": "validation",
        "num_bytes": 1833783,
        "num_examples": 9815,
        "dataset_name": null
      },
      "test": {
        "name": "test",
        "num_bytes": 1848654,
        "num_examples": 9796,
        "dataset_name": null
      }
    },
    "download_size": 2435055,
    "dataset_size": 3682437,
    "size_in_bytes": 6117492
  },
  "qnli": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@article{rajpurkar2016squad,\n title={Squad: 100,000+ questions for machine comprehension of text},\n author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},\n journal={arXiv preprint arXiv:1606.05250},\n year={2016}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
    "homepage": "https://rajpurkar.github.io/SQuAD-explorer/",
    "license": "",
    "features": {
      "question": {
        "dtype": "string",
        "_type": "Value"
      },
      "sentence": {
        "dtype": "string",
        "_type": "Value"
      },
      "label": {
        "names": [
          "entailment",
          "not_entailment"
        ],
        "_type": "ClassLabel"
      },
      "idx": {
        "dtype": "int32",
        "_type": "Value"
      }
    },
    "builder_name": "glue",
    "dataset_name": "glue",
    "config_name": "qnli",
    "version": {
      "version_str": "1.0.0",
      "description": "",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "train": {
        "name": "train",
        "num_bytes": 25612443,
        "num_examples": 104743,
        "dataset_name": null
      },
      "validation": {
        "name": "validation",
        "num_bytes": 1368304,
        "num_examples": 5463,
        "dataset_name": null
      },
      "test": {
        "name": "test",
        "num_bytes": 1373093,
        "num_examples": 5463,
        "dataset_name": null
      }
    },
    "download_size": 19278324,
    "dataset_size": 28353840,
    "size_in_bytes": 47632164
  },
  "rte": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@inproceedings{dagan2005pascal,\n title={The PASCAL recognising textual entailment challenge},\n author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},\n booktitle={Machine Learning Challenges Workshop},\n pages={177--190},\n year={2005},\n organization={Springer}\n}\n@inproceedings{bar2006second,\n title={The second pascal recognising textual entailment challenge},\n author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},\n booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},\n volume={6},\n number={1},\n pages={6--4},\n year={2006},\n organization={Venice}\n}\n@inproceedings{giampiccolo2007third,\n title={The third pascal recognizing textual entailment challenge},\n author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},\n booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},\n pages={1--9},\n year={2007},\n organization={Association for Computational Linguistics}\n}\n@inproceedings{bentivogli2009fifth,\n title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},\n author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},\n booktitle={TAC},\n year={2009}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
    "homepage": "https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
    "license": "",
    "features": {
      "sentence1": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      },
      "sentence2": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      },
      "label": {
        "num_classes": 2,
        "names": [
          "entailment",
          "not_entailment"
        ],
        "names_file": null,
        "id": null,
        "_type": "ClassLabel"
      },
      "idx": {
        "dtype": "int32",
        "id": null,
        "_type": "Value"
      }
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "rte",
    "version": {
      "version_str": "1.0.0",
      "description": "",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "test": {
        "name": "test",
        "num_bytes": 975936,
        "num_examples": 3000,
        "dataset_name": "glue"
      },
      "train": {
        "name": "train",
        "num_bytes": 848888,
        "num_examples": 2490,
        "dataset_name": "glue"
      },
      "validation": {
        "name": "validation",
        "num_bytes": 90911,
        "num_examples": 277,
        "dataset_name": "glue"
      }
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/RTE.zip": {
        "num_bytes": 697150,
        "checksum": "6bf86de103ecd335f3441bd43574d23fef87ecc695977a63b82d5efb206556ee"
      }
    },
    "download_size": 697150,
    "post_processing_size": null,
    "dataset_size": 1915735,
    "size_in_bytes": 2612885
  },
  "wnli": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@inproceedings{levesque2012winograd,\n title={The winograd schema challenge},\n author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},\n booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},\n year={2012}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
    "homepage": "https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
    "license": "",
    "features": {
      "sentence1": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      },
      "sentence2": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      },
      "label": {
        "num_classes": 2,
        "names": [
          "not_entailment",
          "entailment"
        ],
        "names_file": null,
        "id": null,
        "_type": "ClassLabel"
      },
      "idx": {
        "dtype": "int32",
        "id": null,
        "_type": "Value"
      }
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "wnli",
    "version": {
      "version_str": "1.0.0",
      "description": "",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "test": {
        "name": "test",
        "num_bytes": 37992,
        "num_examples": 146,
        "dataset_name": "glue"
      },
      "train": {
        "name": "train",
        "num_bytes": 107517,
        "num_examples": 635,
        "dataset_name": "glue"
      },
      "validation": {
        "name": "validation",
        "num_bytes": 12215,
        "num_examples": 71,
        "dataset_name": "glue"
      }
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/WNLI.zip": {
        "num_bytes": 28999,
        "checksum": "ae0e8e4d16f4d46d4a0a566ec7ecceccfd3fbfaa4a7a4b4e02848c0f2561ac46"
      }
    },
    "download_size": 28999,
    "post_processing_size": null,
    "dataset_size": 157724,
    "size_in_bytes": 186723
  },
  "ax": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
    "homepage": "https://gluebenchmark.com/diagnostics",
    "license": "",
    "features": {
      "premise": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      },
      "hypothesis": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      },
      "label": {
        "num_classes": 3,
        "names": [
          "entailment",
          "neutral",
          "contradiction"
        ],
        "names_file": null,
        "id": null,
        "_type": "ClassLabel"
      },
      "idx": {
        "dtype": "int32",
        "id": null,
        "_type": "Value"
      }
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "ax",
    "version": {
      "version_str": "1.0.0",
      "description": "",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "test": {
        "name": "test",
        "num_bytes": 238392,
        "num_examples": 1104,
        "dataset_name": "glue"
      }
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/AX.tsv": {
        "num_bytes": 222257,
        "checksum": "0e13510b1bb14436ff7e2ee82338f0efb0133ecf2e73507a697dc210db3f05fd"
      }
    },
    "download_size": 222257,
    "post_processing_size": null,
    "dataset_size": 238392,
    "size_in_bytes": 460649
  }
}