system (HF staff) committed on
Commit bc15b7f (1 parent: ed7efa1)

Update files from the datasets library (from 1.1.3)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.1.3

dataset_infos.json CHANGED
@@ -1 +1 @@
- {"plain_text": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"languages": ["ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh"], "id": null, "_type": "Translation"}, "hypothesis": {"languages": ["ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh"], "num_languages": 15, "id": null, "_type": "TranslationVariableLanguages"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 19387508, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 9566255, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://cims.nyu.edu/~sbowman/xnli/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 17865352, "post_processing_size": null, "dataset_size": 28953763, "size_in_bytes": 46819115}}
+ {"ar": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "ar", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 107399934, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1294561, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 633009, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 109327504, "size_in_bytes": 593291216}, "bg": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "bg", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 125973545, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1573042, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 774069, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 128320656, "size_in_bytes": 612284368}, "de": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "de", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 84684460, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 996496, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 494612, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 86175568, "size_in_bytes": 570139280}, "el": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "el", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 139753678, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1704793, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 841234, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 142299705, "size_in_bytes": 626263417}, "en": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "en", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 74444346, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 875142, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 433471, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 75752959, "size_in_bytes": 559716671}, "es": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "es", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 81383604, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 969821, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 478430, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 82831855, "size_in_bytes": 566795567}, "fr": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "fr", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 85809099, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1029247, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 510112, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 87348458, "size_in_bytes": 571312170}, "hi": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "hi", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 170594284, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 2073081, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 1023923, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 173691288, "size_in_bytes": 657655000}, "ru": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "ru", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 129859935, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1603474, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 786450, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 132249859, "size_in_bytes": 616213571}, "sw": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "sw", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 69286045, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 871659, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 429858, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 70587562, "size_in_bytes": 554551274}, "th": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "th", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 176063212, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 2147023, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 1061168, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 179271403, "size_in_bytes": 663235115}, "tr": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "tr", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 71637460, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 934942, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 459316, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 73031718, "size_in_bytes": 556995430}, "ur": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "ur", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 96441806, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1416249, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 699960, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 98558015, "size_in_bytes": 582521727}, "vi": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "vi", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 101417750, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1190225, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 590688, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 103198663, "size_in_bytes": 587162375}, "zh": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "zh", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 72225161, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 777937, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 384859, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 73387957, "size_in_bytes": 557351669}, "all_languages": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"languages": ["ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh"], "id": null, "_type": "Translation"}, "hypothesis": {"languages": ["ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh"], "num_languages": 15, "id": null, "_type": "TranslationVariableLanguages"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "all_languages", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1581474731, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 19387508, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 9566255, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 1610428494, "size_in_bytes": 2094392206}}
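The regenerated dataset_infos.json above replaces the single plain_text config with one config per language plus an all_languages config, and every config now records a train split built from the XNLI-MT machine-translated data in addition to the original validation and test splits. A minimal sketch of how the new layout can be inspected with the datasets library (assumes datasets >= 1.1.3; the "ar" config is only an arbitrary example):

from datasets import load_dataset

# Load one per-language config; per the metadata above it now has
# train (392,702), validation (2,490) and test (5,010) examples.
xnli_ar = load_dataset("xnli", "ar")

print(xnli_ar)                    # DatasetDict with train / validation / test splits
print(xnli_ar["train"].features)  # premise and hypothesis are plain strings, label is a ClassLabel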
dummy/{plain_text/1.0.0 → all_languages/1.1.0}/dummy_data.zip RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:087e5fb69a4751ad440cfac64e14928dd0ada6afe3d1ec2caaaaf2128b00da6b
- size 6032
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/ar/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/bg/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/de/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/el/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/en/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/es/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/fr/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/hi/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/ru/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/sw/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/th/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/tr/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/ur/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/vi/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
dummy/zh/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72bfec36ff57afcd699fdfc3dd8020eb3830715d79ab98f89ddba9e6bd90bc32
+ size 28285
xnli.py CHANGED
@@ -21,8 +21,7 @@ from __future__ import absolute_import, division, print_function
  import collections
  import csv
  import os
-
- import six
+ from contextlib import ExitStack

  import datasets

@@ -52,36 +51,76 @@ B) and is a classification task (given two sentences, predict one of three
  labels).
  """

- _DATA_URL = "https://cims.nyu.edu/~sbowman/xnli/XNLI-1.0.zip"
+ _TRAIN_DATA_URL = "https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip"
+ _TESTVAL_DATA_URL = "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip"

  _LANGUAGES = ("ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh")


+ class XnliConfig(datasets.BuilderConfig):
+     """BuilderConfig for XNLI."""
+
+     def __init__(self, language: str, languages=None, **kwargs):
+         """BuilderConfig for XNLI.
+
+         Args:
+             language: One of ar,bg,de,el,en,es,fr,hi,ru,sw,th,tr,ur,vi,zh, or all_languages
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(XnliConfig, self).__init__(**kwargs)
+         self.language = language
+         if language != "all_languages":
+             self.languages = [language]
+         else:
+             self.languages = languages if languages is not None else _LANGUAGES
+
+
  class Xnli(datasets.GeneratorBasedBuilder):
      """XNLI: The Cross-Lingual NLI Corpus. Version 1.0."""

+     VERSION = datasets.Version("1.1.0", "")
+     BUILDER_CONFIG_CLASS = XnliConfig
      BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="plain_text",
-             version=datasets.Version("1.0.0", ""),
-             description="Plain text import of XNLI",
+         XnliConfig(
+             name=lang,
+             language=lang,
+             version=datasets.Version("1.1.0", ""),
+             description=f"Plain text import of XNLI for the {lang} language",
+         )
+         for lang in _LANGUAGES
+     ] + [
+         XnliConfig(
+             name="all_languages",
+             language="all_languages",
+             version=datasets.Version("1.1.0", ""),
+             description="Plain text import of XNLI for all languages",
          )
      ]

      def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
+         if self.config.language == "all_languages":
+             features = datasets.Features(
                  {
-                     "premise": datasets.features.Translation(
+                     "premise": datasets.Translation(
                          languages=_LANGUAGES,
                      ),
-                     "hypothesis": datasets.features.TranslationVariableLanguages(
+                     "hypothesis": datasets.TranslationVariableLanguages(
                          languages=_LANGUAGES,
                      ),
-                     "label": datasets.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
+                     "label": datasets.ClassLabel(names=["entailment", "neutral", "contradiction"]),
                  }
-             ),
+             )
+         else:
+             features = datasets.Features(
+                 {
+                     "premise": datasets.Value("string"),
+                     "hypothesis": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=["entailment", "neutral", "contradiction"]),
+                 }
+             )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
              # No default supervised_keys (as we have to pass both premise
              # and hypothesis as input).
              supervised_keys=None,
@@ -90,31 +129,84 @@ class Xnli(datasets.GeneratorBasedBuilder):
          )

      def _split_generators(self, dl_manager):
-         dl_dir = dl_manager.download_and_extract(_DATA_URL)
-         data_dir = os.path.join(dl_dir, "XNLI-1.0")
+         dl_dirs = dl_manager.download_and_extract(
+             {
+                 "train_data": _TRAIN_DATA_URL,
+                 "testval_data": _TESTVAL_DATA_URL,
+             }
+         )
+         train_dir = os.path.join(dl_dirs["train_data"], "XNLI-MT-1.0", "multinli")
+         testval_dir = os.path.join(dl_dirs["testval_data"], "XNLI-1.0")
          return [
              datasets.SplitGenerator(
-                 name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, "xnli.test.tsv")}
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepaths": [
+                         os.path.join(train_dir, "multinli.train.{lang}.tsv".format(lang=lang))
+                         for lang in self.config.languages
+                     ],
+                     "data_format": "XNLI-MT",
+                 },
              ),
              datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir, "xnli.dev.tsv")}
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepaths": [os.path.join(testval_dir, "xnli.test.tsv")], "data_format": "XNLI"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepaths": [os.path.join(testval_dir, "xnli.dev.tsv")], "data_format": "XNLI"},
              ),
          ]

-     def _generate_examples(self, filepath):
+     def _generate_examples(self, data_format, filepaths):
          """This function returns the examples in the raw (text) form."""
-         rows_per_pair_id = collections.defaultdict(list)
-
-         with open(filepath, encoding="utf-8") as f:
-             reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-             for row in reader:
-                 rows_per_pair_id[row["pairID"]].append(row)
-
-         for rows in six.itervalues(rows_per_pair_id):
-             premise = {row["language"]: row["sentence1"] for row in rows}
-             hypothesis = {row["language"]: row["sentence2"] for row in rows}
-             yield rows[0]["pairID"], {
-                 "premise": premise,
-                 "hypothesis": hypothesis,
-                 "label": rows[0]["gold_label"],
-             }
+
+         if self.config.language == "all_languages":
+             if data_format == "XNLI-MT":
+                 with ExitStack() as stack:
+                     files = [stack.enter_context(open(filepath, encoding="utf-8")) for filepath in filepaths]
+                     readers = [csv.DictReader(file, delimiter="\t", quoting=csv.QUOTE_NONE) for file in files]
+                     for row_idx, rows in enumerate(zip(*readers)):
+                         yield row_idx, {
+                             "premise": {lang: row["premise"] for lang, row in zip(self.config.languages, rows)},
+                             "hypothesis": {lang: row["hypo"] for lang, row in zip(self.config.languages, rows)},
+                             "label": rows[0]["label"].replace("contradictory", "contradiction"),
+                         }
+             else:
+                 rows_per_pair_id = collections.defaultdict(list)
+                 for filepath in filepaths:
+                     with open(filepath, encoding="utf-8") as f:
+                         reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+                         for row in reader:
+                             rows_per_pair_id[row["pairID"]].append(row)
+
+                 for rows in rows_per_pair_id.values():
+                     premise = {row["language"]: row["sentence1"] for row in rows}
+                     hypothesis = {row["language"]: row["sentence2"] for row in rows}
+                     yield rows[0]["pairID"], {
+                         "premise": premise,
+                         "hypothesis": hypothesis,
+                         "label": rows[0]["gold_label"],
+                     }
+         else:
+             if data_format == "XNLI-MT":
+                 for file_idx, filepath in enumerate(filepaths):
+                     file = open(filepath, encoding="utf-8")
+                     reader = csv.DictReader(file, delimiter="\t", quoting=csv.QUOTE_NONE)
+                     for row_idx, row in enumerate(reader):
+                         yield (file_idx, row_idx), {
+                             "premise": row["premise"],
+                             "hypothesis": row["hypo"],
+                             "label": row["label"].replace("contradictory", "contradiction"),
+                         }
+             else:
+                 for filepath in filepaths:
+                     with open(filepath, encoding="utf-8") as f:
+                         reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+                         for row in reader:
+                             if row["language"] == self.config.language:
+                                 yield row["pairID"], {
+                                     "premise": row["sentence1"],
+                                     "hypothesis": row["sentence2"],
+                                     "label": row["gold_label"],
+                                 }
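With the rewritten loader, the config name controls both which archives are read and how examples are emitted: a per-language config yields plain-string premise/hypothesis pairs, while all_languages yields translation dictionaries keyed by language code (see the two branches of _info and _generate_examples above). A hedged usage sketch, not part of this commit, assuming datasets >= 1.1.3 with this loader:

from datasets import load_dataset

# Per-language config: fields come back as plain strings.
de_val = load_dataset("xnli", "de", split="validation")
print(de_val[0]["premise"], "|", de_val[0]["hypothesis"], "|", de_val[0]["label"])

# all_languages config: premise is a Translation feature, i.e. a dict keyed by
# the 15 language codes; the label names are shared across all configs.
all_test = load_dataset("xnli", "all_languages", split="test")
print(sorted(all_test[0]["premise"].keys()))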