diff --git a/README.md b/README.md
index bdcf7dd4755456b6523482900ccd1d2bc43abf67..3c97827baf782ed0ebbdc718267f23c0eebc6dfe 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,5 @@
 ---
+paperswithcode_id: xtreme
 ---
 
 # Dataset Card for "xtreme"
@@ -6,12 +7,12 @@
 ## Table of Contents
 - [Dataset Description](#dataset-description)
   - [Dataset Summary](#dataset-summary)
-  - [Supported Tasks](#supported-tasks)
+  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
   - [Languages](#languages)
 - [Dataset Structure](#dataset-structure)
   - [Data Instances](#data-instances)
   - [Data Fields](#data-fields)
-  - [Data Splits Sample Size](#data-splits-sample-size)
+  - [Data Splits](#data-splits)
 - [Dataset Creation](#dataset-creation)
   - [Curation Rationale](#curation-rationale)
   - [Source Data](#source-data)
@@ -55,7 +56,7 @@ and availability of training data. Among these are many under-studied languages,
 (spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the
 Niger-Congo languages Swahili and Yoruba, spoken in Africa.
 
-### Supported Tasks
+### Supported Tasks and Leaderboards
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
@@ -173,7 +174,7 @@ The data fields are the same among all splits.
   - `answer_start`: a `int32` feature.
   - `text`: a `string` feature.
 
-### Data Splits Sample Size
+### Data Splits
 
 | name |validation|test|
 |----------|---------:|---:|
@@ -191,10 +192,22 @@ The data fields are the same among all splits.
 
 ### Source Data
 
+#### Initial Data Collection and Normalization
+
+[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
+
+#### Who are the source language producers?
+
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
 ### Annotations
 
+#### Annotation process
+
+[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
+
+#### Who are the annotators?
+
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
 ### Personal and Sensitive Information
diff --git a/dataset_infos.json b/dataset_infos.json
index 6cff29fcf42eec6d639733ac77655e9f7ed00aee..74752f6c5cce1dbf2a7b88abedc57dffcb5667f6 100644
--- a/dataset_infos.json
+++ b/dataset_infos.json
@@ -1 +1 @@
-{"XNLI": {"description": "\nThe Cross-lingual Natural Language Inference (XNLI) corpus is a crowd-sourced collection of 5,000 test and\n2,500 dev pairs for the MultiNLI corpus. The pairs are annotated with textual entailment and translated into\n14 languages: French, Spanish, German, Greek, Bulgarian, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese,\nHindi, Swahili and Urdu. This results in 112.5k annotated pairs. Each premise can be associated with the\ncorresponding hypothesis in the 15 languages, summing up to more than 1.5M combinations. The corpus is made to\nevaluate how to perform inference in any language (including low-resources ones like Swahili or Urdu) when only\nEnglish NLI data is available at training time.
One solution is cross-lingual sentence encoding, for which XNLI\nis an evaluation benchmark.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"language": {"dtype": "string", "id": null, "_type": "Value"}, "sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "gold_label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XNLI", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 20359500, "num_examples": 75150, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 10049303, "num_examples": 37350, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 17865352, "post_processing_size": null, "dataset_size": 30408803, "size_in_bytes": 48274155}, "tydiqa": {"description": "Gold passage task (GoldP): Given a passage that is guaranteed to contain the\n answer, predict the single contiguous span of characters that answers the question. This is more similar to\n existing reading comprehension datasets (as opposed to the information-seeking task outlined above).\n This task is constructed with two goals in mind: (1) more directly comparing with prior work and (2) providing\n a simplified way for researchers to use TyDi QA by providing compatibility with existing code for SQuAD 1.1,\n XQuAD, and MLQA. 
Toward these goals, the gold passage task differs from the primary task in several ways:\n only the gold answer passage is provided rather than the entire Wikipedia article;\n unanswerable questions have been discarded, similar to MLQA and XQuAD;\n we evaluate with the SQuAD 1.1 metrics like XQuAD; and\n Thai and Japanese are removed since the lack of whitespace breaks some tools.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{tydiqa,\n title = {TyDi QA: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages},\n author = {Jonathan H. Clark and Eunsol Choi and Michael Collins and Dan Garrette and Tom Kwiatkowski and Vitaly Nikolaev and Jennimaria Palomaki}\n year = {2020},\n journal = {Transactions of the Association for Computational Linguistics}\n }\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/tydiqa", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tydiqa", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 52948607, "num_examples": 49881, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 5006461, "num_examples": 5077, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/tydiqa/v1.1/tydiqa-goldp-v1.1-train.json": {"num_bytes": 58004076, "checksum": "cefc8e09ff2548d9b10a678d3a6bbbe5bc036be543f92418819ea676c97be23b"}, "https://storage.googleapis.com/tydiqa/v1.1/tydiqa-goldp-v1.1-dev.json": {"num_bytes": 5617409, "checksum": "b286e0f34bc7f52259359989716f369b160565bd12ad8f3a3e311f9b0dbad1c0"}}, "download_size": 63621485, "post_processing_size": null, "dataset_size": 57955068, "size_in_bytes": 121576553}, "SQuAD": {"description": "Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed 
by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{2016arXiv160605250R,\n author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},\n Konstantin and {Liang}, Percy},\n title = \"{SQuAD: 100,000+ Questions for Machine Comprehension of Text}\",\n journal = {arXiv e-prints},\n year = 2016,\n eid = {arXiv:1606.05250},\n pages = {arXiv:1606.05250},\n archivePrefix = {arXiv},\n eprint = {1606.05250},\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://rajpurkar.github.io/SQuAD-explorer/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "SQuAD", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 79317110, "num_examples": 87599, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 10472653, "num_examples": 10570, "dataset_name": "xtreme"}}, "download_checksums": {"https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json": {"num_bytes": 30288272, "checksum": "3527663986b8295af4f7fcdff1ba1ff3f72d07d61a20f487cb238a6ef92fd955"}, "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json": {"num_bytes": 4854279, "checksum": "95aa6a52d5d6a735563366753ca50492a658031da74f301ac5238b03966972c9"}}, "download_size": 35142551, "post_processing_size": null, "dataset_size": 89789763, "size_in_bytes": 124932314}, "PAN-X.af": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. 
The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.af", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 259709, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 257204, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1321396, "num_examples": 5000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1838309, "size_in_bytes": 1838309}, "PAN-X.ar": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. 
The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1808303, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1811983, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3634136, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 7254422, "size_in_bytes": 7254422}, "PAN-X.bg": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.bg", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2310314, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2306158, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4600773, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 9217245, "size_in_bytes": 9217245}, "PAN-X.bn": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.bn", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 159088, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 159282, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1568845, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1887215, "size_in_bytes": 1887215}, "PAN-X.de": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2381565, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2377639, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4762352, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 9521556, "size_in_bytes": 9521556}, "PAN-X.el": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.el", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2533806, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2547594, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 5063176, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 10144576, "size_in_bytes": 10144576}, "PAN-X.en": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1920069, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1916220, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3823474, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 7659763, "size_in_bytes": 7659763}, "PAN-X.es": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1592525, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1602291, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3199161, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 6393977, "size_in_bytes": 6393977}, "PAN-X.et": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.et", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2030160, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2021409, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3023211, "num_examples": 15000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 7074780, "size_in_bytes": 7074780}, "PAN-X.eu": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.eu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2296335, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2249835, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2292327, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 6838497, "size_in_bytes": 6838497}, "PAN-X.fa": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.fa", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1782306, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1770284, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3529354, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 7081944, "size_in_bytes": 7081944}, "PAN-X.fi": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.fi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2131769, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2130665, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4273793, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8536227, "size_in_bytes": 8536227}, "PAN-X.fr": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.fr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1664190, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1675785, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3335424, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 6675399, "size_in_bytes": 6675399}, "PAN-X.he": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.he", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2332760, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2318756, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4667100, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 9318616, "size_in_bytes": 9318616}, "PAN-X.hi": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 190671, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 196190, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 964212, "num_examples": 5000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1351073, "size_in_bytes": 1351073}, "PAN-X.hu": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.hu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2211851, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2249779, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4499914, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8961544, "size_in_bytes": 8961544}, "PAN-X.id": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.id", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1537979, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1536879, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3084007, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 6158865, "size_in_bytes": 6158865}, "PAN-X.it": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.it", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1908529, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1928408, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3874663, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 7711600, "size_in_bytes": 7711600}, "PAN-X.ja": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ja", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 6323003, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 6448960, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 12670401, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 25442364, "size_in_bytes": 25442364}, "PAN-X.jv": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.jv", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 14600, "num_examples": 100, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 16917, "num_examples": 100, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 16106, "num_examples": 100, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 47623, "size_in_bytes": 47623}, "PAN-X.ka": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ka", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2806901, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2824641, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2777362, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8408904, "size_in_bytes": 8408904}, "PAN-X.kk": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.kk", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 238109, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 236724, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 240276, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 715109, "size_in_bytes": 715109}, "PAN-X.ko": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ko", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2138167, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2138294, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4284733, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8561194, "size_in_bytes": 8561194}, "PAN-X.ml": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ml", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 290755, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 276926, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2865204, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3432885, "size_in_bytes": 3432885}, "PAN-X.mr": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.mr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 245358, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 255904, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1248259, "num_examples": 5000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1749521, "size_in_bytes": 1749521}, "PAN-X.ms": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ms", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 147515, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 147168, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2965048, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3259731, "size_in_bytes": 3259731}, "PAN-X.my": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.my", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 40428, "num_examples": 100, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 37366, "num_examples": 100, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 32735, "num_examples": 100, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 110529, "size_in_bytes": 110529}, "PAN-X.nl": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.nl", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2016856, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2038638, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4062189, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8117683, "size_in_bytes": 8117683}, "PAN-X.pt": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.pt", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1575141, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1562625, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3149283, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 6287049, "size_in_bytes": 6287049}, "PAN-X.ru": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ru", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2053169, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2074145, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4121791, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8249105, "size_in_bytes": 8249105}, "PAN-X.sw": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.sw", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 136368, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 140231, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 135911, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 412510, "size_in_bytes": 412510}, "PAN-X.ta": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ta", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 277625, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 278114, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4122130, "num_examples": 15000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4677869, "size_in_bytes": 4677869}, "PAN-X.te": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.te", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 293281, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 296963, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 295410, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 885654, "size_in_bytes": 885654}, "PAN-X.th": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.th", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 13262737, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 13586928, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 27133029, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 53982694, "size_in_bytes": 53982694}, "PAN-X.tl": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.tl", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 114156, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 117904, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1168717, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1400777, "size_in_bytes": 1400777}, "PAN-X.tr": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.tr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1915352, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1911503, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3779170, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 7606025, "size_in_bytes": 7606025}, "PAN-X.ur": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ur", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 152148, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 151922, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3072276, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3376346, "size_in_bytes": 3376346}, "PAN-X.vi": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1565143, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1580216, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3153227, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 6298586, "size_in_bytes": 6298586}, "PAN-X.yo": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.yo", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 13245, "num_examples": 100, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 13533, "num_examples": 100, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 14709, "num_examples": 100, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 41487, "size_in_bytes": 41487}, "PAN-X.zh": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\t", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 4491325, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 4363172, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 8832051, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 17686548, "size_in_bytes": 17686548}, "MLQA.ar.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.ar.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 8368114, "num_examples": 5335, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 824108, "num_examples": 517, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 9192222, "size_in_bytes": 84911272}, "MLQA.ar.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.ar.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2183942, "num_examples": 1649, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 364837, "num_examples": 207, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 2548779, "size_in_bytes": 78267829}, "MLQA.ar.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.ar.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3290629, "num_examples": 2047, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 288446, "num_examples": 163, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3579075, "size_in_bytes": 79298125}, "MLQA.ar.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.ar.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3229872, "num_examples": 1912, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 340049, "num_examples": 188, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3569921, "size_in_bytes": 79288971}, "MLQA.ar.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.ar.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 8225662, "num_examples": 5335, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 810089, "num_examples": 517, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 9035751, "size_in_bytes": 84754801}, "MLQA.ar.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.ar.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3041378, "num_examples": 1978, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 228180, "num_examples": 161, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3269558, "size_in_bytes": 78988608}, "MLQA.ar.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.ar.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3039396, "num_examples": 1831, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 281770, "num_examples": 186, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3321166, "size_in_bytes": 79040216}, "MLQA.de.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.de.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1620006, "num_examples": 1649, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 200174, "num_examples": 207, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1820180, "size_in_bytes": 77539230}, "MLQA.de.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.de.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4366102, "num_examples": 4517, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 488367, "num_examples": 512, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4854469, "size_in_bytes": 80573519}, "MLQA.de.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.de.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1688483, "num_examples": 1675, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 216075, "num_examples": 182, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1904558, "size_in_bytes": 77623608}, "MLQA.de.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.de.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1679180, "num_examples": 1621, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 184318, "num_examples": 190, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1863498, "size_in_bytes": 77582548}, "MLQA.de.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.de.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4343144, "num_examples": 4517, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 485894, "num_examples": 512, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4829038, "size_in_bytes": 80548088}, "MLQA.de.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.de.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1716615, "num_examples": 1776, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 170582, "num_examples": 196, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1887197, "size_in_bytes": 77606247}, "MLQA.de.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.de.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1371074, "num_examples": 1430, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 153871, "num_examples": 163, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1524945, "size_in_bytes": 77243995}, "MLQA.vi.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.vi.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3205185, "num_examples": 2047, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 230335, "num_examples": 163, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3435520, "size_in_bytes": 79154570}, "MLQA.vi.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.vi.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2227033, "num_examples": 1675, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 277185, "num_examples": 182, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 2504218, "size_in_bytes": 78223268}, "MLQA.vi.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.vi.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 7922085, "num_examples": 5495, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 726518, "num_examples": 511, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 8648603, "size_in_bytes": 84367653}, "MLQA.vi.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.vi.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2989660, "num_examples": 1943, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 269389, "num_examples": 184, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3259049, "size_in_bytes": 78978099}, "MLQA.vi.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.vi.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 7843431, "num_examples": 5495, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 719273, "num_examples": 511, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 8562704, "size_in_bytes": 84281754}, "MLQA.vi.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.vi.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2866597, "num_examples": 2018, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 283461, "num_examples": 189, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3150058, "size_in_bytes": 78869108}, "MLQA.vi.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.vi.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2776664, "num_examples": 1947, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 255007, "num_examples": 177, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3031671, "size_in_bytes": 78750721}, "MLQA.zh.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.zh.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1731483, "num_examples": 1912, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 175349, "num_examples": 188, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1906832, "size_in_bytes": 77625882}, "MLQA.zh.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.zh.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1390018, "num_examples": 1621, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 174605, "num_examples": 190, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1564623, "size_in_bytes": 77283673}, "MLQA.zh.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.zh.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1806186, "num_examples": 1943, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 172934, "num_examples": 184, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1979120, "size_in_bytes": 77698170}, "MLQA.zh.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.zh.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4422350, "num_examples": 5137, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 443810, "num_examples": 504, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4866160, "size_in_bytes": 80585210}, "MLQA.zh.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.zh.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4450985, "num_examples": 5137, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 446868, "num_examples": 504, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4897853, "size_in_bytes": 80616903}, "MLQA.zh.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.zh.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1736283, "num_examples": 1947, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 138073, "num_examples": 161, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1874356, "size_in_bytes": 77593406}, "MLQA.zh.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.zh.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1578219, "num_examples": 1767, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 184401, "num_examples": 189, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1762620, "size_in_bytes": 77481670}, "MLQA.en.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.en.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6739219, "num_examples": 5335, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 630843, "num_examples": 517, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 7370062, "size_in_bytes": 83089112}, "MLQA.en.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.en.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 5056722, "num_examples": 4517, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 594936, "num_examples": 512, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 5651658, "size_in_bytes": 81370708}, "MLQA.en.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.en.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 7056698, "num_examples": 5495, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 640646, "num_examples": 511, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 7697344, "size_in_bytes": 83416394}, "MLQA.en.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.en.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6539307, "num_examples": 5137, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 608444, "num_examples": 504, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 7147751, "size_in_bytes": 82866801}, "MLQA.en.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.en.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 14004648, "num_examples": 11590, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 1329112, "num_examples": 1148, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 15333760, "size_in_bytes": 91052810}, "MLQA.en.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.en.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6179249, "num_examples": 5253, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 555462, "num_examples": 500, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 6734711, "size_in_bytes": 82453761}, "MLQA.en.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.en.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6378866, "num_examples": 4918, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 623171, "num_examples": 507, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 7002037, "size_in_bytes": 82721087}, "MLQA.es.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.es.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1740282, "num_examples": 1978, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 148649, "num_examples": 161, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1888931, "size_in_bytes": 77607981}, "MLQA.es.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.es.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1404025, "num_examples": 1776, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 144186, "num_examples": 196, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1548211, "size_in_bytes": 77267261}, "MLQA.es.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.es.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1747969, "num_examples": 2018, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 176841, "num_examples": 189, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1924810, "size_in_bytes": 77643860}, "MLQA.es.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.es.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1678451, "num_examples": 1947, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 126646, "num_examples": 161, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1805097, "size_in_bytes": 77524147}, "MLQA.es.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.es.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4362737, "num_examples": 5253, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 419068, "num_examples": 500, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4781805, "size_in_bytes": 80500855}, "MLQA.es.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.es.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4394333, "num_examples": 5253, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 422071, "num_examples": 500, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4816404, "size_in_bytes": 80535454}, "MLQA.es.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.es.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1523523, "num_examples": 1723, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 181834, "num_examples": 187, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1705357, "size_in_bytes": 77424407}, "MLQA.hi.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.hi.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4445589, "num_examples": 1831, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 410424, "num_examples": 186, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4856013, "size_in_bytes": 80575063}, "MLQA.hi.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.hi.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3022864, "num_examples": 1430, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 301713, "num_examples": 163, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3324577, "size_in_bytes": 79043627}, "MLQA.hi.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.hi.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4743484, "num_examples": 1947, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 419106, "num_examples": 177, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 5162590, "size_in_bytes": 80881640}, "MLQA.hi.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.hi.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4354875, "num_examples": 1767, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 424246, "num_examples": 189, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4779121, "size_in_bytes": 80498171}, "MLQA.hi.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.hi.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 11449261, "num_examples": 4918, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 1097857, "num_examples": 507, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 12547118, "size_in_bytes": 88266168}, "MLQA.hi.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.hi.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3862229, "num_examples": 1723, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 420402, "num_examples": 187, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4282631, "size_in_bytes": 80001681}, "MLQA.hi.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.hi.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 11810475, "num_examples": 4918, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 1136784, "num_examples": 507, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 12947259, "size_in_bytes": 88666309}, "XQuAD.ar": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1722799, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.ar.json": {"num_bytes": 1582988, "checksum": "abdabd7afed5c635d99cca0f3f0d0c9d9ed0bc77451e963c2e4e0638c29e486d"}}, "download_size": 1582988, "post_processing_size": null, "dataset_size": 1722799, "size_in_bytes": 3305787}, "XQuAD.de": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1283301, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.de.json": {"num_bytes": 669810, "checksum": "990b5d746746ed65ed4702ea5f35f99ffa4e2f1c390c07d003642acd937916f9"}}, "download_size": 669810, "post_processing_size": null, "dataset_size": 1283301, "size_in_bytes": 1953111}, "XQuAD.vi": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1477239, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.vi.json": {"num_bytes": 911401, "checksum": "f619a1eb11fb42d3ab0834259e488a65f585447ef6154437bfb7199d85161a04"}}, "download_size": 911401, "post_processing_size": null, "dataset_size": 1477239, "size_in_bytes": 2388640}, "XQuAD.zh": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 984241, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.zh.json": {"num_bytes": 808652, "checksum": "691d0b3359bc6b8faa8de931dfdfe21d50a65861ae348e32a0d1a0190b0c8835"}}, "download_size": 808652, "post_processing_size": null, "dataset_size": 984241, "size_in_bytes": 1792893}, "XQuAD.en": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1116123, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.en.json": {"num_bytes": 609383, "checksum": "e4c57d1c9143aaa1c5d265ba5987a65f4e69528d2a98f29d6e75019b10344f29"}}, "download_size": 609383, "post_processing_size": null, "dataset_size": 1116123, "size_in_bytes": 1725506}, "XQuAD.es": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1273499, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.es.json": {"num_bytes": 684322, "checksum": "dcbae93ec3a9f4b9e78fd834a171d6f96c1a875e10e15b7530b7e4ef4971e37e"}}, "download_size": 684322, "post_processing_size": null, "dataset_size": 1273499, "size_in_bytes": 1957821}, "XQuAD.hi": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2682975, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.hi.json": {"num_bytes": 1680538, "checksum": "df2cce3532b37e9beb8979704b5c9a4bf874358f105395a298b89427b43b9d24"}}, "download_size": 1680538, "post_processing_size": null, "dataset_size": 2682975, "size_in_bytes": 4363513}, "XQuAD.el": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.el", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2206690, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.el.json": {"num_bytes": 1918889, "checksum": "821cf0f88e73fa258fd2f548b19b6ec39f7025059e16f6f9fc8cd797c9c3663e"}}, "download_size": 1918889, "post_processing_size": null, "dataset_size": 2206690, "size_in_bytes": 4125579}, "XQuAD.ru": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.ru", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2136990, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.ru.json": {"num_bytes": 1896368, "checksum": "208d5b1aa154c52b1b5c5eda16281e455e8fd198cdb9af3f469f0d6037d973bf"}}, "download_size": 1896368, "post_processing_size": null, "dataset_size": 2136990, "size_in_bytes": 4033358}, "XQuAD.th": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.th", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2854959, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.th.json": {"num_bytes": 1809143, "checksum": "5cdda11d0e1e075f7872abf4e6ae830388ce7f617964d542308e9ae4257e0f43"}}, "download_size": 1809143, "post_processing_size": null, "dataset_size": 2854959, "size_in_bytes": 4664102}, "XQuAD.tr": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.tr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1210763, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.tr.json": {"num_bytes": 729506, "checksum": "92179a564774b7696100d144c1e10870d0a966b6fccbdd254a65b9d2ab1971cc"}}, "download_size": 729506, "post_processing_size": null, "dataset_size": 1210763, "size_in_bytes": 1940269}, "bucc18.de": {"description": "Building and Using Comparable Corpora\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://comparable.limsi.fr/bucc2018/", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "bucc18.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 247339, "num_examples": 1038, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2309764, "num_examples": 9580, "dataset_name": "xtreme"}}, "download_checksums": {"https://comparable.limsi.fr/bucc2018/bucc2018-de-en.training-gold.tar.bz2": {"num_bytes": 28189548, "checksum": "766e0fdebbd1438fb87c21254828eb13c8b997d8fbab002103dd060dcac50c5c"}, "https://comparable.limsi.fr/bucc2018/bucc2018-de-en.sample-gold.tar.bz2": {"num_bytes": 2529652, "checksum": "8e16ba8b02ef8d648b06adfbd6dfb188f43524e18f97b2b12a14a086caac62f3"}}, "download_size": 30719200, "post_processing_size": null, "dataset_size": 2557103, "size_in_bytes": 33276303}, "bucc18.fr": {"description": "Building and Using Comparable Corpora\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://comparable.limsi.fr/bucc2018/", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "bucc18.fr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 210221, "num_examples": 929, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2064547, "num_examples": 9086, "dataset_name": "xtreme"}}, "download_checksums": {"https://comparable.limsi.fr/bucc2018/bucc2018-fr-en.training-gold.tar.bz2": {"num_bytes": 20757713, "checksum": "53c0d0e7dd97dc89593a2db25a26c5f0ccdc7113e8451263bb0c80e4c4c7dc30"}, "https://comparable.limsi.fr/bucc2018/bucc2018-fr-en.sample-gold.tar.bz2": {"num_bytes": 1948831, "checksum": "df9eb3966954e163c9264076f7c9c1eb56d9d8a91855f9d3afbf2c0fdaef0a08"}}, "download_size": 22706544, "post_processing_size": null, "dataset_size": 2274768, "size_in_bytes": 24981312}, "bucc18.zh": {"description": "Building and Using Comparable Corpora\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://comparable.limsi.fr/bucc2018/", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "bucc18.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 55077, "num_examples": 257, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 412002, "num_examples": 1899, "dataset_name": "xtreme"}}, "download_checksums": {"https://comparable.limsi.fr/bucc2018/bucc2018-zh-en.training-gold.tar.bz2": {"num_bytes": 6344925, "checksum": "3facb71798277c8f44dc78c1f8ae2110f254d0e14799f3508eedd54b4236877a"}, "https://comparable.limsi.fr/bucc2018/bucc2018-zh-en.sample-gold.tar.bz2": {"num_bytes": 769869, "checksum": "a3425be5c0320ee131a0927b66c3e29befb3b481ebf1b87257e660e514bc16ac"}}, "download_size": 7114794, "post_processing_size": null, "dataset_size": 467079, "size_in_bytes": 7581873}, "bucc18.ru": {"description": "Building and Using Comparable Corpora\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://comparable.limsi.fr/bucc2018/", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "bucc18.ru", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 751368, "num_examples": 2374, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 4583855, "num_examples": 14435, "dataset_name": "xtreme"}}, "download_checksums": {"https://comparable.limsi.fr/bucc2018/bucc2018-ru-en.training-gold.tar.bz2": {"num_bytes": 37085079, "checksum": "1895df56e936ca3d4f5b12299ceffe0b7ff4806584c40bdaa3ae1d445f25afa5"}, "https://comparable.limsi.fr/bucc2018/bucc2018-ru-en.sample-gold.tar.bz2": {"num_bytes": 4269233, "checksum": "fce3cabc7ee50ddb4b18aa6fb090e2669c8383d2a29fc97eed6ae70fed9a23e5"}}, "download_size": 41354312, "post_processing_size": null, "dataset_size": 5335223, "size_in_bytes": 46689535}, "PAWS-X.de": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAWS-X.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 500009, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 510194, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 12451883, "num_examples": 49380, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 13462086, "size_in_bytes": 43744143}, "PAWS-X.en": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAWS-X.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 478291, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 480738, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 11827719, "num_examples": 49175, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 12786748, "size_in_bytes": 43068805}, "PAWS-X.es": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAWS-X.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 494069, "num_examples": 1961, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 505047, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 12462107, "num_examples": 49401, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 13461223, "size_in_bytes": 43743280}, "PAWS-X.fr": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAWS-X.fr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 516111, "num_examples": 1988, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 521031, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 12948512, "num_examples": 49399, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 13985654, "size_in_bytes": 44267711}, "PAWS-X.ja": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAWS-X.ja", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 647774, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 654640, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 14695653, "num_examples": 49401, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 15998067, "size_in_bytes": 46280124}, "PAWS-X.ko": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAWS-X.ko", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 540787, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 547978, "num_examples": 1999, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 13542657, "num_examples": 49164, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 14631422, "size_in_bytes": 44913479}, "PAWS-X.zh": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAWS-X.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 459120, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 460638, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 10469712, "num_examples": 49401, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 11389470, "size_in_bytes": 41671527}, "tatoeba.afr": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.afr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 179651, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.afr-eng.afr": {"num_bytes": 30586, "checksum": "7bb9e073ad8422d6bfdec7c9ebdcef8ac486e72b237200e447923a8b921a0a56"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.afr-eng.eng": {"num_bytes": 29049, "checksum": "0b700c125efb4030b4cc50d9d765d5884afc24f39296f29b028a1b2a8512034f"}}, "download_size": 59635, "post_processing_size": null, "dataset_size": 179651, "size_in_bytes": 239286}, "tatoeba.ara": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.ara", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 192666, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ara-eng.ara": {"num_bytes": 43582, "checksum": "e67a0ae072b79cd9e8eb09f166c3bc0b23488d39f5720f2ee0a8350ae17b719f"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ara-eng.eng": {"num_bytes": 29068, "checksum": "3c11838c963f598a52dcf2f452b666353538257001db5c59c3a5f54a999b336b"}}, "download_size": 72650, "post_processing_size": null, "dataset_size": 192666, "size_in_bytes": 265316}, "tatoeba.ben": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.ben", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 211719, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ben-eng.ben": {"num_bytes": 65990, "checksum": "ac3385695d6a6c7e5d18e38ad4b8b7d3780f3df23dd0ff3f539071b8269a8613"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ben-eng.eng": {"num_bytes": 25713, "checksum": "67fbe75fec549d436c3356b6d6f8dd53179b6a908661b5d507d28c7fee83350e"}}, "download_size": 91703, "post_processing_size": null, "dataset_size": 211719, "size_in_bytes": 303422}, "tatoeba.bul": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.bul", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 222295, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.bul-eng.bul": {"num_bytes": 65500, "checksum": "f9fa90cf3599d8c87f7a6ed22f5d648e3ce6687c705656a8c8ea088d891f79d5"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.bul-eng.eng": {"num_bytes": 36779, "checksum": "4b03b3b52ffb7cf4286e0c4453c90910a3021546f160bdf0e4d39d1f45bfbc0b"}}, "download_size": 102279, "post_processing_size": null, "dataset_size": 222295, "size_in_bytes": 324574}, "tatoeba.deu": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.deu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 225583, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.deu-eng.deu": {"num_bytes": 57121, "checksum": "edfa6f75a42554df388f45891735d5e4214158a99def4b73b5908af4a3054551"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.deu-eng.eng": {"num_bytes": 48446, "checksum": "eb9cc83a42f2c4b22f310d05311207e41abb56c2a084666cac3ee0f84d2d0b84"}}, "download_size": 105567, "post_processing_size": null, "dataset_size": 225583, "size_in_bytes": 331150}, "tatoeba.cmn": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.cmn", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 188947, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.cmn-eng.cmn": {"num_bytes": 33410, "checksum": "965d033966fcd186c89741ad49ab4b0a0b2bbd33e02666635ff3b2be23c1ac1f"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.cmn-eng.eng": {"num_bytes": 35521, "checksum": "29ba36232488f5806aceccac57c59c5e750ddd08edb40eef417e3ada9ff9a239"}}, "download_size": 68931, "post_processing_size": null, "dataset_size": 188947, "size_in_bytes": 257878}, "tatoeba.ell": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.ell", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 198977, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ell-eng.ell": {"num_bytes": 52202, "checksum": "4fb3d3d30bdafd15100dfad5c4680f8f2ed5ca87ed0a6122e2fe2aa21fee65e8"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ell-eng.eng": {"num_bytes": 26759, "checksum": "1f630710e718e2f85a4e757d3c7f3d6e78ded0b25c99653b4c552138318d9ffe"}}, "download_size": 78961, "post_processing_size": null, "dataset_size": 198977, "size_in_bytes": 277938}, "tatoeba.est": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.est", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 179744, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.est-eng.est": {"num_bytes": 29996, "checksum": "94856999ef35e5357502d7ecf50419d0108b99270e507d9c57f8b283bd1be9c5"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.est-eng.eng": {"num_bytes": 29732, "checksum": "bb033b10596178452aecf2d97ad25580251375e7d224d8b38dad3d93d69b1e4f"}}, "download_size": 59728, "post_processing_size": null, "dataset_size": 179744, "size_in_bytes": 239472}, "tatoeba.eus": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.eus", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 186084, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.eus-eng.eus": {"num_bytes": 34060, "checksum": "4255fb70a6c268b09fcc59a9b308f0fcaaf45ef45e66fc55bf3c80eac4d8c97b"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.eus-eng.eng": {"num_bytes": 32008, "checksum": "1e80f0174ad544697fd69ddcf21287ca10c5e3cacba2fc42bf1d68c460d14ba2"}}, "download_size": 66068, "post_processing_size": null, "dataset_size": 186084, "size_in_bytes": 252152}, "tatoeba.fin": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.fin", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 195685, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.fin-eng.fin": {"num_bytes": 39857, "checksum": "8db3c734f755d578445947f1182f40faf2a9a0eca37561dd248717c088802d60"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.fin-eng.eng": {"num_bytes": 35812, "checksum": "322e610359f4d24852e673bbe4524d52c26dbf980aca0760e95c66dc21ecd504"}}, "download_size": 75669, "post_processing_size": null, "dataset_size": 195685, "size_in_bytes": 271354}, "tatoeba.fra": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.fra", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 200034, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.fra-eng.fra": {"num_bytes": 43727, "checksum": "644172ff9642fefa9e41c29b7d6f44196518e84350dc44d4992e943c0cca92b6"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.fra-eng.eng": {"num_bytes": 36291, "checksum": "5634220f8a26a9a23b84753a9aec0b0832e6bdaa9da3f83e0bd84c928c3f46e3"}}, "download_size": 80018, "post_processing_size": null, "dataset_size": 200034, "size_in_bytes": 280052}, "tatoeba.heb": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.heb", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 203516, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.heb-eng.heb": {"num_bytes": 47660, "checksum": "4a07ca4b8a6fb7ab499791573a2454730f47acbe209359d7b9372a9f6094a102"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.heb-eng.eng": {"num_bytes": 35840, "checksum": "73c27ed0f76c1d2da199230f05489749e10bb67ab879c6dfee8ca9807d6bd99c"}}, "download_size": 83500, "post_processing_size": null, "dataset_size": 203516, "size_in_bytes": 287016}, "tatoeba.hin": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.hin", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 242574, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.hin-eng.hin": {"num_bytes": 88468, "checksum": "15e4fb0a394be4438319f1d6955d1aea226e2a8c5ad38798b23b76ae43d742ed"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.hin-eng.eng": {"num_bytes": 34090, "checksum": "adfce2269a55dbac69b25c0f4f6eb89e0f9383165485925a2e042e61b9480562"}}, "download_size": 122558, "post_processing_size": null, "dataset_size": 242574, "size_in_bytes": 365132}, "tatoeba.hun": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.hun", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 188905, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.hun-eng.hun": {"num_bytes": 35335, "checksum": "56bd0682be8c1db6568313650b3310d641cc8d0019d12dd7caf201302350eeac"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.hun-eng.eng": {"num_bytes": 33554, "checksum": "5323266e91ddee67ed1ae00d6bbac0cdf3d37749d1b2da3459bf0d424bc71383"}}, "download_size": 68889, "post_processing_size": null, "dataset_size": 188905, "size_in_bytes": 257794}, "tatoeba.ind": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.ind", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 194860, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ind-eng.ind": {"num_bytes": 39969, "checksum": "4f03cd70cba071f746eacd3ebf6b60fd5a8377ce18b4cc52edec6721f05f352a"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ind-eng.eng": {"num_bytes": 34875, "checksum": "62a3f5127b60eb3526b8fa3994a68fa1a1f114f3a395307a8808a3517c05ffc5"}}, "download_size": 74844, "post_processing_size": null, "dataset_size": 194860, "size_in_bytes": 269704}, "tatoeba.ita": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.ita", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 185849, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ita-eng.ita": {"num_bytes": 34237, "checksum": "9b3f369d0ed92273b46dd3b983721636e3d15024ce7125f5103229249c386d26"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ita-eng.eng": {"num_bytes": 31596, "checksum": "738bf8f981e42d285f4f08bc09238782d285b02050e6e95287aa4e998bb7b24b"}}, "download_size": 65833, "post_processing_size": null, "dataset_size": 185849, "size_in_bytes": 251682}, "tatoeba.jav": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.jav", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 38529, "num_examples": 205, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.jav-eng.jav": {"num_bytes": 7457, "checksum": "ad88399db8f94c2a040aa53e7e862225964fac9308a3beb3d5b38f3eca2f827f"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.jav-eng.eng": {"num_bytes": 6456, "checksum": "172776353690f6c047ea21da969fa6979980d692fff1cfbac17eb25851423760"}}, "download_size": 13913, "post_processing_size": null, "dataset_size": 38529, "size_in_bytes": 52442}, "tatoeba.jpn": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.jpn", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 213099, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.jpn-eng.jpn": {"num_bytes": 53844, "checksum": "56040bd6949170a631039d9f8f4c6440db8761b0065c9686feba55c99a320d46"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.jpn-eng.eng": {"num_bytes": 39239, "checksum": "b42129b34e1bf225ccc25fc00e532a6113af98adbc6605b93021bd8aadeb68b6"}}, "download_size": 93083, "post_processing_size": null, "dataset_size": 213099, "size_in_bytes": 306182}, "tatoeba.kat": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.kat", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 161696, "num_examples": 746, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.kat-eng.kat": {"num_bytes": 50967, "checksum": "6ef69b5efbf355597ed91eb355b33a5f524bdf0875dbeaaccf6375badc20e29b"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.kat-eng.eng": {"num_bytes": 21193, "checksum": "d70a14aa64fd7c6b545f11aea754a632e1cbecb91af27fcf6a98a8449a48a8e7"}}, "download_size": 72160, "post_processing_size": null, "dataset_size": 161696, "size_in_bytes": 233856}, "tatoeba.kaz": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.kaz", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 116194, "num_examples": 575, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.kaz-eng.kaz": {"num_bytes": 29687, "checksum": "f20c682582a80b6aa10f3b933db93bc314449b554ce611e263bc75990b319aef"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.kaz-eng.eng": {"num_bytes": 17491, "checksum": "0ab684e7032c6520540d5785adf00ef206d097221d0dd4dc9bcaabd64068e10d"}}, "download_size": 47178, "post_processing_size": null, "dataset_size": 116194, "size_in_bytes": 163372}, "tatoeba.kor": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.kor", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 199155, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.kor-eng.kor": {"num_bytes": 44054, "checksum": "e550c84184ec35b1a0dab3154284719511a21746e53c40f46eb6ab08179e9188"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.kor-eng.eng": {"num_bytes": 35085, "checksum": "f900cf3c9b72ed5a400e1804702863ff3df00be58eb060902e02285d0e68fab3"}}, "download_size": 79139, "post_processing_size": null, "dataset_size": 199155, "size_in_bytes": 278294}, "tatoeba.mal": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.mal", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 177173, "num_examples": 687, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.mal-eng.mal": {"num_bytes": 72952, "checksum": "1a896f54f85a454fb0123864049c65921ae9dfd0cafda6deef8060f0104d965e"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.mal-eng.eng": {"num_bytes": 21765, "checksum": "4d80cdbb844cd4e33f874e5dc45c1cdda4f80998034448f7eb56b8b6532a6622"}}, "download_size": 94717, "post_processing_size": null, "dataset_size": 177173, "size_in_bytes": 271890}, "tatoeba.mar": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.mar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 220558, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.mar-eng.mar": {"num_bytes": 72652, "checksum": "b2931584fbe62062beb97cc939e4d208ace5ee56f15808860ab14e130fd3c576"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.mar-eng.eng": {"num_bytes": 27890, "checksum": "709d09b697dca053c814b9d525b72cb47cb011aa860c6598f3e2b1b3dd1280dd"}}, "download_size": 100542, "post_processing_size": null, "dataset_size": 220558, "size_in_bytes": 321100}, "tatoeba.nld": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.nld", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 193279, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.nld-eng.nld": {"num_bytes": 37866, "checksum": "d564d4ce1c621ccaefdbe9f5cb08eacccc7bf2a0b58666303e84ca9c7973bdb7"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.nld-eng.eng": {"num_bytes": 35397, "checksum": "3b8836749df573a53235b85ed6771f31bf2de428f520d2d6a1dd94b61b4ef057"}}, "download_size": 73263, "post_processing_size": null, "dataset_size": 193279, "size_in_bytes": 266542}, "tatoeba.pes": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.pes", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 213735, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.pes-eng.pes": {"num_bytes": 58866, "checksum": "f1553713723491fe5876e1060b18fb4abf0c77be3ba06db2e3307e83aedbbb32"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.pes-eng.eng": {"num_bytes": 34853, "checksum": "b5c6cf8c8d93ff8f2fe26b53f3ee29b62db9c6f7dcddcb086ba48953f4ce926b"}}, "download_size": 93719, "post_processing_size": null, "dataset_size": 213735, "size_in_bytes": 307454}, "tatoeba.por": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.por", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 195201, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.por-eng.por": {"num_bytes": 39331, "checksum": "b0c926a232c9889a87a1a970f9399c5618c2d95baf204321e9da794c0aec16f5"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.por-eng.eng": {"num_bytes": 35854, "checksum": "deb4568cfb7b7cbbc060a7fe97c4639fb4680842f4fcd28df791ffdbb753855a"}}, "download_size": 75185, "post_processing_size": null, "dataset_size": 195201, "size_in_bytes": 270386}, "tatoeba.rus": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.rus", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 212488, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.rus-eng.rus": {"num_bytes": 58822, "checksum": "446ff2cae66053c2277d9735b2c2df6b786cae258385f7ade7bed68d8835c6a0"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.rus-eng.eng": {"num_bytes": 33650, "checksum": "7b26f52d6085b7c4944d6f5f6f5b6e1932085b42112f1444db515ce59e878fb8"}}, "download_size": 92472, "post_processing_size": null, "dataset_size": 212488, "size_in_bytes": 304960}, "tatoeba.spa": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.spa", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 192282, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.spa-eng.spa": {"num_bytes": 37490, "checksum": "f9628cea40481e8251f0999718bd893cff0f261752f5e526b3bc20284e2ca018"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.spa-eng.eng": {"num_bytes": 34776, "checksum": "89e4470f4572040b1ca94b3edad97dcd8bd2f0141f072e12933b8659dadf917d"}}, "download_size": 72266, "post_processing_size": null, "dataset_size": 192282, "size_in_bytes": 264548}, "tatoeba.swh": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.swh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 67283, "num_examples": 390, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.swh-eng.swh": {"num_bytes": 9645, "checksum": "1c672915446c336cc378676e6dbf91eb54d27bbfd0c61563d349265bc6374753"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.swh-eng.eng": {"num_bytes": 10822, "checksum": "e8539647caff9e329776ae863b6224d432923c6e4e9256b9df92ca58ff282eac"}}, "download_size": 20467, "post_processing_size": null, "dataset_size": 67283, "size_in_bytes": 87750}, "tatoeba.tam": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.tam", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 76297, "num_examples": 307, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tam-eng.tam": {"num_bytes": 30553, "checksum": "bde87fb1ddedccf6c7a2b70ffdd19a959a573c113c2e7a041c4b623fb2170bde"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tam-eng.eng": {"num_bytes": 8888, "checksum": "e7c5106acdd100214a161970b2a5c31e7386e5b6a963e3d3afdf30412c90ac53"}}, "download_size": 39441, "post_processing_size": null, "dataset_size": 76297, "size_in_bytes": 115738}, "tatoeba.tgl": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.tgl", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 188154, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tgl-eng.tgl": {"num_bytes": 36506, "checksum": "f99165dc05190b99f6574fe24db884ff85d111612a25e7a37323f001aafc2a6e"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tgl-eng.eng": {"num_bytes": 31632, "checksum": "e7c9beda3f3072a968a34a7226a66d1ebf1dcb33cf002805dc752f80a7c620ae"}}, "download_size": 68138, "post_processing_size": null, "dataset_size": 188154, "size_in_bytes": 256292}, "tatoeba.tha": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.tha", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 128974, "num_examples": 548, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tha-eng.tha": {"num_bytes": 44759, "checksum": "65c7b3c01a56a1ac8971e72e0ea8e74a027718dc84044d8802c0ab36395a3156"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tha-eng.eng": {"num_bytes": 18439, "checksum": "2881c82d2c5fa59cf0a68bc9e012f5e0b0a716f7357cbecf77c247efc2fd7294"}}, "download_size": 63198, "post_processing_size": null, "dataset_size": 128974, "size_in_bytes": 192172}, "tatoeba.tur": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.tur", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 191901, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tur-eng.tur": {"num_bytes": 37607, "checksum": "1ffa0acc006018b3105abda41a4d4ca42f3c122964a49b71793546367b079a86"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tur-eng.eng": {"num_bytes": 34278, "checksum": "a768df7dd3d1344f872a458b32c3a65e24f8381826ccb16ba6677426176c8121"}}, "download_size": 71885, "post_processing_size": null, "dataset_size": 191901, "size_in_bytes": 263786}, "tatoeba.urd": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.urd", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 208728, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.urd-eng.urd": {"num_bytes": 56819, "checksum": "2efc22dc61885a9454aeeee68c8b841c7f9138d53ba644a82308bd210140450b"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.urd-eng.eng": {"num_bytes": 31893, "checksum": "dab35fda3f73b3fd86b6b9f9f9f6242430961aa5d1ac247adbc646867df79cec"}}, "download_size": 88712, "post_processing_size": null, "dataset_size": 208728, "size_in_bytes": 297440}, "tatoeba.vie": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.vie", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 211423, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.vie-eng.vie": {"num_bytes": 52721, "checksum": "6dbb02d778b0bfc8678cd85f87db76de55dd7e409a26fe32ad42d50e0f1fff77"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.vie-eng.eng": {"num_bytes": 38686, "checksum": "a1f60bd8ae6c42224a4c050d2aa1ff4242d14827d64d7831e96ecf2b2c367f5f"}}, "download_size": 91407, "post_processing_size": null, "dataset_size": 211423, "size_in_bytes": 302830}, "udpos.Afrikaans": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Afrikaans", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 88302, "num_examples": 5317, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 167943, "num_examples": 10065, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 566181, "num_examples": 33894, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 822426, "size_in_bytes": 356039107}, "udpos.Arabic": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Arabic", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 656541, "num_examples": 34249, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1027440, "num_examples": 52879, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4945741, "num_examples": 254340, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 6629722, "size_in_bytes": 361846403}, "udpos.Basque": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Basque", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 424583, "num_examples": 24095, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 430236, "num_examples": 24374, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1285506, "num_examples": 72974, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 2140325, "size_in_bytes": 357357006}, "udpos.Bulgarian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Bulgarian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 336276, "num_examples": 16089, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 329074, "num_examples": 15724, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2603904, "num_examples": 124336, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 3269254, "size_in_bytes": 358485935}, "udpos.Dutch": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Dutch", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 378953, "num_examples": 22966, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 381930, "num_examples": 22634, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4320879, "num_examples": 261164, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 5081762, "size_in_bytes": 360298443}, "udpos.English": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.English", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1001191, "num_examples": 62636, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1365465, "num_examples": 84986, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 6000751, "num_examples": 375267, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 8367407, "size_in_bytes": 363584088}, "udpos.Estonian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Estonian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 788537, "num_examples": 44632, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1034859, "num_examples": 58596, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 6403131, "num_examples": 361827, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 8226527, "size_in_bytes": 363443208}, "udpos.Finnish": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Finnish", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 631109, "num_examples": 34102, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 992307, "num_examples": 53260, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 5402529, "num_examples": 291068, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 7025945, "size_in_bytes": 362242626}, "udpos.French": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.French", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1242289, "num_examples": 77800, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1648118, "num_examples": 103429, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 9719147, "num_examples": 607444, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 12609554, "size_in_bytes": 367826235}, "udpos.German": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.German", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 5800418, "num_examples": 332163, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 7066885, "num_examples": 406668, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 52714515, "num_examples": 3022041, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 65581818, "size_in_bytes": 420798499}, "udpos.Greek": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Greek", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1034375, "num_examples": 46534, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 998254, "num_examples": 45195, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 8633981, "num_examples": 390368, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 10666610, "size_in_bytes": 365883291}, "udpos.Hebrew": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Hebrew", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 254482, "num_examples": 13914, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 275112, "num_examples": 15137, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3075019, "num_examples": 169401, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 3604613, "size_in_bytes": 358821294}, "udpos.Hindi": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Hindi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 820552, "num_examples": 35217, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1367421, "num_examples": 59259, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 6536484, "num_examples": 281057, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 8724457, "size_in_bytes": 363941138}, "udpos.Hungarian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Hungarian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 210628, "num_examples": 11418, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 189032, "num_examples": 10448, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 362836, "num_examples": 20166, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 762496, "size_in_bytes": 355979177}, "udpos.Indonesian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Indonesian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 218051, "num_examples": 12612, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 543962, "num_examples": 31680, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1686682, "num_examples": 97530, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 2448695, "size_in_bytes": 357665376}, "udpos.Italian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Italian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1006070, "num_examples": 62930, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1352066, "num_examples": 84640, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 11441054, "num_examples": 715942, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 13799190, "size_in_bytes": 369015871}, "udpos.Japanese": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Japanese", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 193699, "num_examples": 11473, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 896128, "num_examples": 53592, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2700811, "num_examples": 160183, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 3790638, "size_in_bytes": 359007319}, "udpos.Kazakh": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Kazakh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 224190, "num_examples": 10141, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 11692, "num_examples": 547, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 235882, "size_in_bytes": 355452563}, "udpos.Korean": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Korean", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 757622, "num_examples": 37236, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1127626, "num_examples": 56627, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 7117227, "num_examples": 353124, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 9002475, "size_in_bytes": 364219156}, "udpos.Chinese": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Chinese", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 569927, "num_examples": 35420, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1188350, "num_examples": 73131, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4065452, "num_examples": 251346, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 5823729, "size_in_bytes": 361040410}, "udpos.Marathi": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Marathi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 9503, "num_examples": 400, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 8666, "num_examples": 376, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 64054, "num_examples": 2730, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 82223, "size_in_bytes": 355298904}, "udpos.Persian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Persian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 311443, "num_examples": 15909, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 315552, "num_examples": 16122, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2369386, "num_examples": 122180, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 2996381, "size_in_bytes": 358213062}, "udpos.Portuguese": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Portuguese", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 725935, "num_examples": 46077, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1097248, "num_examples": 69417, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 7825988, "num_examples": 495259, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 9649171, "size_in_bytes": 364865852}, "udpos.Russian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Russian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 3371661, "num_examples": 156081, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 4139228, "num_examples": 192457, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 23642631, "num_examples": 1082189, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 31153520, "size_in_bytes": 386370201}, "udpos.Spanish": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Spanish", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1468545, "num_examples": 90347, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1444599, "num_examples": 88820, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 13588564, "num_examples": 835847, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 16501708, "size_in_bytes": 371718389}, "udpos.Tagalog": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Tagalog", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4687, "num_examples": 292, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 4687, "size_in_bytes": 355221368}, "udpos.Tamil": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Tamil", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 44904, "num_examples": 1384, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 69902, "num_examples": 2183, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 223732, "num_examples": 6849, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 338538, "size_in_bytes": 355555219}, "udpos.Telugu": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Telugu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 17079, "num_examples": 662, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 18534, "num_examples": 721, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 130557, "num_examples": 5082, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 166170, "size_in_bytes": 355382851}, "udpos.Thai": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Thai", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 548479, "num_examples": 22322, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 548479, "size_in_bytes": 355765160}, "udpos.Turkish": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Turkish", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 183577, "num_examples": 10310, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 805547, "num_examples": 44739, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 693088, "num_examples": 38733, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 1682212, "size_in_bytes": 356898893}, "udpos.Urdu": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Urdu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 276569, "num_examples": 14581, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 281727, "num_examples": 14806, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2056250, "num_examples": 108690, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 2614546, "size_in_bytes": 357831227}, "udpos.Vietnamese": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Vietnamese", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 197628, "num_examples": 11514, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 205240, "num_examples": 11955, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 352530, "num_examples": 20285, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 755398, "size_in_bytes": 355972079}, "udpos.Yoruba": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Yoruba", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 44120, "num_examples": 2664, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 44120, "size_in_bytes": 355260801}, "tatoeba.tel": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.tel", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 53239, "num_examples": 234, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tel-eng.tel": {"num_bytes": 18337, "checksum": "7e1a1bcd106cce650a09e2f042f1354b55b29bea2bcfa86554dfa0ad12ce8976"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tel-eng.eng": {"num_bytes": 6806, "checksum": "1efc2ef57d9b1ecebfc4baa45e86fd793e38473304e9c043aebabc3a1b29a294"}}, "download_size": 25143, "post_processing_size": null, "dataset_size": 53239, "size_in_bytes": 78382}} \ No newline at end of file +{"XNLI": {"description": "\nThe Cross-lingual Natural Language Inference (XNLI) corpus is a crowd-sourced collection of 5,000 test and\n2,500 dev pairs for the MultiNLI corpus. The pairs are annotated with textual entailment and translated into\n14 languages: French, Spanish, German, Greek, Bulgarian, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese,\nHindi, Swahili and Urdu. This results in 112.5k annotated pairs. Each premise can be associated with the\ncorresponding hypothesis in the 15 languages, summing up to more than 1.5M combinations. The corpus is made to\nevaluate how to perform inference in any language (including low-resources ones like Swahili or Urdu) when only\nEnglish NLI data is available at training time. One solution is cross-lingual sentence encoding, for which XNLI\nis an evaluation benchmark.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"language": {"dtype": "string", "id": null, "_type": "Value"}, "sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "gold_label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XNLI", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 20359500, "num_examples": 75150, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 10049303, "num_examples": 37350, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 17865352, "post_processing_size": null, "dataset_size": 30408803, "size_in_bytes": 48274155}, "tydiqa": {"description": "Gold passage task (GoldP): Given a passage that is guaranteed to contain the\n answer, predict the single contiguous span of characters that answers the question. This is more similar to\n existing reading comprehension datasets (as opposed to the information-seeking task outlined above).\n This task is constructed with two goals in mind: (1) more directly comparing with prior work and (2) providing\n a simplified way for researchers to use TyDi QA by providing compatibility with existing code for SQuAD 1.1,\n XQuAD, and MLQA. Toward these goals, the gold passage task differs from the primary task in several ways:\n only the gold answer passage is provided rather than the entire Wikipedia article;\n unanswerable questions have been discarded, similar to MLQA and XQuAD;\n we evaluate with the SQuAD 1.1 metrics like XQuAD; and\n Thai and Japanese are removed since the lack of whitespace breaks some tools.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. 
It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{tydiqa,\n title = {TyDi QA: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages},\n author = {Jonathan H. Clark and Eunsol Choi and Michael Collins and Dan Garrette and Tom Kwiatkowski and Vitaly Nikolaev and Jennimaria Palomaki},\n year = {2020},\n journal = {Transactions of the Association for Computational Linguistics}\n }\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/tydiqa", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tydiqa", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 52948607, "num_examples": 49881, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 5006461, "num_examples": 5077, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/tydiqa/v1.1/tydiqa-goldp-v1.1-train.json": {"num_bytes": 58004076, "checksum": "cefc8e09ff2548d9b10a678d3a6bbbe5bc036be543f92418819ea676c97be23b"}, "https://storage.googleapis.com/tydiqa/v1.1/tydiqa-goldp-v1.1-dev.json": {"num_bytes": 5617409, "checksum": "b286e0f34bc7f52259359989716f369b160565bd12ad8f3a3e311f9b0dbad1c0"}}, "download_size": 63621485, "post_processing_size": null, "dataset_size": 57955068, "size_in_bytes": 121576553}, "SQuAD": {"description": "Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. 
The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{2016arXiv160605250R,\n author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},\n Konstantin and {Liang}, Percy},\n title = \"{SQuAD: 100,000+ Questions for Machine Comprehension of Text}\",\n journal = {arXiv e-prints},\n year = 2016,\n eid = {arXiv:1606.05250},\n pages = {arXiv:1606.05250},\n archivePrefix = {arXiv},\n eprint = {1606.05250},\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://rajpurkar.github.io/SQuAD-explorer/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "SQuAD", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 79317110, "num_examples": 87599, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 10472653, "num_examples": 10570, "dataset_name": "xtreme"}}, "download_checksums": {"https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json": {"num_bytes": 30288272, "checksum": "3527663986b8295af4f7fcdff1ba1ff3f72d07d61a20f487cb238a6ef92fd955"}, "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json": {"num_bytes": 4854279, "checksum": "95aa6a52d5d6a735563366753ca50492a658031da74f301ac5238b03966972c9"}}, "download_size": 35142551, "post_processing_size": null, "dataset_size": 89789763, "size_in_bytes": 124932314}, "PAN-X.af": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.af", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 259709, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 257204, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1321396, "num_examples": 5000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 1838309, "size_in_bytes": 235964645}, "PAN-X.ar": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1808303, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1811983, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3634136, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 7254422, "size_in_bytes": 241380758}, "PAN-X.bg": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.bg", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2310314, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2306158, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4600773, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 9217245, "size_in_bytes": 243343581}, "PAN-X.bn": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.bn", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 159088, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 159282, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1568845, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 1887215, "size_in_bytes": 236013551}, "PAN-X.de": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2381565, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2377639, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4762352, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 9521556, "size_in_bytes": 243647892}, "PAN-X.el": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.el", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2533806, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2547594, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 5063176, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 10144576, "size_in_bytes": 244270912}, "PAN-X.en": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1920069, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1916220, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3823474, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 7659763, "size_in_bytes": 241786099}, "PAN-X.es": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1592525, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1602291, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3199161, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 6393977, "size_in_bytes": 240520313}, "PAN-X.et": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.et", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2030160, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2021409, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3023211, "num_examples": 15000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 7074780, "size_in_bytes": 241201116}, "PAN-X.eu": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.eu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2296335, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2249835, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2292327, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 6838497, "size_in_bytes": 240964833}, "PAN-X.fa": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.fa", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1782306, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1770284, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3529354, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 7081944, "size_in_bytes": 241208280}, "PAN-X.fi": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.fi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2131769, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2130665, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4273793, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 8536227, "size_in_bytes": 242662563}, "PAN-X.fr": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.fr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1664190, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1675785, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3335424, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 6675399, "size_in_bytes": 240801735}, "PAN-X.he": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.he", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2332760, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2318756, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4667100, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 9318616, "size_in_bytes": 243444952}, "PAN-X.hi": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 190671, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 196190, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 964212, "num_examples": 5000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 1351073, "size_in_bytes": 235477409}, "PAN-X.hu": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.hu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2211851, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2249779, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4499914, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 8961544, "size_in_bytes": 243087880}, "PAN-X.id": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.id", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1537979, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1536879, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3084007, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 6158865, "size_in_bytes": 240285201}, "PAN-X.it": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.it", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1908529, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1928408, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3874663, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 7711600, "size_in_bytes": 241837936}, "PAN-X.ja": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ja", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 6323003, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 6448960, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 12670401, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 25442364, "size_in_bytes": 259568700}, "PAN-X.jv": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.jv", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 14600, "num_examples": 100, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 16917, "num_examples": 100, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 16106, "num_examples": 100, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 47623, "size_in_bytes": 234173959}, "PAN-X.ka": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ka", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2806901, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2824641, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2777362, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 8408904, "size_in_bytes": 242535240}, "PAN-X.kk": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.kk", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 238109, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 236724, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 240276, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 715109, "size_in_bytes": 234841445}, "PAN-X.ko": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ko", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2138167, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2138294, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4284733, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 8561194, "size_in_bytes": 242687530}, "PAN-X.ml": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ml", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 290755, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 276926, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2865204, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 3432885, "size_in_bytes": 237559221}, "PAN-X.mr": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.mr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 245358, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 255904, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1248259, "num_examples": 5000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 1749521, "size_in_bytes": 235875857}, "PAN-X.ms": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ms", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 147515, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 147168, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2965048, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 3259731, "size_in_bytes": 237386067}, "PAN-X.my": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.my", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 40428, "num_examples": 100, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 37366, "num_examples": 100, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 32735, "num_examples": 100, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 110529, "size_in_bytes": 234236865}, "PAN-X.nl": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.nl", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2016856, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2038638, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4062189, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 8117683, "size_in_bytes": 242244019}, "PAN-X.pt": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.pt", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1575141, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1562625, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3149283, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 6287049, "size_in_bytes": 240413385}, "PAN-X.ru": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ru", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2053169, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2074145, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4121791, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 8249105, "size_in_bytes": 242375441}, "PAN-X.sw": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.sw", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 136368, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 140231, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 135911, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 412510, "size_in_bytes": 234538846}, "PAN-X.ta": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ta", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 277625, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 278114, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4122130, "num_examples": 15000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 4677869, "size_in_bytes": 238804205}, "PAN-X.te": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.te", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 293281, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 296963, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 295410, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 885654, "size_in_bytes": 235011990}, "PAN-X.th": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.th", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 13262737, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 13586928, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 27133029, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 53982694, "size_in_bytes": 288109030}, "PAN-X.tl": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.tl", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 114156, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 117904, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1168717, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 1400777, "size_in_bytes": 235527113}, "PAN-X.tr": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.tr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1915352, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1911503, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3779170, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 7606025, "size_in_bytes": 241732361}, "PAN-X.ur": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.ur", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 152148, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 151922, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3072276, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 3376346, "size_in_bytes": 237502682}, "PAN-X.vi": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1565143, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1580216, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3153227, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 6298586, "size_in_bytes": 240424922}, "PAN-X.yo": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.yo", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 13245, "num_examples": 100, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 13533, "num_examples": 100, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 14709, "num_examples": 100, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 41487, "size_in_bytes": 234167823}, "PAN-X.zh": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @inproceedings{pan-x,\n title={Cross-lingual Name Tagging and Linking for 282 Languages},\n author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAN-X.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 4491325, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 4363172, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 8832051, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1": {"num_bytes": 234126336, "checksum": "e2720a94b6590d4d70e9a7725106d1e5bdaafde730e06d8101ce4981d1417cce"}}, "download_size": 234126336, "post_processing_size": null, "dataset_size": 17686548, "size_in_bytes": 251812884}, "MLQA.ar.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.ar.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 8368114, "num_examples": 5335, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 824108, "num_examples": 517, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 9192222, "size_in_bytes": 84911272}, "MLQA.ar.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.ar.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2183942, "num_examples": 1649, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 364837, "num_examples": 207, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 2548779, "size_in_bytes": 78267829}, "MLQA.ar.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.ar.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3290629, "num_examples": 2047, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 288446, "num_examples": 163, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3579075, "size_in_bytes": 79298125}, "MLQA.ar.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.ar.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3229872, "num_examples": 1912, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 340049, "num_examples": 188, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3569921, "size_in_bytes": 79288971}, "MLQA.ar.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.ar.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 8225662, "num_examples": 5335, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 810089, "num_examples": 517, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 9035751, "size_in_bytes": 84754801}, "MLQA.ar.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.ar.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3041378, "num_examples": 1978, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 228180, "num_examples": 161, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3269558, "size_in_bytes": 78988608}, "MLQA.ar.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.ar.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3039396, "num_examples": 1831, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 281770, "num_examples": 186, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3321166, "size_in_bytes": 79040216}, "MLQA.de.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.de.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1620006, "num_examples": 1649, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 200174, "num_examples": 207, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1820180, "size_in_bytes": 77539230}, "MLQA.de.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.de.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4366102, "num_examples": 4517, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 488367, "num_examples": 512, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4854469, "size_in_bytes": 80573519}, "MLQA.de.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.de.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1688483, "num_examples": 1675, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 216075, "num_examples": 182, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1904558, "size_in_bytes": 77623608}, "MLQA.de.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.de.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1679180, "num_examples": 1621, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 184318, "num_examples": 190, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1863498, "size_in_bytes": 77582548}, "MLQA.de.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.de.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4343144, "num_examples": 4517, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 485894, "num_examples": 512, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4829038, "size_in_bytes": 80548088}, "MLQA.de.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.de.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1716615, "num_examples": 1776, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 170582, "num_examples": 196, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1887197, "size_in_bytes": 77606247}, "MLQA.de.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.de.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1371074, "num_examples": 1430, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 153871, "num_examples": 163, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1524945, "size_in_bytes": 77243995}, "MLQA.vi.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.vi.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3205185, "num_examples": 2047, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 230335, "num_examples": 163, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3435520, "size_in_bytes": 79154570}, "MLQA.vi.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.vi.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2227033, "num_examples": 1675, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 277185, "num_examples": 182, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 2504218, "size_in_bytes": 78223268}, "MLQA.vi.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.vi.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 7922085, "num_examples": 5495, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 726518, "num_examples": 511, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 8648603, "size_in_bytes": 84367653}, "MLQA.vi.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.vi.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2989660, "num_examples": 1943, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 269389, "num_examples": 184, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3259049, "size_in_bytes": 78978099}, "MLQA.vi.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.vi.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 7843431, "num_examples": 5495, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 719273, "num_examples": 511, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 8562704, "size_in_bytes": 84281754}, "MLQA.vi.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.vi.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2866597, "num_examples": 2018, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 283461, "num_examples": 189, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3150058, "size_in_bytes": 78869108}, "MLQA.vi.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.vi.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2776664, "num_examples": 1947, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 255007, "num_examples": 177, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3031671, "size_in_bytes": 78750721}, "MLQA.zh.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.zh.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1731483, "num_examples": 1912, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 175349, "num_examples": 188, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1906832, "size_in_bytes": 77625882}, "MLQA.zh.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.zh.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1390018, "num_examples": 1621, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 174605, "num_examples": 190, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1564623, "size_in_bytes": 77283673}, "MLQA.zh.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.zh.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1806186, "num_examples": 1943, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 172934, "num_examples": 184, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1979120, "size_in_bytes": 77698170}, "MLQA.zh.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.zh.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4422350, "num_examples": 5137, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 443810, "num_examples": 504, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4866160, "size_in_bytes": 80585210}, "MLQA.zh.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.zh.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4450985, "num_examples": 5137, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 446868, "num_examples": 504, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4897853, "size_in_bytes": 80616903}, "MLQA.zh.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.zh.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1736283, "num_examples": 1947, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 138073, "num_examples": 161, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1874356, "size_in_bytes": 77593406}, "MLQA.zh.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.zh.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1578219, "num_examples": 1767, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 184401, "num_examples": 189, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1762620, "size_in_bytes": 77481670}, "MLQA.en.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.en.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6739219, "num_examples": 5335, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 630843, "num_examples": 517, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 7370062, "size_in_bytes": 83089112}, "MLQA.en.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.en.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 5056722, "num_examples": 4517, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 594936, "num_examples": 512, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 5651658, "size_in_bytes": 81370708}, "MLQA.en.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.en.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 7056698, "num_examples": 5495, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 640646, "num_examples": 511, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 7697344, "size_in_bytes": 83416394}, "MLQA.en.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.en.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6539307, "num_examples": 5137, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 608444, "num_examples": 504, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 7147751, "size_in_bytes": 82866801}, "MLQA.en.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.en.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 14004648, "num_examples": 11590, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 1329112, "num_examples": 1148, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 15333760, "size_in_bytes": 91052810}, "MLQA.en.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.en.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6179249, "num_examples": 5253, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 555462, "num_examples": 500, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 6734711, "size_in_bytes": 82453761}, "MLQA.en.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.en.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6378866, "num_examples": 4918, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 623171, "num_examples": 507, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 7002037, "size_in_bytes": 82721087}, "MLQA.es.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.es.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1740282, "num_examples": 1978, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 148649, "num_examples": 161, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1888931, "size_in_bytes": 77607981}, "MLQA.es.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.es.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1404025, "num_examples": 1776, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 144186, "num_examples": 196, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1548211, "size_in_bytes": 77267261}, "MLQA.es.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.es.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1747969, "num_examples": 2018, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 176841, "num_examples": 189, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1924810, "size_in_bytes": 77643860}, "MLQA.es.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.es.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1678451, "num_examples": 1947, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 126646, "num_examples": 161, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1805097, "size_in_bytes": 77524147}, "MLQA.es.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data.
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.es.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4362737, "num_examples": 5253, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 419068, "num_examples": 500, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4781805, "size_in_bytes": 80500855}, "MLQA.es.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.es.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4394333, "num_examples": 5253, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 422071, "num_examples": 500, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4816404, "size_in_bytes": 80535454}, "MLQA.es.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.es.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1523523, "num_examples": 1723, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 181834, "num_examples": 187, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1705357, "size_in_bytes": 77424407}, "MLQA.hi.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.hi.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4445589, "num_examples": 1831, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 410424, "num_examples": 186, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4856013, "size_in_bytes": 80575063}, "MLQA.hi.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.hi.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3022864, "num_examples": 1430, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 301713, "num_examples": 163, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3324577, "size_in_bytes": 79043627}, "MLQA.hi.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.hi.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4743484, "num_examples": 1947, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 419106, "num_examples": 177, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 5162590, "size_in_bytes": 80881640}, "MLQA.hi.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.hi.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4354875, "num_examples": 1767, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 424246, "num_examples": 189, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4779121, "size_in_bytes": 80498171}, "MLQA.hi.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.hi.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 11449261, "num_examples": 4918, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 1097857, "num_examples": 507, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 12547118, "size_in_bytes": 88266168}, "MLQA.hi.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.hi.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3862229, "num_examples": 1723, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 420402, "num_examples": 187, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4282631, "size_in_bytes": 80001681}, "MLQA.hi.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "MLQA.hi.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 11810475, "num_examples": 4918, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 1136784, "num_examples": 507, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 12947259, "size_in_bytes": 88666309}, "XQuAD.ar": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1722799, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.ar.json": {"num_bytes": 1582988, "checksum": "abdabd7afed5c635d99cca0f3f0d0c9d9ed0bc77451e963c2e4e0638c29e486d"}}, "download_size": 1582988, "post_processing_size": null, "dataset_size": 1722799, "size_in_bytes": 3305787}, "XQuAD.de": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1283301, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.de.json": {"num_bytes": 669810, "checksum": "990b5d746746ed65ed4702ea5f35f99ffa4e2f1c390c07d003642acd937916f9"}}, "download_size": 669810, "post_processing_size": null, "dataset_size": 1283301, "size_in_bytes": 1953111}, "XQuAD.vi": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1477239, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.vi.json": {"num_bytes": 911401, "checksum": "f619a1eb11fb42d3ab0834259e488a65f585447ef6154437bfb7199d85161a04"}}, "download_size": 911401, "post_processing_size": null, "dataset_size": 1477239, "size_in_bytes": 2388640}, "XQuAD.zh": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 984241, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.zh.json": {"num_bytes": 808652, "checksum": "691d0b3359bc6b8faa8de931dfdfe21d50a65861ae348e32a0d1a0190b0c8835"}}, "download_size": 808652, "post_processing_size": null, "dataset_size": 984241, "size_in_bytes": 1792893}, "XQuAD.en": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1116123, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.en.json": {"num_bytes": 609383, "checksum": "e4c57d1c9143aaa1c5d265ba5987a65f4e69528d2a98f29d6e75019b10344f29"}}, "download_size": 609383, "post_processing_size": null, "dataset_size": 1116123, "size_in_bytes": 1725506}, "XQuAD.es": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1273499, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.es.json": {"num_bytes": 684322, "checksum": "dcbae93ec3a9f4b9e78fd834a171d6f96c1a875e10e15b7530b7e4ef4971e37e"}}, "download_size": 684322, "post_processing_size": null, "dataset_size": 1273499, "size_in_bytes": 1957821}, "XQuAD.hi": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2682975, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.hi.json": {"num_bytes": 1680538, "checksum": "df2cce3532b37e9beb8979704b5c9a4bf874358f105395a298b89427b43b9d24"}}, "download_size": 1680538, "post_processing_size": null, "dataset_size": 2682975, "size_in_bytes": 4363513}, "XQuAD.el": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.el", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2206690, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.el.json": {"num_bytes": 1918889, "checksum": "821cf0f88e73fa258fd2f548b19b6ec39f7025059e16f6f9fc8cd797c9c3663e"}}, "download_size": 1918889, "post_processing_size": null, "dataset_size": 2206690, "size_in_bytes": 4125579}, "XQuAD.ru": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.ru", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2136990, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.ru.json": {"num_bytes": 1896368, "checksum": "208d5b1aa154c52b1b5c5eda16281e455e8fd198cdb9af3f469f0d6037d973bf"}}, "download_size": 1896368, "post_processing_size": null, "dataset_size": 2136990, "size_in_bytes": 4033358}, "XQuAD.th": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.th", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2854959, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.th.json": {"num_bytes": 1809143, "checksum": "5cdda11d0e1e075f7872abf4e6ae830388ce7f617964d542308e9ae4257e0f43"}}, "download_size": 1809143, "post_processing_size": null, "dataset_size": 2854959, "size_in_bytes": 4664102}, "XQuAD.tr": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "XQuAD.tr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1210763, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.tr.json": {"num_bytes": 729506, "checksum": "92179a564774b7696100d144c1e10870d0a966b6fccbdd254a65b9d2ab1971cc"}}, "download_size": 729506, "post_processing_size": null, "dataset_size": 1210763, "size_in_bytes": 1940269}, "bucc18.de": {"description": "Building and Using Comparable Corpora\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://comparable.limsi.fr/bucc2018/", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "bucc18.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 247339, "num_examples": 1038, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2309764, "num_examples": 9580, "dataset_name": "xtreme"}}, "download_checksums": {"https://comparable.limsi.fr/bucc2018/bucc2018-de-en.training-gold.tar.bz2": {"num_bytes": 28189548, "checksum": "766e0fdebbd1438fb87c21254828eb13c8b997d8fbab002103dd060dcac50c5c"}, "https://comparable.limsi.fr/bucc2018/bucc2018-de-en.sample-gold.tar.bz2": {"num_bytes": 2529652, "checksum": "8e16ba8b02ef8d648b06adfbd6dfb188f43524e18f97b2b12a14a086caac62f3"}}, "download_size": 30719200, "post_processing_size": null, "dataset_size": 2557103, "size_in_bytes": 33276303}, "bucc18.fr": {"description": "Building and Using Comparable Corpora\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://comparable.limsi.fr/bucc2018/", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "bucc18.fr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 210221, "num_examples": 929, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2064547, "num_examples": 9086, "dataset_name": "xtreme"}}, "download_checksums": {"https://comparable.limsi.fr/bucc2018/bucc2018-fr-en.training-gold.tar.bz2": {"num_bytes": 20757713, "checksum": "53c0d0e7dd97dc89593a2db25a26c5f0ccdc7113e8451263bb0c80e4c4c7dc30"}, "https://comparable.limsi.fr/bucc2018/bucc2018-fr-en.sample-gold.tar.bz2": {"num_bytes": 1948831, "checksum": "df9eb3966954e163c9264076f7c9c1eb56d9d8a91855f9d3afbf2c0fdaef0a08"}}, "download_size": 22706544, "post_processing_size": null, "dataset_size": 2274768, "size_in_bytes": 24981312}, "bucc18.zh": {"description": "Building and Using Comparable Corpora\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://comparable.limsi.fr/bucc2018/", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "bucc18.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 55077, "num_examples": 257, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 412002, "num_examples": 1899, "dataset_name": "xtreme"}}, "download_checksums": {"https://comparable.limsi.fr/bucc2018/bucc2018-zh-en.training-gold.tar.bz2": {"num_bytes": 6344925, "checksum": "3facb71798277c8f44dc78c1f8ae2110f254d0e14799f3508eedd54b4236877a"}, "https://comparable.limsi.fr/bucc2018/bucc2018-zh-en.sample-gold.tar.bz2": {"num_bytes": 769869, "checksum": "a3425be5c0320ee131a0927b66c3e29befb3b481ebf1b87257e660e514bc16ac"}}, "download_size": 7114794, "post_processing_size": null, "dataset_size": 467079, "size_in_bytes": 7581873}, "bucc18.ru": {"description": "Building and Using Comparable Corpora\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://comparable.limsi.fr/bucc2018/", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "bucc18.ru", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 751368, "num_examples": 2374, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 4583855, "num_examples": 14435, "dataset_name": "xtreme"}}, "download_checksums": {"https://comparable.limsi.fr/bucc2018/bucc2018-ru-en.training-gold.tar.bz2": {"num_bytes": 37085079, "checksum": "1895df56e936ca3d4f5b12299ceffe0b7ff4806584c40bdaa3ae1d445f25afa5"}, "https://comparable.limsi.fr/bucc2018/bucc2018-ru-en.sample-gold.tar.bz2": {"num_bytes": 4269233, "checksum": "fce3cabc7ee50ddb4b18aa6fb090e2669c8383d2a29fc97eed6ae70fed9a23e5"}}, "download_size": 41354312, "post_processing_size": null, "dataset_size": 5335223, "size_in_bytes": 46689535}, "PAWS-X.de": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAWS-X.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 500009, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 510194, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 12451883, "num_examples": 49380, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 13462086, "size_in_bytes": 43744143}, "PAWS-X.en": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAWS-X.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 478291, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 480738, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 11827719, "num_examples": 49175, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 12786748, "size_in_bytes": 43068805}, "PAWS-X.es": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAWS-X.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 494069, "num_examples": 1961, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 505047, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 12462107, "num_examples": 49401, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 13461223, "size_in_bytes": 43743280}, "PAWS-X.fr": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAWS-X.fr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 516111, "num_examples": 1988, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 521031, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 12948512, "num_examples": 49399, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 13985654, "size_in_bytes": 44267711}, "PAWS-X.ja": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAWS-X.ja", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 647774, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 654640, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 14695653, "num_examples": 49401, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 15998067, "size_in_bytes": 46280124}, "PAWS-X.ko": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAWS-X.ko", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 540787, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 547978, "num_examples": 1999, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 13542657, "num_examples": 49164, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 14631422, "size_in_bytes": 44913479}, "PAWS-X.zh": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "PAWS-X.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 459120, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 460638, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 10469712, "num_examples": 49401, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 11389470, "size_in_bytes": 41671527}, "tatoeba.afr": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.afr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 179651, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.afr-eng.afr": {"num_bytes": 30586, "checksum": "7bb9e073ad8422d6bfdec7c9ebdcef8ac486e72b237200e447923a8b921a0a56"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.afr-eng.eng": {"num_bytes": 29049, "checksum": "0b700c125efb4030b4cc50d9d765d5884afc24f39296f29b028a1b2a8512034f"}}, "download_size": 59635, "post_processing_size": null, "dataset_size": 179651, "size_in_bytes": 239286}, "tatoeba.ara": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.ara", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 192666, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ara-eng.ara": {"num_bytes": 43582, "checksum": "e67a0ae072b79cd9e8eb09f166c3bc0b23488d39f5720f2ee0a8350ae17b719f"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ara-eng.eng": {"num_bytes": 29068, "checksum": "3c11838c963f598a52dcf2f452b666353538257001db5c59c3a5f54a999b336b"}}, "download_size": 72650, "post_processing_size": null, "dataset_size": 192666, "size_in_bytes": 265316}, "tatoeba.ben": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.ben", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 211719, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ben-eng.ben": {"num_bytes": 65990, "checksum": "ac3385695d6a6c7e5d18e38ad4b8b7d3780f3df23dd0ff3f539071b8269a8613"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ben-eng.eng": {"num_bytes": 25713, "checksum": "67fbe75fec549d436c3356b6d6f8dd53179b6a908661b5d507d28c7fee83350e"}}, "download_size": 91703, "post_processing_size": null, "dataset_size": 211719, "size_in_bytes": 303422}, "tatoeba.bul": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.bul", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 222295, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.bul-eng.bul": {"num_bytes": 65500, "checksum": "f9fa90cf3599d8c87f7a6ed22f5d648e3ce6687c705656a8c8ea088d891f79d5"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.bul-eng.eng": {"num_bytes": 36779, "checksum": "4b03b3b52ffb7cf4286e0c4453c90910a3021546f160bdf0e4d39d1f45bfbc0b"}}, "download_size": 102279, "post_processing_size": null, "dataset_size": 222295, "size_in_bytes": 324574}, "tatoeba.deu": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.deu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 225583, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.deu-eng.deu": {"num_bytes": 57121, "checksum": "edfa6f75a42554df388f45891735d5e4214158a99def4b73b5908af4a3054551"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.deu-eng.eng": {"num_bytes": 48446, "checksum": "eb9cc83a42f2c4b22f310d05311207e41abb56c2a084666cac3ee0f84d2d0b84"}}, "download_size": 105567, "post_processing_size": null, "dataset_size": 225583, "size_in_bytes": 331150}, "tatoeba.cmn": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.cmn", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 188947, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.cmn-eng.cmn": {"num_bytes": 33410, "checksum": "965d033966fcd186c89741ad49ab4b0a0b2bbd33e02666635ff3b2be23c1ac1f"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.cmn-eng.eng": {"num_bytes": 35521, "checksum": "29ba36232488f5806aceccac57c59c5e750ddd08edb40eef417e3ada9ff9a239"}}, "download_size": 68931, "post_processing_size": null, "dataset_size": 188947, "size_in_bytes": 257878}, "tatoeba.ell": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Mikel, Artetxe and Holger, Schwenk,},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.ell", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 198977, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ell-eng.ell": {"num_bytes": 52202, "checksum": "4fb3d3d30bdafd15100dfad5c4680f8f2ed5ca87ed0a6122e2fe2aa21fee65e8"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ell-eng.eng": {"num_bytes": 26759, "checksum": "1f630710e718e2f85a4e757d3c7f3d6e78ded0b25c99653b4c552138318d9ffe"}}, "download_size": 78961, "post_processing_size": null, "dataset_size": 198977, "size_in_bytes": 277938}, "tatoeba.est": {"description": "his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each languages, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.est", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 179744, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.est-eng.est": {"num_bytes": 29996, "checksum": "94856999ef35e5357502d7ecf50419d0108b99270e507d9c57f8b283bd1be9c5"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.est-eng.eng": {"num_bytes": 29732, "checksum": "bb033b10596178452aecf2d97ad25580251375e7d224d8b38dad3d93d69b1e4f"}}, "download_size": 59728, "post_processing_size": null, "dataset_size": 179744, "size_in_bytes": 239472}, "tatoeba.eus": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.eus", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 186084, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.eus-eng.eus": {"num_bytes": 34060, "checksum": "4255fb70a6c268b09fcc59a9b308f0fcaaf45ef45e66fc55bf3c80eac4d8c97b"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.eus-eng.eng": {"num_bytes": 32008, "checksum": "1e80f0174ad544697fd69ddcf21287ca10c5e3cacba2fc42bf1d68c460d14ba2"}}, "download_size": 66068, "post_processing_size": null, "dataset_size": 186084, "size_in_bytes": 252152}, "tatoeba.fin": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.fin", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 195685, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.fin-eng.fin": {"num_bytes": 39857, "checksum": "8db3c734f755d578445947f1182f40faf2a9a0eca37561dd248717c088802d60"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.fin-eng.eng": {"num_bytes": 35812, "checksum": "322e610359f4d24852e673bbe4524d52c26dbf980aca0760e95c66dc21ecd504"}}, "download_size": 75669, "post_processing_size": null, "dataset_size": 195685, "size_in_bytes": 271354}, "tatoeba.fra": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.fra", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 200034, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.fra-eng.fra": {"num_bytes": 43727, "checksum": "644172ff9642fefa9e41c29b7d6f44196518e84350dc44d4992e943c0cca92b6"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.fra-eng.eng": {"num_bytes": 36291, "checksum": "5634220f8a26a9a23b84753a9aec0b0832e6bdaa9da3f83e0bd84c928c3f46e3"}}, "download_size": 80018, "post_processing_size": null, "dataset_size": 200034, "size_in_bytes": 280052}, "tatoeba.heb": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.heb", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 203516, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.heb-eng.heb": {"num_bytes": 47660, "checksum": "4a07ca4b8a6fb7ab499791573a2454730f47acbe209359d7b9372a9f6094a102"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.heb-eng.eng": {"num_bytes": 35840, "checksum": "73c27ed0f76c1d2da199230f05489749e10bb67ab879c6dfee8ca9807d6bd99c"}}, "download_size": 83500, "post_processing_size": null, "dataset_size": 203516, "size_in_bytes": 287016}, "tatoeba.hin": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.hin", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 242574, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.hin-eng.hin": {"num_bytes": 88468, "checksum": "15e4fb0a394be4438319f1d6955d1aea226e2a8c5ad38798b23b76ae43d742ed"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.hin-eng.eng": {"num_bytes": 34090, "checksum": "adfce2269a55dbac69b25c0f4f6eb89e0f9383165485925a2e042e61b9480562"}}, "download_size": 122558, "post_processing_size": null, "dataset_size": 242574, "size_in_bytes": 365132}, "tatoeba.hun": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.hun", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 188905, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.hun-eng.hun": {"num_bytes": 35335, "checksum": "56bd0682be8c1db6568313650b3310d641cc8d0019d12dd7caf201302350eeac"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.hun-eng.eng": {"num_bytes": 33554, "checksum": "5323266e91ddee67ed1ae00d6bbac0cdf3d37749d1b2da3459bf0d424bc71383"}}, "download_size": 68889, "post_processing_size": null, "dataset_size": 188905, "size_in_bytes": 257794}, "tatoeba.ind": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.ind", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 194860, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ind-eng.ind": {"num_bytes": 39969, "checksum": "4f03cd70cba071f746eacd3ebf6b60fd5a8377ce18b4cc52edec6721f05f352a"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ind-eng.eng": {"num_bytes": 34875, "checksum": "62a3f5127b60eb3526b8fa3994a68fa1a1f114f3a395307a8808a3517c05ffc5"}}, "download_size": 74844, "post_processing_size": null, "dataset_size": 194860, "size_in_bytes": 269704}, "tatoeba.ita": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.ita", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 185849, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ita-eng.ita": {"num_bytes": 34237, "checksum": "9b3f369d0ed92273b46dd3b983721636e3d15024ce7125f5103229249c386d26"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.ita-eng.eng": {"num_bytes": 31596, "checksum": "738bf8f981e42d285f4f08bc09238782d285b02050e6e95287aa4e998bb7b24b"}}, "download_size": 65833, "post_processing_size": null, "dataset_size": 185849, "size_in_bytes": 251682}, "tatoeba.jav": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.jav", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 38529, "num_examples": 205, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.jav-eng.jav": {"num_bytes": 7457, "checksum": "ad88399db8f94c2a040aa53e7e862225964fac9308a3beb3d5b38f3eca2f827f"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.jav-eng.eng": {"num_bytes": 6456, "checksum": "172776353690f6c047ea21da969fa6979980d692fff1cfbac17eb25851423760"}}, "download_size": 13913, "post_processing_size": null, "dataset_size": 38529, "size_in_bytes": 52442}, "tatoeba.jpn": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.jpn", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 213099, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.jpn-eng.jpn": {"num_bytes": 53844, "checksum": "56040bd6949170a631039d9f8f4c6440db8761b0065c9686feba55c99a320d46"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.jpn-eng.eng": {"num_bytes": 39239, "checksum": "b42129b34e1bf225ccc25fc00e532a6113af98adbc6605b93021bd8aadeb68b6"}}, "download_size": 93083, "post_processing_size": null, "dataset_size": 213099, "size_in_bytes": 306182}, "tatoeba.kat": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.kat", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 161696, "num_examples": 746, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.kat-eng.kat": {"num_bytes": 50967, "checksum": "6ef69b5efbf355597ed91eb355b33a5f524bdf0875dbeaaccf6375badc20e29b"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.kat-eng.eng": {"num_bytes": 21193, "checksum": "d70a14aa64fd7c6b545f11aea754a632e1cbecb91af27fcf6a98a8449a48a8e7"}}, "download_size": 72160, "post_processing_size": null, "dataset_size": 161696, "size_in_bytes": 233856}, "tatoeba.kaz": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.kaz", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 116194, "num_examples": 575, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.kaz-eng.kaz": {"num_bytes": 29687, "checksum": "f20c682582a80b6aa10f3b933db93bc314449b554ce611e263bc75990b319aef"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.kaz-eng.eng": {"num_bytes": 17491, "checksum": "0ab684e7032c6520540d5785adf00ef206d097221d0dd4dc9bcaabd64068e10d"}}, "download_size": 47178, "post_processing_size": null, "dataset_size": 116194, "size_in_bytes": 163372}, "tatoeba.kor": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.kor", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 199155, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.kor-eng.kor": {"num_bytes": 44054, "checksum": "e550c84184ec35b1a0dab3154284719511a21746e53c40f46eb6ab08179e9188"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.kor-eng.eng": {"num_bytes": 35085, "checksum": "f900cf3c9b72ed5a400e1804702863ff3df00be58eb060902e02285d0e68fab3"}}, "download_size": 79139, "post_processing_size": null, "dataset_size": 199155, "size_in_bytes": 278294}, "tatoeba.mal": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.mal", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 177173, "num_examples": 687, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.mal-eng.mal": {"num_bytes": 72952, "checksum": "1a896f54f85a454fb0123864049c65921ae9dfd0cafda6deef8060f0104d965e"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.mal-eng.eng": {"num_bytes": 21765, "checksum": "4d80cdbb844cd4e33f874e5dc45c1cdda4f80998034448f7eb56b8b6532a6622"}}, "download_size": 94717, "post_processing_size": null, "dataset_size": 177173, "size_in_bytes": 271890}, "tatoeba.mar": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.mar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 220558, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.mar-eng.mar": {"num_bytes": 72652, "checksum": "b2931584fbe62062beb97cc939e4d208ace5ee56f15808860ab14e130fd3c576"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.mar-eng.eng": {"num_bytes": 27890, "checksum": "709d09b697dca053c814b9d525b72cb47cb011aa860c6598f3e2b1b3dd1280dd"}}, "download_size": 100542, "post_processing_size": null, "dataset_size": 220558, "size_in_bytes": 321100}, "tatoeba.nld": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.nld", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 193279, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.nld-eng.nld": {"num_bytes": 37866, "checksum": "d564d4ce1c621ccaefdbe9f5cb08eacccc7bf2a0b58666303e84ca9c7973bdb7"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.nld-eng.eng": {"num_bytes": 35397, "checksum": "3b8836749df573a53235b85ed6771f31bf2de428f520d2d6a1dd94b61b4ef057"}}, "download_size": 73263, "post_processing_size": null, "dataset_size": 193279, "size_in_bytes": 266542}, "tatoeba.pes": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.pes", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 213735, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.pes-eng.pes": {"num_bytes": 58866, "checksum": "f1553713723491fe5876e1060b18fb4abf0c77be3ba06db2e3307e83aedbbb32"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.pes-eng.eng": {"num_bytes": 34853, "checksum": "b5c6cf8c8d93ff8f2fe26b53f3ee29b62db9c6f7dcddcb086ba48953f4ce926b"}}, "download_size": 93719, "post_processing_size": null, "dataset_size": 213735, "size_in_bytes": 307454}, "tatoeba.por": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.por", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 195201, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.por-eng.por": {"num_bytes": 39331, "checksum": "b0c926a232c9889a87a1a970f9399c5618c2d95baf204321e9da794c0aec16f5"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.por-eng.eng": {"num_bytes": 35854, "checksum": "deb4568cfb7b7cbbc060a7fe97c4639fb4680842f4fcd28df791ffdbb753855a"}}, "download_size": 75185, "post_processing_size": null, "dataset_size": 195201, "size_in_bytes": 270386}, "tatoeba.rus": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.rus", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 212488, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.rus-eng.rus": {"num_bytes": 58822, "checksum": "446ff2cae66053c2277d9735b2c2df6b786cae258385f7ade7bed68d8835c6a0"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.rus-eng.eng": {"num_bytes": 33650, "checksum": "7b26f52d6085b7c4944d6f5f6f5b6e1932085b42112f1444db515ce59e878fb8"}}, "download_size": 92472, "post_processing_size": null, "dataset_size": 212488, "size_in_bytes": 304960}, "tatoeba.spa": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.spa", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 192282, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.spa-eng.spa": {"num_bytes": 37490, "checksum": "f9628cea40481e8251f0999718bd893cff0f261752f5e526b3bc20284e2ca018"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.spa-eng.eng": {"num_bytes": 34776, "checksum": "89e4470f4572040b1ca94b3edad97dcd8bd2f0141f072e12933b8659dadf917d"}}, "download_size": 72266, "post_processing_size": null, "dataset_size": 192282, "size_in_bytes": 264548}, "tatoeba.swh": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.swh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 67283, "num_examples": 390, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.swh-eng.swh": {"num_bytes": 9645, "checksum": "1c672915446c336cc378676e6dbf91eb54d27bbfd0c61563d349265bc6374753"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.swh-eng.eng": {"num_bytes": 10822, "checksum": "e8539647caff9e329776ae863b6224d432923c6e4e9256b9df92ca58ff282eac"}}, "download_size": 20467, "post_processing_size": null, "dataset_size": 67283, "size_in_bytes": 87750}, "tatoeba.tam": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.tam", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 76297, "num_examples": 307, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tam-eng.tam": {"num_bytes": 30553, "checksum": "bde87fb1ddedccf6c7a2b70ffdd19a959a573c113c2e7a041c4b623fb2170bde"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tam-eng.eng": {"num_bytes": 8888, "checksum": "e7c5106acdd100214a161970b2a5c31e7386e5b6a963e3d3afdf30412c90ac53"}}, "download_size": 39441, "post_processing_size": null, "dataset_size": 76297, "size_in_bytes": 115738}, "tatoeba.tel": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.tel", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 53239, "num_examples": 234, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tel-eng.tel": {"num_bytes": 18337, "checksum": "7e1a1bcd106cce650a09e2f042f1354b55b29bea2bcfa86554dfa0ad12ce8976"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tel-eng.eng": {"num_bytes": 6806, "checksum": "1efc2ef57d9b1ecebfc4baa45e86fd793e38473304e9c043aebabc3a1b29a294"}}, "download_size": 25143, "post_processing_size": null, "dataset_size": 53239, "size_in_bytes": 78382}, "tatoeba.tgl": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.tgl", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 188154, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tgl-eng.tgl": {"num_bytes": 36506, "checksum": "f99165dc05190b99f6574fe24db884ff85d111612a25e7a37323f001aafc2a6e"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tgl-eng.eng": {"num_bytes": 31632, "checksum": "e7c9beda3f3072a968a34a7226a66d1ebf1dcb33cf002805dc752f80a7c620ae"}}, "download_size": 68138, "post_processing_size": null, "dataset_size": 188154, "size_in_bytes": 256292}, "tatoeba.tha": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.tha", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 128974, "num_examples": 548, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tha-eng.tha": {"num_bytes": 44759, "checksum": "65c7b3c01a56a1ac8971e72e0ea8e74a027718dc84044d8802c0ab36395a3156"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tha-eng.eng": {"num_bytes": 18439, "checksum": "2881c82d2c5fa59cf0a68bc9e012f5e0b0a716f7357cbecf77c247efc2fd7294"}}, "download_size": 63198, "post_processing_size": null, "dataset_size": 128974, "size_in_bytes": 192172}, "tatoeba.tur": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.tur", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 191901, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tur-eng.tur": {"num_bytes": 37607, "checksum": "1ffa0acc006018b3105abda41a4d4ca42f3c122964a49b71793546367b079a86"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.tur-eng.eng": {"num_bytes": 34278, "checksum": "a768df7dd3d1344f872a458b32c3a65e24f8381826ccb16ba6677426176c8121"}}, "download_size": 71885, "post_processing_size": null, "dataset_size": 191901, "size_in_bytes": 263786}, "tatoeba.urd": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.urd", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 208728, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.urd-eng.urd": {"num_bytes": 56819, "checksum": "2efc22dc61885a9454aeeee68c8b841c7f9138d53ba644a82308bd210140450b"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.urd-eng.eng": {"num_bytes": 31893, "checksum": "dab35fda3f73b3fd86b6b9f9f9f6242430961aa5d1ac247adbc646867df79cec"}}, "download_size": 88712, "post_processing_size": null, "dataset_size": 208728, "size_in_bytes": 297440}, "tatoeba.vie": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\n\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\n\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "tatoeba.vie", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 211423, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.vie-eng.vie": {"num_bytes": 52721, "checksum": "6dbb02d778b0bfc8678cd85f87db76de55dd7e409a26fe32ad42d50e0f1fff77"}, "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.vie-eng.eng": {"num_bytes": 38686, "checksum": "a1f60bd8ae6c42224a4c050d2aa1ff4242d14827d64d7831e96ecf2b2c367f5f"}}, "download_size": 91407, "post_processing_size": null, "dataset_size": 211423, "size_in_bytes": 302830}, "udpos.Afrikaans": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Afrikaans", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 89742, "num_examples": 5317, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 170852, "num_examples": 10065, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 575866, "num_examples": 33894, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 836460, "size_in_bytes": 356053141}, "udpos.Arabic": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Arabic", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 586394, "num_examples": 30239, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 960402, "num_examples": 49015, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4405174, "num_examples": 223881, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 5951970, "size_in_bytes": 361168651}, "udpos.Basque": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Basque", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 424299, "num_examples": 24095, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 430264, "num_examples": 24374, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1284577, "num_examples": 72974, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 2139140, "size_in_bytes": 357355821}, "udpos.Bulgarian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Bulgarian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 338205, "num_examples": 16089, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 331027, "num_examples": 15724, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2618563, "num_examples": 124336, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 3287795, "size_in_bytes": 358504476}, "udpos.Dutch": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Dutch", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 382452, "num_examples": 22966, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 386148, "num_examples": 22634, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4373694, "num_examples": 261164, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 5142294, "size_in_bytes": 360358975}, "udpos.English": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.English", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1010276, "num_examples": 62635, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1377664, "num_examples": 84982, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 6055637, "num_examples": 375240, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 8443577, "size_in_bytes": 363660258}, "udpos.Estonian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Estonian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 789191, "num_examples": 44632, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1035645, "num_examples": 58596, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 6409049, "num_examples": 361827, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 8233885, "size_in_bytes": 363450566}, "udpos.Finnish": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Finnish", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 630750, "num_examples": 34054, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 990374, "num_examples": 53202, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 5396242, "num_examples": 290626, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 7017366, "size_in_bytes": 362234047}, "udpos.French": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.French", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1246296, "num_examples": 76265, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1655373, "num_examples": 101468, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 9740709, "num_examples": 595344, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 12642378, "size_in_bytes": 367859059}, "udpos.German": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.German", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 5891110, "num_examples": 331999, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 7166363, "num_examples": 405961, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 53440193, "num_examples": 3017431, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 66497666, "size_in_bytes": 421714347}, "udpos.Greek": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Greek", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1041995, "num_examples": 46230, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1006213, "num_examples": 44945, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 8707044, "num_examples": 389254, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 10755252, "size_in_bytes": 365971933}, "udpos.Hebrew": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Hebrew", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 206149, "num_examples": 11412, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 219945, "num_examples": 12284, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2463819, "num_examples": 137721, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 2889913, "size_in_bytes": 358106594}, "udpos.Hindi": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Hindi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 826446, "num_examples": 35217, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1378777, "num_examples": 59259, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 6583934, "num_examples": 281057, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 8789157, "size_in_bytes": 364005838}, "udpos.Hungarian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Hungarian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 212359, "num_examples": 11418, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 190144, "num_examples": 10448, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 364958, "num_examples": 20166, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 767461, "size_in_bytes": 355984142}, "udpos.Indonesian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Indonesian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 216399, "num_examples": 12612, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 544661, "num_examples": 31680, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1674902, "num_examples": 97530, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 2435962, "size_in_bytes": 357652643}, "udpos.Italian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Italian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 970796, "num_examples": 58791, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1309757, "num_examples": 79802, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 11062085, "num_examples": 672924, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 13342638, "size_in_bytes": 368559319}, "udpos.Japanese": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Japanese", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 196276, "num_examples": 11473, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 909950, "num_examples": 53592, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2736019, "num_examples": 160183, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 3842245, "size_in_bytes": 359058926}, "udpos.Kazakh": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Kazakh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 220556, "num_examples": 10006, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 11194, "num_examples": 529, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 231750, "size_in_bytes": 355448431}, "udpos.Korean": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Korean", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 758475, "num_examples": 37236, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1128355, "num_examples": 56627, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 7122131, "num_examples": 353124, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 9008961, "size_in_bytes": 364225642}, "udpos.Chinese": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Chinese", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 570160, "num_examples": 35420, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1191859, "num_examples": 73131, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4067011, "num_examples": 251346, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 5829030, "size_in_bytes": 361045711}, "udpos.Marathi": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Marathi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 8133, "num_examples": 361, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 7499, "num_examples": 340, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 56043, "num_examples": 2474, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 71675, "size_in_bytes": 355288356}, "udpos.Persian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Persian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 312269, "num_examples": 15832, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 315891, "num_examples": 16024, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2362444, "num_examples": 121064, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 2990604, "size_in_bytes": 358207285}, "udpos.Portuguese": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Portuguese", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 698257, "num_examples": 42980, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1061162, "num_examples": 65057, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 7525808, "num_examples": 462500, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 9285227, "size_in_bytes": 364501908}, "udpos.Russian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Russian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 3377415, "num_examples": 156081, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 4146085, "num_examples": 192457, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 23691054, "num_examples": 1082189, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 31214554, "size_in_bytes": 386431235}, "udpos.Spanish": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Spanish", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1474369, "num_examples": 89487, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1451360, "num_examples": 87904, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 13630802, "num_examples": 827052, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 16556531, "size_in_bytes": 371773212}, "udpos.Tagalog": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Tagalog", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4717, "num_examples": 292, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 4717, "size_in_bytes": 355221398}, "udpos.Tamil": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Tamil", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 39395, "num_examples": 1263, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 61410, "num_examples": 1989, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 199400, "num_examples": 6329, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 300205, "size_in_bytes": 355516886}, "udpos.Telugu": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Telugu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 16946, "num_examples": 662, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 18411, "num_examples": 721, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 129645, "num_examples": 5082, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 165002, "size_in_bytes": 355381683}, "udpos.Thai": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Thai", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 553348, "num_examples": 22322, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 553348, "size_in_bytes": 355770029}, "udpos.Turkish": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Turkish", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 178559, "num_examples": 10046, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 789122, "num_examples": 43796, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 675109, "num_examples": 37784, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 1642790, "size_in_bytes": 356859471}, "udpos.Urdu": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Urdu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 279853, "num_examples": 14581, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 284281, "num_examples": 14806, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2075062, "num_examples": 108690, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 2639196, "size_in_bytes": 357855877}, "udpos.Vietnamese": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Vietnamese", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 199796, "num_examples": 11514, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 207671, "num_examples": 11955, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 356147, "num_examples": 20285, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 763614, "size_in_bytes": 355980295}, "udpos.Yoruba": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "pos_tag": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xtreme", "config_name": "udpos.Yoruba", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 43860, "num_examples": 2664, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 43860, "size_in_bytes": 355260541}} \ No newline at end of file diff --git a/dummy/PAN-X.ace/1.1.0/dummy_data.zip b/dummy/PAN-X.ace/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..b87f6990eaa432a911dc471ffb10f826774ff0b3 --- /dev/null +++ b/dummy/PAN-X.ace/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08685dcb432e7735171b9a839155a72c31eaa4d4aa1dcc06acd1778d9b2cd14f +size 847 diff --git a/dummy/PAN-X.af/1.1.0/dummy_data.zip b/dummy/PAN-X.af/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..0391e2b44d28d6b05c538b982cd471a0956f3c0e --- /dev/null +++ b/dummy/PAN-X.af/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89c4a663771d081d3f2eaa8105d01c98cba95e2477a69def69ea7063c3835f44 +size 854 diff --git a/dummy/PAN-X.als/1.1.0/dummy_data.zip b/dummy/PAN-X.als/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..b73e5fc509c53779a9b9425d222db0417fea7988 --- /dev/null +++ b/dummy/PAN-X.als/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56403721511493c6895cabe5fdfd476c8b99b016916ef9c78613d7b2f20302b6 +size 777 diff --git a/dummy/PAN-X.am/1.1.0/dummy_data.zip b/dummy/PAN-X.am/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..adfd48cf077c3952bc895994fb51d7fa30946ebe --- /dev/null +++ b/dummy/PAN-X.am/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ded7b7a464380b227bc25432d2f11229e06a5255c9d391d7d374e71f16cab8e +size 852 diff --git a/dummy/PAN-X.an/1.1.0/dummy_data.zip b/dummy/PAN-X.an/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..9a04eb6332f3150b24ecf4441a619a22c100559e --- 
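The udpos configs above all share one schema: each example pairs a `token` string with a `pos_tag` drawn from the 17 Universal Dependencies classes, and each config carries its own splits (some, like udpos.Thai and udpos.Yoruba, are test-only). A minimal sketch of reading one of these configs with the Hugging Face `datasets` library, assuming the `xtreme` builder and the `udpos.Tamil` config name exactly as listed in dataset_infos.json above:

from datasets import load_dataset

# Load the Tamil UD POS config of XTREME; per the metadata above, the
# builder fetches ud-treebanks-v2.5.tgz and exposes train/validation/test.
ds = load_dataset("xtreme", "udpos.Tamil")

# pos_tag is a ClassLabel over the 17 UPOS classes listed above;
# int2str maps the stored class id back to its name (e.g. "NOUN").
example = ds["train"][0]
tag_name = ds["train"].features["pos_tag"].int2str(example["pos_tag"])
print(example["token"], tag_name)
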
/dev/null +++ b/dummy/PAN-X.an/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:283602ea9abc0bd903093f9efd1969613c7dd1d717636e815b4c2d1f7772fdce +size 804 diff --git a/dummy/PAN-X.ang/1.1.0/dummy_data.zip b/dummy/PAN-X.ang/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..c0304270f0140b7459f2df49a23d6eb8d6efdf34 --- /dev/null +++ b/dummy/PAN-X.ang/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48239a7dfdbee2d585a4b965e37d4c2a96cce97cadd149425fbc5b4abbea0088 +size 759 diff --git a/dummy/PAN-X.ar/1.1.0/dummy_data.zip b/dummy/PAN-X.ar/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..43378a26b707e713a460e28717f9711d4cacc1f7 --- /dev/null +++ b/dummy/PAN-X.ar/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb8eb77136e3c56290c28b1e2424feffc641de4007c70798c0a82ea879ba51d1 +size 810 diff --git a/dummy/PAN-X.arc/1.1.0/dummy_data.zip b/dummy/PAN-X.arc/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..979c81869ae36fab2d99e7b5ccf36c102306951d --- /dev/null +++ b/dummy/PAN-X.arc/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:120914be4ef6eaeb52e3f3314fee6470d29a7161b943ef0c1ca84ed38f01fcc5 +size 789 diff --git a/dummy/PAN-X.arz/1.1.0/dummy_data.zip b/dummy/PAN-X.arz/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..f891547659c164c32aaed0c633a8fc88c82d4fb2 --- /dev/null +++ b/dummy/PAN-X.arz/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acb22eafb0f27db7b60b07ae9b8c0f7d7930538c65ba1045854dd91dcdc9aad2 +size 911 diff --git a/dummy/PAN-X.as/1.1.0/dummy_data.zip b/dummy/PAN-X.as/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..dadde3e6b648deca0f6bfd8ea022c35865c89915 --- /dev/null +++ b/dummy/PAN-X.as/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86816ad2097021eef245f5767fd2a7d37280d201175045bc0d4376611d28f4ee +size 827 diff --git a/dummy/PAN-X.ast/1.1.0/dummy_data.zip b/dummy/PAN-X.ast/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..12c91f96674728844d309b38ac46136e77626968 --- /dev/null +++ b/dummy/PAN-X.ast/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20d585b0a2e8c7bcd6768ff50516af9c508d8f244da914576a6aa97219a49617 +size 776 diff --git a/dummy/PAN-X.ay/1.1.0/dummy_data.zip b/dummy/PAN-X.ay/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..64dcdd5bef33c4a969d69bb04baf0ee1f887cca7 --- /dev/null +++ b/dummy/PAN-X.ay/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2351dc1286f0a190c8039fa3d7449e5932c7e3ffe89db0e416f8b57907569a67 +size 702 diff --git a/dummy/PAN-X.az/1.1.0/dummy_data.zip b/dummy/PAN-X.az/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..90f245d6bdc8a8b8518658cb6302d2f45de42328 --- /dev/null +++ b/dummy/PAN-X.az/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebc36ba58160fb7103669d07bee8d44356233057e33beb002188f2fc5e9bd1d8 +size 872 diff --git a/dummy/PAN-X.ba/1.1.0/dummy_data.zip b/dummy/PAN-X.ba/1.1.0/dummy_data.zip new file mode 100644 index 
0000000000000000000000000000000000000000..f33f28819ec1a79279c668997551b2fa4ba7506f --- /dev/null +++ b/dummy/PAN-X.ba/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4926454a6739aaa6cdf8f65c547e9082231a121c5f6f3daae3e86be385697c97 +size 864 diff --git a/dummy/PAN-X.bar/1.1.0/dummy_data.zip b/dummy/PAN-X.bar/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..f531e803378bc39b5f3bc9aedac6954c2f4eacbf --- /dev/null +++ b/dummy/PAN-X.bar/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17da03e8f393c07440653f7ea5713cbe5558092b9114f080a4748d5a5ab75e70 +size 728 diff --git a/dummy/PAN-X.bat-smg/1.1.0/dummy_data.zip b/dummy/PAN-X.bat-smg/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..a01a58dc066d91a0cbaee6bfbe5c9667d6b41003 --- /dev/null +++ b/dummy/PAN-X.bat-smg/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f3740162bd8bbd37e44ce506f49ce2a5c732b22c500635dc75adf678976c22e +size 840 diff --git a/dummy/PAN-X.be-x-old/1.1.0/dummy_data.zip b/dummy/PAN-X.be-x-old/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..522f5104412cba9e8a7a83faf0ad11fec2c444e3 --- /dev/null +++ b/dummy/PAN-X.be-x-old/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81fa89c96629336ae13766df3eb8b7816515087f03cc9ce4e2fdaa5834a6b82f +size 964 diff --git a/dummy/PAN-X.be/1.1.0/dummy_data.zip b/dummy/PAN-X.be/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..c3d3e4044177a3d1408ea25190b6d7fb897e4129 --- /dev/null +++ b/dummy/PAN-X.be/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2707ec27a1dee12a2a1a9121bdbe202ffb4dcccd9909e3d65b734ca9189eba28 +size 890 diff --git a/dummy/PAN-X.bg/1.1.0/dummy_data.zip b/dummy/PAN-X.bg/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..87f840047346e20e0619d36eea96ad967154876c --- /dev/null +++ b/dummy/PAN-X.bg/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4de0734339efcf4cc3efbce2a2658b16c90ee8c477eccb539617e6b04f544343 +size 846 diff --git a/dummy/PAN-X.bh/1.1.0/dummy_data.zip b/dummy/PAN-X.bh/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..6330667d5e14ff3f0deaececb9ebc4de4b13b77a --- /dev/null +++ b/dummy/PAN-X.bh/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c19a0ac98734aac7d6929f4ba9ad94cbc3f5fab258479187ac5cc34f1993dec2 +size 861 diff --git a/dummy/PAN-X.bn/1.1.0/dummy_data.zip b/dummy/PAN-X.bn/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..0745c4bf8f518f5458006457ad65ccc7fc2a3796 --- /dev/null +++ b/dummy/PAN-X.bn/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ece6478cda00f3ba2b7427440db43166513b08ff366119ffc3de5829694bdf7 +size 795 diff --git a/dummy/PAN-X.bo/1.1.0/dummy_data.zip b/dummy/PAN-X.bo/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..e08cf5d92c11ce7a60db709b624448227f5cc34e --- /dev/null +++ b/dummy/PAN-X.bo/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a359d5e089942980ace0590091eea61af9bf00100f394e6e33fe5e13a2a1489 +size 778 diff --git 
a/dummy/PAN-X.br/1.1.0/dummy_data.zip b/dummy/PAN-X.br/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..660256fd8f73091bc1ed337e370c7b85891c1349 --- /dev/null +++ b/dummy/PAN-X.br/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:529c8e64ab8155c193cbc17ec4bb5e8a8ff6c14b766c9a915a71f02d8e1b4ea5 +size 831 diff --git a/dummy/PAN-X.bs/1.1.0/dummy_data.zip b/dummy/PAN-X.bs/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..5ff9d496f7d8a39d04ea7ed6fdbc39cf531d6681 --- /dev/null +++ b/dummy/PAN-X.bs/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:746cb6d822589accc40038ef60edf0dba994e9e10ace5572b78f2883bc1ebf36 +size 833 diff --git a/dummy/PAN-X.ca/1.1.0/dummy_data.zip b/dummy/PAN-X.ca/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..566cafb02e14ae36e87cc41e0e1640660bb05296 --- /dev/null +++ b/dummy/PAN-X.ca/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5294a5ebf4a2d803f8f176af2d357553dd0feff57d26414786c17e0eb375775 +size 786 diff --git a/dummy/PAN-X.cbk-zam/1.1.0/dummy_data.zip b/dummy/PAN-X.cbk-zam/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..907054609bda486f86a146769a5da7dda18568ff --- /dev/null +++ b/dummy/PAN-X.cbk-zam/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:436b8ceb3841d41cdb8db9e397efff3e8b1b11d02b8b797910d9673ca591f99e +size 980 diff --git a/dummy/PAN-X.cdo/1.1.0/dummy_data.zip b/dummy/PAN-X.cdo/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..55b5e66a74192c74eba64ae01b1293867b3a5746 --- /dev/null +++ b/dummy/PAN-X.cdo/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:029e799ba257d1e62b1b37946d56aaf110c9d5b3042419cc12ec9e08d692daec +size 1022 diff --git a/dummy/PAN-X.ce/1.1.0/dummy_data.zip b/dummy/PAN-X.ce/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..ec233ff2ae4571c52ab05c4ebaef0a301f8a28fc --- /dev/null +++ b/dummy/PAN-X.ce/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afc4aa7cd0f13a3f5c48d66aaff9ef3ba0471b7b100bacdc381e852bad655451 +size 1011 diff --git a/dummy/PAN-X.ceb/1.1.0/dummy_data.zip b/dummy/PAN-X.ceb/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..c83198c6d98683265375e0947e03e12dbbbe3903 --- /dev/null +++ b/dummy/PAN-X.ceb/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a101400483f780b191604c25a05b1d5f55d37500d5a94aa9bc572feeb883d706 +size 818 diff --git a/dummy/PAN-X.ckb/1.1.0/dummy_data.zip b/dummy/PAN-X.ckb/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..9baa5595041c42a67997d32d0b8492db0ba613ec --- /dev/null +++ b/dummy/PAN-X.ckb/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd4d3f0b762341a06ef32bfed8e75b6df42124112f71795207c8e40d3877904d +size 812 diff --git a/dummy/PAN-X.co/1.1.0/dummy_data.zip b/dummy/PAN-X.co/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..bde86b2917e072452d70c26a65e073c0d4080676 --- /dev/null +++ b/dummy/PAN-X.co/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b7621df0a390eb0b4a2b73d3b157f4a944deed60b97ab2d2d01ad51c3ab73981 +size 863 diff --git a/dummy/PAN-X.crh/1.1.0/dummy_data.zip b/dummy/PAN-X.crh/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..df99204df8a5b896ae3bf26ea58e9eb12071de8c --- /dev/null +++ b/dummy/PAN-X.crh/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da0b6f1234402a65d7ddcbc42272bf6e2b415ca9c578a69cc52d44c9aacbbc21 +size 835 diff --git a/dummy/PAN-X.cs/1.1.0/dummy_data.zip b/dummy/PAN-X.cs/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..e58acc753d63daa41c2c674a3255ddac115b7104 --- /dev/null +++ b/dummy/PAN-X.cs/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7051a3a844a299db47c298be1497601c1d6ee293ed243c45d3a6f7685dcbfa8 +size 845 diff --git a/dummy/PAN-X.csb/1.1.0/dummy_data.zip b/dummy/PAN-X.csb/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..c7511d131203a8ba94b7a1b36753b51992b98e32 --- /dev/null +++ b/dummy/PAN-X.csb/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82f2f3c8099af79f4fabe4f513dc7824f322dc85f5471ce7f841619cdd7c119e +size 1055 diff --git a/dummy/PAN-X.cv/1.1.0/dummy_data.zip b/dummy/PAN-X.cv/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..92af817b66d6f3d90e28ad24d26923cc729688ba --- /dev/null +++ b/dummy/PAN-X.cv/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6aff9d641b10cc563844ac3a4585b94f053429c0e1deea1636d8d0478d852c6 +size 838 diff --git a/dummy/PAN-X.cy/1.1.0/dummy_data.zip b/dummy/PAN-X.cy/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..31662f659b4b4eff54efa624a57ee0a21769d93d --- /dev/null +++ b/dummy/PAN-X.cy/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7f4a51869766280d71d538190ede9168ef72293d0fe892600693cc57fa230a7 +size 867 diff --git a/dummy/PAN-X.da/1.1.0/dummy_data.zip b/dummy/PAN-X.da/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..c7291efa3789ae74ad91d8617ca2bf53cd31252c --- /dev/null +++ b/dummy/PAN-X.da/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a2272142bcbcdac614064555bdf662ddac9af0e9cd515167320dad7defd5ea8 +size 797 diff --git a/dummy/PAN-X.de/1.1.0/dummy_data.zip b/dummy/PAN-X.de/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..db804dbfe5c2a67559c623628604f3a47694fbf9 --- /dev/null +++ b/dummy/PAN-X.de/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:505172278d411d10b5e5093f0154994b23e34fd67747a8ff823bd76d494e3236 +size 784 diff --git a/dummy/PAN-X.diq/1.1.0/dummy_data.zip b/dummy/PAN-X.diq/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..6cb4d36eb5309ee41ddb449fee284957cdeae2aa --- /dev/null +++ b/dummy/PAN-X.diq/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5006a48e2d102d47330b2993b2ec26e8e74f5afbf7dab07f6a16b3af5c7779b7 +size 804 diff --git a/dummy/PAN-X.dv/1.1.0/dummy_data.zip b/dummy/PAN-X.dv/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..9f6fc7f83708c887ce860815673faa83befe0008 --- /dev/null +++ b/dummy/PAN-X.dv/1.1.0/dummy_data.zip @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73aaac1210f16cbb6585425d8eaaf2281d7462ca84d10e8e48be616362e94b9f +size 950 diff --git a/dummy/PAN-X.el/1.1.0/dummy_data.zip b/dummy/PAN-X.el/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..e31a893cf8d3831c7888fe0716007d10c2a00761 --- /dev/null +++ b/dummy/PAN-X.el/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7e60b2bca29b3cf23d5ba5ba6d4663df1bd946a5dc7a76df6dba27eb8353878 +size 1038 diff --git a/dummy/PAN-X.eml/1.1.0/dummy_data.zip b/dummy/PAN-X.eml/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..c480f13dae32c9d0951ff34406d1dc67a55face2 --- /dev/null +++ b/dummy/PAN-X.eml/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1e72207fc77d9ef2d63e7d8bca697fb8598986001f736212ac6155ced6eea29 +size 829 diff --git a/dummy/PAN-X.en/1.1.0/dummy_data.zip b/dummy/PAN-X.en/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..0a6d317295a113416a006feae700402823b4c28a --- /dev/null +++ b/dummy/PAN-X.en/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1950c37354be49e227803a48af27b10f666d91969e59d88d2b0c5bbef10d3ed +size 883 diff --git a/dummy/PAN-X.eo/1.1.0/dummy_data.zip b/dummy/PAN-X.eo/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..c706e7af3320f226a51652628407d40cbb04c363 --- /dev/null +++ b/dummy/PAN-X.eo/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2dea50db1950b514f0e743b16209fdc3b8b98d7c9472fb4b7134c5b04964c3b +size 847 diff --git a/dummy/PAN-X.es/1.1.0/dummy_data.zip b/dummy/PAN-X.es/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..e0089bd12d67e31f17d68b363ec87391e72e2b1a --- /dev/null +++ b/dummy/PAN-X.es/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf83816be3f7a52150bbbb3fa53d2defe2bc7e3051177cb25bcfbf8a613b09d2 +size 745 diff --git a/dummy/PAN-X.et/1.1.0/dummy_data.zip b/dummy/PAN-X.et/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..3e8e2807f7310179e42ef12d3f40df2a53473d86 --- /dev/null +++ b/dummy/PAN-X.et/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5c03d39ccffe0fc2f0597a246b47670854c6cd733a8489e7eb210d413d40146 +size 858 diff --git a/dummy/PAN-X.eu/1.1.0/dummy_data.zip b/dummy/PAN-X.eu/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..d9b4fb724c7011d0f5eb27a0adcbb36435e0270c --- /dev/null +++ b/dummy/PAN-X.eu/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f6ae842a1a20e2cf98477c3251362012ceb9e19b0f29fb6c127e1bc378750de +size 822 diff --git a/dummy/PAN-X.ext/1.1.0/dummy_data.zip b/dummy/PAN-X.ext/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..8f87697174e7082bd5376c0340da56692f419159 --- /dev/null +++ b/dummy/PAN-X.ext/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c318093e0ad1fdd163edd3bc3b2bfa3a3204e6d324dc035e495802f00d8fca4 +size 681 diff --git a/dummy/PAN-X.fa/1.1.0/dummy_data.zip b/dummy/PAN-X.fa/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..8a10e47ff63e84f7400f5e14b25ab68432eb55cb --- 
/dev/null +++ b/dummy/PAN-X.fa/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ce90117e708fe1952508063561917d7a074ae8ceebaec655a3486c5998484e7 +size 836 diff --git a/dummy/PAN-X.fi/1.1.0/dummy_data.zip b/dummy/PAN-X.fi/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..a21eea284e032d315f6dd2054c1a3fea1ba80350 --- /dev/null +++ b/dummy/PAN-X.fi/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1f8f316ecf9bcc168e6bc9a716856bb2c28f563e31a7c8f78ce791367783d40 +size 919 diff --git a/dummy/PAN-X.fiu-vro/1.1.0/dummy_data.zip b/dummy/PAN-X.fiu-vro/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..e1465dde26c2ae7af5c065bffa415e4ed59c1424 --- /dev/null +++ b/dummy/PAN-X.fiu-vro/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2471ae20e32385f7b944731da15c01b6330df548c29a34224987834c55e41e7 +size 812 diff --git a/dummy/PAN-X.fo/1.1.0/dummy_data.zip b/dummy/PAN-X.fo/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..028551cf201bbd2dc9f377786d7d436834a8bf74 --- /dev/null +++ b/dummy/PAN-X.fo/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fafe5d44eb320c7da311c2dc26b727ed43cb5385efde1c1d4407027fddc4597f +size 763 diff --git a/dummy/PAN-X.fr/1.1.0/dummy_data.zip b/dummy/PAN-X.fr/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..468e3a97c904fcbebe6c68e43e71b4adc234e87d --- /dev/null +++ b/dummy/PAN-X.fr/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fcb777a645b9bef7d752b82ca3b7e90430824b3201125208501679e8403f7b16 +size 758 diff --git a/dummy/PAN-X.frr/1.1.0/dummy_data.zip b/dummy/PAN-X.frr/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..77b81161f849f71638daf9729d8fd0870331ba6f --- /dev/null +++ b/dummy/PAN-X.frr/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd85c9503a338ed1ee45bb3d0a38256f3904dc003039a07ea1a99fe653832b97 +size 723 diff --git a/dummy/PAN-X.fur/1.1.0/dummy_data.zip b/dummy/PAN-X.fur/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..6a12b8b4347f995ff1ebd44b684d2b798ec461a9 --- /dev/null +++ b/dummy/PAN-X.fur/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8d6261d991bd88d92d52c96267c4af7eef421666aad82596fd791f749447e52 +size 864 diff --git a/dummy/PAN-X.fy/1.1.0/dummy_data.zip b/dummy/PAN-X.fy/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..9de1316557f8fa3dbbc5a63f506d88ad9dd33bd2 --- /dev/null +++ b/dummy/PAN-X.fy/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:557f07a606e01de49cbac5c88e0c17745bef2385225cff994443346907152b18 +size 956 diff --git a/dummy/PAN-X.ga/1.1.0/dummy_data.zip b/dummy/PAN-X.ga/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..88a9305e9a7997a322d5c2c41b228b83abceef8d --- /dev/null +++ b/dummy/PAN-X.ga/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9a6daa5ef0a2b35e8b136052c99afa7a5680a1450a3f5d1dd65a15c176a81a0 +size 838 diff --git a/dummy/PAN-X.gan/1.1.0/dummy_data.zip b/dummy/PAN-X.gan/1.1.0/dummy_data.zip new file mode 100644 index 
0000000000000000000000000000000000000000..905ae18b1a777083a6b2670d5d2d077b37a3e315 --- /dev/null +++ b/dummy/PAN-X.gan/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b00c544a2e444fef5563df48851fb0869fe35da19294af2b656ac3511fc3e279 +size 849 diff --git a/dummy/PAN-X.gd/1.1.0/dummy_data.zip b/dummy/PAN-X.gd/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..25355455d7ab42eba8e5235fb959e39ffd76bb58 --- /dev/null +++ b/dummy/PAN-X.gd/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4fd3c8c11524c7ee76e4ba51ed5293815b746722c5c614d5a93bb332e38ba46a +size 848 diff --git a/dummy/PAN-X.gl/1.1.0/dummy_data.zip b/dummy/PAN-X.gl/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..60f23989305cdf159ce57fa6653ad10f1d104e76 --- /dev/null +++ b/dummy/PAN-X.gl/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce38fedb54d0eead3d07bbfd5f8c3464956171b8ed52e63eb73c0e2e4170704a +size 775 diff --git a/dummy/PAN-X.gn/1.1.0/dummy_data.zip b/dummy/PAN-X.gn/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..c067cffe99c70b44267168793d92f219cd7e5d02 --- /dev/null +++ b/dummy/PAN-X.gn/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fd7e775eb8b8e9ad0d44e70267ff383432555d538bb1d131f149020dc7d15d9 +size 825 diff --git a/dummy/PAN-X.gu/1.1.0/dummy_data.zip b/dummy/PAN-X.gu/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..10e15347064565473499815079793b30ebae4fcf --- /dev/null +++ b/dummy/PAN-X.gu/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa9bd4c9e70c937b623905682b22b3f7bfd5ebc2898ae5ac6261f7f7e6b34d4e +size 888 diff --git a/dummy/PAN-X.hak/1.1.0/dummy_data.zip b/dummy/PAN-X.hak/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..d9f59937920040c25ce68698b51c27be34976f3c --- /dev/null +++ b/dummy/PAN-X.hak/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff15e0357b1500d47a3f02a090f34f1b2f1d4f73933229ee99dc26aaecb0e821 +size 835 diff --git a/dummy/PAN-X.he/1.1.0/dummy_data.zip b/dummy/PAN-X.he/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..438ac4ac771a2b661375b8df28515392da365068 --- /dev/null +++ b/dummy/PAN-X.he/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4552b0f64601ade58a7b067c7555014fc886c36a3c829d9e4986b33d2edd568a +size 786 diff --git a/dummy/PAN-X.hi/1.1.0/dummy_data.zip b/dummy/PAN-X.hi/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..db4f6d5beea8a80adf646a8706fed3754c4c012f --- /dev/null +++ b/dummy/PAN-X.hi/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41bf1ab583c81042d6280208836ae8bed68444653c70db0023fd2678b519dc78 +size 941 diff --git a/dummy/PAN-X.hr/1.1.0/dummy_data.zip b/dummy/PAN-X.hr/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..b1df95735c2bdb3e78ece31135ee94c5970c6c65 --- /dev/null +++ b/dummy/PAN-X.hr/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da2f701ac539479b29c4ca0691b8d8c4ccef9633e8208c1c1232f516570aadb8 +size 1004 diff --git a/dummy/PAN-X.hsb/1.1.0/dummy_data.zip 
b/dummy/PAN-X.hsb/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..103cb08ceb588345c66c497fcca52d8f66679f89 --- /dev/null +++ b/dummy/PAN-X.hsb/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03293dc15c3bc825220b4e9d76c5fdd1b4398c8cacf69d2f199411ed281bc7f2 +size 784 diff --git a/dummy/PAN-X.hu/1.1.0/dummy_data.zip b/dummy/PAN-X.hu/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..e7c24d58f7baa97a2208f45e426acf5004362c55 --- /dev/null +++ b/dummy/PAN-X.hu/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:092e6a7932bba3e1035d045a31347b06a2847fc55a9db920b149126c397c45f1 +size 861 diff --git a/dummy/PAN-X.hy/1.1.0/dummy_data.zip b/dummy/PAN-X.hy/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..113ceff2840099b73dcab90a6e79fc828ccd25e6 --- /dev/null +++ b/dummy/PAN-X.hy/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b28f9336565ecb6a814394abeb89fca289f8d943a3b8d70350a90e4929458907 +size 815 diff --git a/dummy/PAN-X.ia/1.1.0/dummy_data.zip b/dummy/PAN-X.ia/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..6317d95057a68628ddc0406faf864050eef27194 --- /dev/null +++ b/dummy/PAN-X.ia/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0486a022334660d7911bfad957f3563d1a657775c989525bc07f1f5433c3aa2 +size 827 diff --git a/dummy/PAN-X.id/1.1.0/dummy_data.zip b/dummy/PAN-X.id/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..a05db03409b9b9fc826523cdd0ea1f73a506b023 --- /dev/null +++ b/dummy/PAN-X.id/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b71ad8e3da22480c08060882272156660adeec59b0a8295d24362b15a4e3177a +size 779 diff --git a/dummy/PAN-X.ig/1.1.0/dummy_data.zip b/dummy/PAN-X.ig/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..5417633a7c2db8f941894c0331f98e60d8953c45 --- /dev/null +++ b/dummy/PAN-X.ig/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:814f69b6b955bb6ac28c0707d795dc741d7cbece09483fe2338ff888bdd8e626 +size 712 diff --git a/dummy/PAN-X.ilo/1.1.0/dummy_data.zip b/dummy/PAN-X.ilo/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..76b10af7e99b4e626d0bbc9907b52c658dba07a0 --- /dev/null +++ b/dummy/PAN-X.ilo/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e6e469dbbfc0677c5b9bc00be7edceb3360ff33bcc612ef8d563c8cae41c2be +size 741 diff --git a/dummy/PAN-X.io/1.1.0/dummy_data.zip b/dummy/PAN-X.io/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..ec318413bbe5ca4e81372866f3f52cdb47160c97 --- /dev/null +++ b/dummy/PAN-X.io/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a32349fc67ebcc2784b6188c0db97c8122a559a59f77a65b78a7673dd4b1142 +size 738 diff --git a/dummy/PAN-X.is/1.1.0/dummy_data.zip b/dummy/PAN-X.is/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..84634d708ede60afe3860c814b98f61845ab4883 --- /dev/null +++ b/dummy/PAN-X.is/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00925b41883a033636efa87bf1904c7001ce812e74bfa6f463200b69970449b2 +size 
810 diff --git a/dummy/PAN-X.it/1.1.0/dummy_data.zip b/dummy/PAN-X.it/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..3f6ad5b5745897cc01c72337b6a1dc662e3e2496 --- /dev/null +++ b/dummy/PAN-X.it/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2cc926a8830313d467071cf34ac08b138da3612c36f2b4ceee4c00bdc45671dc +size 781 diff --git a/dummy/PAN-X.ja/1.1.0/dummy_data.zip b/dummy/PAN-X.ja/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..f07b3e70afc06087547c445a2f89e1bb4bd5f34e --- /dev/null +++ b/dummy/PAN-X.ja/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66d9d1c2a71e5690a613252bd12b8cfc34e1bcd720248c78c50afbbfe270e8b6 +size 1381 diff --git a/dummy/PAN-X.jbo/1.1.0/dummy_data.zip b/dummy/PAN-X.jbo/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..584899509cc3e03558d8833329901e554c255ba4 --- /dev/null +++ b/dummy/PAN-X.jbo/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28d5d49dc35de0d477d41b168d63fa47221cfe221fff615809506bd46466747b +size 709 diff --git a/dummy/PAN-X.jv/1.1.0/dummy_data.zip b/dummy/PAN-X.jv/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..363c7800ace63d2ebba3fe828a9a30461f03233b --- /dev/null +++ b/dummy/PAN-X.jv/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e9b07529d7f430e05d2706dc74ed547613f31a69207776e580a9e618acd5d0e +size 766 diff --git a/dummy/PAN-X.ka/1.1.0/dummy_data.zip b/dummy/PAN-X.ka/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..00bffa8d2e479d87566d22ce2a59b05e0a493023 --- /dev/null +++ b/dummy/PAN-X.ka/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1a3bb8d116f5e93e4654a8f0eb1043d88f08560bf34bf07539f6225871274a0 +size 968 diff --git a/dummy/PAN-X.kk/1.1.0/dummy_data.zip b/dummy/PAN-X.kk/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..dfdcbf9f4561f16222becf3e826afca4b5f66782 --- /dev/null +++ b/dummy/PAN-X.kk/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:856be56d16ac21c42ad8894326bb5fdad85a9ef3230e6496a20e0008fc4cb212 +size 881 diff --git a/dummy/PAN-X.km/1.1.0/dummy_data.zip b/dummy/PAN-X.km/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..3998be63a96478ecb94a504d181ce69db6b81991 --- /dev/null +++ b/dummy/PAN-X.km/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a466a6c27a2fb39b6534a7da1e98263b3a28b887eff4b35b7c16834a03516a32 +size 804 diff --git a/dummy/PAN-X.kn/1.1.0/dummy_data.zip b/dummy/PAN-X.kn/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..c3cd34161a9ce3435ce401c4398c69a21eef727b --- /dev/null +++ b/dummy/PAN-X.kn/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a9c19678a5b7962249a865cb688762bd498c9636b6a488ef2dcb57c51d86e73 +size 864 diff --git a/dummy/PAN-X.ko/1.1.0/dummy_data.zip b/dummy/PAN-X.ko/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..1a357a8e5c9b99c19cacd31fc5c30e1dcf4af274 --- /dev/null +++ b/dummy/PAN-X.ko/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:64cbb141c64b2251bceb8c4fb540fe3a3568be5c54e2fe1065642b44c9d4d9cc +size 864 diff --git a/dummy/PAN-X.ksh/1.1.0/dummy_data.zip b/dummy/PAN-X.ksh/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..07b031d00028e46f9af47b06a38ccbcaeefa7334 --- /dev/null +++ b/dummy/PAN-X.ksh/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9ae63dc0b4e6ef38a2746d939971e86599b547577b5a7d952c4be8939ce1291 +size 806 diff --git a/dummy/PAN-X.ku/1.1.0/dummy_data.zip b/dummy/PAN-X.ku/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..b0347f696552d249c39741b9e2c80a0a0940ecdc --- /dev/null +++ b/dummy/PAN-X.ku/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:454d8af9e7986673db424730c5b627825a76ef1a7d650e744c2830b0ff3ba6d7 +size 853 diff --git a/dummy/PAN-X.ky/1.1.0/dummy_data.zip b/dummy/PAN-X.ky/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..142c14bc78c1a1ce26d1c7773977bb4cbb907f73 --- /dev/null +++ b/dummy/PAN-X.ky/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a7825c42cfe343b54ec7c0758c18b3d336f667fc390776d81c9e1116f7ff584 +size 846 diff --git a/dummy/PAN-X.la/1.1.0/dummy_data.zip b/dummy/PAN-X.la/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..5f4b897d366d592e78445ea90559eae1b21e7294 --- /dev/null +++ b/dummy/PAN-X.la/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:496015f1cf5583e856933b41f6a333545d482de8bdac9110149f765873653aa9 +size 751 diff --git a/dummy/PAN-X.lb/1.1.0/dummy_data.zip b/dummy/PAN-X.lb/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..e815ae7bd3c2c8d7d9b0545678dee7fa49b60fc3 --- /dev/null +++ b/dummy/PAN-X.lb/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac7d1027778d8e5f1c54d024057825471df8edc8530ca483054869b015d28cd7 +size 823 diff --git a/dummy/PAN-X.li/1.1.0/dummy_data.zip b/dummy/PAN-X.li/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..4244ebf61254b9464586f5466214ae2e89b2e1ef --- /dev/null +++ b/dummy/PAN-X.li/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c75459cd3001be2cce26764ef39dba6297831cf91cdec16549a7c92efc0df477 +size 834 diff --git a/dummy/PAN-X.lij/1.1.0/dummy_data.zip b/dummy/PAN-X.lij/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..a20ca20c391b7939832618811f00bb9dffc278b4 --- /dev/null +++ b/dummy/PAN-X.lij/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fb7ed5b3dfefa1c73ac83a2441427cc564346f1dc6200cb4f5df6df0f6b4ca4 +size 761 diff --git a/dummy/PAN-X.lmo/1.1.0/dummy_data.zip b/dummy/PAN-X.lmo/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..ade881f2b28f160ff7d1c08301849740064834f3 --- /dev/null +++ b/dummy/PAN-X.lmo/1.1.0/dummy_data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e97bc6ea917cbf70f069a6d87a915fdbf19b628ab5bf31c16fd262790fc2cd66 +size 802 diff --git a/dummy/PAN-X.ln/1.1.0/dummy_data.zip b/dummy/PAN-X.ln/1.1.0/dummy_data.zip new file mode 100644 index 0000000000000000000000000000000000000000..6a8b55c5edd10051fcf2518e3693010244380f6b --- /dev/null +++ b/dummy/PAN-X.ln/1.1.0/dummy_data.zip @@ -0,0 
+1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e4610cd70604c4d6f263ffaad66367c7a9ea908064f38731b9980ee4cea3132
+size 870
[Analogous Git LFS pointer additions follow for the remaining PAN-X languages: lt, lv, map-bms, mg, mhr, mi, min, mk, ml, mn, mr, ms, mt, mwl, my, mzn, nap, nds, ne, nl, nn, no, nov, oc, or, os, pa, pdc, pl, pms, pnb, ps, pt, qu, rm, ro, ru, rw, sa, sah, scn, sco, sd, sh, si, simple, sk, sl, so, sq, sr, su, sv, sw, szl, ta, te, tg, th, tk, tl, tr, tt, ug, uk, ur, uz, vec, vep, vi, vls, vo, wa, war, wuu, xmf, yi, yo, zea, zh-classical, zh-min-nan, zh-yue, and zh. Each adds dummy/PAN-X.<lang>/1.1.0/dummy_data.zip as a three-line LFS pointer (version, oid sha256, size); the machine-generated pointers are omitted here.]
diff --git a/xtreme.py b/xtreme.py
index 8d3bda05dc24e24ecc5261f79280e8d3de6c4be5..2b05d0dc69bd89b1225e23cd953ddb64b0174fb1 100644
--- a/xtreme.py
+++ b/xtreme.py
@@ -153,7 +153,7 @@ _PAN_X_LANG = [
     "yo",
     "zh",
 ]
-_PAN_X_FOLDER = "AmazonPhotos.zip"
+
 _NAMES = ["XNLI", "tydiqa", "SQuAD"]
 for lang in _PAN_X_LANG:
     _NAMES.append("PAN-X.{}".format(lang))
@@ -373,7 +373,7 @@ _DATA_URLS = {
     "tatoeba": "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1",
     "udpos": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz",
     "SQuAD": "https://rajpurkar.github.io/SQuAD-explorer/dataset/",
-    "PAN-X": "",
+    "PAN-X": "https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1",
 }
 
 _URLS = {
@@ -386,7 +386,7 @@ _URLS = {
     "tatoeba": "https://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md",
     "udpos": "https://universaldependencies.org/",
     "SQuAD": "https://rajpurkar.github.io/SQuAD-explorer/",
-    "PAN-X": "",
+    "PAN-X": "https://github.com/afshinrahimi/mmner",
 }
 
 
@@ -427,16 +427,6 @@ class Xtreme(datasets.GeneratorBasedBuilder):
         for name in _NAMES
     ]
 
-    @property
-    def manual_download_instructions(self):
-        if self.config.name.startswith("PAN-X"):
-            return """\
-            You need to manually download the AmazonPhotos.zip file on Amazon Cloud Drive
-            (https://www.amazon.com/clouddrive/share/d3KGCRCIYwhKJF0H3eWA26hjg2ZCRhjpEQtDL70FSBN). The folder containing the saved file
-            can be used to load the dataset via `datasets.load_dataset("xtreme", data_dir="").
-            """
-        return None
-
     def _info(self):
         # TODO(xtreme): Specifies the datasets.DatasetInfo object
         features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
@@ -704,6 +694,7 @@ class Xtreme(datasets.GeneratorBasedBuilder):
             ]
         else:
             return [
+                # We exclude Arabic-NYUAD, which does not contain any words, only _
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     # These kwargs will be passed to _generate_examples
@@ -714,7 +705,6 @@
                         for file in sorted(os.listdir(folder))
                         if "NYUAD" not in folder and "dev" in file and file.endswith(".conllu")
                     ]
-                    # we exclude Arabic NYUAD which deos not contains any word, only _
                 },
                 ),
                 datasets.SplitGenerator(
@@ -759,26 +749,15 @@
             ]
 
         if self.config.name.startswith("PAN-X"):
-            path_to_manual_folder = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
-            panx_path = os.path.join(path_to_manual_folder, _PAN_X_FOLDER)
-            if not os.path.exists(panx_path):
-                raise FileNotFoundError(
-                    "{} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('xtreme', data_dir=...)` that includes {}. Manual download instructions: {}".format(
-                        panx_path, _PAN_X_FOLDER, self.manual_download_instructions
-                    )
-                )
-
-            panx_dl_dir = dl_manager.extract(panx_path)
+            panx_dl_dir = dl_manager.download_and_extract(self.config.data_url)
             lang = self.config.name.split(".")[1]
-            lang_folder = dl_manager.extract(os.path.join(panx_dl_dir, "panx_dataset", lang + ".tar.gz"))
+            lang_folder = dl_manager.extract(os.path.join(panx_dl_dir, lang + ".tar.gz"))
+
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     # These kwargs will be passed to _generate_examples
-                    gen_kwargs={
-                        "filepath": os.path.join(lang_folder, "dev")
-                        # we exclude Arabic NYUAD which deos not contains any word, only _
-                    },
+                    gen_kwargs={"filepath": os.path.join(lang_folder, "dev")},
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
@@ -922,7 +901,7 @@
             with open(file, encoding="utf-8") as f:
                 data = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
                 for id_row, row in enumerate(data):
-                    if len(row) >= 10 and row[1] != "_":
+                    if len(row) >= 10 and row[1] != "_" and row[3] != "_":
                         yield str(id_file) + "_" + str(id_row), {"token": row[1], "pos_tag": row[3]}
         if self.config.name.startswith("PAN-X"):
             guid_index = 1