---
annotations_creators:
- expert-generated
language_creators:
- expert-generated
language:
- ay
- bzd
- cni
- gn
- hch
- nah
- oto
- qu
- shp
- tar
license: cc-by-sa-4.0
multilinguality:
- multilingual
- translation
size_categories:
- unknown
source_datasets:
- extended|xnli
task_categories:
- text-classification
task_ids:
- natural-language-inference
pretty_name: 'AmericasNLI: A NLI Corpus of 10 Indigenous Low-Resource Languages.'
dataset_info:
- config_name: all_languages
  features:
  - name: language
    dtype: string
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  splits:
  - name: validation
    num_bytes: 1129080
    num_examples: 6457
  - name: test
    num_bytes: 1210579
    num_examples: 7486
  download_size: 791239
  dataset_size: 2339659
- config_name: aym
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  splits:
  - name: validation
    num_bytes: 117530
    num_examples: 743
  - name: test
    num_bytes: 115251
    num_examples: 750
  download_size: 87882
  dataset_size: 232781
- config_name: bzd
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  splits:
  - name: validation
    num_bytes: 143354
    num_examples: 743
  - name: test
    num_bytes: 127676
    num_examples: 750
  download_size: 91039
  dataset_size: 271030
- config_name: cni
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  splits:
  - name: validation
    num_bytes: 113256
    num_examples: 658
  - name: test
    num_bytes: 116284
    num_examples: 750
  download_size: 78899
  dataset_size: 229540
- config_name: gn
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  splits:
  - name: validation
    num_bytes: 115135
    num_examples: 743
  - name: test
    num_bytes: 101948
    num_examples: 750
  download_size: 80429
  dataset_size: 217083
- config_name: hch
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  splits:
  - name: validation
    num_bytes: 127966
    num_examples: 743
  - name: test
    num_bytes: 120857
    num_examples: 750
  download_size: 90748
  dataset_size: 248823
- config_name: nah
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  splits:
  - name: validation
    num_bytes: 50741
    num_examples: 376
  - name: test
    num_bytes: 102953
    num_examples: 738
  download_size: 56953
  dataset_size: 153694
- config_name: oto
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  splits:
  - name: validation
    num_bytes: 27010
    num_examples: 222
  - name: test
    num_bytes: 119650
    num_examples: 748
  download_size: 57849
  dataset_size: 146660
- config_name: quy
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  splits:
  - name: validation
    num_bytes: 125636
    num_examples: 743
  - name: test
    num_bytes: 112750
    num_examples: 750
  download_size: 85673
  dataset_size: 238386
- config_name: shp
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  splits:
  - name: validation
    num_bytes: 124500
    num_examples: 743
  - name: test
    num_bytes: 118934
    num_examples: 750
  download_size: 85544
  dataset_size: 243434
- config_name: tar
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  splits:
  - name: validation
    num_bytes: 139496
    num_examples: 743
  - name: test
    num_bytes: 122624
    num_examples: 750
  download_size: 89683
  dataset_size: 262120
configs:
- config_name: all_languages
  data_files:
  - split: validation
    path: all_languages/validation-*
  - split: test
    path: all_languages/test-*
- config_name: aym
  data_files:
  - split: validation
    path: aym/validation-*
  - split: test
    path: aym/test-*
- config_name: bzd
  data_files:
  - split: validation
    path: bzd/validation-*
  - split: test
    path: bzd/test-*
- config_name: cni
  data_files:
  - split: validation
    path: cni/validation-*
  - split: test
    path: cni/test-*
- config_name: gn
  data_files:
  - split: validation
    path: gn/validation-*
  - split: test
    path: gn/test-*
- config_name: hch
  data_files:
  - split: validation
    path: hch/validation-*
  - split: test
    path: hch/test-*
- config_name: nah
  data_files:
  - split: validation
    path: nah/validation-*
  - split: test
    path: nah/test-*
- config_name: oto
  data_files:
  - split: validation
    path: oto/validation-*
  - split: test
    path: oto/test-*
- config_name: quy
  data_files:
  - split: validation
    path: quy/validation-*
  - split: test
    path: quy/test-*
- config_name: shp
  data_files:
  - split: validation
    path: shp/validation-*
  - split: test
    path: shp/test-*
- config_name: tar
  data_files:
  - split: validation
    path: tar/validation-*
  - split: test
    path: tar/test-*
---

# Dataset Card for AmericasNLI

## Table of Contents

- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)

## Dataset Description

- **Homepage:** [Needs More Information]
- **Repository:** https://github.com/abteen/americasnli
- **Repository:** https://github.com/nala-cub/AmericasNLI
- **Paper:** https://arxiv.org/abs/2104.08726
- **Leaderboard:** [Needs More Information]
- **Point of Contact:** [Needs More Information]

### Dataset Summary

AmericasNLI is an extension of XNLI (Conneau et al., 2018), a natural language inference (NLI) dataset covering 15 high-resource languages, to 10 low-resource indigenous languages spoken in the Americas: Ashaninka, Aymara, Bribri, Guarani, Nahuatl, Otomi, Quechua, Raramuri, Shipibo-Konibo, and Wixarika. As with MNLI, the goal is to predict textual entailment: given two sentences (a premise and a hypothesis), classify their relationship as entailment, contradiction, or neutral.
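For convenience, a minimal loading sketch with the Hugging Face `datasets` library is shown below. The Hub dataset ID `americas_nli` is assumed here; adjust it if this card is hosted under a different namespace.

```python
from datasets import load_dataset

# All ten languages combined; each example carries a "language" column.
all_langs = load_dataset("americas_nli", "all_languages")

# A single-language configuration, e.g. Aymara. Only validation and test splits exist.
aym = load_dataset("americas_nli", "aym")

print(aym)             # DatasetDict with "validation" and "test" splits
print(aym["test"][0])  # {'premise': ..., 'hypothesis': ..., 'label': 2}
```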
### Supported Tasks and Leaderboards

[Needs More Information]

### Languages

- aym
- bzd
- cni
- gn
- hch
- nah
- oto
- quy
- shp
- tar

## Dataset Structure

### Data Instances

#### all_languages

An example of the test split looks as follows:

```
{'language': 'aym', 'premise': "Ukhamaxa, janiw ukatuqits lup'kayätti, ukhamarus wali phiñasitayätwa, ukatx jupampiw mayamp aruskipañ qallanttha.", 'hypothesis': 'Janiw mayamp jupampix parlxapxti.', 'label': 2}
```

#### aym

An example of the test split looks as follows:

```
{'premise': "Ukhamaxa, janiw ukatuqits lup'kayätti, ukhamarus wali phiñasitayätwa, ukatx jupampiw mayamp aruskipañ qallanttha.", 'hypothesis': 'Janiw mayamp jupampix parlxapxti.', 'label': 2}
```

#### bzd

An example of the test split looks as follows:

```
{'premise': "Bua', kèq ye' kũ e' bikeitsök erë ye' chkénãwã tã ye' ujtémĩne ie' tã páxlĩnẽ.", 'hypothesis': "Kèq ye' ùtẽnẽ ie' tã páxlĩ.", 'label': 2}
```

#### cni

An example of the test split looks as follows:

```
{'premise': 'Kameetsa, tee nokenkeshireajeroji, iro kantaincha tee nomateroji aisati nintajaro noñanatajiri iroakera.', 'hypothesis': 'Tee noñatajeriji.', 'label': 2}
```

#### gn

An example of the test split looks as follows:

```
{'premise': "Néi, ni napensaikurihína upéva rehe, ajepichaiterei ha añepyrûjey añe'ê hendive.", 'hypothesis': "Nañe'êvéi hendive.", 'label': 2}
```

#### hch

An example of the test split looks as follows:

```
{'premise': 'mu hekwa.', 'hypothesis': 'neuka tita xatawe m+k+ mat+a.', 'label': 2}
```

#### nah

An example of the test split looks as follows:

```
{'premise': 'Cualtitoc, na axnimoihliaya ino, nicualaniztoya queh naha nicamohuihqui', 'hypothesis': 'Ayoc nicamohuihtoc', 'label': 2}
```

#### oto

An example of the test split looks as follows:

```
{'premise': 'mi-ga, nin mibⴘy mbô̮nitho ane guenu, guedi mibⴘy nho ⴘnmⴘy xi di mⴘdi o ñana nen nⴘua manaigui', 'hypothesis': 'hin din bi pengui nen nⴘa', 'label': 2}
```

#### quy

An example of the test split looks as follows:

```
{'premise': 'Allinmi, manam chaypiqa hamutachkarqanichu, ichaqa manam allinchu tarikurqani chaymi kaqllamanta paywan rimarqani.', 'hypothesis': 'Manam paywanqa kaqllamantaqa rimarqani .', 'label': 2}
```

#### shp

An example of the test split looks as follows:

```
{'premise': 'Jakon riki, ja shinanamara ea ike, ikaxbi kikin frustradara ea ike jakopira ea jabe yoyo iribake.', 'hypothesis': 'Eara jabe yoyo iribiama iki.', 'label': 2}
```

#### tar

An example of the test split looks as follows:

```
{'premise': 'Ga’lá ju, ke tási newalayé nejé echi kítira, we ne majáli, a’lí ko uchécho ne yua ku ra’íchaki.', 'hypothesis': 'Tási ne uchecho yua ra’ícha échi rejói.', 'label': 2}
```

### Data Fields

#### all_languages

- language: a string feature giving the language of the example; one of aym, bzd, cni, gn, hch, nah, oto, quy, shp, or tar.
- premise: a string feature, written in the language indicated by the language field.
- hypothesis: a string feature, written in the language indicated by the language field.
- label: a classification label, with possible values including entailment (0), neutral (1), contradiction (2).

#### aym

- premise: a string feature.
- hypothesis: a string feature.
- label: a classification label, with possible values including entailment (0), neutral (1), contradiction (2).

#### bzd

- premise: a string feature.
- hypothesis: a string feature.
- label: a classification label, with possible values including entailment (0), neutral (1), contradiction (2).

#### cni

- premise: a string feature.
- hypothesis: a string feature.
- label: a classification label, with possible values including entailment (0), neutral (1), contradiction (2).

#### gn

- premise: a string feature.
- hypothesis: a string feature.
- label: a classification label, with possible values including entailment (0), neutral (1), contradiction (2).

#### hch

- premise: a string feature.
- hypothesis: a string feature.
- label: a classification label, with possible values including entailment (0), neutral (1), contradiction (2).

#### nah

- premise: a string feature.
- hypothesis: a string feature.
- label: a classification label, with possible values including entailment (0), neutral (1), contradiction (2).

#### oto

- premise: a string feature.
- hypothesis: a string feature.
- label: a classification label, with possible values including entailment (0), neutral (1), contradiction (2).

#### quy

- premise: a string feature.
- hypothesis: a string feature.
- label: a classification label, with possible values including entailment (0), neutral (1), contradiction (2).

#### shp

- premise: a string feature.
- hypothesis: a string feature.
- label: a classification label, with possible values including entailment (0), neutral (1), contradiction (2).

#### tar

- premise: a string feature.
- hypothesis: a string feature.
- label: a classification label, with possible values including entailment (0), neutral (1), contradiction (2).
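The label column is stored as an integer class ID. A short sketch (again assuming the Hub ID `americas_nli`) of how to map it back to its string name via the `ClassLabel` feature:

```python
from datasets import load_dataset

test = load_dataset("americas_nli", "aym", split="test")

# ClassLabel feature holding the names ["entailment", "neutral", "contradiction"]
label_feature = test.features["label"]
print(label_feature.names)

example = test[0]
print(example["label"], "->", label_feature.int2str(example["label"]))  # e.g. 2 -> contradiction
```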
### Data Splits

| Language       | ISO | Family       |  Dev | Test |
|----------------|-----|:-------------|-----:|-----:|
| all_languages  | --  | --           | 6457 | 7486 |
| Aymara         | aym | Aymaran      |  743 |  750 |
| Ashaninka      | cni | Arawak       |  658 |  750 |
| Bribri         | bzd | Chibchan     |  743 |  750 |
| Guarani        | gn  | Tupi-Guarani |  743 |  750 |
| Nahuatl        | nah | Uto-Aztecan  |  376 |  738 |
| Otomi          | oto | Oto-Manguean |  222 |  748 |
| Quechua        | quy | Quechuan     |  743 |  750 |
| Raramuri       | tar | Uto-Aztecan  |  743 |  750 |
| Shipibo-Konibo | shp | Panoan       |  743 |  750 |
| Wixarika       | hch | Uto-Aztecan  |  743 |  750 |
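The counts in the table above can be reproduced programmatically; a sketch (Hub ID `americas_nli` assumed, as in the earlier examples):

```python
from datasets import get_dataset_config_names, load_dataset

# Print dev/test sizes for every configuration, mirroring the table above.
for config in get_dataset_config_names("americas_nli"):
    ds = load_dataset("americas_nli", config)
    print(f"{config:15s} dev={len(ds['validation']):5d} test={len(ds['test']):5d}")
```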
## Dataset Creation

### Curation Rationale

[Needs More Information]

### Source Data

The authors translate from the Spanish subset of XNLI:

> AmericasNLI is the translation of a subset of XNLI (Conneau et al., 2018). As translators between Spanish and the target languages are more frequently available than those for English, we translate from the Spanish version.

See Section 3.1 of the [original paper](https://arxiv.org/abs/2104.08726).

#### Initial Data Collection and Normalization

[Needs More Information]

#### Who are the source language producers?

[Needs More Information]

### Annotations

#### Annotation process

The dataset comprises expert translations from Spanish XNLI:

> Additionally, some translators reported that code-switching is often used to describe certain topics, and, while many words without an exact equivalence in the target language are worked in through translation or interpretation, others are kept in Spanish. To minimize the amount of Spanish vocabulary in the translated examples, we choose sentences from genres that we judged to be relatively easy to translate into the target languages: “face-to-face,” “letters,” and “telephone.”

See Section 3.1 of the [original paper](https://arxiv.org/abs/2104.08726).

#### Who are the annotators?

[Needs More Information]

### Personal and Sensitive Information

[Needs More Information]

## Considerations for Using the Data

### Social Impact of Dataset

[Needs More Information]

### Discussion of Biases

[Needs More Information]

### Other Known Limitations

[Needs More Information]

## Additional Information

### Dataset Curators

[Needs More Information]

### Licensing Information

Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0): https://github.com/abteen/americasnli/blob/main/LICENSE.md

### Citation Information

```
@inproceedings{ebrahimi-etal-2022-americasnli,
    title = "{A}mericas{NLI}: Evaluating Zero-shot Natural Language Understanding of Pretrained Multilingual Models in Truly Low-resource Languages",
    author = "Ebrahimi, Abteen  and
      Mager, Manuel  and
      Oncevay, Arturo  and
      Chaudhary, Vishrav  and
      Chiruzzo, Luis  and
      Fan, Angela  and
      Ortega, John  and
      Ramos, Ricardo  and
      Rios, Annette  and
      Meza Ruiz, Ivan Vladimir  and
      Gim{\'e}nez-Lugo, Gustavo  and
      Mager, Elisabeth  and
      Neubig, Graham  and
      Palmer, Alexis  and
      Coto-Solano, Rolando  and
      Vu, Thang  and
      Kann, Katharina",
    booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = may,
    year = "2022",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.acl-long.435",
    pages = "6279--6299",
    abstract = "Pretrained multilingual models are able to perform cross-lingual transfer in a zero-shot setting, even for languages unseen during pretraining. However, prior work evaluating performance on unseen languages has largely been limited to low-level, syntactic tasks, and it remains unclear if zero-shot learning of high-level, semantic tasks is possible for unseen languages. To explore this question, we present AmericasNLI, an extension of XNLI (Conneau et al., 2018) to 10 Indigenous languages of the Americas. We conduct experiments with XLM-R, testing multiple zero-shot and translation-based approaches. Additionally, we explore model adaptation via continued pretraining and provide an analysis of the dataset by considering hypothesis-only models. We find that XLM-R{'}s zero-shot performance is poor for all 10 languages, with an average performance of 38.48{\%}. Continued pretraining offers improvements, with an average accuracy of 43.85{\%}. Surprisingly, training on poorly translated data by far outperforms all other methods with an accuracy of 49.12{\%}.",
}
```

### Contributions

Thanks to [@fdschmidt93](https://github.com/fdschmidt93) for adding this dataset.