americasnlp-mt-21/americasnlp-mt-21.py
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""AmericasNLP 2021 Shared Task on Open Machine Translation."""
import datasets
from .wmt_utils import Wmt, WmtConfig
_URL = "https://turing.iimas.unam.mx/americasnlp/2021/st.html"
_CITATION = """
@inproceedings{mager-etal-2021-findings,
title = "Findings of the {A}mericas{NLP} 2021 Shared Task on Open Machine Translation for Indigenous Languages of the {A}mericas",
author = "Mager, Manuel and
Oncevay, Arturo and
Ebrahimi, Abteen and
Ortega, John and
Rios, Annette and
Fan, Angela and
Gutierrez-Vasques, Ximena and
Chiruzzo, Luis and
Gim{\'e}nez-Lugo, Gustavo and
Ramos, Ricardo and
Meza Ruiz, Ivan Vladimir and
Coto-Solano, Rolando and
Palmer, Alexis and
Mager-Hois, Elisabeth and
Chaudhary, Vishrav and
Neubig, Graham and
Vu, Ngoc Thang and
Kann, Katharina",
booktitle = "Proceedings of the First Workshop on Natural Language Processing for Indigenous Languages of the Americas",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.americasnlp-1.23",
doi = "10.18653/v1/2021.americasnlp-1.23",
pages = "202--217",
abstract = "This paper presents the results of the 2021 Shared Task on Open Machine Translation for Indigenous Languages of the Americas. The shared task featured two independent tracks, and participants submitted machine translation systems for up to 10 indigenous languages. Overall, 8 teams participated with a total of 214 submissions. We provided training sets consisting of data collected from various sources, as well as manually translated sentences for the development and test sets. An official baseline trained on this data was also provided. Team submissions featured a variety of architectures, including both statistical and neural models, and for the majority of languages, many teams were able to considerably improve over the baseline. The best performing systems achieved 12.97 ChrF higher than baseline, when averaged across languages.",
}
"""
_LANGUAGE_PAIRS = [(lang, "es") for lang in ["aym", "bzd", "cni", "gn", "nah", "oto", "quy", "shp"]]


class AmericasNLPMT21(Wmt):
"""AmericasNLP translation datasets for all {xx, "es"} language pairs."""
BUILDER_CONFIGS = [
WmtConfig( # pylint:disable=g-complex-comprehension
description="AmericasNLP 2021 %s-%s translation task dataset." % (l1, l2),
url=_URL,
citation=_CITATION,
language_pair=(l1, l2),
version=datasets.Version("1.0.0"),
)
for l1, l2 in _LANGUAGE_PAIRS
]
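
    # The Wmt base class reads this mapping to decide which subset files feed
    # each split; the tags below are presumably SubDataset names defined in the
    # accompanying wmt_utils.py (uploaded alongside this script).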
    @property
    def _subsets(self):
        return {
            datasets.Split.TRAIN: ["americasnlp2021"],
            datasets.Split.VALIDATION: ["dev2021"],
            datasets.Split.TEST: ["test2021"],
        }
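

# A minimal usage sketch. The Hub repo id "vgaraujov/americasnlp-mt-21" is an
# assumption inferred from this upload and may differ; WmtConfig names each
# config "<lang>-es" after its language pair.
#
#   import datasets
#
#   # Load the Aymara-Spanish pair; swap "aym-es" for any pair built from
#   # _LANGUAGE_PAIRS, e.g. "quy-es" or "gn-es".
#   ds = datasets.load_dataset(
#       "vgaraujov/americasnlp-mt-21", "aym-es", trust_remote_code=True
#   )
#   print(ds["train"][0]["translation"])  # e.g. {"aym": "...", "es": "..."}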