# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Portuguese-English and Portuguese-Spanish bilingual collections."""

import datasets

from .wmt_utils import Wmt, WmtConfig


_URL = "http://www.nilc.icmc.usp.br/nilc/tools/Fapesp%20Corpora.htm"

_CITATION = """
@inproceedings{aziz-specia-2011-fully,
    title = "Fully Automatic Compilation of {P}ortuguese-{E}nglish and {P}ortuguese-{S}panish Parallel Corpora",
    author = "Aziz, Wilker and Specia, Lucia",
    booktitle = "Proceedings of the 8th {B}razilian Symposium in Information and Human Language Technology",
    year = "2011",
    url = "https://aclanthology.org/W11-4533",
}
"""

_LANGUAGE_PAIRS = [(lang, "pt") for lang in ["es", "en"]]


class Fapespv2(Wmt):
    """Fapesp v2 translation datasets for all {xx, "pt"} language pairs."""

    BUILDER_CONFIGS = [
        WmtConfig(  # pylint:disable=g-complex-comprehension
            description="Fapesp v2 %s-%s translation task dataset." % (l1, l2),
            url=_URL,
            citation=_CITATION,
            language_pair=(l1, l2),
            version=datasets.Version("1.0.0"),
        )
        for l1, l2 in _LANGUAGE_PAIRS
    ]

    @property
    def _subsets(self):
        # Maps each split to the wmt_utils subset name(s) it is built from:
        # the full "fapesp-v2" corpus for training, plus the held-out "dev"
        # and "test-b" sets for validation and testing.
        return {
            datasets.Split.TRAIN: ["fapesp-v2"],
            datasets.Split.VALIDATION: ["dev"],
            datasets.Split.TEST: ["test-b"],
        }
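

# ---------------------------------------------------------------------------
# Usage sketch (not part of the builder; the relative import above means this
# module is meant to be loaded by the `datasets` library, not run directly).
# Assumptions: the script is resolvable under the name "fapesp-v2" (the
# actual name depends on where it is published), and config names follow the
# "<l1>-<l2>" pattern that WmtConfig derives from `language_pair`, so "en-pt"
# and "es-pt" here:
#
#     import datasets
#
#     ds = datasets.load_dataset("fapesp-v2", "en-pt")
#     # WMT-style builders yield one translation dict per example, keyed by
#     # language code, e.g. {"translation": {"en": "...", "pt": "..."}}
#     print(ds["train"][0])
# ---------------------------------------------------------------------------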