# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""mrobust dataset."""

import datasets

# _CITATION = """
# @misc{bonifacio2021mmarco,
#       title={mMARCO: A Multilingual Version of the MS MARCO Passage Ranking Dataset},
#       author={Luiz Henrique Bonifacio and Israel Campiotti and Vitor Jeronymo and Hugo Queiroz Abonizio and Roberto Lotufo and Rodrigo Nogueira},
#       year={2021},
#       eprint={2108.13897},
#       archivePrefix={arXiv},
#       primaryClass={cs.CL}
# }
# """

# _URL = "https://github.com/unicamp-dl/mMARCO"

_DESCRIPTION = """
Robust04 translated datasets
"""


def generate_examples_tuples(filepath):
    """Yields (id, features) tuples from a tab-separated ``id<TAB>text`` file."""
    with open(filepath, encoding="utf-8") as f:
        for line in f:
            idx, text = line.rstrip().split("\t")
            features = {
                "id": idx,
                "text": text,
            }
            yield idx, features


_BASE_URLS = {
    "collections": "https://huggingface.co/datasets/unicamp-dl/mrobust/resolve/main/data/collections/",
    "queries": "https://huggingface.co/datasets/unicamp-dl/mrobust/resolve/main/data/queries/",
}

LANGUAGES = [
    "chinese",
    "dutch",
    "english",
    "french",
    "german",
    "indonesian",
    "italian",
    "portuguese",
    "russian",
    "spanish",
    "vietnamese",
]


class MRobust(datasets.GeneratorBasedBuilder):
    """Robust04 translated collections and queries (mRobust)."""

    BUILDER_CONFIGS = (
        [
            datasets.BuilderConfig(
                name=f"collection-{language}",
                description=f"{language.capitalize()} collection",
                version=datasets.Version("1.0.0"),
            )
            for language in LANGUAGES
        ]
        + [
            datasets.BuilderConfig(
                name=f"queries-{language}",
                description=f"{language.capitalize()} queries",
                version=datasets.Version("1.0.0"),
            )
            for language in LANGUAGES
        ]
    )

    # Must match one of the config names defined above; a bare "english"
    # does not resolve to any BuilderConfig.
    DEFAULT_CONFIG_NAME = "collection-english"

    def _info(self):
        name = self.config.name
        if name.startswith("collection") or name.startswith("queries"):
            features = {
                "id": datasets.Value("string"),
                "text": datasets.Value("string"),
            }

        return datasets.DatasetInfo(
            description=f"{_DESCRIPTION}\n{self.config.description}",
            features=datasets.Features(features),
            supervised_keys=None,
            # homepage=_URL,
            # citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        if self.config.name.startswith("collection"):
            # "collection-<language>" -> "<language>_collection.tsv"
            url = _BASE_URLS["collections"] + self.config.name[11:] + "_collection.tsv"
            dl_path = dl_manager.download_and_extract(url)
            return (datasets.SplitGenerator(name="collection", gen_kwargs={"filepath": dl_path}),)
        elif self.config.name.startswith("queries"):
            # "queries-<language>" -> "<language>_queries.tsv"
            url = _BASE_URLS["queries"] + self.config.name[8:] + "_queries.tsv"
            dl_path = dl_manager.download_and_extract(url)
            return (datasets.SplitGenerator(name="queries", gen_kwargs={"filepath": dl_path}),)

    def _generate_examples(self, filepath, args=None):
        """Yields examples."""
        if self.config.name.startswith("collection") or self.config.name.startswith("queries"):
            return generate_examples_tuples(filepath)
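

# Minimal usage sketch (not part of the loading script itself): assuming this
# file is the loading script for the `unicamp-dl/mrobust` dataset repository
# referenced in `_BASE_URLS`, a collection or query set for one language can be
# loaded with `datasets.load_dataset`, using one of the config names built
# above ("collection-<language>" or "queries-<language>"):
#
#     from datasets import load_dataset
#
#     collection = load_dataset("unicamp-dl/mrobust", "collection-portuguese")["collection"]
#     queries = load_dataset("unicamp-dl/mrobust", "queries-portuguese")["queries"]
#
#     print(collection[0])  # {"id": "...", "text": "..."}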