# coding=utf-8
"""Mintaka: A Complex, Natural, and Multilingual Dataset for End-to-End Question Answering"""

import json

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
Mintaka is a complex, natural, and multilingual dataset designed for experimenting with
end-to-end question-answering models. Mintaka is composed of 20,000 question-answer pairs
collected in English, annotated with Wikidata entities, and translated into Arabic, French,
German, Hindi, Italian, Japanese, Portuguese, and Spanish for a total of 180,000 samples.
Mintaka includes 8 types of complex questions, including superlative, intersection, and
multi-hop questions, which were naturally elicited from crowd workers.
"""

_CITATION = """\
@inproceedings{sen-etal-2022-mintaka,
    title = "Mintaka: A Complex, Natural, and Multilingual Dataset for End-to-End Question Answering",
    author = "Sen, Priyanka and Aji, Alham Fikri and Saffari, Amir",
    booktitle = "Proceedings of the 29th International Conference on Computational Linguistics",
    month = oct,
    year = "2022",
    address = "Gyeongju, Republic of Korea",
    publisher = "International Committee on Computational Linguistics",
    url = "https://aclanthology.org/2022.coling-1.138",
    pages = "1604--1619",
}
"""

_LICENSE = """\
Copyright Amazon.com Inc. or its affiliates.
Attribution 4.0 International
"""

_TRAIN_URL = "https://raw.githubusercontent.com/amazon-science/mintaka/main/data/mintaka_train.json"
_DEV_URL = "https://raw.githubusercontent.com/amazon-science/mintaka/main/data/mintaka_dev.json"
_TEST_URL = "https://raw.githubusercontent.com/amazon-science/mintaka/main/data/mintaka_test.json"

_LANGUAGES = ["en", "ar", "de", "ja", "hi", "pt", "es", "it", "fr"]
_ALL = "all"


class Mintaka(datasets.GeneratorBasedBuilder):
    """Mintaka: A Complex, Natural, and Multilingual Dataset for End-to-End Question Answering"""

    # One configuration per language, plus an "all" configuration covering every language.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=name,
            version=datasets.Version("1.0.0"),
            description=f"Mintaka: A Complex, Natural, and Multilingual Dataset for End-to-End Question Answering for {name}",
        )
        for name in _LANGUAGES
    ]
    BUILDER_CONFIGS.append(
        datasets.BuilderConfig(
            name=_ALL,
            version=datasets.Version("1.0.0"),
            description="Mintaka: A Complex, Natural, and Multilingual Dataset for End-to-End Question Answering",
        )
    )

    DEFAULT_CONFIG_NAME = "en"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "lang": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answerText": datasets.Value("string"),
                    "category": datasets.Value("string"),
                    "complexityType": datasets.Value("string"),
                    "questionEntity": [
                        {
                            "name": datasets.Value("string"),
                            "entityType": datasets.Value("string"),
                            "label": datasets.Value("string"),
                            "mention": datasets.Value("string"),
                            "span": [datasets.Value("int32")],
                        }
                    ],
                    "answerEntity": [
                        {
                            "name": datasets.Value("string"),
                            "label": datasets.Value("string"),
                        }
                    ],
                },
            ),
            supervised_keys=None,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "file": dl_manager.download_and_extract(_TRAIN_URL),
                    "lang": self.config.name,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "file": dl_manager.download_and_extract(_DEV_URL),
                    "lang": self.config.name,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "file": dl_manager.download_and_extract(_TEST_URL),
                    "lang": self.config.name,
                },
            ),
        ]
    def _generate_examples(self, file, lang):
        if lang == _ALL:
            langs = _LANGUAGES
        else:
            langs = [lang]

        key_ = 0
        logger.info("⏳ Generating examples for languages: %s", ", ".join(langs))

        with open(file, encoding="utf-8") as json_file:
            data = json.load(json_file)

        # Helper to get the label in the requested language, falling back to English.
        def get_label(labels, lang):
            if lang in labels:
                return labels[lang]
            if "en" in labels:
                return labels["en"]
            return None

        for lang in langs:
            for sample in data:
                questionEntity = [
                    {
                        "name": str(qe["name"]),
                        "entityType": qe["entityType"],
                        "label": qe["label"] if "label" in qe else "",
                        # mention and span annotations only apply to the English questions
                        "mention": qe["mention"] if lang == "en" else None,
                        "span": qe["span"] if lang == "en" else [],
                    }
                    for qe in sample["questionEntity"]
                ]

                answers = []
                if sample["answer"]["answerType"] == "entity" and sample["answer"]["answer"] is not None:
                    answers = sample["answer"]["answer"]
                elif sample["answer"]["answerType"] == "numerical" and "supportingEnt" in sample["answer"]:
                    answers = sample["answer"]["supportingEnt"]

                answerEntity = [
                    {
                        "name": str(ae["name"]),
                        "label": get_label(ae["label"], lang),
                    }
                    for ae in answers
                ]

                yield key_, {
                    "id": sample["id"],
                    "lang": lang,
                    "question": sample["question"] if lang == "en" else sample["translations"][lang],
                    "answerText": sample["answer"]["mention"],
                    "category": sample["category"],
                    "complexityType": sample["complexityType"],
                    "questionEntity": questionEntity,
                    "answerEntity": answerEntity,
                }
                key_ += 1
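

# Usage sketch (not part of the loading script): a minimal example of loading this dataset
# with the `datasets` library. The local path "mintaka.py" is an assumption about where this
# script is saved; if the dataset is published on the Hugging Face Hub, the Hub identifier
# would be passed to `load_dataset` instead.
#
#   from datasets import load_dataset
#
#   # Load the English configuration; use "all" to get every language in one dataset.
#   ds = load_dataset("mintaka.py", "en")
#   print(ds["train"][0]["question"])
#   print(ds["train"][0]["answerText"])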