# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
import json

import datasets

_DESCRIPTION = """\
MQA is a multilingual corpus of questions and answers parsed from the Common Crawl.
Questions are divided between Frequently Asked Questions (FAQ) pages and
Community Question Answering (CQA) pages.
"""
_HOMEPAGE_URL = "https://huggingface.co/datasets/clips/mqa"
_CITATION = """\
@misc{debruyn2021mfaq,
    title={MFAQ: a Multilingual FAQ Dataset},
    author={Maxime {De Bruyn} and Ehsan Lotfi and Jeska Buhmann and Walter Daelemans},
    year={2021},
    booktitle={MRQA@EMNLP2021},
}
"""
_VERSION = "0.2"
_TRAIN_BASE_URL = "data/train/data.{}.json"
_VALID_BASE_URL = "data/valid/data.{}.json"
_LANGUAGES = [
    "en", "de", "es", "fr", "ru", "ja", "it", "zh", "pt", "nl", "tr", "pl",
    "vi", "ar", "id", "uk", "ro", "no", "th", "sv", "el", "fi", "he", "da",
    "cs", "ko", "fa", "hi", "hu", "sk", "lt", "et", "hr", "is", "lv", "ms",
    "bg", "sr", "ca",
]


class MFAQLightConfig(datasets.BuilderConfig):
    """BuilderConfig for MFAQLight; the config name is the language code."""

    def __init__(self, *args, language="en", **kwargs):
        super().__init__(
            *args,
            name=f"{language}",
            **kwargs,
        )
        self.language = language


class MFAQLight(datasets.GeneratorBasedBuilder):
    # One pre-registered configuration per language. Because
    # BUILDER_CONFIG_CLASS is set, other configurations (e.g. language="all",
    # handled in _split_generators) can still be built on demand.
    BUILDER_CONFIGS = [MFAQLightConfig(language=language) for language in _LANGUAGES]
    BUILDER_CONFIG_CLASS = MFAQLightConfig

    def _info(self):
        features = {
            "question": datasets.Value("string"),
            "answer": datasets.Value("string"),
            "domain": datasets.Value("string"),
            "domain_index": datasets.Value("int32"),
            "id": datasets.Value("string"),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download one JSON-lines file per language and per split; "all"
        # expands to every language in _LANGUAGES.
        train_filenames = []
        valid_filenames = []
        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
        for language in languages:
            path = dl_manager.download_and_extract(_TRAIN_BASE_URL.format(language))
            train_filenames.append(path)
            path = dl_manager.download_and_extract(_VALID_BASE_URL.format(language))
            valid_filenames.append(path)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filenames": train_filenames},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filenames": valid_filenames},
            ),
        ]

    def _generate_examples(self, filenames):
        # Each file is JSON lines: one question/answer record per line, keyed
        # by its unique "id".
        for filename in filenames:
            with open(filename, "r", encoding="utf-8") as f:
                for line in f:
                    question = json.loads(line)
                    yield question["id"], {
                        "question": question["question"],
                        "answer": question["answer"],
                        "domain": question["domain"],
                        "domain_index": question["domain_index"],
                        "id": question["id"],
                    }
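

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the loading script). It assumes the
# script is published under a Hub repository id such as "clips/mfaq_light";
# that id is a hypothetical placeholder, not confirmed by this file.
#
#     from datasets import load_dataset
#
#     # Load a single language using its pre-registered config name:
#     dataset = load_dataset("clips/mfaq_light", "de")
#     print(dataset["train"][0]["question"])
#
#     # Since BUILDER_CONFIG_CLASS is set, a combined config can be requested
#     # by keyword, which _split_generators expands to every language:
#     dataset = load_dataset("clips/mfaq_light", language="all")
# ---------------------------------------------------------------------------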