import json

import datasets

_DESCRIPTION = """MQA is a multilingual corpus of questions and answers parsed from the Common Crawl. Questions are divided between Frequently Asked Questions (FAQ) pages and Community Question Answering (CQA) pages."""
_HOMEPAGE_URL = "https://huggingface.co/datasets/clips/mqa"
_CITATION = """
@misc{debruyn2021mfaq,
    title={MFAQ: a Multilingual FAQ Dataset},
    author={Maxime {De Bruyn} and Ehsan Lotfi and Jeska Buhmann and Walter Daelemans},
    year={2021},
    booktitle={MRQA@EMNLP2021},
}
"""
_VERSION = "0.4"

# Per-language data files, relative to the dataset repository; "{}" is filled
# with a language code from _LANGUAGES.
_TRAIN_BASE_URL = "data/train/data.{}.json"
_VALID_BASE_URL = "data/valid/data.{}.json"

# ISO 639-1 codes of the languages available in the corpus.
_LANGUAGES = [
    "en", "de", "es", "fr",
    "ru", "ja", "it", "zh", "pt",
    "nl", "tr", "pl", "vi", "ar",
    "id", "uk", "ro", "no", "th",
    "sv", "el", "fi", "he", "da",
    "cs", "ko", "fa", "hi", "hu",
    "sk", "lt", "et", "hr", "is",
    "lv", "ms", "bg", "sr", "ca",
]


class MFAQLightConfig(datasets.BuilderConfig):
    """BuilderConfig for one language of MFAQ, or "all" for every language."""

    def __init__(self, *args, language="en", negatives=True, **kwargs):
        super().__init__(
            *args,
            name=language,
            **kwargs,
        )
        self.language = language
        # Stored on the config for downstream use; this script itself does not
        # filter examples on it.
        self.negatives = negatives


class MFAQLight(datasets.GeneratorBasedBuilder):
    # One config per language, plus an aggregate "all" config.
    BUILDER_CONFIGS = [MFAQLightConfig(language=language) for language in _LANGUAGES]
    BUILDER_CONFIGS.append(MFAQLightConfig(language="all"))
    BUILDER_CONFIG_CLASS = MFAQLightConfig

    def _info(self):
        features = {
            "question": datasets.Value("string"),
            "answer": datasets.Value("string"),
            "domain": datasets.Value("string"),
            "domain_index": datasets.Value("int32"),
            "id": datasets.Value("string"),
            # "negative", "candidates" and "margin_score" are optional in the
            # raw data; _generate_examples fills in empty defaults when absent.
            "negative": datasets.Value("string"),
            "candidates": [datasets.Value("string")],
            "margin_score": datasets.Value("float32"),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the train and validation files for the selected language,
        # or for every language when the "all" config is used.
        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
        train_filenames = []
        valid_filenames = []
        for language in languages:
            train_filenames.append(dl_manager.download_and_extract(_TRAIN_BASE_URL.format(language)))
            valid_filenames.append(dl_manager.download_and_extract(_VALID_BASE_URL.format(language)))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filenames": train_filenames},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filenames": valid_filenames},
            ),
        ]

    def _generate_examples(self, filenames):
        # Files are in JSON Lines format: one question-answer record per line.
        for filename in filenames:
            with open(filename, "r", encoding="utf-8") as f:
                for line in f:
                    question = json.loads(line)
                    yield question["id"], {
                        "question": question["question"],
                        "answer": question["answer"],
                        "domain": question["domain"],
                        "domain_index": question["domain_index"],
                        "id": question["id"],
                        # Optional fields: fall back to empty values when absent.
                        "negative": question.get("negative", ""),
                        "candidates": question.get("candidates", []),
                        "margin_score": question.get("margin_score", 0),
                    }
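

# A minimal usage sketch. The dataset id below is an assumption taken from
# _HOMEPAGE_URL; point load_dataset at wherever this script is actually hosted.
#
#     from datasets import load_dataset
#
#     # Load the German subset; pass "all" to concatenate every language.
#     dataset = load_dataset("clips/mqa", "de")
#     print(dataset["train"][0]["question"])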