|
import json |
|
import textwrap |
|
|
|
import datasets |
|
from datasets.tasks import QuestionAnsweringExtractive |
|
|
|
|
|
_CITATION = """\ |
|
@article{tydiqa, |
|
title = {TyDi QA: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages}, |
|
author = {Jonathan H. Clark and Eunsol Choi and Michael Collins and Dan Garrette and Tom Kwiatkowski and Vitaly Nikolaev and Jennimaria Palomaki} |
|
year = {2020}, |
|
journal = {Transactions of the Association for Computational Linguistics} |
|
} |
|
""" |
|
|
|
|
|
# Human-readable dataset summary surfaced through `DatasetInfo.description`.
# Runtime string — content intentionally left untouched.
_DESCRIPTION = """\
TyDi QA is a question answering dataset covering 11 typologically diverse languages with 204K question-answer pairs.
The languages of TyDi QA are diverse with regard to their typology -- the set of linguistic features that each language
expresses -- such that we expect models performing well on this set to generalize across a large number of the languages
in the world. It contains language phenomena that would not be found in English-only corpora. To provide a realistic
information-seeking task and avoid priming effects, questions are written by people who want to know the answer, but
don’t know the answer yet, (unlike SQuAD and its descendents) and the data is collected directly in each language without
the use of translation (unlike MLQA and XQuAD).
"""
|
|
|
|
|
_LANG = ["arabic", "bengali", "english", "finnish", "indonesian", "japanese", "korean", "russoam", "swahili", "telugu", "thai"] |
|
|
|
# Download URL template on the Hugging Face Hub; filled in via
# _URL.format(language=..., split=...) in _split_generators, where split is
# "train" or "dev".
_URL = "https://huggingface.co/datasets/khalidalt/tydiqa-goldp/resolve/main/{split}/{language}-{split}.jsonl"

# Dataset version shared by every language BuilderConfig.
_VERSION = datasets.Version("1.1.0", "")
|
|
|
|
|
class tydiqa_GoldP(datasets.GeneratorBasedBuilder):
    """Builder for the TyDi QA Gold Passage (GoldP) task.

    One :class:`datasets.BuilderConfig` per language in ``_LANG``. Each config
    downloads pre-processed per-language JSONL files (train/dev) from the
    Hugging Face Hub and yields one example per line.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            description=f"tydiqa-GoldP language {lang}",
            version=_VERSION,
        )
        for lang in _LANG
    ]

    def _info(self):
        """Return dataset metadata (features, citation, task templates)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_title": datasets.Value("string"),
                    "passage_text": datasets.Value("string"),
                    "question_text": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "start_byte": datasets.Value("int32"),
                            "limit_byte": datasets.Value("int32"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/google-research-datasets/tydiqa",
            citation=_CITATION,
            task_templates=[
                # Fix: template columns must name features declared above; the
                # original referenced non-existent "question"/"context".
                # NOTE(review): the answers feature uses byte offsets
                # (start_byte/limit_byte) rather than the template's usual
                # "answer_start" — confirm the template cast works downstream.
                QuestionAnsweringExtractive(
                    question_column="question_text",
                    context_column="passage_text",
                    answers_column="answers",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download this config's train/dev JSONL files and return SplitGenerators."""
        language = self.config.name
        # Map the canonical split names to the directory/file naming used on the Hub.
        splits = {datasets.Split.TRAIN: "train", datasets.Split.VALIDATION: "dev"}

        data_urls = {
            split: _URL.format(language=language, split=splits[split]) for split in splits
        }

        dl_paths = dl_manager.download(data_urls)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": dl_paths[split]},
            )
            for split in splits
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs, one per JSONL line.

        The line index serves as the example key; each line is assumed to be a
        JSON object already matching the features declared in ``_info``.
        """
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                yield idx, json.loads(line)
|
|
|
|
|
|
|
|
|
|