# KorQuADv1.py — HuggingFace `datasets` loading script for KorQuAD v1.0.
# Originally contributed by jinmang2 ("add datasets script", commit 33eb32b).
import json

import datasets
import pandas as pd
# Official download URLs for the KorQuAD v1.0 train/dev splits.
_DATA_URLS = {
    "train": "https://korquad.github.io/dataset/KorQuAD_v1.0_train.json",
    "dev": "https://korquad.github.io/dataset/KorQuAD_v1.0_dev.json",
}
# Version of this loading script (not of the KorQuAD data release).
_VERSION = "0.0.0"
# License under which the underlying KorQuAD data is distributed.
_LICENSE = "CC BY-ND 2.0 KR"
# BibTeX entry for the KorQuAD 1.0 paper (arXiv:1909.07005).
_CITATION = """\
@misc{lim2019korquad1.0,
title={KorQuAD1.0: Korean QA Dataset for Machine Reading Comprehension},
author={Seungyoung Lim, Myungji Kim, Jooyoul Lee},
year={2019},
eprint={1909.07005},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_HOMEPAGE = "https://korquad.github.io/KorQuad%201.0/"
_DESCRIPTION = """\
KorQuAD 1.0 (Korean Question Answering Dataset v1.0)
KorQuAD 1.0 is a dataset created for Korean Machine Reading Comprehension.
The answers to all your questions are made up of some subareas in the corresponding Wikipedia article paragraphs.
It is structured in the same way as the Stanford Question Answering Dataset (SQuAD) v1.0.
"""
class KorQuADV1Config(datasets.BuilderConfig):
    """BuilderConfig for KorQuAD v1.0.

    Carries the split download URLs and the feature schema used by the
    builder, on top of the standard ``datasets.BuilderConfig`` fields.
    """

    def __init__(self, data_url, features, **kwargs):
        # Pin the config to this script's module-level version string.
        super().__init__(version=datasets.Version(_VERSION, ""), **kwargs)
        self.data_url = data_url
        self.features = features
class KorQuADV1(datasets.GeneratorBasedBuilder):
    """Dataset builder for KorQuAD v1.0 (Korean MRC, SQuAD v1.0-style layout)."""

    DEFAULT_CONFIG_NAME = "korquad"
    BUILDER_CONFIGS = [
        KorQuADV1Config(
            name="korquad",
            data_url=_DATA_URLS,
            features=datasets.Features(
                {
                    # Parallel lists: one entry per annotated answer span.
                    "answers": datasets.Sequence(
                        feature={
                            "text": datasets.Value(dtype="string"),
                            "answer_start": datasets.Value(dtype="int32"),
                        },
                    ),
                    "context": datasets.Value(dtype="string"),
                    "guid": datasets.Value(dtype="string"),
                    "question": datasets.Value(dtype="string"),
                    "title": datasets.Value(dtype="string"),
                }
            ),
        ),
    ]

    def _info(self):
        """Return dataset metadata (features, description, homepage, citation, license)."""
        return datasets.DatasetInfo(
            features=self.config.features,
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the raw JSON files and map them to train/validation splits."""
        data_path = dl_manager.download(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={"data_file": data_path["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={"data_file": data_path["dev"]},
            ),
        ]

    def _generate_examples(self, data_file: str):
        """Yield ``(key, example)`` pairs from one KorQuAD JSON file.

        The file follows the SQuAD v1.0 layout:
        ``{"data": [{"title", "paragraphs": [{"context", "qas": [...]}]}]}``.

        Previously the file was parsed with ``pd.read_json`` into a DataFrame
        and immediately converted back to Python objects; plain ``json.load``
        parses the nested structure directly and avoids pandas' orient
        inference over the mixed top-level keys ("version" scalar + "data" list).
        """
        idx = 0
        with open(data_file, encoding="utf-8") as f:
            korquad = json.load(f)
        for article in korquad["data"]:
            title = article["title"]
            for paragraph in article["paragraphs"]:
                context = paragraph["context"]
                for qa in paragraph["qas"]:
                    # Flatten the list of answer dicts into parallel lists,
                    # matching the Sequence feature declared above.
                    text = [answer["text"] for answer in qa["answers"]]
                    answer_start = [answer["answer_start"] for answer in qa["answers"]]
                    yield idx, {
                        "guid": qa["id"],
                        "question": qa["question"],
                        "answers": {"text": text, "answer_start": answer_start},
                        "context": context,
                        "title": title,
                    }
                    idx += 1