"""SQuAD Bengali Dataset"""
import os
import json
import datasets
# NOTE: task templates (including QuestionAnsweringExtractive) were deprecated
# in later releases of `datasets`; this import targets the API available when
# the script was written.
from datasets.tasks import QuestionAnsweringExtractive
_CITATION = """\
@misc{bhattacharjee2021banglabert,
title={BanglaBERT: Combating Embedding Barrier in Multilingual Models for Low-Resource Language Understanding},
author={Abhik Bhattacharjee and Tahmid Hasan and Kazi Samin and Md Saiful Islam and M. Sohel Rahman and Anindya Iqbal and Rifat Shahriyar},
year={2021},
eprint={2101.00204},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
SQuAD-bn is derived from the SQuAD-2.0 and TyDI-QA datasets.
"""
_HOMEPAGE = "https://github.com/csebuetnlp/banglabert"
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
_URL = "https://huggingface.co/datasets/csebuetnlp/squad_bn/resolve/main/data/squad_bn.tar.bz2"
_VERSION = datasets.Version("0.0.1")
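
# Usage sketch (illustrative; not executed by the loading script): once this
# file is hosted on the Hub, the dataset can be loaded by its repo id, which
# is taken from _URL above.
#
#   import datasets
#   ds = datasets.load_dataset("csebuetnlp/squad_bn")
#   print(ds["train"][0])
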
class SquadBn(datasets.GeneratorBasedBuilder):
"""SQuAD Bengali Dataset"""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="squad_bn",
version=_VERSION,
description=_DESCRIPTION,
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"title": datasets.Value("string"),
"context": datasets.Value("string"),
"question": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
task_templates=[
QuestionAnsweringExtractive(
question_column="question", context_column="context", answers_column="answers"
)
],
)
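
    # Shape of a single yielded example under the features declared in _info
    # (values are illustrative placeholders):
    #   {"id": "...", "title": "...", "context": "...", "question": "...",
    #    "answers": {"text": ["..."], "answer_start": [0]}}
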
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
        # Download the archive and extract it; the split files live under a
        # top-level "squad_bn/" directory inside the archive.
        data_dir = os.path.join(dl_manager.download_and_extract(_URL), "squad_bn")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(data_dir, "train.json"),
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(data_dir, "test.json"),
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(data_dir, "validation.json"),
},
),
]
def _generate_examples(self, filepath):
"""Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            # Walk the SQuAD-style hierarchy: article -> paragraph -> question-answer pairs.
            for example in data["data"]:
title = example.get("title", "")
for paragraph in example["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
                        # Unanswerable questions (inherited from SQuAD 2.0) may
                        # have empty answer lists; the two lists stay aligned
                        # by position.
                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                        answers = [answer["text"].strip() for answer in qa["answers"]]
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"answers": {
"answer_start": answer_starts,
"text": answers,
},
}
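
# Expected input layout, inferred from the parsing in _generate_examples above:
# each split file is SQuAD-style JSON of the form
#   {"data": [{"title": "...",
#              "paragraphs": [{"context": "...",
#                              "qas": [{"id": "...",
#                                       "question": "...",
#                                       "answers": [{"text": "...",
#                                                    "answer_start": 0}]}]}]}]}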