File size: 6,632 Bytes
afb2e90
 
 
 
bd948a0
afb2e90
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32e668e
afb2e90
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dd8683b
 
 
afb2e90
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11f1c7d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
# -*- coding: utf-8 -*-
"""BanglaRQA"""

import json
import os
import datasets
from datasets.tasks import QuestionAnsweringExtractive

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{ekram-etal-2022-banglarqa,
    title = "{B}angla{RQA}: A Benchmark Dataset for Under-resourced {B}angla Language Reading Comprehension-based Question Answering with Diverse Question-Answer Types",
    author = "Ekram, Syed Mohammed Sartaj  and
      Rahman, Adham Arik  and
      Altaf, Md. Sajid  and
      Islam, Mohammed Saidul  and
      Rahman, Mehrab Mustafy  and
      Rahman, Md Mezbaur  and
      Hossain, Md Azam  and
      Kamal, Abu Raihan Mostofa",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-emnlp.186",
    pages = "2518--2532",
    abstract = "High-resource languages, such as English, have access to a plethora of datasets with various question-answer types resembling real-world reading comprehension. However, there is a severe lack of diverse and comprehensive question-answering datasets in under-resourced languages like Bangla. The ones available are either translated versions of English datasets with a niche answer format or created by human annotations focusing on a specific domain, question type, or answer type. To address these limitations, this paper introduces BanglaRQA, a reading comprehension-based Bangla question-answering dataset with various question-answer types. BanglaRQA consists of 3,000 context passages and 14,889 question-answer pairs created from those passages. The dataset comprises answerable and unanswerable questions covering four unique categories of questions and three types of answers. In addition, this paper also implemented four different Transformer models for question-answering on the proposed dataset. The best-performing model achieved an overall 62.42{\%} EM and 78.11{\%} F1 score. However, detailed analyses showed that the performance varies across question-answer types, leaving room for substantial improvement of the model performance. Furthermore, we demonstrated the effectiveness of BanglaRQA as a training resource by showing strong results on the bn{\_}squad dataset. Therefore, BanglaRQA has the potential to contribute to the advancement of future research by enhancing the capability of language models. The dataset and codes are available at https://github.com/sartajekram419/BanglaRQA",
}
"""

# Short human-readable summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\
BanglaRQA is a human-annotated Bangla Question Answering (QA) dataset with diverse question-answer types.
"""

# Base URL of the raw data files hosted on the Hugging Face Hub.
_URL = "https://huggingface.co/datasets/sartajekram/BanglaRQA/resolve/main/"
# One JSON file per split; keys match the lookups in _split_generators.
_URLS = {
    "train": _URL + "Train.json",
    "dev": _URL + "Validation.json",
    "test": _URL + "Test.json"
}


class BanglaRQAConfig(datasets.BuilderConfig):
    """BuilderConfig for the BanglaRQA dataset.

    This subclass defines no options of its own; every keyword argument
    is forwarded unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        """Forward all keyword arguments to the parent BuilderConfig.

        Args:
          **kwargs: keyword arguments passed through to super.
        """
        super().__init__(**kwargs)


class BanglaRQA(datasets.GeneratorBasedBuilder):
    """Dataset builder for BanglaRQA.

    Downloads the three split files (Train/Validation/Test JSON) from the
    Hugging Face Hub and yields one example per question-answer pair.
    """

    BUILDER_CONFIGS = [
        BanglaRQAConfig(
            name="BanglaRQA",
            version=datasets.Version("0.0.1", ""),
            description=_DESCRIPTION,
        ),
    ]

    def _info(self):
        """Return the dataset metadata: feature schema, homepage, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "passage_id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question_id": datasets.Value("string"),
                    "question_text": datasets.Value("string"),
                    "is_answerable": datasets.Value("string"),
                    "question_type": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {
                            "answer_text": datasets.Value("string"),
                            "answer_type": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/sartajekram419/BanglaRQA",
            citation=_CITATION,
            task_templates=[
                QuestionAnsweringExtractive(
                    # Fix: the features above expose "question_text", not
                    # "question" — the original template pointed at a
                    # nonexistent column.
                    question_column="question_text",
                    context_column="context",
                    answers_column="answers",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download each split file and map it to a SplitGenerator."""
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from one split's JSON file.

        Args:
          filepath: local path of the downloaded split JSON; its top-level
            "data" list holds passages, each with a "qas" list.
        """
        logger.info("generating examples from = %s", filepath)
        key = 0
        with open(filepath, encoding="utf-8") as f:
            banglaRQA = json.load(f)
            for article in banglaRQA["data"]:
                for qa in article["qas"]:
                    # Fix: pair the parallel answer_text/answer_type lists
                    # instead of hard-coding indices [0] and [1]; the
                    # original raised IndexError for QA pairs with fewer
                    # than two reference answers, and silently dropped any
                    # beyond the second.
                    answers = [
                        {"answer_text": text, "answer_type": atype}
                        for text, atype in zip(
                            qa["answers"]["answer_text"],
                            qa["answers"]["answer_type"],
                        )
                    ]
                    yield key, {
                        "passage_id": article["passage_id"],
                        "title": article["title"],
                        "context": article["context"],
                        "question_id": qa["question_id"],
                        "question_text": qa["question_text"],
                        "is_answerable": qa["is_answerable"],
                        "question_type": qa["question_type"],
                        "answers": answers,
                    }
                    key += 1