Languages:
Turkish
Multilinguality:
monolingual
Size Categories:
100K<n<1M
Language Creators:
machine-generated
Annotations Creators:
machine-generated
Source Datasets:
extended|squad
License:
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is based on the dataset loader script for the original SQuAD2.0
# dataset:
#
#     https://huggingface.co/datasets/squad_v2

# Lint as: python3
"""SQuAD-TR Dataset"""


import json

import datasets
from datasets.tasks import QuestionAnsweringExtractive


logger = datasets.logging.get_logger(__name__)

_HOMEPAGE = "https://github.com/boun-tabi/squad-tr"

_CITATION = """\
@article{
  budur2023squadtr,
  title={Building Efficient and Effective OpenQA Systems for Low-Resource Languages},
  author={todo},
  journal={todo},
  year={2023}
}
"""

_DESCRIPTION = """\
SQuAD-TR is a machine translated version of the original SQuAD2.0 dataset into
Turkish.
"""

_VERSION = "1.0.0"

_DATA_URL = _HOMEPAGE + "/raw/beta/data"
_DATA_URLS = {
    "default": {
        "train": f"{_DATA_URL}/squad-tr-train-v{_VERSION}.json.gz",
        "dev": f"{_DATA_URL}/squad-tr-dev-v{_VERSION}.json.gz",
    },
    "excluded": {
        "train": f"{_DATA_URL}/squad-tr-train-v{_VERSION}-excluded.json.gz",
        "dev": f"{_DATA_URL}/squad-tr-dev-v{_VERSION}-excluded.json.gz",
    }
}


class SquadTRConfig(datasets.BuilderConfig):
    """BuilderConfig for SQuAD-TR."""

    def __init__(self, **kwargs):
        """BuilderConfig for SQuAD-TR.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(SquadTRConfig, self).__init__(**kwargs)


class SquadTR(datasets.GeneratorBasedBuilder):
    """SQuAD-TR: Machine translated version of the original SQuAD2.0 dataset into Turkish."""

    VERSION = datasets.Version(_VERSION)

    BUILDER_CONFIGS = [
        SquadTRConfig(
            name="default",
            version=datasets.Version(_VERSION),
            description="SQuAD-TR default version.",
        ),
        SquadTRConfig(
            name="excluded",
            version=datasets.Version(_VERSION),
            description="SQuAD-TR excluded version.",
        ),
        SquadTRConfig(
            name="openqa",
            version=datasets.Version(_VERSION),
            description="SQuAD-TR OpenQA version.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):

        # We change the contents of the "answers" field based on the
        # configuration selected. Specifically, we are excluding the
        # "answer_start" field for the "excluded" and "openqa" configurations.
        if self.config.name in ["excluded", "openqa"]:
            answers_feature = datasets.features.Sequence({
                "text": datasets.Value("string"),
            })
        else:
            answers_feature = datasets.features.Sequence({
                "text": datasets.Value("string"),
                "answer_start": datasets.Value("int32"),
            })
  
        # Constructing our dataset features.
        features = datasets.Features({
            "id": datasets.Value("string"),
            "title": datasets.Value("string"),
            "context": datasets.Value("string"),
            "question": datasets.Value("string"),
            "answers": answers_feature
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[
                QuestionAnsweringExtractive(question_column="question", context_column="context", answers_column="answers")
            ],
        )

    def _split_generators(self, dl_manager):

        # If the configuration selected is "default" or "excluded", we directly
        # load the files from the URLs in _DATA_URLS. For the "openqa"
        # configuration, we combine the datapoints from the two different files
        # used in the "default" and "excluded" configurations.
        if self.config.name == "openqa":
            default_files = dl_manager.download_and_extract(_DATA_URLS["default"])
            excluded_files = dl_manager.download_and_extract(_DATA_URLS["excluded"])
            train_file_paths = [default_files["train"], excluded_files["train"]]
            dev_file_paths = [default_files["dev"], excluded_files["dev"]]

            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath_list": train_file_paths}),
                datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath_list": dev_file_paths}),
            ]
        else: 
            config_urls = _DATA_URLS[self.config.name]
            downloaded_files = dl_manager.download_and_extract(config_urls)

            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
                datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            ]


    def _generate_examples(self, filepath=None, filepath_list=None):
        """This function returns the examples in the raw (text) form."""
        assert filepath or filepath_list
        if filepath:
            filepath_list = [filepath]

        # Combining the generators for the different filepaths
        generators = [self._generate_examples_from_filepath(f) for f in filepath_list]
        for generator in generators:
            for element in generator:
                yield element

    def _generate_examples_from_filepath(self, filepath):
        logger.info("generating examples from = %s", filepath)
        key = 0
        with open(filepath, encoding="utf-8") as f:
            squad = json.load(f)
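            # The loaded file is expected to follow the standard SQuAD-style
            # JSON layout, matching the keys accessed below (a sketch with
            # placeholder values):
            #
            # {
            #   "data": [
            #     {
            #       "title": "...",
            #       "paragraphs": [
            #         {
            #           "context": "...",
            #           "qas": [
            #             {
            #               "id": "...",
            #               "question": "...",
            #               "answers": [{"text": "...", "answer_start": 0}]
            #             }
            #           ]
            #         }
            #       ]
            #     }
            #   ]
            # }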
            for article in squad["data"]:
                title = article.get("title", "")
                for paragraph in article["paragraphs"]:
                    context = paragraph["context"]  # Do not strip leading blank spaces GH-2585
                    for qa in paragraph["qas"]:
                        # Constructing our answers dictionary. Note that the
                        # answers_dictionary won't include the answer_start
                        # field in the "excluded" and "openqa" modes.
                        answers_dictionary = {
                            "text": [answer["text"] for answer in qa["answers"]],
                        }
                        if self.config.name not in ["excluded", "openqa"]:
                            answers_dictionary["answer_start"] = [answer["answer_start"] for answer in qa["answers"]]

                        # Constructing our datapoint
                        datapoint = {
                            "title": title,
                            "context": context,
                            "question": qa["question"],
                            "id": qa["id"],
                            "answers": answers_dictionary,
                        }

                        # Features currently used are "context", "question", and "answers".
                        # Others are extracted here for ease of future expansion.
                        yield key, datapoint
                        key += 1
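

if __name__ == "__main__":
    # Minimal local smoke test (a sketch, not part of the loader itself).
    # Depending on the installed version of the `datasets` library, loading a
    # local script path may require `trust_remote_code=True` or may no longer
    # be supported at all.
    ds = datasets.load_dataset(__file__, "default")
    print(ds)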