# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is based on the dataset loader script for the original SQuAD2.0
# dataset:
#
# https://huggingface.co/datasets/squad_v2

# Lint as: python3
"""SQuAD-TR Dataset"""


import json

import datasets
from datasets.tasks import QuestionAnsweringExtractive


logger = datasets.logging.get_logger(__name__)

_HOMEPAGE = "https://github.com/boun-tabi/squad-tr"

_CITATION = """\
@article{budur2023squadtr,
    title={Building Efficient and Effective OpenQA Systems for Low-Resource Languages},
    author={todo},
    journal={todo},
    year={2023}
}
"""

_DESCRIPTION = """\
SQuAD-TR is a machine-translated version of the original SQuAD2.0 dataset into Turkish.
"""

_VERSION = "1.0.0"

_DATA_URL = _HOMEPAGE + "/raw/beta/data"
_DATA_URLS = {
    "default": {
        "train": f"{_DATA_URL}/squad-tr-train-v{_VERSION}.json.gz",
        "dev": f"{_DATA_URL}/squad-tr-dev-v{_VERSION}.json.gz",
    },
    "excluded": {
        "train": f"{_DATA_URL}/squad-tr-train-v{_VERSION}-excluded.json.gz",
        "dev": f"{_DATA_URL}/squad-tr-dev-v{_VERSION}-excluded.json.gz",
    },
}


class SquadTRConfig(datasets.BuilderConfig):
    """BuilderConfig for SQuAD-TR."""

    def __init__(self, **kwargs):
        """BuilderConfig for SQuAD-TR.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(SquadTRConfig, self).__init__(**kwargs)


class SquadTR(datasets.GeneratorBasedBuilder):
    """SQuAD-TR: Machine-translated version of the original SQuAD2.0 dataset into Turkish."""

    VERSION = datasets.Version(_VERSION)

    BUILDER_CONFIGS = [
        SquadTRConfig(
            name="default",
            version=datasets.Version(_VERSION),
            description="SQuAD-TR default version.",
        ),
        SquadTRConfig(
            name="excluded",
            version=datasets.Version(_VERSION),
            description="SQuAD-TR excluded version.",
        ),
        SquadTRConfig(
            name="openqa",
            version=datasets.Version(_VERSION),
            description="SQuAD-TR OpenQA version.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        # We change the contents of the "answers" field based on the selected
        # configuration. Specifically, we exclude the "answer_start" field for
        # the "excluded" and "openqa" configurations.
        if self.config.name in ["excluded", "openqa"]:
            answers_feature = datasets.features.Sequence(
                {
                    "text": datasets.Value("string"),
                }
            )
        else:
            answers_feature = datasets.features.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            )
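
        # For instance, a decoded example's "answers" value has the form
        # {"text": [...], "answer_start": [...]} under the "default"
        # configuration, and just {"text": [...]} under the other two.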
        # Constructing our dataset features.
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answers": answers_feature,
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            # Note: QuestionAnsweringExtractive expects "answers" to include
            # "answer_start", which holds only for the "default" configuration.
            task_templates=[
                QuestionAnsweringExtractive(
                    question_column="question", context_column="context", answers_column="answers"
                )
            ],
        )

    def _split_generators(self, dl_manager):
        # If the selected configuration is "default" or "excluded", we directly
        # load the files from the URLs in _DATA_URLS. For the "openqa"
        # configuration, we combine the datapoints from the two different files
        # used in the "default" and "excluded" configurations.
        if self.config.name == "openqa":
            default_files = dl_manager.download_and_extract(_DATA_URLS["default"])
            excluded_files = dl_manager.download_and_extract(_DATA_URLS["excluded"])
            train_file_paths = [default_files["train"], excluded_files["train"]]
            dev_file_paths = [default_files["dev"], excluded_files["dev"]]
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath_list": train_file_paths}),
                datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath_list": dev_file_paths}),
            ]
        else:
            config_urls = _DATA_URLS[self.config.name]
            downloaded_files = dl_manager.download_and_extract(config_urls)
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
                datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            ]

    def _generate_examples(self, filepath=None, filepath_list=None):
        """This function returns the examples in the raw (text) form."""
        assert filepath or filepath_list
        if filepath:
            filepath_list = [filepath]

        # Chaining together the generators for the different filepaths.
        generators = [self._generate_examples_from_filepath(f) for f in filepath_list]
        for generator in generators:
            for element in generator:
                yield element

    def _generate_examples_from_filepath(self, filepath):
        logger.info("generating examples from = %s", filepath)
        key = 0
        with open(filepath, encoding="utf-8") as f:
            squad = json.load(f)
            for article in squad["data"]:
                title = article.get("title", "")
                for paragraph in article["paragraphs"]:
                    context = paragraph["context"]  # Do not strip leading blank spaces GH-2585
                    for qa in paragraph["qas"]:
                        # Constructing our answers dictionary. Note that it
                        # won't include the answer_start field in the
                        # "excluded" and "openqa" modes.
                        answers_dictionary = {
                            "text": [answer["text"] for answer in qa["answers"]],
                        }
                        if self.config.name not in ["excluded", "openqa"]:
                            answers_dictionary["answer_start"] = [answer["answer_start"] for answer in qa["answers"]]

                        # Constructing our datapoint.
                        datapoint = {
                            "title": title,
                            "context": context,
                            "question": qa["question"],
                            "id": qa["id"],
                            "answers": answers_dictionary,
                        }

                        # Features currently used are "context", "question", and "answers".
                        # Others are extracted here for ease of future expansion.
                        yield key, datapoint
                        key += 1
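

# A minimal usage sketch (not part of the loader itself). The Hub id
# "boun-tabi/squad_tr" below is an assumption; substitute the actual dataset
# repository id, or pass a local path to this script instead. Depending on
# your `datasets` version, script-based datasets may also require
# trust_remote_code=True:
#
#     from datasets import load_dataset
#
#     squad_tr = load_dataset("boun-tabi/squad_tr", "default")
#     example = squad_tr["train"][0]
#     print(example["question"], example["answers"])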