"""TODO: DATASET SHORT DESCRIPTION"""

import json

import datasets
from datasets.tasks import TextClassification

_DESCRIPTION = """TODO: DATASET DESCRIPTION"""

_CITATION = """TODO: CITATIONS"""

_URL = "https://huggingface.co/datasets/rsgrava/deepage2_qa_dataset/resolve/main/"
_URLS = {
    "train": _URL + "expanded_ds-train.json",
    "dev": _URL + "expanded_ds-test.json",
}


class SquadV1PtBr(datasets.GeneratorBasedBuilder):
    """TODO: SHORT DATASET DESCRIPTION"""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the dataset metadata: features, description and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "source_text": datasets.Value("string"),
                    "target_text": datasets.Value("string"),
                }
            ),
            # No default supervised_keys (as we have to pass both question and context as input).
            supervised_keys=None,
            homepage="TODO: HOMEPAGE",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the train/dev JSON files and map them onto dataset splits.

        Args:
            dl_manager: ``datasets.DownloadManager`` used to fetch ``_URLS``.

        Returns:
            A list of ``SplitGenerator``s for TRAIN and VALIDATION.
        """
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from a SQuAD-format JSON file.

        One example is emitted per paragraph: ``source_text`` is the
        paragraph context and ``target_text`` is the concatenation of the
        paragraph's questions, with question IDs deduplicated globally
        across the whole file.

        Args:
            filepath: path to a JSON file with the SQuAD layout
                (``data -> paragraphs -> qas``).
        """
        # JSON is UTF-8 by spec; be explicit instead of relying on the
        # platform default encoding. Load once, then close the file —
        # the generator does not need the handle while yielding.
        with open(filepath, encoding="utf-8") as f:
            squad = json.load(f)

        key = 0
        # A set gives O(1) duplicate checks; the previous list-membership
        # scan was O(n) per question (O(n^2) overall).
        seen_ids = set()
        for article in squad["data"]:
            for paragraph in article["paragraphs"]:
                source_text = paragraph["context"].strip()
                questions = []
                for qa in paragraph["qas"]:
                    id_ = qa["id"]
                    if id_ in seen_ids:
                        # skip duplicate entries
                        continue
                    seen_ids.add(id_)
                    questions.append(qa["question"].strip())
                # NOTE(review): questions are joined with an empty separator,
                # so they run together in target_text — confirm this is the
                # intended training format.
                target_text = "".join(questions)
                yield key, {
                    "source_text": source_text,
                    "target_text": target_text,
                }
                key += 1