"""TODO: DATASET SHORT DESCRIPTION"""


import json

import datasets
from datasets.tasks import TextClassification


_DESCRIPTION = """TODO: DATASET DESCRIPTION"""

_CITATION = """TODO: CITATIONS"""

# TODO: official mirror url
_URL = "https://huggingface.co/datasets/rsgrava/triviaqa-squad-web-br/resolve/main/"
_URLS = {
    "web-train": _URL + "web-train-squad-br.json",
    "web-dev": _URL + "web-dev-squad-br.json",
}


class SquadV1PtBr(datasets.GeneratorBasedBuilder):
    """TODO: SHORT DATASET DESCRIPTION"""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return dataset metadata: feature schema, citation and homepage."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "qas": datasets.features.Sequence(
                        {
                            "question": datasets.Value("string"),
                            "answers": datasets.features.Sequence(
                                {
                                    "text": datasets.Value("string"),
                                }
                            ),
                        }
                    ),
                }
            ),
            # No default supervised_keys (as we have to pass both question and context as input).
            supervised_keys=None,
            homepage="TODO: HOMEPAGE",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the JSON-lines files and map them to train/validation splits.

        Args:
            dl_manager: `datasets.DownloadManager` used to fetch `_URLS`.

        Returns:
            A list of `datasets.SplitGenerator`, one per split.
        """
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["web-train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["web-dev"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from the JSON-lines file at *filepath*.

        Each line is a JSON object with "id", "context" and "qas" fields.
        Examples whose source "id" was already seen are skipped; the yielded
        key is a sequential counter over the examples actually emitted.
        """
        key = 0
        # Set for O(1) duplicate detection (was a list scan per example, O(n^2) overall).
        seen_ids = set()
        # Explicit encoding: the data is Portuguese text, so don't depend on the locale default.
        with open(filepath, "r", encoding="utf-8") as f:
            # Stream line by line instead of materializing the whole file in memory.
            for line in f:
                example = json.loads(line)
                source_id = example["id"]
                if source_id in seen_ids:
                    # Skip duplicate entries.
                    continue
                seen_ids.add(source_id)
                qa = example["qas"]
                answers = [{"text": answer.strip()} for answer in qa["answers"]]
                qas = [{"question": qa["question"].strip(), "answers": answers}]
                # NOTE(review): the sequential counter — not the source "id" — is
                # emitted as the example id; kept as-is to match original behavior,
                # but confirm this is intentional (the source id is discarded).
                yield key, {
                    "id": key,
                    "context": example["context"].strip(),
                    "qas": qas,
                }
                key += 1