import json

import datasets
from datasets import Array2D, Features, Sequence, Value
from datasets.info import DatasetInfo

_DESCRIPTION = """\
GQA is a dataset containing 58K questions about subgraphs extracted from Wikidata.
The data are derived from the LC-QuAD 2.0 and MCWQ datasets.
"""

# Relative paths, resolved against the location of this script.
_URLS = {
    "train": "train.jsonl",
    "test": "test.jsonl",
}


class GQAConfig(datasets.BuilderConfig):
    """BuilderConfig for GQA."""

    def __init__(self, **kwargs):
        """BuilderConfig for GQA.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(GQAConfig, self).__init__(**kwargs)


class GQA(datasets.GeneratorBasedBuilder):
    """GQA: A graph question answering dataset."""

    def _info(self) -> DatasetInfo:
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features(
                {
                    "id": Value("string"),
                    "question": Value("string"),
                    "answers": Sequence(Value("string")),
                    "sparql": Value("string"),
                    "subgraph": {
                        "entities": Sequence(Value("string")),
                        "relations": Sequence(Value("string")),
                        # Variable-length edge list: three integer indices per edge.
                        "adjacency": Array2D(shape=(None, 3), dtype="int64"),
                        "entity_labels": Sequence(Value("string")),
                        "relation_labels": Sequence(Value("string")),
                    },
                }
            ),
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        # Each line of the .jsonl file is one example; the "id" field is used
        # as the example key.
        with open(filepath, encoding="utf-8") as f:
            for row in f:
                sample = json.loads(row)
                id_ = sample["id"]
                yield id_, sample
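

# ---------------------------------------------------------------------------
# Usage sketch, not part of the loading script proper. It assumes this file is
# saved as a dataset script (e.g. gqa.py) next to train.jsonl and test.jsonl,
# the relative paths listed in _URLS; that layout is an assumption made here
# for illustration. Depending on the installed `datasets` version, loading a
# local script may also require passing trust_remote_code=True.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__)  # builds the TRAIN and TEST splits
    example = dataset["train"][0]
    print(example["question"])
    print(example["answers"])
    print(example["subgraph"]["entities"])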