# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""SQUALL: Lexical-level Supervised Table Question Answering Dataset."""


import json

import datasets
from datasets.tasks import QuestionAnsweringExtractive


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{Shi:Zhao:Boyd-Graber:Daume-III:Lee-2020,
    Title = {On the Potential of Lexico-logical Alignments for Semantic Parsing to {SQL} Queries},
    Author = {Tianze Shi and Chen Zhao and Jordan Boyd-Graber and Hal {Daum\'{e} III} and Lillian Lee},
    Booktitle = {Findings of EMNLP},
    Year = {2020},
}
"""

_DESCRIPTION = """\
To explore the utility of fine-grained, lexical-level supervision, the authors
introduce SQUALL, a dataset that enriches 11,276 English-language
WikiTableQuestions questions with manually created SQL equivalents, plus
alignments between SQL and question fragments.
"""

# Download the raw files; the GitHub .../tree/... URLs serve HTML pages, not data.
_URL = "https://raw.githubusercontent.com/tzshi/squall/main/data/"
_URLS = {
    "squall": _URL + "squall.json",
    "wtq-test": _URL + "wtq-test.json",
    "dev-0": _URL + "dev-0.ids",
    "dev-1": _URL + "dev-1.ids",
    "dev-2": _URL + "dev-2.ids",
    "dev-3": _URL + "dev-3.ids",
    "dev-4": _URL + "dev-4.ids",
}


class SquallConfig(datasets.BuilderConfig):
    """BuilderConfig for SQUALL."""

    def __init__(self, **kwargs):
        """BuilderConfig for SQUALL.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(SquallConfig, self).__init__(**kwargs)


class Squall(datasets.GeneratorBasedBuilder):
    """SQUALL: Lexical-level Supervised Table Question Answering Dataset."""

    # One config per cross-validation fold; the name selects the dev-N.ids file.
    BUILDER_CONFIGS = [
        SquallConfig(name="0"),
        SquallConfig(name="1"),
        SquallConfig(name="2"),
        SquallConfig(name="3"),
        SquallConfig(name="4"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "nt": datasets.Value("string"),
                    "tbl": datasets.Value("string"),
                    "columns": {
                        "raw_header": datasets.Value("string"),
                        "tokenized_header": datasets.features.Sequence(datasets.Value("string")),
                        "column_suffixes": datasets.features.Sequence(datasets.Value("string")),
                        "column_dtype": datasets.Value("string"),
                        "example": datasets.Value("string"),
                    },
                    "nl": datasets.features.Sequence(datasets.Value("string")),
                    "nl_pos": datasets.features.Sequence(datasets.Value("string")),
                    "nl_ner": datasets.features.Sequence(datasets.Value("string")),
                    "nl_incolumns": datasets.features.Sequence(datasets.Value("bool_")),
                    "nl_incells": datasets.features.Sequence(datasets.Value("bool_")),
                    "columns_innl": datasets.features.Sequence(datasets.Value("bool_")),
                    "tgt": datasets.Value("string"),
                    "sql": datasets.features.Sequence(datasets.Value("string")),
                    # "align" is not implemented
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            homepage="https://github.com/tzshi/squall/tree/main",
            citation=_CITATION,
            # Note: SQUALL is a text-to-SQL dataset; this extractive-QA template
            # is only an approximate mapping of its columns.
            task_templates=[
                QuestionAnsweringExtractive(
                    question_column="nl", context_column="columns", answers_column="tgt"
                )
            ],
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split_key": "train", "filepath": downloaded_files},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"split_key": "dev", "filepath": downloaded_files},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"split_key": "test", "filepath": downloaded_files},
            ),
        ]

    def _generate_examples(self, split_key, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)

        # _URLS maps every key to a single file, so the downloaded paths can be
        # used directly instead of joining file names onto a directory.
        squall_path = filepath["squall"]
        dev_ids_path = filepath["dev-" + self.config.name]
        test_path = filepath["wtq-test"]

        column_keys = ["raw_header", "tokenized_header", "column_suffixes", "column_dtype", "example"]

        if split_key != "test":
            with open(squall_path, encoding="utf-8") as f:
                squall_full_data = json.load(f)
            with open(dev_ids_path, encoding="utf-8") as f:
                dev_ids = set(json.load(f))
            # `samples` rather than `set`, which would shadow the built-in.
            if split_key == "train":
                samples = [x for x in squall_full_data if x["tbl"] not in dev_ids]
            else:
                samples = [x for x in squall_full_data if x["tbl"] in dev_ids]
            for idx, sample in enumerate(samples):
                cols = dict(zip(column_keys, sample["columns"]))
                # Keep the second field (the token string) of each SQL entry.
                sql = [x[1] for x in sample["sql"]]
                yield idx, {
                    "nt": sample["nt"],
                    "tbl": sample["tbl"],
                    "columns": cols,
                    "nl": sample["nl"],
                    "nl_pos": sample["nl_pos"],
                    "nl_ner": sample["nl_ner"],
                    # "nl_ralign": sample["nl_ralign"],
                    "nl_incolumns": sample["nl_incolumns"],
                    "nl_incells": sample["nl_incells"],
                    "columns_innl": sample["columns_innl"],
                    "tgt": sample["tgt"],
                    "sql": sql,
                    # "align": sample["align"],
                }
        else:
            with open(test_path, encoding="utf-8") as f:
                test_data = json.load(f)
            for idx, sample in enumerate(test_data):
                cols = dict(zip(column_keys, sample["columns"]))
                yield idx, {
                    "nt": sample["nt"],
                    "tbl": sample["tbl"],
                    "columns": cols,
                    "nl": sample["nl"],
                    "nl_pos": sample["nl_pos"],
                    "nl_ner": sample["nl_ner"],
                    # "nl_ralign": sample["nl_ralign"],
                    "nl_incolumns": sample["nl_incolumns"],
                    "nl_incells": sample["nl_incells"],
                    "columns_innl": sample["columns_innl"],
                    # The test file carries no gold targets, so emit empties.
                    "tgt": "",
                    "sql": [],
                    # "align": sample["align"],
                }
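

# A minimal usage sketch, assuming this script is saved locally as "squall.py"
# (the file name is an assumption, not fixed by the repo). The config name
# "0".."4" selects which dev-N.ids fold is held out as the validation split;
# recent `datasets` releases may also require trust_remote_code=True when
# loading a local script.
if __name__ == "__main__":
    squall = datasets.load_dataset("./squall.py", "0")
    print(squall)
    print(squall["train"][0]["nl"])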