"""SQUALL: Lexical-level Supervised Table Question Answering Dataset.""" |
|
|
|
|
|
import json |
|
import re |
|
import datasets |
|
from datasets.tasks import QuestionAnsweringExtractive |
|
|
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
|
|
_CITATION = """\ |
|
@inproceedings{Shi:Zhao:Boyd-Graber:Daume-III:Lee-2020, |
|
Title = {On the Potential of Lexico-logical Alignments for Semantic Parsing to {SQL} Queries}, |
|
Author = {Tianze Shi and Chen Zhao and Jordan Boyd-Graber and Hal {Daum\'{e} III} and Lillian Lee}, |
|
Booktitle = {Findings of EMNLP}, |
|
Year = {2020}, |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
To explore the utility of fine-grained, lexical-level supervision, authors \ |
|
introduce SQUALL, a dataset that enriches 11,276 WikiTableQuestions \ |
|
English-language questions with manually created SQL equivalents plus \ |
|
alignments between SQL and question fragments. |
|
""" |
|
|
|
_URL = "https://raw.githubusercontent.com/tzshi/squall/main/data/" |
|
_URLS = { |
|
"squall": _URL + "squall.json", |
|
"wtq-test": _URL + "wtq-test.json", |
|
"dev-0": _URL + "dev-0.ids", |
|
"dev-1": _URL + "dev-1.ids", |
|
"dev-2": _URL + "dev-2.ids", |
|
"dev-3": _URL + "dev-3.ids", |
|
"dev-4": _URL + "dev-4.ids", |
|
} |
|
|
|
class SquallConfig(datasets.BuilderConfig):
    """BuilderConfig for SQUALL."""

    def __init__(self, **kwargs):
        """BuilderConfig for SQUALL.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class Squall(datasets.GeneratorBasedBuilder):
    """SQUALL: Lexical-level Supervised Table Question Answering Dataset."""

    # Each config name selects one of the five cross-validation dev folds.
    BUILDER_CONFIGS = [
        SquallConfig(name="0"),
        SquallConfig(name="1"),
        SquallConfig(name="2"),
        SquallConfig(name="3"),
        SquallConfig(name="4"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "nt": datasets.Value("string"),
                    "tbl": datasets.Value("string"),
                    "columns": {
                        "raw_header": datasets.features.Sequence(datasets.Value("string")),
                        "tokenized_header": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
                        "column_suffixes": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
                        "column_dtype": datasets.features.Sequence(datasets.Value("string")),
                        "example": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "nl": datasets.features.Sequence(datasets.Value("string")),
                    "nl_pos": datasets.features.Sequence(datasets.Value("string")),
                    "nl_ner": datasets.features.Sequence(datasets.Value("string")),
                    "nl_incolumns": datasets.features.Sequence(datasets.Value("bool_")),
                    "nl_incells": datasets.features.Sequence(datasets.Value("bool_")),
                    "columns_innl": datasets.features.Sequence(datasets.Value("bool_")),
                    "tgt": datasets.Value("string"),
                    "sql": {
                        "sql_type": datasets.features.Sequence(datasets.Value("string")),
                        "value": datasets.features.Sequence(datasets.Value("string")),
                        "span_indices": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("int32"))),
                    },
                    "nl_ralign": {
                        "aligned_sql_token_type": datasets.features.Sequence(datasets.Value("string")),
                        "aligned_sql_token_info": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "align": {
                        "nl_indices": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("int32"))),
                        "sql_indices": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("int32"))),
                    },
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/tzshi/squall/",
            citation=_CITATION,
            task_templates=[
                QuestionAnsweringExtractive(
                    question_column="nl", context_column="columns", answers_column="tgt"
                )
            ],
        )

    def _split_generators(self, dl_manager):
        # _URLS already lists every file we need (the full annotations, the
        # five dev-fold id lists, and the WTQ test questions).
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split_key": "train", "filepath": downloaded_files},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"split_key": "dev", "filepath": downloaded_files},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"split_key": "test", "filepath": downloaded_files},
            ),
        ]

    def _generate_examples(self, split_key, filepath):
        """Yield examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)

        squall_full = filepath["squall"]
        dev_ids = filepath["dev-" + self.config.name]
        test = filepath["wtq-test"]

        def transform(sample, sample_key, keys):
            # Transpose sample[sample_key], a list of records in which each
            # record is a list aligned with `keys`, into a dict mapping
            # keys[k] to the list of k-th fields across all records.
            cols = {}
            n_col = len(sample[sample_key])
            for k in range(len(keys)):
                tmp = []
                for j in range(n_col):
                    tmp.append(sample[sample_key][j][k])
                cols[keys[k]] = tmp
            return cols
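
        # For illustration, with hypothetical values: if sample["columns"] is
        # [["year", ["year"]], ["name", ["name"]]] and keys is
        # ["raw_header", "tokenized_header"], transform returns
        # {"raw_header": ["year", "name"], "tokenized_header": [["year"], ["name"]]}.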

        if split_key != "test":
            with open(squall_full, encoding="utf-8") as f:
                squall_full_data = json.load(f)

            NUM_MAPPING = {
                'half': 0.5,
                'one': 1,
                'two': 2,
                'three': 3,
                'four': 4,
                'five': 5,
                'six': 6,
                'seven': 7,
                'eight': 8,
                'nine': 9,
                'ten': 10,
                'eleven': 11,
                'twelve': 12,
                'twenty': 20,
                'thirty': 30,
                'once': 1,
                'twice': 2,
                'first': 1,
                'second': 2,
                'third': 3,
                'fourth': 4,
                'fifth': 5,
                'sixth': 6,
                'seventh': 7,
                'eighth': 8,
                'ninth': 9,
                'tenth': 10,
                'hundred': 100,
                'thousand': 1000,
                'million': 1000000,
                'jan': 1,
                'feb': 2,
                'mar': 3,
                'apr': 4,
                'may': 5,
                'jun': 6,
                'jul': 7,
                'aug': 8,
                'sep': 9,
                'oct': 10,
                'nov': 11,
                'dec': 12,
                'january': 1,
                'february': 2,
                'march': 3,
                'april': 4,
                'june': 6,
                'july': 7,
                'august': 8,
                'september': 9,
                'october': 10,
                'november': 11,
                'december': 12,
            }

            def parse_number(s):
                # Known number words map to numeric values; otherwise return
                # the first numeric substring found (as a string), or None.
                if s in NUM_MAPPING:
                    return NUM_MAPPING[s]
                s = s.replace(',', '')
                ret = re.findall(r"[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", s)
                if len(ret) > 0:
                    return ret[0]
                return None
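
            # Examples (hypothetical inputs): parse_number("twice") -> 2;
            # parse_number("4,500") -> "4500" (a string: regex matches are
            # returned unconverted); parse_number("many") -> None.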

            # Pre-compute, for each NL token, any number it denotes. These
            # auxiliary fields stay on the instance dict and are not yielded.
            for instance in squall_full_data:
                has_number = False
                numbers = []
                for x in instance["nl"]:
                    numbers.append(parse_number(x))
                    if numbers[-1] is not None:
                        has_number = True
                instance["numbers"] = numbers
                instance["has_number"] = has_number
|
            # Table ids listed in the fold file go to dev; the rest to train.
            with open(dev_ids) as f:
                dev_fold_ids = json.load(f)
            if split_key == "train":
                samples = [x for x in squall_full_data if x["tbl"] not in dev_fold_ids]
            else:
                samples = [x for x in squall_full_data if x["tbl"] in dev_fold_ids]

            for idx, sample in enumerate(samples):
                keys = ["raw_header", "tokenized_header", "column_suffixes", "column_dtype", "example"]
                cols = transform(sample, "columns", keys)

                keys = ["sql_type", "value", "span_indices"]
                sqls = transform(sample, "sql", keys)

                keys = ["nl_indices", "sql_indices"]
                aligns = transform(sample, "align", keys)

                keys = ["aligned_sql_token_type", "aligned_sql_token_info"]
                raligns = transform(sample, "nl_ralign", keys)

                yield idx, {
                    "nt": sample["nt"],
                    "tbl": sample["tbl"],
                    "columns": cols,
                    "nl": sample["nl"],
                    "nl_pos": sample["nl_pos"],
                    "nl_ner": sample["nl_ner"],
                    "nl_ralign": raligns,
                    "nl_incolumns": sample["nl_incolumns"],
                    "nl_incells": sample["nl_incells"],
                    "columns_innl": sample["columns_innl"],
                    "tgt": sample["tgt"],
                    "sql": sqls,
                    "align": aligns,
                }
|
        else:
            with open(test, encoding="utf-8") as f:
                test_data = json.load(f)
            for idx, sample in enumerate(test_data):
                keys = ["raw_header", "tokenized_header", "column_suffixes", "column_dtype", "example"]
                cols = transform(sample, "columns", keys)
                yield idx, {
                    "nt": sample["nt"],
                    "tbl": sample["tbl"],
                    "columns": cols,
                    "nl": sample["nl"],
                    "nl_pos": sample["nl_pos"],
                    "nl_ner": sample["nl_ner"],
                    # The test questions ship without gold SQL or alignments;
                    # emit empty values that still match the dict-typed
                    # features declared in _info (a bare list would not).
                    "nl_ralign": {"aligned_sql_token_type": [], "aligned_sql_token_info": []},
                    "nl_incolumns": sample["nl_incolumns"],
                    "nl_incells": sample["nl_incells"],
                    "columns_innl": sample["columns_innl"],
                    "tgt": "",
                    "sql": {"sql_type": [], "value": [], "span_indices": []},
                    "align": {"nl_indices": [], "sql_indices": []},
                }
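

if __name__ == "__main__":
    # Minimal smoke test: a sketch, not part of the original script. It
    # assumes this file is saved locally (e.g. as squall.py) and that the
    # installed `datasets` version still supports script-based loading.
    # The config name selects the dev fold: "0" here means dev-0.ids.
    squall = datasets.load_dataset(__file__, "0")
    print(squall)
    print(squall["train"][0]["nl"], "->", squall["train"][0]["tgt"])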