siyue committed on
Commit
1ae9e7b
1 Parent(s): e815a7f
Files changed (1)
  1. squall.py +197 -0
squall.py ADDED
@@ -0,0 +1,197 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """SQUALL: Lexical-level Supervised Table Question Answering Dataset."""
+
+
+ import json
+
+ import datasets
+ from datasets.tasks import QuestionAnsweringExtractive
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ @inproceedings{Shi:Zhao:Boyd-Graber:Daume-III:Lee-2020,
+     Title = {On the Potential of Lexico-logical Alignments for Semantic Parsing to {SQL} Queries},
+     Author = {Tianze Shi and Chen Zhao and Jordan Boyd-Graber and Hal {Daum\'{e} III} and Lillian Lee},
+     Booktitle = {Findings of EMNLP},
+     Year = {2020},
+ }
+ """
+
+ _DESCRIPTION = """\
+ To explore the utility of fine-grained, lexical-level supervision, the \
+ authors introduce SQUALL, a dataset that enriches 11,276 WikiTableQuestions \
+ English-language questions with manually created SQL equivalents plus \
+ alignments between SQL and question fragments.
+ """
+
+ # Base URL for the raw data files; the repository's "tree" URL serves HTML
+ # pages rather than the files themselves.
+ _URL = "https://raw.githubusercontent.com/tzshi/squall/main/data/"
+ _URLS = {
+     "squall": _URL + "squall.json",
+     "twtq-test": _URL + "wtq-test.json",
+     "dev-0": _URL + "dev-0.ids",
+     "dev-1": _URL + "dev-1.ids",
+     "dev-2": _URL + "dev-2.ids",
+     "dev-3": _URL + "dev-3.ids",
+     "dev-4": _URL + "dev-4.ids",
+ }
+
+
+ class SquallConfig(datasets.BuilderConfig):
+     """BuilderConfig for Squall."""
+
+     def __init__(self, fold_num, **kwargs):
+         """BuilderConfig for Squall.
+
+         Args:
+             fold_num: which of the five cross-validation folds (dev-0 to
+                 dev-4) to use as the validation set.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(SquallConfig, self).__init__(**kwargs)
+         self.fold_num = fold_num
+
+
+ class Squall(datasets.GeneratorBasedBuilder):
+     """SQUALL: Lexical-level Supervised Table Question Answering Dataset."""
+
+     BUILDER_CONFIGS = [
+         SquallConfig(
+             fold_num=0,
+         ),
+     ]
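+     # Only fold 0 is registered above, although dev-0 through dev-4 id files
+     # are all listed in _URLS; a hypothetical config per fold could be added,
+     # e.g. [SquallConfig(name=f"fold-{i}", fold_num=i) for i in range(5)].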
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "nt": datasets.Value("string"),
+                     "tbl": datasets.Value("string"),
+                     "columns": {
+                         "raw_header": datasets.Value("string"),
+                         "tokenized_header": datasets.features.Sequence(datasets.Value("string")),
+                         "column_suffixes": datasets.features.Sequence(datasets.Value("string")),
+                         "column_dtype": datasets.Value("string"),
+                         "example": datasets.Value("string"),
+                     },
+                     "nl": datasets.features.Sequence(datasets.Value("string")),
+                     "nl_pos": datasets.features.Sequence(datasets.Value("string")),
+                     "nl_ner": datasets.features.Sequence(datasets.Value("string")),
+                     "nl_incolumns": datasets.features.Sequence(datasets.Value("bool_")),
+                     "nl_incells": datasets.features.Sequence(datasets.Value("bool_")),
+                     "columns_innl": datasets.features.Sequence(datasets.Value("bool_")),
+                     "tgt": datasets.Value("string"),
+                     "sql": datasets.features.Sequence(datasets.Value("string")),
+                     # "align" is not implemented
+                 }
+             ),
+             # No default supervised_keys (as we have to pass both question
+             # and context as input).
+             supervised_keys=None,
+             homepage="https://github.com/tzshi/squall/tree/main",
+             citation=_CITATION,
+             task_templates=[
+                 QuestionAnsweringExtractive(
+                     question_column="nl", context_column="columns", answers_column="tgt"
+                 )
+             ],
+         )
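+     # Note: task templates and the datasets.tasks module were deprecated and
+     # later removed in newer releases of the datasets library; this script
+     # assumes a release that still ships QuestionAnsweringExtractive.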
+
+     def _split_generators(self, dl_manager):
+         # download_and_extract maps each key in _URLS to a local file path;
+         # none of these files are archives, so no extraction takes place.
+         downloaded_files = dl_manager.download_and_extract(_URLS)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"split_key": "train", "filepath": downloaded_files}),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"split_key": "dev", "filepath": downloaded_files}),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"split_key": "test", "filepath": downloaded_files}),
+         ]
+
+     def _generate_examples(self, split_key, filepath):
+         """This function returns the examples in the raw (text) form."""
+         logger.info("generating examples from = %s", filepath)
+
+         # The downloaded paths are the files themselves; the fold number
+         # lives on self.config.
+         squall_full = filepath["squall"]
+         dev_ids_file = filepath[f"dev-{self.config.fold_num}"]
+         test = filepath["twtq-test"]
+
+         if split_key != "test":
+             with open(squall_full, encoding="utf-8") as f:
+                 squall_full_data = json.load(f)
+             with open(dev_ids_file, encoding="utf-8") as f:
+                 dev_ids = set(json.load(f))
+             if split_key == "train":
+                 samples = [x for x in squall_full_data if x["tbl"] not in dev_ids]
+             else:
+                 samples = [x for x in squall_full_data if x["tbl"] in dev_ids]
+         else:
+             with open(test, encoding="utf-8") as f:
+                 samples = json.load(f)
+
+         keys = ["raw_header", "tokenized_header", "column_suffixes", "column_dtype", "example"]
+         for idx, sample in enumerate(samples):
+             # Each entry of sample["columns"] lines up with one feature name.
+             cols = dict(zip(keys, sample["columns"]))
+             yield idx, {
+                 "nt": sample["nt"],
+                 "tbl": sample["tbl"],
+                 "columns": cols,
+                 "nl": sample["nl"],
+                 "nl_pos": sample["nl_pos"],
+                 "nl_ner": sample["nl_ner"],
+                 # "nl_ralign": sample["nl_ralign"],
+                 "nl_incolumns": sample["nl_incolumns"],
+                 "nl_incells": sample["nl_incells"],
+                 "columns_innl": sample["columns_innl"],
+                 # The blind wtq-test split ships without gold targets or SQL.
+                 "tgt": sample["tgt"] if split_key != "test" else "",
+                 "sql": [x[1] for x in sample["sql"]] if split_key != "test" else [],
+                 # "align": sample["align"]
+             }
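
A minimal usage sketch, assuming the script above is saved locally as squall.py and a datasets release that still supports script-based loading; the split and column names come from the script, everything else here is illustrative:

    import datasets

    # Build all three splits (train/validation are carved out of squall.json
    # using the fold-0 dev ids; test comes from wtq-test.json).
    squall = datasets.load_dataset("./squall.py")

    example = squall["train"][0]
    print(example["nl"])   # tokenized question
    print(example["sql"])  # aligned SQL token sequence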