siyue committed
Commit 5391641
1 Parent(s): db54a32
Files changed (2)
  1. README.md +23 -0
  2. squall.py +45 -22
README.md CHANGED
@@ -23,6 +23,29 @@ Please refer to [github repo](https://github.com/tzshi/squall/) for source data.
 from datasets import load_dataset
 dataset = load_dataset("siyue/squall","0")
 ```
+Example:
+```python
+{
+    'nt': 'nt-10922',
+    'tbl': '204_879',
+    'columns':
+        {
+        'raw_header': ['year', 'host / location', 'division i overall', 'division i undergraduate', 'division ii overall', 'division ii community college'],
+        'tokenized_header': [['year'], ['host', '\\\\/', 'location'], ['division', 'i', 'overall'], ['division', 'i', 'undergraduate'], ['division', 'ii', 'overall'], ['division', 'ii', 'community', 'college']],
+        'column_suffixes': [['number'], ['address'], [], [], [], []],
+        'column_dtype': ['number', 'address', 'text', 'text', 'text', 'text'],
+        'example': ['1997', 'penn', 'chicago', 'swarthmore', 'harvard', 'valencia cc']
+        },
+    'nl': ['when', 'was', 'the', 'last', 'time', 'the', 'event', 'was', 'held', 'in', 'minnesota', '?'],
+    'nl_pos': ['WRB', 'VBD-AUX', 'DT', 'JJ', 'NN', 'DT', 'NN', 'VBD-AUX', 'VBN', 'IN', 'NNP', '.'],
+    'nl_ner': ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'LOCATION', 'O'],
+    'nl_incolumns': [False, False, False, False, False, False, False, False, False, False, False, False],
+    'nl_incells': [False, False, False, False, False, False, False, False, False, False, True, False],
+    'columns_innl': [False, False, False, False, False, False],
+    'tgt': '2007',
+    'sql': ['select', 'c1', 'from', 'w', 'where', 'c2', '=', "'minnesota'", 'order', 'by', 'c1_number', 'desc', 'limit', '1']
+}
+```
 
 ## Contact
 For any issues or questions, kindly email us at: Siyue Zhang (siyue001@e.ntu.edu.sg).
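For orientation, a minimal usage sketch based on the loading snippet and the example record added above. The config name "0" and the field names come from the README; the exact layout of `sql` (the flat token list shown in the example above, or the nested `sql_type`/`value`/`span_indices` dict introduced in squall.py below) depends on which revision of the script you load.

```python
from datasets import load_dataset

# Load config "0" as in the README snippet above.
dataset = load_dataset("siyue/squall", "0")

sample = dataset["train"][0]

# Fields documented in the example record above.
print(sample["nt"], sample["tbl"])      # question id and table id
print(" ".join(sample["nl"]))           # tokenized natural-language question
print(sample["columns"]["raw_header"])  # table header strings
print(sample["tgt"])                    # target answer
print(sample["sql"])                    # SQL annotation; layout depends on the revision
```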
squall.py CHANGED
@@ -98,8 +98,19 @@ class Squall(datasets.GeneratorBasedBuilder):
                     "nl_incells": datasets.features.Sequence(datasets.Value("bool_")),
                     "columns_innl": datasets.features.Sequence(datasets.Value("bool_")),
                     "tgt": datasets.Value("string"),
-                    "sql": datasets.features.Sequence(datasets.Value("string"))
-                    # "align" is not implemented
+                    "sql": {
+                        "sql_type": datasets.features.Sequence(datasets.Value("string")),
+                        "value": datasets.features.Sequence(datasets.Value("string")),
+                        "span_indices": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("int32")))
+                    },
+                    "nl_ralign": {
+                        "aligned_sql_token_type": datasets.features.Sequence(datasets.Value("string")),
+                        "aligned_sql_token_info": datasets.features.Sequence(datasets.Value("string")),
+                    },
+                    "align": {
+                        "nl_indices": datasets.features.Sequence(datasets.Value("int32")),
+                        "sql_indices": datasets.features.Sequence(datasets.Value("int32"))
+                    }
                 }
             ),
             # No default supervised_keys (as we have to pass both question
@@ -225,6 +236,16 @@ class Squall(datasets.GeneratorBasedBuilder):
                 instance["numbers"] = numbers
                 instance["has_number"] = has_number
 
+            def transform(sample_key, keys):
+                cols = {}
+                n_col = len(sample[sample_key])
+                for k in range(len(keys)):
+                    tmp = []
+                    for j in range(n_col):
+                        tmp.append(sample[sample_key][j][k])
+                    cols[keys[k]] = tmp
+                return cols
+
             with open(dev_ids) as f:
                 dev_ids = json.load(f)
             if split_key == "train":
@@ -233,15 +254,20 @@ class Squall(datasets.GeneratorBasedBuilder):
                 set = [x for x in squall_full_data if x["tbl"] in dev_ids]
             idx = 0
             for sample in set:
-                cols = {}
+
+                # transform columns
                 keys = ["raw_header", "tokenized_header", "column_suffixes", "column_dtype", "example"]
-                n_col = len(sample["columns"])
-                for k in range(5):
-                    tmp = []
-                    for j in range(n_col):
-                        tmp.append(sample["columns"][j][k])
-                    cols[keys[k]] = tmp
-                sql = [x[1] for x in sample["sql"]]
+                cols = transform("columns", keys)
+                # transform sql
+                keys = ["sql_type", "value", "span_indices"]
+                sqls = transform("sql", keys)
+                # transform align
+                keys = ["nl_indices", "sql_indices"]
+                aligns = transform("align", keys)
+                # transform ralign
+                keys = ["aligned_sql_token_type", "aligned_sql_token_info"]
+                raligns = transform("nl_ralign", keys)
+
                 yield idx, {
                     "nt": sample["nt"],
                     "tbl": sample["tbl"],
@@ -249,13 +275,13 @@ class Squall(datasets.GeneratorBasedBuilder):
                     "nl": sample["nl"],
                     "nl_pos": sample["nl_pos"],
                     "nl_ner": sample["nl_ner"],
-                    # "nl_ralign": sample["nl_ralign"],
+                    "nl_ralign": raligns,
                     "nl_incolumns": sample["nl_incolumns"],
                     "nl_incells": sample["nl_incells"],
                     "columns_innl": sample["columns_innl"],
                     "tgt": sample["tgt"],
-                    "sql": sql,
-                    # "align": sample["align"]
+                    "sql": sqls,
+                    "align": aligns
                 }
                 idx += 1
         else:
@@ -263,14 +289,11 @@ class Squall(datasets.GeneratorBasedBuilder):
                 test_data = json.load(f)
             idx = 0
             for sample in test_data:
-                cols = {}
+
+                # transform columns
                 keys = ["raw_header", "tokenized_header", "column_suffixes", "column_dtype", "example"]
-                n_col = len(sample["columns"])
-                for k in range(5):
-                    tmp = []
-                    for j in range(n_col):
-                        tmp.append(sample["columns"][j][k])
-                    cols[keys[k]] = tmp
+                cols = transform("columns", keys)
+
                 yield idx, {
                     "nt": sample["nt"],
                     "tbl": sample["tbl"],
@@ -278,12 +301,12 @@ class Squall(datasets.GeneratorBasedBuilder):
                     "nl": sample["nl"],
                     "nl_pos": sample["nl_pos"],
                     "nl_ner": sample["nl_ner"],
-                    # "nl_ralign": sample["nl_ralign"],
+                    "nl_ralign": [],
                     "nl_incolumns": sample["nl_incolumns"],
                     "nl_incells": sample["nl_incells"],
                     "columns_innl": sample["columns_innl"],
                     "tgt": '',
                     "sql": [],
-                    # "align": sample["align"]
+                    "align": []
                 }
                 idx += 1
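As a note on the refactor above: `transform` pivots a column-major list, where each entry of `sample[sample_key]` is a tuple whose positions line up with `keys`, into a dict of parallel lists. Below is a standalone sketch of the same pivot; unlike the committed helper it takes the records as an explicit argument instead of closing over the loop variable `sample`, and the toy `sql_entries` values are invented for illustration.

```python
def pivot(records, keys):
    """Turn a list of per-item tuples into a dict of parallel lists, one per key."""
    cols = {key: [] for key in keys}
    for record in records:
        for k, key in enumerate(keys):
            cols[key].append(record[k])
    return cols

# Invented example mirroring the new nested "sql" feature
# (each entry: sql_type, value, span_indices).
sql_entries = [
    ("Keyword", "select", []),
    ("Column", "c1", [0]),
    ("Keyword", "from", []),
    ("Keyword", "w", []),
]
print(pivot(sql_entries, ["sql_type", "value", "span_indices"]))
# {'sql_type': ['Keyword', 'Column', 'Keyword', 'Keyword'],
#  'value': ['select', 'c1', 'from', 'w'],
#  'span_indices': [[], [0], [], []]}
```

This is the same reshaping squall.py now applies to `columns`, `sql`, `align`, and `nl_ralign` before yielding each example.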