parquet-converter committed · Commit 178b1a7 · 1 parent: 264e852

Update parquet files

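This commit replaces the zipped JSONL archive and the custom loading script with per-config Parquet files, so the dataset can be loaded (or streamed) directly. A minimal sketch, assuming the repository id SALT-NLP/wikisql_VALUE (taken from the deleted script's _DATA_URL) and a recent release of the datasets library:

from datasets import load_dataset

# Config names mirror the new top-level directories:
# AppE, ChcE, CollSgE, IndE, MULTI, UAAVE
ds = load_dataset("SALT-NLP/wikisql_VALUE", "AppE", split="validation")
print(ds[0]["question"])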
.gitattributes DELETED
@@ -1,53 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
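Each deleted rule pairs a glob pattern with the LFS filter attributes; a file matching any pattern is stored as an LFS pointer rather than a regular blob. A minimal sketch of that matching, using Python's fnmatch as a rough stand-in for Git's wildmatch (the two differ on `**` and path separators):

from fnmatch import fnmatch

# A few of the patterns from the deleted .gitattributes above.
lfs_patterns = ["*.parquet", "*.zip", "*.arrow"]

for path in ["data.zip", "AppE/wikisql_value-test.parquet", "README.md"]:
    tracked = any(fnmatch(path, pattern) for pattern in lfs_patterns)
    print(f"{path}: {'LFS pointer' if tracked else 'regular blob'}")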
data.zip → AppE/wikisql_value-test.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e0930ab888f4990d664851d660b176771c7b1e08d26dc42c6eb21beb8f5d5b97
- size 49195968
+ oid sha256:0f5a9738bbddc1389c97fb3dce4223f04cc84035275e635fed80ac65044ac064
+ size 7743609
AppE/wikisql_value-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3c1747048454052a35247e0a26f36122cfb4899d670b1f6bc10d8c28f4d71f8
+ size 25340222
AppE/wikisql_value-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa9e9679493e8da31f773c4c4d0a0bdb04bcde9d0ba709607921503e806e29ed
+ size 3647919
ChcE/wikisql_value-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f1c3c35198799c09ef955db4f6249491b5b167f5eda21449119ac05ea18dc96
+ size 7714433
ChcE/wikisql_value-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9448ee2861943a104892a75af69a0d54d04c304a468292f7bee46ae289db959b
+ size 25234384
ChcE/wikisql_value-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7dc6322ddbbe367abf32737f7c5a7a795abffabe709b344c9b60a3b236205d68
+ size 3632379
CollSgE/wikisql_value-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7abe5c5c8981ebe50ab4da3c545a65b06deb0285a0c28809041a902bb666ec38
+ size 7699233
CollSgE/wikisql_value-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1dcee461e58dfae8206c58641578d5e3dfdfdea430442d525a09af0a86528631
+ size 25184398
CollSgE/wikisql_value-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9dcdbb78fc22328f8f02c7a31cb833014ce0d8f6ee0af64e28924fe50edfb98b
+ size 3624611
IndE/wikisql_value-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f473f1724c1e14b730abbfed4c5757ca3a8a9903972971a1d079e871ea79178
+ size 7757699
IndE/wikisql_value-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7c2e169b24da66f019a94f738254da7e617b48cde934d4dfb83c8f228639018
+ size 25387319
IndE/wikisql_value-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b45d15605cd8590a16b1e589ea4ddc0b90646daba52df3be72e869241cbae5ba
+ size 3655266
MULTI/wikisql_value-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d42e367c49f53cf5a4ba0e8ecf0830c5cf931b218146a3dbd452eeb8c22ed40e
+ size 7791980
MULTI/wikisql_value-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d15adb61731a7c2c3109d6355c34ee5c43771f726ac07c55b0f1085eb9db1a48
+ size 25507318
MULTI/wikisql_value-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2dc485f326792ed3a1bf55ea73af71418c25ddfccc176ad76f990fb03a9c457
+ size 3673414
UAAVE/wikisql_value-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c6eb3e8f2e0be7e88861910b27e09e0ccd3208dbf1a9037875db4680de89d39
+ size 7762444
UAAVE/wikisql_value-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df96dbfc299dc7d4796e48d168012bf8eb4ef4826a16207ab2450d8f4abc404d
+ size 25403979
UAAVE/wikisql_value-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16520b5ca33ed89c7b4621351613b6b3b0e4e6d4e8185c09c7ccd3acf005790d
+ size 3658002
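Each of these three-line files is a Git LFS pointer: the actual parquet payload lives in LFS storage, keyed by the sha256 in `oid` and checked against `size`. A minimal sketch of verifying a downloaded payload against its pointer (the paths in the usage comment are hypothetical, not part of this commit; removeprefix needs Python 3.9+):

import hashlib
import os

def verify_lfs_pointer(pointer_path, payload_path):
    """Check a local file against the oid/size recorded in an LFS pointer."""
    fields = {}
    with open(pointer_path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    with open(payload_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return (digest.hexdigest() == expected_oid
            and os.path.getsize(payload_path) == expected_size)

# Hypothetical usage:
# verify_lfs_pointer("AppE/wikisql_value-test.parquet",   # pointer as stored in git
#                    "/tmp/wikisql_value-test.parquet")   # downloaded payload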
wikisql_VALUE.py DELETED
@@ -1,243 +0,0 @@
- """A large crowd-sourced dataset for developing natural language interfaces for relational databases"""
-
-
- import json
- import os
- import textwrap
-
- import datasets
-
-
- _CITATION = """\
- @article{zhongSeq2SQL2017,
-   author    = {Victor Zhong and
-                Caiming Xiong and
-                Richard Socher},
-   title     = {Seq2SQL: Generating Structured Queries from Natural Language using
-                Reinforcement Learning},
-   journal   = {CoRR},
-   volume    = {abs/1709.00103},
-   year      = {2017}
- }
- """
-
- _DESCRIPTION = """\
- A large crowd-sourced dataset for developing natural language interfaces for relational databases
- """
-
- _DATA_URL = "https://huggingface.co/datasets/SALT-NLP/wikisql_VALUE/resolve/main/data.zip"
-
- _AGG_OPS = ["", "MAX", "MIN", "COUNT", "SUM", "AVG"]
- _COND_OPS = ["=", ">", "<", "OP"]
-
-
- class WikiSQLConfig(datasets.BuilderConfig):
-     """BuilderConfig for WikiSQL."""
-
-     def __init__(
-         self,
-         name,
-         description,
-         train_path,
-         dev_path,
-         test_path,
-         **kwargs
-     ):
-         super(WikiSQLConfig, self).__init__(version=datasets.Version("0.1.0", ""), **kwargs)
-         self.features = datasets.Features(
-             {
-                 "phase": datasets.Value("int32"),
-                 "question": datasets.Value("string"),
-                 "table": {
-                     "header": datasets.features.Sequence(datasets.Value("string")),
-                     "page_title": datasets.Value("string"),
-                     "page_id": datasets.Value("string"),
-                     "types": datasets.features.Sequence(datasets.Value("string")),
-                     "id": datasets.Value("string"),
-                     "section_title": datasets.Value("string"),
-                     "caption": datasets.Value("string"),
-                     "rows": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
-                     "name": datasets.Value("string"),
-                 },
-                 "sql": {
-                     "human_readable": datasets.Value("string"),
-                     "sel": datasets.Value("int32"),
-                     "agg": datasets.Value("int32"),
-                     "conds": datasets.features.Sequence(
-                         {
-                             "column_index": datasets.Value("int32"),
-                             "operator_index": datasets.Value("int32"),
-                             "condition": datasets.Value("string"),
-                         }
-                     ),
-                 },
-             }
-         )
-         self.name = name
-         self.description = description
-         self.train_path = train_path
-         self.dev_path = dev_path
-         self.test_path = test_path
-
-
- class WikiSQL(datasets.GeneratorBasedBuilder):
-     """WikiSQL: A large crowd-sourced dataset for developing natural language interfaces for relational databases"""
-
-     VERSION = datasets.Version("0.1.0")
-
-     BUILDER_CONFIGS = [
-         WikiSQLConfig(
-             name="AppE",
-             description=textwrap.dedent(
-                 """\
-                 An Appalachian English variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
-             ),
-             train_path="train_AppE.jsonl",
-             dev_path="dev_AppE.jsonl",
-             test_path="test_AppE.jsonl"
-         ),
-         WikiSQLConfig(
-             name="ChcE",
-             description=textwrap.dedent(
-                 """\
-                 A Chicano English variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
-             ),
-             train_path="train_ChcE.jsonl",
-             dev_path="dev_ChcE.jsonl",
-             test_path="test_ChcE.jsonl"
-         ),
-         WikiSQLConfig(
-             name="CollSgE",
-             description=textwrap.dedent(
-                 """\
-                 A Singapore English (Singlish) variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
-             ),
-             train_path="train_CollSgE.jsonl",
-             dev_path="dev_CollSgE.jsonl",
-             test_path="test_CollSgE.jsonl"
-         ),
-         WikiSQLConfig(
-             name="IndE",
-             description=textwrap.dedent(
-                 """\
-                 An Indian English variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
-             ),
-             train_path="train_IndE.jsonl",
-             dev_path="dev_IndE.jsonl",
-             test_path="test_IndE.jsonl"
-         ),
-         WikiSQLConfig(
-             name="UAAVE",
-             description=textwrap.dedent(
-                 """\
-                 An Urban African American English variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
-             ),
-             train_path="train_UAAVE.jsonl",
-             dev_path="dev_UAAVE.jsonl",
-             test_path="test_UAAVE.jsonl"
-         ),
-         WikiSQLConfig(
-             name="MULTI",
-             description=textwrap.dedent(
-                 """\
-                 A mixed-dialectal variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
-             ),
-             train_path="train_MULTI.jsonl",
-             dev_path="dev_MULTI.jsonl",
-             test_path="test_MULTI.jsonl"
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=self.config.description,
-             features=self.config.features,
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage="https://github.com/salesforce/WikiSQL",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         dl_dir = dl_manager.download_and_extract(_DATA_URL)
-         dl_dir = os.path.join(dl_dir, "data")
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "main_filepath": os.path.join(dl_dir, self.config.test_path),
-                     "tables_filepath": os.path.join(dl_dir, "test.tables.jsonl"),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "main_filepath": os.path.join(dl_dir, self.config.dev_path),
-                     "tables_filepath": os.path.join(dl_dir, "dev.tables.jsonl"),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "main_filepath": os.path.join(dl_dir, self.config.train_path),
-                     "tables_filepath": os.path.join(dl_dir, "train.tables.jsonl"),
-                 },
-             )
-         ]
-
-     def _convert_to_human_readable(self, sel, agg, columns, conditions):
-         """Make SQL query string. Based on https://github.com/salesforce/WikiSQL/blob/c2ed4f9b22db1cc2721805d53e6e76e07e2ccbdc/lib/query.py#L10"""
-
-         rep = f"SELECT {_AGG_OPS[agg]} {columns[sel] if columns is not None else f'col{sel}'} FROM table"
-
-         if conditions:
-             rep += " WHERE " + " AND ".join([f"{columns[i]} {_COND_OPS[o]} {v}" for i, o, v in conditions])
-         return " ".join(rep.split())
-
-     def _generate_examples(self, main_filepath, tables_filepath):
-         """Yields examples."""
-
-         # Build a dictionary mapping table ids to tables
-         with open(tables_filepath, encoding="utf-8") as f:
-             tables = [json.loads(line) for line in f]
-             id_to_tables = {x["id"]: x for x in tables}
-
-         with open(main_filepath, encoding="utf-8") as f:
-             for idx, line in enumerate(f):
-                 row = json.loads(line)
-                 row["table"] = id_to_tables[row["table_id"]]
-                 del row["table_id"]
-
-                 # Handle missing data
-                 row["table"]["page_title"] = row["table"].get("page_title", "")
-                 row["table"]["section_title"] = row["table"].get("section_title", "")
-                 row["table"]["caption"] = row["table"].get("caption", "")
-                 row["table"]["name"] = row["table"].get("name", "")
-                 row["table"]["page_id"] = str(row["table"].get("page_id", ""))
-
-                 # Fix row types
-                 row["table"]["rows"] = [[str(e) for e in r] for r in row["table"]["rows"]]
-
-                 # Get human-readable version
-                 row["sql"]["human_readable"] = self._convert_to_human_readable(
-                     row["sql"]["sel"],
-                     row["sql"]["agg"],
-                     row["table"]["header"],
-                     row["sql"]["conds"],
-                 )
-
-                 # Restructure sql->conds
-                 # - WikiSQL provides a tuple [column_index, operator_index, condition];
-                 #   as 'condition' can have 2 types (float or str) we convert it to a dict
-                 for i in range(len(row["sql"]["conds"])):
-                     row["sql"]["conds"][i] = {
-                         "column_index": row["sql"]["conds"][i][0],
-                         "operator_index": row["sql"]["conds"][i][1],
-                         "condition": str(row["sql"]["conds"][i][2]),
-                     }
-                 yield idx, row
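For reference, the deleted builder's only non-trivial transformation is `_convert_to_human_readable`, which renders the structured `sel`/`agg`/`conds` fields as a SQL string; the parquet files ship this string precomputed in `sql.human_readable`. A standalone sketch of the same logic on a made-up record (the column names and condition values below are illustrative):

# Standalone version of the deleted script's human-readable SQL step.
_AGG_OPS = ["", "MAX", "MIN", "COUNT", "SUM", "AVG"]
_COND_OPS = ["=", ">", "<", "OP"]

def convert_to_human_readable(sel, agg, columns, conditions):
    rep = f"SELECT {_AGG_OPS[agg]} {columns[sel]} FROM table"
    if conditions:
        rep += " WHERE " + " AND ".join(
            f"{columns[i]} {_COND_OPS[o]} {v}" for i, o, v in conditions
        )
    return " ".join(rep.split())

# Sample record in WikiSQL's structured form:
columns = ["Player", "No.", "Position"]
print(convert_to_human_readable(0, 3, columns, [[1, 0, "42"]]))
# -> SELECT COUNT Player FROM table WHERE No. = 42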