Commit 89a0cd8 (0 parents)
Committed by system (HF staff)

Update files from the datasets library (from 1.2.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

Files changed (5):
  1. .gitattributes +27 -0
  2. README.md +157 -0
  3. dataset_infos.json +1 -0
  4. dummy/spider/1.0.0/dummy_data.zip +3 -0
  5. spider.py +107 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,157 @@
+ ---
+ annotations_creators:
+ - expert-generated
+ language_creators:
+ - expert-generated
+ - machine-generated
+ languages:
+ - en
+ licenses:
+ - cc-by-sa-4.0
+ multilinguality:
+ - monolingual
+ size_categories:
+ - 1K<n<10K
+ source_datasets:
+ - original
+ task_categories:
+ - conditional-text-generation
+ task_ids:
+ - conditional-text-generation-other-structured-to-text
+ ---
+
+
+ # Dataset Card for Spider
+
+ ## Table of Contents
+ - [Dataset Description](#dataset-description)
+ - [Dataset Summary](#dataset-summary)
+ - [Supported Tasks](#supported-tasks-and-leaderboards)
+ - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+ - [Data Instances](#data-instances)
+ - [Data Fields](#data-fields)
+ - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+ - [Curation Rationale](#curation-rationale)
+ - [Source Data](#source-data)
+ - [Annotations](#annotations)
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+ - [Social Impact of Dataset](#social-impact-of-dataset)
+ - [Discussion of Biases](#discussion-of-biases)
+ - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+ - [Dataset Curators](#dataset-curators)
+ - [Licensing Information](#licensing-information)
+ - [Citation Information](#citation-information)
+
+ ## Dataset Description
+
+ - **Homepage:** https://yale-lily.github.io/spider
+ - **Repository:** https://github.com/taoyds/spider
+ - **Paper:** https://www.aclweb.org/anthology/D18-1425/
+ - **Point of Contact:** [Yale LILY](https://yale-lily.github.io/)
+
+ ### Dataset Summary
+
+ Spider is a large-scale, complex, cross-domain semantic parsing and text-to-SQL dataset annotated by 11 Yale students.
+ The goal of the Spider challenge is to develop natural language interfaces to cross-domain databases.
+
+ ### Supported Tasks and Leaderboards
+
+ The leaderboard can be seen at https://yale-lily.github.io/spider
+
+ ### Languages
+
+ The text in the dataset is in English.
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ **What do the instances that comprise the dataset represent?**
+
+ Each instance is a natural language question paired with the equivalent SQL query.
+
+ **How many instances are there in total?**
+
+ 8,034: 7,000 in the train split and 1,034 in the dev split.
+
+ **What data does each instance consist of?**
+
+ Each instance consists of the fields described under [Data Fields](#data-fields); an illustrative example follows.
+
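+ A sketch of one instance in the shape produced by this loader (the values are illustrative):
+
+ ```python
+ {
+     "db_id": "concert_singer",
+     "question": "How many singers do we have?",
+     "question_toks": ["How", "many", "singers", "do", "we", "have", "?"],
+     "query": "SELECT count(*) FROM singer",
+     "query_toks": ["SELECT", "count", "(", "*", ")", "FROM", "singer"],
+     "query_toks_no_value": ["select", "count", "(", "*", ")", "from", "singer"],
+ }
+ ```
+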
+ ### Data Fields
+
+ * **db_id**: Name of the database the query runs against
+ * **question**: Natural language question to interpret into SQL
+ * **query**: Target SQL query
+ * **query_toks**: List of tokens for the query
+ * **query_toks_no_value**: List of tokens for the query, with literal values replaced by placeholders
+ * **question_toks**: List of tokens for the question
+
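+ A minimal sketch of loading the dataset and reading these fields with the `datasets` library (assuming the hub id `spider`):
+
+ ```python
+ from datasets import load_dataset
+
+ # Download and prepare both splits (train and validation).
+ dataset = load_dataset("spider")
+
+ example = dataset["train"][0]
+ print(example["db_id"])    # database the query runs against
+ print(example["question"]) # natural language question
+ print(example["query"])    # gold SQL query
+ ```
+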
+ ### Data Splits
+
+ * **train**: 7,000 question and SQL query pairs
+ * **dev**: 1,034 question and SQL query pairs
+
+ [More Information Needed]
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [More Information Needed]
+
+ ### Source Data
+
+ #### Initial Data Collection and Normalization
+
+ #### Who are the source language producers?
+
+ [More Information Needed]
+
+ ### Annotations
+
+ The dataset was annotated by 11 college students at Yale University.
+
+ #### Annotation process
+
+ #### Who are the annotators?
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ ### Other Known Limitations
+
+ ## Additional Information
+
+ The authors listed on the homepage maintain and support the dataset.
+
+ ### Dataset Curators
+
+ [More Information Needed]
+
+ ### Licensing Information
+
+ The Spider dataset is licensed under
+ the [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/legalcode) license.
+
+ ### Citation Information
+
+ ```
+ @article{yu2018spider,
+   title={Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-sql task},
+   author={Yu, Tao and Zhang, Rui and Yang, Kai and Yasunaga, Michihiro and Wang, Dongxu and Li, Zifan and Ma, James and Li, Irene and Yao, Qingning and Roman, Shanelle and others},
+   journal={arXiv preprint arXiv:1809.08887},
+   year={2018}
+ }
+ ```
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"spider": {"description": "Spider is a large-scale complex and cross-domain semantic parsing and text-to-SQL dataset annotated by 11 college students\n", "citation": "@article{yu2018spider,\n title={Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-sql task},\n author={Yu, Tao and Zhang, Rui and Yang, Kai and Yasunaga, Michihiro and Wang, Dongxu and Li, Zifan and Ma, James and Li, Irene and Yao, Qingning and Roman, Shanelle and others},\n journal={arXiv preprint arXiv:1809.08887},\n year={2018}\n}\n", "homepage": "https://yale-lily.github.io/spider", "license": "CC BY-SA 4.0", "features": {"db_id": {"dtype": "string", "id": null, "_type": "Value"}, "query": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "query_toks": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "query_toks_no_value": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "question_toks": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "spider", "config_name": "spider", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4743822, "num_examples": 7000, "dataset_name": "spider"}, "validation": {"name": "validation", "num_bytes": 682126, "num_examples": 1034, "dataset_name": "spider"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1_AckYkinAnhqmRQtGsQgUKAnTHxxX5J0": {"num_bytes": 99736136, "checksum": "5ddff97bb1d421282c593e8d30ce0ce107270f4dd4a21d60eba4bf287d5956b1"}}, "download_size": 99736136, "post_processing_size": null, "dataset_size": 5425948, "size_in_bytes": 105162084}}
dummy/spider/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95b2b5441be8b0ba21e6534b264c15e42130114ce2d65d790f2068f36ecf64f1
+ size 2518
spider.py ADDED
@@ -0,0 +1,107 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Spider: A Large-Scale Human-Labeled Dataset for Text-to-SQL Tasks"""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import logging
+
+ import datasets
+
+
+ _CITATION = """\
+ @article{yu2018spider,
+   title={Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-sql task},
+   author={Yu, Tao and Zhang, Rui and Yang, Kai and Yasunaga, Michihiro and Wang, Dongxu and Li, Zifan and Ma, James and Li, Irene and Yao, Qingning and Roman, Shanelle and others},
+   journal={arXiv preprint arXiv:1809.08887},
+   year={2018}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Spider is a large-scale complex and cross-domain semantic parsing and text-to-SQL dataset annotated by 11 college students
+ """
+
+ _HOMEPAGE = "https://yale-lily.github.io/spider"
+
+ _LICENSE = "CC BY-SA 4.0"
+
+ _URL = "https://drive.google.com/uc?export=download&id=1_AckYkinAnhqmRQtGsQgUKAnTHxxX5J0"
+
+
+ class Spider(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="spider",
+             version=VERSION,
+             description="Spider: A Large-Scale Human-Labeled Dataset for Text-to-SQL Tasks",
+         ),
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "db_id": datasets.Value("string"),
+                 "query": datasets.Value("string"),
+                 "question": datasets.Value("string"),
+                 "query_toks": datasets.features.Sequence(datasets.Value("string")),
+                 "query_toks_no_value": datasets.features.Sequence(datasets.Value("string")),
+                 "question_toks": datasets.features.Sequence(datasets.Value("string")),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         downloaded_filepath = dl_manager.download_and_extract(_URL)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_filepath": downloaded_filepath + "/spider/train_spider.json",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "data_filepath": downloaded_filepath + "/spider/dev.json",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, data_filepath):
+         """This function returns the examples in the raw (text) form."""
+         logging.info("generating examples from = %s", data_filepath)
+         with open(data_filepath, encoding="utf-8") as f:
+             spider = json.load(f)
+             for idx, sample in enumerate(spider):
+                 yield idx, {
+                     "db_id": sample["db_id"],
+                     "query": sample["query"],
+                     "question": sample["question"],
+                     "query_toks": sample["query_toks"],
+                     "query_toks_no_value": sample["query_toks_no_value"],
+                     "question_toks": sample["question_toks"],
+                 }
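
A minimal sketch of exercising this script locally with the `datasets` library (assuming a local copy of `spider.py`; `download_and_extract` fetches the archive from `_URL` on first use):

```python
from datasets import load_dataset

# Point load_dataset at the local script; splits come from _split_generators.
dataset = load_dataset("./spider.py")

print(dataset["train"].num_rows)       # 7000
print(dataset["validation"].num_rows)  # 1034
```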