Datasets:
spider
Languages:
English
Multilinguality:
monolingual
Size Categories:
1K<n<10K
Annotations Creators:
expert-generated
Source Datasets:
original
ArXiv:
1809.08887
Tags:
text-to-sql
License:
cc-by-sa-4.0

albertvillanova (HF staff) committed
Commit 9757161
1 Parent(s): 3374e30

Delete loading script
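With the script gone, `datasets` resolves this repo from its hosted data files rather than by executing spider.py. A minimal usage sketch (assuming a recent `datasets` release and that the repo keeps its train/validation splits):

from datasets import load_dataset

# No loading script is executed; splits come from the Hub's data files.
ds = load_dataset("spider")
print(ds)                          # expected: "train" and "validation" splits
print(ds["train"][0]["question"])  # natural-language question
print(ds["train"][0]["query"])     # gold SQL query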

Files changed (1)
  1. spider.py +0 -109
spider.py DELETED
@@ -1,109 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Spider: A Large-Scale Human-Labeled Dataset for Text-to-SQL Tasks"""
-
-
- import json
- import os
-
- import datasets
-
-
- logger = datasets.logging.get_logger(__name__)
-
-
- _CITATION = """\
- @article{yu2018spider,
-   title={Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-sql task},
-   author={Yu, Tao and Zhang, Rui and Yang, Kai and Yasunaga, Michihiro and Wang, Dongxu and Li, Zifan and Ma, James and Li, Irene and Yao, Qingning and Roman, Shanelle and others},
-   journal={arXiv preprint arXiv:1809.08887},
-   year={2018}
- }
- """
-
- _DESCRIPTION = """\
- Spider is a large-scale complex and cross-domain semantic parsing and text-to-SQL dataset annotated by 11 college students
- """
-
- _HOMEPAGE = "https://yale-lily.github.io/spider"
-
- _LICENSE = "CC BY-SA 4.0"
-
- _URL = "https://huggingface.co/datasets/spider/resolve/main/data/spider.zip"
-
-
- class Spider(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="spider",
-             version=VERSION,
-             description="Spider: A Large-Scale Human-Labeled Dataset for Text-to-SQL Tasks",
-         ),
-     ]
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "db_id": datasets.Value("string"),
-                 "query": datasets.Value("string"),
-                 "question": datasets.Value("string"),
-                 "query_toks": datasets.features.Sequence(datasets.Value("string")),
-                 "query_toks_no_value": datasets.features.Sequence(datasets.Value("string")),
-                 "question_toks": datasets.features.Sequence(datasets.Value("string")),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         downloaded_filepath = dl_manager.download_and_extract(_URL)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "data_filepath": os.path.join(downloaded_filepath, "spider/train_spider.json"),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "data_filepath": os.path.join(downloaded_filepath, "spider/dev.json"),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, data_filepath):
-         """This function returns the examples in the raw (text) form."""
-         logger.info("generating examples from = %s", data_filepath)
-         with open(data_filepath, encoding="utf-8") as f:
-             spider = json.load(f)
-             for idx, sample in enumerate(spider):
-                 yield idx, {
-                     "db_id": sample["db_id"],
-                     "query": sample["query"],
-                     "question": sample["question"],
-                     "query_toks": sample["query_toks"],
-                     "query_toks_no_value": sample["query_toks_no_value"],
-                     "question_toks": sample["question_toks"],
-                 }
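
For reference, the deleted generator logic is easy to reproduce by hand. A minimal sketch of the same parsing, assuming spider.zip from the _URL above has been downloaded and extracted to ./spider/:

import json

# Each split file is a JSON array; the script yielded six fields per sample.
with open("spider/train_spider.json", encoding="utf-8") as f:
    samples = json.load(f)

for idx, sample in enumerate(samples[:3]):
    example = {
        "db_id": sample["db_id"],
        "query": sample["query"],
        "question": sample["question"],
        "query_toks": sample["query_toks"],
        "query_toks_no_value": sample["query_toks_no_value"],
        "question_toks": sample["question_toks"],
    }
    print(idx, example["db_id"], example["question"])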