"""Spider: A Large-Scale Human-Labeled Dataset for Text-to-SQL Tasks""" |
|
|
|
|
|
import json |
|
import os |
|
import textwrap |
|
|
|
import datasets |
|
|
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
|
|
_CITATION = """\
@article{yu2018spider,
    title={Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-sql task},
    author={Yu, Tao and Zhang, Rui and Yang, Kai and Yasunaga, Michihiro and Wang, Dongxu and Li, Zifan and Ma, James and Li, Irene and Yao, Qingning and Roman, Shanelle and others},
    journal={arXiv preprint arXiv:1809.08887},
    year={2018}
}
"""

_DESCRIPTION = """\
Spider is a large-scale complex and cross-domain semantic parsing and text-to-SQL dataset annotated by 11 college students.
"""

_HOMEPAGE = "https://yale-lily.github.io/spider"

_LICENSE = "CC BY-SA 4.0"

_URL = "https://huggingface.co/datasets/SALT-NLP/spider_VALUE/resolve/main/data.zip"
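# The archive above is expected to unpack into a top-level `data/` directory
# holding one train/dev JSON pair per dialect (see BUILDER_CONFIGS below);
# `_split_generators` relies on this layout.
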
class SpiderConfig(datasets.BuilderConfig):
    """BuilderConfig for Spider: one config per English dialect variant."""

    def __init__(self, name, description, train_path, dev_path, **kwargs):
        super(SpiderConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.name = name
        self.description = description
        # Per-config locations of the train/dev JSON files inside the archive.
        self.train_path = train_path
        self.dev_path = dev_path

class Spider(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        SpiderConfig(
            name="AppE",
            description=textwrap.dedent(
                """\
                An Appalachian English variant of a large-scale complex and cross-domain semantic parsing and text-to-SQL dataset annotated by 11 college students"""
            ),
            train_path="train_spider_AppE.json",
            dev_path="dev_AppE.json",
        ),
        SpiderConfig(
            name="ChcE",
            description=textwrap.dedent(
                """\
                A Chicano English variant of a large-scale complex and cross-domain semantic parsing and text-to-SQL dataset annotated by 11 college students"""
            ),
            train_path="train_spider_ChcE.json",
            dev_path="dev_ChcE.json",
        ),
        SpiderConfig(
            name="CollSgE",
            description=textwrap.dedent(
                """\
                A Colloquial Singapore English (Singlish) variant of a large-scale complex and cross-domain semantic parsing and text-to-SQL dataset annotated by 11 college students"""
            ),
            train_path="train_spider_CollSgE.json",
            dev_path="dev_CollSgE.json",
        ),
        SpiderConfig(
            name="IndE",
            description=textwrap.dedent(
                """\
                An Indian English variant of a large-scale complex and cross-domain semantic parsing and text-to-SQL dataset annotated by 11 college students"""
            ),
            train_path="train_spider_IndE.json",
            dev_path="dev_IndE.json",
        ),
        SpiderConfig(
            name="UAAVE",
            description=textwrap.dedent(
                """\
                An Urban African American Vernacular English variant of a large-scale complex and cross-domain semantic parsing and text-to-SQL dataset annotated by 11 college students"""
            ),
            train_path="train_spider_UAAVE.json",
            dev_path="dev_UAAVE.json",
        ),
        SpiderConfig(
            name="MULTI",
            description=textwrap.dedent(
                """\
                A mixed-dialectal variant of a large-scale complex and cross-domain semantic parsing and text-to-SQL dataset annotated by 11 college students"""
            ),
            train_path="train_spider_MULTI.json",
            dev_path="dev_MULTI.json",
        ),
    ]
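    # Illustrative usage (assumes the script is consumed from the Hub repo in
    # _URL, i.e. "SALT-NLP/spider_VALUE"; the variable name is arbitrary):
    #
    #   from datasets import load_dataset
    #   appe = load_dataset("SALT-NLP/spider_VALUE", "AppE")
    #   print(appe["train"][0]["question"])
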
    def _info(self):
        features = datasets.Features(
            {
                "db_id": datasets.Value("string"),
                "query": datasets.Value("string"),
                "question": datasets.Value("string"),
                "query_toks": datasets.features.Sequence(datasets.Value("string")),
                "query_toks_no_value": datasets.features.Sequence(datasets.Value("string")),
                "question_toks": datasets.features.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
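    # Each example is a flat record matching the features above, e.g.
    # (illustrative values in the style of the original Spider release):
    #   {"db_id": "concert_singer", "query": "SELECT count(*) FROM singer",
    #    "question": "How many singers do we have?", ...}
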
    def _split_generators(self, dl_manager):
        downloaded_filepath = dl_manager.download_and_extract(_URL)
        downloaded_filepath = os.path.join(downloaded_filepath, "data")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_filepath": os.path.join(downloaded_filepath, self.config.train_path),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_filepath": os.path.join(downloaded_filepath, self.config.dev_path),
                },
            ),
        ]
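    # Each dialect file is assumed to be a JSON array of objects carrying at
    # least the six keys read below; any extra keys in the raw files are ignored.
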
    def _generate_examples(self, data_filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", data_filepath)
        with open(data_filepath, encoding="utf-8") as f:
            spider = json.load(f)
            for idx, sample in enumerate(spider):
                yield idx, {
                    "db_id": sample["db_id"],
                    "query": sample["query"],
                    "question": sample["question"],
                    "query_toks": sample["query_toks"],
                    "query_toks_no_value": sample["query_toks_no_value"],
                    "question_toks": sample["question_toks"],
                }
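
# Local smoke test (the script filename here is hypothetical; `datasets-cli`
# is installed alongside the `datasets` library and will download the archive):
#
#   datasets-cli test ./spider_VALUE.py --save_info --all_configs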