"""A dataset script that will hit SQLite file and return the results.""" | |
import sqlite3 | |
from pathlib import Path | |
import datasets | |
# TODO: Add a real BibTeX citation for the dataset
# (find it e.g. on arXiv or on the dataset repo/website)
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""
# TODO: Replace with an actual description of the dataset
# (you can copy an official description)
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# TODO: Add links to the official dataset URLs here.
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see `_split_generators` below).
# Here: a single remote SQLite database file used for the train split.
_URLS = {
    "train": "https://huggingface.co/datasets/nateraw/sqllitetest/resolve/main/test.db",
}
# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case | |
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class NewDataset(datasets.GeneratorBasedBuilder):
    """Employee records loaded from a remote SQLite database file.

    Downloads a ``.db`` file and yields one example per row of its
    ``Employees`` table.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return dataset metadata, including the fixed five-column schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "ssn": datasets.Value("int32"),
                    "first_name": datasets.Value("string"),
                    "last_name": datasets.Value("string"),
                    "department": datasets.Value("int32"),
                    "salary": datasets.Value("int32"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the SQLite file and hand its local *path* to the generator.

        NOTE: we deliberately pass the file path — not an open connection or
        cursor — through ``gen_kwargs``: gen_kwargs must be picklable so that
        `datasets` can fingerprint and cache the split, and an sqlite3 cursor
        is not picklable. Opening the connection lazily inside
        ``_generate_examples`` also avoids leaking it when the examples are
        never generated (e.g. when the split is served from cache).
        """
        db_file = dl_manager.download(_URLS["train"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"db_file": db_file},
            )
        ]

    def _generate_examples(self, db_file):
        """Yield ``(key, example)`` pairs, one per row of the Employees table.

        Args:
            db_file: local filesystem path to the downloaded SQLite database.
        """
        conn = sqlite3.connect(db_file)
        try:
            # Name the columns explicitly so the mapping below cannot silently
            # break if the table's column order ever changes.
            cursor = conn.execute(
                "SELECT ssn, first_name, last_name, department, salary FROM Employees"
            )
            for key, (ssn, first_name, last_name, department, salary) in enumerate(cursor):
                yield str(key), {
                    "ssn": ssn,
                    "first_name": first_name,
                    "last_name": last_name,
                    "department": department,
                    "salary": salary,
                }
        finally:
            # Always close the connection, even if iteration is abandoned
            # mid-way or the query raises.
            conn.close()