michaelnetbiz committed
Commit 45bc22a
1 Parent(s): 0f6c9b4
Add scripts/fetch_data.py, scripts/prep_data.py
Files changed:
- .gitignore +3 -0
- data/test.tar.gz +0 -0
- data/train.tar.gz +0 -0
- kendex.py +0 -137
- requirements.txt +68 -6
- scripts/fetch_data.py +26 -0
- scripts/prep_data.py +26 -0
.gitignore
CHANGED
@@ -159,3 +159,6 @@ cython_debug/
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 .idea/
+
+/data
+.DS_Store
data/test.tar.gz
DELETED
File without changes

data/train.tar.gz
DELETED
File without changes
kendex.py
DELETED
@@ -1,137 +0,0 @@
-import csv
-import json
-import os
-
-import datasets
-
-_CITATION = """\
-@InProceedings{
-huggingface:dataset,
-title = {Kendex: A TTS Dataset for the Glory of GOD!},
-author={michaelnetbiz},
-year={2023}
-}
-"""
-
-_DESCRIPTION = """\
-This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
-"""
-
-_HOMEPAGE = "https://michaelnet.biz/kendex"
-
-_LICENSE = "MIT"
-
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS = {
-    "kendex": "https://huggingface.co/kendex-kendex.zip",
-}
-
-
-class Kendex(datasets.GeneratorBasedBuilder):
-    VERSION = datasets.Version("1.1.0")
-
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="kendex", version=VERSION,
-                               description="This is kendex"),
-    ]
-
-    # It's not mandatory to have a default configuration. Just use one if it make sense.
-    DEFAULT_CONFIG_NAME = "kendex"
-
-    def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        # This is the name of the configuration selected in BUILDER_CONFIGS above
-
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            # Here we define them above because they are different between the two configurations
-            features=datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option1": datasets.Value("string"),
-                    "answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            ),
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        urls = _URLS[self.config.name]
-        data_dir = dl_manager.download_and_extract(urls)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "train.jsonl"),
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "dev.jsonl"),
-                    "split": "dev",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "test.jsonl"),
-                    "split": "test"
-                },
-            ),
-        ]
-
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        with open(filepath, encoding="utf-8") as f:
-            for key, row in enumerate(f):
-                data = json.loads(row)
-                if self.config.name == "first_domain":
-                    # Yields examples as (key, example) tuples
                    yield key, {
-                        "sentence": data["sentence"],
-                        "option1": data["option1"],
-                        "answer": "" if split == "test" else data["answer"],
-                    }
-                else:
-                    yield key, {
-                        "sentence": data["sentence"],
-                        "option2": data["option2"],
-                        "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
-                    }
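Note: the deleted kendex.py above was still the stock `datasets` loading-script template (placeholder citation, winogrande-style `sentence`/`option1`/`answer` features, and a `_generate_examples` branch keyed on a `first_domain` config that was never registered), so it never actually described this TTS data. With the script gone, the dataset is instead pushed prebuilt by scripts/prep_data.py below, after which loading reduces to the standard Hub API. A minimal sketch, assuming the push has completed and the repo is accessible:

    from datasets import load_dataset

    # push_to_hub writes a single "train" split by default
    ds = load_dataset("michaelnetbiz/kendex", split="train")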
requirements.txt
CHANGED
@@ -1,49 +1,90 @@
 aiohttp==3.8.5
 aiosignal==1.3.1
+anyio==4.0.0
 appnope==0.1.3
+argon2-cffi==23.1.0
+argon2-cffi-bindings==21.2.0
+arrow==1.2.3
 asttokens==2.4.0
+async-lru==2.0.4
 async-timeout==4.0.3
 attrs==23.1.0
 audioread==3.0.0
+Babel==2.12.1
 backcall==0.2.0
+beautifulsoup4==4.12.2
+bleach==6.0.0
+boto3==1.28.61
+botocore==1.31.61
 certifi==2023.7.22
 cffi==1.15.1
 charset-normalizer==3.2.0
 comm==0.1.4
 datasets==2.14.5
-debugpy==1.
+debugpy==1.8.0
 decorator==5.1.1
+defusedxml==0.7.1
 dill==0.3.7
 executing==1.2.0
-
+fastjsonschema==2.18.0
+filelock==3.12.4
+fqdn==1.5.1
 frozenlist==1.4.0
 fsspec==2023.6.0
 huggingface-hub==0.16.4
 idna==3.4
 ipykernel==6.25.2
 ipython==8.15.0
+ipython-genutils==0.2.0
+ipywidgets==8.1.1
+isoduration==20.11.0
 jedi==0.19.0
+Jinja2==3.1.2
+jmespath==1.0.1
 joblib==1.3.2
+json5==0.9.14
+jsonpointer==2.4
+jsonschema==4.19.0
+jsonschema-specifications==2023.7.1
+jupyter==1.0.0
 jupyter-console==6.6.3
+jupyter-events==0.7.0
+jupyter-lsp==2.2.0
 jupyter_client==8.3.1
 jupyter_core==5.3.1
+jupyter_server==2.7.3
+jupyter_server_terminals==0.4.4
+jupyterlab==4.0.6
+jupyterlab-pygments==0.2.2
+jupyterlab-widgets==3.0.9
+jupyterlab_server==2.25.0
 lazy_loader==0.3
 librosa==0.10.1
 llvmlite==0.40.1
+MarkupSafe==2.1.3
 matplotlib-inline==0.1.6
+mistune==3.0.1
 msgpack==1.0.5
 multidict==6.0.4
 multiprocess==0.70.15
-
+nbclient==0.8.0
+nbconvert==7.8.0
+nbformat==5.9.2
+nest-asyncio==1.5.8
+notebook==7.0.3
+notebook_shim==0.2.3
 numba==0.57.1
-numpy==1.
+numpy==1.26.0
+overrides==7.4.0
 packaging==23.1
 pandas==2.1.0
+pandocfilters==1.5.0
 parso==0.8.3
 pexpect==4.8.0
 pickleshare==0.7.5
 platformdirs==3.10.0
 pooch==1.7.0
+prometheus-client==0.17.1
 prompt-toolkit==3.0.39
 psutil==5.9.5
 ptyprocess==0.7.0
@@ -52,23 +93,44 @@ pyarrow==13.0.0
 pycparser==2.21
 Pygments==2.16.1
 python-dateutil==2.8.2
+python-json-logger==2.0.7
 pytz==2023.3.post1
 PyYAML==6.0.1
 pyzmq==25.1.1
+qtconsole==5.4.4
+QtPy==2.4.0
+referencing==0.30.2
+regex==2023.10.3
 requests==2.31.0
+rfc3339-validator==0.1.4
+rfc3986-validator==0.1.1
+rpds-py==0.10.3
+s3transfer==0.7.0
+safetensors==0.3.3
 scikit-learn==1.3.0
 scipy==1.11.2
+Send2Trash==1.8.2
 six==1.16.0
+sniffio==1.3.0
 soundfile==0.12.1
+soupsieve==2.5
 soxr==0.3.6
 stack-data==0.6.2
+terminado==0.17.1
 threadpoolctl==3.2.0
+tinycss2==1.2.1
+tokenizers==0.14.0
 tornado==6.3.3
 tqdm==4.66.1
-traitlets==5.
+traitlets==5.10.0
+transformers==4.34.0
 typing_extensions==4.7.1
 tzdata==2023.3
-
+uri-template==1.3.0
 wcwidth==0.2.6
+webcolors==1.13
+webencodings==0.5.1
+websocket-client==1.6.3
+widgetsnbextension==4.0.9
 xxhash==3.3.0
 yarl==1.9.2
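The new pins track the tooling this commit introduces: boto3/botocore/s3transfer for the S3 fetch, the Jupyter stack for notebooks, and transformers/tokenizers/safetensors for downstream modeling. A quick sanity check, assuming the updated requirements are installed; these are the imports the two new scripts rely on:

    import boto3            # scripts/fetch_data.py: S3 download
    import pandas           # scripts/prep_data.py: metadata parsing
    import huggingface_hub  # scripts/prep_data.py: Hub login
    from datasets import Audio, Dataset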
scripts/fetch_data.py
ADDED
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+
+import os
+import boto3
+import tarfile
+from pathlib import Path
+
+DATASET = 'kendex.tar.gz'
+
+repo_root = Path(__file__).resolve().parent.parent
+
+local_path = os.path.join(repo_root, 'data', DATASET)
+
+if not Path(local_path).exists():
+    s3 = boto3.resource('s3')
+    kendex = s3.Bucket('kendex')
+    kendex.download_file(DATASET, local_path)
+    print('downloaded')
+
+expanded_path = os.path.join(repo_root, 'data', 'kendex')
+
+if not Path(expanded_path).exists():
+    file = tarfile.open(local_path)
+    file.extractall(expanded_path)
+    file.close()
+    print('untar\'d, gunzip\'d')
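fetch_data.py assumes AWS credentials that boto3 can resolve through its usual chain (environment variables, ~/.aws/credentials, or an instance role) and an existing data/ directory, which is now gitignored. A hedged pre-flight sketch; the helper name is illustrative, not part of the commit:

    import boto3
    from botocore.exceptions import ClientError, NoCredentialsError

    def can_reach_bucket(name: str = "kendex") -> bool:
        # HEAD the bucket to confirm both credentials and access
        try:
            boto3.client("s3").head_bucket(Bucket=name)
            return True
        except (ClientError, NoCredentialsError):
            return False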
scripts/prep_data.py
ADDED
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+import os
+from pathlib import Path
+
+import huggingface_hub
+from datasets import Audio, Dataset
+import pandas as pd
+
+repo_root = Path(__file__).resolve().parent.parent
+
+metadata = os.path.join(repo_root, 'data', 'kendex', 'metadata.csv')
+wavs = os.path.join(repo_root, 'data', 'kendex', 'wavs')
+df = pd.read_csv(metadata, delimiter='|', header=None)
+
+dataset = Dataset.from_pandas(pd.DataFrame({
+    'audio': pd.Series([
+        os.path.join(wavs, f"{f}.wav") for f in df[0]
+    ]),
+    'text': df[1],
+}))
+
+dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))
+
+huggingface_hub.login()
+
+dataset.push_to_hub("michaelnetbiz/kendex")