black/isort
temp_africaNLP_keyword_spotting_for_african_languages.py CHANGED
@@ -1,4 +1,4 @@
-# Testing how to load rarfile from Zenodo, specifically https://zenodo.org/record/4661645.
+# Testing how to load rarfile from Zenodo, specifically https://zenodo.org/record/4661645.
 # https://github.com/huggingface/datasets/blob/dfdd2f949c1840926c02ae47f0f0c43083ef0b1f/datasets/common_voice/common_voice.py#L661 provided some inspiration
 # also https://huggingface.co/docs/datasets/master/dataset_script.html
 
@@ -22,11 +22,10 @@
 import csv
 import json
 import os
-import rarfile
-import pandas as pd
 
 import datasets
-
+import pandas as pd
+import rarfile
 
 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
@@ -55,7 +54,7 @@ _LICENSE = ""
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLs = {
-    "first_domain": "https://zenodo.org/record/4661645/files/Keyword_spotting_dataset_v0.01_17042021.rar"
+    "first_domain": "https://zenodo.org/record/4661645/files/Keyword_spotting_dataset_v0.01_17042021.rar",
 }
 
 
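For context, a stand-alone sketch (not part of the commit) of fetching and unpacking this Zenodo archive with the `rarfile` package the script imports; the local filenames here are placeholders, and `rarfile` needs an extraction backend such as `unrar` available on PATH:

    import urllib.request

    import rarfile

    URL = "https://zenodo.org/record/4661645/files/Keyword_spotting_dataset_v0.01_17042021.rar"
    # Download to a placeholder local filename.
    archive_path, _ = urllib.request.urlretrieve(URL, "keyword_spotting.rar")

    with rarfile.RarFile(archive_path) as rf:
        for info in rf.infolist()[:10]:  # peek at the first few members
            print(info.filename, info.file_size)
        rf.extractall("data")  # expected to contain the data_17042021/ subfolder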
@@ -77,20 +76,26 @@ class TempAfricaNLPKeywordSpottingForAfricanLanguages(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
+        datasets.BuilderConfig(
+            name="first_domain",
+            version=VERSION,
+            description="This part of my dataset covers a first domain",
+        ),
     ]
 
     DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it make sense.
 
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
+        if (
+            self.config.name == "first_domain"
+        ):  # This is the name of the configuration selected in BUILDER_CONFIGS above
             features = datasets.Features(
                 {
                     "sentence": datasets.Value("string"),
                     "path": datasets.Value("string"),
-                    "audio": datasets.features.Audio()
-                    # 'id', 'client_id', 'path', 'sentence', 'original_sentence_id', 'created_at', 'bucket', 'locale_id'
+                    "audio": datasets.features.Audio()  # TODO: sampling rate? https://huggingface.co/docs/datasets/master/package_reference/main_classes.html#datasets.Audio
+                    # TODO: 'id', 'client_id', 'path', 'sentence', 'original_sentence_id', 'created_at', 'bucket', 'locale_id'
                 }
             )
         else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
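On the sampling-rate TODO: `datasets.features.Audio` takes an optional `sampling_rate` argument, and casting a string column of file paths to it makes decoding happen on access. A minimal sketch, with a hypothetical file name and an assumed 16 kHz rate:

    import datasets

    features = datasets.Features(
        {
            "sentence": datasets.Value("string"),
            "path": datasets.Value("string"),
            "audio": datasets.features.Audio(sampling_rate=16_000),  # 16 kHz is an assumption
        }
    )
    ds = datasets.Dataset.from_dict(
        {"sentence": ["hello"], "path": ["clip_0001.mp3"], "audio": ["clip_0001.mp3"]},
        features=features,
    )
    # Accessing ds[0]["audio"] decodes the file to {"path", "array", "sampling_rate"}.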
@@ -128,8 +133,10 @@ class TempAfricaNLPKeywordSpottingForAfricanLanguages(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         my_urls = _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(my_urls)
-        data_dir = os.path.join(data_dir, "data_17042021")  # the rar file has a subfolder.
+        data_dir = dl_manager.download_and_extract(my_urls)
+        data_dir = os.path.join(
+            data_dir, "data_17042021"
+        )  # the rar file has a subfolder.
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
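The two-step `data_dir` assignment implies a particular extracted layout, roughly the following (inferred from the `os.path.join` calls, not verified against the archive):

    # <extraction cache>/data_17042021/
    #     clips.xlsx      <- metadata table read by _generate_examples
    #     <audio files at the relative paths in the "path" column>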
@@ -137,36 +144,39 @@ class TempAfricaNLPKeywordSpottingForAfricanLanguages(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "filepath": os.path.join(data_dir, "clips.xlsx"),
                     "split": "train",
-                    "data_dir":data_dir,
+                    "data_dir": data_dir,
                 },
             ),
-        #
-        # datasets.SplitGenerator(
-        #     name=datasets.Split.TEST,
-        #     # These kwargs will be passed to _generate_examples
-        #     gen_kwargs={
-        #         "filepath": os.path.join(data_dir, "test.jsonl"),
-        #         "split": "test"
-        #     },
-        # ),
-        # datasets.SplitGenerator(
-        #     name=datasets.Split.VALIDATION,
-        #     # These kwargs will be passed to _generate_examples
-        #     gen_kwargs={
-        #         "filepath": os.path.join(data_dir, "dev.jsonl"),
-        #         "split": "dev",
-        #     },
-        # ),
-        #
+            #
+            # datasets.SplitGenerator(
+            #     name=datasets.Split.TEST,
+            #     # These kwargs will be passed to _generate_examples
+            #     gen_kwargs={
+            #         "filepath": os.path.join(data_dir, "test.jsonl"),
+            #         "split": "test"
+            #     },
+            # ),
+            # datasets.SplitGenerator(
+            #     name=datasets.Split.VALIDATION,
+            #     # These kwargs will be passed to _generate_examples
+            #     gen_kwargs={
+            #         "filepath": os.path.join(data_dir, "dev.jsonl"),
+            #         "split": "dev",
+            #     },
+            # ),
+            #
         ]
 
     def _generate_examples(
-        self, filepath, split, data_dir  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+        self,
+        filepath,
+        split,
+        data_dir,  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     ):
-        """
+        """Yields examples as (key, example) tuples."""
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
-        clips_df= pd.read_excel(filepath)
+        clips_df = pd.read_excel(filepath)
         with open(filepath, encoding="utf-8") as f:
             for id_, row in clips_df.iterrows():
                 data = row
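Worth noting: `pd.read_excel` already loads the workbook, so the `with open(filepath, encoding="utf-8") as f:` wrapper is redundant (the handle `f` is never used, and an .xlsx is a binary file, not UTF-8 text). A sketch of the same loop without it, assuming clips.xlsx has `sentence` and `path` columns:

    import os

    import pandas as pd


    def generate_examples(filepath, data_dir):
        # Stand-alone version of the loop in _generate_examples (illustrative only).
        clips_df = pd.read_excel(filepath)
        for id_, row in clips_df.iterrows():
            yield id_, {
                "sentence": row["sentence"],
                "path": row["path"],
                "audio": os.path.join(data_dir, row["path"]),
            }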
@@ -175,5 +185,7 @@ class TempAfricaNLPKeywordSpottingForAfricanLanguages(datasets.GeneratorBasedBuilder):
                 yield id_, {
                     "sentence": data["sentence"],
                     "path": data["path"],
-                    "audio": os.path.join(data_dir, data["path"]),  # set the audio feature, should be able to handle things automatically?
-                    }
+                    "audio": os.path.join(
+                        data_dir, data["path"]
+                    ),  # set the audio feature, should be able to handle things automatically?
+                }
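Once the script loads, usage would look something like the template comments suggest (untested sketch; the script path is the file above):

    import datasets

    ds = datasets.load_dataset(
        "temp_africaNLP_keyword_spotting_for_african_languages.py",
        "first_domain",
        split="train",
    )
    print(ds[0]["sentence"], ds[0]["path"])  # "audio" decodes via the Audio feature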