update Features, and data_dir
temp_africaNLP_keyword_spotting_for_african_languages.py
CHANGED
@@ -1,4 +1,4 @@
-# Testing how to load rarfile from Zenodo.
+# Testing how to load rarfile from Zenodo, specifically https://zenodo.org/record/4661645.
 # https://github.com/huggingface/datasets/blob/dfdd2f949c1840926c02ae47f0f0c43083ef0b1f/datasets/common_voice/common_voice.py#L661 provided some inspiration
 # also https://huggingface.co/docs/datasets/master/dataset_script.html
 
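The script leans on dl_manager.download_and_extract to fetch and unpack the .rar archive from Zenodo. If the installed datasets version cannot extract rar archives, a fallback is the third-party rarfile package; this is only a sketch under that assumption (it is not part of the commit and needs an unrar backend installed on the system):

import rarfile  # third-party package; requires an unrar tool on the system

def extract_rar(archive_path: str, dest_dir: str) -> str:
    # Unpack an already-downloaded .rar by hand; the archive is said to contain
    # a data_17042021/ subfolder.
    with rarfile.RarFile(archive_path) as rf:
        rf.extractall(dest_dir)
    return dest_dir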
@@ -88,9 +88,9 @@ class TempAfricaNLPKeywordSpottingForAfricanLanguages(datasets.GeneratorBasedBui
             features = datasets.Features(
                 {
                     "sentence": datasets.Value("string"),
-                    "
-                    "
-                    #
+                    "path": datasets.Value("string"),
+                    "audio": datasets.features.Audio() # TODO: sampling rate? https://huggingface.co/docs/datasets/master/package_reference/main_classes.html#datasets.Audio
+                    # 'id', 'client_id', 'path', 'sentence', 'original_sentence_id', 'created_at', 'bucket', 'locale_id'
                 }
             )
         else: # This is an example to show how to have different features for "first_domain" and "second_domain"
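On the TODO about the sampling rate: datasets.features.Audio takes an optional sampling_rate argument, and decoded audio is resampled to it on access. A minimal sketch of the same Features block with the rate made explicit; the 16 kHz value is an assumption for illustration, not taken from the Zenodo record:

import datasets

# Sketch only: 16_000 Hz is an assumed sampling rate, not confirmed by the dataset.
features = datasets.Features(
    {
        "sentence": datasets.Value("string"),
        "path": datasets.Value("string"),
        "audio": datasets.features.Audio(sampling_rate=16_000),
    }
)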
@@ -128,7 +128,7 @@ class TempAfricaNLPKeywordSpottingForAfricanLanguages(datasets.GeneratorBasedBui
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         my_urls = _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(my_urls)
+        data_dir = dl_manager.download_and_extract(my_urls) / "data_17042021" # the rar file has a subfolder.
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
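A note on the changed data_dir line: if my_urls is a single URL string, dl_manager.download_and_extract returns a plain str path, so the / operator will raise a TypeError unless the result is wrapped in pathlib.Path. A hedged sketch of the same step inside _split_generators, joining the subfolder explicitly:

import os

# Sketch: same step, but joining the rar's subfolder with os.path.join, since the
# extracted location comes back as a str rather than a pathlib.Path.
extracted = dl_manager.download_and_extract(my_urls)
data_dir = os.path.join(extracted, "data_17042021")  # the rar file has a subfolder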
@@ -173,4 +173,5 @@ class TempAfricaNLPKeywordSpottingForAfricanLanguages(datasets.GeneratorBasedBui
            yield id_, {
                "sentence": data["sentence"],
                "path": data["path"],
+                "audio": data["path"], # set the audio feature, should be able to handle things automatically?
            }
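On the question in the added comment: with an Audio() feature declared in _info, yielding the file path as the value is the supported pattern, and the feature decodes the file lazily when the column is accessed, provided the path points at the extracted audio file. A usage sketch; the script path, config handling, and index are illustrative, not taken from the repo:

from datasets import load_dataset

# Illustrative only: load the local script and inspect one example.
ds = load_dataset("temp_africaNLP_keyword_spotting_for_african_languages.py", split="train")
example = ds[0]
print(example["sentence"], example["path"])
# With the Audio feature, example["audio"] is decoded into a dict with
# "array" (numpy samples), "sampling_rate", and "path".
print(example["audio"]["sampling_rate"], example["audio"]["array"].shape)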
|