Update files from the datasets library (from 1.16.0)
Browse files
Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
- dataset_infos.json +1 -1
- dummy/1.1.0/dummy_data.zip +2 -2
- vivos.py +33 -20
dataset_infos.json
CHANGED
|
@@ -1 +1 @@
|
|
| 1 |
-
{"default": {"description": "VIVOS is a free Vietnamese speech corpus consisting of 15 hours of recording speech prepared for\nVietnamese Automatic Speech Recognition task.\nThe corpus was prepared by AILAB, a computer science lab of VNUHCM - University of Science, with Prof. Vu Hai Quan is the head of.\nWe publish this corpus in hope to attract more scientists to solve Vietnamese speech recognition problems.\n", "citation": "@InProceedings{vivos:2016,\nAddress = {Ho Chi Minh, Vietnam}\ntitle = {VIVOS: 15 hours of recording speech prepared for Vietnamese Automatic Speech Recognition},\nauthor={Prof. Vu Hai Quan},\nyear={2016}\n}\n", "homepage": "https://ailab.hcmus.edu.vn/vivos", "license": "cc-by-sa-4.0", "features": {"speaker_id": {"dtype": "string", "id": null, "_type": "Value"}, "path": {"dtype": "string", "id": null, "_type": "Value"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "vivos_dataset", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes":
|
|
|
|
| 1 |
+
{"default": {"description": "VIVOS is a free Vietnamese speech corpus consisting of 15 hours of recording speech prepared for\nVietnamese Automatic Speech Recognition task.\nThe corpus was prepared by AILAB, a computer science lab of VNUHCM - University of Science, with Prof. Vu Hai Quan is the head of.\nWe publish this corpus in hope to attract more scientists to solve Vietnamese speech recognition problems.\n", "citation": "@InProceedings{vivos:2016,\nAddress = {Ho Chi Minh, Vietnam}\ntitle = {VIVOS: 15 hours of recording speech prepared for Vietnamese Automatic Speech Recognition},\nauthor={Prof. Vu Hai Quan},\nyear={2016}\n}\n", "homepage": "https://ailab.hcmus.edu.vn/vivos", "license": "cc-by-sa-4.0", "features": {"speaker_id": {"dtype": "string", "id": null, "_type": "Value"}, "path": {"dtype": "string", "id": null, "_type": "Value"}, "audio": {"sampling_rate": 16000, "mono": true, "_storage_dtype": "struct", "id": null, "_type": "Audio"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "vivos_dataset", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1722000675, "num_examples": 11660, "dataset_name": "vivos_dataset"}, "test": {"name": "test", "num_bytes": 86120132, "num_examples": 760, "dataset_name": "vivos_dataset"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/vivos/train/prompts.txt": {"num_bytes": 1075754, "checksum": "d6c6fcbe258d80d0f63e0f87d414b805f6ae11f41d40cdba5454152c3d6f14c0"}, "https://s3.amazonaws.com/datasets.huggingface.co/vivos/test/prompts.txt": {"num_bytes": 56446, "checksum": "ed27898d081eaa41b1e7e38451eb85f7ca06138896b471691510e7bab1187c2e"}, "https://ailab.hcmus.edu.vn/assets/vivos.tar.gz": {"num_bytes": 1474408300, "checksum": 
"147477f7a7702cbafc2ee3808d1c142989d0dbc8d9fce8e07d5f329d5119e4ca"}}, "download_size": 1475540500, "post_processing_size": null, "dataset_size": 1808120807, "size_in_bytes": 3283661307}}
|
dummy/1.1.0/dummy_data.zip
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:26b29828f134616652520ae1ebfadc70bc2e28c25f32ff60156a019a75dbd117
|
| 3 |
+
size 14710
|
vivos.py
CHANGED
|
@@ -12,7 +12,6 @@
|
|
| 12 |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
# See the License for the specific language governing permissions and
|
| 14 |
# limitations under the License.
|
| 15 |
-
import os
|
| 16 |
|
| 17 |
import datasets
|
| 18 |
|
|
@@ -40,6 +39,11 @@ _LICENSE = "cc-by-sa-4.0"
|
|
| 40 |
|
| 41 |
_DATA_URL = "https://ailab.hcmus.edu.vn/assets/vivos.tar.gz"
|
| 42 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
|
| 44 |
class VivosDataset(datasets.GeneratorBasedBuilder):
|
| 45 |
"""VIVOS is a free Vietnamese speech corpus consisting of 15 hours of recording speech prepared for
|
|
@@ -80,46 +84,55 @@ class VivosDataset(datasets.GeneratorBasedBuilder):
|
|
| 80 |
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
|
| 81 |
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
|
| 82 |
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
train_dir =
|
| 86 |
-
test_dir =
|
| 87 |
|
| 88 |
return [
|
| 89 |
datasets.SplitGenerator(
|
| 90 |
name=datasets.Split.TRAIN,
|
| 91 |
# These kwargs will be passed to _generate_examples
|
| 92 |
gen_kwargs={
|
| 93 |
-
"
|
| 94 |
-
"path_to_clips":
|
|
|
|
| 95 |
},
|
| 96 |
),
|
| 97 |
datasets.SplitGenerator(
|
| 98 |
name=datasets.Split.TEST,
|
| 99 |
# These kwargs will be passed to _generate_examples
|
| 100 |
gen_kwargs={
|
| 101 |
-
"
|
| 102 |
-
"path_to_clips":
|
|
|
|
| 103 |
},
|
| 104 |
),
|
| 105 |
]
|
| 106 |
|
| 107 |
-
def _generate_examples(
|
| 108 |
-
self,
|
| 109 |
-
filepath,
|
| 110 |
-
path_to_clips, # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
|
| 111 |
-
):
|
| 112 |
"""Yields examples as (key, example) tuples."""
|
| 113 |
# This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
|
| 114 |
# The `key` is here for legacy reason (tfds) and is not important in itself.
|
| 115 |
-
|
| 116 |
-
with open(
|
| 117 |
-
for
|
| 118 |
data = row.strip().split(" ", 1)
|
| 119 |
speaker_id = data[0].split("_")[0]
|
| 120 |
-
|
|
|
|
| 121 |
"speaker_id": speaker_id,
|
| 122 |
-
"path":
|
| 123 |
-
"audio": os.path.join(path_to_clips, speaker_id, data[0] + ".wav"),
|
| 124 |
"sentence": data[1],
|
| 125 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
# See the License for the specific language governing permissions and
|
| 14 |
# limitations under the License.
|
|
|
|
| 15 |
|
| 16 |
import datasets
|
| 17 |
|
|
|
|
| 39 |
|
| 40 |
_DATA_URL = "https://ailab.hcmus.edu.vn/assets/vivos.tar.gz"
|
| 41 |
|
| 42 |
# Per-split transcript files hosted on S3; each line of a prompts.txt is
# "<UTTERANCE_ID> <sentence>" and is joined against the audio archive in
# _generate_examples.
_PROMPTS_URLS = {
    "train": "https://s3.amazonaws.com/datasets.huggingface.co/vivos/train/prompts.txt",
    "test": "https://s3.amazonaws.com/datasets.huggingface.co/vivos/test/prompts.txt",
}
|
| 46 |
+
|
| 47 |
|
| 48 |
class VivosDataset(datasets.GeneratorBasedBuilder):
|
| 49 |
"""VIVOS is a free Vietnamese speech corpus consisting of 15 hours of recording speech prepared for
|
|
|
|
| 84 |
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
|
| 85 |
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
|
| 86 |
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
|
| 87 |
+
prompts_paths = dl_manager.download(_PROMPTS_URLS)
|
| 88 |
+
archive = dl_manager.download(_DATA_URL)
|
| 89 |
+
train_dir = "vivos/train"
|
| 90 |
+
test_dir = "vivos/test"
|
| 91 |
|
| 92 |
return [
|
| 93 |
datasets.SplitGenerator(
|
| 94 |
name=datasets.Split.TRAIN,
|
| 95 |
# These kwargs will be passed to _generate_examples
|
| 96 |
gen_kwargs={
|
| 97 |
+
"prompts_path": prompts_paths["train"],
|
| 98 |
+
"path_to_clips": train_dir + "/waves",
|
| 99 |
+
"audio_files": dl_manager.iter_archive(archive),
|
| 100 |
},
|
| 101 |
),
|
| 102 |
datasets.SplitGenerator(
|
| 103 |
name=datasets.Split.TEST,
|
| 104 |
# These kwargs will be passed to _generate_examples
|
| 105 |
gen_kwargs={
|
| 106 |
+
"prompts_path": prompts_paths["test"],
|
| 107 |
+
"path_to_clips": test_dir + "/waves",
|
| 108 |
+
"audio_files": dl_manager.iter_archive(archive),
|
| 109 |
},
|
| 110 |
),
|
| 111 |
]
|
| 112 |
|
| 113 |
+
def _generate_examples(self, prompts_path, path_to_clips, audio_files):
|
|
|
|
|
|
|
|
|
|
|
|
|
| 114 |
"""Yields examples as (key, example) tuples."""
|
| 115 |
# This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
|
| 116 |
# The `key` is here for legacy reason (tfds) and is not important in itself.
|
| 117 |
+
examples = {}
|
| 118 |
+
with open(prompts_path, encoding="utf-8") as f:
|
| 119 |
+
for row in f:
|
| 120 |
data = row.strip().split(" ", 1)
|
| 121 |
speaker_id = data[0].split("_")[0]
|
| 122 |
+
audio_path = "/".join([path_to_clips, speaker_id, data[0] + ".wav"])
|
| 123 |
+
examples[audio_path] = {
|
| 124 |
"speaker_id": speaker_id,
|
| 125 |
+
"path": audio_path,
|
|
|
|
| 126 |
"sentence": data[1],
|
| 127 |
}
|
| 128 |
+
inside_clips_dir = False
|
| 129 |
+
id_ = 0
|
| 130 |
+
for path, f in audio_files:
|
| 131 |
+
if path.startswith(path_to_clips):
|
| 132 |
+
inside_clips_dir = True
|
| 133 |
+
if path in examples:
|
| 134 |
+
audio = {"path": path, "bytes": f.read()}
|
| 135 |
+
yield id_, {**examples[path], "audio": audio}
|
| 136 |
+
id_ += 1
|
| 137 |
+
elif inside_clips_dir:
|
| 138 |
+
break
|