Datasets:

Languages:
Vietnamese
ArXiv:
License:
holylovenia committed on
Commit
37cfe69
1 Parent(s): eac3d49

Upload vivos.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. vivos.py +204 -0
vivos.py ADDED
@@ -0,0 +1,204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from pathlib import Path
17
+ from typing import Dict, List, Tuple
18
+
19
+ import datasets
20
+
21
+ from seacrowd.utils import schemas
22
+ from seacrowd.utils.configs import SEACrowdConfig
23
+ from seacrowd.utils.constants import Licenses, Tasks
24
+
25
# Bibliographic reference for the VIVOS corpus paper (WLSI/OIAF4HLT 2016).
_CITATION = """\
@inproceedings{luong-vu-2016-non,
    title = "A non-expert {K}aldi recipe for {V}ietnamese Speech Recognition System",
    author = "Luong, Hieu-Thi and
      Vu, Hai-Quan",
    editor = "Murakami, Yohei and
      Lin, Donghui and
      Ide, Nancy and
      Pustejovsky, James",
    booktitle = "Proceedings of the Third International Workshop on Worldwide Language Service
    Infrastructure and Second Workshop on Open Infrastructures and Analysis Frameworks for
    Human Language Technologies ({WLSI}/{OIAF}4{HLT}2016)",
    month = dec,
    year = "2016",
    address = "Osaka, Japan",
    publisher = "The COLING 2016 Organizing Committee",
    url = "https://aclanthology.org/W16-5207",
    pages = "51--55",
    abstract = "In this paper we describe a non-expert setup for Vietnamese speech recognition
    system using Kaldi toolkit. We collected a speech corpus over fifteen hours from about fifty
    Vietnamese native speakers and using it to test the feasibility of our setup. The essential
    linguistic components for the Automatic Speech Recognition (ASR) system was prepared basing
    on the written form of the language instead of expertise knowledge on linguistic and phonology
    as commonly seen in rich resource languages like English. The modeling of tones by integrating
    them into the phoneme and using the phonetic decision tree is also discussed. Experimental
    results showed this setup for ASR systems does yield competitive results while still have
    potentials for further improvements.",
}
"""

# Canonical dataset identifier used to build config names below.
_DATASETNAME = "vivos"

_DESCRIPTION = """\
VIVOS is a Vietnamese speech corpus consisting of 15 hours of recording speech prepared for
Automatic Speech Recognition task. This speech corpus is collected by recording speech data
from more than 50 native Vietnamese volunteers.
"""

_HOMEPAGE = "https://zenodo.org/records/7068130"

# ISO 639-3 code(s) of the language(s) covered by this dataset.
_LANGUAGES = ["vie"]

_LICENSE = Licenses.CC_BY_SA_4_0.value

# False: data is fetched from the remote URLs below, not loaded from a local path.
_LOCAL = False

# Remote files: a single tarball with all audio, plus per-split transcript files.
_URLS = {
    "audio": "https://huggingface.co/datasets/vivos/resolve/main/data/vivos.tar.gz",
    "train_prompt": "https://huggingface.co/datasets/vivos/resolve/main/data/prompts-train.txt.gz",
    "test_prompt": "https://huggingface.co/datasets/vivos/resolve/main/data/prompts-test.txt.gz",
}

_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

logger = datasets.logging.get_logger(__name__)
86
class VIVOSDataset(datasets.GeneratorBasedBuilder):
    """
    VIVOS is a Vietnamese speech corpus from https://zenodo.org/records/7068130.

    Supports two schemas:
      * ``source``: speaker_id / path / audio / sentence.
      * ``seacrowd_sptext``: the shared SEACrowd speech-text schema.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_sptext",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_sptext",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with features matching the selected schema."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "sentence": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_sptext":
            features = schemas.speech_text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """
        Returns SplitGenerators for the train and test splits.

        The audio tarball is deliberately NOT extracted: both splits stream it via
        ``dl_manager.iter_archive``, avoiding unpacking ~15 hours of audio to disk.
        """
        audio_path = dl_manager.download(_URLS["audio"])
        train_prompt_path = dl_manager.download_and_extract(_URLS["train_prompt"])
        test_prompt_path = dl_manager.download_and_extract(_URLS["test_prompt"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "prompts_path": train_prompt_path,
                    "clips_path": "vivos/train/waves",
                    "audio_files": dl_manager.iter_archive(audio_path),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "prompts_path": test_prompt_path,
                    "clips_path": "vivos/test/waves",
                    "audio_files": dl_manager.iter_archive(audio_path),
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, prompts_path: Path, clips_path: Path, audio_files, split: str) -> Tuple[int, Dict]:
        """
        Yields examples as (key, example) tuples.

        Args:
            prompts_path: extracted transcript file; each line is
                ``<utterance_id> <transcript>`` where the utterance id is
                ``<speaker_id>_<index>``.
            clips_path: path prefix of this split's wav files inside the tarball.
            audio_files: iterator over ``(path, file_obj)`` pairs from the tarball.
            split: split name (unused; kept for interface compatibility).
        """
        # First pass: index transcripts by the archive path of their audio file.
        # The two schemas differ only in the dict they store, so a single loop
        # replaces the previous duplicated per-schema loops.
        examples = {}
        with open(prompts_path, encoding="utf-8") as f:
            audio_id = 0
            for row in f:
                data = row.strip().split(" ", 1)
                if len(data) < 2:
                    # Fix: skip blank/malformed prompt lines (no transcript after
                    # the utterance id) instead of raising IndexError.
                    continue
                utterance_id, sentence = data
                speaker_id = utterance_id.split("_")[0]
                audio_path = "/".join([clips_path, speaker_id, utterance_id + ".wav"])
                if self.config.schema == "source":
                    examples[audio_path] = {
                        "speaker_id": speaker_id,
                        "path": audio_path,
                        "sentence": sentence,
                    }
                elif self.config.schema == "seacrowd_sptext":
                    examples[audio_path] = {
                        "id": audio_id,
                        "path": audio_path,
                        "text": sentence,
                        "speaker_id": speaker_id,
                        "metadata": {
                            # Speaker age/gender are not distributed with VIVOS.
                            "speaker_age": None,
                            "speaker_gender": None,
                        },
                    }
                audio_id += 1

        # Second pass: stream the tarball and attach raw audio bytes to each
        # indexed example. Every key in `examples` was built from `clips_path`,
        # so dict membership alone restricts us to this split's clips (the old
        # startswith() pre-check and dead `else: continue` were redundant).
        idx = 0
        for path, file_obj in audio_files:
            if path in examples:
                yield idx, {**examples[path], "audio": {"path": path, "bytes": file_obj.read()}}
                idx += 1