holylovenia committed on
Commit
1ab1a62
1 Parent(s): 1e0f8ce

Upload librivox_indonesia.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. librivox_indonesia.py +212 -0
librivox_indonesia.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from pathlib import Path
18
+ from typing import Dict, List, Tuple
19
+
20
+ import datasets
21
+
22
+ from nusacrowd.utils import schemas
23
+ from nusacrowd.utils.configs import NusantaraConfig
24
+ from nusacrowd.utils.constants import Tasks, DEFAULT_SOURCE_VIEW_NAME, DEFAULT_NUSANTARA_VIEW_NAME
25
+
26
+ import pandas as pd
27
+
28
# BibTeX entry for citing the upstream Hugging Face dataset card.
_CITATION = """\
@misc{
research,
title={indonesian-nlp/librivox-indonesia · datasets at hugging face},
url={https://huggingface.co/datasets/indonesian-nlp/librivox-indonesia},
author={Indonesian-nlp}
}
"""

# Canonical dataset name; also used as the key into _URLS below.
_DATASETNAME = "librivox_indonesia"
_DESCRIPTION = """\
The LibriVox Indonesia dataset consists of MP3 audio and a corresponding text file we generated from the public domain audiobooks LibriVox.
We collected only languages in Indonesia for this dataset.
The original LibriVox audiobooks or sound files' duration varies from a few minutes to a few hours.
Each audio file in the speech dataset now lasts from a few seconds to a maximum of 20 seconds.
We converted the audiobooks to speech datasets using the forced alignment software we developed.
It supports multilingual, including low-resource languages, such as Acehnese, Balinese, or Minangkabau.
We can also use it for other languages without additional work to train the model.
The dataset currently consists of 8 hours in 7 languages from Indonesia.
We will add more languages or audio files as we collect them.
"""

_HOMEPAGE = "https://huggingface.co/indonesian-nlp/librivox-indonesia"

_LICENSE = "CC0"

# Base URL of the data directory on the Hugging Face Hub; the split-specific
# archive/CSV names are appended in _split_generators.
_URLS = {
    _DATASETNAME: "https://huggingface.co/datasets/indonesian-nlp/librivox-indonesia/resolve/main/data",
}
# ISO 639-3 subset codes exposed as per-language dataloader configs.
_LANGUAGES = {"ind", "sun", "jav", "min", "bug", "ban", "ace"}
# Maps subset code -> [language code used in the metadata CSV, human-readable name].
# Note the "ban" subset maps to CSV code "bal".
# NOTE(review): "bugisnese" looks like a typo for "buginese" — left as-is because
# config descriptions are generated from this string.
_LANG_CODE = {
    "ind": ["ind", "indonesian"],
    "sun": ["sun", "sundanese"],
    "jav": ["jav", "javanese"],
    "min": ["min", "minangkabau"],
    "bug": ["bug", "bugisnese"],
    "ban": ["bal", "balinese"],
    "ace": ["ace", "acehnese"]
}
# Dataset is hosted remotely, not loaded from local files.
_LOCAL = False
_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]  # example: [Tasks.TRANSLATION, Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]

# Versions for the two exposed schemas (source / nusantara).
_SOURCE_VERSION = "1.0.0"
_NUSANTARA_VERSION = "1.0.0"
72
+
73
+
74
class LibrivoxIndonesia(datasets.GeneratorBasedBuilder):
    """
    Librivox-indonesia is a speech-to-text dataset in 7 languages available in Indonesia.
    The default dataloader contains all languages, while the other available dataloaders contain a designated language.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)

    # One "all languages" config plus one config per language, for each of the
    # two schemas ("source" and "nusantara_sptext").
    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="librivox_indonesia_source",
            version=_SOURCE_VERSION,
            description="Librivox-Indonesia source schema for all languages",
            schema="source",
            subset_id="librivox_indonesia",
        )] + [
        NusantaraConfig(
            name="librivox_indonesia_{lang}_source".format(lang=lang),
            version=_SOURCE_VERSION,
            description="Librivox-Indonesia source schema for {lang} languages".format(lang=_LANG_CODE[lang][1]),
            schema="source",
            subset_id="librivox_indonesia_{lang}".format(lang=lang),
        ) for lang in _LANGUAGES] + [
        NusantaraConfig(
            name="librivox_indonesia_nusantara_sptext",
            version=_NUSANTARA_VERSION,
            description="Librivox-Indonesia Nusantara schema for all languages",
            schema="nusantara_sptext",
            subset_id="librivox_indonesia",
        )] + [
        NusantaraConfig(
            name="librivox_indonesia_{lang}_nusantara_sptext".format(lang=lang),
            version=_NUSANTARA_VERSION,
            description="Librivox-Indonesia Nusantara schema for {lang} languages".format(lang=_LANG_CODE[lang][1]),
            schema="nusantara_sptext",
            subset_id="librivox_indonesia_{lang}".format(lang=lang),
        ) for lang in _LANGUAGES]

    DEFAULT_CONFIG_NAME = "librivox_indonesia_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with the feature spec for the selected schema.

        "source" exposes the raw columns plus a 44.1 kHz Audio feature;
        "nusantara_sptext" reuses the shared Nusantara speech-text schema.
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "path": datasets.Value("string"),
                    "language": datasets.Value("string"),
                    "reader": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=44100)
                }
            )
        elif self.config.schema == "nusantara_sptext":
            features = schemas.speech_text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the per-split audio archives and metadata CSVs.

        In streaming mode the archives are not extracted
        (local_extracted_archive[split] is None) and audio is later read
        directly out of the tgz via iter_archive.
        """
        urls = _URLS[_DATASETNAME]

        audio_path = {}
        local_extracted_archive = {}
        metadata_path = {}
        splits = ["train", "test"]
        for split in splits:
            audio_path[split] = dl_manager.download(os.path.join(urls, "audio_{split}.tgz".format(split=split)))
            local_extracted_archive[split] = dl_manager.extract(audio_path[split]) if not dl_manager.is_streaming else None
            metadata_path[split] = dl_manager.download_and_extract(
                os.path.join(urls, "metadata_{split}.csv.gz".format(split=split))
            )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Whatever you put in gen_kwargs will be passed to _generate_examples
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive["train"],
                    "audio_path": dl_manager.iter_archive(audio_path["train"]),
                    "metadata_path": metadata_path["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive["test"],
                    "audio_path": dl_manager.iter_archive(audio_path["test"]),
                    "metadata_path": metadata_path["test"],
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, local_extracted_archive: Path, audio_path, metadata_path: Path, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) pairs for one split.

        Args:
            local_extracted_archive: directory the audio tgz was extracted to,
                or None when streaming.
            audio_path: iterator of (archive-internal path, file object) pairs
                from DownloadManager.iter_archive.
            metadata_path: path to the split's metadata CSV.
            split: split name (unused; kept for gen_kwargs compatibility).
        """
        df = pd.read_csv(
            metadata_path,
            encoding="utf-8"
        )
        # subset_id is "librivox_indonesia" (all languages) or
        # "librivox_indonesia_<code>"; map the subset code to the language
        # code used in the CSV (e.g. "ban" -> "bal").
        lang = self.config.subset_id.split("_")[-1]
        if lang != "indonesia":
            lang = _LANG_CODE[lang][0]
        path_to_audio = "librivox-indonesia"
        # Index the selected rows by their archive-internal audio path so the
        # archive iteration below can look them up in O(1).
        metadata = {}
        for row_id, row in df.iterrows():  # renamed from `id`, which shadowed the builtin
            if lang == row["language"] or lang == "indonesia":
                path = os.path.join(path_to_audio, row["path"])
                metadata[path] = row
                metadata[path]["id"] = row_id

        for path, f in audio_path:
            if path in metadata:
                row = metadata[path]
                if local_extracted_archive:
                    # Non-streaming: the extracted file exists on disk, so a
                    # plain path is enough for the Audio feature.
                    path = os.path.join(local_extracted_archive, path)
                    audio = path
                else:
                    # Streaming fix: the archive-internal path does not exist
                    # on disk, so hand the Audio feature the raw bytes
                    # (same pattern as the upstream librivox-indonesia script).
                    audio = {"path": path, "bytes": f.read()}
                if self.config.schema == "source":
                    yield row["id"], {
                        "path": path,
                        "language": row["language"],
                        "reader": row["reader"],
                        "sentence": row["sentence"],
                        "audio": audio,
                    }
                elif self.config.schema == "nusantara_sptext":
                    yield row["id"], {
                        "id": row["id"],
                        "speaker_id": row["reader"],
                        "path": path,
                        "audio": audio,
                        "text": row["sentence"],
                        "metadata": {
                            "speaker_age": None,
                            "speaker_gender": None,
                        }
                    }