holylovenia committed
Commit 59e1216
1 Parent(s): ebec41a

Upload su_id_tts.py with huggingface_hub

Files changed (1)
  1. su_id_tts.py +177 -0
su_id_tts.py ADDED
@@ -0,0 +1,177 @@
import csv
import os
from pathlib import Path
from typing import List

import datasets

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
                                       DEFAULT_SOURCE_VIEW_NAME, Tasks)

_DATASETNAME = "su_id_tts"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME

_LANGUAGES = ["sun"]
_LOCAL = False
_CITATION = """\
@inproceedings{sodimana18_sltu,
  author={Keshan Sodimana and Pasindu {De Silva} and Supheakmungkol Sarin and Oddur Kjartansson and Martin Jansche and Knot Pipatsrisawat and Linne Ha},
  title={{A Step-by-Step Process for Building TTS Voices Using Open Source Data and Frameworks for Bangla, Javanese, Khmer, Nepali, Sinhala, and Sundanese}},
  year=2018,
  booktitle={Proc. 6th Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU 2018)},
  pages={66--70},
  doi={10.21437/SLTU.2018-14}
}
"""

_DESCRIPTION = """\
This data set contains high-quality transcribed audio data for Sundanese. The data set consists of wave files, and a TSV file. The file line_index.tsv contains a filename and the transcription of audio in the file. Each filename is prepended with a speaker identification number.
The data set has been manually quality checked, but there might still be errors.
This dataset was collected by Google in collaboration with Universitas Pendidikan Indonesia.
"""

_HOMEPAGE = "http://openslr.org/44/"

_LICENSE = "CC BY-SA 4.0"

_URLs = {
    _DATASETNAME: {
        "female": "https://www.openslr.org/resources/44/su_id_female.zip",
        "male": "https://www.openslr.org/resources/44/su_id_male.zip",
    }
}

_SUPPORTED_TASKS = [Tasks.TEXT_TO_SPEECH]

_SOURCE_VERSION = "1.0.0"
_NUSANTARA_VERSION = "1.0.0"


class SuIdTTS(datasets.GeneratorBasedBuilder):
    """su_id_tts contains high-quality multi-speaker TTS data for Sundanese (SU-ID)."""

    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="su_id_tts_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="SU_ID_TTS source schema",
            schema="source",
            subset_id="su_id_tts",
        ),
        NusantaraConfig(
            name="su_id_tts_nusantara_sptext",
            version=datasets.Version(_NUSANTARA_VERSION),
            description="SU_ID_TTS Nusantara schema",
            schema="nusantara_sptext",
            subset_id="su_id_tts",
        ),
    ]

    DEFAULT_CONFIG_NAME = "su_id_tts_source"

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "gender": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_sptext":
            features = schemas.speech_text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        male_path = Path(dl_manager.download_and_extract(_URLs[_DATASETNAME]["male"]))
        female_path = Path(dl_manager.download_and_extract(_URLs[_DATASETNAME]["female"]))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "male_filepath": male_path,
                    "female_filepath": female_path,
                },
            ),
        ]

    def _generate_examples(self, male_filepath: Path, female_filepath: Path):

        if self.config.schema == "source" or self.config.schema == "nusantara_sptext":
            tsv_m = os.path.join(male_filepath, "su_id_male", "line_index.tsv")
            tsv_f = os.path.join(female_filepath, "su_id_female", "line_index.tsv")

            with open(tsv_m, "r") as file:
                tsv_m_data = csv.reader(file, delimiter="\t")
                for line in tsv_m_data:
                    spk_trans_info = line[0].split("_")
                    if self.config.schema == "source":
                        ex = {
                            "id": line[0],
                            "speaker_id": spk_trans_info[0] + "_" + spk_trans_info[1],
                            "path": os.path.join(male_filepath, "su_id_male", "wavs", "{}.wav".format(line[0])),
                            "audio": os.path.join(male_filepath, "su_id_male", "wavs", "{}.wav".format(line[0])),
                            "text": line[2],
                            "gender": spk_trans_info[0][2],
                        }
                        yield line[0], ex

                    elif self.config.schema == "nusantara_sptext":
                        ex = {
                            "id": line[0],
                            "speaker_id": spk_trans_info[0] + "_" + spk_trans_info[1],
                            "path": os.path.join(male_filepath, "su_id_male", "wavs", "{}.wav".format(line[0])),
                            "audio": os.path.join(male_filepath, "su_id_male", "wavs", "{}.wav".format(line[0])),
                            "text": line[2],
                            "metadata": {
                                "speaker_age": None,
                                "speaker_gender": spk_trans_info[0][2],
                            },
                        }
                        yield line[0], ex

            with open(tsv_f, "r") as file:
                tsv_f_data = csv.reader(file, delimiter="\t")
                for line in tsv_f_data:
                    spk_trans_info = line[0].split("_")
                    if self.config.schema == "source":
                        ex = {
                            "id": line[0],
                            "speaker_id": spk_trans_info[0] + "_" + spk_trans_info[1],
                            "path": os.path.join(female_filepath, "su_id_female", "wavs", "{}.wav".format(line[0])),
                            "audio": os.path.join(female_filepath, "su_id_female", "wavs", "{}.wav".format(line[0])),
                            "text": line[2],
                            "gender": spk_trans_info[0][2],
                        }
                        yield line[0], ex

                    elif self.config.schema == "nusantara_sptext":
                        ex = {
                            "id": line[0],
                            "speaker_id": spk_trans_info[0] + "_" + spk_trans_info[1],
                            "path": os.path.join(female_filepath, "su_id_female", "wavs", "{}.wav".format(line[0])),
                            "audio": os.path.join(female_filepath, "su_id_female", "wavs", "{}.wav".format(line[0])),
                            "text": line[2],
                            "metadata": {
                                "speaker_age": None,
                                "speaker_gender": spk_trans_info[0][2],
                            },
                        }
                        yield line[0], ex
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
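For reference, a loader script like this is normally consumed through datasets.load_dataset. The snippet below is a minimal usage sketch, not part of the commit: it assumes the nusacrowd package is installed, that your datasets version still supports loading local dataset scripts, and that the file above is saved as su_id_tts.py in the working directory. The config names come from BUILDER_CONFIGS.

    # Minimal usage sketch (assumptions: `datasets` and `nusacrowd` are installed,
    # and the script above is saved locally as su_id_tts.py).
    import datasets

    # Source schema: id, speaker_id, path, audio, text, gender.
    source = datasets.load_dataset("su_id_tts.py", name="su_id_tts_source", split="train")

    # Unified Nusantara speech-text schema (nusantara_sptext).
    sptext = datasets.load_dataset("su_id_tts.py", name="su_id_tts_nusantara_sptext", split="train")

    print(source[0]["id"], source[0]["gender"], source[0]["text"])

Either config downloads both OpenSLR 44 archives (male and female) and yields a single train split; the speaker gender is recovered from the file-name prefix exactly as in _generate_examples above.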