brthor committed on
Commit
c596804
1 Parent(s): 0129042

Create libritts_asr_builder.py

Files changed (1)
  1. libritts_asr_builder.py +239 -0
libritts_asr_builder.py ADDED
@@ -0,0 +1,239 @@
+ # coding=utf-8
+ # Copyright 2024 blabble.io
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import os
+
+ import datasets
+
+ _CITATION = """\
+ @ARTICLE{Koizumi2023-hs,
+   title     = "{LibriTTS-R}: A restored multi-speaker text-to-speech corpus",
+   author    = "Koizumi, Yuma and Zen, Heiga and Karita, Shigeki and Ding,
+                Yifan and Yatabe, Kohei and Morioka, Nobuyuki and Bacchiani,
+                Michiel and Zhang, Yu and Han, Wei and Bapna, Ankur",
+   abstract  = "This paper introduces a new speech dataset called
+                ``LibriTTS-R'' designed for text-to-speech (TTS) use. It is
+                derived by applying speech restoration to the LibriTTS
+                corpus, which consists of 585 hours of speech data at 24 kHz
+                sampling rate from 2,456 speakers and the corresponding
+                texts. The constituent samples of LibriTTS-R are identical
+                to those of LibriTTS, with only the sound quality improved.
+                Experimental results show that the LibriTTS-R ground-truth
+                samples showed significantly improved sound quality compared
+                to those in LibriTTS. In addition, neural end-to-end TTS
+                trained with LibriTTS-R achieved speech naturalness on par
+                with that of the ground-truth samples. The corpus is freely
+                available for download from
+                \textbackslashurl\{http://www.openslr.org/141/\}.",
+   month     = may,
+   year      = 2023,
+   copyright = "http://creativecommons.org/licenses/by-nc-nd/4.0/",
+   archivePrefix = "arXiv",
+   primaryClass  = "eess.AS",
+   eprint    = "2305.18802"
+ }
+ """
+
+ _DESCRIPTION = """\
+ LibriTTS-R [1] is a sound quality improved version of the LibriTTS corpus (http://www.openslr.org/60/) which is a
+ multi-speaker English corpus of approximately 585 hours of read English speech at 24kHz sampling rate,
+ published in 2019. The constituent samples of LibriTTS-R are identical to those of LibriTTS, with only the sound
+ quality improved. To improve sound quality, a speech restoration model, Miipher proposed by Yuma Koizumi [2], was used.
+ """
+
+ _HOMEPAGE = "https://www.openslr.org/141/"
+
+ _LICENSE = "CC BY 4.0"
+
+ _DL_URL = "https://us.openslr.org/resources/141/"
+
+ _DATA_URLS = {
+     'dev.clean': _DL_URL + 'dev_clean.tar.gz',
+     'dev.other': _DL_URL + 'dev_other.tar.gz',
+     'test.clean': _DL_URL + 'test_clean.tar.gz',
+     'test.other': _DL_URL + 'test_other.tar.gz',
+     'train.clean.100': _DL_URL + 'train_clean_100.tar.gz',
+     'train.clean.360': _DL_URL + 'train_clean_360.tar.gz',
+     'train.other.500': _DL_URL + 'train_other_500.tar.gz',
+ }
+
+
+ def _generate_transcripts(transcript_csv_file):
+     """Generates partial examples from a transcript TSV file."""
+     for line in transcript_csv_file:
+         key, text_original, text_normalized = line.decode("utf-8").replace('\n', '').split("\t")
+         speaker_id, chapter_id = [int(el) for el in key.split("_")[:2]]
+         example = {
+             "text_normalized": text_normalized,
+             "text_original": text_original,
+             "speaker_id": speaker_id,
+             "chapter_id": chapter_id,
+             "id_": key,
+         }
+         yield example
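+
+ # Illustrative parse of the function above (the "<speaker>_<chapter>_<paragraph>_<sentence>"
+ # utterance ID layout is assumed from LibriTTS naming, not stated in this commit): a row like
+ #   b"19_198_000000_000000\tOriginal text.\tnormalized text.\n"
+ # yields
+ #   {"id_": "19_198_000000_000000", "speaker_id": 19, "chapter_id": 198,
+ #    "text_original": "Original text.", "text_normalized": "normalized text."}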
+
+
+ class LibriTTS_R_Dataset(datasets.GeneratorBasedBuilder):
+     """
+     LibriTTS-R [1] is a sound quality improved version of the LibriTTS corpus (http://www.openslr.org/60/) which is a
+     multi-speaker English corpus of approximately 585 hours of read English speech at 24kHz sampling rate,
+     published in 2019.
+     """
+
+     VERSION = datasets.Version("1.0.0")
+
+     DEFAULT_CONFIG_NAME = "all"
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="dev", description="Only the 'dev.clean' split."),
+         datasets.BuilderConfig(name="clean", description="'Clean' speech."),
+         datasets.BuilderConfig(name="other", description="'Other', more challenging, speech."),
+         datasets.BuilderConfig(name="all", description="Combined clean and other dataset."),
+     ]
+
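+     # How configs map to splits in _split_generators below:
+     #   "all"   -> every split in _DATA_URLS
+     #   "clean" -> dev.clean, test.clean, train.clean.100, train.clean.360
+     #   "other" -> dev.other, test.other, train.other.500
+     #   "dev"   -> dev.clean only
+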
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "audio": datasets.Audio(sampling_rate=24_000),
+                     "text_normalized": datasets.Value("string"),
+                     "text_original": datasets.Value("string"),
+                     "speaker_id": datasets.Value("string"),
+                     "path": datasets.Value("string"),
+                     "chapter_id": datasets.Value("string"),
+                     "id": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         split_names = _DATA_URLS.keys()
+
+         if self.config.name == "dev":
+             # The "dev" config is documented as only the 'dev.clean' split.
+             split_names = ['dev.clean']
+         elif self.config.name == "clean":
+             split_names = [k for k in _DATA_URLS.keys() if 'clean' in k]
+         elif self.config.name == "other":
+             split_names = [k for k in _DATA_URLS.keys() if 'other' in k]
+
+         archive_path = dl_manager.download({k: v for k, v in _DATA_URLS.items() if k in split_names})
+
+         # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
+         local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
+
+         all_splits = [
+             datasets.SplitGenerator(
+                 name=split_name,
+                 gen_kwargs={
+                     "local_extracted_archive": local_extracted_archive.get(split_name),
+                     "files": dl_manager.iter_archive(archive_path[split_name]),
+                     "split_name": split_name,
+                 },
+             ) for split_name in split_names
+         ]
+
+         return all_splits
+
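+     # Archive layout assumed by the path construction below (matching the published
+     # LibriTTS-R tarballs; not stated in this commit):
+     #   LibriTTS_R/<split-with-dashes>/<speaker_id>/<chapter_id>/<utterance_id>.wav
+     #   LibriTTS_R/<split-with-dashes>/<speaker_id>/<chapter_id>/<speaker_id>_<chapter_id>.trans.tsv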
+     def _generate_examples(self, split_name, files, local_extracted_archive):
+         """Generate examples from a LibriTTS-R archive_path."""
+         audio_extension = '.wav'
+
+         key = 0
+         all_audio_data = {}
+         transcripts = {}
+
+         def get_return_data(transcript, audio_data):
+             nonlocal key
+
+             audio = {"path": transcript["path"], "bytes": audio_data}
+             key += 1
+
+             return key, {"audio": audio, **transcript}
+
+         for path, f in files:
+             if path.endswith(audio_extension):
+                 id_ = path.split("/")[-1][: -len(audio_extension)]
+
+                 audio_data = f.read()
+
+                 # If we already have the transcript for this audio, yield it right away.
+                 # Otherwise, save it for when we get the transcript.
+                 transcript = transcripts.get(id_, None)
+
+                 if transcript is not None:
+                     yield get_return_data(transcript, audio_data)
+                     del transcripts[id_]
+                 else:
+                     # Reuse the bytes read above; calling f.read() a second time would return b"".
+                     all_audio_data[id_] = audio_data
+
+             elif path.endswith(".trans.tsv"):
+                 for example in _generate_transcripts(f):
+                     example_id = example['id_']
+
+                     audio_file = f"{example_id}{audio_extension}"
+
+                     # TODO: this path is probably not right, there are subdirectories
+                     audio_file = (
+                         os.path.join(
+                             local_extracted_archive, 'LibriTTS_R',
+                             split_name.replace('.', '-'),
+                             str(example['speaker_id']), str(example['chapter_id']), audio_file)
+                         if local_extracted_archive
+                         else audio_file
+                     )
+
+                     transcript = {
+                         "id": example_id,
+                         # Cast to str to match the string features declared in _info.
+                         "speaker_id": str(example['speaker_id']),
+                         "chapter_id": str(example['chapter_id']),
+                         "text_normalized": example['text_normalized'],
+                         "text_original": example['text_original'],
+                         "path": audio_file,
+                     }
+
+                     # If we already have the audio for this transcript, yield it right away.
+                     # Otherwise, save it for when we get the audio.
+                     audio_data = all_audio_data.get(example_id, None)
+                     if audio_data is not None:
+                         yield get_return_data(transcript, audio_data)
+                         del all_audio_data[example_id]
+                     else:
+                         transcripts[example_id] = transcript
+
+         for id_, audio_data in all_audio_data.items():
+             transcript = transcripts.get(id_, None)
+
+             if transcript is None:
+                 # for debugging, this dataset has extra audio
+                 # print(f"[libritts_r {split_name}] Audio without transcript: {id_}")
+                 continue
+
+             else:
+                 yield get_return_data(transcript, audio_data)
+                 del transcripts[id_]
+
+         for id_, transcript in transcripts.items():
+             audio_data = all_audio_data.get(id_, None)
+
+             if audio_data is None:
+                 # for debugging, this dataset has extra transcripts
+                 # print(f"[libritts_r {split_name}] Transcript without audio: {id_}")
+                 continue
+
+             else:
+                 yield get_return_data(transcript, audio_data)
+                 # no del needed here
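
For context, a minimal usage sketch of this builder. Assumptions: the script is saved locally as libritts_asr_builder.py next to the calling code, and an installed datasets release still supports script-based builders; the config name "clean" and split name "train.clean.100" come from BUILDER_CONFIGS and _DATA_URLS above.

    import datasets

    # Stream the 'clean' configuration so the tarballs are not fully downloaded up front.
    ds = datasets.load_dataset(
        "libritts_asr_builder.py",
        "clean",
        split="train.clean.100",
        streaming=True,
    )

    sample = next(iter(ds))
    print(sample["id"], sample["speaker_id"], sample["text_normalized"])
    print(sample["audio"]["sampling_rate"])  # 24000, per the Audio feature declared in _info

In non-streaming mode the same call (without streaming=True) extracts the archives locally, and the "path" field then points at the extracted .wav files.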