brthor committed on
Commit
f9c2a6e
1 Parent(s): 630fb85

add the parquet builder

Files changed (1)
  1. libritts_parquet_builder.py +296 -0
libritts_parquet_builder.py ADDED
# coding=utf-8
# Copyright 2024 blabble.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

import datasets
from datasets import load_dataset
from datasets.features.features import require_decoding
from datasets.table import embed_table_storage
from datasets.utils.py_utils import convert_file_size_to_int
from tqdm import tqdm
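
# NOTE: require_decoding is currently only used by the commented-out block in
# to_parquet_with_audio below.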

_CITATION = """\
@ARTICLE{Zen2019-kz,
  title         = "{LibriTTS}: A corpus derived from {LibriSpeech} for
                   text-to-speech",
  author        = "Zen, Heiga and Dang, Viet and Clark, Rob and Zhang, Yu and
                   Weiss, Ron J and Jia, Ye and Chen, Zhifeng and Wu, Yonghui",
  abstract      = "This paper introduces a new speech corpus called
                   ``LibriTTS'' designed for text-to-speech use. It is derived
                   from the original audio and text materials of the
                   LibriSpeech corpus, which has been used for training and
                   evaluating automatic speech recognition systems. The new
                   corpus inherits desired properties of the LibriSpeech corpus
                   while addressing a number of issues which make LibriSpeech
                   less than ideal for text-to-speech work. The released corpus
                   consists of 585 hours of speech data at 24kHz sampling rate
                   from 2,456 speakers and the corresponding texts.
                   Experimental results show that neural end-to-end TTS models
                   trained from the LibriTTS corpus achieved above 4.0 in mean
                   opinion scores in naturalness in five out of six evaluation
                   speakers. The corpus is freely available for download from
                   http://www.openslr.org/60/.",
  month         =  apr,
  year          =  2019,
  copyright     = "http://arxiv.org/licenses/nonexclusive-distrib/1.0/",
  archivePrefix = "arXiv",
  primaryClass  = "cs.SD",
  eprint        = "1904.02882"
}
"""

_DESCRIPTION = """\
LibriTTS is a multi-speaker English corpus of approximately 585 hours of read English speech at 24kHz sampling rate,
prepared by Heiga Zen with the assistance of Google Speech and Google Brain team members. The LibriTTS corpus is
designed for TTS research. It is derived from the original materials (mp3 audio files from LibriVox and text files
from Project Gutenberg) of the LibriSpeech corpus.
"""

_HOMEPAGE = "https://www.openslr.org/60/"

_LICENSE = "CC BY 4.0"

_DL_URL = "https://us.openslr.org/resources/60/"

_DATA_URLS = {
    'dev.clean': _DL_URL + 'dev-clean.tar.gz',
    'dev.other': _DL_URL + 'dev-other.tar.gz',
    'test.clean': _DL_URL + 'test-clean.tar.gz',
    'test.other': _DL_URL + 'test-other.tar.gz',
    'train.clean.100': _DL_URL + 'train-clean-100.tar.gz',
    'train.clean.360': _DL_URL + 'train-clean-360.tar.gz',
    'train.other.500': _DL_URL + 'train-other-500.tar.gz',
}


def _generate_transcripts(transcript_csv_file):
    """Generates partial examples from a transcript TSV file."""
    for line in transcript_csv_file:
        key, text_original, text_normalized = line.decode("utf-8").replace('\n', '').split("\t")
        speaker_id, chapter_id = [int(el) for el in key.split("_")[:2]]
        example = {
            "text_normalized": text_normalized,
            "text_original": text_original,
            "speaker_id": speaker_id,
            "chapter_id": chapter_id,
            "id_": key,
        }
        yield example
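
# Illustrative (not an actual corpus row): each line of a *.trans.tsv file is
#   <speaker>_<chapter>_<...>\t<original text>\t<normalized text>
# e.g. b"1034_121119_000001_000001\tTo-morrow?\tTomorrow?\n" parses to
# speaker_id=1034, chapter_id=121119, id_="1034_121119_000001_000001".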


class LibriTTS_Dataset(datasets.GeneratorBasedBuilder):
    """
    LibriTTS is a multi-speaker English corpus of approximately 585 hours of read English speech at 24kHz sampling rate,
    prepared by Heiga Zen with the assistance of Google Speech and Google Brain team members.
    """

    VERSION = datasets.Version("1.0.0")

    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="dev", description="Only the 'dev.clean' split."),
        datasets.BuilderConfig(name="clean", description="'Clean' speech."),
        datasets.BuilderConfig(name="other", description="'Other', more challenging, speech."),
        datasets.BuilderConfig(name="all", description="Combined clean and other dataset."),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=24_000),
                    "text_normalized": datasets.Value("string"),
                    "text_original": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "chapter_id": datasets.Value("string"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        split_names = _DATA_URLS.keys()

        if self.config.name == "dev":
            split_names = ['dev.clean']
        elif self.config.name == "clean":
            split_names = [k for k in _DATA_URLS.keys() if 'clean' in k]
        elif self.config.name == "other":
            split_names = [k for k in _DATA_URLS.keys() if 'other' in k]

        archive_path = dl_manager.download({k: v for k, v in _DATA_URLS.items() if k in split_names})

        # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}

        all_splits = [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get(split_name),
                    "files": dl_manager.iter_archive(archive_path[split_name]),
                    "split_name": split_name,
                },
            ) for split_name in split_names
        ]

        return all_splits

    def _generate_examples(self, split_name, files, local_extracted_archive):
        """Generate examples from a LibriTTS archive_path."""
        audio_extension = '.wav'

        key = 0
        all_audio_data = {}
        transcripts = {}

        def get_return_data(transcript, audio_data):
            nonlocal key

            audio = {"path": transcript["path"], "bytes": audio_data}
            key += 1

            return key, {"audio": audio, **transcript}
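
        # Audio files and *.trans.tsv files can appear in any order in the tar
        # archive, so buffer whichever half of a pair arrives first and yield
        # as soon as both the audio bytes and the transcript are available.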
        for path, f in files:
            if path.endswith(audio_extension):
                id_ = path.split("/")[-1][: -len(audio_extension)]

                audio_data = f.read()

                # If we already have the transcript for this audio, yield it right away.
                # Otherwise, save it for when we get the transcript.
                transcript = transcripts.get(id_, None)

                if transcript is not None:
                    yield get_return_data(transcript, audio_data)
                    del transcripts[id_]
                else:
                    # Reuse the bytes already read above; a second f.read() on
                    # the archive member would return an empty byte string.
                    all_audio_data[id_] = audio_data

            elif path.endswith(".trans.tsv"):
                for example in _generate_transcripts(f):
                    example_id = example['id_']

                    audio_file = f"{example_id}{audio_extension}"

                    audio_file = (
                        os.path.join(
                            local_extracted_archive, 'LibriTTS',
                            split_name.replace('.', '-'),
                            str(example['speaker_id']), str(example['chapter_id']), audio_file)
                        if local_extracted_archive
                        else audio_file
                    )

                    transcript = {
                        "id": example_id,
                        "speaker_id": example['speaker_id'],
                        "chapter_id": example['chapter_id'],
                        "text_normalized": example['text_normalized'],
                        "text_original": example['text_original'],
                        "path": audio_file,
                    }

                    # If we already have the audio for this transcript, yield it right away.
                    # Otherwise, save it for when we get the audio.
                    audio_data = all_audio_data.get(example_id, None)
                    if audio_data is not None:
                        yield get_return_data(transcript, audio_data)
                        del all_audio_data[example_id]
                    else:
                        transcripts[example_id] = transcript

        # Any audio that never saw its transcript during the pass above.
        for id_, audio_data in all_audio_data.items():
            transcript = transcripts.get(id_, None)

            if transcript is None:
                # for debugging; this dataset may have extra audio
                # print(f"[libritts {split_name}] Audio without transcript: {id_}")
                continue
            else:
                yield get_return_data(transcript, audio_data)
                del transcripts[id_]

        # Any transcripts that never saw their audio.
        for id_, transcript in transcripts.items():
            audio_data = all_audio_data.get(id_, None)

            if audio_data is None:
                # for debugging; this dataset has extra transcripts
                # print(f"[libritts {split_name}] Transcript without audio: {id_}")
                continue
            else:
                yield get_return_data(transcript, audio_data)
                # no del needed here


def to_parquet_with_audio(dataset, data_out_dir, split_name, max_shard_size='500MB'):
    from datasets import config

    # decodable_columns = (
    #     [k for k, v in dataset.features.items() if require_decoding(v, ignore_decode_attribute=True)]
    # )
    dataset_nbytes = dataset._estimate_nbytes()
    max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
    num_shards = int(dataset_nbytes / max_shard_size) + 1
    num_shards = max(num_shards, 1)
    shards = (dataset.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards))
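    # embed_table_storage rewrites each shard's Arrow table so that audio
    # referenced by external file paths is embedded as bytes, making the
    # resulting parquet shards self-contained.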
    def shards_with_embedded_external_files(shards):
        for shard in shards:
            shard_format = shard.format
            shard = shard.with_format("arrow")
            shard = shard.map(
                embed_table_storage,
                batched=True,
                batch_size=1000,
                keep_in_memory=True,
            )
            shard = shard.with_format(**shard_format)
            yield shard

    shards = shards_with_embedded_external_files(shards)

    os.makedirs(data_out_dir, exist_ok=True)

    for index, shard in tqdm(
        enumerate(shards),
        desc="Saving dataset shards",
        total=num_shards,
    ):
        shard_path = f"{data_out_dir}/{split_name}-{index:05d}-of-{num_shards:05d}.parquet"
        shard.to_parquet(shard_path)


if __name__ == '__main__':
    file_path = os.path.abspath(os.path.realpath(__file__))

    file_dir = os.path.dirname(file_path)

    dataset_splits = load_dataset(file_path, "all")

    for split in dataset_splits:
        out_dir = f'{file_dir}/data/{split}/'
        os.makedirs(os.path.dirname(out_dir), exist_ok=True)

        to_parquet_with_audio(dataset_splits[split], out_dir, split)
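
To load the generated shards back, something like this should work (a minimal sketch; the data_files glob assumes the data/<split>/ layout the script writes above):

from datasets import load_dataset

# Read the parquet shards of one split back into a DatasetDict.
shards = load_dataset(
    "parquet",
    data_files={"dev.clean": "data/dev.clean/*.parquet"},
)
print(shards["dev.clean"][0]["text_normalized"])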