import os

import datasets
from datasets import load_dataset
from datasets.table import embed_table_storage
from datasets.utils.py_utils import convert_file_size_to_int

from tqdm import tqdm

_CITATION = """\
@ARTICLE{Zen2019-kz,
  title         = "{LibriTTS}: A corpus derived from {LibriSpeech} for
                   text-to-speech",
  author        = "Zen, Heiga and Dang, Viet and Clark, Rob and Zhang, Yu and
                   Weiss, Ron J and Jia, Ye and Chen, Zhifeng and Wu, Yonghui",
  abstract      = "This paper introduces a new speech corpus called
                   ``LibriTTS'' designed for text-to-speech use. It is derived
                   from the original audio and text materials of the
                   LibriSpeech corpus, which has been used for training and
                   evaluating automatic speech recognition systems. The new
                   corpus inherits desired properties of the LibriSpeech corpus
                   while addressing a number of issues which make LibriSpeech
                   less than ideal for text-to-speech work. The released corpus
                   consists of 585 hours of speech data at 24kHz sampling rate
                   from 2,456 speakers and the corresponding texts.
                   Experimental results show that neural end-to-end TTS models
                   trained from the LibriTTS corpus achieved above 4.0 in mean
                   opinion scores in naturalness in five out of six evaluation
                   speakers. The corpus is freely available for download from
                   http://www.openslr.org/60/.",
  month         = apr,
  year          = 2019,
  copyright     = "http://arxiv.org/licenses/nonexclusive-distrib/1.0/",
  archivePrefix = "arXiv",
  primaryClass  = "cs.SD",
  eprint        = "1904.02882"
}
"""

_DESCRIPTION = """\
LibriTTS is a multi-speaker English corpus of approximately 585 hours of read English speech at 24kHz sampling rate,
prepared by Heiga Zen with the assistance of Google Speech and Google Brain team members. The LibriTTS corpus is
designed for TTS research. It is derived from the original materials (mp3 audio files from LibriVox and text files
from Project Gutenberg) of the LibriSpeech corpus.
"""

_HOMEPAGE = "https://www.openslr.org/60/"

_LICENSE = "CC BY 4.0"

_DL_URL = "https://openslr.elda.org/resources/60/"

_DATA_URLS = {
    'dev.clean': _DL_URL + 'dev-clean.tar.gz',
    'dev.other': _DL_URL + 'dev-other.tar.gz',
    'test.clean': _DL_URL + 'test-clean.tar.gz',
    'test.other': _DL_URL + 'test-other.tar.gz',
    'train.clean.100': _DL_URL + 'train-clean-100.tar.gz',
    'train.clean.360': _DL_URL + 'train-clean-360.tar.gz',
    'train.other.500': _DL_URL + 'train-other-500.tar.gz',
}
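
# Illustrative usage of the configs handled below (a sketch, not part of the
# loader): "dev", "clean", "other" and "all" select subsets of the split
# names above, e.g.
#
#   from datasets import load_dataset
#   ds = load_dataset("libritts.py", "clean", split="train.clean.100")
#
# The path "libritts.py" is hypothetical; point it at wherever this script
# is saved.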


def _generate_transcripts(transcript_csv_file):
    """Generates partial examples from a tab-separated transcript file."""
    for line in transcript_csv_file:
        # Each line holds the utterance key, the original text and the
        # normalized text, separated by tabs.
        key, text_original, text_normalized = line.decode("utf-8").rstrip("\n").split("\t")
        # The key begins with "<speaker_id>_<chapter_id>_...".
        speaker_id, chapter_id = [int(el) for el in key.split("_")[:2]]
        example = {
            "text_normalized": text_normalized,
            "text_original": text_original,
            "speaker_id": speaker_id,
            "chapter_id": chapter_id,
            "id_": key,
        }
        yield example
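
# For illustration, a hypothetical .trans.tsv line as parsed above (the key
# and both text fields are invented):
#
#   1034_121119_000001_000001<TAB>To-morrow?<TAB>Tomorrow?
#
# yields speaker_id=1034, chapter_id=121119, and id_ set to the full key.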


class LibriTTS_Dataset(datasets.GeneratorBasedBuilder):
    """
    LibriTTS is a multi-speaker English corpus of approximately 585 hours of read English speech at 24kHz sampling rate,
    prepared by Heiga Zen with the assistance of Google Speech and Google Brain team members.
    """

    VERSION = datasets.Version("1.0.0")

    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="dev", description="Only the 'dev.clean' split."),
        datasets.BuilderConfig(name="clean", description="'Clean' speech."),
        datasets.BuilderConfig(name="other", description="'Other', more challenging, speech."),
        datasets.BuilderConfig(name="all", description="Combined clean and other dataset."),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=24_000),
                    "text_normalized": datasets.Value("string"),
                    "text_original": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "chapter_id": datasets.Value("string"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        split_names = list(_DATA_URLS.keys())

        if self.config.name == "dev":
            split_names = ['dev.clean']
        elif self.config.name == "clean":
            split_names = [k for k in _DATA_URLS if 'clean' in k]
        elif self.config.name == "other":
            split_names = [k for k in _DATA_URLS if 'other' in k]

        archive_path = dl_manager.download({k: v for k, v in _DATA_URLS.items() if k in split_names})

        # In streaming mode the archives are never extracted; examples are
        # read directly out of the tar files via iter_archive below.
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}

        all_splits = [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get(split_name),
                    "files": dl_manager.iter_archive(archive_path[split_name]),
                    "split_name": split_name,
                },
            ) for split_name in split_names
        ]

        return all_splits
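
    # Note on ordering (an assumption about the tar layout, not guaranteed by
    # the corpus): dl_manager.iter_archive yields (path, file) pairs in
    # archive order, so a split's .wav files and its .trans.tsv transcripts
    # can arrive in either order. _generate_examples below therefore buffers
    # whichever half of an example it sees first.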

    def _generate_examples(self, split_name, files, local_extracted_archive):
        """Generates examples from a LibriTTS archive."""
        audio_extension = '.wav'

        key = 0
        # Buffers for whichever half of an example (audio bytes or
        # transcript) is encountered first while walking the archive.
        all_audio_data = {}
        transcripts = {}

        def get_return_data(transcript, audio_data):
            nonlocal key

            audio = {"path": transcript["path"], "bytes": audio_data}
            key += 1

            return key, {"audio": audio, **transcript}

        for path, f in files:
            if path.endswith(audio_extension):
                id_ = path.split("/")[-1][: -len(audio_extension)]

                audio_data = f.read()

                transcript = transcripts.get(id_, None)

                if transcript is not None:
                    yield get_return_data(transcript, audio_data)
                    del transcripts[id_]
                else:
                    # The transcript has not been seen yet; buffer the bytes
                    # already read above (a second f.read() would return
                    # empty bytes).
                    all_audio_data[id_] = audio_data

            elif path.endswith(".trans.tsv"):
                for example in _generate_transcripts(f):
                    example_id = example['id_']

                    audio_file = f"{example_id}{audio_extension}"

                    # When the archive was extracted locally, point at the
                    # on-disk copy; in streaming mode keep the in-archive name.
                    audio_file = (
                        os.path.join(
                            local_extracted_archive, 'LibriTTS',
                            split_name.replace('.', '-'),
                            str(example['speaker_id']), str(example['chapter_id']), audio_file)
                        if local_extracted_archive
                        else audio_file
                    )

                    # Cast the ids to str to match the Value("string")
                    # features declared in _info.
                    transcript = {
                        "id": example_id,
                        "speaker_id": str(example['speaker_id']),
                        "chapter_id": str(example['chapter_id']),
                        "text_normalized": example['text_normalized'],
                        "text_original": example['text_original'],
                        "path": audio_file,
                    }

                    audio_data = all_audio_data.get(example_id, None)
                    if audio_data is not None:
                        yield get_return_data(transcript, audio_data)
                        del all_audio_data[example_id]
                    else:
                        transcripts[example_id] = transcript

        # The archive is exhausted; pair up anything left in the buffers.
        for id_, audio_data in all_audio_data.items():
            transcript = transcripts.get(id_, None)

            if transcript is None:
                # Audio without a matching transcript; skip it.
                continue
            else:
                yield get_return_data(transcript, audio_data)
                del transcripts[id_]

        for id_, transcript in transcripts.items():
            audio_data = all_audio_data.get(id_, None)

            if audio_data is None:
                # Transcript without matching audio; skip it.
                continue
            else:
                yield get_return_data(transcript, audio_data)


def to_parquet_with_audio(dataset, data_out_dir, split_name, max_shard_size='500MB'):
    from datasets import config

    # Choose a shard count so that each parquet file stays below
    # max_shard_size.
    dataset_nbytes = dataset._estimate_nbytes()
    max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
    num_shards = int(dataset_nbytes / max_shard_size) + 1
    num_shards = max(num_shards, 1)
    shards = (dataset.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards))

    def shards_with_embedded_external_files(shards):
        # The audio column references external files; embed the raw bytes
        # into the arrow tables so the parquet shards are self-contained.
        for shard in shards:
            shard_format = shard.format
            shard = shard.with_format("arrow")
            shard = shard.map(
                embed_table_storage,
                batched=True,
                batch_size=1000,
                keep_in_memory=True,
            )
            shard = shard.with_format(**shard_format)
            yield shard

    shards = shards_with_embedded_external_files(shards)

    os.makedirs(data_out_dir, exist_ok=True)

    for index, shard in tqdm(
        enumerate(shards),
        desc="Saving the dataset shards",
        total=num_shards,
    ):
        shard_path = f"{data_out_dir}/{split_name}-{index:05d}-of-{num_shards:05d}.parquet"
        shard.to_parquet(shard_path)
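
# Worked example of the shard arithmetic above (numbers are illustrative):
# a split estimated at 2.3 GB with the default '500MB' cap gives
# int(2.3e9 / 5e8) + 1 == 5 shards, written as
# <split>-00000-of-00005.parquet through <split>-00004-of-00005.parquet.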


if __name__ == '__main__':
    file_path = os.path.abspath(os.path.realpath(__file__))

    file_dir = os.path.dirname(file_path)

    # Build all splits with this loading script, then export each split to
    # self-contained parquet shards under data/<split>/.
    dataset_splits = load_dataset(file_path, "all")

    for split in dataset_splits:
        out_dir = f'{file_dir}/data/{split}/'
        os.makedirs(out_dir, exist_ok=True)

        to_parquet_with_audio(dataset_splits[split], out_dir, split)
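
# To sanity-check the export afterwards (a sketch; the glob assumes the
# data/<split>/ layout written above):
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files="data/train.clean.100/*.parquet")
#
# Because the audio bytes were embedded, the shards are self-contained.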