"""LibriTTS dataset with forced alignments.""" import os from pathlib import Path import hashlib import pickle import datasets import pandas as pd import numpy as np from alignments.datasets.librispeech import LibrittsRDataset from tqdm.contrib.concurrent import process_map from tqdm.auto import tqdm from multiprocessing import cpu_count from phones.convert import Converter import torchaudio import torchaudio.transforms as AT from functools import lru_cache logger = datasets.logging.get_logger(__name__) _PHONESET = "arpabet" _VERBOSE = os.environ.get("LIBRITTS_VERBOSE", True) _MAX_WORKERS = os.environ.get("LIBRITTS_MAX_WORKERS", cpu_count()) _PATH = os.environ.get("LIBRITTS_PATH", os.environ.get("HF_DATASETS_CACHE", None)) if _PATH is not None and not os.path.exists(_PATH): os.makedirs(_PATH) _VERSION = "1.1.0" _CITATION = """\ @article{koizumi2023libritts, title={LibriTTS-R: A Restored Multi-Speaker Text-to-Speech Corpus}, author={Koizumi, Yuma and Zen, Heiga and Karita, Shigeki and Ding, Yifan and Yatabe, Kohei and Morioka, Nobuyuki and Bacchiani, Michiel and Zhang, Yu and Han, Wei and Bapna, Ankur}, journal={arXiv preprint arXiv:2305.18802}, year={2023} } @article{zen2019libritts, title={LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech}, author={Zen, Heiga and Dang, Viet and Clark, Rob and Zhang, Yu and Weiss, Ron J and Jia, Ye and Chen, Zhifeng and Wu, Yonghui}, journal={Interspeech}, year={2019} } @article{https://doi.org/10.48550/arxiv.2211.16049, author = {Minixhofer, Christoph and Klejch, Ondřej and Bell, Peter}, title = {Evaluating and reducing the distance between synthetic and real speech distributions}, year = {2022} } """ _DESCRIPTION = """\ Dataset used for loading TTS spectrograms and waveform audio with alignments and a number of configurable "measures", which are extracted from the raw audio. """ _URL = "http://www.openslr.org/resources/141/" _URLS = { "dev-clean": _URL + "dev_clean.tar.gz", "dev-other": _URL + "dev_other.tar.gz", "test-clean": _URL + "test_clean.tar.gz", "test-other": _URL + "test_other.tar.gz", "train-clean-100": _URL + "train_clean_100.tar.gz", "train-clean-360": _URL + "train_clean_360.tar.gz", "train-other-500": _URL + "train_other_500.tar.gz", } @lru_cache(maxsize=1000) def get_speaker_prompts(speaker, hash_ds): ds = hash_ds.df speaker_prompts = ds[ds["speaker"] == speaker] speaker_prompts = tuple(speaker_prompts["audio"]) return speaker_prompts class LibriTTSAlignConfig(datasets.BuilderConfig): """BuilderConfig for LibriTTSAlign.""" def __init__(self, sampling_rate=22050, hop_length=256, win_length=1024, **kwargs): """BuilderConfig for LibriTTSAlign. Args: **kwargs: keyword arguments forwarded to super. """ super(LibriTTSAlignConfig, self).__init__(**kwargs) self.sampling_rate = sampling_rate self.hop_length = hop_length self.win_length = win_length if _PATH is None: raise ValueError("Please set the environment variable LIBRITTS_PATH to point to the LibriTTS dataset directory.") elif _PATH == os.environ.get("HF_DATASETS_CACHE", None): logger.warning("Please set the environment variable LIBRITTS_PATH to point to the LibriTTS dataset directory. 


class LibriTTSAlign(datasets.GeneratorBasedBuilder):
    """LibriTTSAlign dataset."""

    BUILDER_CONFIGS = [
        LibriTTSAlignConfig(
            name="libritts",
            version=datasets.Version(_VERSION, ""),
        ),
    ]

    def _info(self):
        features = {
            "id": datasets.Value("string"),
            "speaker": datasets.Value("string"),
            "text": datasets.Value("string"),
            "start": datasets.Value("float32"),
            "end": datasets.Value("float32"),
            # phone features
            "phones": datasets.Sequence(datasets.Value("string")),
            "phone_durations": datasets.Sequence(datasets.Value("int32")),
            # audio features
            "audio": datasets.Value("string"),
            "audio_speaker_prompt": datasets.Sequence(datasets.Value("string")),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage="https://github.com/MiniXC/MeasureCollator",
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        ds_dict = {}
        for name, url in _URLS.items():
            ds_dict[name] = self._create_alignments_ds(name, url)
        splits = [
            datasets.SplitGenerator(
                name=key.replace("-", "."), gen_kwargs={"ds": self._create_data(value)}
            )
            for key, value in ds_dict.items()
        ]
        # dataframes with all data per split group
        data_train = self._create_data(
            [
                ds_dict["train-clean-100"],
                ds_dict["train-clean-360"],
                ds_dict["train-other-500"],
            ]
        )
        data_dev = self._create_data([ds_dict["dev-clean"], ds_dict["dev-other"]])
        data_test = self._create_data([ds_dict["test-clean"], ds_dict["test-other"]])
        splits += [
            datasets.SplitGenerator(
                name="train.all",
                gen_kwargs={
                    "ds": data_train,
                },
            ),
            datasets.SplitGenerator(
                name="dev.all",
                gen_kwargs={
                    "ds": data_dev,
                },
            ),
            datasets.SplitGenerator(
                name="test.all",
                gen_kwargs={
                    "ds": data_test,
                },
            ),
        ]
        data_all = pd.concat([data_train, data_dev, data_test])
        # create a new split which takes two samples from each speaker in data_all
        # and puts them into the dev split; these samples are then removed from data_all
        speakers = data_all["speaker"].unique()
        # seed for reproducibility
        np.random.seed(42)
        self.data_all = data_all
        del data_all
        data_dev_all = [
            x
            for x in process_map(
                self._create_dev_split,
                speakers,
                chunksize=1000,
                max_workers=_MAX_WORKERS,
                desc="creating dev split",
                tqdm_class=tqdm,
            )
            if x is not None
        ]
        data_dev_all = pd.concat(data_dev_all)
        data_all = self.data_all
        # keep only speakers that made it into the dev split, and drop their dev samples
        data_all = data_all[data_all["speaker"].isin(data_dev_all["speaker"].unique())]
        data_all = data_all[~data_all["basename"].isin(data_dev_all["basename"].unique())]
        del self.data_all
        self.speaker2idxs = {}
        self.speaker2idxs["all"] = {
            speaker: idx
            for idx, speaker in enumerate(sorted(list(data_dev_all["speaker"].unique())))
        }
        self.speaker2idxs["train"] = {
            speaker: idx
            for idx, speaker in enumerate(sorted(list(data_train["speaker"].unique())))
        }
        self.speaker2idxs["dev"] = {
            speaker: idx
            for idx, speaker in enumerate(sorted(list(data_dev["speaker"].unique())))
        }
        self.speaker2idxs["test"] = {
            speaker: idx
            for idx, speaker in enumerate(sorted(list(data_test["speaker"].unique())))
        }
        splits += [
            datasets.SplitGenerator(
                name="train",
                gen_kwargs={
                    "ds": data_all,
                },
            ),
            datasets.SplitGenerator(
                name="dev",
                gen_kwargs={
                    "ds": data_dev_all,
                },
            ),
        ]
        self.alignments_ds = None
        self.data = None
        return splits

    def _create_dev_split(self, speaker):
        data_speaker = self.data_all[self.data_all["speaker"] == speaker]
        if len(data_speaker) < 10:
            print(f"Speaker {speaker} has only {len(data_speaker)} samples, skipping")
            return None
        else:
            data_speaker = data_speaker.sample(2)
        return data_speaker

    def _create_alignments_ds(self, name, url):
        self.empty_textgrids = 0
        # cache the aligned dataset as a pickle keyed by a hash of its target directory
        ds_hash = hashlib.md5(
            os.path.join(_PATH, f"{name}-alignments").encode()
        ).hexdigest()
        pkl_path = os.path.join(_PATH, f"{ds_hash}.pkl")
        if os.path.exists(pkl_path):
            ds = pickle.load(open(pkl_path, "rb"))
        else:
            tgt_dir = os.path.join(_PATH, f"{name}-alignments")
            src_dir = os.path.join(_PATH, f"{name}-data")
            if os.path.exists(tgt_dir):
                # alignments already exist, no need to download or extract the source data
                src_dir = None
                url = None
            elif os.path.exists(src_dir):
                # source data already exists, no need to download it again
                url = None
            ds = LibrittsRDataset(
                target_directory=tgt_dir,
                source_directory=src_dir,
                source_url=url,
                verbose=_VERBOSE,
                tmp_directory=os.path.join(_PATH, f"{name}-tmp"),
                chunk_size=1000,
            )
            pickle.dump(ds, open(pkl_path, "wb"))
        return ds, ds_hash

    def _create_data(self, data):
        entries = []
        self.phone_cache = {}
        self.phone_converter = Converter()
        if not isinstance(data, list):
            data = [data]
        hashes = [ds_hash for ds, ds_hash in data]
        self.ds = [ds for ds, ds_hash in data]
        del data
        for i, ds in enumerate(self.ds):
            if os.path.exists(os.path.join(_PATH, f"{hashes[i]}-entries.pkl")):
                add_entries = pickle.load(
                    open(os.path.join(_PATH, f"{hashes[i]}-entries.pkl"), "rb")
                )
            else:
                add_entries = [
                    entry
                    for entry in process_map(
                        self._create_entry,
                        zip([i] * len(ds), np.arange(len(ds))),
                        chunksize=10_000,
                        max_workers=_MAX_WORKERS,
                        desc=f"processing dataset {hashes[i]}",
                        tqdm_class=tqdm,
                    )
                    if entry is not None
                ]
                pickle.dump(
                    add_entries,
                    open(os.path.join(_PATH, f"{hashes[i]}-entries.pkl"), "wb"),
                )
            entries += add_entries
        if self.empty_textgrids > 0:
            logger.warning(f"Found {self.empty_textgrids} empty textgrids")
        del self.ds, self.phone_cache, self.phone_converter
        df = pd.DataFrame(
            entries,
            columns=[
                "phones",
                "duration",
                "start",
                "end",
                "audio",
                "speaker",
                "text",
                "basename",
            ],
        )
        return df

    def _create_entry(self, dsi_idx):
        dsi, idx = dsi_idx
        item = self.ds[dsi][idx]
        start, end = item["phones"][0][0], item["phones"][-1][1]

        phones = []
        durations = []

        for i, p in enumerate(item["phones"]):
            s, e, phone = p
            phone = phone.replace("ˌ", "")
            r_phone = phone.replace("0", "").replace("1", "")
            if len(r_phone) > 0:
                phone = r_phone
            if "[" not in phone:
                o_phone = phone
                if o_phone not in self.phone_cache:
                    phone = self.phone_converter(phone, _PHONESET, lang=None)[0]
                    self.phone_cache[o_phone] = phone
                phone = self.phone_cache[o_phone]
            phones.append(phone)
            durations.append(
                int(
                    np.round(e * self.config.sampling_rate / self.config.hop_length)
                    - np.round(s * self.config.sampling_rate / self.config.hop_length)
                )
            )

        if start >= end:
            self.empty_textgrids += 1
            return None

        return (
            phones,
            durations,
            start,
            end,
            item["wav"],
            str(item["speaker"]).split("/")[-1],
            item["transcript"],
            Path(item["wav"]).name,
        )

    def _generate_examples(self, ds):
        j = 0
        hash_col = "audio"
        hash_ds = HashableDataFrame(ds, hash_col)
        for i, row in ds.iterrows():
            # 10kB is the minimum size of a wav file for our purposes
            if Path(row["audio"]).stat().st_size >= 10_000:
                if len(row["phones"]) < 384:
                    speaker_prompts = get_speaker_prompts(row["speaker"], hash_ds)
                    result = {
                        "id": row["basename"],
                        "speaker": row["speaker"],
                        "text": row["text"],
                        "start": row["start"],
                        "end": row["end"],
                        "phones": row["phones"],
                        "phone_durations": row["duration"],
                        "audio": str(row["audio"]),
                        "audio_speaker_prompt": speaker_prompts,
                    }
                    yield j, result
                    j += 1


class HashableDataFrame:
    """Wraps a dataframe so it can be used as an ``lru_cache`` key."""

    def __init__(self, df, hash_col):
        self.df = df
        self.hash_col = hash_col
        # hash the values of the chosen column (joined as strings, since md5
        # cannot digest an object-dtype array directly)
        self.hash = hashlib.md5(
            "".join(self.df[self.hash_col].astype(str)).encode()
        ).hexdigest()
        # convert the hex digest to an integer for __hash__
        self.hash = int(self.hash, 16)

    def __hash__(self):
        return self.hash

    def __eq__(self, other):
        return self.hash == other.hash
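

if __name__ == "__main__":
    # Minimal usage sketch, not part of the builder itself. It assumes this file
    # is saved locally as "libritts_align.py" (the filename is an assumption) and
    # that LIBRITTS_PATH points to a directory with enough space for the corpus,
    # its alignments, and the cached pickles. Whether a local loading script can
    # be passed to load_dataset directly depends on the installed `datasets`
    # version.
    from datasets import load_dataset

    dataset = load_dataset("libritts_align.py", "libritts", split="dev.clean")
    example = dataset[0]
    print(example["id"], example["speaker"], example["text"])
    print(list(zip(example["phones"], example["phone_durations"])))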