"""LibriSpeech dataset with phone alignments, prosody and mel spectrograms."""
from pathlib import Path
from multiprocessing import cpu_count

import datasets
import pandas as pd
import numpy as np
from tqdm.contrib.concurrent import process_map
from tqdm.auto import tqdm
logger = datasets.logging.get_logger(__name__)
_VERSION = "0.0.2"
_CITATION = """\
@inproceedings{panayotov2015librispeech,
title={Librispeech: an asr corpus based on public domain audio books},
author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
booktitle={2015 IEEE international conference on acoustics, speech and signal processing (ICASSP)},
pages={5206--5210},
year={2015},
organization={IEEE}
}
"""
_DESCRIPTION = """\
Dataset containing Mel Spectrograms, Prosody and Phone Alignments for the LibriSpeech dataset.
"""
_URLS = {
"dev.clean": "https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel/resolve/main/data/dev_clean.tar.gz",
"dev.other": "https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel/resolve/main/data/dev_other.tar.gz",
"test.clean": "https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel/resolve/main/data/test_clean.tar.gz",
"test.other": "https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel/resolve/main/data/test_other.tar.gz",
"train.clean.100": "https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel/resolve/main/data/train_clean_100.tar.gz",
"train.clean.360": "https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel/resolve/main/data/train_clean_360.tar.gz",
"train.other.500": "https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel/resolve/main/data/train_other_500.tar.gz",
}
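# Example usage (a sketch, not part of the loader; assumes the script is
# hosted as cdminix/librispeech-phones-and-mel on the Hugging Face Hub and a
# `datasets` version that still runs script-based loaders):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("cdminix/librispeech-phones-and-mel", split="dev.clean")
#     row = ds[0]
#     # Apart from ids and text, every column is a path to an extracted file,
#     # e.g. row["mel"] points at a *_mel.png spectrogram image.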
class LibrispeechConfig(datasets.BuilderConfig):
    """BuilderConfig for Librispeech."""

    def __init__(self, **kwargs):
        """BuilderConfig for Librispeech.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
class Librispeech(datasets.GeneratorBasedBuilder):
    """LibriSpeech dataset with phone alignments, prosody and mel spectrograms."""
BUILDER_CONFIGS = [
LibrispeechConfig(
name="libritts",
version=datasets.Version(_VERSION, ""),
),
]
def _info(self):
        # Apart from ids and text, every feature is a string path to a file
        # extracted from the split's archive.
        features = {
"id": datasets.Value("string"),
"speaker_id": datasets.Value("string"),
"chapter_id": datasets.Value("string"),
"phones": datasets.Value("string"),
"mel": datasets.Value("string"),
"prosody": datasets.Value("string"),
"speaker_utterance": datasets.Value("string"),
"mean_speaker_utterance": datasets.Value("string"),
"mean_speaker": datasets.Value("string"),
"text": datasets.Value("string"),
}
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(features),
supervised_keys=None,
homepage="https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel",
citation=_CITATION,
task_templates=None,
)
def _split_generators(self, dl_manager):
splits = [
datasets.SplitGenerator(
name=key,
gen_kwargs={"data_path": dl_manager.download_and_extract(value)},
)
for key, value in _URLS.items()
]
return splits
    def _df_from_path(self, path):
        """Collect the metadata row for one utterance.

        `path` points at the utterance's `*_mel.png`; the sibling prosody,
        phone, text and speaker files are derived by suffix substitution.
        """
mel_path = Path(path)
prosody_path = Path(str(mel_path).replace("_mel.png", "_prosody.png"))
phones_path = Path(str(mel_path).replace("_mel.png", "_phones.npy"))
overall_speaker_path = Path(str(mel_path).replace("_mel.png", "_speaker.npy"))
temporal_speaker_path = Path(str(mel_path).replace("_mel.png", "_speaker.png"))
mean_speaker_path = mel_path.parent.parent / "mean_speaker.npy"
text = Path(str(mel_path).replace("_mel.png", "_text.txt")).read_text().lower()
speaker_id = mel_path.parent.parent.name
chapter_id = mel_path.parent.name
_id = str(mel_path).replace("_mel.png", "")
        # Cast paths to str so the rows match the all-string feature schema.
        return {
            "id": _id,
            "speaker_id": speaker_id,
            "chapter_id": chapter_id,
            "phones": str(phones_path),
            "mel": str(mel_path),
            "prosody": str(prosody_path),
            "speaker_utterance": str(temporal_speaker_path),
            "mean_speaker_utterance": str(overall_speaker_path),
            "mean_speaker": str(mean_speaker_path),
            "text": text,
        }
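    # On-disk layout implied by the path logic above (a sketch; directory
    # names are illustrative, only the suffixes are fixed):
    #
    #   <root>/<speaker_id>/<chapter_id>/<utt>_mel.png       mel spectrogram
    #   <root>/<speaker_id>/<chapter_id>/<utt>_prosody.png   prosody image
    #   <root>/<speaker_id>/<chapter_id>/<utt>_phones.npy    phone alignment
    #   <root>/<speaker_id>/<chapter_id>/<utt>_speaker.npy   utterance speaker embedding
    #   <root>/<speaker_id>/<chapter_id>/<utt>_speaker.png   temporal speaker embedding
    #   <root>/<speaker_id>/<chapter_id>/<utt>_text.txt      transcript
    #   <root>/<speaker_id>/mean_speaker.npy                 written by _create_mean_speaker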
    def _df_from_paths_mp(self, paths):
        """Build the metadata DataFrame from all mel paths in parallel."""
return pd.DataFrame(
process_map(
self._df_from_path,
paths,
desc="Reading files",
max_workers=cpu_count(),
chunksize=100,
)
)
    def _create_mean_speaker(self, df):
        # For each speaker, average the per-utterance speaker embeddings into
        # a single mean embedding, cached once per speaker directory.
        for _, speaker_df in df.groupby("speaker_id"):
            # Every row of a speaker shares the same target path; skip if cached.
            if not Path(speaker_df["mean_speaker"].iloc[0]).exists():
mean_speaker = np.mean(
np.array(
[np.load(path) for path in speaker_df["mean_speaker_utterance"]]
),
axis=0,
)
np.save(speaker_df["mean_speaker"].iloc[0], mean_speaker)
def _generate_examples(self, data_path):
"""Generate examples."""
logger.info("⏳ Generating examples from = %s", data_path)
paths = sorted(list(Path(data_path).rglob("*_mel.png")))
df = self._df_from_paths_mp(paths)
self._create_mean_speaker(df)
        for _, row in tqdm(df.iterrows(), total=len(df), desc="Generating examples"):
yield row["id"], row.to_dict()
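

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the loader). Assumes a
    # `datasets` version that accepts a local script path; newer releases
    # may additionally require trust_remote_code=True.
    from datasets import load_dataset

    ds = load_dataset(__file__, split="dev.clean")
    print(ds[0]["id"], ds[0]["text"])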