import gzip
import json

import datasets
import numpy as np

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
Libriheavy is a labeled version of Librilight.
This (unofficial) Hugging Face dataset contains the medium (4,500 hours) split
of the Libriheavy dataset with word alignments and mel spectrograms.
"""

_URL = """\
https://github.com/k2-fsa/libriheavy
"""

_CITATION = """\
@article{kang2023libriheavy,
  title={Libriheavy: a 50,000 hours ASR corpus with punctuation casing and context},
  author={Kang, Wei and Yang, Xiaoyu and Yao, Zengwei and Kuang, Fangjun and Yang, Yifan and Guo, Liyong and Lin, Long and Povey, Daniel},
  journal={arXiv preprint arXiv:2309.08105},
  year={2023}
}
"""


class LibriheavyConfig(datasets.BuilderConfig):
    """BuilderConfig for Libriheavy."""

    def __init__(self, **kwargs):
        """BuilderConfig for Libriheavy.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class Libriheavy(datasets.GeneratorBasedBuilder):
    """Libriheavy dataset."""

    BUILDER_CONFIGS = [
        LibriheavyConfig(name="libriheavy", version=datasets.Version("1.0.0"), description="Libriheavy dataset."),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    # Path to the per-chunk .npz archive, not decoded audio.
                    "audio": datasets.Value("string"),
                    # Path to the per-chunk gzipped JSON with transcripts and alignments.
                    "text": datasets.Value("string"),
                    "word_segments": datasets.Sequence(
                        {
                            "start": datasets.Value("int32"),
                            "end": datasets.Value("int32"),
                            "word": datasets.Value("string"),
                        }
                    ),
                    "mel_spectrogram": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # First, load speaker_list.json, which maps speaker IDs to per-speaker
        # metadata files.
        speaker_list = dl_manager.download_and_extract("medium_data/speaker_list.json")
        with open(speaker_list, "r") as f:
            speaker_list = json.load(f)
        # Now load the individual speaker metadata.
        speaker_metadata = {}
        for speaker_id, metadata_path in speaker_list.items():
            metadata_path = dl_manager.download_and_extract(f"medium_data/{metadata_path}")
            with open(metadata_path, "r") as f:
                speaker_metadata[speaker_id] = json.load(f)
        # Collect one entry per chunk: the .npz archive with mel spectrograms
        # (the metadata lists it with a ".gz" suffix that the hosted file
        # lacks, so the suffix is stripped) and the gzipped JSON with
        # transcripts and alignments.
        speaker_chunks = []
        for speaker_id, metadata in speaker_metadata.items():
            for chunk_id, chunk in metadata["chunks"].items():
                speaker_chunks.append(
                    {
                        "speaker_id": speaker_id,
                        "id": f"{speaker_id}_{chunk_id}",
                        "audio": dl_manager.download(f"medium_data/{chunk['npz'].replace('.gz', '')}"),
                        "text": dl_manager.download(f"medium_data/{chunk['json']}"),
                    }
                )
        # Shuffle the chunks with a fixed seed so the split order is reproducible.
        np.random.seed(42)
        np.random.shuffle(speaker_chunks)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"speaker_chunks": speaker_chunks},
            )
        ]

    def _generate_examples(self, speaker_chunks):
        """Yields examples."""
        for chunk in speaker_chunks:
            # Each .npz archive maps utterance IDs to pickled per-utterance dicts.
            npz = dict(np.load(chunk["audio"], allow_pickle=True))
            with gzip.open(chunk["text"], "rt") as f:
                text = json.load(f)
            for utterance_id, utterance in text.items():
                key = f"{chunk['speaker_id']}_{utterance_id}"
                result = {
                    "id": key,
                    "speaker_id": chunk["speaker_id"],
                    # Paths to the chunk-level files, not per-utterance data.
                    "audio": chunk["audio"],
                    "text": chunk["text"],
                    "word_segments": [
                        {"start": segment[0], "end": segment[1], "word": segment[2]}
                        for segment in utterance["word_segments"]
                    ],
                    # The stored value is a 0-d object array wrapping a dict;
                    # .item() unwraps it before indexing into the "mel" entry.
                    "mel_spectrogram": npz[utterance_id].item()["mel"][0][0],
                }
                yield key, result
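
# Usage sketch (not part of the loading script itself). Assumptions: this file
# is saved locally as "libriheavy.py" (the filename is hypothetical) and a
# `datasets` release that still supports script-based datasets is installed.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("libriheavy.py", "libriheavy", split="train")
#   example = next(iter(ds))
#   # `word_segments` is declared as a Sequence of dicts, so it is returned as
#   # a dict of lists: example["word_segments"]["word"] is the aligned words.
#   print(example["id"], example["word_segments"]["word"][:5])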