""" |
|
Librispeech automatic speech recognition dataset for reproducing Reborn UASR results. |
|
Note that the silence in each audio has been removed by performing unsupervised VAD (https://github.com/zhenghuatan/rVADfast). |
|
We only process the 100-hour split from LibriSpeech 'train-clean-100' as the training split. |
|
""" |

import os

import datasets

_CITATION = """\
@inproceedings{panayotov2015librispeech,
    title={Librispeech: an ASR corpus based on public domain audio books},
    author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
    booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
    pages={5206--5210},
    year={2015},
    organization={IEEE}
}
@article{tan2020rvad,
    title={rVAD: An unsupervised segment-based robust voice activity detection method},
    author={Tan, Zheng-Hua and Dehak, Najim and others},
    journal={Computer speech \& language},
    volume={59},
    pages={1--21},
    year={2020},
    publisher={Elsevier}
}
@article{tseng2024reborn,
    title={REBORN: Reinforcement-Learned Boundary Segmentation with Iterative Training for Unsupervised ASR},
    author={Tseng, Liang-Hsuan and Hu, En-Pei and Chiang, Cheng-Han and Tseng, Yuan and Lee, Hung-yi and Lee, Lin-shan and Sun, Shao-Hua},
    journal={arXiv preprint arXiv:2402.03988},
    year={2024}
}
"""

_DESCRIPTION = """\
LibriSpeech is a corpus of approximately 1000 hours of read English speech with a sampling rate of 16 kHz,
prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read
audiobooks from the LibriVox project and has been carefully segmented and aligned.

This dataset is the 100-hour 'train-clean-100' subset of LibriSpeech, with silence removed.
Additionally, all the dev and test sets are included for fair comparison and evaluation if needed.
The dataset is prepared by the Reborn UASR team.
arXiv paper link: https://arxiv.org/abs/2402.03988
"""

_URL = "http://www.openslr.org/12"

_DL_URL_FORMAT = "data"


class RebornLibrispeechConfig(datasets.BuilderConfig):
    """BuilderConfig for Reborn-Librispeech."""

    def __init__(self, name, **kwargs):
        """
        Args:
          name: `string`, name of the dataset config.
          **kwargs: keyword arguments forwarded to super.
        """
        super(RebornLibrispeechConfig, self).__init__(
            version=datasets.Version("2.12.0", ""), name=name, **kwargs
        )

        self.data_root_url = _DL_URL_FORMAT


class RebornLibrispeech(datasets.GeneratorBasedBuilder):
    """Reborn UASR LibriSpeech dataset (train-clean-100 with silence removed)."""

    BUILDER_CONFIGS = [
        RebornLibrispeechConfig(name="reborn_ls100hr", description="train-clean-100 LibriSpeech dataset without silence"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "word": datasets.Value("string"),
                    "phoneme": datasets.Value("string"),
                    "speaker_id": datasets.Value("int64"),
                    "chapter_id": datasets.Value("int64"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=("file", "phoneme"),
            homepage=_URL,
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        metadata = dl_manager.download({
            "train-clean-100": self.config.data_root_url + "/metadata/train-clean-100.tsv",
            "dev-clean": self.config.data_root_url + "/metadata/dev-clean.tsv",
            "dev-clean-small": self.config.data_root_url + "/metadata/dev-clean-small.tsv",
            "dev-other": self.config.data_root_url + "/metadata/dev-other.tsv",
            "test-clean": self.config.data_root_url + "/metadata/test-clean.tsv",
            "test-other": self.config.data_root_url + "/metadata/test-other.tsv",
        })
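
        # Assumed metadata layout (one utterance per tab-separated line), matching how
        # _generate_examples parses it below; the concrete fields are only illustrative:
        #   <relative/path/to/utterance.flac>\t<word transcript>\t<phoneme transcript>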

        all_splits = [
            "train-clean-100",
            "dev-clean",
            "dev-other",
            "test-clean",
            "test-other",
        ]

        audio_archives = {}
        for split in all_splits:
            audio_archives[split] = dl_manager.download(
                os.path.join(self.config.data_root_url, f"{split}.tar.gz")
            )
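
        # Each "{split}.tar.gz" archive under the data root is expected to contain the
        # silence-removed .flac files for that split, iterated below via dl_manager.iter_archive.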

        # In non-streaming mode, extract the archives so that local audio file paths are available.
        local_extracted_archives = dl_manager.extract(audio_archives) if not dl_manager.is_streaming else {}

        train_splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "metadata_fpaths": [metadata["train-clean-100"]],
                    "audio_archives": [dl_manager.iter_archive(audio_archives["train-clean-100"])],
                    "local_extracted_archives": [local_extracted_archives.get("train-clean-100")],
                }
            ),
            datasets.SplitGenerator(
                name="train-clean-100",
                gen_kwargs={
                    "metadata_fpaths": [metadata["train-clean-100"]],
                    "audio_archives": [dl_manager.iter_archive(audio_archives["train-clean-100"])],
                    "local_extracted_archives": [local_extracted_archives.get("train-clean-100")],
                }
            ),
        ]

        dev_splits = [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "metadata_fpaths": [metadata["dev-clean"], metadata["dev-other"]],
                    "audio_archives": [dl_manager.iter_archive(audio_archives["dev-clean"]), dl_manager.iter_archive(audio_archives["dev-other"])],
                    "local_extracted_archives": [local_extracted_archives.get("dev-clean"), local_extracted_archives.get("dev-other")],
                }
            ),
            datasets.SplitGenerator(
                name="dev-clean",
                gen_kwargs={
                    "metadata_fpaths": [metadata["dev-clean"]],
                    "audio_archives": [dl_manager.iter_archive(audio_archives["dev-clean"])],
                    "local_extracted_archives": [local_extracted_archives.get("dev-clean")],
                },
            ),
            datasets.SplitGenerator(
                name="dev-other",
                gen_kwargs={
                    "metadata_fpaths": [metadata["dev-other"]],
                    "audio_archives": [dl_manager.iter_archive(audio_archives["dev-other"])],
                    "local_extracted_archives": [local_extracted_archives.get("dev-other")],
                },
            ),
            datasets.SplitGenerator(
                # "dev-clean-small" reuses the dev-clean audio archive but keeps only the
                # utterances listed in its own metadata file.
                name="dev-clean-small",
                gen_kwargs={
                    "metadata_fpaths": [metadata["dev-clean-small"]],
                    "audio_archives": [dl_manager.iter_archive(audio_archives["dev-clean"])],
                    "local_extracted_archives": [local_extracted_archives.get("dev-clean")],
                },
            ),
        ]

        test_splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "metadata_fpaths": [metadata["test-clean"], metadata["test-other"]],
                    "audio_archives": [dl_manager.iter_archive(audio_archives["test-clean"]), dl_manager.iter_archive(audio_archives["test-other"])],
                    "local_extracted_archives": [local_extracted_archives.get("test-clean"), local_extracted_archives.get("test-other")],
                }
            ),
            datasets.SplitGenerator(
                name="test-clean",
                gen_kwargs={
                    "metadata_fpaths": [metadata["test-clean"]],
                    "audio_archives": [dl_manager.iter_archive(audio_archives["test-clean"])],
                    "local_extracted_archives": [local_extracted_archives.get("test-clean")],
                }
            ),
            datasets.SplitGenerator(
                name="test-other",
                gen_kwargs={
                    "metadata_fpaths": [metadata["test-other"]],
                    "audio_archives": [dl_manager.iter_archive(audio_archives["test-other"])],
                    "local_extracted_archives": [local_extracted_archives.get("test-other")],
                }
            ),
        ]

        return train_splits + dev_splits + test_splits

    def _generate_examples(self, metadata_fpaths, audio_archives, local_extracted_archives):
        """Generate examples from the Reborn LibriSpeech metadata files and audio archives."""
        # Map each utterance id to its word-level and phoneme-level transcripts.
        words, phones = dict(), dict()
        for metadata_fpath in metadata_fpaths:
            with open(metadata_fpath, "r", encoding="utf-8") as file:
                for line in file:
                    audio_fpath, word, phone = line.strip().split("\t")
                    audio_id = audio_fpath.split('/')[-1].split(".flac")[0]
                    words[audio_id] = word
                    phones[audio_id] = phone

        for archive_idx, audio_archive in enumerate(audio_archives):
            for audio_filename, file in audio_archive:
                # LibriSpeech file names follow "<speaker>-<chapter>-<utterance>.flac".
                audio_id = audio_filename.split('/')[-1].split(".flac")[0]
                speaker_id, chapter_id = (int(item) for item in audio_id.split("-")[:2])
                word = words.get(audio_id, None)
                if word is None:
                    # Skip audio files that are not listed in this split's metadata.
                    continue

                local_audio_file_path = os.path.join(
                    local_extracted_archives[archive_idx], audio_filename
                ) if local_extracted_archives[archive_idx] else None

                yield audio_filename, {
                    "file": local_audio_file_path,
                    "audio": {
                        "path": local_audio_file_path if local_audio_file_path else audio_filename,
                        "bytes": file.read()
                    },
                    "word": word,
                    "phoneme": phones.get(audio_id, None),
                    "speaker_id": speaker_id,
                    "chapter_id": chapter_id,
                    "id": audio_id
                }