# hyvoxpopuli/app.py
import csv
import json
import os

import datasets

_DESCRIPTION = """
A large-scale speech corpus for representation learning, semi-supervised learning and interpretation.
"""
_CITATION = """
@inproceedings{}
"""
_HOMEPAGE = ""
_LICENSE = ""
_ASR_LANGUAGES = ["hy"]
_ASR_ACCENTED_LANGUAGES = []  # no accented variants for the Armenian corpus
_LANGUAGES = _ASR_LANGUAGES + _ASR_ACCENTED_LANGUAGES
_BASE_DATA_DIR = "data/"
_N_SHARDS_FILE = _BASE_DATA_DIR + "n_files.json"
_AUDIO_ARCHIVE_PATH = _BASE_DATA_DIR + "{split}/{split}_dataset.tar.gz"
_METADATA_PATH = _BASE_DATA_DIR + "{split}.tsv"
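
# Expected repository layout, from the path templates above:
#   data/n_files.json                    - number of audio shards per split
#   data/{split}/{split}_dataset.tar.gz  - audio archive for each split
#   data/{split}.tsv                     - per-utterance transcription metadata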


class HySpeech(datasets.GeneratorBasedBuilder):
    """Armenian (hy) VoxPopuli speech dataset."""

    VERSION = datasets.Version("1.1.0")  # TODO: version
    # Audio examples are large, so keep writer batches small to bound memory use.
    DEFAULT_WRITER_BATCH_SIZE = 256

    def _info(self):
        features = datasets.Features(
            {
                "audio_id": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16_000),
                "raw_text": datasets.Value("string"),
                "normalized_text": datasets.Value("string"),
                "gender": datasets.Value("string"),  # TODO: ClassLabel?
                "speaker_id": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
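
    # Each split consists of a single tar.gz of wav files plus a TSV of
    # per-utterance metadata, so the three generators below differ only in
    # the split name.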
    def _split_generators(self, dl_manager):
        n_shards_path = dl_manager.download_and_extract(_N_SHARDS_FILE)
        with open(n_shards_path) as f:
            n_shards = json.load(f)  # shard counts per split, kept for reference

        splits = ["train", "dev", "test"]
        audio_urls = {split: [_AUDIO_ARCHIVE_PATH.format(split=split)] for split in splits}
        meta_urls = {split: _METADATA_PATH.format(split=split) for split in splits}

        meta_paths = dl_manager.download_and_extract(meta_urls)
        audio_paths = dl_manager.download(audio_urls)
        # In streaming mode the archives are never extracted; iter_archive reads
        # them directly and the local extracted paths stay None.
        local_extracted_audio_paths = (
            dl_manager.extract(audio_paths)
            if not dl_manager.is_streaming
            else {split: [None] * len(audio_paths[split]) for split in splits}
        )
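
        # `iter_archive` yields (path-within-archive, file-object) pairs straight
        # from each tar, which also keeps streaming mode cheap.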
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "audio_archives": {
                        "hy": [dl_manager.iter_archive(archive) for archive in audio_paths["train"]]
                    },
                    "local_extracted_archives_paths": {"hy": local_extracted_audio_paths["train"]},
                    "metadata_paths": {"hy": meta_paths["train"]},
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "audio_archives": {
                        "hy": [dl_manager.iter_archive(archive) for archive in audio_paths["dev"]]
                    },
                    "local_extracted_archives_paths": {"hy": local_extracted_audio_paths["dev"]},
                    "metadata_paths": {"hy": meta_paths["dev"]},
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "audio_archives": {
                        "hy": [dl_manager.iter_archive(archive) for archive in audio_paths["test"]]
                    },
                    "local_extracted_archives_paths": {"hy": local_extracted_audio_paths["test"]},
                    "metadata_paths": {"hy": meta_paths["test"]},
                },
            ),
        ]
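
    # The metadata TSV must provide at least the columns "id", "raw_text",
    # "normalized_text", "speaker_id" and "gender".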
    def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
        features = ["raw_text", "normalized_text", "speaker_id", "gender"]

        meta_path = metadata_paths["hy"]
        with open(meta_path) as f:
            metadata = {row["id"]: row for row in csv.DictReader(f, delimiter="\t")}

        for audio_archive, local_extracted_archive_path in zip(
            audio_archives["hy"], local_extracted_archives_paths["hy"]
        ):
            for audio_filename, audio_file in audio_archive:
                # Members of a tar archive always use "/" as separator.
                audio_id = audio_filename.split("/")[-1].split(".wav")[0]
                path = (
                    os.path.join(local_extracted_archive_path, audio_filename)
                    if local_extracted_archive_path
                    else audio_filename
                )
                yield audio_id, {
                    "audio_id": audio_id,
                    **{feature: metadata[audio_id][feature] for feature in features},
                    "audio": {"path": path, "bytes": audio_file.read()},
                }
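

if __name__ == "__main__":
    # Minimal smoke test: a sketch, assuming this script belongs to the
    # "Edmon02/hyvoxpopuli" dataset repository (repo id inferred from the file
    # header; adjust as needed). Recent versions of `datasets` require
    # trust_remote_code=True to run a loading script.
    from datasets import load_dataset

    ds = load_dataset("Edmon02/hyvoxpopuli", split="test", trust_remote_code=True)
    print(ds)
    print(ds[0]["audio_id"], ds[0]["normalized_text"])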