# voxpopuli.py
from collections import defaultdict
import os
import glob
import csv
from tqdm.auto import tqdm
import datasets
_LANGUAGES = sorted(
[
"en", "de", "fr", "es", "pl", "it", "ro", "hu", "cs", "nl", "fi", "hr",
"sk", "sl", "et", "lt", "pt", "bg", "el", "lv", "mt", "sv", "da"
]
)
_LANGUAGES_V2 = [f"{x}_v2" for x in _LANGUAGES]
_YEARS = list(range(2009, 2020 + 1))
# Note: _CONFIG_TO_LANGS is currently unused; the multi-language configs all cover the full language list.
_CONFIG_TO_LANGS = {
"400k": _LANGUAGES,
"100k": _LANGUAGES,
"10k": _LANGUAGES,
}
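# Maps each config name to the list of years whose audio archives are downloaded;
# the "400k" config additionally includes the extra "{year}_2" archive names.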
_CONFIG_TO_YEARS = {
"400k": _YEARS + [f"{y}_2" for y in _YEARS],
"100k": _YEARS,
"10k": [2019, 2020],
# "asr": _YEARS
}
for lang in _LANGUAGES:
_CONFIG_TO_YEARS[lang] = _YEARS
_BASE_URL = "https://dl.fbaipublicfiles.com/voxpopuli/"
_DATA_URL = _BASE_URL + "audios/{lang}_{year}.tar"
_META_URL = _BASE_URL + "annotations/unlabelled_v2.tsv.gz"
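# e.g. _DATA_URL.format(lang="en", year=2020) expands to
# "https://dl.fbaipublicfiles.com/voxpopuli/audios/en_2020.tar"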
class VoxpopuliConfig(datasets.BuilderConfig):
"""BuilderConfig for VoxPopuli."""
def __init__(self, name, **kwargs):
"""
Args:
name: `string`, name of dataset config
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(name=name, **kwargs)
self.languages = [name] if name in _LANGUAGES else _LANGUAGES
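# Example usage (a sketch; the dataset path below is an assumption, not part of
# this script):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("path/to/voxpopuli", "hr", split="train")   # one language
#     ds = load_dataset("path/to/voxpopuli", "10k", split="train")  # 10k-hour unlabelled subset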
class Voxpopuli(datasets.GeneratorBasedBuilder):
"""The Voxpopuli dataset."""
VERSION = datasets.Version("1.0.0") # TODO ??
BUILDER_CONFIGS = [
VoxpopuliConfig(
name=name,
# version=VERSION,
description="", # TODO
)
for name in _LANGUAGES + ["10k", "100k", "400k"]
]
# DEFAULT_CONFIG_NAME = "400k"
# DEFAULT_WRITER_BATCH_SIZE = 256
def _info(self):
features = datasets.Features(
{
"path": datasets.Value("string"),
"language": datasets.ClassLabel(names=_LANGUAGES),
"year": datasets.Value("int16"),
"audio": datasets.Audio(sampling_rate=16_000),
"segment_id": datasets.Value("int16"),
}
)
return datasets.DatasetInfo(
# description=_DESCRIPTION,
features=features,
# homepage=_HOMEPAGE,
# license=_LICENSE,
# citation=_CITATION,
)
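    # Each example is one speech segment cut from a plenary-session recording,
    # described by the features above and carrying its raw 16 kHz audio.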
def _read_metadata(self, metadata_path):
# TODO: check for predicate??
# @ https://github.com/facebookresearch/voxpopuli/blob/main/voxpopuli/get_unlabelled_data.py#L34
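        # The metadata TSV has one row per segment; the mapping built here groups
        # (start, end) second offsets by source recording, e.g. (hypothetical id
        # and values):
        #   {"20200101-0900-PLENARY_en": [(0.0, 4.2), (5.1, 9.8)]}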
metadata = defaultdict(list)
with open(metadata_path, encoding="utf-8") as csv_file:
csv_reader = csv.reader(csv_file, delimiter="\t")
            for i, row in tqdm(enumerate(csv_reader)):
                if i == 0:  # skip the header row
                    continue
                audio_id, segment_id, start, end = row
                _, lang = audio_id.rsplit("_", 1)  # audio ids end with "_<lang>"
                if lang in self.config.languages:
                    metadata[audio_id].append((float(start), float(end)))
return metadata
def _split_generators(self, dl_manager):
metadata_path = dl_manager.download_and_extract(_META_URL)
years = _CONFIG_TO_YEARS[self.config.name]
        urls = [_DATA_URL.format(lang=language, year=year) for language in self.config.languages for year in years]
dl_manager.download_config.num_proc = len(urls)
data_dirs = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_dirs": data_dirs,
"metadata_path": metadata_path,
}
),
]
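    # Each extracted archive is expected to contain files laid out as
    # <data_dir>/<language>/<year>/<audio_id>.ogg; the path parsing in
    # _generate_examples below relies on that layout.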
def _generate_examples(self, data_dirs, metadata_path):
        try:
            import torch
            import torchaudio
        except ImportError as e:
            raise ImportError(
                "Loading voxpopuli requires `torchaudio` to be installed. "
                "You can install torchaudio with `pip install torchaudio`."
            ) from e
metadata = self._read_metadata(metadata_path)
for data_dir in data_dirs:
for file in glob.glob(f"{data_dir}/**/*.ogg", recursive=True):
path_components = file.split(os.sep)
language, year, audio_filename = path_components[-3:]
audio_id, _ = os.path.splitext(audio_filename)
timestamps = metadata[audio_id]
                waveform, sr = torchaudio.load(file)
                num_samples = waveform.size(1)  # total number of samples in the recording
                # split audio on the fly and write segments as arrays
                for segment_id, (start, stop) in enumerate(timestamps):
                    segment = waveform[:, int(start * sr): min(int(stop * sr), num_samples)]
yield f"{audio_filename}_{segment_id}", {
"path": file,
"language": language,
"year": year,
"audio": {
"array": segment[0], # segment is a 2-dim array
"sampling_rate": 16_000
},
"segment_id": segment_id,
}
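                    # A yielded example looks roughly like (hypothetical values):
                    #   ("20200101-0900-PLENARY_en.ogg_0", {
                    #       "path": ".../en/2020/20200101-0900-PLENARY_en.ogg",
                    #       "language": "en",
                    #       "year": "2020",
                    #       "audio": {"array": <1-D waveform>, "sampling_rate": 16000},
                    #       "segment_id": 0,
                    #   })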