|
from collections import defaultdict |
|
import os |
|
import glob |
|
import csv |
|
from tqdm.auto import tqdm |
|
|
|
import datasets |
|
|
|
|
|
_LANGUAGES = sorted( |
|
[ |
|
"en", "de", "fr", "es", "pl", "it", "ro", "hu", "cs", "nl", "fi", "hr", |
|
"sk", "sl", "et", "lt", "pt", "bg", "el", "lv", "mt", "sv", "da" |
|
] |
|
) |
|
_LANGUAGES_V2 = [f"{x}_v2" for x in _LANGUAGES] |
|
|
|
_YEARS = list(range(2009, 2020 + 1)) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Maps each config name to the tarball year suffixes it downloads.
# The "400k" subset additionally pulls the "<year>_2" second-part archives.
_CONFIG_TO_YEARS = {
    "400k": _YEARS + [f"{year}_2" for year in _YEARS],
    "100k": _YEARS,
    "10k": [2019, 2020],
}
# Single-language configs ship only the 2020 audio.
_CONFIG_TO_YEARS.update({language: [2020] for language in _LANGUAGES})
|
|
|
# Root of the public VoxPopuli file hosting.
_BASE_URL = "https://dl.fbaipublicfiles.com/voxpopuli/"

# Per-language, per-year audio tarball; placeholders are filled with
# `str.format(lang=..., year=...)` in `_split_generators`.
_DATA_URL = _BASE_URL + "audios/{lang}_{year}.tar"

# Gzipped TSV of segment annotations for the unlabelled v2 release.
_META_URL = _BASE_URL + "annotations/unlabelled_v2.tsv.gz"
|
|
|
|
|
class VoxpopuliConfig(datasets.BuilderConfig):
    """BuilderConfig for the VoxPopuli dataset.

    Resolves the config name to the list of languages it covers.
    """

    def __init__(self, name, **kwargs):
        """
        Args:
            name: `string`, name of dataset config
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(name=name, **kwargs)
        # A single-language config loads just that language; every other
        # config name ("10k", "100k", "400k") spans all languages.
        if name in _LANGUAGES:
            self.languages = [name]
        else:
            self.languages = _LANGUAGES
|
|
|
|
|
class Voxpopuli(datasets.GeneratorBasedBuilder):
    """The VoxPopuli dataset: unlabelled European Parliament speech.

    Audio is distributed as per-language, per-year tarballs of `.ogg`
    recordings; segment boundaries come from a single gzipped TSV of
    annotations. Each generated example is one annotated segment cut
    out of a full-event recording.
    """

    VERSION = datasets.Version("1.0.0")
    # One config per language plus the "10k"/"100k"/"400k" multilingual subsets.
    BUILDER_CONFIGS = [
        VoxpopuliConfig(
            name=name,
            description="",
        )
        for name in _LANGUAGES + ["10k", "100k", "400k"]
    ]

    # Decoded audio arrays are large; a small writer batch bounds peak
    # memory while writing Arrow files.
    DEFAULT_WRITER_BATCH_SIZE = 256

    def _info(self):
        """Build the DatasetInfo and verify the optional torchaudio dependency.

        Raises:
            ValueError: if torch/torchaudio are not installed.
        """
        try:
            # `torch` is imported only so a missing torch install produces
            # the same clear error as a missing torchaudio.
            import torch
            import torchaudio
        except ImportError as e:
            # BUG FIX: the original implicit string concatenation had no
            # separator and rendered "installed.You can"; the exception is
            # now also chained to the original ImportError.
            raise ValueError(
                f"{str(e)}.\n"
                "Loading voxpopuli requires `torchaudio` to be installed. "
                "You can install torchaudio with `pip install torchaudio`."
            ) from e
        # The `global` declaration applies to the whole function, so the
        # `import torchaudio` above binds at module scope and the module is
        # available to `_generate_examples` later.
        global torchaudio

        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "language": datasets.ClassLabel(names=_LANGUAGES),
                "year": datasets.Value("int16"),
                "audio": datasets.Audio(sampling_rate=16_000),
                "segment_id": datasets.Value("int16"),
            }
        )
        return datasets.DatasetInfo(
            features=features,
        )

    def _read_metadata(self, metadata_path):
        """Read the segment-annotation TSV, filtered for the active config.

        Args:
            metadata_path: path to the (already decompressed) TSV whose
                rows are `event_id, segment_id, start, end`, with a header.

        Returns:
            dict mapping event_id -> list of (start, end) offsets in seconds.
        """

        def predicate(id_):
            # Decide whether an event belongs to the current config.
            is_plenary = id_.find("PLENARY") > -1
            if self.config.name == "10k":
                # 10k subset: plenary sessions from 2019-01-01 up to 2020-08-01.
                return is_plenary and 20190101 <= int(id_[:8]) < 20200801
            elif self.config.name == "100k":
                # 100k subset: every plenary session.
                return is_plenary
            elif self.config.name in _LANGUAGES:
                # Per-language config: plenary sessions in that language.
                return is_plenary and id_.endswith(self.config.name)
            elif self.config.name in _LANGUAGES_V2:
                # v2 per-language config: all events in that language.
                return id_.endswith(self.config.name.split("_")[0])
            # "400k" (and anything else) keeps every event.
            return True

        metadata = defaultdict(list)

        with open(metadata_path, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, delimiter="\t")
            for i, row in tqdm(enumerate(csv_reader)):
                if i == 0:
                    # Skip the header row.
                    continue
                event_id, segment_id, start, end = row
                # Event ids end in "..._<lang>"; take the trailing language code.
                _, lang = event_id.rsplit("_", 1)[-2:]
                if lang in self.config.languages and predicate(event_id):
                    metadata[event_id].append((float(start), float(end)))

        return metadata

    def _split_generators(self, dl_manager):
        """Download the annotation TSV and all needed audio tarballs."""
        metadata_path = dl_manager.download_and_extract(_META_URL)

        years = _CONFIG_TO_YEARS[self.config.name]
        urls = [_DATA_URL.format(lang=language, year=year) for language in self.config.languages for year in years]
        # Fetch the tarballs in parallel, one worker per archive.
        dl_manager.download_config.num_proc = len(urls)
        data_dirs = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dirs": data_dirs,
                    "metadata_path": metadata_path,
                }
            ),
        ]

    def _generate_examples(self, data_dirs, metadata_path):
        """Yield (key, example) pairs, one per annotated audio segment."""
        metadata = self._read_metadata(metadata_path)

        for data_dir in data_dirs:
            # Extracted layout is <data_dir>/.../<language>/<year>/<event>.ogg.
            for audio_path in glob.glob(f"{data_dir}/**/*.ogg", recursive=True):
                path_components = audio_path.split(os.sep)
                language, year, audio_filename = path_components[-3:]
                audio_id, _ = os.path.splitext(audio_filename)
                if audio_id not in metadata:
                    # No annotated segments for this event under this config.
                    continue
                timestamps = metadata[audio_id]

                waveform, sr = torchaudio.load(audio_path)
                # Recording length in samples (was misleadingly named
                # `duration`); used to clamp segment end offsets.
                num_samples = waveform.size(1)

                for segment_id, (start, stop) in enumerate(timestamps):
                    # start/stop are seconds; convert to sample indices and
                    # clamp the end to the actual recording length.
                    segment = waveform[:, int(start * sr): min(int(stop * sr), num_samples)]

                    yield f"{audio_filename}_{segment_id}", {
                        "path": audio_path,
                        "language": language,
                        "year": year,
                        "audio": {
                            # First channel only — assumes mono source audio.
                            # NOTE(review): also assumes the decoded rate is
                            # already 16 kHz; confirm against the tarballs.
                            "array": segment[0],
                            "sampling_rate": 16_000
                        },
                        "segment_id": segment_id,
                    }
|
|