import csv
import glob
import os
from collections import defaultdict

from tqdm.auto import tqdm

import datasets


_LANGUAGES = sorted(
    [
        "en", "de", "fr", "es", "pl", "it", "ro", "hu", "cs", "nl", "fi", "hr",
        "sk", "sl", "et", "lt", "pt", "bg", "el", "lv", "mt", "sv", "da"
    ]
)
_LANGUAGES_V2 = [f"{x}_v2" for x in _LANGUAGES]

_YEARS = list(range(2009, 2020 + 1))

# Currently unused: every size-based config covers all languages.
_CONFIG_TO_LANGS = {
    "400k": _LANGUAGES,
    "100k": _LANGUAGES,
    "10k": _LANGUAGES,
}

_CONFIG_TO_YEARS = {
    "400k": _YEARS + [f"{y}_2" for y in _YEARS],
    "100k": _YEARS,
    "10k": [2019, 2020],
    # "asr": _YEARS
}
for lang in _LANGUAGES:
    _CONFIG_TO_YEARS[lang] = _YEARS
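
# For illustration, the mapping above resolves to, e.g.:
#   _CONFIG_TO_YEARS["10k"] == [2019, 2020]
#   _CONFIG_TO_YEARS["en"]  == list(range(2009, 2021))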

_BASE_URL = "https://dl.fbaipublicfiles.com/voxpopuli/"

_DATA_URL = _BASE_URL + "audios/{lang}_{year}.tar"

_META_URL = _BASE_URL + "annotations/unlabelled_v2.tsv.gz"
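
# For example, the 2020 English audio archive resolves to:
#   _DATA_URL.format(lang="en", year=2020)
#   == "https://dl.fbaipublicfiles.com/voxpopuli/audios/en_2020.tar"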


class VoxpopuliConfig(datasets.BuilderConfig):
    """BuilderConfig for VoxPopuli."""

    def __init__(self, name, **kwargs):
        """
        Args:
          name: `string`, name of dataset config
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(name=name, **kwargs)
        self.languages = [name] if name in _LANGUAGES else _LANGUAGES


class Voxpopuli(datasets.GeneratorBasedBuilder):
    """The Voxpopuli dataset."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        VoxpopuliConfig(
            name=name,
            version=VERSION,
            description=f"VoxPopuli '{name}' configuration",
        )
        for name in _LANGUAGES + ["10k", "100k", "400k"]
    ]
    # DEFAULT_CONFIG_NAME = "400k"
    # DEFAULT_WRITER_BATCH_SIZE = 1

    def _info(self):
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "language": datasets.ClassLabel(names=_LANGUAGES),
                "year": datasets.Value("int16"),
                "audio": datasets.Audio(sampling_rate=16_000),
                "segment_id": datasets.Value("int16"),
            }
        )
        return datasets.DatasetInfo(
            # description=_DESCRIPTION,
            features=features,
            # homepage=_HOMEPAGE,
            # license=_LICENSE,
            # citation=_CITATION,
        )

    def _read_metadata(self, metadata_path):
        """Parse the unlabelled-data TSV (columns: audio_id, segment_id, start, end) into a
        dict mapping each audio_id to its list of (start, end) segment timestamps in seconds.
        Only rows whose language belongs to the current config are kept.
        """
        # TODO: check for predicate??
        #  @ https://github.com/facebookresearch/voxpopuli/blob/main/voxpopuli/get_unlabelled_data.py#L34
        metadata = defaultdict(list)

        with open(metadata_path, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, delimiter="\t")
            for i, row in tqdm(enumerate(csv_reader)):
                if i == 0:  # skip the header row
                    continue
                audio_id, segment_id, start, end = row
                event_id, lang = audio_id.rsplit("_", 1)
                if lang in self.config.languages:
                    metadata[audio_id].append((float(start), float(end)))

        return metadata

    def _split_generators(self, dl_manager):
        metadata_path = dl_manager.download_and_extract(_META_URL)

        years = _CONFIG_TO_YEARS[self.config.name]
        urls = [_DATA_URL.format(lang=language, year=year) for language in self.config.languages for year in years]
        dl_manager.download_config.num_proc = len(urls)  # fetch the per-language/year archives in parallel
        data_dirs = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dirs": data_dirs,
                    "metadata_path": metadata_path,
                }
            ),
        ]

    def _generate_examples(self, data_dirs, metadata_path):
        try:
            import torchaudio
        except ImportError as e:
            raise ImportError(
                "Loading VoxPopuli requires `torchaudio` to be installed. "
                "You can install it with `pip install torchaudio`."
            ) from e

        metadata = self._read_metadata(metadata_path)

        for data_dir in data_dirs:
            for file in glob.glob(f"{data_dir}/**/*.ogg", recursive=True):
                path_components = file.split(os.sep)
                language, year, audio_filename = path_components[-3:]
                audio_id, _ = os.path.splitext(audio_filename)
                timestamps = metadata[audio_id]

                waveform, sr = torchaudio.load(file)
                n_samples = waveform.size(1)  # length of the recording in samples

                for segment_id, (start, stop) in enumerate(timestamps):
                    # cut out the annotated segment, clamped to the end of the recording
                    segment = waveform[:, int(start * sr): min(int(stop * sr), n_samples)]

                    yield f"{audio_filename}_{segment_id}", {
                        "path": file,
                        "language": language,
                        "year": year,
                        "audio": {
                            "array": segment[0],  # segment is a 2-dim array
                            "sampling_rate": 16_000
                        },
                        "segment_id": segment_id,
                    }
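

# A minimal usage sketch (not part of the builder itself). It assumes this script is saved
# locally, e.g. as `voxpopuli.py`; the filename and the chosen config are illustrative only.
# Any of the configs defined above ("en", ..., "hr", "10k", "100k", "400k") can be passed as
# the second argument. Note that even single-language configs download several large archives.
if __name__ == "__main__":
    ds = datasets.load_dataset("voxpopuli.py", "hr", split="train")
    sample = ds[0]
    print(sample["language"], sample["year"], sample["segment_id"])
    print(sample["audio"]["sampling_rate"], len(sample["audio"]["array"]))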