polinaeterna committed
Commit bf0fd87
1 Parent(s): 66b9f5d

update loading script

Files changed (1): voxpopuli.py +79 -124
voxpopuli.py CHANGED
@@ -1,8 +1,7 @@
 from collections import defaultdict
 import os
-import glob
+import json
 import csv
-from tqdm.auto import tqdm
 
 import datasets
 
@@ -39,15 +38,6 @@ _HOMEPAGE = "https://github.com/facebookresearch/voxpopuli"
 
 _LICENSE = "CC0, also see https://www.europarl.europa.eu/legal-notice/en/"
 
-
-_LANGUAGES = sorted(
-    [
-        "en", "de", "fr", "es", "pl", "it", "ro", "hu", "cs", "nl", "fi", "hr",
-        "sk", "sl", "et", "lt", "pt", "bg", "el", "lv", "mt", "sv", "da"
-    ]
-)
-_LANGUAGES_V2 = [f"{x}_v2" for x in _LANGUAGES]
-
 _ASR_LANGUAGES = [
     "en", "de", "fr", "es", "pl", "it", "ro", "hu", "cs", "nl", "fi", "hr",
     "sk", "sl", "et", "lt"
@@ -56,39 +46,16 @@ _ASR_ACCENTED_LANGUAGES = [
     "en_accented"
 ]
 
-_YEARS = list(range(2009, 2020 + 1))
-
-# unnecessary
-_CONFIG_TO_LANGS = {
-    "400k": _LANGUAGES,
-    "100k": _LANGUAGES,
-    "10k": _LANGUAGES,
-    "asr": _ASR_LANGUAGES,  # + _ASR_ACCENTED_LANGUAGES
-}
-
-_CONFIG_TO_YEARS = {
-    "400k": _YEARS + [f"{y}_2" for y in _YEARS],
-    "100k": _YEARS,
-    "10k": [2019, 2020],
-    "asr": _YEARS,
-}
-for lang in _LANGUAGES:
-    _CONFIG_TO_YEARS[lang] = _YEARS
-    # _CONFIG_TO_YEARS[lang] = [2020]
-
-for lang in _LANGUAGES_V2:
-    _CONFIG_TO_YEARS[lang] = _YEARS + [f"{y}_2" for y in _YEARS]
+_LANGUAGES = _ASR_LANGUAGES + _ASR_ACCENTED_LANGUAGES
 
 
-_BASE_URL = "https://dl.fbaipublicfiles.com/voxpopuli/"
+_BASE_DATA_DIR = "https://huggingface.co/datasets/polinaeterna/voxpopuli/resolve/main/data/"
 
-_DATA_URL = _BASE_URL + "audios/{lang}_{year}.tar"
+_N_SHARDS_FILE = _BASE_DATA_DIR + "n_files.json"
 
-_ASR_DATA_URL = _BASE_URL + "audios/original_{year}.tar"
+_AUDIO_ARCHIVE_PATH = _BASE_DATA_DIR + "{lang}/{split}/{split}_part_{n_shard}.tar.gz"
 
-_UNLABELLED_META_URL = _BASE_URL + "annotations/unlabelled_v2.tsv.gz"
+_METADATA_PATH = _BASE_DATA_DIR + "{lang}/asr_{split}.tsv"
 
-_ASR_META_URL = _BASE_URL + "annotations/asr/asr_{lang}.tsv.gz"
 
 class VoxpopuliConfig(datasets.BuilderConfig):
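
The constants added above encode the new data layout: audio is sharded into per-language, per-split tar.gz archives, with shard counts listed in n_files.json (keyed language -> split, matching the n_shards[lang][split] lookup later in the script). A minimal sketch of how the template expands; the shard counts below are hypothetical:

import json

# Hypothetical excerpt of n_files.json -- the real file is downloaded from _N_SHARDS_FILE.
n_shards = json.loads('{"en": {"train": 4, "dev": 1, "test": 1}}')

_BASE_DATA_DIR = "https://huggingface.co/datasets/polinaeterna/voxpopuli/resolve/main/data/"
_AUDIO_ARCHIVE_PATH = _BASE_DATA_DIR + "{lang}/{split}/{split}_part_{n_shard}.tar.gz"

# One URL per shard of English train audio.
urls = [
    _AUDIO_ARCHIVE_PATH.format(lang="en", split="train", n_shard=i)
    for i in range(n_shards["en"]["train"])
]
print(urls[0])  # .../data/en/train/train_part_0.tar.gz
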
@@ -101,44 +68,32 @@ class VoxpopuliConfig(datasets.BuilderConfig):
           **kwargs: keyword arguments forwarded to super.
         """
         super().__init__(name=name, **kwargs)
-        name = name.split("_")[0]
-        self.languages = [name] if name in _LANGUAGES else _CONFIG_TO_LANGS[name]
-        self.years = _CONFIG_TO_YEARS[name]
+        self.languages = _LANGUAGES if name == "all" else [name]
+        # self.data_root_dis = {lang: _DATA_DIR.format(lang) for lang in self.languages}
 
 
 class Voxpopuli(datasets.GeneratorBasedBuilder):
     """The VoxPopuli dataset."""
 
-    VERSION = datasets.Version("1.3.0")  # not sure
+    VERSION = datasets.Version("1.3.0")  # TODO: version
     BUILDER_CONFIGS = [
         VoxpopuliConfig(
             name=name,
             version=datasets.Version("1.3.0"),
         )
-        for name in _LANGUAGES + _LANGUAGES_V2 + ["10k", "100k", "400k"]
+        for name in _LANGUAGES + ["all"]
     ]
-    # DEFAULT_CONFIG_NAME = "400k"
     DEFAULT_WRITER_BATCH_SIZE = 256  # SET THIS TO A LOWER VALUE IF IT USES TOO MUCH RAM SPACE
 
     def _info(self):
-        try:
-            import torch
-            import torchaudio
-        except ImportError as e:
-            raise ValueError(
-                f"{str(e)}.\n" +
-                "Loading voxpopuli requires `torchaudio` to be installed."
-                "You can install torchaudio with `pip install torchaudio`."
-            )
-        global torchaudio
-
         features = datasets.Features(
             {
                 "path": datasets.Value("string"),
                 "language": datasets.ClassLabel(names=_LANGUAGES),
-                "year": datasets.Value("int16"),
+                "raw_text": datasets.Value("string"),
+                "normalized_text": datasets.Value("string"),
                 "audio": datasets.Audio(sampling_rate=16_000),
-                "segment_id": datasets.Value("int16"),
+                # "segment_id": datasets.Value("int16"),  # TODO
             }
         )
         return datasets.DatasetInfo(
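
With raw_text and normalized_text added to the features, a quick smoke test of the script looks like this (a usage sketch, not part of this commit; per-language config names come from _LANGUAGES, plus "all"):

from datasets import load_dataset

# Stream one English example to check the new text fields without a full download.
voxpopuli = load_dataset("polinaeterna/voxpopuli", "en", streaming=True)
sample = next(iter(voxpopuli["train"]))
print(sample["normalized_text"])
print(sample["audio"]["sampling_rate"])  # 16000
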
@@ -149,80 +104,80 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
             citation=_CITATION,
         )
 
-    def _read_metadata_unlabelled(self, metadata_path):
-        # from https://github.com/facebookresearch/voxpopuli/blob/main/voxpopuli/get_unlabelled_data.py#L34
-        def predicate(id_):
-            is_plenary = id_.find("PLENARY") > -1
-            if self.config.name == "10k":  # in {"10k", "10k_sd"}
-                return is_plenary and 20190101 <= int(id_[:8]) < 20200801
-            elif self.config.name == "100k":
-                return is_plenary
-            elif self.config.name in _LANGUAGES:
-                return is_plenary and id_.endswith(self.config.name)
-            elif self.config.name in _LANGUAGES_V2:
-                return id_.endswith(self.config.name.split("_")[0])
-            return True
-
-        metadata = defaultdict(list)
-
-        with open(metadata_path, encoding="utf-8") as csv_file:
-            csv_reader = csv.reader(csv_file, delimiter="\t")
-            for i, row in tqdm(enumerate(csv_reader)):
-                if i == 0:
-                    continue
-                event_id, segment_id, start, end = row
-                _, lang = event_id.rsplit("_", 1)[-2:]
-                if lang in self.config.languages and predicate(event_id):
-                    metadata[event_id].append((float(start), float(end)))
-
-        return metadata
-
-    def _read_metadata_asr(self, metadata_paths):
-        pass
-
     def _split_generators(self, dl_manager):
-        metadata_path = dl_manager.download_and_extract(_UNLABELLED_META_URL)
-
-        urls = [_DATA_URL.format(lang=language, year=year) for language in self.config.languages for year in self.config.years]
-        dl_manager.download_config.num_proc = len(urls)
-        data_dirs = dl_manager.download_and_extract(urls)
+        n_shards_path = dl_manager.download_and_extract(_N_SHARDS_FILE)
+        with open(n_shards_path) as f:
+            n_shards = json.load(f)
+
+        audio_urls = defaultdict(dict)
+        for lang in self.config.languages:
+            for split in ["train", "test", "dev"]:
+                audio_urls[split][lang] = [
+                    _AUDIO_ARCHIVE_PATH.format(lang=lang, split=split, n_shard=i)
+                    for i in range(n_shards[lang][split])
+                ]
+
+        meta_urls = defaultdict(dict)
+        for lang in self.config.languages:
+            for split in ["train", "test", "dev"]:
+                meta_urls[split][lang] = _METADATA_PATH.format(lang=lang, split=split)
+
+        # dl_manager.download_config.num_proc = len(urls)
+
+        meta_paths = dl_manager.download_and_extract(meta_urls)
+        audio_paths = dl_manager.download(audio_urls)
+
+        # when not streaming, archives are extracted locally; when streaming,
+        # keep a per-language list of None placeholders of the same shape
+        local_extracted_audio_paths = (
+            dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
+            {
+                split: {lang: [None] * len(audio_paths[split][lang]) for lang in audio_paths[split]}
+                for split in ["train", "dev", "test"]
+            }
+        )
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "data_dirs": data_dirs,
-                    "metadata_path": metadata_path,
+                    "audio_archives": {
+                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                        for lang, lang_archives in audio_paths["train"].items()
+                    },
+                    "local_extracted_audio_archives_paths": local_extracted_audio_paths["train"] if local_extracted_audio_paths else None,
+                    "metadata_paths": meta_paths["train"],
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "audio_archives": {
+                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                        for lang, lang_archives in audio_paths["dev"].items()
+                    },
+                    "local_extracted_audio_archives_paths": local_extracted_audio_paths["dev"] if local_extracted_audio_paths else None,
+                    "metadata_paths": meta_paths["dev"],
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "audio_archives": {
+                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                        for lang, lang_archives in audio_paths["test"].items()
+                    },
+                    "local_extracted_audio_archives_paths": local_extracted_audio_paths["test"] if local_extracted_audio_paths else None,
+                    "metadata_paths": meta_paths["test"],
                 }
             ),
         ]
 
-    def _generate_examples(self, data_dirs, metadata_path):
-        metadata = self._read_metadata_unlabelled(metadata_path)
-
-        for data_dir in data_dirs:
-            for file in glob.glob(f"{data_dir}/**/*.ogg", recursive=True):
-                path_components = file.split(os.sep)
-                language, year, audio_filename = path_components[-3:]
-                audio_id, _ = os.path.splitext(audio_filename)
-                if audio_id not in metadata:
-                    continue
-                timestamps = metadata[audio_id]
-
-                waveform, sr = torchaudio.load(file)
-                duration = waveform.size(1)
-
-                # split audio on the fly and yield segments as arrays - they will be converted to bytes by Audio feature
-                for segment_id, (start, stop) in enumerate(timestamps):
-                    segment = waveform[:, int(start * sr): min(int(stop * sr), duration)]
-
-                    yield f"{audio_filename}_{segment_id}", {
-                        "path": file,
-                        "language": language,
-                        "year": year,
-                        "audio": {
-                            "array": segment[0],  # segment is a 2-dim array
-                            "sampling_rate": 16_000
-                        },
-                        "segment_id": segment_id,
-                    }
+    def _generate_examples(self, audio_archives, local_extracted_audio_archives_paths, metadata_paths):
+        assert len(metadata_paths) == len(audio_archives)
+
+        for lang in self.config.languages:
+            meta_path = metadata_paths[lang]
+            with open(meta_path) as f:
+                metadata = {x["id"]: x for x in csv.DictReader(f, delimiter="\t")}
+
+            for audio_archive, local_extracted_audio_archive_path in zip(audio_archives[lang], local_extracted_audio_archives_paths[lang]):
+                for audio_filename, audio_file in audio_archive:
+                    audio_id = audio_filename.split(os.sep)[-1].split(".wav")[0]
+                    path = os.path.join(local_extracted_audio_archive_path, audio_filename) if local_extracted_audio_archive_path else audio_filename
+                    yield audio_id, {
+                        "path": path,
+                        "language": lang,
+                        "raw_text": metadata[audio_id]["raw_text"],
+                        "normalized_text": metadata[audio_id]["normalized_text"],
+                        "audio": {"path": path, "bytes": audio_file.read()},
+                    }