polinaeterna HF staff committed on
Commit
3f69367
1 Parent(s): c58a186

add check for plenary

Browse files
Files changed (1) hide show
  1. voxpopuli.py +40 -26
voxpopuli.py CHANGED
@@ -13,16 +13,16 @@ _LANGUAGES = sorted(
13
  "sk", "sl", "et", "lt", "pt", "bg", "el", "lv", "mt", "sv", "da"
14
  ]
15
  )
16
- _LANGUAGES_V2 = [f"{x}_v2" for x in _LANGUAGES]
17
 
18
  _YEARS = list(range(2009, 2020 + 1))
19
 
20
  # unnecessary
21
- _CONFIG_TO_LANGS = {
22
- "400k": _LANGUAGES,
23
- "100k": _LANGUAGES,
24
- "10k": _LANGUAGES,
25
- }
26
 
27
  _CONFIG_TO_YEARS = {
28
  "400k": _YEARS + [f"{y}_2" for y in _YEARS],
@@ -31,7 +31,8 @@ _CONFIG_TO_YEARS = {
31
  # "asr": _YEARS
32
  }
33
  for lang in _LANGUAGES:
34
- _CONFIG_TO_YEARS[lang] = _YEARS
 
35
 
36
  _BASE_URL = "https://dl.fbaipublicfiles.com/voxpopuli/"
37
 
@@ -66,9 +67,20 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
66
  for name in _LANGUAGES + ["10k", "100k", "400k"]
67
  ]
68
  # DEFAULT_CONFIG_NAME = "400k"
69
- # DEFAULT_WRITER_BATCH_SIZE = 256
70
 
71
  def _info(self):
 
 
 
 
 
 
 
 
 
 
 
72
  features = datasets.Features(
73
  {
74
  "path": datasets.Value("string"),
@@ -87,8 +99,19 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
87
  )
88
 
89
  def _read_metadata(self, metadata_path):
90
- # TODO: check for predicate??
91
- # @ https://github.com/facebookresearch/voxpopuli/blob/main/voxpopuli/get_unlabelled_data.py#L34
 
 
 
 
 
 
 
 
 
 
 
92
  metadata = defaultdict(list)
93
 
94
  with open(metadata_path, encoding="utf-8") as csv_file:
@@ -96,11 +119,10 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
96
  for i, row in tqdm(enumerate(csv_reader)):
97
  if i == 0:
98
  continue
99
- audio_id, segment_id, start, end = row
100
- event_id, lang = audio_id.rsplit("_", 1)[-2:]
101
- if lang in self.languages:
102
- # if lang in ["hr", "et"]:
103
- metadata[audio_id].append((float(start), float(end)))
104
 
105
  return metadata
106
 
@@ -108,7 +130,6 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
108
  metadata_path = dl_manager.download_and_extract(_META_URL)
109
 
110
  years = _CONFIG_TO_YEARS[self.config.name]
111
- # urls = [_DATA_URL.format(lang=language, year=year) for language in ["hr", "et"] for year in [2020]] # , "et"]
112
  urls = [_DATA_URL.format(lang=language, year=year) for language in self.config.languages for year in years]
113
  dl_manager.download_config.num_proc = len(urls)
114
  data_dirs = dl_manager.download_and_extract(urls)
@@ -124,15 +145,6 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
124
  ]
125
 
126
  def _generate_examples(self, data_dirs, metadata_path):
127
- try:
128
- import torch
129
- import torchaudio
130
- except ImportError as e:
131
- raise ValueError(
132
- "Loading voxpopuli requires `torchaudio` to be installed."
133
- "You can install torchaudio with `pip install torchaudio`." + e
134
- )
135
-
136
  metadata = self._read_metadata(metadata_path)
137
 
138
  for data_dir in data_dirs:
@@ -140,12 +152,14 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
140
  path_components = file.split(os.sep)
141
  language, year, audio_filename = path_components[-3:]
142
  audio_id, _ = os.path.splitext(audio_filename)
 
 
143
  timestamps = metadata[audio_id]
144
 
145
  waveform, sr = torchaudio.load(file)
146
  duration = waveform.size(1)
147
 
148
- # split audio on the fly and write segments as arrays
149
  for segment_id, (start, stop) in enumerate(timestamps):
150
  segment = waveform[:, int(start * sr): min(int(stop * sr), duration)]
151
 
 
13
  "sk", "sl", "et", "lt", "pt", "bg", "el", "lv", "mt", "sv", "da"
14
  ]
15
  )
16
+ _LANGUAGES_V2 = [f"{x}_v2" for x in _LANGUAGES] # TODO: what are they used for?
17
 
18
  _YEARS = list(range(2009, 2020 + 1))
19
 
20
  # unnecessary
21
+ # _CONFIG_TO_LANGS = {
22
+ # "400k": _LANGUAGES,
23
+ # "100k": _LANGUAGES,
24
+ # "10k": _LANGUAGES,
25
+ # }
26
 
27
  _CONFIG_TO_YEARS = {
28
  "400k": _YEARS + [f"{y}_2" for y in _YEARS],
 
31
  # "asr": _YEARS
32
  }
33
  for lang in _LANGUAGES:
34
+ # _CONFIG_TO_YEARS[lang] = _YEARS
35
+ _CONFIG_TO_YEARS[lang] = [2020]
36
 
37
  _BASE_URL = "https://dl.fbaipublicfiles.com/voxpopuli/"
38
 
 
67
  for name in _LANGUAGES + ["10k", "100k", "400k"]
68
  ]
69
  # DEFAULT_CONFIG_NAME = "400k"
70
+ DEFAULT_WRITER_BATCH_SIZE = 256
71
 
72
  def _info(self):
73
+ try:
74
+ import torch
75
+ import torchaudio
76
+ except ImportError as e:
77
+ raise ValueError(
78
+ f"{str(e)}.\n" +
79
+ "Loading voxpopuli requires `torchaudio` to be installed."
80
+ "You can install torchaudio with `pip install torchaudio`."
81
+ )
82
+ global torchaudio
83
+
84
  features = datasets.Features(
85
  {
86
  "path": datasets.Value("string"),
 
99
  )
100
 
101
  def _read_metadata(self, metadata_path):
102
+ # from https://github.com/facebookresearch/voxpopuli/blob/main/voxpopuli/get_unlabelled_data.py#L34
103
+ def predicate(id_):
104
+ is_plenary = id_.find("PLENARY") > -1
105
+ if self.config.name == "10k": # in {"10k", "10k_sd"}
106
+ return is_plenary and 20190101 <= int(id_[:8]) < 20200801
107
+ elif self.config.name == "100k":
108
+ return is_plenary
109
+ elif self.config.name in _LANGUAGES:
110
+ return is_plenary and id_.endswith(self.config.name)
111
+ elif self.config.name in _LANGUAGES_V2:
112
+ return id_.endswith(self.config.name.split("_")[0])
113
+ return True
114
+
115
  metadata = defaultdict(list)
116
 
117
  with open(metadata_path, encoding="utf-8") as csv_file:
 
119
  for i, row in tqdm(enumerate(csv_reader)):
120
  if i == 0:
121
  continue
122
+ event_id, segment_id, start, end = row
123
+ _, lang = event_id.rsplit("_", 1)[-2:]
124
+ if lang in self.config.languages and predicate(event_id):
125
+ metadata[event_id].append((float(start), float(end)))
 
126
 
127
  return metadata
128
 
 
130
  metadata_path = dl_manager.download_and_extract(_META_URL)
131
 
132
  years = _CONFIG_TO_YEARS[self.config.name]
 
133
  urls = [_DATA_URL.format(lang=language, year=year) for language in self.config.languages for year in years]
134
  dl_manager.download_config.num_proc = len(urls)
135
  data_dirs = dl_manager.download_and_extract(urls)
 
145
  ]
146
 
147
  def _generate_examples(self, data_dirs, metadata_path):
 
 
 
 
 
 
 
 
 
148
  metadata = self._read_metadata(metadata_path)
149
 
150
  for data_dir in data_dirs:
 
152
  path_components = file.split(os.sep)
153
  language, year, audio_filename = path_components[-3:]
154
  audio_id, _ = os.path.splitext(audio_filename)
155
+ if audio_id not in metadata:
156
+ continue
157
  timestamps = metadata[audio_id]
158
 
159
  waveform, sr = torchaudio.load(file)
160
  duration = waveform.size(1)
161
 
162
+ # split audio on the fly and yield segments as arrays - they will be converted to bytes by Audio feature
163
  for segment_id, (start, stop) in enumerate(timestamps):
164
  segment = waveform[:, int(start * sr): min(int(stop * sr), duration)]
165