polinaeterna HF staff committed on
Commit
e68cfc9
1 Parent(s): 5448c2e

fix accented en

Browse files
Files changed (1) hide show
  1. voxpopuli.py +35 -16
voxpopuli.py CHANGED
@@ -66,13 +66,13 @@ class VoxpopuliConfig(datasets.BuilderConfig):
66
  name: `string` or `List[string]`:
67
  name of a config: either one of the supported languages, "multilang" for many languages.
68
  By default, "multilang" config includes all languages, including accented ones.
69
- To specify a custom set of languages, provide them to the `language` parameter
70
  languages: `List[string]`: if config is "multilang" can be either "all" for all available languages,
71
  including accented ones (default), or a custom list of languages.
72
  **kwargs: keyword arguments forwarded to super.
73
  """
74
  if name == "multilang":
75
- self.languages = _LANGUAGES if languages == "all" else languages
76
  name = "multilang" if languages == "all" else "_".join(languages)
77
  else:
78
  self.languages = [name]
@@ -120,15 +120,20 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
120
  with open(n_shards_path) as f:
121
  n_shards = json.load(f)
122
 
 
 
 
 
 
123
  audio_urls = defaultdict(dict)
124
- for split in ["train", "test", "dev"]:
125
  for lang in self.config.languages:
126
  audio_urls[split][lang] = [
127
  _AUDIO_ARCHIVE_PATH.format(lang=lang, split=split, n_shard=i) for i in range(n_shards[lang][split])
128
  ]
129
 
130
  meta_urls = defaultdict(dict)
131
- for split in ["train", "test", "dev"]:
132
  for lang in self.config.languages:
133
  meta_urls[split][lang] = _METADATA_PATH.format(lang=lang, split=split)
134
 
@@ -140,11 +145,23 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
140
  local_extracted_audio_paths = (
141
  dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
142
  {
143
- "train": {lang: [None] * len(audio_paths["train"]) for lang in self.config.languages},
144
- "dev": {lang: [None] * len(audio_paths["dev"]) for lang in self.config.languages},
145
- "test": {lang: [None] * len(audio_paths["test"]) for lang in self.config.languages},
146
  }
147
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
148
 
149
  return [
150
  datasets.SplitGenerator(
@@ -154,7 +171,7 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
154
  lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
155
  for lang, lang_archives in audio_paths["train"].items()
156
  },
157
- "local_extracted_audio_archives_paths": local_extracted_audio_paths["train"],
158
  "metadata_paths": meta_paths["train"],
159
  }
160
  ),
@@ -165,7 +182,7 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
165
  lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
166
  for lang, lang_archives in audio_paths["dev"].items()
167
  },
168
- "local_extracted_audio_archives_paths": local_extracted_audio_paths["dev"],
169
  "metadata_paths": meta_paths["dev"],
170
  }
171
  ),
@@ -176,29 +193,31 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
176
  lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
177
  for lang, lang_archives in audio_paths["test"].items()
178
  },
179
- "local_extracted_audio_archives_paths": local_extracted_audio_paths["test"],
180
  "metadata_paths": meta_paths["test"],
181
  }
182
  ),
183
  ]
184
 
185
- def _generate_examples(self, audio_archives, local_extracted_audio_archives_paths, metadata_paths):
186
- assert len(metadata_paths) == len(audio_archives) == len(local_extracted_audio_archives_paths)
187
  features = ["raw_text", "normalized_text", "speaker_id", "gender", "is_gold_transcript", "accent"]
188
 
189
  for lang in self.config.languages:
 
 
190
  meta_path = metadata_paths[lang]
191
  with open(meta_path) as f:
192
  metadata = {x["id"]: x for x in csv.DictReader(f, delimiter="\t")}
193
 
194
- for audio_archive, local_extracted_audio_archive_path in zip(audio_archives[lang], local_extracted_audio_archives_paths[lang]):
195
  for audio_filename, audio_file in audio_archive:
196
  audio_id = audio_filename.split(os.sep)[-1].split(".wav")[0]
197
- path = os.path.join(local_extracted_audio_archive_path, audio_filename) if local_extracted_audio_archive_path else audio_filename
198
 
199
  yield audio_id, {
200
  "audio_id": audio_id,
201
  "language": lang,
202
  **{feature: metadata[audio_id][feature] for feature in features},
203
- "audio": {"path": path, "bytes": audio_file.read()}
204
- }
 
66
  name: `string` or `List[string]`:
67
  name of a config: either one of the supported languages, "multilang" for many languages.
68
  By default, "multilang" config includes all languages, including accented ones.
69
+ To specify a custom set of languages, pass them to the `language` parameter
70
  languages: `List[string]`: if config is "multilang" can be either "all" for all available languages,
71
  including accented ones (default), or a custom list of languages.
72
  **kwargs: keyword arguments forwarded to super.
73
  """
74
  if name == "multilang":
75
+ self.languages = _ASR_LANGUAGES if languages == "all" else languages
76
  name = "multilang" if languages == "all" else "_".join(languages)
77
  else:
78
  self.languages = [name]
 
120
  with open(n_shards_path) as f:
121
  n_shards = json.load(f)
122
 
123
+ if self.config.name == "en_accented":
124
+ splits = ["test"]
125
+ else:
126
+ splits = ["train", "dev", "test"]
127
+
128
  audio_urls = defaultdict(dict)
129
+ for split in splits:
130
  for lang in self.config.languages:
131
  audio_urls[split][lang] = [
132
  _AUDIO_ARCHIVE_PATH.format(lang=lang, split=split, n_shard=i) for i in range(n_shards[lang][split])
133
  ]
134
 
135
  meta_urls = defaultdict(dict)
136
+ for split in splits:
137
  for lang in self.config.languages:
138
  meta_urls[split][lang] = _METADATA_PATH.format(lang=lang, split=split)
139
 
 
145
  local_extracted_audio_paths = (
146
  dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
147
  {
148
+ split: {lang: [None] * len(audio_paths[split]) for lang in self.config.languages} for split in splits
 
 
149
  }
150
  )
151
+ if self.config.name == "en_accented":
152
+ return [
153
+ datasets.SplitGenerator(
154
+ name=datasets.Split.TEST,
155
+ gen_kwargs={
156
+ "audio_archives": {
157
+ lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
158
+ for lang, lang_archives in audio_paths["test"].items()
159
+ },
160
+ "local_extracted_archives_paths": local_extracted_audio_paths["test"],
161
+ "metadata_paths": meta_paths["test"],
162
+ }
163
+ ),
164
+ ]
165
 
166
  return [
167
  datasets.SplitGenerator(
 
171
  lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
172
  for lang, lang_archives in audio_paths["train"].items()
173
  },
174
+ "local_extracted_archives_paths": local_extracted_audio_paths["train"],
175
  "metadata_paths": meta_paths["train"],
176
  }
177
  ),
 
182
  lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
183
  for lang, lang_archives in audio_paths["dev"].items()
184
  },
185
+ "local_extracted_archives_paths": local_extracted_audio_paths["dev"],
186
  "metadata_paths": meta_paths["dev"],
187
  }
188
  ),
 
193
  lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
194
  for lang, lang_archives in audio_paths["test"].items()
195
  },
196
+ "local_extracted_archives_paths": local_extracted_audio_paths["test"],
197
  "metadata_paths": meta_paths["test"],
198
  }
199
  ),
200
  ]
201
 
202
+ def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
203
+ assert len(metadata_paths) == len(audio_archives) == len(local_extracted_archives_paths)
204
  features = ["raw_text", "normalized_text", "speaker_id", "gender", "is_gold_transcript", "accent"]
205
 
206
  for lang in self.config.languages:
207
+ assert len(audio_archives[lang]) == len(local_extracted_archives_paths[lang])
208
+
209
  meta_path = metadata_paths[lang]
210
  with open(meta_path) as f:
211
  metadata = {x["id"]: x for x in csv.DictReader(f, delimiter="\t")}
212
 
213
+ for audio_archive, local_extracted_archive_path in zip(audio_archives[lang], local_extracted_archives_paths[lang]):
214
  for audio_filename, audio_file in audio_archive:
215
  audio_id = audio_filename.split(os.sep)[-1].split(".wav")[0]
216
+ path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename
217
 
218
  yield audio_id, {
219
  "audio_id": audio_id,
220
  "language": lang,
221
  **{feature: metadata[audio_id][feature] for feature in features},
222
+ "audio": {"path": path, "bytes": audio_file.read()},
223
+ }