polinaeterna (HF staff) committed
Commit 9ef3490
Parent: 9dac28a

modify loading script to allow both wav and opus configurations
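In short, each language now gets two configurations, `{lang}_wav` (16 kHz wav) and `{lang}_opus` (48 kHz opus), selected through a new `format` parameter on `MlSpokenWordsConfig`. Schematically (paths paraphrased from the diff below):

```text
before: data/{lang}/{split}/audio/{n}.tar.gz
        data/{lang}/splits.tar.gz
after:  data/{format}/{lang}/{split}/audio/{n}.tar.gz   # format is "wav" or "opus"
        data/splits/{lang}/splits.tar.gz                # splits are shared across formats
```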

Files changed (1): ml_spoken_words.py (+24 -16)
ml_spoken_words.py CHANGED
```diff
@@ -23,6 +23,7 @@ totaling 23.4 million 1-second spoken examples (over 6,000 hours).
 
 
 import csv
+import os.path
 from functools import partial
 
 import datasets
@@ -55,10 +56,10 @@ _LICENSE = "CC-BY 4.0."
 
 _VERSION = datasets.Version("1.0.0")
 
-_BASE_URL = "https://huggingface.co/datasets/polinaeterna/ml_spoken_words/resolve/main/data/{lang}/"
-_AUDIO_URL = _BASE_URL + "{split}/audio/{n}.tar.gz"
-_SPLITS_URL = _BASE_URL + "splits.tar.gz"
-_N_FILES_URL = _BASE_URL + "{split}/n_files.txt"
+_BASE_URL = "https://huggingface.co/datasets/polinaeterna/ml_spoken_words/resolve/main/data/"
+_AUDIO_URL = _BASE_URL + "{format}/{lang}/{split}/audio/{n}.tar.gz"
+_N_FILES_URL = _BASE_URL + "{format}/{lang}/{split}/n_files.txt"
+_SPLITS_URL = _BASE_URL + "splits/{lang}/splits.tar.gz"
 
 _GENDERS = ["MALE", "FEMALE", "OTHER", "NAN"]
 
@@ -119,7 +120,7 @@ _LANGUAGES = [
 class MlSpokenWordsConfig(datasets.BuilderConfig):
     """BuilderConfig for MlSpokenWords."""
 
-    def __init__(self, *args, languages, **kwargs):
+    def __init__(self, *args, languages, format="wav", **kwargs):
         """BuilderConfig for MlSpokenWords.
         Args:
             languages (:obj:`Union[List[str], str]`): language or list of languages to load
@@ -127,10 +128,11 @@ class MlSpokenWordsConfig(datasets.BuilderConfig):
         """
         super().__init__(
             *args,
-            name="+".join(languages) if isinstance(languages, list) else languages,
+            name="+".join(languages) + "_" + format if isinstance(languages, list) else languages + "_" + format,
             **kwargs,
         )
         self.languages = languages if isinstance(languages, list) else [languages]
+        self.format = format
 
 
 class MlSpokenWords(datasets.GeneratorBasedBuilder):
@@ -143,7 +145,11 @@ class MlSpokenWords(datasets.GeneratorBasedBuilder):
     """
 
     VERSION = _VERSION
-    BUILDER_CONFIGS = [MlSpokenWordsConfig(languages=[lang], version=_VERSION) for lang in _LANGUAGES]
+    BUILDER_CONFIGS = [
+        MlSpokenWordsConfig(languages=[lang], format="wav", version=_VERSION) for lang in _LANGUAGES
+    ] + [
+        MlSpokenWordsConfig(languages=[lang], format="opus", version=_VERSION) for lang in _LANGUAGES
+    ]
     BUILDER_CONFIG_CLASS = MlSpokenWordsConfig
 
     def _info(self):
@@ -154,8 +160,9 @@ class MlSpokenWords(datasets.GeneratorBasedBuilder):
                 "language": datasets.ClassLabel(names=self.config.languages),
                 "speaker_id": datasets.Value("string"),
                 "gender": datasets.ClassLabel(names=_GENDERS),
-                "keyword": datasets.Value("string"),  # seems that there are too many of them (340k unique keywords)
-                "audio": datasets.Audio(sampling_rate=48_000),
+                "keyword": datasets.Value("string"),  # 340k unique keywords
+                "audio": datasets.Audio(sampling_rate=48_000) if self.config.format == "opus" \
+                else datasets.Audio(sampling_rate=16_000),
             }
         )
         return datasets.DatasetInfo(
@@ -168,7 +175,7 @@ class MlSpokenWords(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         splits_archive_path = [dl_manager.download(_SPLITS_URL.format(lang=lang)) for lang in self.config.languages]
-        download_audio = partial(_download_audio_archives, dl_manager=dl_manager)
+        download_audio = partial(_download_audio_archives, format=self.config.format, dl_manager=dl_manager)
 
         return [
             datasets.SplitGenerator(
@@ -206,8 +213,8 @@ class MlSpokenWords(datasets.GeneratorBasedBuilder):
                 for i, (link, word, is_valid, speaker, gender) in enumerate(csv_reader):
                     if i == 0:
                         continue
-                    audio_filename = "_".join(link.split("/"))
-                    metadata[audio_filename] = {
+                    audio_id, audio_ext = os.path.splitext("_".join(link.split("/")))
+                    metadata[audio_id] = {
                         "keyword": word,
                         "is_valid": is_valid,
                         "speaker_id": speaker,
@@ -216,15 +223,16 @@ class MlSpokenWords(datasets.GeneratorBasedBuilder):
 
             for audio_archive in audio_archives[lang_idx]:
                 for audio_filename, audio_file in audio_archive:
+                    audio_id, audio_ext = os.path.splitext(audio_filename)
                     yield audio_filename, {
                         "file": audio_filename,
                         "language": lang,
                         "audio": {"path": audio_filename, "bytes": audio_file.read()},
-                        **metadata[audio_filename],
+                        **metadata[audio_id],
                     }
 
 
-def _download_audio_archives(dl_manager, lang, split):
+def _download_audio_archives(dl_manager, lang, format, split):
     """
     All audio files are stored in several .tar.gz archives with names like 0.tar.gz, 1.tar.gz, ...
     Number of archives stored in a separate .txt file (n_files.txt)
@@ -232,13 +240,13 @@ def _download_audio_archives(dl_manager, lang, split):
     Prepare all the audio archives for iterating over them and their audio files.
     """
 
-    n_files_url = _N_FILES_URL.format(lang=lang, split=split)
+    n_files_url = _N_FILES_URL.format(lang=lang, format=format, split=split)
     n_files_path = dl_manager.download(n_files_url)
 
     with open(n_files_path, "r", encoding="utf-8") as file:
         n_files = int(file.read().strip())  # the file contains a number of archives
 
-    archive_urls = [_AUDIO_URL.format(lang=lang, split=split, n=i) for i in range(n_files)]
+    archive_urls = [_AUDIO_URL.format(lang=lang, format=format, split=split, n=i) for i in range(n_files)]
     archive_paths = dl_manager.download(archive_urls)
 
     return [dl_manager.iter_archive(archive_path) for archive_path in archive_paths]
```
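For reference, here is a minimal standalone sketch of how the script resolves the audio archives for one (format, language, split) triple. The URL templates are copied from the diff; the language code and archive count are illustrative values, not checked against the hosted data:

```python
# Sketch of the archive-resolution protocol from _download_audio_archives.
_BASE_URL = "https://huggingface.co/datasets/polinaeterna/ml_spoken_words/resolve/main/data/"
_AUDIO_URL = _BASE_URL + "{format}/{lang}/{split}/audio/{n}.tar.gz"
_N_FILES_URL = _BASE_URL + "{format}/{lang}/{split}/n_files.txt"

lang, fmt, split = "rw", "opus", "train"  # made-up example values

# 1. n_files.txt holds the number of .tar.gz shards for this triple
#    (in the script it is fetched via dl_manager.download and read from disk)
n_files_url = _N_FILES_URL.format(lang=lang, format=fmt, split=split)

# 2. shards are numbered 0.tar.gz ... (n_files - 1).tar.gz
n_files = 2  # pretend n_files.txt contained "2"
archive_urls = [_AUDIO_URL.format(lang=lang, format=fmt, split=split, n=i) for i in range(n_files)]
print(archive_urls[0])  # .../data/opus/rw/train/audio/0.tar.gz
```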
 
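With both formats registered, loading should look roughly like this. This is a sketch, not run against the Hub, and the language codes are only examples of entries in `_LANGUAGES`:

```python
from datasets import load_dataset

# Per-language configs are now named "{lang}_{format}".
words_wav = load_dataset("polinaeterna/ml_spoken_words", "rw_wav")    # 16 kHz wav audio
words_opus = load_dataset("polinaeterna/ml_spoken_words", "rw_opus")  # 48 kHz opus audio

# Several languages at once: extra kwargs flow into MlSpokenWordsConfig,
# producing a config named "rw+sv-SE_opus".
multi = load_dataset(
    "polinaeterna/ml_spoken_words",
    languages=["rw", "sv-SE"],
    format="opus",
)
```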