polinaeterna (HF staff) committed
Commit 009cb64
1 Parent(s): 346aa1c

fix config, add config for custom set of languages

Files changed (1)
  1. voxpopuli.py +47 -26
voxpopuli.py CHANGED
@@ -48,7 +48,6 @@ _ASR_ACCENTED_LANGUAGES = [
 
 _LANGUAGES = _ASR_LANGUAGES + _ASR_ACCENTED_LANGUAGES
 
-
 _BASE_DATA_DIR = "https://huggingface.co/datasets/polinaeterna/voxpopuli/resolve/main/data/"
 
 _N_SHARDS_FILE = _BASE_DATA_DIR + "n_files.json"
@@ -61,15 +60,24 @@ _METADATA_PATH = _BASE_DATA_DIR + "{lang}/asr_{split}.tsv"
 class VoxpopuliConfig(datasets.BuilderConfig):
     """BuilderConfig for VoxPopuli."""
 
-    def __init__(self, name, **kwargs):
+    def __init__(self, name, languages=None, **kwargs):
         """
         Args:
-            name: `string`, name of dataset config
+            name: `string`:
+                name of a config: either one of the supported languages, "all" for all languages (including accented English),
+                or "multilang" for a custom set of languages which must be specified in the `languages` parameter.
+            languages: `List[string]`: custom list of languages to download (used only when the config is "multilang")
             **kwargs: keyword arguments forwarded to super.
         """
+        if name == "all":
+            self.languages = _LANGUAGES
+        elif name == "multilang" and languages:
+            self.languages = languages
+            name = "+".join(languages)
+        else:
+            self.languages = [name]
+
         super().__init__(name=name, **kwargs)
-        self.languages = _LANGUAGES if name == "all" else [name]
-        # self.data_root_dis = {lang: _DATA_DIR.format(lang) for lang in self.languages}
 
 
 class Voxpopuli(datasets.GeneratorBasedBuilder):
@@ -81,19 +89,22 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
             name=name,
             version=datasets.Version("1.3.0"),
         )
-        for name in _LANGUAGES + ["all"]
+        for name in _LANGUAGES + ["all", "multilang"]
     ]
-    DEFAULT_WRITER_BATCH_SIZE = 256  # SET THIS TO A LOWER VALUE IF IT USES TOO MUCH RAM SPACE
+    DEFAULT_WRITER_BATCH_SIZE = 256
 
     def _info(self):
        features = datasets.Features(
             {
-                "path": datasets.Value("string"),
+                "audio_id": datasets.Value("string"),
                 "language": datasets.ClassLabel(names=_LANGUAGES),
                 "raw_text": datasets.Value("string"),
                 "normalized_text": datasets.Value("string"),
+                "gender": datasets.Value("string"),  # TODO: ClassLabel?
+                "speaker_id": datasets.Value("int64"),
+                "is_gold_transcript": datasets.Value("bool"),
+                "accent": datasets.Value("string"),
                 "audio": datasets.Audio(sampling_rate=16_000),
-                # "segment_id": datasets.Value("int16"),  # TODO
             }
         )
         return datasets.DatasetInfo(
@@ -110,13 +121,16 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
             n_shards = json.load(f)
 
         audio_urls = defaultdict(dict)
-        for lang in self.config.languages:
-            for split in ["train", "test", "dev"]:
-                audio_urls[split][lang] = [_AUDIO_ARCHIVE_PATH.format(lang=lang, split=split, n_shard=i) for i in range(n_shards[lang][split])]
+        for split in ["train", "test", "dev"]:
+            for lang in self.config.languages:
+                audio_urls[split][lang] = [
+                    _AUDIO_ARCHIVE_PATH.format(lang=lang, split=split, n_shard=i) for i in range(n_shards[lang][split])
+                ]
 
         meta_urls = defaultdict(dict)
         for split in ["train", "test", "dev"]:
-            meta_urls[split][lang] = _METADATA_PATH.format(lang=lang, split=split)
+            for lang in self.config.languages:
+                meta_urls[split][lang] = _METADATA_PATH.format(lang=lang, split=split)
 
         # dl_manager.download_config.num_proc = len(urls)
 
@@ -136,34 +150,41 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "audio_archives": {lang: [dl_manager.iter_archive(archive) for archive in lang_archives] for lang, lang_archives
-                                       in audio_paths["train"].items()},
-                    "local_extracted_audio_archives_paths": local_extracted_audio_paths["train"] if local_extracted_audio_paths else None,
+                    "audio_archives": {
+                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                        for lang, lang_archives in audio_paths["train"].items()
+                    },
+                    "local_extracted_audio_archives_paths": local_extracted_audio_paths["train"],
                     "metadata_paths": meta_paths["train"],
                 }
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "audio_archives": {lang: [dl_manager.iter_archive(archive) for archive in lang_archives] for lang, lang_archives
-                                       in audio_paths["dev"].items()},
-                    "local_extracted_audio_archives_paths": local_extracted_audio_paths["dev"] if local_extracted_audio_paths else None,
+                    "audio_archives": {
+                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                        for lang, lang_archives in audio_paths["dev"].items()
+                    },
+                    "local_extracted_audio_archives_paths": local_extracted_audio_paths["dev"],
                     "metadata_paths": meta_paths["dev"],
                 }
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "audio_archives": {lang: [dl_manager.iter_archive(archive) for archive in lang_archives] for lang, lang_archives
-                                       in audio_paths["test"].items()},
-                    "local_extracted_audio_archives_paths": local_extracted_audio_paths["test"] if local_extracted_audio_paths else None,
+                    "audio_archives": {
+                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                        for lang, lang_archives in audio_paths["test"].items()
+                    },
+                    "local_extracted_audio_archives_paths": local_extracted_audio_paths["test"],
                     "metadata_paths": meta_paths["test"],
                 }
             ),
         ]
 
     def _generate_examples(self, audio_archives, local_extracted_audio_archives_paths, metadata_paths):
-        assert len(metadata_paths) == len(audio_archives)
+        assert len(metadata_paths) == len(audio_archives) == len(local_extracted_audio_archives_paths)
+        features = ["raw_text", "normalized_text", "speaker_id", "gender", "is_gold_transcript", "accent"]
 
         for lang in self.config.languages:
             meta_path = metadata_paths[lang]
@@ -174,10 +195,10 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
                 for audio_filename, audio_file in audio_archive:
                     audio_id = audio_filename.split(os.sep)[-1].split(".wav")[0]
                     path = os.path.join(local_extracted_audio_archive_path, audio_filename) if local_extracted_audio_archive_path else audio_filename
+
                     yield audio_id, {
-                        "path": path,
+                        "audio_id": audio_id,
                         "language": lang,
-                        "raw_text": metadata[audio_id]["raw_text"],
-                        "normalized_text": metadata[audio_id]["normalized_text"],
+                        **{feature: metadata[audio_id][feature] for feature in features},
                         "audio": {"path": path, "bytes": audio_file.read()}
                     }
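
The headline change is the new "multilang" config for loading a custom set of languages. A minimal usage sketch, assuming the standard `datasets.load_dataset` entry point forwards config kwargs to `VoxpopuliConfig` (the language subset below is illustrative):

    from datasets import load_dataset

    # Hypothetical usage of the new "multilang" config: the `languages` kwarg is
    # passed through to VoxpopuliConfig, which joins the names into the config
    # name ("en+fr") and restricts downloads to that subset.
    multilang = load_dataset("polinaeterna/voxpopuli", "multilang", languages=["en", "fr"])

    # Single-language and "all" configs work as before.
    english = load_dataset("polinaeterna/voxpopuli", "en")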
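
The reordered download loops also fix a scoping bug: in the old code the metadata loop reused whatever `lang` was left over from the audio loop, so only a single language per split got a metadata URL. A self-contained sketch of the corrected structure (the URL strings and shard counts are placeholders, not the script's real templates):

    from collections import defaultdict

    languages = ["en", "fr"]  # stand-in for self.config.languages
    n_shards = {"en": {"train": 2, "test": 1, "dev": 1},
                "fr": {"train": 1, "test": 1, "dev": 1}}  # stand-in for n_files.json

    audio_urls = defaultdict(dict)
    for split in ["train", "test", "dev"]:
        for lang in languages:
            audio_urls[split][lang] = [f"{lang}/{split}_part_{i}.tar.gz" for i in range(n_shards[lang][split])]

    meta_urls = defaultdict(dict)
    for split in ["train", "test", "dev"]:
        for lang in languages:  # the old code was missing this inner loop
            meta_urls[split][lang] = f"{lang}/asr_{split}.tsv"

    # Every language now has entries for every split.
    assert all(set(meta_urls[split]) == set(languages) for split in ["train", "test", "dev"])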
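
In `_generate_examples`, a dict unpacking replaces the hand-listed `raw_text`/`normalized_text` keys and pulls all six metadata columns in one expression, and each record now carries its `audio_id` (the archive member's basename minus the `.wav` extension). A sketch of the assembly with invented values (only the keys and the two expressions come from the diff):

    import os

    features = ["raw_text", "normalized_text", "speaker_id", "gender", "is_gold_transcript", "accent"]

    audio_filename = "en/train_part_0/sample_123.wav"  # hypothetical archive member name
    audio_id = audio_filename.split(os.sep)[-1].split(".wav")[0]  # "sample_123" on POSIX

    metadata_row = {  # hypothetical TSV row, keyed by the same column names
        "raw_text": "Example sentence.",
        "normalized_text": "example sentence",
        "speaker_id": 0,
        "gender": "female",
        "is_gold_transcript": True,
        "accent": "None",
    }

    example = {
        "audio_id": audio_id,
        "language": "en",
        **{feature: metadata_row[feature] for feature in features},
    }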