polinaeterna (HF staff) committed
Commit 3d7c628
1 parent: ce71dc1

update script

Files changed (1):
  common_voice_11_0.py  +74 -128
common_voice_11_0.py CHANGED
@@ -23,6 +23,7 @@ import datasets
 import requests
 from datasets.utils.py_utils import size_str
 from huggingface_hub import HfApi, HfFolder
+from tqdm import tqdm
 
 from .languages import LANGUAGES
 from .release_stats import STATS
@@ -41,7 +42,22 @@ _HOMEPAGE = "https://commonvoice.mozilla.org/en/datasets"
 
 _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
 
-_API_URL = "https://commonvoice.mozilla.org/api/v1"
+_N_SHARDS = {
+    "ar": {
+        "train": 1,
+        "dev": 1,
+        "test": 1,
+        "other": 2,
+        "invalidated": 1,
+    }
+}
+
+# TODO: change "streaming" to "main" after merge!
+_BASE_URL = "https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/resolve/streaming/"
+
+_AUDIO_URL = _BASE_URL + "audio/{lang}/{split}/{lang}_{split}_{shard_idx}.tar"
+
+_TRANSCRIPT_URL = _BASE_URL + "transcript/{lang}/{split}.tsv"
 
 
 class CommonVoiceConfig(datasets.BuilderConfig):
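
A quick standalone sketch of the shard URLs these templates produce for the "ar" config; the counts mirror _N_SHARDS above:

    # Sketch: expand the audio URL template for "ar" (values copied from the constants above).
    base = "https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/resolve/streaming/"
    audio_url = base + "audio/{lang}/{split}/{lang}_{split}_{shard_idx}.tar"
    n_shards = {"train": 1, "dev": 1, "test": 1, "other": 2, "invalidated": 1}  # _N_SHARDS["ar"]
    for split, n in n_shards.items():
        for i in range(n):
            print(audio_url.format(lang="ar", split=split, shard_idx=i))
    # "other" is the only split with two shards: ar_other_0.tar and ar_other_1.tar
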
@@ -71,7 +87,7 @@ class CommonVoiceConfig(datasets.BuilderConfig):
 
 
 class CommonVoice(datasets.GeneratorBasedBuilder):
-    DEFAULT_CONFIG_NAME = "en"
+    DEFAULT_CONFIG_NAME = "ar"
     DEFAULT_WRITER_BATCH_SIZE = 1000
 
     BUILDER_CONFIGS = [
@@ -86,7 +102,7 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
             total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
             size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
         )
-        for lang, lang_stats in STATS["locales"].items()
+        for lang, lang_stats in STATS["locales"].items() if lang == "ar"
     ]
 
     def _info(self):
@@ -121,140 +137,70 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
             license=_LICENSE,
             citation=_CITATION,
             version=self.config.version,
-            # task_templates=[
-            #     AutomaticSpeechRecognition(audio_file_path_column="path", transcription_column="sentence")
-            # ],
         )
 
-    def _get_bundle_url(self, locale, url_template):
-        # path = encodeURIComponent(path)
-        path = url_template.replace("{locale}", locale)
-        path = urllib.parse.quote(path.encode("utf-8"), safe="~()*!.'")
-        # use_cdn = self.config.size_bytes < 20 * 1024 * 1024 * 1024
-        # response = requests.get(f"{_API_URL}/bucket/dataset/{path}/{use_cdn}", timeout=10.0).json()
-        response = requests.get(f"{_API_URL}/bucket/dataset/{path}", timeout=10.0).json()
-        return response["url"]
-
-    def _log_download(self, locale, bundle_version, auth_token):
-        if isinstance(auth_token, bool):
-            auth_token = HfFolder().get_token()
-        whoami = HfApi().whoami(auth_token)
-        email = whoami["email"] if "email" in whoami else ""
-        payload = {"email": email, "locale": locale, "dataset": bundle_version}
-        requests.post(f"{_API_URL}/{locale}/downloaders", json=payload).json()
-
     def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        hf_auth_token = dl_manager.download_config.use_auth_token
-        if hf_auth_token is None:
-            raise ConnectionError(
-                "Please set use_auth_token=True or use_auth_token='<TOKEN>' to download this dataset"
+        lang = self.config.name
+        audio_urls = {}
+        splits = ("train", "dev", "test", "other", "invalidated")
+        for split in splits:
+            audio_urls[split] = [
+                _AUDIO_URL.format(lang=lang, split=split, shard_idx=i) for i in range(_N_SHARDS[lang][split])
+            ]
+        archive_paths = dl_manager.download(audio_urls)
+        local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
+
+        meta_urls = {split: _TRANSCRIPT_URL.format(lang=lang, split=split) for split in splits}
+        meta_paths = dl_manager.download_and_extract(meta_urls)
+
+        split_generators = []
+        split_names = {
+            "train": datasets.Split.TRAIN,
+            "dev": datasets.Split.VALIDATION,
+            "test": datasets.Split.TEST,
+        }
+        for split in splits:
+            split_generators.append(
+                datasets.SplitGenerator(
+                    name=split_names.get(split, split),
+                    gen_kwargs={
+                        "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
+                        "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
+                        "meta_path": meta_paths[split],
+                    },
+                ),
             )
 
-        bundle_url_template = STATS["bundleURLTemplate"]
-        bundle_version = bundle_url_template.split("/")[0]
-        dl_manager.download_config.ignore_url_params = True
-
-        self._log_download(self.config.name, bundle_version, hf_auth_token)
-        archive_path = dl_manager.download(self._get_bundle_url(self.config.name, bundle_url_template))
-        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else None
-
-        if self.config.version < datasets.Version("5.0.0"):
-            path_to_data = ""
-        else:
-            path_to_data = "/".join([bundle_version, self.config.name])
-        path_to_clips = "/".join([path_to_data, "clips"]) if path_to_data else "clips"
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "local_extracted_archive": local_extracted_archive,
-                    "archive_iterator": dl_manager.iter_archive(archive_path),
-                    "metadata_filepath": "/".join([path_to_data, "train.tsv"]) if path_to_data else "train.tsv",
-                    "path_to_clips": path_to_clips,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "local_extracted_archive": local_extracted_archive,
-                    "archive_iterator": dl_manager.iter_archive(archive_path),
-                    "metadata_filepath": "/".join([path_to_data, "test.tsv"]) if path_to_data else "test.tsv",
-                    "path_to_clips": path_to_clips,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "local_extracted_archive": local_extracted_archive,
-                    "archive_iterator": dl_manager.iter_archive(archive_path),
-                    "metadata_filepath": "/".join([path_to_data, "dev.tsv"]) if path_to_data else "dev.tsv",
-                    "path_to_clips": path_to_clips,
-                },
-            ),
-            datasets.SplitGenerator(
-                name="other",
-                gen_kwargs={
-                    "local_extracted_archive": local_extracted_archive,
-                    "archive_iterator": dl_manager.iter_archive(archive_path),
-                    "metadata_filepath": "/".join([path_to_data, "other.tsv"]) if path_to_data else "other.tsv",
-                    "path_to_clips": path_to_clips,
-                },
-            ),
-            datasets.SplitGenerator(
-                name="invalidated",
-                gen_kwargs={
-                    "local_extracted_archive": local_extracted_archive,
-                    "archive_iterator": dl_manager.iter_archive(archive_path),
-                    "metadata_filepath": "/".join([path_to_data, "invalidated.tsv"])
-                    if path_to_data
-                    else "invalidated.tsv",
-                    "path_to_clips": path_to_clips,
-                },
-            ),
-        ]
+        return split_generators
 
-    def _generate_examples(
-        self,
-        local_extracted_archive,
-        archive_iterator,
-        metadata_filepath,
-        path_to_clips,
-    ):
-        """Yields examples."""
+    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
+        print(archives)
         data_fields = list(self._info().features.keys())
         metadata = {}
-        metadata_found = False
-        for path, f in archive_iterator:
-            if path == metadata_filepath:
-                metadata_found = True
-                lines = (line.decode("utf-8") for line in f)
-                reader = csv.DictReader(lines, delimiter="\t", quoting=csv.QUOTE_NONE)
-                for row in reader:
-                    # set absolute path for mp3 audio file
-                    if not row["path"].endswith(".mp3"):
-                        row["path"] += ".mp3"
-                    row["path"] = os.path.join(path_to_clips, row["path"])
-                    # accent -> accents in CV 8.0
-                    if "accents" in row:
-                        row["accent"] = row["accents"]
-                        del row["accents"]
-                    # if data is incomplete, fill with empty values
-                    for field in data_fields:
-                        if field not in row:
-                            row[field] = ""
-                    metadata[row["path"]] = row
-            elif path.startswith(path_to_clips):
-                assert metadata_found, "Found audio clips before the metadata TSV file."
-                if not metadata:
-                    break
-                if path in metadata:
-                    result = dict(metadata[path])
+        with open(meta_path, encoding="utf-8") as f:
+            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+            for row in tqdm(reader, desc="Reading metadata..."):
+                if not row["path"].endswith(".mp3"):
+                    row["path"] += ".mp3"
+                # accent -> accents in CV 8.0
+                if "accents" in row:
+                    row["accent"] = row["accents"]
+                    del row["accents"]
+                # if data is incomplete, fill with empty values
+                for field in data_fields:
+                    if field not in row:
+                        row[field] = ""
+                metadata[row["path"]] = row
+
+        for i, audio_archive in enumerate(archives):
+            for filename, file in audio_archive:
+                _, filename = os.path.split(filename)
+                if filename in metadata:
+                    result = dict(metadata[filename])
                     # set the audio feature and the path to the extracted file
-                    path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
-                    result["audio"] = {"path": path, "bytes": f.read()}
+                    path = os.path.join(local_extracted_archive_paths[i], filename) if local_extracted_archive_paths else filename
+                    result["audio"] = {"path": path, "bytes": file.read()}
                     # set path to None if the audio file doesn't exist locally (i.e. in streaming mode)
-                    result["path"] = path if local_extracted_archive else None
+                    result["path"] = path if local_extracted_archive_paths else filename
 
                     yield path, result
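
The reworked _generate_examples reads the split's TSV once and then joins tar members to metadata rows by mp3 basename. A minimal standalone sketch of that join, with plain tarfile standing in for dl_manager.iter_archive (the file names here are hypothetical placeholders):

    import csv
    import os
    import tarfile

    metadata = {}
    with open("train.tsv", encoding="utf-8") as f:  # transcript TSV for one split
        for row in csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE):
            metadata[os.path.basename(row["path"])] = row

    with tarfile.open("ar_train_0.tar") as tar:  # one downloaded audio shard
        for member in tar:
            if not member.isfile():
                continue
            filename = os.path.basename(member.name)
            if filename in metadata:  # same basename join as in the script
                audio_bytes = tar.extractfile(member).read()
                print(filename, len(audio_bytes))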
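
Assuming this script is what the Hub serves for the dataset, loading should follow the usual datasets flow. A sketch: Common Voice is gated, so accept the terms on the Hub and authenticate first, and while the script lives on the streaming branch a revision="streaming" argument may also be needed, per the TODO above:

    from datasets import load_dataset

    cv_ar = load_dataset(
        "mozilla-foundation/common_voice_11_0",
        "ar",
        split="train",
        streaming=True,       # iterate the tar shards without a full download
        use_auth_token=True,  # gated dataset: requires huggingface-cli login
    )
    print(next(iter(cv_ar))["sentence"])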