ProgramComputer committed
Commit 8ef7570
Parent: 27a66f5

Enable streaming for some datasets and add video value

Streaming only uses the _NO_AUTH URLs. Video bytes are added for forward compatibility; m4a is not supported by datasets.Video.
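
For context, a minimal consumer-side sketch of what this enables (not part of the change itself), assuming the script is loaded from the ProgramComputer/voxceleb repo and that only the audio1 and video configs stream, per the guard added in _split_generators below; the output filename is illustrative.

from datasets import load_dataset

# Stream vox1 wav clips without downloading the full archives; only the
# _NO_AUTH URLs are used in streaming mode, so no credentials are needed.
audio_stream = load_dataset("ProgramComputer/voxceleb", "audio1", streaming=True, split="train")
for example in audio_stream:
    print(example["speaker_id"], example["video_id"], example["clip_index"])
    break

# Stream vox2 mp4 clips; the "video" column carries raw mp4 bytes.
video_stream = load_dataset("ProgramComputer/voxceleb", "video", streaming=True, split="train")
for example in video_stream:
    with open("clip.mp4", "wb") as f:  # illustrative output path
        f.write(example["video"])
    break

Requesting any other config with streaming=True is expected to raise the TypeError added in _split_generators.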

Files changed (1)
  1. vox_celeb.py +87 -58
vox_celeb.py CHANGED
@@ -32,6 +32,8 @@ import pandas as pd
 import requests
 
 import datasets
+import fsspec as fs
+from io import BytesIO
 
 _CITATION = """\
 @Article{Nagrani19,
@@ -108,43 +110,25 @@ _URLS = {
 
 _NO_AUTH_URLS = {
     "video": {
-        "placeholder": "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4",
-        "dev": (
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partaa",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partab",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partac",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partad",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partae",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partaf",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partag",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partah",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_mp4_partai",
-        ),
-        "test": "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_test_mp4.zip",
+        "dev": {
+            1:"https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_mp4_1.zip",
+            2:"https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_mp4_2.zip",
+            3:"https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_mp4_3.zip",
+            4:"https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_mp4_4.zip",
+            5:"https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_mp4_5.zip",
+            6:"https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_mp4_6.zip",
+
+        },
+        "test": {1:"https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_test_mp4.zip"}
     },
     "audio1": {
-        "placeholder": "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_dev_wav",
-        "dev": (
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_dev_wav_partaa",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_dev_wav_partab",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_dev_wav_partac",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_dev_wav_partad",
-        ),
-        "test": "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_test_wav.zip",
+        "dev": {1:"https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_dev_wav.zip"},
+        "test": {1:"https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox1/vox1_test_wav.zip"},
     },
     "audio2": {
-        "placeholder": "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac",
-        "dev": (
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partaa",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partab",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partac",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partad",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partae",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partaf",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partag",
-            "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_dev_aac_partah",
-        ),
-        "test": "https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_test_aac.zip",
+        "dev":
+        {1:"https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_aac_1.zip",2:"https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_aac_2.zip"},
+        "test": {1:"https://huggingface.co/datasets/ProgramComputer/voxceleb/resolve/main/vox2/vox2_test_aac.zip"},
     },
 }
 
@@ -152,7 +136,7 @@ _DATASET_IDS = {"video": "vox2", "audio1": "vox1", "audio2": "vox2"}
 
 _PLACEHOLDER_MAPS = dict(
     value
-    for urls in (*_URLS.values(), *_NO_AUTH_URLS.values())
+    for urls in _URLS.values()
     for value in ((urls["placeholder"], urls["dev"]), (urls["test"], (urls["test"],)))
 )
 
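
To make the _PLACEHOLDER_MAPS change above concrete, here is a small sketch of what the comprehension builds once _NO_AUTH_URLS is dropped from it. The two-entry _URLS stand-in and the example.org URLs are illustrative, not the script's real table.

# Stand-in for the authenticated _URLS table: each config has a "placeholder"
# prefix, a tuple of "dev" part files, and a single "test" archive.
_URLS = {
    "audio1": {
        "placeholder": "https://example.org/vox1_dev_wav",
        "dev": (
            "https://example.org/vox1_dev_wav_partaa",
            "https://example.org/vox1_dev_wav_partab",
        ),
        "test": "https://example.org/vox1_test_wav.zip",
    },
}

# Same comprehension as in the script: each placeholder URL maps to its dev parts,
# and each test URL maps to a one-element tuple of itself.
_PLACEHOLDER_MAPS = dict(
    value
    for urls in _URLS.values()
    for value in ((urls["placeholder"], urls["dev"]), (urls["test"], (urls["test"],)))
)

assert _PLACEHOLDER_MAPS["https://example.org/vox1_dev_wav"] == _URLS["audio1"]["dev"]
assert _PLACEHOLDER_MAPS["https://example.org/vox1_test_wav.zip"] == ("https://example.org/vox1_test_wav.zip",)

The no-auth table has to be excluded here because, after this commit, its entries no longer carry a "placeholder" key.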
 
@@ -217,11 +201,12 @@ class VoxCeleb(datasets.GeneratorBasedBuilder):
             "video_id": datasets.Value("string"),
             "clip_index": datasets.Value("int32"),
         }
-        if self.config.name == "audio1":
+        if self.config.name.startswith("audio"):
             features["speaker_name"] = datasets.Value("string")
             features["speaker_nationality"] = datasets.Value("string")
-        if self.config.name.startswith("audio"):
-            features["audio"] = datasets.Audio(sampling_rate=16000)
+            features["audio"] = datasets.Audio()
+        if self.config.name.startswith("video"):
+            features["video"] = datasets.Value("large_binary")
 
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
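
Since video is exposed as raw bytes (datasets.Value("large_binary")) rather than a decoded feature, and datasets.Audio() without a sampling_rate decodes clips at their native rate instead of forcing 16 kHz, a consumer handles the video payload itself. A minimal sketch, assuming an example dict with mp4 bytes in the "video" column; the temp-file round trip is just one convenient way to hand the bytes to a decoder that wants a path.

import tempfile

def clip_to_tempfile(example):
    """Write the raw mp4 bytes from the 'video' column to a temporary file and return its path."""
    tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    tmp.write(example["video"])
    tmp.close()
    return tmp.name

# e.g. path = clip_to_tempfile(next(iter(video_stream)))  # video_stream as in the earlier sketch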
@@ -232,8 +217,8 @@ class VoxCeleb(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        if dl_manager.is_streaming:
-            raise TypeError("Streaming is not supported for VoxCeleb")
+        if dl_manager.is_streaming and self.config.name != "audio1" and self.config.name != "video":
+            raise TypeError("Streaming is only supported for vox1 wav and vox2 mp4 features")
         targets = (
             ["audio1", "audio2"] if self.config.name == "audio" else [self.config.name]
         )
@@ -248,7 +233,7 @@ class VoxCeleb(datasets.GeneratorBasedBuilder):
         creds_path = Path(hf_dir) / f"voxceleb_{self.VERSION}_credentials"
         all_urls = _URLS
 
-        if cred_key is None:
+        if cred_key is None and not dl_manager.is_streaming:
             if creds_path.exists():
                 with open(creds_path, "r") as creds:
                     cred_key = json.load(creds)
@@ -366,10 +351,7 @@ class VoxCeleb(datasets.GeneratorBasedBuilder):
                 for target in targets
             )
         )
-
-        mapped_paths = dl_manager.extract(
-            dl_manager.download_custom(
-                dict(
+        target_dict = dict(
                     (
                         placeholder_key,
                         dict(
@@ -377,17 +359,29 @@ class VoxCeleb(datasets.GeneratorBasedBuilder):
                             for target in targets
                         ),
                     )
-                    for placeholder_key in ("placeholder", "test")
-                ),
+            for placeholder_key in ("dev" if cred_key is None else "placeholder", "test")
+        )
+        download_paths = dl_manager.download_custom(
+            target_dict,
                 download_custom,
+        ) if cred_key is not None else dl_manager.download(
+            target_dict
             )
+
+        mapped_paths = dl_manager.extract(
+            target_dict
         )
+
+
+        apply_function_recursive = lambda d, f: {k: apply_function_recursive(v, f) if isinstance(v, dict) else f(v) for k, v in d.items()}
+
+        mapped_paths = apply_function_recursive(mapped_paths, dl_manager.iter_files)
 
         return [
             datasets.SplitGenerator(
                 name="train",
                 gen_kwargs={
-                    "paths": mapped_paths["placeholder"],
+                    "paths": mapped_paths["placeholder"] if not dl_manager.is_streaming else mapped_paths["dev"] ,
                     "meta_paths": metadata,
                 },
             ),
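
The recursive one-line lambda above is dense; an equivalent named helper (a sketch, not part of the commit) shows what it does to the nested mapping before dl_manager.iter_files is applied at every leaf. The paths below are illustrative.

def apply_recursive(tree, fn):
    """Apply fn to every non-dict leaf of a nested dict, keeping the dict structure."""
    return {
        key: apply_recursive(value, fn) if isinstance(value, dict) else fn(value)
        for key, value in tree.items()
    }

# Shaped like the script's mapped_paths: {placeholder_key: {config: extracted path or shard dict}}.
mapped_paths = {
    "dev": {"audio1": {1: "/extracted/vox1_dev_wav"}},
    "test": {"audio1": {1: "/extracted/vox1_test_wav"}},
}
listed = apply_recursive(mapped_paths, lambda p: f"iter_files({p})")
assert listed["dev"]["audio1"][1] == "iter_files(/extracted/vox1_dev_wav)"

In the script the leaves become iterators over the individual wav/mp4 members, which is what the streaming branch of _generate_examples walks.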
@@ -400,8 +394,9 @@ class VoxCeleb(datasets.GeneratorBasedBuilder):
             ),
         ]
 
-    def _generate_examples(self, paths, meta_paths):
+    def _generate_examples(self,paths, meta_paths):
         key = 0
+        cred_key = os.environ.get("HUGGING_FACE_VOX_CELEB_KEY")
         for conf in paths:
             dataset_id = "vox1" if conf == "audio1" else "vox2"
             meta = pd.read_csv(
@@ -410,17 +405,18 @@ class VoxCeleb(datasets.GeneratorBasedBuilder):
                 index_col=0,
                 engine="python",
             )
-            dataset_path = next(Path(paths[conf]).iterdir())
-            dataset_format = dataset_path.name
-            for speaker_path in dataset_path.iterdir():
-                speaker = speaker_path.name
-                speaker_info = meta.loc[speaker]
-                for video in speaker_path.iterdir():
-                    video_id = video.name
-                    for clip in video.iterdir():
-                        clip_index = int(clip.stem)
+            if cred_key is None:
+                for path in paths[conf].values():
+                    for file in path:
+                        try:
+                            t = tuple(file.split("::")[0].split("/")[2:])
+                            _,dataset_format,speaker,video_id,clip_index= (None,) * (5 - len(t)) + t
+                        except Exception:
+                            raise Exception(file.split("::")[0].split("/")[2:])
+                        speaker_info = meta.loc[speaker]
+                        clip_index = int(Path(clip_index).stem)
                         info = {
-                            "file": str(clip),
+                            "file": file,
                             "file_format": dataset_format,
                             "dataset_id": dataset_id,
                             "speaker_id": speaker,
@@ -433,5 +429,38 @@ class VoxCeleb(datasets.GeneratorBasedBuilder):
                             info["speaker_nationality"] = speaker_info["Nationality"]
                         if conf.startswith("audio"):
                             info["audio"] = info["file"]
+                        if conf.startswith("video"):
+                            with fs.open(info["file"], 'rb') as f:
+                                info["video"] = BytesIO(f.read()).getvalue()
                         yield key, info
                         key += 1
+            else:
+                dataset_path = next(Path(paths[conf]).iterdir())
+                dataset_format = dataset_path.name
+                for speaker_path in dataset_path.iterdir():
+                    speaker = speaker_path.name
+                    speaker_info = meta.loc[speaker]
+                    for video in speaker_path.iterdir():
+                        video_id = video.name
+                        for clip in video.iterdir():
+                            clip_index = int(clip.stem)
+                            clip = os.path.join(local_extracted_archive, clip.name) if local_extracted_archive else clip
+                            info = {
+                                "file": str(clip),
+                                "file_format": dataset_format,
+                                "dataset_id": dataset_id,
+                                "speaker_id": speaker,
+                                "speaker_gender": speaker_info["Gender"],
+                                "video_id": video_id,
+                                "clip_index": clip_index,
+                            }
+                            if dataset_id == "vox1":
+                                info["speaker_name"] = speaker_info["VGGFace1 ID"]
+                                info["speaker_nationality"] = speaker_info["Nationality"]
+                            if conf.startswith("audio"):
+                                info["audio"] = info["file"]
+                            if conf.startswith("video"):
+                                with fs.open(info["file"], 'rb') as f:
+                                    info["video"] = BytesIO(f.read()).getvalue()
+                            yield key, info
+                            key += 1
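
The fsspec read in the new video branch is what lets the same code pull bytes from either a local clip or a streamed zip://...::https://... member. A self-contained sketch of the call pattern on a local stand-in file; the byte content is meaningless and only there so the snippet runs.

import fsspec as fs
from io import BytesIO

# Create a tiny stand-in file so the sketch has something to open.
with open("clip.mp4", "wb") as out:
    out.write(b"\x00\x00\x00\x18ftypmp42")

# fs.open accepts plain paths as well as chained URLs such as
# "zip://id00017/xyz/00001.mp4::https://..." (hypothetical), which is why the
# branch works in both streaming and non-streaming modes.
with fs.open("clip.mp4", "rb") as f:
    video_bytes = BytesIO(f.read()).getvalue()  # same result as f.read(); mirrors the script

assert isinstance(video_bytes, bytes) and len(video_bytes) > 0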