cache speaker metadata
Browse files — libriheavy.py: +17 −7
libriheavy.py
CHANGED
|
@@ -1,5 +1,7 @@
|
|
| 1 |
import json
|
| 2 |
import gzip
|
|
|
|
|
|
|
| 3 |
|
| 4 |
import datasets
|
| 5 |
import numpy as np
|
|
@@ -79,15 +81,25 @@ class Libriheavy(datasets.GeneratorBasedBuilder):
|
|
| 79 |
# now we load the individual speaker metadata
|
| 80 |
speaker_metadata = {}
|
| 81 |
for speaker_id, metadata_path in tqdm(speaker_list.items()):
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
speaker_chunks = []
|
| 87 |
even_speaker_chunks = []
|
| 88 |
odd_speaker_chunks = []
|
| 89 |
for speaker_id, metadata in speaker_metadata.items():
|
| 90 |
-
print(f"Processing speaker {speaker_id}")
|
| 91 |
for chunk_id, chunk in metadata["chunks"].items():
|
| 92 |
chunk_dict = {
|
| 93 |
"speaker_id": speaker_id,
|
|
@@ -124,9 +136,7 @@ class Libriheavy(datasets.GeneratorBasedBuilder):
|
|
| 124 |
|
| 125 |
def _generate_examples(self, speaker_chunks, split):
|
| 126 |
"""Yields examples."""
|
| 127 |
-
print(f"Generating examples for {split}")
|
| 128 |
for chunk in speaker_chunks:
|
| 129 |
-
print(f"Processing {chunk['id']}")
|
| 130 |
npz = dict(np.load(chunk["audio"], allow_pickle=True))
|
| 131 |
utterances = npz.keys()
|
| 132 |
with gzip.open(chunk["text"], "rt") as f:
|
|
|
|
| 1 |
import json
|
| 2 |
import gzip
|
| 3 |
+
import os
|
| 4 |
+
from pathlib import Path
|
| 5 |
|
| 6 |
import datasets
|
| 7 |
import numpy as np
|
|
|
|
| 81 |
# Load the individual per-speaker metadata files.
# Speaker metadata is small, so it is always cached locally under
# $HF_HOME/libriheavy_metadata to avoid re-downloading it on every run.
hf_home = os.environ.get("HF_HOME", "~/.cache/huggingface")
# expanduser() so the "~/.cache/huggingface" default resolves to a real path;
# hoisted out of the loop because the cache location is loop-invariant.
# FIX: the original wrote f"hf_home/libriheavy_metadata" (literal "hf_home",
# no interpolation), so the cache landed in a relative ./hf_home directory.
metadata_cache = Path(hf_home).expanduser() / "libriheavy_metadata"
speaker_metadata = {}
for speaker_id, metadata_path in tqdm(speaker_list.items()):
    cache_file = metadata_cache / f"{speaker_id}.json"
    if cache_file.exists():
        # Cache hit: reuse the previously downloaded metadata.
        with open(cache_file, "r") as f:
            speaker_metadata[speaker_id] = json.load(f)
    else:
        # Cache miss: download the metadata, then write it through to the cache.
        metadata_cache.mkdir(parents=True, exist_ok=True)
        metadata_path = f"{PATH}/{speaker_id}/{metadata_path}"
        metadata_path = dl_manager.download_and_extract(metadata_path)
        with open(metadata_path, "r") as f:
            speaker_metadata[speaker_id] = json.load(f)
        with open(cache_file, "w") as f:
            json.dump(speaker_metadata[speaker_id], f)
|
| 98 |
+
|
| 99 |
speaker_chunks = []
|
| 100 |
even_speaker_chunks = []
|
| 101 |
odd_speaker_chunks = []
|
| 102 |
for speaker_id, metadata in speaker_metadata.items():
|
|
|
|
| 103 |
for chunk_id, chunk in metadata["chunks"].items():
|
| 104 |
chunk_dict = {
|
| 105 |
"speaker_id": speaker_id,
|
|
|
|
| 136 |
|
| 137 |
def _generate_examples(self, speaker_chunks, split):
|
| 138 |
"""Yields examples."""
|
|
|
|
| 139 |
for chunk in speaker_chunks:
|
|
|
|
| 140 |
npz = dict(np.load(chunk["audio"], allow_pickle=True))
|
| 141 |
utterances = npz.keys()
|
| 142 |
with gzip.open(chunk["text"], "rt") as f:
|