init
tokenize_dataset_s2s.py CHANGED (+9 -3)
@@ -4,7 +4,7 @@ from os.path import expanduser
 import shutil
 import torch
 from soundfile import LibsndfileError
-from datasets import load_dataset, DatasetDict
+from datasets import load_dataset, DatasetDict, Audio
 from encodec_audio_tokenizer import EncodecTokenizer
 
 
@@ -18,18 +18,24 @@ hf_dataset = f"seamless-align-{direction}"
 dataset = load_dataset(f"{hf_org}/{hf_dataset}", f"subset_{dataset_id}", split="train")
 tokenizer = EncodecTokenizer.from_pretrained()
 
+audio_loader = Audio()
+
 
 def error_file(example):
     for side in sides:
         try:
-            example[f"{side}.audio"]
+            audio_loader.decode_example(example[f"{side}.audio"])
         except LibsndfileError:
             return False
     return True
 
 
 print(f"Num examples: {len(dataset)}")
-dataset = dataset.filter(error_file, num_proc=num_proc)
+for s in sides:
+    dataset = dataset.cast_column(f"{s}.audio", Audio(decode=False))
+dataset = dataset.filter(error_file, num_proc=num_proc, desc="drop broken audio")
+for s in sides:
+    dataset = dataset.cast_column(f"{s}.audio", Audio())
 print(f"Num examples (after filtering): {len(dataset)}")
 
 
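The pattern in this commit — cast the audio columns to non-decoding features, filter, then cast back — is presumably there so that decoding happens inside the try/except rather than during example formatting: with the default Audio(decode=True), datasets decodes each file while materializing the example, so a corrupt file can raise LibsndfileError before error_file's try block is even entered. Casting to Audio(decode=False) hands the raw path/bytes dict to error_file, which decodes explicitly via Audio.decode_example and can catch the error. A minimal self-contained sketch of the same pattern; the dataset name and the concrete values of sides and num_proc are placeholders (the real script defines them elsewhere):

# Sketch of the broken-audio filtering pattern from this commit.
# The dataset name, `sides`, and `num_proc` values below are assumptions.
from datasets import Audio, load_dataset
from soundfile import LibsndfileError

sides = ["source", "target"]  # assumed prefixes of the "<side>.audio" columns
num_proc = 4                  # assumed worker count for filtering

dataset = load_dataset("user/some-speech-dataset", split="train")  # hypothetical

audio_loader = Audio()

def error_file(example):
    # Decode each side explicitly so a corrupt file raises here,
    # inside the try/except, not in datasets' own formatting step.
    for side in sides:
        try:
            audio_loader.decode_example(example[f"{side}.audio"])
        except LibsndfileError:
            return False
    return True

# Hand raw bytes/paths to error_file instead of eagerly decoded arrays.
for s in sides:
    dataset = dataset.cast_column(f"{s}.audio", Audio(decode=False))
dataset = dataset.filter(error_file, num_proc=num_proc, desc="drop broken audio")
# Restore automatic decoding for downstream use (e.g. tokenization).
for s in sides:
    dataset = dataset.cast_column(f"{s}.audio", Audio())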