import os
from os.path import expanduser
import shutil
import torch
from soundfile import LibsndfileError
from datasets import load_dataset, DatasetDict, Audio
from tokenizer_encodec import EncodecTokenizer
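# Configuration via environment variables: translation direction, subset index,
# number of worker processes, and the Hugging Face organization.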
direction = os.getenv("DIRECTION", "enA-jaA")
sides = set(direction.split("-"))
dataset_id = os.getenv("DATASET_ID", "0")
num_proc = int(os.getenv("NUM_PROC", 1))
hf_org = os.getenv("HF_ORG", "asahi417")
hf_dataset = f"seamless-align-{direction}"
dataset = load_dataset(f"{hf_org}/{hf_dataset}", f"subset_{dataset_id}", split="train")
tokenizer = EncodecTokenizer.from_pretrained()
audio_loader = Audio()
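# Return False for examples whose audio cannot be decoded, so filter() drops them.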
def error_file(example):
    for side in sides:
        try:
            audio_loader.decode_example(example[f"{side}.audio"])
        except LibsndfileError:
            return False
    return True
print(f"Num examples: {len(dataset)}")
for s in sides:
    dataset = dataset.cast_column(f"{s}.audio", Audio(decode=False))
dataset = dataset.filter(error_file, num_proc=num_proc, desc="drop broken audio")
for s in sides:
    dataset = dataset.cast_column(f"{s}.audio", Audio())
print(f"Num examples (after filtering): {len(dataset)}")
def tokenize(example):
    for side in sides:
        wav = torch.as_tensor(example[f"{side}.audio"]["array"].reshape(1, 1, -1), dtype=torch.float32)
        example[f"{side}.audio.tokens"] = tokenizer.wav_to_tokens(
            wav=wav, sample_rate=example[f"{side}.audio"]["sampling_rate"]
        ).numpy().tolist()[0]
    return example
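# Tokenize every example and drop the raw audio and metadata columns that are no longer needed.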
dataset = dataset.map(
    function=tokenize,
    remove_columns=[f"{s}.audio" for s in sides] + [f"{s}.url" for s in sides]
    + [f"{s}.duration_start" for s in sides] + [f"{s}.duration_end" for s in sides],
    num_proc=num_proc,
    desc="tokenize dataset"
)
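# Upload the tokenized subset as a new dataset configuration on the Hub.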
DatasetDict({"train": dataset}).push_to_hub(f"{hf_org}/{hf_dataset}.tokenized", config_name=f"subset_{dataset_id}")
cache_dir = f"{expanduser('~')}/.cache/huggingface/datasets/{hf_org}___{hf_dataset}/subset_{dataset_id}"
if os.path.exists(cache_dir):
    shutil.rmtree(cache_dir)