# experiment-process-seamless-align / tokenize_dataset_s2s.py
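# Example invocation (a sketch with illustrative values; the script reads all of its
# settings from the environment variables below):
#   DIRECTION=enA-jaA DATASET_ID=0 NUM_PROC=8 HF_ORG=asahi417 python tokenize_dataset_s2s.py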
import os
from os.path import expanduser
import shutil
import torch
from soundfile import LibsndfileError
from datasets import load_dataset, DatasetDict, Audio
from tokenizer_encodec import EncodecTokenizer
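# Runtime configuration: translation direction, subset index, number of worker
# processes, and the Hugging Face organization to load from and push to.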
direction = os.getenv("DIRECTION", "enA-jaA")
sides = set(direction.split("-"))
dataset_id = os.getenv("DATASET_ID", "0")
num_proc = int(os.getenv("NUM_PROC", 1))
hf_org = os.getenv("HF_ORG", "asahi417")
hf_dataset = f"seamless-align-{direction}"
dataset = load_dataset(f"{hf_org}/{hf_dataset}", f"subset_{dataset_id}", split="train")
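# EncodecTokenizer is provided by the local tokenizer_encodec module;
# max_seq_length is an upper bound on raw audio length, in samples.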
tokenizer = EncodecTokenizer.from_pretrained()
max_seq_length = 10000000
audio_loader = Audio()
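# Keep only examples whose audio (on both sides) decodes successfully, is non-empty,
# and does not exceed max_seq_length samples.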
def error_file(example):
    for side in sides:
        try:
            wav = audio_loader.decode_example(example[f"{side}.audio"])
            if len(wav["array"]) == 0 or len(wav["array"]) > max_seq_length:
                return False
        except LibsndfileError:
            return False
    return True
print(f"Num examples: {len(dataset)}")
for s in sides:
    dataset = dataset.cast_column(f"{s}.audio", Audio(decode=False))
dataset = dataset.filter(error_file, num_proc=num_proc, desc="drop broken audio")
for s in sides:
    dataset = dataset.cast_column(f"{s}.audio", Audio())
print(f"Num examples (after filtering): {len(dataset)}")
def tokenize(example):
    for side in sides:
        array = example[f"{side}.audio"]["array"]
        # empty audio was already removed by the filter above; skip defensively instead of failing
        if len(array) == 0:
            example[f"{side}.audio.tokens"] = []
            continue
        wav = torch.as_tensor(array.reshape(1, 1, -1), dtype=torch.float32)
        example[f"{side}.audio.tokens"] = tokenizer.wav_to_tokens(
            wav=wav, sample_rate=example[f"{side}.audio"]["sampling_rate"]
        ).numpy().tolist()[0]
    return example
dataset = dataset.map(
    function=tokenize,
    remove_columns=(
        [f"{s}.audio" for s in sides]
        + [f"{s}.url" for s in sides]
        + [f"{s}.duration_start" for s in sides]
        + [f"{s}.duration_end" for s in sides]
    ),
    num_proc=num_proc,
    desc="tokenize dataset"
)
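# Upload the tokenized subset to the Hub as its own config of a new dataset repo.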
DatasetDict({"train": dataset}).push_to_hub(f"{hf_org}/{hf_dataset}.tokenized", config_name=f"subset_{dataset_id}")
# DatasetDict({"train": dataset}).push_to_hub(f"{hf_org}/{hf_dataset}.tokenized.encodec", config_name=f"subset_{dataset_id}")
cache_dir = f"{expanduser('~')}/.cache/huggingface/datasets/{hf_org}___{hf_dataset}/subset_{dataset_id}"
if os.path.exists(cache_dir):
    shutil.rmtree(cache_dir)