#!/usr/bin/env python3
"""Prepare the Kendex audio/text dataset and push it to the Hugging Face Hub."""
from os.path import basename, join
from pathlib import Path

import librosa
import numpy as np
import pandas as pd
from datasets import Audio, Dataset, DatasetDict

from leviticus import normalize  # project-local text normalization helper

# Bounds used to filter clips by duration (seconds) and transcripts by word count.
MAX_DURATION_IN_SECONDS = 10.0
MIN_DURATION_IN_SECONDS = 1.0
MAX_LEN = 50
MIN_LEN = 5


def duration_filter(item):
    """Keep clips whose duration in seconds lies strictly within the configured bounds."""
    return MIN_DURATION_IN_SECONDS < item < MAX_DURATION_IN_SECONDS


def text_filter(item):
    """Keep transcripts whose whitespace-separated word count lies strictly within the configured bounds."""
    return MIN_LEN < len([i for i in item.split(" ") if len(i) > 0]) < MAX_LEN
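
# A quick sanity check of the two filters (hypothetical values, not part of the pipeline):
#
#     duration_filter(0.5)   # False: shorter than MIN_DURATION_IN_SECONDS
#     duration_filter(4.2)   # True
#     text_filter("hi there")                     # False: only 2 words
#     text_filter("one two three four five six")  # True: 6 words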


def create_dataset(item):
    """Build a Dataset from a dataframe, cast paths to 16 kHz audio, and apply both filters."""
    # preserve_index=False keeps the pandas index from leaking in as a column.
    dataset = Dataset.from_pandas(item, preserve_index=False)
    # Casting to Audio makes `datasets` decode and resample each file to 16 kHz on access.
    dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))
    dataset = dataset.filter(text_filter, input_columns=["text"])
    dataset = dataset.filter(duration_filter, input_columns=["duration"])
    return dataset
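
# Minimal illustration of create_dataset on a toy frame (hypothetical path and values;
# the real frame is assembled below from metadata.csv):
#
#     toy = pd.DataFrame({
#         "audio": ["/tmp/a.wav"],
#         "text": ["one two three four five six"],
#         "duration": [4.2],
#     })
#     toy_ds = create_dataset(toy)  # the single row survives both filters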


# Resolve repository-relative paths to the LJSpeech-style metadata and audio files.
repo_dir = Path(__file__).resolve().parent.parent
data_dir = join(repo_dir, "data")
kendex_dir = join(data_dir, "Kendex")
audio_dir = join(kendex_dir, "wavs")

# metadata.csv is pipe-delimited with no header row: column 0 holds the wav file
# stem, column 1 the raw transcript.
metadata = pd.read_csv(join(kendex_dir, "metadata.csv"), delimiter="|", header=None)
wavs = pd.Series([join(audio_dir, f"{f}.wav") for f in metadata[0]])
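
# Illustrative metadata.csv row (hypothetical, not real data):
#
#     kendex_0001|Some raw transcript text
#
# which yields metadata[0] == "kendex_0001" and a wav path ending in "wavs/kendex_0001.wav".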

data = {
    "audio": wavs,  # absolute paths; cast to Audio features in create_dataset
    "file": [basename(w) for w in wavs],
    "text": metadata[1],
    "norm": metadata[1].map(normalize),  # normalized transcript
    "duration": [librosa.get_duration(path=w) for w in wavs],  # seconds
}
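
# librosa.get_duration opens every wav once, which dominates runtime on large corpora.
# A thread pool is one way to speed that up (a sketch, not part of the original script):
#
#     from concurrent.futures import ThreadPoolExecutor
#
#     with ThreadPoolExecutor() as ex:
#         durations = list(ex.map(lambda w: librosa.get_duration(path=w), wavs))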

# Shuffle deterministically, then split 90/10 into train and test.
df = pd.DataFrame(data).sample(frac=1, random_state=666).reset_index(drop=True)
train, test = np.split(df, [int(0.9 * len(df))])

train_dataset = create_dataset(train)
test_dataset = create_dataset(test)
full_dataset = DatasetDict({"train": train_dataset, "test": test_dataset})
full_dataset.push_to_hub("michaelnetbiz/Kendex")
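
# Once pushed (this assumes the caller is authenticated, e.g. via `huggingface-cli login`),
# the dataset can be loaded back anywhere with:
#
#     from datasets import load_dataset
#
#     kendex = load_dataset("michaelnetbiz/Kendex")
#     print(kendex["train"][0]["norm"], kendex["train"][0]["duration"])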