"""Build the Kendex dataset and push it to the Hugging Face Hub."""
from os.path import basename, join
from pathlib import Path

import librosa
import numpy as np
import pandas as pd
from datasets import Audio, Dataset, DatasetDict
from transformers import AutoTokenizer

from leviticus import normalize

# Bounds for dropping clips that are too short/long and transcripts with too few/many words.
MAX_DURATION_IN_SECONDS = 10.0
MIN_DURATION_IN_SECONDS = 1.0
MAX_LEN = 50
MIN_LEN = 5
# Target sampling rate for the audio column.
SR = 16_000
TOKENIZER_CHECKPOINT = "distilbert-base-uncased-finetuned-sst-2-english"

tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_CHECKPOINT)


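# Row-level helpers passed to Dataset.filter / Dataset.map when building the splits below.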
def duration_filter(item):
    """Keep examples whose clip duration in seconds falls strictly between the configured bounds."""
    return MIN_DURATION_IN_SECONDS < item < MAX_DURATION_IN_SECONDS


def text_filter(item):
    """Keep examples whose transcript has strictly between MIN_LEN and MAX_LEN non-empty words."""
    return MIN_LEN < len([i for i in item.split(" ") if len(i) > 0]) < MAX_LEN


def text_mapper(item):
    """Add a normalized_text column derived from the raw transcript."""
    text = item["text"]
    item["normalized_text"] = normalize(text)
    return item


def create_datasets(df):
    def create_dataset(df_slice):
        audio_column = "audio"
        text_column = "text"
        duration_column = "duration"
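        # Build a Dataset from the DataFrame slice (preserve_index=False keeps the
        # pandas index out of the columns), cast the audio column to the target
        # sampling rate, drop out-of-range examples, and add the normalized transcript.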
        dataset = Dataset.from_pandas(df_slice, preserve_index=False)
        dataset = dataset.cast_column(audio_column, Audio(sampling_rate=SR))
        dataset = dataset.filter(text_filter, input_columns=[text_column])
        dataset = dataset.filter(duration_filter, input_columns=[duration_column])
        dataset = dataset.map(text_mapper, batched=False)

        return dataset
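    # 90/10 train/test split of the already-shuffled DataFrame.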
    train, test = np.split(df, [int(0.9 * len(df))])
    train_dataset = create_dataset(train)
    test_dataset = create_dataset(test)

    return train_dataset, test_dataset


def main():
    repo_dir = Path(__file__).resolve().parent.parent
    data_dir = join(repo_dir, "data")
    kendex_dir = join(data_dir, "Kendex")
    audio_dir = join(kendex_dir, "wavs")
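    # metadata.csv is pipe-delimited: column 0 holds the wav file stems, column 1 the transcripts.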
    metadata = pd.read_csv(join(kendex_dir, "metadata.csv"), delimiter="|", header=None)
    wavs = pd.Series([join(audio_dir, f"{f}.wav") for f in metadata[0]])
    data = {
        "audio": wavs,
        "file": [basename(w) for w in wavs],
        "text": metadata[1],
        "duration": [librosa.get_duration(path=w) for w in wavs],
    }
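    # Shuffle once with a fixed seed so the train/test split is reproducible.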
    df = pd.DataFrame(data).sample(frac=1, random_state=666).reset_index(drop=True)

    train, test = create_datasets(df)
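    # Collect both splits into a DatasetDict and upload to the michaelnetbiz/Kendex repo on the Hub.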
    full_dataset = DatasetDict({"train": train, "test": test})
    full_dataset.push_to_hub("michaelnetbiz/Kendex")


if __name__ == "__main__":
    main()