#!/usr/bin/env python3
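"""Build the Kendex speech dataset and push train/test splits to the Hugging Face Hub."""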
from os.path import basename, join
from pathlib import Path
import librosa
import numpy as np
import pandas as pd
from datasets import Audio, Dataset, DatasetDict
from leviticus import normalize
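
# Filtering thresholds: clip duration in seconds, transcript length in words.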
MAX_DURATION_IN_SECONDS = 10.0
MIN_DURATION_IN_SECONDS = 1.0
MAX_LEN = 50
MIN_LEN = 5

def duration_filter(item):
    """Keep clips whose duration lies strictly within the configured bounds."""
    return MIN_DURATION_IN_SECONDS < item < MAX_DURATION_IN_SECONDS

def text_filter(item):
    """Keep transcripts whose word count lies strictly within the configured bounds."""
    return MIN_LEN < len(item.split()) < MAX_LEN

def create_dataset(item):
    """Build a Dataset from a DataFrame, decode audio at 16 kHz, and apply both filters."""
    # preserve_index=False keeps the pandas index out of the resulting dataset.
    dataset = Dataset.from_pandas(item, preserve_index=False)
    dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))
    dataset = dataset.filter(text_filter, input_columns=["text"])
    dataset = dataset.filter(duration_filter, input_columns=["duration"])
    return dataset
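
# Resolve data locations relative to the repository root.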
repo_dir = Path(__file__).resolve().parent.parent
data_dir = join(repo_dir, "data")
kendex_dir = join(data_dir, "Kendex")
audio_dir = join(kendex_dir, "wavs")
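
# metadata.csv is pipe-delimited: column 0 holds the clip id (wav filename stem),
# column 1 the raw transcript.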
metadata = pd.read_csv(join(kendex_dir, "metadata.csv"), delimiter="|", header=None)
wavs = pd.Series([join(audio_dir, f"{f}.wav") for f in metadata[0]])
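
# Assemble one row per clip: audio path, filename, raw and normalized text,
# and duration (consumed by duration_filter above).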
data = {
    "audio": wavs,
    "file": [basename(w) for w in wavs],
    "text": metadata[1],
    "norm": metadata[1].map(normalize),
    "duration": [librosa.get_duration(path=w) for w in wavs],
}
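
# Shuffle with a fixed seed for reproducibility, then split 90/10 into train/test.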
df = pd.DataFrame(data).sample(frac=1, random_state=666).reset_index(drop=True)
train, test = np.split(df, [int(0.9 * len(df))])
train_dataset = create_dataset(train)
test_dataset = create_dataset(test)
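
# Bundle the splits and publish them to the Hugging Face Hub.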
full_dataset = DatasetDict({"train": train_dataset, "test": test_dataset})
full_dataset.push_to_hub("michaelnetbiz/Kendex")