# persian-female-Wavernn/train_wavernn.py
# Train a WaveRNN vocoder with Coqui TTS on a Persian female speech dataset.
import os
from trainer import Trainer, TrainerArgs
from TTS.utils.audio import AudioProcessor
from TTS.vocoder.configs import WavernnConfig
from TTS.vocoder.datasets.preprocess import load_wav_data
from TTS.vocoder.models.wavernn import Wavernn
from TTS.config.shared_configs import BaseAudioConfig

output_path = os.path.dirname(os.path.abspath(__file__))
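# audio processing settings used for mel-spectrogram extraction and silence trimming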
audio_config = BaseAudioConfig(
    sample_rate=24000,
    do_trim_silence=True,
    resample=False,
    mel_fmin=95.0,
    mel_fmax=8000.0,
)
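
# training configuration; any WavernnConfig field not set here keeps the library default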
config = WavernnConfig(
    batch_size=64,
    eval_batch_size=16,
    num_loader_workers=1,
    num_eval_loader_workers=1,
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1000,
    seq_len=1280,
    pad_short=2000,
    use_noise_augment=False,
    save_step=1000,
    eval_split_size=10,
    print_step=25,
    print_eval=True,
    mixed_precision=False,
    lr=1e-4,
    data_path="/kaggle/input/persian-tts-dataset-famale/wavs/",
    output_path=output_path,
    audio=audio_config,
)
# init the audio processor; it is handed to the trainer below as a training asset
ap = AudioProcessor(**config.audio.to_dict())
# split the wav files into eval and train lists (eval_split_size files held out for eval)
eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
# init model
model = Wavernn(config)
# init the trainer and 🚀
trainer = Trainer(
    TrainerArgs(),
    config,
    output_path,
    model=model,
    train_samples=train_samples,
    eval_samples=eval_samples,
    training_assets={"audio_processor": ap},
)
trainer.fit()
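
# Note: the Coqui Trainer parses command-line flags via TrainerArgs, so an
# interrupted run can typically be resumed by pointing --continue_path at the
# run folder created under output_path (the folder name here is a placeholder):
#   python train_wavernn.py --continue_path <output_path>/run-<date>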