import os

import pandas as pd
from trainer import Trainer, TrainerArgs

from TTS.config.shared_configs import BaseAudioConfig
from TTS.tts.configs.shared_configs import BaseDatasetConfig
from TTS.tts.configs.vits_config import VitsConfig
from TTS.tts.datasets import load_tts_samples
from TTS.tts.models.vits import Vits, VitsArgs
from TTS.tts.utils.speakers import SpeakerManager
from TTS.tts.utils.text.tokenizer import TTSTokenizer
from TTS.utils.audio import AudioProcessor
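
# Output directory for checkpoints/logs, location of the Common Voice Italian
# corpus, and an optional previous run to continue training from.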
output_path = "/run/media/opensuse/Barracuda/Models/TTS_new/trained_common_voice"
dataset_path = "/run/media/opensuse/Barracuda/Datasets/CommonVoiceMozillaIta/cv-corpus-9.0-2022-04-27/it"
pretrained_path = "/run/media/opensuse/Barracuda/Models/TTS_new/trained_common_voice/vits_vctk-June-05-2022_03+45PM-0cf3265a/"
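
# Dataset config. The "vctk" name is just a placeholder here: samples are
# actually parsed by the custom commonvoice_formatter passed to
# load_tts_samples() below.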
dataset_config = BaseDatasetConfig(
    name="vctk", meta_file_train="", language="it-it", path=dataset_path
)
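

# Custom formatter for the Common Voice TSV manifest. Each row becomes one
# sample with its transcript, clip path, and speaker (client) id. The manifest
# name is hard-coded to train.tsv; manifest_file is accepted only to match the
# expected formatter signature.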
def commonvoice_formatter(root_path, manifest_file, **kwargs):
    txt_file = os.path.join(root_path, "train.tsv")
    df = pd.read_csv(txt_file, sep="\t")
    items = []
    for _, data in df.iterrows():
        items.append(
            {
                "text": data["sentence"],
                "audio_file": os.path.join(root_path, "clips", data["path"]),
                "speaker_name": data["client_id"],
            }
        )
    return items
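

# Audio settings. Common Voice clips are MP3s (typically 48 kHz), so
# resample=True converts them to the 22050 Hz training sample rate.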
audio_config = BaseAudioConfig(
    sample_rate=22050,
    win_length=1024,
    hop_length=256,
    num_mels=80,
    preemphasis=0.0,
    ref_level_db=20,
    log_func="np.log",
    do_trim_silence=True,
    trim_db=23.0,
    mel_fmin=0,
    mel_fmax=None,
    spec_gain=1.0,
    signal_norm=False,
    do_amp_to_db_linear=False,
    resample=True,
)
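
# Use learned speaker embeddings so one model covers all Common Voice speakers.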
vitsArgs = VitsArgs(
    use_speaker_embedding=True,
)
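
# Training configuration. Phonemes are disabled, so text only passes through
# the cleaner; "english_cleaners" is carried over from the VCTK recipe this
# script is adapted from, and a basic/multilingual cleaner may fit Italian
# text better.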
config = VitsConfig(
    model_args=vitsArgs,
    audio=audio_config,
    run_name="vits_vctk",
    batch_size=32,
    eval_batch_size=16,
    batch_group_size=5,
    num_loader_workers=4,
    num_eval_loader_workers=4,
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1000,
    text_cleaner="english_cleaners",
    use_phonemes=False,
    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
    compute_input_seq_cache=True,
    print_step=25,
    print_eval=False,
    mixed_precision=True,
    max_text_len=325,
    output_path=output_path,
    datasets=[dataset_config],
)
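
# Init the audio processor used for feature extraction and resampling.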
ap = AudioProcessor.init_from_config(config)
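
# Init the tokenizer; it may update the config (e.g. the character set), so
# the returned config is used from here on.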
tokenizer, config = TTSTokenizer.init_from_config(config)
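
# Load the training and evaluation splits with the custom formatter.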
train_samples, eval_samples = load_tts_samples(
    dataset_config, eval_split=True, formatter=commonvoice_formatter
)
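
# Collect speaker ids from the loaded samples and size the speaker embedding
# layer accordingly.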
speaker_manager = SpeakerManager()
speaker_manager.set_speaker_ids_from_data(train_samples + eval_samples)
config.model_args.num_speakers = speaker_manager.num_speakers
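
# Init the multi-speaker VITS model.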
model = Vits(config, ap, tokenizer, speaker_manager)
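
# Continue from the previous run's checkpoints if a path is set, otherwise
# start a fresh run.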
if pretrained_path:
    trainer = Trainer(
        TrainerArgs(
            continue_path=pretrained_path,
        ),
        config,
        output_path,
        model=model,
        train_samples=train_samples,
        eval_samples=eval_samples,
    )
else:
    trainer = Trainer(
        TrainerArgs(),
        config,
        output_path,
        model=model,
        train_samples=train_samples,
        eval_samples=eval_samples,
    )

trainer.fit()