opensuse committed
Commit 60dc3af
1 Parent(s): 55a451d

trained model
.gitattributes CHANGED
@@ -25,3 +25,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+checkpoint_50000.pth filter=lfs diff=lfs merge=lfs -text
+checkpoint_60000.pth filter=lfs diff=lfs merge=lfs -text
+checkpoint_70000.pth filter=lfs diff=lfs merge=lfs -text
+checkpoint_30000.pth filter=lfs diff=lfs merge=lfs -text
+checkpoint_40000.pth filter=lfs diff=lfs merge=lfs -text
+best_model.pth filter=lfs diff=lfs merge=lfs -text
+best_model_45161.pth filter=lfs diff=lfs merge=lfs -text
best_model.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b41eeba6ae801c324f3f22c3b0ab22a39257c01cd1b09e0eb4080ce66c5c3e35
+size 1041326717
best_model_45161.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b41eeba6ae801c324f3f22c3b0ab22a39257c01cd1b09e0eb4080ce66c5c3e35
+size 1041326717
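
The two best_model entries above (and the checkpoint_*.pth entries below) are Git LFS pointer files rather than the checkpoints themselves: three lines giving the pointer spec version, the SHA-256 object ID, and the blob size in bytes. Note that best_model.pth and best_model_45161.pth carry the same oid and size, so both names resolve to a single ~1 GB object. As a minimal sketch (a hypothetical helper, not part of this repo), a separately downloaded checkpoint can be verified against its pointer like this:

```python
import hashlib

def parse_lfs_pointer(pointer_path):
    # A pointer file has lines like "version ...", "oid sha256:<hex>", "size <bytes>".
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    algo, _, digest = fields["oid"].partition(":")
    assert algo == "sha256"
    return digest, int(fields["size"])

def verify_checkpoint(pointer_path, blob_path):
    # Compare the downloaded blob's size and SHA-256 against the pointer.
    expected_digest, expected_size = parse_lfs_pointer(pointer_path)
    h = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            size += len(chunk)
    return size == expected_size and h.hexdigest() == expected_digest
```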
checkpoint_30000.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a286491197e632ed4c5c31679876fe8384be92de292f0d24ac44cb8850ed1e7
+size 1041326589
checkpoint_40000.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:903b2b7ec050864623cc17655dad2f44fac81e99b8a754ba7b58a399c6b05f1d
+size 1041326589
checkpoint_50000.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5e33bbf0a92873ebcf5015032f653967d4e7e6f410d36e5296f104c82e5f903
+size 1041326717
checkpoint_60000.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91ffd8643368831c58d1ac049679a74db875bcaf3fd7db777cc7f79b1cbc504a
+size 1041326717
checkpoint_70000.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11419ab1b75ebdeb7bad74b432cede9ec0b5f6500515df6b9b2de52321f8961a
+size 1041328637
config.json ADDED
@@ -0,0 +1,265 @@
+{
+    "output_path": "/run/media/opensuse/Barracuda/Models/TTS_new/trained_common_voice",
+    "logger_uri": null,
+    "run_name": "vits_vctk",
+    "project_name": null,
+    "run_description": "\ud83d\udc38Coqui trainer run.",
+    "print_step": 25,
+    "plot_step": 100,
+    "model_param_stats": false,
+    "wandb_entity": null,
+    "dashboard_logger": "tensorboard",
+    "log_model_step": 10000,
+    "save_step": 10000,
+    "save_n_checkpoints": 5,
+    "save_checkpoints": true,
+    "save_all_best": false,
+    "save_best_after": 10000,
+    "target_loss": null,
+    "print_eval": false,
+    "test_delay_epochs": -1,
+    "run_eval": true,
+    "distributed_backend": "nccl",
+    "distributed_url": "tcp://localhost:54321",
+    "mixed_precision": true,
+    "epochs": 1000,
+    "batch_size": 32,
+    "eval_batch_size": 16,
+    "grad_clip": [
+        1000.0,
+        1000.0
+    ],
+    "scheduler_after_epoch": true,
+    "lr": 0.001,
+    "optimizer": "AdamW",
+    "optimizer_params": {
+        "betas": [
+            0.8,
+            0.99
+        ],
+        "eps": 1e-09,
+        "weight_decay": 0.01
+    },
+    "lr_scheduler": "",
+    "lr_scheduler_params": {},
+    "use_grad_scaler": false,
+    "cudnn_enable": true,
+    "cudnn_benchmark": true,
+    "torch_seed": 54321,
+    "model": "vits",
+    "num_loader_workers": 4,
+    "num_eval_loader_workers": 4,
+    "use_noise_augment": false,
+    "use_language_weighted_sampler": false,
+    "audio": {
+        "fft_size": 1024,
+        "win_length": 1024,
+        "hop_length": 256,
+        "frame_shift_ms": null,
+        "frame_length_ms": null,
+        "stft_pad_mode": "reflect",
+        "sample_rate": 22050,
+        "resample": true,
+        "preemphasis": 0.0,
+        "ref_level_db": 20,
+        "do_sound_norm": false,
+        "log_func": "np.log",
+        "do_trim_silence": true,
+        "trim_db": 23,
+        "do_rms_norm": false,
+        "db_level": null,
+        "power": 1.5,
+        "griffin_lim_iters": 60,
+        "num_mels": 80,
+        "mel_fmin": 0.0,
+        "mel_fmax": null,
+        "spec_gain": 1,
+        "do_amp_to_db_linear": false,
+        "do_amp_to_db_mel": true,
+        "pitch_fmax": 640.0,
+        "pitch_fmin": 0.0,
+        "signal_norm": false,
+        "min_level_db": -100,
+        "symmetric_norm": true,
+        "max_norm": 4.0,
+        "clip_norm": true,
+        "stats_path": null
+    },
+    "use_phonemes": false,
+    "phonemizer": null,
+    "phoneme_language": null,
+    "compute_input_seq_cache": true,
+    "text_cleaner": "english_cleaners",
+    "enable_eos_bos_chars": false,
+    "test_sentences_file": "",
+    "phoneme_cache_path": "/run/media/opensuse/Barracuda/Models/TTS_new/trained_common_voice/phoneme_cache",
+    "characters": {
+        "characters_class": "TTS.tts.utils.text.characters.Graphemes",
+        "vocab_dict": null,
+        "pad": "<PAD>",
+        "eos": "<EOS>",
+        "bos": "<BOS>",
+        "blank": "<BLNK>",
+        "characters": "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
+        "punctuations": "!'(),-.:;? ",
+        "phonemes": null,
+        "is_unique": false,
+        "is_sorted": true
+    },
+    "add_blank": true,
+    "batch_group_size": 5,
+    "loss_masking": null,
+    "sort_by_audio_len": false,
+    "min_audio_len": 1,
+    "max_audio_len": Infinity,
+    "min_text_len": 1,
+    "max_text_len": 325,
+    "compute_f0": false,
+    "compute_linear_spec": true,
+    "precompute_num_workers": 0,
+    "start_by_longest": false,
+    "datasets": [
+        {
+            "name": "vctk",
+            "path": "/run/media/opensuse/Barracuda/Datasets/CommonVoiceMozillaIta/cv-corpus-9.0-2022-04-27/it",
+            "meta_file_train": "",
+            "ignored_speakers": null,
+            "language": "it-it",
+            "meta_file_val": "",
+            "meta_file_attn_mask": ""
+        }
+    ],
+    "test_sentences": [
+        [
+            "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent."
+        ],
+        [
+            "Be a voice, not an echo."
+        ],
+        [
+            "I'm sorry Dave. I'm afraid I can't do that."
+        ],
+        [
+            "This cake is great. It's so delicious and moist."
+        ],
+        [
+            "Prior to November 22, 1963."
+        ]
+    ],
+    "eval_split_max_size": null,
+    "eval_split_size": 0.01,
+    "model_args": {
+        "num_chars": 67,
+        "out_channels": 513,
+        "spec_segment_size": 32,
+        "hidden_channels": 192,
+        "hidden_channels_ffn_text_encoder": 768,
+        "num_heads_text_encoder": 2,
+        "num_layers_text_encoder": 6,
+        "kernel_size_text_encoder": 3,
+        "dropout_p_text_encoder": 0.1,
+        "dropout_p_duration_predictor": 0.5,
+        "kernel_size_posterior_encoder": 5,
+        "dilation_rate_posterior_encoder": 1,
+        "num_layers_posterior_encoder": 16,
+        "kernel_size_flow": 5,
+        "dilation_rate_flow": 1,
+        "num_layers_flow": 4,
+        "resblock_type_decoder": "1",
+        "resblock_kernel_sizes_decoder": [
+            3,
+            7,
+            11
+        ],
+        "resblock_dilation_sizes_decoder": [
+            [
+                1,
+                3,
+                5
+            ],
+            [
+                1,
+                3,
+                5
+            ],
+            [
+                1,
+                3,
+                5
+            ]
+        ],
+        "upsample_rates_decoder": [
+            8,
+            8,
+            2,
+            2
+        ],
+        "upsample_initial_channel_decoder": 512,
+        "upsample_kernel_sizes_decoder": [
+            16,
+            16,
+            4,
+            4
+        ],
+        "use_sdp": true,
+        "noise_scale": 1.0,
+        "inference_noise_scale": 0.667,
+        "length_scale": 1.0,
+        "noise_scale_dp": 1.0,
+        "inference_noise_scale_dp": 1.0,
+        "max_inference_len": null,
+        "init_discriminator": true,
+        "use_spectral_norm_disriminator": false,
+        "use_speaker_embedding": true,
+        "num_speakers": 1174,
+        "speakers_file": "/run/media/opensuse/Barracuda/Models/TTS_new/trained_common_voice/vits_vctk-June-05-2022_03+45PM-0cf3265a/speakers.json",
+        "d_vector_file": null,
+        "speaker_embedding_channels": 256,
+        "use_d_vector_file": false,
+        "d_vector_dim": 0,
+        "detach_dp_input": true,
+        "use_language_embedding": false,
+        "embedded_language_dim": 4,
+        "num_languages": 0,
+        "language_ids_file": null,
+        "use_speaker_encoder_as_loss": false,
+        "speaker_encoder_config_path": "",
+        "speaker_encoder_model_path": "",
+        "condition_dp_on_speaker": true,
+        "freeze_encoder": false,
+        "freeze_DP": false,
+        "freeze_PE": false,
+        "freeze_flow_decoder": false,
+        "freeze_waveform_decoder": false
+    },
+    "lr_gen": 0.0002,
+    "lr_disc": 0.0002,
+    "lr_scheduler_gen": "ExponentialLR",
+    "lr_scheduler_gen_params": {
+        "gamma": 0.999875,
+        "last_epoch": -1
+    },
+    "lr_scheduler_disc": "ExponentialLR",
+    "lr_scheduler_disc_params": {
+        "gamma": 0.999875,
+        "last_epoch": -1
+    },
+    "kl_loss_alpha": 1.0,
+    "disc_loss_alpha": 1.0,
+    "gen_loss_alpha": 1.0,
+    "feat_loss_alpha": 1.0,
+    "mel_loss_alpha": 45.0,
+    "dur_loss_alpha": 1.0,
+    "speaker_encoder_loss_alpha": 1.0,
+    "return_wav": true,
+    "r": 1,
+    "num_speakers": 0,
+    "use_speaker_embedding": true,
+    "speakers_file": "/run/media/opensuse/Barracuda/Models/TTS_new/trained_common_voice/vits_vctk-June-05-2022_03+45PM-0cf3265a/speakers.json",
+    "speaker_embedding_channels": 256,
+    "language_ids_file": null,
+    "use_language_embedding": false,
+    "use_d_vector_file": false,
+    "d_vector_file": null,
+    "d_vector_dim": 0
+}
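
One consistency check worth knowing when editing this config: the HiFi-GAN-style VITS decoder turns one latent frame into hop_length waveform samples, so the product of upsample_rates_decoder must equal audio.hop_length (here 8 × 8 × 2 × 2 = 256). A quick sketch of the check, with the values copied from the config above:

```python
import math

# Values copied from config.json above.
upsample_rates_decoder = [8, 8, 2, 2]
hop_length = 256

# Each spectrogram frame is upsampled to exactly one hop of audio,
# so the cumulative upsampling factor must match the STFT hop length.
assert math.prod(upsample_rates_decoder) == hop_length
```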
events.out.tfevents.1654436713.localhost.localdomain ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e92fd5923439c12dd6bca13bf200ff0563abc501ab6c6e149ec0d42197a2eb4
+size 34169856
events.out.tfevents.1654765423.localhost.localdomain ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21bab98923e9d5118b49b4b6a40667fb96fe241f40a561bb0da22e9178fa68d3
+size 30282744
info_run.txt ADDED
@@ -0,0 +1,7 @@
+sha commit coquitts: 0cf3265a4686d7e856bd472cdaf1572d61cab2b8
+
+PYTORCH_CUDA_ALLOC_CONF="max_split_size_mb:25" CUDA_VISIBLE_DEVICES=1 python recipes/common_voice/vits/train_vits.py
+
+CUDA_VISIBLE_DEVICES=0 tts-server --model_path "/run/media/opensuse/Barracuda/Models/TTS_new/trained_common_voice/vits_vctk-June-05-2022_03+45PM-0cf3265a/best_model.pth" --config_path "/run/media/opensuse/Barracuda/Models/TTS_new/trained_common_voice/vits_vctk-June-05-2022_03+45PM-0cf3265a/config.json"
+
+
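
The tts-server command above serves the model over HTTP; the same checkpoint can also be driven in-process through Coqui's Synthesizer class. A sketch, assuming the TTS version pinned by the commit hash above (the constructor signature has shifted between releases), with the speaker name left as a placeholder to be picked from speakers.json:

```python
from TTS.utils.synthesizer import Synthesizer

run_dir = ("/run/media/opensuse/Barracuda/Models/TTS_new/trained_common_voice/"
           "vits_vctk-June-05-2022_03+45PM-0cf3265a")

# Mirrors the tts-server invocation above, but from Python.
synthesizer = Synthesizer(
    tts_checkpoint=run_dir + "/best_model.pth",
    tts_config_path=run_dir + "/config.json",
    use_cuda=True,
)

# The model is multi-speaker (use_speaker_embedding=true), so a speaker
# name from speakers.json must be supplied; the value here is a placeholder.
wav = synthesizer.tts("Buongiorno, come stai?", speaker_name="<id from speakers.json>")
synthesizer.save_wav(wav, "out.wav")
```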
speakers.json ADDED
The diff for this file is too large to render.
 
train_vits.py ADDED
@@ -0,0 +1,139 @@
+import os
+
+from trainer import Trainer, TrainerArgs
+
+from TTS.config.shared_configs import BaseAudioConfig
+from TTS.tts.configs.shared_configs import BaseDatasetConfig
+from TTS.tts.configs.vits_config import VitsConfig
+from TTS.tts.datasets import load_tts_samples
+from TTS.tts.models.vits import Vits, VitsArgs
+from TTS.tts.utils.speakers import SpeakerManager
+from TTS.tts.utils.text.tokenizer import TTSTokenizer
+from TTS.utils.audio import AudioProcessor
+
+# to read the TSV files from Common Voice
+import pandas as pd
+
+# output_path = '/media/popos/Barracuda/Models/TTS_new/trained_common_voice'
+# dataset_path = "/media/popos/Barracuda/Datasets/CommonVoiceMozillaIta/it_29-03-2021/cv-corpus-6.1-2020-12-11/it"
+output_path = '/run/media/opensuse/Barracuda/Models/TTS_new/trained_common_voice'
+dataset_path = "/run/media/opensuse/Barracuda/Datasets/CommonVoiceMozillaIta/cv-corpus-9.0-2022-04-27/it"
+
+pretrained_path = '/run/media/opensuse/Barracuda/Models/TTS_new/trained_common_voice/vits_vctk-June-05-2022_03+45PM-0cf3265a/'
+
+dataset_config = BaseDatasetConfig(
+    name="vctk", meta_file_train="", language="it-it", path=dataset_path
+)
+
+# custom formatter implementation
+
+
+def commonvoice_formatter(root_path, manifest_file, **kwargs):
+    # The root path contains train.tsv, dev.tsv, and test.tsv,
+    # or use validated.tsv, which contains all of them.
+    txt_file = os.path.join(root_path, 'train.tsv')
+    df = pd.read_csv(txt_file, sep='\t')
+    items = []
+    for _, data in df.iterrows():
+        items.append({
+            "text": data['sentence'],
+            "audio_file": os.path.join(root_path, 'clips', data['path']),
+            "speaker_name": data['client_id']
+        })
+    return items
+
+
+audio_config = BaseAudioConfig(
+    sample_rate=22050,
+    win_length=1024,
+    hop_length=256,
+    num_mels=80,
+    preemphasis=0.0,
+    ref_level_db=20,
+    log_func="np.log",
+    do_trim_silence=True,
+    trim_db=23.0,
+    mel_fmin=0,
+    mel_fmax=None,
+    spec_gain=1.0,
+    signal_norm=False,
+    do_amp_to_db_linear=False,
+    resample=True,
+)
+
+vitsArgs = VitsArgs(
+    use_speaker_embedding=True,
+)
+
+config = VitsConfig(
+    model_args=vitsArgs,
+    audio=audio_config,
+    run_name="vits_vctk",
+    batch_size=32,
+    eval_batch_size=16,
+    batch_group_size=5,
+    num_loader_workers=4,
+    num_eval_loader_workers=4,
+    run_eval=True,
+    test_delay_epochs=-1,
+    epochs=1000,
+    text_cleaner="english_cleaners",
+    use_phonemes=False,
+    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
+    compute_input_seq_cache=True,
+    print_step=25,
+    print_eval=False,
+    mixed_precision=True,
+    max_text_len=325,  # increase this if you have more than 16 GB of VRAM
+    output_path=output_path,
+    datasets=[dataset_config],
+)
+
+# INITIALIZE THE AUDIO PROCESSOR
+# The audio processor is used for feature extraction and audio I/O.
+# It mainly serves the dataloader and the training loggers.
+ap = AudioProcessor.init_from_config(config)
+
+# INITIALIZE THE TOKENIZER
+# The tokenizer converts text to sequences of token IDs.
+# The config is updated with the default characters if they are not defined in it.
+tokenizer, config = TTSTokenizer.init_from_config(config)
+
+# LOAD DATA SAMPLES
+# Each sample is a list of ```[text, audio_file_path, speaker_name]```
+# You can define your own sample loader returning the list of samples,
+# or define a custom formatter and pass it to `load_tts_samples`.
+# Check `TTS.tts.datasets.load_tts_samples` for more details.
+train_samples, eval_samples = load_tts_samples(
+    dataset_config, eval_split=True, formatter=commonvoice_formatter)
+
+# init speaker manager for multi-speaker training
+# it maps speaker IDs to speaker names in the model and the data loader
+speaker_manager = SpeakerManager()
+speaker_manager.set_speaker_ids_from_data(train_samples + eval_samples)
+config.model_args.num_speakers = speaker_manager.num_speakers
+
+# init model
+model = Vits(config, ap, tokenizer, speaker_manager)
+
+# init the trainer and 🚀
+if pretrained_path:
+    trainer = Trainer(
+        TrainerArgs(
+            continue_path=pretrained_path,
+        ),
+        config,
+        output_path,
+        model=model,
+        train_samples=train_samples,
+        eval_samples=eval_samples,
+    )
+else:
+    trainer = Trainer(
+        TrainerArgs(),
+        config,
+        output_path,
+        model=model,
+        train_samples=train_samples,
+        eval_samples=eval_samples,
+    )
+trainer.fit()
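
To see the shape of the samples this formatter hands to load_tts_samples, here is a sketch that feeds it a throwaway two-row train.tsv (synthetic data, purely illustrative; it assumes commonvoice_formatter from the script above is in scope):

```python
import os
import tempfile

import pandas as pd

# Build a throwaway Common Voice-style train.tsv with the three columns
# the formatter reads: client_id, path, sentence.
root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "clips"))
pd.DataFrame({
    "client_id": ["spk_a", "spk_b"],
    "path": ["a_0001.mp3", "b_0001.mp3"],
    "sentence": ["Buongiorno.", "Arrivederci."],
}).to_csv(os.path.join(root, "train.tsv"), sep="\t", index=False)

# One dict per utterance: text, resolved audio path, and speaker name.
items = commonvoice_formatter(root, manifest_file="")
print(items[0])
# {'text': 'Buongiorno.', 'audio_file': '<root>/clips/a_0001.mp3', 'speaker_name': 'spk_a'}
```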