Siddhant committed on
Commit 3a859bf
1 Parent(s): 4d0e1c4

import from zenodo

Files changed (26)
  1. README.md +50 -0
  2. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/config.yaml +354 -0
  3. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_backward_time.png +0 -0
  4. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_fake_loss.png +0 -0
  5. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_forward_time.png +0 -0
  6. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_loss.png +0 -0
  7. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_optim_step_time.png +0 -0
  8. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_real_loss.png +0 -0
  9. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_train_time.png +0 -0
  10. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_adv_loss.png +0 -0
  11. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_backward_time.png +0 -0
  12. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_dur_loss.png +0 -0
  13. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_feat_match_loss.png +0 -0
  14. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_forward_time.png +0 -0
  15. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_kl_loss.png +0 -0
  16. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_loss.png +0 -0
  17. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_mel_loss.png +0 -0
  18. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_optim_step_time.png +0 -0
  19. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_train_time.png +0 -0
  20. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/gpu_max_cached_mem_GB.png +0 -0
  21. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/iter_time.png +0 -0
  22. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/optim0_lr0.png +0 -0
  23. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/optim1_lr0.png +0 -0
  24. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/train_time.png +0 -0
  25. exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/train.total_count.ave_10best.pth +3 -0
  26. meta.yaml +8 -0
README.md ADDED
@@ -0,0 +1,50 @@
+ ---
+ tags:
+ - espnet
+ - audio
+ - text-to-speech
+ language: ja
+ datasets:
+ - jsut
+ license: cc-by-4.0
+ ---
+ ## ESPnet2 TTS pretrained model
+ ### `kan-bayashi/jsut_vits_prosody`
+ ♻️ Imported from https://zenodo.org/record/5521354/
+
+ This model was trained by kan-bayashi using the jsut/tts1 recipe in [espnet](https://github.com/espnet/espnet/).
+ ### Demo: How to use in ESPnet2
+ ```python
+ # coming soon
+ ```
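
The official demo above is still marked "coming soon". The snippet below is a minimal, hedged sketch of the usual ESPnet2 TTS inference flow, not the author's official demo: it assumes `espnet`, `espnet_model_zoo`, and `soundfile` are installed, and it uses a hypothetical Hugging Face model ID (`espnet/kan-bayashi_jsut_vits_prosody`) that may differ from where this repository is actually hosted.

```python
# Minimal sketch, not the official demo. Assumes espnet + espnet_model_zoo +
# soundfile are installed; the model ID below is an assumption and may differ.
import soundfile as sf
from espnet2.bin.tts_inference import Text2Speech

tts = Text2Speech.from_pretrained("espnet/kan-bayashi_jsut_vits_prosody")

# Plain Japanese text; the jaconv cleaner and pyopenjtalk_prosody g2p from
# config.yaml run inside the model, so no manual phonemization is needed.
out = tts("これはテスト音声です。")
sf.write("out.wav", out["wav"].numpy(), tts.fs, "PCM_16")
```

The generated waveform is sampled at 22050 Hz, matching `sampling_rate` in the training config below.
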
+ ### Citing ESPnet
+ ```BibTex
+ @inproceedings{watanabe2018espnet,
+   author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson {Enrique Yalta Soplin} and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
+   title={{ESPnet}: End-to-End Speech Processing Toolkit},
+   year={2018},
+   booktitle={Proceedings of Interspeech},
+   pages={2207--2211},
+   doi={10.21437/Interspeech.2018-1456},
+   url={http://dx.doi.org/10.21437/Interspeech.2018-1456}
+ }
+ @inproceedings{hayashi2020espnet,
+   title={{Espnet-TTS}: Unified, reproducible, and integratable open source end-to-end text-to-speech toolkit},
+   author={Hayashi, Tomoki and Yamamoto, Ryuichi and Inoue, Katsuki and Yoshimura, Takenori and Watanabe, Shinji and Toda, Tomoki and Takeda, Kazuya and Zhang, Yu and Tan, Xu},
+   booktitle={Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
+   pages={7654--7658},
+   year={2020},
+   organization={IEEE}
+ }
+ ```
+ or arXiv:
+ ```bibtex
+ @misc{watanabe2018espnet,
+   title={ESPnet: End-to-End Speech Processing Toolkit},
+   author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Enrique Yalta Soplin and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
+   year={2018},
+   eprint={1804.00015},
+   archivePrefix={arXiv},
+   primaryClass={cs.CL}
+ }
+ ```
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/config.yaml ADDED
@@ -0,0 +1,354 @@
+ config: ./conf/tuning/train_vits.yaml
+ print_config: false
+ log_level: INFO
+ dry_run: false
+ iterator_type: sequence
+ output_dir: exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody
+ ngpu: 1
+ seed: 777
+ num_workers: 4
+ num_att_plot: 3
+ dist_backend: nccl
+ dist_init_method: env://
+ dist_world_size: 4
+ dist_rank: 0
+ local_rank: 0
+ dist_master_addr: localhost
+ dist_master_port: 34208
+ dist_launcher: null
+ multiprocessing_distributed: true
+ unused_parameters: true
+ sharded_ddp: false
+ cudnn_enabled: true
+ cudnn_benchmark: true
+ cudnn_deterministic: false
+ collect_stats: false
+ write_collected_feats: false
+ max_epoch: 2000
+ patience: null
+ val_scheduler_criterion:
+ - valid
+ - loss
+ early_stopping_criterion:
+ - valid
+ - loss
+ - min
+ best_model_criterion:
+ - - train
+   - total_count
+   - max
+ keep_nbest_models: 10
+ grad_clip: -1
+ grad_clip_type: 2.0
+ grad_noise: false
+ accum_grad: 1
+ no_forward_run: false
+ resume: true
+ train_dtype: float32
+ use_amp: false
+ log_interval: 50
+ use_tensorboard: true
+ use_wandb: false
+ wandb_project: null
+ wandb_id: null
+ wandb_entity: null
+ wandb_name: null
+ wandb_model_log_interval: -1
+ detect_anomaly: false
+ pretrain_path: null
+ init_param: []
+ ignore_init_mismatch: false
+ freeze_param: []
+ num_iters_per_epoch: 500
+ batch_size: 20
+ valid_batch_size: null
+ batch_bins: 3500000
+ valid_batch_bins: null
+ train_shape_file:
+ - exp/tts_stats_raw_linear_spectrogram_phn_jaconv_pyopenjtalk_prosody/train/text_shape.phn
+ - exp/tts_stats_raw_linear_spectrogram_phn_jaconv_pyopenjtalk_prosody/train/speech_shape
+ valid_shape_file:
+ - exp/tts_stats_raw_linear_spectrogram_phn_jaconv_pyopenjtalk_prosody/valid/text_shape.phn
+ - exp/tts_stats_raw_linear_spectrogram_phn_jaconv_pyopenjtalk_prosody/valid/speech_shape
+ batch_type: numel
+ valid_batch_type: null
+ fold_length:
+ - 150
+ - 204800
+ sort_in_batch: descending
+ sort_batch: descending
+ multiple_iterator: false
+ chunk_length: 500
+ chunk_shift_ratio: 0.5
+ num_cache_chunks: 1024
+ train_data_path_and_name_and_type:
+ - - dump/22k/raw/tr_no_dev/text
+   - text
+   - text
+ - - dump/22k/raw/tr_no_dev/wav.scp
+   - speech
+   - sound
+ valid_data_path_and_name_and_type:
+ - - dump/22k/raw/dev/text
+   - text
+   - text
+ - - dump/22k/raw/dev/wav.scp
+   - speech
+   - sound
+ allow_variable_data_keys: false
+ max_cache_size: 0.0
+ max_cache_fd: 32
+ valid_max_cache_size: null
+ optim: adamw
+ optim_conf:
+     lr: 0.0002
+     betas:
+     - 0.8
+     - 0.99
+     eps: 1.0e-09
+     weight_decay: 0.0
+ scheduler: exponentiallr
+ scheduler_conf:
+     gamma: 0.999875
+ optim2: adamw
+ optim2_conf:
+     lr: 0.0002
+     betas:
+     - 0.8
+     - 0.99
+     eps: 1.0e-09
+     weight_decay: 0.0
+ scheduler2: exponentiallr
+ scheduler2_conf:
+     gamma: 0.999875
+ generator_first: false
+ token_list:
+ - <blank>
+ - <unk>
+ - a
+ - o
+ - i
+ - '['
+ - '#'
+ - u
+ - ']'
+ - e
+ - k
+ - n
+ - t
+ - r
+ - s
+ - N
+ - m
+ - _
+ - sh
+ - d
+ - g
+ - ^
+ - $
+ - w
+ - cl
+ - h
+ - y
+ - b
+ - j
+ - ts
+ - ch
+ - z
+ - p
+ - f
+ - ky
+ - ry
+ - gy
+ - hy
+ - ny
+ - by
+ - my
+ - py
+ - v
+ - dy
+ - '?'
+ - ty
+ - <sos/eos>
+ odim: null
+ model_conf: {}
+ use_preprocessor: true
+ token_type: phn
+ bpemodel: null
+ non_linguistic_symbols: null
+ cleaner: jaconv
+ g2p: pyopenjtalk_prosody
+ feats_extract: linear_spectrogram
+ feats_extract_conf:
+     n_fft: 1024
+     hop_length: 256
+     win_length: null
+ normalize: null
+ normalize_conf: {}
+ tts: vits
+ tts_conf:
+     generator_type: vits_generator
+     generator_params:
+         hidden_channels: 192
+         spks: -1
+         global_channels: -1
+         segment_size: 32
+         text_encoder_attention_heads: 2
+         text_encoder_ffn_expand: 4
+         text_encoder_blocks: 6
+         text_encoder_positionwise_layer_type: conv1d
+         text_encoder_positionwise_conv_kernel_size: 3
+         text_encoder_positional_encoding_layer_type: rel_pos
+         text_encoder_self_attention_layer_type: rel_selfattn
+         text_encoder_activation_type: swish
+         text_encoder_normalize_before: true
+         text_encoder_dropout_rate: 0.1
+         text_encoder_positional_dropout_rate: 0.0
+         text_encoder_attention_dropout_rate: 0.1
+         use_macaron_style_in_text_encoder: true
+         use_conformer_conv_in_text_encoder: false
+         text_encoder_conformer_kernel_size: -1
+         decoder_kernel_size: 7
+         decoder_channels: 512
+         decoder_upsample_scales:
+         - 8
+         - 8
+         - 2
+         - 2
+         decoder_upsample_kernel_sizes:
+         - 16
+         - 16
+         - 4
+         - 4
+         decoder_resblock_kernel_sizes:
+         - 3
+         - 7
+         - 11
+         decoder_resblock_dilations:
+         - - 1
+           - 3
+           - 5
+         - - 1
+           - 3
+           - 5
+         - - 1
+           - 3
+           - 5
+         use_weight_norm_in_decoder: true
+         posterior_encoder_kernel_size: 5
+         posterior_encoder_layers: 16
+         posterior_encoder_stacks: 1
+         posterior_encoder_base_dilation: 1
+         posterior_encoder_dropout_rate: 0.0
+         use_weight_norm_in_posterior_encoder: true
+         flow_flows: 4
+         flow_kernel_size: 5
+         flow_base_dilation: 1
+         flow_layers: 4
+         flow_dropout_rate: 0.0
+         use_weight_norm_in_flow: true
+         use_only_mean_in_flow: true
+         stochastic_duration_predictor_kernel_size: 3
+         stochastic_duration_predictor_dropout_rate: 0.5
+         stochastic_duration_predictor_flows: 4
+         stochastic_duration_predictor_dds_conv_layers: 3
+         vocabs: 47
+         aux_channels: 513
+     discriminator_type: hifigan_multi_scale_multi_period_discriminator
+     discriminator_params:
+         scales: 1
+         scale_downsample_pooling: AvgPool1d
+         scale_downsample_pooling_params:
+             kernel_size: 4
+             stride: 2
+             padding: 2
+         scale_discriminator_params:
+             in_channels: 1
+             out_channels: 1
+             kernel_sizes:
+             - 15
+             - 41
+             - 5
+             - 3
+             channels: 128
+             max_downsample_channels: 1024
+             max_groups: 16
+             bias: true
+             downsample_scales:
+             - 2
+             - 2
+             - 4
+             - 4
+             - 1
+             nonlinear_activation: LeakyReLU
+             nonlinear_activation_params:
+                 negative_slope: 0.1
+             use_weight_norm: true
+             use_spectral_norm: false
+         follow_official_norm: false
+         periods:
+         - 2
+         - 3
+         - 5
+         - 7
+         - 11
+         period_discriminator_params:
+             in_channels: 1
+             out_channels: 1
+             kernel_sizes:
+             - 5
+             - 3
+             channels: 32
+             downsample_scales:
+             - 3
+             - 3
+             - 3
+             - 3
+             - 1
+             max_downsample_channels: 1024
+             bias: true
+             nonlinear_activation: LeakyReLU
+             nonlinear_activation_params:
+                 negative_slope: 0.1
+             use_weight_norm: true
+             use_spectral_norm: false
+     generator_adv_loss_params:
+         average_by_discriminators: false
+         loss_type: mse
+     discriminator_adv_loss_params:
+         average_by_discriminators: false
+         loss_type: mse
+     feat_match_loss_params:
+         average_by_discriminators: false
+         average_by_layers: false
+         include_final_outputs: true
+     mel_loss_params:
+         fs: 22050
+         n_fft: 1024
+         hop_length: 256
+         win_length: null
+         window: hann
+         n_mels: 80
+         fmin: 0
+         fmax: null
+         log_base: null
+     lambda_adv: 1.0
+     lambda_mel: 45.0
+     lambda_feat_match: 2.0
+     lambda_dur: 1.0
+     lambda_kl: 1.0
+     sampling_rate: 22050
+     cache_generator_outputs: true
+ pitch_extract: null
+ pitch_extract_conf: {}
+ pitch_normalize: null
+ pitch_normalize_conf: {}
+ energy_extract: null
+ energy_extract_conf: {}
+ energy_normalize: null
+ energy_normalize_conf: {}
+ required:
+ - output_dir
+ - token_list
+ version: 0.10.3a1
+ distributed: true
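
The dumped config.yaml above fully specifies the preprocessing front-end (jaconv cleaner, pyopenjtalk_prosody g2p, linear spectrogram features) and the VITS generator/discriminator architecture. For quick inspection it can be parsed with PyYAML; the sketch below is an assumption-laden convenience, requiring PyYAML and a local clone of this repository so the path exists.

```python
import yaml

# Path inside this repository (adjust if the files are stored elsewhere).
config_path = "exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/config.yaml"

with open(config_path, encoding="utf-8") as f:
    config = yaml.safe_load(f)

# A few fields that characterize this model.
print(config["tts"])                        # vits
print(config["g2p"])                        # pyopenjtalk_prosody
print(config["tts_conf"]["sampling_rate"])  # 22050
print(len(config["token_list"]))            # 47, matching the generator's vocabs
```
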
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_backward_time.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_fake_loss.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_forward_time.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_loss.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_optim_step_time.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_real_loss.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_train_time.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_adv_loss.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_backward_time.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_dur_loss.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_feat_match_loss.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_forward_time.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_kl_loss.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_loss.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_mel_loss.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_optim_step_time.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_train_time.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/gpu_max_cached_mem_GB.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/iter_time.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/optim0_lr0.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/optim1_lr0.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/train_time.png ADDED
exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/train.total_count.ave_10best.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f9a5cbb78c63d18672287bdc324e4790ec12464bab07da2ba5c7c16c2ad990a7
+ size 372535375
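
The checkpoint is tracked with Git LFS, so a plain clone may contain only the pointer above rather than the ~372 MB weights. After fetching the real file (for example with `git lfs pull`), it can be checked against the digest and size recorded in the pointer. A minimal sketch using only the Python standard library, assuming the file has been downloaded to the path below:

```python
import hashlib
import os

path = "exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/train.total_count.ave_10best.pth"

# Values recorded in the Git LFS pointer above.
expected_sha256 = "f9a5cbb78c63d18672287bdc324e4790ec12464bab07da2ba5c7c16c2ad990a7"
expected_size = 372535375

# Hash the file in 1 MiB chunks to avoid loading it all into memory.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_sha256, "sha256 mismatch"
print("checkpoint matches the LFS pointer")
```
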
meta.yaml ADDED
@@ -0,0 +1,8 @@
+ espnet: 0.10.3a2
+ files:
+   model_file: exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/train.total_count.ave_10best.pth
+ python: "3.7.3 (default, Mar 27 2019, 22:11:17) \n[GCC 7.3.0]"
+ timestamp: 1632317002.149092
+ torch: 1.7.1
+ yaml_files:
+   train_config: exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/config.yaml
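
meta.yaml ties the upload together: it records the packaging ESPnet version, the Python/PyTorch versions used, and the in-repository paths of the checkpoint and training config. A short sketch of reading it (assumes PyYAML and a local clone):

```python
import yaml

with open("meta.yaml", encoding="utf-8") as f:
    meta = yaml.safe_load(f)

print(meta["espnet"], meta["torch"])                 # 0.10.3a2 1.7.1
model_file = meta["files"]["model_file"]             # path to the .pth checkpoint
train_config = meta["yaml_files"]["train_config"]    # path to config.yaml
print(model_file)
print(train_config)
```

These two paths correspond to the `model_file` and `train_config` arguments that ESPnet2's inference classes typically accept when building the model from a local copy instead of a downloaded tag.
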