Siddhant committed
Commit dfbf1f1
1 Parent(s): 438f20d

import from zenodo

Files changed (26)
  1. README.md +50 -0
  2. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/100epoch.pth +3 -0
  3. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/config.yaml +355 -0
  4. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_backward_time.png +0 -0
  5. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_fake_loss.png +0 -0
  6. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_forward_time.png +0 -0
  7. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_loss.png +0 -0
  8. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_optim_step_time.png +0 -0
  9. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_real_loss.png +0 -0
  10. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_train_time.png +0 -0
  11. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_adv_loss.png +0 -0
  12. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_backward_time.png +0 -0
  13. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_dur_loss.png +0 -0
  14. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_feat_match_loss.png +0 -0
  15. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_forward_time.png +0 -0
  16. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_kl_loss.png +0 -0
  17. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_loss.png +0 -0
  18. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_mel_loss.png +0 -0
  19. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_optim_step_time.png +0 -0
  20. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_train_time.png +0 -0
  21. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/gpu_max_cached_mem_GB.png +0 -0
  22. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/iter_time.png +0 -0
  23. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/optim0_lr0.png +0 -0
  24. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/optim1_lr0.png +0 -0
  25. exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/train_time.png +0 -0
  26. meta.yaml +8 -0
README.md ADDED
@@ -0,0 +1,50 @@
+ ---
+ tags:
+ - espnet
+ - audio
+ - text-to-speech
+ language: ja
+ datasets:
+ - jvs
+ license: cc-by-4.0
+ ---
+ ## ESPnet2 TTS pretrained model
+ ### `kan-bayashi/jvs_jvs010_vits_prosody`
+ ♻️ Imported from https://zenodo.org/record/5521494/
+
+ This model was trained by kan-bayashi using the jvs/tts1 recipe in [espnet](https://github.com/espnet/espnet/).
+ ### Demo: How to use in ESPnet2
+ ```python
+ # coming soon
+ ```
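+ In the meantime, the following is a minimal sketch of the usual ESPnet2 inference pattern, not an official snippet (it assumes `espnet` and `espnet_model_zoo` are installed and that the model-zoo tag matches the heading above):
+ ```python
+ # Sketch: resolve the packed model by its tag and synthesize with ESPnet2's Text2Speech.
+ import soundfile as sf
+ from espnet2.bin.tts_inference import Text2Speech
+ from espnet_model_zoo.downloader import ModelDownloader
+
+ d = ModelDownloader()
+ # download_and_unpack returns the config/checkpoint paths expected by Text2Speech.
+ text2speech = Text2Speech(**d.download_and_unpack("kan-bayashi/jvs_jvs010_vits_prosody"))
+
+ # VITS is end-to-end, so no separate vocoder is needed.
+ wav = text2speech("こんにちは、テスト音声です。")["wav"]
+ sf.write("out.wav", wav.view(-1).cpu().numpy(), text2speech.fs)
+ ```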
+ ### Citing ESPnet
+ ```bibtex
+ @inproceedings{watanabe2018espnet,
+   author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson {Enrique Yalta Soplin} and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
+   title={{ESPnet}: End-to-End Speech Processing Toolkit},
+   year={2018},
+   booktitle={Proceedings of Interspeech},
+   pages={2207--2211},
+   doi={10.21437/Interspeech.2018-1456},
+   url={http://dx.doi.org/10.21437/Interspeech.2018-1456}
+ }
+ @inproceedings{hayashi2020espnet,
+   title={{Espnet-TTS}: Unified, reproducible, and integratable open source end-to-end text-to-speech toolkit},
+   author={Hayashi, Tomoki and Yamamoto, Ryuichi and Inoue, Katsuki and Yoshimura, Takenori and Watanabe, Shinji and Toda, Tomoki and Takeda, Kazuya and Zhang, Yu and Tan, Xu},
+   booktitle={Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
+   pages={7654--7658},
+   year={2020},
+   organization={IEEE}
+ }
+ ```
+ or arXiv:
+ ```bibtex
+ @misc{watanabe2018espnet,
+   title={ESPnet: End-to-End Speech Processing Toolkit},
+   author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Enrique Yalta Soplin and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
+   year={2018},
+   eprint={1804.00015},
+   archivePrefix={arXiv},
+   primaryClass={cs.CL}
+ }
+ ```
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/100epoch.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45cbc7be0f3562f52d96ac66b123d6389ccb6047b642bfb5434e6adeedfa6b4d
+ size 372549199
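The three lines above are a Git LFS pointer, not the checkpoint itself; the actual ~372 MB `100epoch.pth` is stored in LFS under the listed sha256 oid and can be fetched with `git lfs pull`. As a Python alternative, a minimal sketch using `huggingface_hub` (the repo id below is a hypothetical placeholder; substitute the repository this commit belongs to):

```python
# Sketch: download the LFS-resolved checkpoint instead of the 3-line pointer file.
from huggingface_hub import hf_hub_download

ckpt_path = hf_hub_download(
    repo_id="<namespace>/jvs_jvs010_vits_prosody",  # hypothetical; use the actual repo id
    filename="exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/100epoch.pth",
)
print(ckpt_path)  # local cached path; size should match 372549199 bytes
```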
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/config.yaml ADDED
@@ -0,0 +1,355 @@
+ config: ./conf/tuning/finetune_vits.v2.yaml
+ print_config: false
+ log_level: INFO
+ dry_run: false
+ iterator_type: sequence
+ output_dir: exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody
+ ngpu: 1
+ seed: 777
+ num_workers: 4
+ num_att_plot: 3
+ dist_backend: nccl
+ dist_init_method: env://
+ dist_world_size: 4
+ dist_rank: 0
+ local_rank: 0
+ dist_master_addr: localhost
+ dist_master_port: 41593
+ dist_launcher: null
+ multiprocessing_distributed: true
+ unused_parameters: true
+ sharded_ddp: false
+ cudnn_enabled: true
+ cudnn_benchmark: true
+ cudnn_deterministic: false
+ collect_stats: false
+ write_collected_feats: false
+ max_epoch: 100
+ patience: null
+ val_scheduler_criterion:
+ - valid
+ - loss
+ early_stopping_criterion:
+ - valid
+ - loss
+ - min
+ best_model_criterion:
+ - - train
+   - total_count
+   - max
+ keep_nbest_models: 10
+ grad_clip: -1
+ grad_clip_type: 2.0
+ grad_noise: false
+ accum_grad: 1
+ no_forward_run: false
+ resume: true
+ train_dtype: float32
+ use_amp: false
+ log_interval: 50
+ use_tensorboard: true
+ use_wandb: false
+ wandb_project: null
+ wandb_id: null
+ wandb_entity: null
+ wandb_name: null
+ wandb_model_log_interval: -1
+ detect_anomaly: false
+ pretrain_path: null
+ init_param:
+ - ../../jsut/tts1/exp/tts_train_vits_raw_phn_jaconv_pyopenjtalk_prosody/latest.pth:tts:tts
+ ignore_init_mismatch: false
+ freeze_param: []
+ num_iters_per_epoch: 1000
+ batch_size: 20
+ valid_batch_size: null
+ batch_bins: 5000000
+ valid_batch_bins: null
+ train_shape_file:
+ - exp/tts_stats_jvs010_raw_linear_spectrogram_phn_jaconv_pyopenjtalk_prosody/train/text_shape.phn
+ - exp/tts_stats_jvs010_raw_linear_spectrogram_phn_jaconv_pyopenjtalk_prosody/train/speech_shape
+ valid_shape_file:
+ - exp/tts_stats_jvs010_raw_linear_spectrogram_phn_jaconv_pyopenjtalk_prosody/valid/text_shape.phn
+ - exp/tts_stats_jvs010_raw_linear_spectrogram_phn_jaconv_pyopenjtalk_prosody/valid/speech_shape
+ batch_type: numel
+ valid_batch_type: null
+ fold_length:
+ - 150
+ - 204800
+ sort_in_batch: descending
+ sort_batch: descending
+ multiple_iterator: false
+ chunk_length: 500
+ chunk_shift_ratio: 0.5
+ num_cache_chunks: 1024
+ train_data_path_and_name_and_type:
+ - - dump/22k/raw/jvs010_tr_no_dev/text
+   - text
+   - text
+ - - dump/22k/raw/jvs010_tr_no_dev/wav.scp
+   - speech
+   - sound
+ valid_data_path_and_name_and_type:
+ - - dump/22k/raw/jvs010_dev/text
+   - text
+   - text
+ - - dump/22k/raw/jvs010_dev/wav.scp
+   - speech
+   - sound
+ allow_variable_data_keys: false
+ max_cache_size: 0.0
+ max_cache_fd: 32
+ valid_max_cache_size: null
+ optim: adamw
+ optim_conf:
+   lr: 0.0001
+   betas:
+   - 0.8
+   - 0.99
+   eps: 1.0e-09
+   weight_decay: 0.0
+ scheduler: exponentiallr
+ scheduler_conf:
+   gamma: 0.999875
+ optim2: adamw
+ optim2_conf:
+   lr: 0.0001
+   betas:
+   - 0.8
+   - 0.99
+   eps: 1.0e-09
+   weight_decay: 0.0
+ scheduler2: exponentiallr
+ scheduler2_conf:
+   gamma: 0.999875
+ generator_first: false
+ token_list:
+ - <blank>
+ - <unk>
+ - a
+ - o
+ - i
+ - '['
+ - '#'
+ - u
+ - ']'
+ - e
+ - k
+ - n
+ - t
+ - r
+ - s
+ - N
+ - m
+ - _
+ - sh
+ - d
+ - g
+ - ^
+ - $
+ - w
+ - cl
+ - h
+ - y
+ - b
+ - j
+ - ts
+ - ch
+ - z
+ - p
+ - f
+ - ky
+ - ry
+ - gy
+ - hy
+ - ny
+ - by
+ - my
+ - py
+ - v
+ - dy
+ - '?'
+ - ty
+ - <sos/eos>
+ odim: null
+ model_conf: {}
+ use_preprocessor: true
+ token_type: phn
+ bpemodel: null
+ non_linguistic_symbols: null
+ cleaner: jaconv
+ g2p: pyopenjtalk_prosody
+ feats_extract: linear_spectrogram
+ feats_extract_conf:
+   n_fft: 1024
+   hop_length: 256
+   win_length: null
+ normalize: null
+ normalize_conf: {}
+ tts: vits
+ tts_conf:
+   generator_type: vits_generator
+   generator_params:
+     hidden_channels: 192
+     spks: -1
+     global_channels: -1
+     segment_size: 32
+     text_encoder_attention_heads: 2
+     text_encoder_ffn_expand: 4
+     text_encoder_blocks: 6
+     text_encoder_positionwise_layer_type: conv1d
+     text_encoder_positionwise_conv_kernel_size: 3
+     text_encoder_positional_encoding_layer_type: rel_pos
+     text_encoder_self_attention_layer_type: rel_selfattn
+     text_encoder_activation_type: swish
+     text_encoder_normalize_before: true
+     text_encoder_dropout_rate: 0.1
+     text_encoder_positional_dropout_rate: 0.0
+     text_encoder_attention_dropout_rate: 0.1
+     use_macaron_style_in_text_encoder: true
+     use_conformer_conv_in_text_encoder: false
+     text_encoder_conformer_kernel_size: -1
+     decoder_kernel_size: 7
+     decoder_channels: 512
+     decoder_upsample_scales:
+     - 8
+     - 8
+     - 2
+     - 2
+     decoder_upsample_kernel_sizes:
+     - 16
+     - 16
+     - 4
+     - 4
+     decoder_resblock_kernel_sizes:
+     - 3
+     - 7
+     - 11
+     decoder_resblock_dilations:
+     - - 1
+       - 3
+       - 5
+     - - 1
+       - 3
+       - 5
+     - - 1
+       - 3
+       - 5
+     use_weight_norm_in_decoder: true
+     posterior_encoder_kernel_size: 5
+     posterior_encoder_layers: 16
+     posterior_encoder_stacks: 1
+     posterior_encoder_base_dilation: 1
+     posterior_encoder_dropout_rate: 0.0
+     use_weight_norm_in_posterior_encoder: true
+     flow_flows: 4
+     flow_kernel_size: 5
+     flow_base_dilation: 1
+     flow_layers: 4
+     flow_dropout_rate: 0.0
+     use_weight_norm_in_flow: true
+     use_only_mean_in_flow: true
+     stochastic_duration_predictor_kernel_size: 3
+     stochastic_duration_predictor_dropout_rate: 0.5
+     stochastic_duration_predictor_flows: 4
+     stochastic_duration_predictor_dds_conv_layers: 3
+     vocabs: 47
+     aux_channels: 513
+   discriminator_type: hifigan_multi_scale_multi_period_discriminator
+   discriminator_params:
+     scales: 1
+     scale_downsample_pooling: AvgPool1d
+     scale_downsample_pooling_params:
+       kernel_size: 4
+       stride: 2
+       padding: 2
+     scale_discriminator_params:
+       in_channels: 1
+       out_channels: 1
+       kernel_sizes:
+       - 15
+       - 41
+       - 5
+       - 3
+       channels: 128
+       max_downsample_channels: 1024
+       max_groups: 16
+       bias: true
+       downsample_scales:
+       - 2
+       - 2
+       - 4
+       - 4
+       - 1
+       nonlinear_activation: LeakyReLU
+       nonlinear_activation_params:
+         negative_slope: 0.1
+       use_weight_norm: true
+       use_spectral_norm: false
+     follow_official_norm: false
+     periods:
+     - 2
+     - 3
+     - 5
+     - 7
+     - 11
+     period_discriminator_params:
+       in_channels: 1
+       out_channels: 1
+       kernel_sizes:
+       - 5
+       - 3
+       channels: 32
+       downsample_scales:
+       - 3
+       - 3
+       - 3
+       - 3
+       - 1
+       max_downsample_channels: 1024
+       bias: true
+       nonlinear_activation: LeakyReLU
+       nonlinear_activation_params:
+         negative_slope: 0.1
+       use_weight_norm: true
+       use_spectral_norm: false
+   generator_adv_loss_params:
+     average_by_discriminators: false
+     loss_type: mse
+   discriminator_adv_loss_params:
+     average_by_discriminators: false
+     loss_type: mse
+   feat_match_loss_params:
+     average_by_discriminators: false
+     average_by_layers: false
+     include_final_outputs: true
+   mel_loss_params:
+     fs: 22050
+     n_fft: 1024
+     hop_length: 256
+     win_length: null
+     window: hann
+     n_mels: 80
+     fmin: 0
+     fmax: null
+     log_base: null
+   lambda_adv: 1.0
+   lambda_mel: 45.0
+   lambda_feat_match: 2.0
+   lambda_dur: 1.0
+   lambda_kl: 1.0
+   sampling_rate: 22050
+   cache_generator_outputs: true
+ pitch_extract: null
+ pitch_extract_conf: {}
+ pitch_normalize: null
+ pitch_normalize_conf: {}
+ energy_extract: null
+ energy_extract_conf: {}
+ energy_normalize: null
+ energy_normalize_conf: {}
+ required:
+ - output_dir
+ - token_list
+ version: 0.10.3a2
+ distributed: true
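Because the config records `tts: vits`, `cleaner: jaconv`, and `g2p: pyopenjtalk_prosody`, the exported files can also be loaded directly, without going through the model zoo. A minimal sketch, assuming an `espnet` installation and that the two files sit at the paths added in this commit:

```python
# Sketch: build the inference frontend straight from the exported config + checkpoint.
import soundfile as sf
from espnet2.bin.tts_inference import Text2Speech

exp_dir = "exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody"
text2speech = Text2Speech(
    train_config=f"{exp_dir}/config.yaml",  # the YAML shown above
    model_file=f"{exp_dir}/100epoch.pth",   # the LFS checkpoint
)

# The jaconv cleaner and pyopenjtalk_prosody g2p run inside the preprocessor,
# so plain Japanese text is accepted; VITS emits 22.05 kHz audio directly.
wav = text2speech("音声合成のテストです。")["wav"]
sf.write("sample.wav", wav.view(-1).cpu().numpy(), text2speech.fs)
```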
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_backward_time.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_fake_loss.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_forward_time.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_loss.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_optim_step_time.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_real_loss.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/discriminator_train_time.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_adv_loss.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_backward_time.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_dur_loss.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_feat_match_loss.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_forward_time.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_kl_loss.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_loss.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_mel_loss.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_optim_step_time.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/generator_train_time.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/gpu_max_cached_mem_GB.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/iter_time.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/optim0_lr0.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/optim1_lr0.png ADDED
exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/images/train_time.png ADDED
meta.yaml ADDED
@@ -0,0 +1,8 @@
+ espnet: 0.10.3a2
+ files:
+   model_file: exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/100epoch.pth
+ python: "3.7.3 (default, Mar 27 2019, 22:11:17) \n[GCC 7.3.0]"
+ timestamp: 1632320193.158154
+ torch: 1.7.1
+ yaml_files:
+   train_config: exp/tts_finetune_jvs010_jsut_vits_raw_phn_jaconv_pyopenjtalk_prosody/config.yaml
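`meta.yaml` acts as the packing manifest, recording the toolkit versions and pointing at the checkpoint and training config. A minimal sketch for reading it (assumes PyYAML and that the relative paths resolve against the repository root):

```python
# Sketch: resolve the files referenced by the packing manifest.
from pathlib import Path
import yaml

meta = yaml.safe_load(Path("meta.yaml").read_text())
print(meta["espnet"], meta["torch"])  # toolkit versions used for training

model_file = Path(meta["files"]["model_file"])
train_config = Path(meta["yaml_files"]["train_config"])
print(model_file.exists(), train_config.exists())
```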