|
--- |
|
tags: |
|
- espnet |
|
- audio |
|
- text-to-speech |
|
language: zh |
|
datasets: |
|
- aishell3 |
|
license: cc-by-4.0 |
|
--- |
|
|
|
## ESPnet2 TTS model |
|
|
|
This model was trained by winniech using the aishell3 recipe in [espnet](https://github.com/espnet/espnet/).
|
|
|
|
|
## TTS config |
|
|
|
``` |
|
config: conf/train.yaml |
|
print_config: false |
|
log_level: INFO |
|
dry_run: false |
|
iterator_type: sequence |
|
output_dir: exp/22k/tts_train_raw_phn_pypinyin_g2p_phone |
|
ngpu: 1 |
|
seed: 777 |
|
num_workers: 4 |
|
num_att_plot: 3 |
|
dist_backend: nccl |
|
dist_init_method: env:// |
|
dist_world_size: null |
|
dist_rank: null |
|
local_rank: 0 |
|
dist_master_addr: null |
|
dist_master_port: null |
|
dist_launcher: null |
|
multiprocessing_distributed: false |
|
unused_parameters: true |
|
sharded_ddp: false |
|
cudnn_enabled: true |
|
cudnn_benchmark: false |
|
cudnn_deterministic: false |
|
collect_stats: false |
|
write_collected_feats: false |
|
max_epoch: 1000 |
|
patience: null |
|
val_scheduler_criterion: |
|
- valid |
|
- loss |
|
early_stopping_criterion: |
|
- valid |
|
- loss |
|
- min |
|
best_model_criterion: |
|
- - train |
|
- total_count |
|
- max |
|
keep_nbest_models: 10 |
|
nbest_averaging_interval: 0 |
|
grad_clip: -1 |
|
grad_clip_type: 2.0 |
|
grad_noise: false |
|
accum_grad: 1 |
|
no_forward_run: false |
|
resume: true |
|
train_dtype: float32 |
|
use_amp: false |
|
log_interval: 50 |
|
use_matplotlib: true |
|
use_tensorboard: true |
|
create_graph_in_tensorboard: false |
|
use_wandb: false |
|
wandb_project: null |
|
wandb_id: null |
|
wandb_entity: null |
|
wandb_name: null |
|
wandb_model_log_interval: -1 |
|
detect_anomaly: false |
|
pretrain_path: null |
|
init_param: [] |
|
ignore_init_mismatch: false |
|
freeze_param: [] |
|
num_iters_per_epoch: 1000 |
|
batch_size: 20 |
|
valid_batch_size: null |
|
batch_bins: 1250000 |
|
valid_batch_bins: null |
|
train_shape_file: |
|
- exp/22k/tts_stats_raw_linear_spectrogram_phn_pypinyin_g2p_phone/train/text_shape.phn |
|
- exp/22k/tts_stats_raw_linear_spectrogram_phn_pypinyin_g2p_phone/train/speech_shape |
|
valid_shape_file: |
|
- exp/22k/tts_stats_raw_linear_spectrogram_phn_pypinyin_g2p_phone/valid/text_shape.phn |
|
- exp/22k/tts_stats_raw_linear_spectrogram_phn_pypinyin_g2p_phone/valid/speech_shape |
|
batch_type: numel |
|
valid_batch_type: null |
|
fold_length: |
|
- 150 |
|
- 204800 |
|
sort_in_batch: descending |
|
sort_batch: descending |
|
multiple_iterator: false |
|
chunk_length: 500 |
|
chunk_shift_ratio: 0.5 |
|
num_cache_chunks: 1024 |
|
chunk_excluded_key_prefixes: [] |
|
train_data_path_and_name_and_type: |
|
- - dump/22k/raw/train_no_dev/text |
|
- text |
|
- text |
|
- - dump/22k/raw/train_no_dev/wav.scp |
|
- speech |
|
- sound |
|
- - dump/22k/xvector/train_no_dev/xvector.scp |
|
- spembs |
|
- kaldi_ark |
|
valid_data_path_and_name_and_type: |
|
- - dump/22k/raw/dev/text |
|
- text |
|
- text |
|
- - dump/22k/raw/dev/wav.scp |
|
- speech |
|
- sound |
|
- - dump/22k/xvector/dev/xvector.scp |
|
- spembs |
|
- kaldi_ark |
|
allow_variable_data_keys: false |
|
max_cache_size: 0.0 |
|
max_cache_fd: 32 |
|
valid_max_cache_size: null |
|
exclude_weight_decay: false |
|
exclude_weight_decay_conf: {} |
|
optim: adamw |
|
optim_conf: |
|
lr: 0.0002 |
|
betas: |
|
- 0.8 |
|
- 0.99 |
|
eps: 1.0e-09 |
|
weight_decay: 0.0 |
|
scheduler: exponentiallr |
|
scheduler_conf: |
|
gamma: 0.999875 |
|
optim2: adamw |
|
optim2_conf: |
|
lr: 0.0002 |
|
betas: |
|
- 0.8 |
|
- 0.99 |
|
eps: 1.0e-09 |
|
weight_decay: 0.0 |
|
scheduler2: exponentiallr |
|
scheduler2_conf: |
|
gamma: 0.999875 |
|
generator_first: false |
|
token_list: |
|
- <blank> |
|
- <unk> |
|
- d |
|
- sh |
|
- j |
|
- i4 |
|
- zh |
|
- l |
|
- x |
|
- e |
|
- b |
|
- g |
|
- i1 |
|
- h |
|
- q |
|
- m |
|
- t |
|
- i2 |
|
- u4 |
|
- z |
|
- ch |
|
- i3 |
|
- f |
|
- s |
|
- n |
|
- iou3 |
|
- r |
|
- ian4 |
|
- ong1 |
|
- uei4 |
|
- e4 |
|
- en2 |
|
- ai4 |
|
- k |
|
- ing2 |
|
- a1 |
|
- uo3 |
|
- u3 |
|
- ao4 |
|
- p |
|
- an1 |
|
- eng2 |
|
- e2 |
|
- in1 |
|
- c |
|
- ai2 |
|
- an4 |
|
- ian2 |
|
- u2 |
|
- ang4 |
|
- ian1 |
|
- ai3 |
|
- ing1 |
|
- ao3 |
|
- uo4 |
|
- ian3 |
|
- ing4 |
|
- ü4 |
|
- ang1 |
|
- u1 |
|
- iao4 |
|
- eng1 |
|
- iou4 |
|
- a4 |
|
- üan2 |
|
- ie4 |
|
- ou4 |
|
- er4 |
|
- en1 |
|
- ong2 |
|
- e1 |
|
- an3 |
|
- ei4 |
|
- uo2 |
|
- ou3 |
|
- ang2 |
|
- iang4 |
|
- ou1 |
|
- ang3 |
|
- an2 |
|
- eng4 |
|
- ong4 |
|
- uan4 |
|
- a3 |
|
- ia4 |
|
- ia1 |
|
- iao1 |
|
- iang1 |
|
- iou2 |
|
- uo1 |
|
- ei3 |
|
- iao3 |
|
- in4 |
|
- e3 |
|
- ü3 |
|
- iang3 |
|
- uei2 |
|
- en3 |
|
- uan1 |
|
- ie3 |
|
- ao1 |
|
- ai1 |
|
- üe4 |
|
- ü2 |
|
- ing3 |
|
- en4 |
|
- uei1 |
|
- er2 |
|
- uan3 |
|
- ü1 |
|
- in3 |
|
- en |
|
- üe2 |
|
- ie2 |
|
- ei2 |
|
- ua4 |
|
- uan2 |
|
- in2 |
|
- a2 |
|
- ie1 |
|
- iang2 |
|
- ou2 |
|
- ong3 |
|
- uang3 |
|
- eng3 |
|
- uen1 |
|
- uai4 |
|
- ün4 |
|
- uang4 |
|
- uei3 |
|
- uen2 |
|
- uen4 |
|
- i |
|
- iong4 |
|
- v3 |
|
- iao2 |
|
- üan4 |
|
- uang1 |
|
- ei1 |
|
- o2 |
|
- iou1 |
|
- uang2 |
|
- a |
|
- ao2 |
|
- o1 |
|
- ua2 |
|
- uen3 |
|
- ua1 |
|
- v4 |
|
- üan3 |
|
- ün1 |
|
- üe1 |
|
- ün2 |
|
- o4 |
|
- er3 |
|
- iong3 |
|
- üan1 |
|
- ia3 |
|
- ia2 |
|
- iong1 |
|
- üe3 |
|
- ve4 |
|
- iong2 |
|
- uai2 |
|
- er |
|
- ua3 |
|
- uai1 |
|
- ou |
|
- ün3 |
|
- uai3 |
|
- ia |
|
- uo |
|
- o3 |
|
- v2 |
|
- ueng1 |
|
- o |
|
- ei |
|
- ua |
|
- io1 |
|
- <sos/eos> |
|
odim: null |
|
model_conf: {} |
|
use_preprocessor: true |
|
token_type: phn |
|
bpemodel: null |
|
non_linguistic_symbols: null |
|
cleaner: null |
|
g2p: pypinyin_g2p_phone |
|
feats_extract: linear_spectrogram |
|
feats_extract_conf: |
|
n_fft: 1024 |
|
hop_length: 256 |
|
win_length: null |
|
normalize: null |
|
normalize_conf: {} |
|
tts: vits |
|
tts_conf: |
|
generator_type: vits_generator |
|
generator_params: |
|
hidden_channels: 192 |
|
spks: -1 |
|
spk_embed_dim: 512 |
|
global_channels: 256 |
|
segment_size: 32 |
|
text_encoder_attention_heads: 2 |
|
text_encoder_ffn_expand: 4 |
|
text_encoder_blocks: 6 |
|
text_encoder_positionwise_layer_type: conv1d |
|
text_encoder_positionwise_conv_kernel_size: 3 |
|
text_encoder_positional_encoding_layer_type: rel_pos |
|
text_encoder_self_attention_layer_type: rel_selfattn |
|
text_encoder_activation_type: swish |
|
text_encoder_normalize_before: true |
|
text_encoder_dropout_rate: 0.1 |
|
text_encoder_positional_dropout_rate: 0.0 |
|
text_encoder_attention_dropout_rate: 0.1 |
|
use_macaron_style_in_text_encoder: true |
|
use_conformer_conv_in_text_encoder: false |
|
text_encoder_conformer_kernel_size: -1 |
|
decoder_kernel_size: 7 |
|
decoder_channels: 512 |
|
decoder_upsample_scales: |
|
- 8 |
|
- 8 |
|
- 2 |
|
- 2 |
|
decoder_upsample_kernel_sizes: |
|
- 16 |
|
- 16 |
|
- 4 |
|
- 4 |
|
decoder_resblock_kernel_sizes: |
|
- 3 |
|
- 7 |
|
- 11 |
|
decoder_resblock_dilations: |
|
- - 1 |
|
- 3 |
|
- 5 |
|
- - 1 |
|
- 3 |
|
- 5 |
|
- - 1 |
|
- 3 |
|
- 5 |
|
use_weight_norm_in_decoder: true |
|
posterior_encoder_kernel_size: 5 |
|
posterior_encoder_layers: 16 |
|
posterior_encoder_stacks: 1 |
|
posterior_encoder_base_dilation: 1 |
|
posterior_encoder_dropout_rate: 0.0 |
|
use_weight_norm_in_posterior_encoder: true |
|
flow_flows: 4 |
|
flow_kernel_size: 5 |
|
flow_base_dilation: 1 |
|
flow_layers: 4 |
|
flow_dropout_rate: 0.0 |
|
use_weight_norm_in_flow: true |
|
use_only_mean_in_flow: true |
|
stochastic_duration_predictor_kernel_size: 3 |
|
stochastic_duration_predictor_dropout_rate: 0.5 |
|
stochastic_duration_predictor_flows: 4 |
|
stochastic_duration_predictor_dds_conv_layers: 3 |
|
vocabs: 180 |
|
aux_channels: 513 |
|
discriminator_type: hifigan_multi_scale_multi_period_discriminator |
|
discriminator_params: |
|
scales: 1 |
|
scale_downsample_pooling: AvgPool1d |
|
scale_downsample_pooling_params: |
|
kernel_size: 4 |
|
stride: 2 |
|
padding: 2 |
|
scale_discriminator_params: |
|
in_channels: 1 |
|
out_channels: 1 |
|
kernel_sizes: |
|
- 15 |
|
- 41 |
|
- 5 |
|
- 3 |
|
channels: 128 |
|
max_downsample_channels: 1024 |
|
max_groups: 16 |
|
bias: true |
|
downsample_scales: |
|
- 2 |
|
- 2 |
|
- 4 |
|
- 4 |
|
- 1 |
|
nonlinear_activation: LeakyReLU |
|
nonlinear_activation_params: |
|
negative_slope: 0.1 |
|
use_weight_norm: true |
|
use_spectral_norm: false |
|
follow_official_norm: false |
|
periods: |
|
- 2 |
|
- 3 |
|
- 5 |
|
- 7 |
|
- 11 |
|
period_discriminator_params: |
|
in_channels: 1 |
|
out_channels: 1 |
|
kernel_sizes: |
|
- 5 |
|
- 3 |
|
channels: 32 |
|
downsample_scales: |
|
- 3 |
|
- 3 |
|
- 3 |
|
- 3 |
|
- 1 |
|
max_downsample_channels: 1024 |
|
bias: true |
|
nonlinear_activation: LeakyReLU |
|
nonlinear_activation_params: |
|
negative_slope: 0.1 |
|
use_weight_norm: true |
|
use_spectral_norm: false |
|
generator_adv_loss_params: |
|
average_by_discriminators: false |
|
loss_type: mse |
|
discriminator_adv_loss_params: |
|
average_by_discriminators: false |
|
loss_type: mse |
|
feat_match_loss_params: |
|
average_by_discriminators: false |
|
average_by_layers: false |
|
include_final_outputs: true |
|
mel_loss_params: |
|
fs: 22050 |
|
n_fft: 1024 |
|
hop_length: 256 |
|
win_length: null |
|
window: hann |
|
n_mels: 80 |
|
fmin: 0 |
|
fmax: null |
|
log_base: null |
|
lambda_adv: 1.0 |
|
lambda_mel: 45.0 |
|
lambda_feat_match: 2.0 |
|
lambda_dur: 1.0 |
|
lambda_kl: 1.0 |
|
sampling_rate: 22050 |
|
cache_generator_outputs: true |
|
pitch_extract: null |
|
pitch_extract_conf: {} |
|
pitch_normalize: null |
|
pitch_normalize_conf: {} |
|
energy_extract: null |
|
energy_extract_conf: {} |
|
energy_normalize: null |
|
energy_normalize_conf: {} |
|
required: |
|
- output_dir |
|
- token_list |
|
version: '202301' |
|
distributed: false |
|
``` |
|
|
|
|
|
|
|
|
|
### Citing ESPnet |
|
|
|
```bibtex
|
@inproceedings{watanabe2018espnet, |
|
author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai}, |
|
title={{ESPnet}: End-to-End Speech Processing Toolkit}, |
|
year={2018}, |
|
booktitle={Proceedings of Interspeech}, |
|
pages={2207--2211}, |
|
doi={10.21437/Interspeech.2018-1456}, |
|
url={http://dx.doi.org/10.21437/Interspeech.2018-1456} |
|
} |
|
|
|
|
|
|
|
|
|
@inproceedings{hayashi2020espnet, |
|
title={{Espnet-TTS}: Unified, reproducible, and integratable open source end-to-end text-to-speech toolkit}, |
|
author={Hayashi, Tomoki and Yamamoto, Ryuichi and Inoue, Katsuki and Yoshimura, Takenori and Watanabe, Shinji and Toda, Tomoki and Takeda, Kazuya and Zhang, Yu and Tan, Xu}, |
|
booktitle={Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, |
|
pages={7654--7658}, |
|
year={2020}, |
|
organization={IEEE} |
|
} |
|
``` |
|
|
|
or arXiv: |
|
|
|
```bibtex |
|
@misc{watanabe2018espnet, |
|
title={ESPnet: End-to-End Speech Processing Toolkit}, |
|
author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai}, |
|
year={2018}, |
|
eprint={1804.00015}, |
|
archivePrefix={arXiv}, |
|
primaryClass={cs.CL} |
|
} |
|
``` |
|
|