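# ESPnet2 TTS training configuration: joint training of a Conformer-FastSpeech2
# text2mel model with a HiFi-GAN vocoder (tts: joint_text2wav) on phoneme inputs.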
config: ./conf/tuning/train_joint_conformer_fastspeech2_hifigan.v2.yaml
print_config: false
log_level: INFO
dry_run: false
iterator_type: sequence
output_dir: exp/tts_train_joint_conformer_fastspeech2_hifigan.v2_raw_phn_tacotron_g2p_en_no_space
ngpu: 1
seed: 777
num_workers: 4
num_att_plot: 3
dist_backend: nccl
dist_init_method: env://
dist_world_size: 4
dist_rank: 0
local_rank: 0
dist_master_addr: localhost
dist_master_port: 57589
dist_launcher: null
multiprocessing_distributed: true
unused_parameters: true
sharded_ddp: false
cudnn_enabled: true
cudnn_benchmark: false
cudnn_deterministic: false
collect_stats: false
write_collected_feats: false
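# Training-loop control: up to 2000 epochs; the 10 most recent checkpoints are
# kept (best_model_criterion ranks by train/total_count rather than validation loss).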
max_epoch: 2000
patience: null
val_scheduler_criterion:
- valid
- loss
early_stopping_criterion:
- valid
- loss
- min
best_model_criterion:
- - train
- total_count
- max
keep_nbest_models: 10
grad_clip: -1
grad_clip_type: 2.0
grad_noise: false
accum_grad: 1
no_forward_run: false
resume: true
train_dtype: float32
use_amp: false
log_interval: 50
use_tensorboard: true
use_wandb: false
wandb_project: null
wandb_id: null
wandb_entity: null
wandb_name: null
wandb_model_log_interval: -1
detect_anomaly: false
pretrain_path: null
init_param: []
ignore_init_mismatch: false
freeze_param: []
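# Batching and iterator settings: length-based (numel) batching with fold lengths
# for text (150 tokens) and speech (204800 samples).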
num_iters_per_epoch: 500
batch_size: 20
valid_batch_size: null
batch_bins: 5000000
valid_batch_bins: null
train_shape_file:
- exp/tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.best/stats/train/text_shape.phn
- exp/tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.best/stats/train/speech_shape
valid_shape_file:
- exp/tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.best/stats/valid/text_shape.phn
- exp/tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.best/stats/valid/speech_shape
batch_type: numel
valid_batch_type: null
fold_length:
- 150
- 204800
sort_in_batch: descending
sort_batch: descending
multiple_iterator: false
chunk_length: 500
chunk_shift_ratio: 0.5
num_cache_chunks: 1024
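# Training/validation data: raw text and waveforms from dump/raw, plus phoneme
# durations, pitch, and energy obtained from a pre-trained Tacotron 2 teacher
# decoded with teacher forcing.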
train_data_path_and_name_and_type:
- - dump/raw/tr_no_dev/text
- text
- text
- - exp/tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.best/tr_no_dev/durations
- durations
- text_int
- - dump/raw/tr_no_dev/wav.scp
- speech
- sound
- - exp/tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.best/stats/train/collect_feats/pitch.scp
- pitch
- npy
- - exp/tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.best/stats/train/collect_feats/energy.scp
- energy
- npy
valid_data_path_and_name_and_type:
- - dump/raw/dev/text
- text
- text
- - exp/tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.best/dev/durations
- durations
- text_int
- - dump/raw/dev/wav.scp
- speech
- sound
- - exp/tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.best/stats/valid/collect_feats/pitch.scp
- pitch
- npy
- - exp/tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.best/stats/valid/collect_feats/energy.scp
- energy
- npy
allow_variable_data_keys: false
max_cache_size: 0.0
max_cache_fd: 32
valid_max_cache_size: null
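# Two optimizer/scheduler pairs for GAN training: optim/scheduler for the
# generator and optim2/scheduler2 for the discriminator; generator_first
# controls which side is updated first in each step.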
optim: adamw
optim_conf:
lr: 0.0002
betas:
- 0.8
- 0.99
eps: 1.0e-09
weight_decay: 0.0
scheduler: exponentiallr
scheduler_conf:
gamma: 0.999875
optim2: adamw
optim2_conf:
lr: 0.0002
betas:
- 0.8
- 0.99
eps: 1.0e-09
weight_decay: 0.0
scheduler2: exponentiallr
scheduler2_conf:
gamma: 0.999875
generator_first: true
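# Token list: ARPAbet phonemes produced by g2p_en (no space) plus punctuation
# and the special symbols <blank>, <unk>, and <sos/eos>.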
token_list:
- <blank>
- <unk>
- AH0
- N
- T
- D
- S
- R
- L
- DH
- K
- Z
- IH1
- IH0
- M
- EH1
- W
- P
- AE1
- AH1
- V
- ER0
- F
- ','
- AA1
- B
- HH
- IY1
- UW1
- IY0
- AO1
- EY1
- AY1
- .
- OW1
- SH
- NG
- G
- ER1
- CH
- JH
- Y
- AW1
- TH
- UH1
- EH2
- OW0
- EY2
- AO0
- IH2
- AE2
- AY2
- AA2
- UW0
- EH0
- OY1
- EY0
- AO2
- ZH
- OW2
- AE0
- UW2
- AH2
- AY0
- IY2
- AW2
- AA0
- ''''
- ER2
- UH2
- '?'
- OY2
- '!'
- AW0
- UH0
- OY0
- ..
- <sos/eos>
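# Preprocessing and acoustic features: phoneme tokens with the Tacotron text
# cleaner; 80-dim mel filterbanks at 22.05 kHz (1024-point FFT, 256-sample hop),
# globally mean-variance normalized with the teacher's statistics.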
odim: null
model_conf: {}
use_preprocessor: true
token_type: phn
bpemodel: null
non_linguistic_symbols: null
cleaner: tacotron
g2p: g2p_en_no_space
feats_extract: fbank
feats_extract_conf:
n_fft: 1024
hop_length: 256
win_length: null
fs: 22050
fmin: 80
fmax: 7600
n_mels: 80
normalize: global_mvn
normalize_conf:
stats_file: exp/tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.best/stats/train/feats_stats.npz
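# TTS model: joint_text2wav wrapping a Conformer-FastSpeech2 text2mel network
# (Conformer encoder/decoder with duration, pitch, and energy predictors).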
tts: joint_text2wav
tts_conf:
text2mel_type: fastspeech2
text2mel_params:
adim: 384
aheads: 2
elayers: 4
eunits: 1536
dlayers: 4
dunits: 1536
positionwise_layer_type: conv1d
positionwise_conv_kernel_size: 3
duration_predictor_layers: 2
duration_predictor_chans: 256
duration_predictor_kernel_size: 3
postnet_layers: 5
postnet_filts: 5
postnet_chans: 256
use_masking: true
encoder_normalize_before: true
decoder_normalize_before: true
reduction_factor: 1
encoder_type: conformer
decoder_type: conformer
conformer_rel_pos_type: latest
conformer_pos_enc_layer_type: rel_pos
conformer_self_attn_layer_type: rel_selfattn
conformer_activation_type: swish
use_macaron_style_in_conformer: true
use_cnn_in_conformer: true
conformer_enc_kernel_size: 7
conformer_dec_kernel_size: 31
init_type: xavier_uniform
transformer_enc_dropout_rate: 0.2
transformer_enc_positional_dropout_rate: 0.2
transformer_enc_attn_dropout_rate: 0.2
transformer_dec_dropout_rate: 0.2
transformer_dec_positional_dropout_rate: 0.2
transformer_dec_attn_dropout_rate: 0.2
pitch_predictor_layers: 5
pitch_predictor_chans: 256
pitch_predictor_kernel_size: 5
pitch_predictor_dropout: 0.5
pitch_embed_kernel_size: 1
pitch_embed_dropout: 0.0
stop_gradient_from_pitch_predictor: true
energy_predictor_layers: 2
energy_predictor_chans: 256
energy_predictor_kernel_size: 3
energy_predictor_dropout: 0.5
energy_embed_kernel_size: 1
energy_embed_dropout: 0.0
stop_gradient_from_energy_predictor: false
idim: 78
odim: 80
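    # Vocoder: HiFi-GAN generator; the upsample scales (8*8*2*2 = 256) match the
    # 256-sample hop length of the mel features.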
vocoder_type: hifigan_generator
vocoder_params:
out_channels: 1
channels: 512
global_channels: -1
kernel_size: 7
upsample_scales:
- 8
- 8
- 2
- 2
upsample_kernel_sizes:
- 16
- 16
- 4
- 4
resblock_kernel_sizes:
- 3
- 7
- 11
resblock_dilations:
- - 1
- 3
- 5
- - 1
- 3
- 5
- - 1
- 3
- 5
use_additional_convs: true
bias: true
nonlinear_activation: LeakyReLU
nonlinear_activation_params:
negative_slope: 0.1
use_weight_norm: true
in_channels: 80
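    # Discriminator: HiFi-GAN multi-scale (1 scale) + multi-period discriminator
    # with periods 2, 3, 5, 7, and 11.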
discriminator_type: hifigan_multi_scale_multi_period_discriminator
discriminator_params:
scales: 1
scale_downsample_pooling: AvgPool1d
scale_downsample_pooling_params:
kernel_size: 4
stride: 2
padding: 2
scale_discriminator_params:
in_channels: 1
out_channels: 1
kernel_sizes:
- 15
- 41
- 5
- 3
channels: 128
max_downsample_channels: 1024
max_groups: 16
bias: true
downsample_scales:
- 2
- 2
- 4
- 4
- 1
nonlinear_activation: LeakyReLU
nonlinear_activation_params:
negative_slope: 0.1
use_weight_norm: true
use_spectral_norm: false
follow_official_norm: false
periods:
- 2
- 3
- 5
- 7
- 11
period_discriminator_params:
in_channels: 1
out_channels: 1
kernel_sizes:
- 5
- 3
channels: 32
downsample_scales:
- 3
- 3
- 3
- 3
- 1
max_downsample_channels: 1024
bias: true
nonlinear_activation: LeakyReLU
nonlinear_activation_params:
negative_slope: 0.1
use_weight_norm: true
use_spectral_norm: false
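    # GAN training losses: MSE adversarial losses, feature-matching loss, and a
    # mel-spectrogram loss, weighted by the lambda_* terms below; the adversarial
    # part is computed on random 32-frame segments.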
generator_adv_loss_params:
average_by_discriminators: false
loss_type: mse
discriminator_adv_loss_params:
average_by_discriminators: false
loss_type: mse
use_feat_match_loss: true
feat_match_loss_params:
average_by_discriminators: false
average_by_layers: false
include_final_outputs: true
use_mel_loss: true
mel_loss_params:
fs: 22050
n_fft: 1024
hop_length: 256
win_length: null
window: hann
n_mels: 80
fmin: 0
fmax: null
log_base: null
lambda_text2mel: 1.0
lambda_adv: 1.0
lambda_mel: 45.0
lambda_feat_match: 2.0
sampling_rate: 22050
segment_size: 32
cache_generator_outputs: true
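# Pitch (DIO, 80-400 Hz) and energy extraction, each normalized with global
# mean-variance statistics from the teacher-forcing stats directory.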
pitch_extract: dio
pitch_extract_conf:
reduction_factor: 1
fs: 22050
n_fft: 1024
hop_length: 256
f0max: 400
f0min: 80
pitch_normalize: global_mvn
pitch_normalize_conf:
stats_file: exp/tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.best/stats/train/pitch_stats.npz
energy_extract: energy
energy_extract_conf:
reduction_factor: 1
fs: 22050
n_fft: 1024
hop_length: 256
win_length: null
energy_normalize: global_mvn
energy_normalize_conf:
stats_file: exp/tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.best/stats/train/energy_stats.npz
required:
- output_dir
- token_list
version: 0.10.3a1
distributed: true