# Dumped ESPnet2 training configuration.
# Checkpoint by richardbaihe, commit 0fff999: "add another checkpoint for new speaker testing".
config: ./conf/fsp2_conformer.yaml
print_config: false
log_level: INFO
dry_run: false
iterator_type: sequence
output_dir: exp/unseen_conformer
ngpu: 1
seed: 0
num_workers: 1
num_att_plot: 0
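# Distributed setup: NCCL-backed DDP launched via multiprocessing; with
# dist_world_size: 8 this presumably runs eight worker processes with one GPU each.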
dist_backend: nccl
dist_init_method: env://
dist_world_size: 8
dist_rank: 0
local_rank: 0
dist_master_addr: localhost
dist_master_port: 50455
dist_launcher: null
multiprocessing_distributed: true
unused_parameters: false
sharded_ddp: false
cudnn_enabled: true
cudnn_benchmark: false
cudnn_deterministic: true
collect_stats: false
write_collected_feats: false
max_epoch: 1500
patience: null
val_scheduler_criterion:
- valid
- loss
early_stopping_criterion:
- valid
- loss
- min
best_model_criterion:
-   - valid
    - loss
    - min
-   - train
    - loss
    - min
keep_nbest_models: 5
grad_clip: 1.0
grad_clip_type: 2.0
grad_noise: false
accum_grad: 1
no_forward_run: false
resume: true
train_dtype: float32
use_amp: false
log_interval: null
use_tensorboard: true
use_wandb: false
wandb_project: null
wandb_id: null
wandb_entity: null
wandb_name: null
wandb_model_log_interval: -1
detect_anomaly: false
pretrain_path: null
init_param: []
ignore_init_mismatch: false
freeze_param: []
num_iters_per_epoch: 800
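# Batching: with batch_type numel, each batch is packed until the total number of
# feature elements (computed from the *_shape files below) reaches batch_bins;
# batch_size is not used for this batch type.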
batch_size: 20
valid_batch_size: null
batch_bins: 3000000
valid_batch_bins: null
train_shape_file:
- exp/mlm_stats_raw_en_phn_g2p_en/train/speech_shape
valid_shape_file:
- exp/mlm_stats_raw_en_phn_g2p_en/valid/speech_shape
batch_type: numel
valid_batch_type: null
fold_length:
- 80000
sort_in_batch: descending
sort_batch: descending
multiple_iterator: false
chunk_length: 500
chunk_shift_ratio: 0.5
num_cache_chunks: 1024
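# Each data entry below is a (file path, data name, file type) triplet.
# align_start/align_end presumably hold MFA phone-boundary times, loaded as
# float sequences (text_float).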
train_data_path_and_name_and_type:
-   - dump/raw/unseen_tr_no_dev/mfa_wav.scp
    - speech
    - sound
-   - dump/raw/unseen_tr_no_dev/mfa_text
    - text
    - text
-   - dump/raw/unseen_tr_no_dev/mfa_start
    - align_start
    - text_float
-   - dump/raw/unseen_tr_no_dev/mfa_end
    - align_end
    - text_float
valid_data_path_and_name_and_type:
-   - dump/raw/dev/mfa_wav.scp
    - speech
    - sound
-   - dump/raw/dev/mfa_text
    - text
    - text
-   - dump/raw/dev/mfa_start
    - align_start
    - text_float
-   - dump/raw/dev/mfa_end
    - align_end
    - text_float
allow_variable_data_keys: false
max_cache_size: 0.0
max_cache_fd: 32
valid_max_cache_size: null
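# Noam LR schedule: lr: 1.0 is a scale factor, not an absolute rate; the
# effective rate follows lr * model_size^-0.5 * min(step^-0.5, step * warmup_steps^-1.5).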
optim: adam
optim_conf:
    lr: 1.0
scheduler: noamlr
scheduler_conf:
    model_size: 384
    warmup_steps: 4000
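# Token inventory: ARPAbet phonemes with stress digits (as produced by g2p_en),
# plus "sp" (presumably a short-pause marker from the aligner) and the
# <blank>/<unk>/<sos/eos> special tokens.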
token_list:
- <blank>
- <unk>
- AH0
- T
- N
- sp
- D
- S
- R
- L
- IH1
- DH
- AE1
- M
- EH1
- K
- Z
- W
- HH
- ER0
- AH1
- IY1
- P
- V
- F
- B
- AY1
- IY0
- EY1
- AA1
- AO1
- UW1
- IH0
- OW1
- NG
- G
- SH
- ER1
- Y
- TH
- AW1
- CH
- UH1
- IH2
- JH
- OW0
- EH2
- OY1
- AY2
- EH0
- EY2
- UW0
- AE2
- AA2
- OW2
- AH2
- ZH
- AO2
- IY2
- AE0
- UW2
- AY0
- AA0
- AO0
- AW2
- EY0
- UH2
- ER2
- OY2
- UH0
- AW0
- OY0
- <sos/eos>
init: xavier_uniform
input_size: 80
odim: null
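# MLM settings: phn_span masking presumably masks whole phoneme-aligned spans
# (mean span length of 8 phones) at mlm_prob = 0.8, and the model is trained to
# reconstruct the masked speech; a 5-layer postnet refines the output.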
model_conf:
    lsm_weight: 0.1
    length_normalized_loss: false
    masking_schema: phn_span
    mean_phn_span: 8
    mlm_prob: 0.8
    dynamic_mlm_prob: false
    postnet_layers: 5
    postnet_filts: 5
    postnet_chans: 256
    use_scaled_pos_enc: false
use_preprocessor: true
token_type: word
bpemodel: data/en_token_list_g2p_en/bpe_unigram5000/bpe.model
non_linguistic_symbols: null
cleaner: null
g2p: g2p_en
speech_volume_normalize: null
rir_scp: null
rir_apply_prob: 1.0
noise_scp: null
noise_apply_prob: 1.0
noise_db_range: '13_15'
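# Feature extraction: 80-dim mel filterbanks at fs = 24 kHz; hop 300 samples
# (12.5 ms), window 1200 samples (50 ms), mel range 80-7600 Hz.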
feats_extract: fbank
feats_extract_conf:
    n_fft: 2048
    hop_length: 300
    win_length: 1200
    fs: 24000
    fmin: 80
    fmax: 7600
    n_mels: 80
normalize: null
normalize_conf: {}
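# Encoder: 4-block Conformer (384-dim attention, 2 heads, macaron-style FFN,
# Swish activation, legacy relative positional attention). input_layer: sega_mlm
# is presumably a segment-aware input layer for alignment-aware masked training.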
encoder: conformer
encoder_conf:
    input_layer: sega_mlm
    pre_speech_layer: 0
    cnn_module_kernel: 7
    attention_dim: 384
    attention_heads: 2
    linear_units: 1536
    num_blocks: 4
    dropout_rate: 0.2
    positional_dropout_rate: 0.2
    attention_dropout_rate: 0.2
    normalize_before: true
    macaron_style: true
    use_cnn_module: true
    selfattention_layer_type: legacy_rel_selfattn
    activation_type: swish
    pos_enc_layer_type: legacy_rel_pos
    positionwise_layer_type: conv1d
    positionwise_conv_kernel_size: 3
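# Decoder: mirrors the encoder blocks, but with a wider convolution module
# (cnn_module_kernel: 31) and no sega_mlm input layer.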
decoder: conformer
decoder_conf:
    cnn_module_kernel: 31
    attention_dim: 384
    attention_heads: 2
    linear_units: 1536
    num_blocks: 4
    dropout_rate: 0.2
    positional_dropout_rate: 0.2
    attention_dropout_rate: 0.2
    macaron_style: true
    use_cnn_module: true
    selfattention_layer_type: legacy_rel_selfattn
    activation_type: swish
    pos_enc_layer_type: legacy_rel_pos
    positionwise_layer_type: conv1d
    positionwise_conv_kernel_size: 3
pre_decoder: linear
pre_decoder_conf: {}
required:
- output_dir
- token_list
version: 0.10.3a3
distributed: true