batch_max_steps: 15360
batch_size: 10
config: conf/parallel_wavegan.v1.yaml
dev_dumpdir: dump/dev/norm
discriminator_grad_norm: 1
discriminator_optimizer_params:
    eps: 1.0e-06
    lr: 5.0e-05
    weight_decay: 0.0
discriminator_params:
    bias: true
    conv_channels: 64
    in_channels: 1
    kernel_size: 3
    layers: 10
    nonlinear_activation: LeakyReLU
    nonlinear_activation_params:
        negative_slope: 0.2
    out_channels: 1
    use_weight_norm: true
discriminator_scheduler_params:
    gamma: 0.5
    step_size: 200000
discriminator_train_start_steps: 100000
eval_interval_steps: 1000
fft_size: 1024
fmax: 7600
fmin: 80
format: npy  # hdf5
generator_grad_norm: 10
generator_optimizer_params:
    eps: 1.0e-06
    lr: 0.0001
    weight_decay: 0.0
generator_params:
    aux_channels: 80
    aux_context_window: 2
    dropout: 0.0
    gate_channels: 128
    in_channels: 1
    kernel_size: 3
    layers: 30
    out_channels: 1
    residual_channels: 64
    skip_channels: 64
    stacks: 3
    upsample_net: ConvInUpsampleNetwork
    upsample_params:
        upsample_scales:
        - 4
        - 4
        - 4
        - 4
    use_weight_norm: true
generator_scheduler_params:
    gamma: 0.5
    step_size: 200000
global_gain_scale: 1.0
hop_size: 256
lambda_adv: 4.0
log_interval_steps: 100
num_mels: 80
num_save_intermediate_results: 4
num_workers: 8
outdir: exp/train_nodev_arctic_slt_parallel_wavegan.v1
pin_memory: true
remove_short_samples: true
resume: exp/train_nodev_arctic_slt_parallel_wavegan.v1/checkpoint-300000steps.pkl
sampling_rate: 16000
save_interval_steps: 5000
stft_loss_params:
    fft_sizes:
    - 1024
    - 2048
    - 512
    hop_sizes:
    - 120
    - 240
    - 50
    win_lengths:
    - 600
    - 1200
    - 240
    window: hann_window
train_dumpdir: dump/train_nodev/norm
train_max_steps: 400000
trim_frame_size: 2048
trim_hop_size: 512
trim_silence: false
trim_threshold_in_db: 60
verbose: 0
win_length: null
window: hann
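
A minimal sanity-check sketch for a config like the one above. It assumes PyYAML is available and that the dump has been saved to a hypothetical file named config.yml; the only facts it relies on are fields shown in the dump (hop_size, batch_max_steps, and generator_params.upsample_params.upsample_scales), where the product of the upsampling scales is expected to equal hop_size so that one conditioning mel frame expands to exactly hop_size waveform samples.

# Sketch only: "config.yml" is a placeholder path, not part of the original dump.
import math
import yaml

with open("config.yml") as f:
    cfg = yaml.safe_load(f)

# Product of upsample_scales should match hop_size (4 * 4 * 4 * 4 == 256 here).
scales = cfg["generator_params"]["upsample_params"]["upsample_scales"]
assert math.prod(scales) == cfg["hop_size"]

# batch_max_steps is in waveform samples; dividing by hop_size gives the
# number of mel frames per training slice (15360 // 256 == 60 here).
frames_per_slice = cfg["batch_max_steps"] // cfg["hop_size"]
print(frames_per_slice)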