# Generated 2021-09-18 from:
# /home/mila/s/subakany/speechbrain_new/recipes/WHAMandWHAMR/separation/hparams/dprnn-whamr.yaml
# yamllint disable
# ################################
# Model: DPRNN (Dual-Path RNN) for source separation
# https://arxiv.org/abs/1910.06379
#
# Dataset: WHAMR!
# ################################
# Basic parameters
# Seed needs to be set at the top of the YAML, before objects with parameters are created
#
seed: 3
__set_seed: !apply:torch.manual_seed [3]
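# Note (not part of the generated recipe): with hyperpyyaml, which SpeechBrain
# uses to load these files, the !apply: tag above calls torch.manual_seed(3) at
# load time, before any of the objects below are instantiated. Roughly:
#   from hyperpyyaml import load_hyperpyyaml
#   with open("dprnn2_hyperparams.yaml") as f:
#       hparams = load_hyperpyyaml(f)  # seeding happens during this call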
# Data params
# the data folder for the WHAMR! dataset
# data_folder needs to follow the format: /yourpath/whamr
# make sure the top-level folder of the dataset is named whamr!
data_folder: /network/tmp1/subakany/whamr/
# the path to the wsj0/si_tr_s/ folder -- only needed if dynamic mixing is used
# e.g. /yourpath/wsj0-processed/si_tr_s/
# the original wsj0 data must first be resampled to 8 kHz;
# this conversion can be done with the script ../meta/preprocess_dynamic_mixing.py
base_folder_dm: /network/tmp1/subakany/wsj0-processed/si_tr_s/
experiment_name: dprnn-whamr
output_folder: results/dprnn-whamr/3
train_log: results/dprnn-whamr/3/train_log.txt
save_folder: results/dprnn-whamr/3/save
# the file names should start with whamr instead of whamorg
train_data: results/dprnn-whamr/3/save/whamr_tr.csv
valid_data: results/dprnn-whamr/3/save/whamr_cv.csv
test_data: results/dprnn-whamr/3/save/whamr_tt.csv
skip_prep: false
# Experiment params
auto_mix_prec: true # Set it to True for mixed precision
test_only: false
num_spks: 2 # set to 3 for wsj0-3mix
progressbar: true
save_audio: false # Save estimated sources on disk
sample_rate: 8000
# Training parameters
N_epochs: 200
batch_size: 1
lr: 0.00015
clip_grad_norm: 5
loss_upper_lim: 999999 # this is the upper limit for an acceptable loss
# if True, the training sequences are cut to a specified length
limit_training_signal_len: false
# the length (in samples) to which training sequences are cut
# when limit_training_signal_len is true
training_signal_len: 32000000
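# For reference: 32,000,000 samples at the 8 kHz sample_rate above is 4000 s,
# so even with limit_training_signal_len set to true this default would
# effectively leave the training utterances untruncated.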
# Set it to True to dynamically create mixtures at training time
dynamic_mixing: true
# Parameters for data augmentation
# the rir_path variable points to the directory of the room impulse responses
# e.g. /miniscratch/subakany/rir_wavs
# If the path does not exist, it is created automatically.
rir_path: /miniscratch/subakany/whamr_rirs_wav
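# Hedged summary of the dynamic-mixing setup configured here: with
# dynamic_mixing: true, training mixtures are created on the fly from the clean
# wsj0 utterances in base_folder_dm, reverberated with room impulse responses
# from rir_path, rather than read pre-mixed from train_data.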
# loss thresholding -- this thresholds the training loss
threshold_byloss: true
threshold: -30
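# Hedged sketch of how the separation recipes typically apply this threshold
# (paraphrased, not copied from the training script): the per-utterance loss is
# -SI-SNR, so values below -30 mean the utterance is already separated at
# >= 30 dB SI-SNR and is dropped from the batch average:
#   if threshold_byloss:
#       kept = loss[loss > threshold]
#       if kept.nelement() > 0:
#           loss = kept.mean()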
# Encoder parameters
N_encoder_out: 256
out_channels: 256
kernel_size: 16
kernel_stride: 8
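# With sample_rate: 8000, kernel_size: 16 corresponds to 2 ms analysis windows
# and kernel_stride: 8 to a 1 ms hop (50% overlap), so the encoder emits
# N_encoder_out = 256 features per 1 ms frame.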
# Dataloader options
dataloader_opts:
    batch_size: 1
    num_workers: 3
# Specifying the network
Encoder: &id003 !new:speechbrain.lobes.models.dual_path.Encoder
    kernel_size: 16
    out_channels: 256
intra: &id001 !new:speechbrain.lobes.models.dual_path.SBRNNBlock
    num_layers: 1
    input_size: 256
    hidden_channels: 256
    dropout: 0
    bidirectional: true
inter: &id002 !new:speechbrain.lobes.models.dual_path.SBRNNBlock
    num_layers: 1
    input_size: 256
    hidden_channels: 256
    dropout: 0
    bidirectional: true
MaskNet: &id005 !new:speechbrain.lobes.models.dual_path.Dual_Path_Model
    num_spks: 2
    in_channels: 256
    out_channels: 256
    num_layers: 6
    K: 250
    intra_model: *id001
    inter_model: *id002
    norm: ln
    linear_layer_after_inter_intra: true
    skip_around_intra: true
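# Hedged architecture note: the Dual_Path_Model chunks the encoded signal into
# segments of K: 250 frames and alternates the intra-chunk RNN (*id001, applied
# within each segment) with the inter-chunk RNN (*id002, applied across
# segments) for num_layers: 6 dual-path blocks, finally producing one mask per
# speaker (num_spks: 2) that is applied to the encoder output.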
Decoder: &id004 !new:speechbrain.lobes.models.dual_path.Decoder
    in_channels: 256
    out_channels: 1
    kernel_size: 16
    stride: 8
    bias: false
optimizer: !name:torch.optim.Adam
    lr: 0.00015
    weight_decay: 0
loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper
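# The loss is the negative scale-invariant SNR with permutation-invariant
# training (PIT): for each utterance, the speaker permutation giving the best
# SI-SNR is scored. In plain notation, with s_target = (<s_hat, s>/||s||^2) * s
# and e = s_hat - s_target, SI-SNR = 10*log10(||s_target||^2 / ||e||^2), and
# the training objective is -SI-SNR.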
lr_scheduler: &id007 !new:speechbrain.nnet.schedulers.ReduceLROnPlateau
    factor: 0.5
    patience: 2
    dont_halve_until_epoch: 85
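# Concretely (hedged reading of these settings): the learning rate is halved
# whenever the validation loss fails to improve for 2 consecutive epochs, but
# never before epoch 85, e.g. 1.5e-4 -> 7.5e-5 -> 3.75e-5 -> ...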
epoch_counter: &id006 !new:speechbrain.utils.epoch_loop.EpochCounter
    limit: 200
modules:
    encoder: *id003
    decoder: *id004
    masknet: *id005
save_all_checkpoints: true
checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
    checkpoints_dir: results/dprnn-whamr/3/save
    recoverables:
        encoder: *id003
        decoder: *id004
        masknet: *id005
        counter: *id006
        lr_scheduler: *id007
train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
    save_file: results/dprnn-whamr/3/train_log.txt
pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        encoder: !ref <Encoder>
        masknet: !ref <MaskNet>
        decoder: !ref <Decoder>
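# Hedged usage sketch (not part of the recipe; the repo id below is
# illustrative): once this file is loaded with hyperpyyaml, the pretrainer can
# pull pretrained weights into the Encoder/MaskNet/Decoder defined above:
#   pretrainer = hparams["pretrainer"]
#   pretrainer.collect_files(default_source="speechbrain/REAL-M-sisnr-estimator-training")
#   pretrainer.load_collected()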