# ################################
# Model: SepFormer for source separation
# https://arxiv.org/abs/2010.13154
# Dataset : Libri4Mix
# ################################
#
# Basic parameters
# Seed needs to be set at top of yaml, before objects with parameters are made
#
seed: 1234
__set_seed: !apply:torch.manual_seed [!ref <seed>]

# Data params

# e.g. '/yourpath/Libri3Mix/train-clean-360/'
# the data folder is needed even if dynamic mixing is applied
data_folder: /mnt3/Libri4Mix_48k_own/Libri4Mix/

# This is needed only if dynamic mixing is applied
base_folder_dm: /yourpath/LibriSpeech/train-clean-360/

experiment_name: sepformer-libri4mix-48k
output_folder: !ref results/<experiment_name>/<seed>
train_log: !ref <output_folder>/train_log.txt
save_folder: !ref <output_folder>/save
train_data: !ref <save_folder>/libri4mix_train-360.csv
valid_data: !ref <save_folder>/libri4mix_test.csv
test_data: !ref <save_folder>/libri4mix_test.csv
skip_prep: False

ckpt_interval_minutes: 60

# Experiment params
auto_mix_prec: True # Set it to True for mixed precision
num_spks: 4
noprogressbar: False
save_audio: False # Save estimated sources on disk
sample_rate: 48000

# Training parameters
N_epochs: 143
batch_size: 1
lr: 0.00015
clip_grad_norm: 5
loss_upper_lim: 999999  # this is the upper limit for an acceptable loss
# if True, the training sequences are cut to a specified length
limit_training_signal_len: True
# this is the length of sequences if we choose to limit
# the signal length of training sequences
training_signal_len: 100000

# Set it to True to dynamically create mixtures at training time
dynamic_mixing: False
use_wham_noise: True

# Parameters for data augmentation
use_wavedrop: False
use_speedperturb: True
use_rand_shift: False
min_shift: -8000
max_shift: 8000

speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
    perturb_prob: 1.0
    drop_freq_prob: 0.0
    drop_chunk_prob: 0.0
    sample_rate: !ref <sample_rate>
    speeds: [95, 100, 105]

wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
    perturb_prob: 0.0
    drop_freq_prob: 1.0
    drop_chunk_prob: 1.0
    sample_rate: !ref <sample_rate>

# loss thresholding -- this thresholds the training loss
threshold_byloss: True
threshold: -30

# Encoder parameters
N_encoder_out: 256
out_channels: 256
kernel_size: 16
kernel_stride: 8
d_ffn: 1024

# Dataloader options
dataloader_opts:
    batch_size: !ref <batch_size>
    num_workers: 3

# Specifying the network
Encoder: !new:speechbrain.lobes.models.dual_path.Encoder
    kernel_size: !ref <kernel_size>
    out_channels: !ref <N_encoder_out>

SBtfintra: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
    num_layers: 8
    d_model: !ref <out_channels>
    nhead: 8
    d_ffn: !ref <d_ffn>
    dropout: 0
    use_positional_encoding: True
    norm_before: True

SBtfinter: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
    num_layers: 8
    d_model: !ref <out_channels>
    nhead: 8
    d_ffn: !ref <d_ffn>
    dropout: 0
    use_positional_encoding: True
    norm_before: True

MaskNet: !new:speechbrain.lobes.models.dual_path.Dual_Path_Model
    num_spks: !ref <num_spks>
    in_channels: !ref <N_encoder_out>
    out_channels: !ref <out_channels>
    num_layers: 2
    K: 250
    intra_model: !ref <SBtfintra>
    inter_model: !ref <SBtfinter>
    norm: ln
    linear_layer_after_inter_intra: False
    skip_around_intra: True

Decoder: !new:speechbrain.lobes.models.dual_path.Decoder
    in_channels: !ref <N_encoder_out>
    out_channels: 1
    kernel_size: !ref <kernel_size>
    stride: !ref <kernel_stride>
    bias: False

optimizer: !name:torch.optim.Adam
    lr: !ref <lr>
    weight_decay: 0

loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper

lr_scheduler: !new:speechbrain.nnet.schedulers.ReduceLROnPlateau
    factor: 0.5
    patience: 2
    dont_halve_until_epoch: 5

epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter
    limit: !ref <N_epochs>

modules:
    encoder: !ref <Encoder>
    decoder: !ref <Decoder>
    masknet: !ref <MaskNet>
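# Checkpointer: saves the recoverables below (roughly every
# ckpt_interval_minutes, i.e. 60 min) so an interrupted run can resume.
# Note that lr_scheduler is commented out of the recoverables, so the
# ReduceLROnPlateau state is not restored when training resumes.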
checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
    checkpoints_dir: !ref <save_folder>
    recoverables:
        encoder: !ref <Encoder>
        decoder: !ref <Decoder>
        masknet: !ref <MaskNet>
        counter: !ref <epoch_counter>
        # lr_scheduler: !ref <lr_scheduler>

train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
    save_file: !ref <train_log>

# If you do not want to use the pretrained separator, you can simply delete the pretrained_separator field.
pretrained_separator: !new:speechbrain.utils.parameter_transfer.Pretrainer
    collect_in: !ref <save_folder>
    loadables:
        encoder: !ref <Encoder>
        decoder: !ref <Decoder>
        masknet: !ref <MaskNet>
    paths:
        encoder: speechbrain/sepformer-wsj03mix/encoder.ckpt
        decoder: speechbrain/sepformer-wsj03mix/decoder.ckpt
        masknet: speechbrain/sepformer-wsj03mix/masknet.ckpt

pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        encoder: !ref <Encoder>
        masknet: !ref <MaskNet>
        decoder: !ref <Decoder>
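# Note: the checkpoints above come from the 3-speaker sepformer-wsj03mix
# model, while this recipe is configured for num_spks: 4; the masknet's
# final speaker-dependent layer may therefore not match the checkpoint,
# so verify the parameter transfer before relying on it. The trailing
# pretrainer entry defines no paths here; presumably the training script
# supplies them at run time.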