# resepformer-wsj02mix / hyperparams.yaml
# ################################
# Model: Pretrained RE-SepFormer for speech separation
# Dataset: WSJ0-2mix
# ################################

sample_rate: 8000
num_spks: 2

# Encoder parameters
N_encoder_out: 128
out_channels: 128
kernel_size: 16
kernel_stride: 8
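# Note: at 8 kHz, kernel_size 16 with kernel_stride 8 gives 2 ms analysis
# windows with 50% overlap; N_encoder_out and out_channels fix the 128-dim
# latent space shared by the encoder and the masking network.
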
# Specifying the network
Encoder: !new:speechbrain.lobes.models.dual_path.Encoder
    kernel_size: 16
    out_channels: 128
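
# Intra-segment transformer: models dependencies within each chunk of the
# segmented encoder output.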
intra_mdl: !new:speechbrain.lobes.models.resepformer.SBTransformerBlock_wnormandskip
    num_layers: 8
    d_model: 128
    nhead: 8
    d_ffn: 1024
    dropout: 0
    use_positional_encoding: true
    norm_before: true
    use_norm: true
    use_skip: true
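
# Memory transformer: carries a compact summary of each processed segment to
# the next, RE-SepFormer's lighter replacement for SepFormer's inter-segment
# transformer.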
mem_mdl: !new:speechbrain.lobes.models.resepformer.SBTransformerBlock_wnormandskip
    num_layers: 8
    d_model: 128
    nhead: 8
    d_ffn: 1024
    dropout: 0
    use_positional_encoding: true
    norm_before: true
    use_norm: true
    use_skip: true
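
# Masking network: splits the 128-dim encoded frames into segments of 150
# frames, applies seg_model within segments and mem_model across them, and
# emits one mask per speaker. "av" for mem_type is presumably the
# average-based segment summary; see speechbrain.lobes.models.resepformer
# for the accepted values.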
MaskNet: !new:speechbrain.lobes.models.resepformer.ResourceEfficientSeparator
    input_dim: 128
    num_spk: 2
    causal: false
    unit: 256
    segment_size: 150
    layer: 2
    mem_type: av
    seg_model: !ref <intra_mdl>
    mem_model: !ref <mem_mdl>

Decoder: !new:speechbrain.lobes.models.dual_path.Decoder
    in_channels: 128
    out_channels: 1
    kernel_size: 16
    stride: 8
    bias: false
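
# The decoder is a transposed 1-D convolution mirroring the encoder
# (kernel 16, stride 8), mapping masked 128-channel features back to a
# single-channel waveform per speaker.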

modules:
    encoder: !ref <Encoder>
    decoder: !ref <Decoder>
    masknet: !ref <MaskNet>
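
# The Pretrainer maps saved checkpoint files onto the modules above when the
# model is fetched (e.g. via from_hparams).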
pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        encoder: !ref <Encoder>
        masknet: !ref <MaskNet>
        decoder: !ref <Decoder>
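
# Usage sketch (not part of the hyperparameter graph). Loads this recipe
# through SpeechBrain's pretrained-model interface; the source id below is
# inferred from this repo's name, and the import path assumes SpeechBrain
# >= 1.0 (older releases expose the same class as
# speechbrain.pretrained.SepformerSeparation):
#
#   from speechbrain.inference.separation import SepformerSeparation as separator
#
#   model = separator.from_hparams(
#       source="speechbrain/resepformer-wsj02mix",
#       savedir="pretrained_models/resepformer-wsj02mix",
#   )
#   est_sources = model.separate_file(path="mixture.wav")
#   # est_sources: (batch, time, num_spks) tensor of separated waveforms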