# ################################
# Model: Inference for source separation with SepFormer
# https://arxiv.org/abs/2010.13154
# Generated from speechbrain/recipes/WSJ0Mix/separation/train/hparams/sepformer-wsj02mix.yaml
# Dataset : wsj02mix
# ###############################
# Parameters
sample_rate: 8000
num_spks: 2
# Specifying the network
Encoder: !new:speechbrain.lobes.models.dual_path.Encoder
    kernel_size: 16
    out_channels: 256
SBtfintra: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
    num_layers: 8
    d_model: 256
    nhead: 8
    d_ffn: 1024
    dropout: 0
    use_positional_encoding: true
    norm_before: true
SBtfinter: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
    num_layers: 8
    d_model: 256
    nhead: 8
    d_ffn: 1024
    dropout: 0
    use_positional_encoding: true
    norm_before: true
MaskNet: !new:speechbrain.lobes.models.dual_path.Dual_Path_Model
    num_spks: !ref <num_spks>
    in_channels: 256
    out_channels: 256
    num_layers: 2
    K: 250
    intra_model: !ref <SBtfintra>
    inter_model: !ref <SBtfinter>
    norm: ln
    linear_layer_after_inter_intra: false
    skip_around_intra: true
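# Note (added comment): the Dual_Path_Model chunks the encoded signal into
# segments of length K and alternates the intra-chunk (SBtfintra) and
# inter-chunk (SBtfinter) transformer blocks to estimate one mask per speaker.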
Decoder: !new:speechbrain.lobes.models.dual_path.Decoder
    in_channels: 256
    out_channels: 1
    kernel_size: 16
    stride: 8
    bias: false
modules:
    encoder: !ref <Encoder>
    decoder: !ref <Decoder>
    masknet: !ref <MaskNet>
pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        masknet: !ref <MaskNet>
        encoder: !ref <Encoder>
        decoder: !ref <Decoder>
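# Usage sketch (added; not part of the generated hparams): this file is meant
# to be consumed by SpeechBrain's pretrained separation interface, which
# instantiates the modules above and lets the Pretrainer fetch the matching
# checkpoints. The import path assumes a recent SpeechBrain release; older
# versions expose the same class under speechbrain.pretrained. The source
# string assumes the sepformer-wsj02mix model card on HuggingFace.
#
#   from speechbrain.inference.separation import SepformerSeparation
#
#   model = SepformerSeparation.from_hparams(
#       source="speechbrain/sepformer-wsj02mix",
#       savedir="pretrained_models/sepformer-wsj02mix",  # hypothetical local cache dir
#   )
#   # Separate a mixture file; returns the estimated sources, one per speaker
#   est_sources = model.separate_file(path="mixture.wav")  # hypothetical input path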