Commit 15787f0 by yipjiaqi
Parent: aff3c90

Delete spgm_train.yaml

Files changed (1):
  1. spgm_train.yaml +0 -176
spgm_train.yaml DELETED
@@ -1,176 +0,0 @@
- # ################################
- # Model: SPGM for source separation
- # https://arxiv.org/abs/2309.12608
- # Dataset: WSJ0-2mix
- # ################################
- #
- # Basic parameters
- # Seed needs to be set at the top of the YAML, before objects with parameters are made
- #
- seed: 1234
- __set_seed: !apply:torch.manual_seed [!ref <seed>]
-
- # Data params
-
- # e.g. '/yourpath/wsj0-mix/2speakers'
- # end with 2speakers for wsj0-2mix or 3speakers for wsj0-3mix
- data_folder:
-
- # the path to the wsj0/si_tr_s/ folder -- only needed if dynamic mixing is used
- # e.g. /yourpath/wsj0-processed/si_tr_s/
- # you need to convert the original wsj0 data to 8 kHz
- # you can do this conversion with the script ../meta/preprocess_dynamic_mixing.py
- base_folder_dm:
-
- experiment_name: SPGM
- output_folder: !ref results/<experiment_name>/<seed>
- train_log: !ref <output_folder>/train_log.txt
- save_folder: !ref <output_folder>/save
- train_data: !ref <save_folder>/wsj_tr.csv
- valid_data: !ref <save_folder>/wsj_cv.csv
- test_data: !ref <save_folder>/wsj_tt.csv
- skip_prep: False
-
- ckpt_interval_minutes: 15
-
- # Experiment params
- auto_mix_prec: False # Set it to True for mixed precision
- test_only: False
- num_spks: 2 # set to 3 for wsj0-3mix
- noprogressbar: False
- save_audio: False # Save estimated sources on disk
- sample_rate: 8000
-
- # Training parameters
- N_epochs: 200
- batch_size: 1
- lr: 0.00015
- clip_grad_norm: 5
- loss_upper_lim: 999999 # this is the upper limit for an acceptable loss
- # if True, the training sequences are cut to a specified length
- limit_training_signal_len: False
- # this is the length of the training sequences
- # if we choose to limit their signal length
- training_signal_len: 9999999999999999
-
- # Set it to True to dynamically create mixtures at training time
- dynamic_mixing: True
-
- # Parameters for data augmentation
- use_wavedrop: False
- use_speedperturb: True
- use_rand_shift: False
- min_shift: -8000
- max_shift: 8000
-
- speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
-     perturb_prob: 1.0
-     drop_freq_prob: 0.0
-     drop_chunk_prob: 0.0
-     sample_rate: !ref <sample_rate>
-     speeds: [95, 100, 105]
-
- wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
-     perturb_prob: 0.0
-     drop_freq_prob: 1.0
-     drop_chunk_prob: 1.0
-     sample_rate: !ref <sample_rate>
-
- # loss thresholding -- this thresholds the training loss
- threshold_byloss: True
- threshold: -30
-
- # Encoder parameters
- N_encoder_out: 256
- out_channels: 256
- kernel_size: 16
- kernel_stride: 8
-
- # Dataloader options
- # Set num_workers: 0 on macOS due to behavior of the multiprocessing library
- dataloader_opts:
-     batch_size: !ref <batch_size>
-     num_workers: 3
-
-
- # Specifying the network
- Encoder: !new:speechbrain.lobes.models.dual_path.Encoder
-     kernel_size: !ref <kernel_size>
-     out_channels: !ref <N_encoder_out>
-
-
- SBtfintra: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
-     num_layers: 8
-     d_model: !ref <out_channels>
-     nhead: 8
-     d_ffn: 1024
-     dropout: 0
-     use_positional_encoding: True
-     norm_before: True
-
- SBtfinter: !new:speechbrain.lobes.models.SPGM.SPGMBlock
-     n_embd: !ref <out_channels>
-     pool: 'att'
-
- MaskNet: !new:speechbrain.lobes.models.dual_path.Dual_Path_Model
-     num_spks: !ref <num_spks>
-     in_channels: !ref <N_encoder_out>
-     out_channels: !ref <out_channels>
-     num_layers: 4
-     K: 250
-     intra_model: !ref <SBtfintra>
-     inter_model: !ref <SBtfinter>
-     norm: ln
-     linear_layer_after_inter_intra: False
-     skip_around_intra: True
-
- Decoder: !new:speechbrain.lobes.models.dual_path.Decoder
-     in_channels: !ref <N_encoder_out>
-     out_channels: 1
-     kernel_size: !ref <kernel_size>
-     stride: !ref <kernel_stride>
-     bias: False
-
- optimizer: !name:torch.optim.Adam
-     lr: !ref <lr>
-     weight_decay: 0
-
- loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper
-
- lr_scheduler: !new:speechbrain.nnet.schedulers.ReduceLROnPlateau
-     factor: 0.5
-     patience: 2
-     dont_halve_until_epoch: 85
-
- epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter
-     limit: !ref <N_epochs>
-
- modules:
-     encoder: !ref <Encoder>
-     decoder: !ref <Decoder>
-     masknet: !ref <MaskNet>
-
- checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
-     checkpoints_dir: !ref <save_folder>
-     recoverables:
-         encoder: !ref <Encoder>
-         decoder: !ref <Decoder>
-         masknet: !ref <MaskNet>
-         counter: !ref <epoch_counter>
-         lr_scheduler: !ref <lr_scheduler>
-
- train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
-     save_file: !ref <train_log>
-
-
- # If you do not want to use the pretrained separator, simply delete the pretrained_separator field.
- # pretrained_separator: !new:speechbrain.utils.parameter_transfer.Pretrainer
- #     collect_in: !ref <save_folder>
- #     loadables:
- #         encoder: !ref <Encoder>
- #         decoder: !ref <Decoder>
- #         masknet: !ref <MaskNet>
- #     paths:
- #         encoder: yipjiaqi/spgm/encoder.ckpt
- #         decoder: yipjiaqi/spgm/decoder.ckpt
- #         masknet: yipjiaqi/spgm/masknet.ckpt
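
Note: since this commit only deletes the file, the recipe is still available at the parent commit aff3c90. A minimal sketch of retrieving it with huggingface_hub, assuming the repo id is yipjiaqi/spgm (inferred from the checkpoint paths inside the YAML, not stated on this page):

    from huggingface_hub import hf_hub_download

    # Fetch spgm_train.yaml as it existed before the deletion.
    yaml_path = hf_hub_download(
        repo_id="yipjiaqi/spgm",      # assumed repo id
        filename="spgm_train.yaml",
        revision="aff3c90",           # parent commit, prior to the delete
    )
    print(yaml_path)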
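For context on what the deleted file did: SpeechBrain recipes load such hyperparameter files with HyperPyYAML, which resolves !ref placeholders, instantiates !new: entries, and turns !name: entries into callables. A minimal sketch, assuming the file has been restored locally and using placeholder dataset paths:

    from hyperpyyaml import load_hyperpyyaml

    with open("spgm_train.yaml") as f:
        hparams = load_hyperpyyaml(
            f,
            overrides={
                # Placeholder paths; point these at your WSJ0-2mix data.
                "data_folder": "/yourpath/wsj0-mix/2speakers",
                "base_folder_dm": "/yourpath/wsj0-processed/si_tr_s/",
            },
        )

    masknet = hparams["MaskNet"]   # !new: entry -> instantiated module
    optimizer = hparams["optimizer"](masknet.parameters())  # !name: -> callable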
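The modules, optimizer, epoch_counter, and checkpointer entries are the hooks a SpeechBrain Brain subclass consumes. A sketch of the wiring only, continuing from the snippet above; the actual recipe's Brain subclass implements compute_forward and compute_objectives, which are stubbed here:

    import speechbrain as sb

    class Separation(sb.Brain):
        def compute_forward(self, batch, stage):
            ...  # encode the mixture, estimate masks, decode sources

        def compute_objectives(self, predictions, batch, stage):
            ...  # SI-SNR with PIT, via hparams["loss"]

    separator = Separation(
        modules=hparams["modules"],          # encoder / decoder / masknet
        opt_class=hparams["optimizer"],      # partial over torch.optim.Adam
        hparams=hparams,
        checkpointer=hparams["checkpointer"],
    )
    # Training would then loop over the epoch counter:
    # separator.fit(hparams["epoch_counter"], train_data, valid_data)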