subakany committed on
Commit f0ce3b4
1 Parent(s): b4f5b69

pushing separators

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. convtasnet1/CKPT.yaml +4 -0
  2. convtasnet1/brain.ckpt +3 -0
  3. convtasnet1/counter.ckpt +3 -0
  4. convtasnet1/decoder.ckpt +3 -0
  5. convtasnet1/encoder.ckpt +3 -0
  6. convtasnet1/hyperparams.yaml +168 -0
  7. convtasnet1/lr_scheduler.ckpt +3 -0
  8. convtasnet1/masknet.ckpt +3 -0
  9. convtasnet1/optimizer.ckpt +3 -0
  10. convtasnet2/CKPT.yaml +4 -0
  11. convtasnet2/brain.ckpt +3 -0
  12. convtasnet2/counter.ckpt +3 -0
  13. convtasnet2/decoder.ckpt +3 -0
  14. convtasnet2/encoder.ckpt +3 -0
  15. convtasnet2/hyperparams.yaml +168 -0
  16. convtasnet2/lr_scheduler.ckpt +3 -0
  17. convtasnet2/masknet.ckpt +3 -0
  18. convtasnet2/optimizer.ckpt +3 -0
  19. convtasnet3/CKPT.yaml +4 -0
  20. convtasnet3/brain.ckpt +3 -0
  21. convtasnet3/counter.ckpt +3 -0
  22. convtasnet3/decoder.ckpt +3 -0
  23. convtasnet3/encoder.ckpt +3 -0
  24. convtasnet3/hyperparams.yaml +168 -0
  25. convtasnet3/lr_scheduler.ckpt +3 -0
  26. convtasnet3/masknet.ckpt +3 -0
  27. convtasnet3/optimizer.ckpt +3 -0
  28. dprnn1/CKPT.yaml +4 -0
  29. dprnn1/brain.ckpt +3 -0
  30. dprnn1/counter.ckpt +3 -0
  31. dprnn1/dataloader-TRAIN.ckpt +3 -0
  32. dprnn1/decoder.ckpt +3 -0
  33. dprnn1/encoder.ckpt +3 -0
  34. dprnn1/hyperparams.yaml +183 -0
  35. dprnn1/lr_scheduler.ckpt +3 -0
  36. dprnn1/masknet.ckpt +3 -0
  37. dprnn1/optimizer.ckpt +3 -0
  38. dprnn2/CKPT.yaml +4 -0
  39. dprnn2/brain.ckpt +3 -0
  40. dprnn2/counter.ckpt +3 -0
  41. dprnn2/decoder.ckpt +3 -0
  42. dprnn2/encoder.ckpt +3 -0
  43. dprnn2/hyperparams.yaml +183 -0
  44. dprnn2/lr_scheduler.ckpt +3 -0
  45. dprnn2/masknet.ckpt +3 -0
  46. dprnn2/optimizer.ckpt +3 -0
  47. dprnn3/CKPT.yaml +4 -0
  48. dprnn3/brain.ckpt +3 -0
  49. dprnn3/counter.ckpt +3 -0
  50. dprnn3/decoder.ckpt +3 -0
convtasnet1/CKPT.yaml ADDED
@@ -0,0 +1,4 @@
+ # yamllint disable
+ end-of-epoch: true
+ si-snr: -0.42996728845946536
+ unixtime: 1631299321.582555
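
Each CKPT.yaml holds only lightweight checkpoint metadata (an end-of-epoch flag, the tracked si-snr value, and a timestamp); the tensors themselves live in the sibling *.ckpt files. As a hedged illustration (not part of this commit), the snapshots can be compared by this metadata alone, assuming the stored si-snr is the validation loss (negative SI-SNR, so lower is better):

```python
# Sketch only: rank checkpoint directories by the "si-snr" field in CKPT.yaml.
# Assumes lower (more negative) si-snr means a better checkpoint, as these
# values appear to be negative SI-SNR losses.
from pathlib import Path
import yaml

def best_checkpoint(root: str, metric: str = "si-snr"):
    """Return (directory, value) of the checkpoint with the lowest metric."""
    best_dir, best_val = None, float("inf")
    for meta_file in Path(root).glob("*/CKPT.yaml"):
        meta = yaml.safe_load(meta_file.read_text())
        value = meta.get(metric, float("inf"))
        if value < best_val:
            best_dir, best_val = meta_file.parent, value
    return best_dir, best_val

# Usage (hypothetical path): best_checkpoint("path/to/this/repo")
```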
convtasnet1/brain.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9e24193f36931b7f57932532efbdcf64971f42732383ba6808825f77db258f6
+ size 28
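
The *.ckpt entries above and below are Git LFS pointer files: three text lines giving the spec version, a sha256 object id, and the payload size in bytes, while the real binaries are stored in LFS. A small, hedged sketch (paths are illustrative) of checking a downloaded object against its pointer:

```python
# Sketch only: confirm a fetched binary matches the oid/size recorded in a
# Git LFS pointer file such as convtasnet1/brain.ckpt.
import hashlib

def matches_lfs_pointer(pointer_path: str, object_path: str) -> bool:
    """True if object_path has the sha256 and size listed in pointer_path."""
    fields = dict(
        line.split(" ", 1)
        for line in open(pointer_path).read().splitlines()
        if line.strip()
    )
    expected_oid = fields["oid"].split(":", 1)[1]   # strip the "sha256:" prefix
    expected_size = int(fields["size"])
    data = open(object_path, "rb").read()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Usage (hypothetical paths):
# matches_lfs_pointer("convtasnet1/brain.ckpt", "downloads/brain.ckpt")
```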
convtasnet1/counter.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b
+ size 1
convtasnet1/decoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:623030a5a4317eabc555ec09254d6a05e5f3811933c429f90f06f903a22b808c
+ size 17272
convtasnet1/encoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4314241647c3b6adece727c3ff8066f674c516ad0a531efaa0bbd89eb4786050
+ size 17272
convtasnet1/hyperparams.yaml ADDED
@@ -0,0 +1,168 @@
+ # Generated 2021-09-15 from:
+ # /home/mila/s/subakany/speechbrain_new/recipes/WHAMandWHAMR/separation/hparams/convtasnet-whamr.yaml
+ # yamllint disable
+ # ################################
+ # Model: SepFormer for source separation
+ # https://arxiv.org/abs/2010.13154
+ #
+ # Dataset : WHAMR!
+ # ################################
+ # Basic parameters
+ # Seed needs to be set at top of yaml, before objects with parameters are made
+ #
+ seed: 3
+ __set_seed: !apply:torch.manual_seed [3]
+
+ # Data params
+
+ # the data folder for the wham dataset
+ # data_folder needs to follow the format: /yourpath/whamr.
+ # make sure to use the name whamr at your top folder for the dataset!
+ data_folder: /network/tmp1/subakany/whamr
+
+ # the path for wsj0/si_tr_s/ folder -- only needed if dynamic mixing is used
+ # e.g. /yourpath/wsj0-processed/si_tr_s/
+ # you need to convert the original wsj0 to 8k
+ # you can do this conversion with the script ../meta/preprocess_dynamic_mixing.py
+ base_folder_dm: /network/tmp1/subakany/wsj0-processed/si_tr_s/
+
+ experiment_name: convtasnet-whamr
+ output_folder: results/convtasnet-whamr/3
+ train_log: results/convtasnet-whamr/3/train_log.txt
+ save_folder: results/convtasnet-whamr/3/save
+
+ # the file names should start with whamr instead of whamorg
+ train_data: results/convtasnet-whamr/3/save/whamr_tr.csv
+ valid_data: results/convtasnet-whamr/3/save/whamr_cv.csv
+ test_data: results/convtasnet-whamr/3/save/whamr_tt.csv
+ skip_prep: false
+
+ # Experiment params
+ auto_mix_prec: false # Set it to True for mixed precision
+ test_only: false
+ num_spks: 2 # set to 3 for wsj0-3mix
+ progressbar: true
+ save_audio: false # Save estimated sources on disk
+ sample_rate: 8000
+
+ # Training parameters
+ N_epochs: 200
+ batch_size: 1
+ lr: 0.00015
+ clip_grad_norm: 5
+ loss_upper_lim: 999999 # this is the upper limit for an acceptable loss
+ # if True, the training sequences are cut to a specified length
+ limit_training_signal_len: false
+ # this is the length of sequences if we choose to limit
+ # the signal length of training sequences
+ training_signal_len: 32000000
+
+ # Set it to True to dynamically create mixtures at training time
+ dynamic_mixing: true
+
+ # Parameters for data augmentation
+
+ # rir_path variable points to the directory of the room impulse responses
+ # e.g. /miniscratch/subakany/rir_wavs
+ # If the path does not exist, it is created automatically.
+ rir_path: /miniscratch/subakany/whamr_rirs_wav
+
+ use_wavedrop: false
+ use_speedperturb: true
+ use_speedperturb_sameforeachsource: false
+ use_rand_shift: false
+ min_shift: -8000
+ max_shift: 8000
+
+ speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
+     perturb_prob: 1.0
+     drop_freq_prob: 0.0
+     drop_chunk_prob: 0.0
+     sample_rate: 8000
+     speeds: [95, 100, 105]
+
+ wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
+     perturb_prob: 0.0
+     drop_freq_prob: 1.0
+     drop_chunk_prob: 1.0
+     sample_rate: 8000
+
+ # loss thresholding -- this thresholds the training loss
+ threshold_byloss: true
+ threshold: -30
+
+ # Encoder parameters
+ N_encoder_out: 256
+ out_channels: 256
+ kernel_size: 16
+ kernel_stride: 8
+
+ # Dataloader options
+ dataloader_opts:
+     batch_size: 1
+     num_workers: 3
+
+
+ # Specifying the network
+ Encoder: &id001 !new:speechbrain.lobes.models.dual_path.Encoder
+     kernel_size: 16
+     out_channels: 256
+
+
+ MaskNet: &id003 !new:speechbrain.lobes.models.conv_tasnet.MaskNet
+
+     N: 256
+     B: 256
+     H: 512
+     P: 3
+     X: 6
+     R: 4
+     C: 2
+     norm_type: gLN
+     causal: false
+     mask_nonlinear: relu
+
+ Decoder: &id002 !new:speechbrain.lobes.models.dual_path.Decoder
+     in_channels: 256
+     out_channels: 1
+     kernel_size: 16
+     stride: 8
+     bias: false
+
+
+ optimizer: !name:torch.optim.Adam
+     lr: 0.00015
+     weight_decay: 0
+
+ loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper
+
+ lr_scheduler: &id005 !new:speechbrain.nnet.schedulers.ReduceLROnPlateau
+
+     factor: 0.5
+     patience: 2
+     dont_halve_until_epoch: 85
+
+ epoch_counter: &id004 !new:speechbrain.utils.epoch_loop.EpochCounter
+     limit: 200
+
+ modules:
+     encoder: *id001
+     decoder: *id002
+     masknet: *id003
+ save_all_checkpoints: true
+ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
+     checkpoints_dir: results/convtasnet-whamr/3/save
+     recoverables:
+         encoder: *id001
+         decoder: *id002
+         masknet: *id003
+         counter: *id004
+         lr_scheduler: *id005
+ train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
+     save_file: results/convtasnet-whamr/3/train_log.txt
+
+ pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
+     loadables:
+         encoder: !ref <Encoder>
+         masknet: !ref <MaskNet>
+         decoder: !ref <Decoder>
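
This hyperparams.yaml is a resolved HyperPyYAML file: loading it instantiates the encoder, ConvTasNet MaskNet, and decoder, and the pretrainer block maps them to the checkpoint files in this folder. A hedged sketch of how these pieces could be restored and applied to a mixture (the local paths, the state_dict assumption, and the tensor shapes are assumptions mirroring the SpeechBrain separation recipe, not taken from this commit):

```python
# Sketch only: load the resolved hyperparams with HyperPyYAML and restore the
# module weights from the checkpoint files in this folder. Assumes each *.ckpt
# holds a plain state_dict, which is SpeechBrain's default for torch modules.
import torch
from hyperpyyaml import load_hyperpyyaml

ckpt_dir = "convtasnet1"  # one of the folders pushed in this commit

with open(f"{ckpt_dir}/hyperparams.yaml") as fin:
    hparams = load_hyperpyyaml(fin)

encoder, masknet, decoder = hparams["Encoder"], hparams["MaskNet"], hparams["Decoder"]
for module, name in [(encoder, "encoder"), (masknet, "masknet"), (decoder, "decoder")]:
    module.load_state_dict(torch.load(f"{ckpt_dir}/{name}.ckpt", map_location="cpu"))
    module.eval()

# Separation flow following the SpeechBrain separation recipe (shapes assumed):
mix = torch.randn(1, 8000)                # 1 s of 8 kHz audio as a stand-in
with torch.no_grad():
    mix_w = encoder(mix)                  # [batch, channels, frames]
    est_mask = masknet(mix_w)             # [num_spks, batch, channels, frames]
    sep_h = mix_w.unsqueeze(0) * est_mask
    est_sources = torch.stack(
        [decoder(sep_h[i]) for i in range(est_mask.shape[0])], dim=-1
    )
```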
convtasnet1/lr_scheduler.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b633ee5d0a19696bcf1025be87e8e7ec9b783ad2b9adfa2077e057d18accaea6
+ size 495
convtasnet1/masknet.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c8606649b45db4841e79ef35ec71bb7e0f79b1e5101fe690ef083dbb7c0c21c
+ size 26404523
convtasnet1/optimizer.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eec7c786af8ea382d342b793fa3518fe337f72e114077b4755cfc0c521b74754
+ size 52803531
convtasnet2/CKPT.yaml ADDED
@@ -0,0 +1,4 @@
+ # yamllint disable
+ end-of-epoch: true
+ si-snr: -4.564481391887362
+ unixtime: 1631590081.3795788
convtasnet2/brain.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9e24193f36931b7f57932532efbdcf64971f42732383ba6808825f77db258f6
+ size 28
convtasnet2/counter.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:031b4af5197ec30a926f48cf40e11a7dbc470048a21e4003b7a3c07c5dab1baa
+ size 2
convtasnet2/decoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:680ae89a64815fb9ea7a9bf2e19682841772253084ba5db0fe30db96a62e16f1
+ size 17272
convtasnet2/encoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5830174e7975d20ec1a82e6eb90521ad2359f58d69245864c6299d7fe89b94e3
+ size 17272
convtasnet2/hyperparams.yaml ADDED
@@ -0,0 +1,168 @@
+ # Generated 2021-09-15 from:
+ # /home/mila/s/subakany/speechbrain_new/recipes/WHAMandWHAMR/separation/hparams/convtasnet-whamr.yaml
+ # yamllint disable
+ # ################################
+ # Model: SepFormer for source separation
+ # https://arxiv.org/abs/2010.13154
+ #
+ # Dataset : WHAMR!
+ # ################################
+ # Basic parameters
+ # Seed needs to be set at top of yaml, before objects with parameters are made
+ #
+ seed: 3
+ __set_seed: !apply:torch.manual_seed [3]
+
+ # Data params
+
+ # the data folder for the wham dataset
+ # data_folder needs to follow the format: /yourpath/whamr.
+ # make sure to use the name whamr at your top folder for the dataset!
+ data_folder: /network/tmp1/subakany/whamr
+
+ # the path for wsj0/si_tr_s/ folder -- only needed if dynamic mixing is used
+ # e.g. /yourpath/wsj0-processed/si_tr_s/
+ # you need to convert the original wsj0 to 8k
+ # you can do this conversion with the script ../meta/preprocess_dynamic_mixing.py
+ base_folder_dm: /network/tmp1/subakany/wsj0-processed/si_tr_s/
+
+ experiment_name: convtasnet-whamr
+ output_folder: results/convtasnet-whamr/3
+ train_log: results/convtasnet-whamr/3/train_log.txt
+ save_folder: results/convtasnet-whamr/3/save
+
+ # the file names should start with whamr instead of whamorg
+ train_data: results/convtasnet-whamr/3/save/whamr_tr.csv
+ valid_data: results/convtasnet-whamr/3/save/whamr_cv.csv
+ test_data: results/convtasnet-whamr/3/save/whamr_tt.csv
+ skip_prep: false
+
+ # Experiment params
+ auto_mix_prec: false # Set it to True for mixed precision
+ test_only: false
+ num_spks: 2 # set to 3 for wsj0-3mix
+ progressbar: true
+ save_audio: false # Save estimated sources on disk
+ sample_rate: 8000
+
+ # Training parameters
+ N_epochs: 200
+ batch_size: 1
+ lr: 0.00015
+ clip_grad_norm: 5
+ loss_upper_lim: 999999 # this is the upper limit for an acceptable loss
+ # if True, the training sequences are cut to a specified length
+ limit_training_signal_len: false
+ # this is the length of sequences if we choose to limit
+ # the signal length of training sequences
+ training_signal_len: 32000000
+
+ # Set it to True to dynamically create mixtures at training time
+ dynamic_mixing: true
+
+ # Parameters for data augmentation
+
+ # rir_path variable points to the directory of the room impulse responses
+ # e.g. /miniscratch/subakany/rir_wavs
+ # If the path does not exist, it is created automatically.
+ rir_path: /miniscratch/subakany/whamr_rirs_wav
+
+ use_wavedrop: false
+ use_speedperturb: true
+ use_speedperturb_sameforeachsource: false
+ use_rand_shift: false
+ min_shift: -8000
+ max_shift: 8000
+
+ speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
+     perturb_prob: 1.0
+     drop_freq_prob: 0.0
+     drop_chunk_prob: 0.0
+     sample_rate: 8000
+     speeds: [95, 100, 105]
+
+ wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
+     perturb_prob: 0.0
+     drop_freq_prob: 1.0
+     drop_chunk_prob: 1.0
+     sample_rate: 8000
+
+ # loss thresholding -- this thresholds the training loss
+ threshold_byloss: true
+ threshold: -30
+
+ # Encoder parameters
+ N_encoder_out: 256
+ out_channels: 256
+ kernel_size: 16
+ kernel_stride: 8
+
+ # Dataloader options
+ dataloader_opts:
+     batch_size: 1
+     num_workers: 3
+
+
+ # Specifying the network
+ Encoder: &id001 !new:speechbrain.lobes.models.dual_path.Encoder
+     kernel_size: 16
+     out_channels: 256
+
+
+ MaskNet: &id003 !new:speechbrain.lobes.models.conv_tasnet.MaskNet
+
+     N: 256
+     B: 256
+     H: 512
+     P: 3
+     X: 6
+     R: 4
+     C: 2
+     norm_type: gLN
+     causal: false
+     mask_nonlinear: relu
+
+ Decoder: &id002 !new:speechbrain.lobes.models.dual_path.Decoder
+     in_channels: 256
+     out_channels: 1
+     kernel_size: 16
+     stride: 8
+     bias: false
+
+
+ optimizer: !name:torch.optim.Adam
+     lr: 0.00015
+     weight_decay: 0
+
+ loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper
+
+ lr_scheduler: &id005 !new:speechbrain.nnet.schedulers.ReduceLROnPlateau
+
+     factor: 0.5
+     patience: 2
+     dont_halve_until_epoch: 85
+
+ epoch_counter: &id004 !new:speechbrain.utils.epoch_loop.EpochCounter
+     limit: 200
+
+ modules:
+     encoder: *id001
+     decoder: *id002
+     masknet: *id003
+ save_all_checkpoints: true
+ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
+     checkpoints_dir: results/convtasnet-whamr/3/save
+     recoverables:
+         encoder: *id001
+         decoder: *id002
+         masknet: *id003
+         counter: *id004
+         lr_scheduler: *id005
+ train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
+     save_file: results/convtasnet-whamr/3/train_log.txt
+
+ pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
+     loadables:
+         encoder: !ref <Encoder>
+         masknet: !ref <MaskNet>
+         decoder: !ref <Decoder>
convtasnet2/lr_scheduler.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7efd3c42e7652fa5529e1d01e70d8faebd9d5b3939d0d94c719990ab12e5318f
+ size 943
convtasnet2/masknet.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a43214e59d3f5b23936a9be023790a4920806186c2e814dc8f4dea13f3a5a2df
+ size 26404523
convtasnet2/optimizer.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34e64b2e266108cd74b2ebd7a5039b1d6e39ca53ae03eab20e8dc123402e6c8e
+ size 52803979
convtasnet3/CKPT.yaml ADDED
@@ -0,0 +1,4 @@
+ # yamllint disable
+ end-of-epoch: true
+ si-snr: -5.491251103860138
+ unixtime: 1631850893.625292
convtasnet3/brain.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9e24193f36931b7f57932532efbdcf64971f42732383ba6808825f77db258f6
+ size 28
convtasnet3/counter.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16dc368a89b428b2485484313ba67a3912ca03f2b2b42429174a4f8b3dc84e44
+ size 3
convtasnet3/decoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35b76309d4da7851d953ad333fe2214c5f52625f09bd786c546f4152789c7710
+ size 17272
convtasnet3/encoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9dcf0cc01b62e9bf3545eeccbc34547c718f6ae626d6f91bb6d8540ce418e15
+ size 17272
convtasnet3/hyperparams.yaml ADDED
@@ -0,0 +1,168 @@
+ # Generated 2021-09-15 from:
+ # /home/mila/s/subakany/speechbrain_new/recipes/WHAMandWHAMR/separation/hparams/convtasnet-whamr.yaml
+ # yamllint disable
+ # ################################
+ # Model: SepFormer for source separation
+ # https://arxiv.org/abs/2010.13154
+ #
+ # Dataset : WHAMR!
+ # ################################
+ # Basic parameters
+ # Seed needs to be set at top of yaml, before objects with parameters are made
+ #
+ seed: 3
+ __set_seed: !apply:torch.manual_seed [3]
+
+ # Data params
+
+ # the data folder for the wham dataset
+ # data_folder needs to follow the format: /yourpath/whamr.
+ # make sure to use the name whamr at your top folder for the dataset!
+ data_folder: /network/tmp1/subakany/whamr
+
+ # the path for wsj0/si_tr_s/ folder -- only needed if dynamic mixing is used
+ # e.g. /yourpath/wsj0-processed/si_tr_s/
+ # you need to convert the original wsj0 to 8k
+ # you can do this conversion with the script ../meta/preprocess_dynamic_mixing.py
+ base_folder_dm: /network/tmp1/subakany/wsj0-processed/si_tr_s/
+
+ experiment_name: convtasnet-whamr
+ output_folder: results/convtasnet-whamr/3
+ train_log: results/convtasnet-whamr/3/train_log.txt
+ save_folder: results/convtasnet-whamr/3/save
+
+ # the file names should start with whamr instead of whamorg
+ train_data: results/convtasnet-whamr/3/save/whamr_tr.csv
+ valid_data: results/convtasnet-whamr/3/save/whamr_cv.csv
+ test_data: results/convtasnet-whamr/3/save/whamr_tt.csv
+ skip_prep: false
+
+ # Experiment params
+ auto_mix_prec: false # Set it to True for mixed precision
+ test_only: false
+ num_spks: 2 # set to 3 for wsj0-3mix
+ progressbar: true
+ save_audio: false # Save estimated sources on disk
+ sample_rate: 8000
+
+ # Training parameters
+ N_epochs: 200
+ batch_size: 1
+ lr: 0.00015
+ clip_grad_norm: 5
+ loss_upper_lim: 999999 # this is the upper limit for an acceptable loss
+ # if True, the training sequences are cut to a specified length
+ limit_training_signal_len: false
+ # this is the length of sequences if we choose to limit
+ # the signal length of training sequences
+ training_signal_len: 32000000
+
+ # Set it to True to dynamically create mixtures at training time
+ dynamic_mixing: true
+
+ # Parameters for data augmentation
+
+ # rir_path variable points to the directory of the room impulse responses
+ # e.g. /miniscratch/subakany/rir_wavs
+ # If the path does not exist, it is created automatically.
+ rir_path: /miniscratch/subakany/whamr_rirs_wav
+
+ use_wavedrop: false
+ use_speedperturb: true
+ use_speedperturb_sameforeachsource: false
+ use_rand_shift: false
+ min_shift: -8000
+ max_shift: 8000
+
+ speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
+     perturb_prob: 1.0
+     drop_freq_prob: 0.0
+     drop_chunk_prob: 0.0
+     sample_rate: 8000
+     speeds: [95, 100, 105]
+
+ wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
+     perturb_prob: 0.0
+     drop_freq_prob: 1.0
+     drop_chunk_prob: 1.0
+     sample_rate: 8000
+
+ # loss thresholding -- this thresholds the training loss
+ threshold_byloss: true
+ threshold: -30
+
+ # Encoder parameters
+ N_encoder_out: 256
+ out_channels: 256
+ kernel_size: 16
+ kernel_stride: 8
+
+ # Dataloader options
+ dataloader_opts:
+     batch_size: 1
+     num_workers: 3
+
+
+ # Specifying the network
+ Encoder: &id001 !new:speechbrain.lobes.models.dual_path.Encoder
+     kernel_size: 16
+     out_channels: 256
+
+
+ MaskNet: &id003 !new:speechbrain.lobes.models.conv_tasnet.MaskNet
+
+     N: 256
+     B: 256
+     H: 512
+     P: 3
+     X: 6
+     R: 4
+     C: 2
+     norm_type: gLN
+     causal: false
+     mask_nonlinear: relu
+
+ Decoder: &id002 !new:speechbrain.lobes.models.dual_path.Decoder
+     in_channels: 256
+     out_channels: 1
+     kernel_size: 16
+     stride: 8
+     bias: false
+
+
+ optimizer: !name:torch.optim.Adam
+     lr: 0.00015
+     weight_decay: 0
+
+ loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper
+
+ lr_scheduler: &id005 !new:speechbrain.nnet.schedulers.ReduceLROnPlateau
+
+     factor: 0.5
+     patience: 2
+     dont_halve_until_epoch: 85
+
+ epoch_counter: &id004 !new:speechbrain.utils.epoch_loop.EpochCounter
+     limit: 200
+
+ modules:
+     encoder: *id001
+     decoder: *id002
+     masknet: *id003
+ save_all_checkpoints: true
+ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
+     checkpoints_dir: results/convtasnet-whamr/3/save
+     recoverables:
+         encoder: *id001
+         decoder: *id002
+         masknet: *id003
+         counter: *id004
+         lr_scheduler: *id005
+ train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
+     save_file: results/convtasnet-whamr/3/train_log.txt
+
+ pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
+     loadables:
+         encoder: !ref <Encoder>
+         masknet: !ref <MaskNet>
+         decoder: !ref <Decoder>
convtasnet3/lr_scheduler.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca2047f7f167981a8323fedc60c915590115468bd222ce33c19871f495b811f2
+ size 1391
convtasnet3/masknet.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f14d10e6f2e7d4eb7770f3938fac171dfb86d46bce7082fbfee400d95dfe5cc5
+ size 26404523
convtasnet3/optimizer.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17f618f7f980ba67fb00fe0fb3e13dafb0e9ec5111ffb945e94c53fdc7c7aac3
+ size 52803979
dprnn1/CKPT.yaml ADDED
@@ -0,0 +1,4 @@
+ # yamllint disable
+ end-of-epoch: true
+ si-snr: -0.5297146898724144
+ unixtime: 1631292299.804519
dprnn1/brain.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9e24193f36931b7f57932532efbdcf64971f42732383ba6808825f77db258f6
+ size 28
dprnn1/counter.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b
+ size 1
dprnn1/dataloader-TRAIN.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:876c9b16254e157d1eb645390dcfae6f29b9d3cd394e73a91de8ee5d0e67ee43
+ size 5
dprnn1/decoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2e73c51dc6b7bdcc3a546af3e2f682dc4d3f7357d1497b5e545401a3cb517b1
+ size 17272
dprnn1/encoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bdf0dbef1da288e831b4facdfa428de1c3bac0dfefe6c08a127c6e09c60d148b
+ size 17272
dprnn1/hyperparams.yaml ADDED
@@ -0,0 +1,183 @@
+ # Generated 2021-09-18 from:
+ # /home/mila/s/subakany/speechbrain_new/recipes/WHAMandWHAMR/separation/hparams/dprnn-whamr.yaml
+ # yamllint disable
+ # ################################
+ # Model: SepFormer for source separation
+ # https://arxiv.org/abs/2010.13154
+ #
+ # Dataset : WHAMR!
+ # ################################
+ # Basic parameters
+ # Seed needs to be set at top of yaml, before objects with parameters are made
+ #
+ seed: 3
+ __set_seed: !apply:torch.manual_seed [3]
+
+ # Data params
+
+ # the data folder for the wham dataset
+ # data_folder needs to follow the format: /yourpath/whamr.
+ # make sure to use the name whamr at your top folder for the dataset!
+ data_folder: /network/tmp1/subakany/whamr/
+
+ # the path for wsj0/si_tr_s/ folder -- only needed if dynamic mixing is used
+ # e.g. /yourpath/wsj0-processed/si_tr_s/
+ # you need to convert the original wsj0 to 8k
+ # you can do this conversion with the script ../meta/preprocess_dynamic_mixing.py
+ base_folder_dm: /network/tmp1/subakany/wsj0-processed/si_tr_s/
+
+ experiment_name: dprnn-whamr
+ output_folder: results/dprnn-whamr/3
+ train_log: results/dprnn-whamr/3/train_log.txt
+ save_folder: results/dprnn-whamr/3/save
+
+ # the file names should start with whamr instead of whamorg
+ train_data: results/dprnn-whamr/3/save/whamr_tr.csv
+ valid_data: results/dprnn-whamr/3/save/whamr_cv.csv
+ test_data: results/dprnn-whamr/3/save/whamr_tt.csv
+ skip_prep: false
+
+ # Experiment params
+ auto_mix_prec: true # Set it to True for mixed precision
+ test_only: false
+ num_spks: 2 # set to 3 for wsj0-3mix
+ progressbar: true
+ save_audio: false # Save estimated sources on disk
+ sample_rate: 8000
+
+ # Training parameters
+ N_epochs: 200
+ batch_size: 1
+ lr: 0.00015
+ clip_grad_norm: 5
+ loss_upper_lim: 999999 # this is the upper limit for an acceptable loss
+ # if True, the training sequences are cut to a specified length
+ limit_training_signal_len: false
+ # this is the length of sequences if we choose to limit
+ # the signal length of training sequences
+ training_signal_len: 32000000
+
+ # Set it to True to dynamically create mixtures at training time
+ dynamic_mixing: true
+
+ # Parameters for data augmentation
+
+ # rir_path variable points to the directory of the room impulse responses
+ # e.g. /miniscratch/subakany/rir_wavs
+ # If the path does not exist, it is created automatically.
+ rir_path: /miniscratch/subakany/whamr_rirs_wav
+
+ use_wavedrop: false
+ use_speedperturb: true
+ use_speedperturb_sameforeachsource: false
+ use_rand_shift: false
+ min_shift: -8000
+ max_shift: 8000
+
+ speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
+     perturb_prob: 1.0
+     drop_freq_prob: 0.0
+     drop_chunk_prob: 0.0
+     sample_rate: 8000
+     speeds: [95, 100, 105]
+
+ wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
+     perturb_prob: 0.0
+     drop_freq_prob: 1.0
+     drop_chunk_prob: 1.0
+     sample_rate: 8000
+
+ # loss thresholding -- this thresholds the training loss
+ threshold_byloss: true
+ threshold: -30
+
+ # Encoder parameters
+ N_encoder_out: 256
+ out_channels: 256
+ kernel_size: 16
+ kernel_stride: 8
+
+ # Dataloader options
+ dataloader_opts:
+     batch_size: 1
+     num_workers: 3
+
+
+ # Specifying the network
+ Encoder: &id003 !new:speechbrain.lobes.models.dual_path.Encoder
+     kernel_size: 16
+     out_channels: 256
+
+
+ intra: &id001 !new:speechbrain.lobes.models.dual_path.SBRNNBlock
+     num_layers: 1
+     input_size: 256
+     hidden_channels: 256
+     dropout: 0
+     bidirectional: true
+
+ inter: &id002 !new:speechbrain.lobes.models.dual_path.SBRNNBlock
+     num_layers: 1
+     input_size: 256
+     hidden_channels: 256
+     dropout: 0
+     bidirectional: true
+
+ MaskNet: &id005 !new:speechbrain.lobes.models.dual_path.Dual_Path_Model
+
+     num_spks: 2
+     in_channels: 256
+     out_channels: 256
+     num_layers: 6
+     K: 250
+     intra_model: *id001
+     inter_model: *id002
+     norm: ln
+     linear_layer_after_inter_intra: true
+     skip_around_intra: true
+
+ Decoder: &id004 !new:speechbrain.lobes.models.dual_path.Decoder
+     in_channels: 256
+     out_channels: 1
+     kernel_size: 16
+     stride: 8
+     bias: false
+
+
+
+ optimizer: !name:torch.optim.Adam
+     lr: 0.00015
+     weight_decay: 0
+
+ loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper
+
+ lr_scheduler: &id007 !new:speechbrain.nnet.schedulers.ReduceLROnPlateau
+
+     factor: 0.5
+     patience: 2
+     dont_halve_until_epoch: 85
+
+ epoch_counter: &id006 !new:speechbrain.utils.epoch_loop.EpochCounter
+     limit: 200
+
+ modules:
+     encoder: *id003
+     decoder: *id004
+     masknet: *id005
+ save_all_checkpoints: true
+ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
+     checkpoints_dir: results/dprnn-whamr/3/save
+     recoverables:
+         encoder: *id003
+         decoder: *id004
+         masknet: *id005
+         counter: *id006
+         lr_scheduler: *id007
+ train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
+     save_file: results/dprnn-whamr/3/train_log.txt
+
+ pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
+     loadables:
+         encoder: !ref <Encoder>
+         masknet: !ref <MaskNet>
+         decoder: !ref <Decoder>
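
Compared to the ConvTasNet folders, the DPRNN configuration swaps the mask network: two single-layer bidirectional SBRNNBlocks are passed to a Dual_Path_Model as the intra- and inter-chunk models (chunk size K: 250, 6 layers). A hedged sketch of building the same mask network directly in Python, mirroring the values above (the keyword arguments are taken from the yaml, so they are assumed to match the installed SpeechBrain version):

```python
# Sketch only: construct the DPRNN mask network with the same values as
# dprnn1/hyperparams.yaml. Assumes a SpeechBrain version whose constructors
# accept exactly the keyword arguments used in that yaml.
from speechbrain.lobes.models.dual_path import SBRNNBlock, Dual_Path_Model

intra = SBRNNBlock(num_layers=1, input_size=256, hidden_channels=256,
                   dropout=0, bidirectional=True)
inter = SBRNNBlock(num_layers=1, input_size=256, hidden_channels=256,
                   dropout=0, bidirectional=True)

masknet = Dual_Path_Model(
    num_spks=2,
    in_channels=256,
    out_channels=256,
    num_layers=6,
    K=250,
    intra_model=intra,
    inter_model=inter,
    norm="ln",
    linear_layer_after_inter_intra=True,
    skip_around_intra=True,
)
print(sum(p.numel() for p in masknet.parameters()))  # parameter count of the mask net
```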
dprnn1/lr_scheduler.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e124452ee7deeb68cdaa636fbcb85a7654dfda37ac2b38ccebf02d5604de85dc
+ size 495
dprnn1/masknet.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:427819af9b78e9700010f6cbe84af6c33cb5b229762018c76c3ba9117c4f8761
+ size 58477294
dprnn1/optimizer.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52cdd192d0a27ca7a9a065505c30cb0aa025abf4afdc88fbcf006249929514e7
+ size 117035321
dprnn2/CKPT.yaml ADDED
@@ -0,0 +1,4 @@
+ # yamllint disable
+ end-of-epoch: true
+ si-snr: -5.874772780568888
+ unixtime: 1631748976.9005034
dprnn2/brain.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9e24193f36931b7f57932532efbdcf64971f42732383ba6808825f77db258f6
+ size 28
dprnn2/counter.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2fca346db656187102ce806ac732e06a62df0dbb2829e511a770556d398e1a6e
+ size 2
dprnn2/decoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:74fac8e4c0330aca71afef77553f5da11fa5a89bff026e9fe8e5fd529ddc3b9a
+ size 17272
dprnn2/encoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aef9291a6d0d0a74955bf539c81d36a35bdc6900306d5d1c69dca57b0b315e4c
+ size 17272
dprnn2/hyperparams.yaml ADDED
@@ -0,0 +1,183 @@
+ # Generated 2021-09-18 from:
+ # /home/mila/s/subakany/speechbrain_new/recipes/WHAMandWHAMR/separation/hparams/dprnn-whamr.yaml
+ # yamllint disable
+ # ################################
+ # Model: SepFormer for source separation
+ # https://arxiv.org/abs/2010.13154
+ #
+ # Dataset : WHAMR!
+ # ################################
+ # Basic parameters
+ # Seed needs to be set at top of yaml, before objects with parameters are made
+ #
+ seed: 3
+ __set_seed: !apply:torch.manual_seed [3]
+
+ # Data params
+
+ # the data folder for the wham dataset
+ # data_folder needs to follow the format: /yourpath/whamr.
+ # make sure to use the name whamr at your top folder for the dataset!
+ data_folder: /network/tmp1/subakany/whamr/
+
+ # the path for wsj0/si_tr_s/ folder -- only needed if dynamic mixing is used
+ # e.g. /yourpath/wsj0-processed/si_tr_s/
+ # you need to convert the original wsj0 to 8k
+ # you can do this conversion with the script ../meta/preprocess_dynamic_mixing.py
+ base_folder_dm: /network/tmp1/subakany/wsj0-processed/si_tr_s/
+
+ experiment_name: dprnn-whamr
+ output_folder: results/dprnn-whamr/3
+ train_log: results/dprnn-whamr/3/train_log.txt
+ save_folder: results/dprnn-whamr/3/save
+
+ # the file names should start with whamr instead of whamorg
+ train_data: results/dprnn-whamr/3/save/whamr_tr.csv
+ valid_data: results/dprnn-whamr/3/save/whamr_cv.csv
+ test_data: results/dprnn-whamr/3/save/whamr_tt.csv
+ skip_prep: false
+
+ # Experiment params
+ auto_mix_prec: true # Set it to True for mixed precision
+ test_only: false
+ num_spks: 2 # set to 3 for wsj0-3mix
+ progressbar: true
+ save_audio: false # Save estimated sources on disk
+ sample_rate: 8000
+
+ # Training parameters
+ N_epochs: 200
+ batch_size: 1
+ lr: 0.00015
+ clip_grad_norm: 5
+ loss_upper_lim: 999999 # this is the upper limit for an acceptable loss
+ # if True, the training sequences are cut to a specified length
+ limit_training_signal_len: false
+ # this is the length of sequences if we choose to limit
+ # the signal length of training sequences
+ training_signal_len: 32000000
+
+ # Set it to True to dynamically create mixtures at training time
+ dynamic_mixing: true
+
+ # Parameters for data augmentation
+
+ # rir_path variable points to the directory of the room impulse responses
+ # e.g. /miniscratch/subakany/rir_wavs
+ # If the path does not exist, it is created automatically.
+ rir_path: /miniscratch/subakany/whamr_rirs_wav
+
+ use_wavedrop: false
+ use_speedperturb: true
+ use_speedperturb_sameforeachsource: false
+ use_rand_shift: false
+ min_shift: -8000
+ max_shift: 8000
+
+ speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
+     perturb_prob: 1.0
+     drop_freq_prob: 0.0
+     drop_chunk_prob: 0.0
+     sample_rate: 8000
+     speeds: [95, 100, 105]
+
+ wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
+     perturb_prob: 0.0
+     drop_freq_prob: 1.0
+     drop_chunk_prob: 1.0
+     sample_rate: 8000
+
+ # loss thresholding -- this thresholds the training loss
+ threshold_byloss: true
+ threshold: -30
+
+ # Encoder parameters
+ N_encoder_out: 256
+ out_channels: 256
+ kernel_size: 16
+ kernel_stride: 8
+
+ # Dataloader options
+ dataloader_opts:
+     batch_size: 1
+     num_workers: 3
+
+
+ # Specifying the network
+ Encoder: &id003 !new:speechbrain.lobes.models.dual_path.Encoder
+     kernel_size: 16
+     out_channels: 256
+
+
+ intra: &id001 !new:speechbrain.lobes.models.dual_path.SBRNNBlock
+     num_layers: 1
+     input_size: 256
+     hidden_channels: 256
+     dropout: 0
+     bidirectional: true
+
+ inter: &id002 !new:speechbrain.lobes.models.dual_path.SBRNNBlock
+     num_layers: 1
+     input_size: 256
+     hidden_channels: 256
+     dropout: 0
+     bidirectional: true
+
+ MaskNet: &id005 !new:speechbrain.lobes.models.dual_path.Dual_Path_Model
+
+     num_spks: 2
+     in_channels: 256
+     out_channels: 256
+     num_layers: 6
+     K: 250
+     intra_model: *id001
+     inter_model: *id002
+     norm: ln
+     linear_layer_after_inter_intra: true
+     skip_around_intra: true
+
+ Decoder: &id004 !new:speechbrain.lobes.models.dual_path.Decoder
+     in_channels: 256
+     out_channels: 1
+     kernel_size: 16
+     stride: 8
+     bias: false
+
+
+
+ optimizer: !name:torch.optim.Adam
+     lr: 0.00015
+     weight_decay: 0
+
+ loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper
+
+ lr_scheduler: &id007 !new:speechbrain.nnet.schedulers.ReduceLROnPlateau
+
+     factor: 0.5
+     patience: 2
+     dont_halve_until_epoch: 85
+
+ epoch_counter: &id006 !new:speechbrain.utils.epoch_loop.EpochCounter
+     limit: 200
+
+ modules:
+     encoder: *id003
+     decoder: *id004
+     masknet: *id005
+ save_all_checkpoints: true
+ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
+     checkpoints_dir: results/dprnn-whamr/3/save
+     recoverables:
+         encoder: *id003
+         decoder: *id004
+         masknet: *id005
+         counter: *id006
+         lr_scheduler: *id007
+ train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
+     save_file: results/dprnn-whamr/3/train_log.txt
+
+ pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
+     loadables:
+         encoder: !ref <Encoder>
+         masknet: !ref <MaskNet>
+         decoder: !ref <Decoder>
dprnn2/lr_scheduler.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb3b7d061e814f1fa9b59ec858d9f1c346671ce3d81c15b3a1a176a371b52037
+ size 943
dprnn2/masknet.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7dbfce012dc85b68b3cc9f8e1d10b1fc63ef54248b34d562f61bb8c0f7a23030
+ size 58477294
dprnn2/optimizer.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7323ab9ed7747b6bb0dd116f2e7f6271d1f6f9d28971644210e5016fa7ee033
+ size 117035641
dprnn3/CKPT.yaml ADDED
@@ -0,0 +1,4 @@
+ # yamllint disable
+ end-of-epoch: true
+ si-snr: -7.044497849920388
+ unixtime: 1632151547.7435942
dprnn3/brain.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9e24193f36931b7f57932532efbdcf64971f42732383ba6808825f77db258f6
+ size 28
dprnn3/counter.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3346f2bbf6c34bd2dbe28bd1bb657d0e9c37392a1d5ec9929e6a5df4763ddc2d
+ size 3
dprnn3/decoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d26e5412c04d918938a2b5a0e310d814fcf1451c3c7e9dda9f52b549f553f64
+ size 17272