subakany committed
Commit d004f13
1 Parent(s): d74f99f

pushing model files

Files changed (9)
  1. CKPT.yaml +4 -0
  2. brain.ckpt +3 -0
  3. counter.ckpt +3 -0
  4. decoder.ckpt +3 -0
  5. encoder.ckpt +3 -0
  6. hyperparams.yaml +188 -0
  7. lr_scheduler.ckpt +3 -0
  8. masknet.ckpt +3 -0
  9. optimizer.ckpt +3 -0
CKPT.yaml ADDED
@@ -0,0 +1,4 @@
+ # yamllint disable
+ end-of-epoch: true
+ si-snr: -11.609822317790979
+ unixtime: 1636546512.8062298
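
CKPT.yaml is the metadata SpeechBrain's Checkpointer writes alongside each checkpoint: whether the save landed on an epoch boundary, the tracked validation metric (the separation recipes store the loss, i.e. negative SI-SNR, so -11.61 corresponds to roughly 11.6 dB SI-SNR), and the save time as a Unix timestamp. A minimal sketch of reading it, assuming a local copy of the file:

import datetime
import yaml

# Parse the checkpoint metadata written by speechbrain's Checkpointer.
with open("CKPT.yaml") as f:
    meta = yaml.safe_load(f)

print(meta["end-of-epoch"])   # True: saved at an epoch boundary
print(meta["si-snr"])         # tracked metric; here the (negative) SI-SNR loss
print(datetime.datetime.fromtimestamp(meta["unixtime"]))  # when it was saved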
brain.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9e24193f36931b7f57932532efbdcf64971f42732383ba6808825f77db258f6
+ size 28
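
This and the remaining .ckpt entries are Git LFS pointer files rather than the tensors themselves: each records the LFS spec version, the SHA-256 of the real payload, and its size in bytes. The sizes make the layout clear: brain.ckpt (28 B) and counter.ckpt (3 B) are tiny bookkeeping blobs, while masknet.ckpt (~113 MB) and optimizer.ckpt (~205 MB) hold the bulk of the weights and optimizer state. After a plain git clone, git lfs pull materializes the payloads; a hedged alternative via huggingface_hub follows, where the repo id is an assumption since this commit does not name the repository:

from huggingface_hub import hf_hub_download

# Resolve one LFS-backed file to its real payload in the local cache.
# repo_id is an assumption; this commit view does not state it.
path = hf_hub_download(
    repo_id="speechbrain/sepformer-whamr-enhancement",
    filename="masknet.ckpt",
)
print(path)  # local path to the downloaded checkpoint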
counter.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbae772db29058a88f9bd830e957c695347c41b6162a7eb9a9ea13def34be56b
+ size 3
decoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91794bd8c7aba5b7ca503574bd9f4997869e253021142958658c8e1e977b5485
+ size 17272
encoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f19c8a4d767bef3091c7fd5c55064c4cc6e570b794b501b7c6ff95ee3ea3cef
+ size 17272
hyperparams.yaml ADDED
@@ -0,0 +1,188 @@
+ # Generated 2021-11-21 from:
+ # /home/mila/s/subakany/speechbrain-soundskrit/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr-DM.yaml
+ # yamllint disable
+ # ################################
+ # Model: SepFormer for source separation
+ # https://arxiv.org/abs/2010.13154
+ #
+ # Dataset : WHAMR!
+ # ################################
+ # Basic parameters
+ # Seed needs to be set at top of yaml, before objects with parameters are made
+ #
+ seed: 1234
+ __set_seed: !apply:torch.manual_seed [1234]
+
+ # Data params
+
+ # the data folder for the wham dataset
+ # data_folder needs to follow the format: /yourpath/whamr.
+ # make sure to use the name whamr at your top folder for the dataset!
+ data_folder: /network/tmp1/subakany/whamr
+ task: enhancement
+ dereverberate: false
+
+ # the path for wsj0/si_tr_s/ folder -- only needed if dynamic mixing is used
+ # e.g. /yourpath/wsj0-processed/si_tr_s/
+ # you need to convert the original wsj0 to 8k
+ # you can do this conversion with the script ../meta/preprocess_dynamic_mixing.py
+ base_folder_dm: /network/tmp1/subakany/wsj0-processed/si_tr_s/
+
+ experiment_name: sepformer-whamr-enhancement-DM
+ output_folder: results/sepformer-whamr-enhancement-DM/1234
+ train_log: results/sepformer-whamr-enhancement-DM/1234/train_log.txt
+ save_folder: results/sepformer-whamr-enhancement-DM/1234/save
+
+ # the file names should start with whamr instead of whamorg
+ train_data: results/sepformer-whamr-enhancement-DM/1234/save/whamr_tr.csv
+ valid_data: results/sepformer-whamr-enhancement-DM/1234/save/whamr_cv.csv
+ test_data: results/sepformer-whamr-enhancement-DM/1234/save/whamr_tt.csv
+ skip_prep: false
+
+ # Experiment params
+ auto_mix_prec: true # Set it to True for mixed precision
+ test_only: false
+ num_spks: 1 # set to 3 for wsj0-3mix
+ progressbar: true
+ save_audio: true # Save estimated sources on disk
+ sample_rate: 8000
+ n_audio_to_save: 20
+
+ # Training parameters
+ N_epochs: 200
+ batch_size: 1
+ lr: 0.00015
+ clip_grad_norm: 5
+ loss_upper_lim: 999999 # this is the upper limit for an acceptable loss
+ # if True, the training sequences are cut to a specified length
+ limit_training_signal_len: false
+ # this is the length of sequences if we choose to limit
+ # the signal length of training sequences
+ training_signal_len: 32000000
+
+ # Set it to True to dynamically create mixtures at training time
+ dynamic_mixing: true
+
+ # Parameters for data augmentation
+
+ # rir_path variable points to the directory of the room impulse responses
+ # e.g. /miniscratch/subakany/rir_wavs
+ # If the path does not exist, it is created automatically.
+ rir_path: /network/scratch/s/subakany/whamr_rirs_wavs_8k/
+
+ use_wavedrop: false
+ use_speedperturb: true
+ use_speedperturb_sameforeachsource: false
+ use_rand_shift: false
+ min_shift: -8000
+ max_shift: 8000
+
+ speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
+     perturb_prob: 1.0
+     drop_freq_prob: 0.0
+     drop_chunk_prob: 0.0
+     sample_rate: 8000
+     speeds: [95, 100, 105]
+
+ wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
+     perturb_prob: 0.0
+     drop_freq_prob: 1.0
+     drop_chunk_prob: 1.0
+     sample_rate: 8000
+
+ # loss thresholding -- this thresholds the training loss
+ threshold_byloss: true
+ threshold: -30
+
+ # Encoder parameters
+ N_encoder_out: 256
+ out_channels: 256
+ kernel_size: 16
+ kernel_stride: 8
+
+ # Dataloader options
+ dataloader_opts:
+     batch_size: 1
+     num_workers: 3
+
+ # Specifying the network
+ Encoder: &id003 !new:speechbrain.lobes.models.dual_path.Encoder
+     kernel_size: 16
+     out_channels: 256
+
+
+ SBtfintra: &id001 !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
+     num_layers: 8
+     d_model: 256
+     nhead: 8
+     d_ffn: 1024
+     dropout: 0
+     use_positional_encoding: true
+     norm_before: true
+
+ SBtfinter: &id002 !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
+     num_layers: 8
+     d_model: 256
+     nhead: 8
+     d_ffn: 1024
+     dropout: 0
+     use_positional_encoding: true
+     norm_before: true
+
+ MaskNet: &id005 !new:speechbrain.lobes.models.dual_path.Dual_Path_Model
+
+     num_spks: 1
+     in_channels: 256
+     out_channels: 256
+     num_layers: 2
+     K: 250
+     intra_model: *id001
+     inter_model: *id002
+     norm: ln
+     linear_layer_after_inter_intra: false
+     skip_around_intra: true
+
+ Decoder: &id004 !new:speechbrain.lobes.models.dual_path.Decoder
+     in_channels: 256
+     out_channels: 1
+     kernel_size: 16
+     stride: 8
+     bias: false
+
+ optimizer: !name:torch.optim.Adam
+     lr: 0.00015
+     weight_decay: 0
+
+ loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper
+
+ lr_scheduler: &id007 !new:speechbrain.nnet.schedulers.ReduceLROnPlateau
+
+     factor: 0.5
+     patience: 2
+     dont_halve_until_epoch: 85
+
+ epoch_counter: &id006 !new:speechbrain.utils.epoch_loop.EpochCounter
+     limit: 200
+
+ modules:
+     encoder: *id003
+     decoder: *id004
+     masknet: *id005
+ save_all_checkpoints: false
+ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
+     checkpoints_dir: results/sepformer-whamr-enhancement-DM/1234/save
+     recoverables:
+         encoder: *id003
+         decoder: *id004
+         masknet: *id005
+         counter: *id006
+         lr_scheduler: *id007
+ train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
+     save_file: results/sepformer-whamr-enhancement-DM/1234/train_log.txt
+
+ pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
+     loadables:
+         encoder: !ref <Encoder>
+         masknet: !ref <MaskNet>
+         decoder: !ref <Decoder>
+
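
hyperparams.yaml is HyperPyYAML, not plain YAML: !new: tags instantiate classes, !name: binds callables such as the Adam constructor and the SI-SNR/PIT loss, !ref resolves cross-references, and the &id/*id anchors make, say, modules.masknet and checkpointer.recoverables.masknet the same object. Loading the file therefore builds the whole model graph: a convolutional Encoder/Decoder (kernel 16, stride 8, 256 channels) around a two-layer dual-path SepFormer mask network whose intra- and inter-chunk models are the two 8-layer, 8-head transformer blocks. A minimal loading sketch, assuming a local copy of the file; the data_folder override is illustrative, since the paths baked into the file point at the author's cluster:

from hyperpyyaml import load_hyperpyyaml

# Instantiate every object declared in the file; overrides replace
# top-level keys before resolution (the path below is a placeholder).
with open("hyperparams.yaml") as f:
    hparams = load_hyperpyyaml(f, overrides={"data_folder": "/path/to/whamr"})

encoder = hparams["Encoder"]  # speechbrain.lobes.models.dual_path.Encoder
masknet = hparams["MaskNet"]  # dual-path SepFormer masking network
decoder = hparams["Decoder"]

The pretrainer block at the end is what lets the pretrained-model tooling map encoder.ckpt, masknet.ckpt, and decoder.ckpt from this commit onto those three modules.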
lr_scheduler.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf1b43deb7479588149147d22a956e1d20fbe00ac1aaaa0774e2a2dba0b7fdd0
+ size 1711
masknet.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26e6b5e91dd8fd7eb6c7583e87b51c000f142598a749402058d9ec5bc3c3683f
+ size 112849478
optimizer.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f4f4ce52a298a9567f0951c0020c8d95a7f7cd3f0f4ef9a2929f02f22bec0d4d
+ size 205168377
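
End to end, these files back the usual SpeechBrain inference path: the pretrained interface downloads the repo, runs the Pretrainer to load encoder, masknet, and decoder, and exposes separation/enhancement calls. A hedged sketch, assuming the files belong to a published model such as speechbrain/sepformer-whamr-enhancement (the repo id is not stated here) and that the repo's hyperparams are inference-ready:

import torchaudio
from speechbrain.pretrained import SepformerSeparation

# Assumed repo id; this commit view does not name the repository.
model = SepformerSeparation.from_hparams(
    source="speechbrain/sepformer-whamr-enhancement",
    savedir="pretrained_models/sepformer-whamr-enhancement",
)

# The model was trained at 8 kHz (sample_rate: 8000 above).
est_sources = model.separate_file(path="noisy_8k.wav")  # [batch, time, num_spks]
torchaudio.save("enhanced.wav", est_sources[:, :, 0].detach().cpu(), 8000)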