cemsubakan committed
Commit 8025fa8
1 Parent(s): 43ffef3

added the model files
Files changed (9):
  1. .gitattributes +6 -0
  2. brain.ckpt +3 -0
  3. config.json +3 -0
  4. counter.ckpt +3 -0
  5. decoder.ckpt +3 -0
  6. encoder.ckpt +3 -0
  7. hyperparams.yaml +176 -0
  8. masknet.ckpt +3 -0
  9. optimizer.ckpt +3 -0
.gitattributes CHANGED
@@ -30,3 +30,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+optimizer.ckpt filter=lfs diff=lfs merge=lfs -text
+brain.ckpt filter=lfs diff=lfs merge=lfs -text
+counter.ckpt filter=lfs diff=lfs merge=lfs -text
+decoder.ckpt filter=lfs diff=lfs merge=lfs -text
+encoder.ckpt filter=lfs diff=lfs merge=lfs -text
+masknet.ckpt filter=lfs diff=lfs merge=lfs -text
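These rules make Git LFS store each checkpoint as a three-line pointer stub (version / oid / size, as the per-file diffs below show) while the actual binary lives in LFS storage. A minimal sketch of fetching the resolved binary through the Hub API; the repo_id is an assumption based on the experiment_name in hyperparams.yaml and is not stated anywhere in this commit:

from huggingface_hub import hf_hub_download

# Downloads the real weights behind the LFS pointer (or reuses the local cache).
ckpt_path = hf_hub_download(
    repo_id="speechbrain/sepformer-libri2mix",  # assumed repo id
    filename="masknet.ckpt",
)
print(ckpt_path)  # local path to the ~113 MB binary, not the 3-line pointer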
brain.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9e24193f36931b7f57932532efbdcf64971f42732383ba6808825f77db258f6
+size 28
config.json ADDED
@@ -0,0 +1,3 @@
+{
+    "speechbrain_interface": "SepformerSeparation"
+}
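config.json tells the Hub which SpeechBrain pretrained interface wraps this model. A minimal usage sketch for that interface, assuming the same Hub repo id as above (an assumption, not stated in this commit) and a hypothetical input file mixture.wav:

import torchaudio
from speechbrain.pretrained import SepformerSeparation

# from_hparams fetches hyperparams.yaml plus the checkpoint files, and the
# pretrainer defined there loads the encoder/masknet/decoder weights.
model = SepformerSeparation.from_hparams(
    source="speechbrain/sepformer-libri2mix",        # assumed repo id
    savedir="pretrained_models/sepformer-libri2mix",
)

est_sources = model.separate_file(path="mixture.wav")  # hypothetical mixture
# est_sources: [batch, time, num_spks]; save each 8 kHz estimate separately.
torchaudio.save("source1hat.wav", est_sources[:, :, 0].detach().cpu(), 8000)
torchaudio.save("source2hat.wav", est_sources[:, :, 1].detach().cpu(), 8000)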
counter.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f2253d7e228b22a08bda1f09c516f6fead81df6536eb02fa991a34bb38d9be8
+size 2
decoder.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05340634088efb00ec448f548af107d5d932057ea214dd64497f34106247e184
+size 17272
encoder.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:740d082e75b92370bc12f176d5b5554f6383cf8bc607da8ac8a550255987ed4a
+size 17272
hyperparams.yaml ADDED
@@ -0,0 +1,176 @@
+# Generated 2021-07-13 from:
+# /home/mila/s/subakany/speechbrain_new/recipes/LibriMix/separation/hparams/sepformer-libri2mix.yaml
+# yamllint disable
+# ################################
+# Model: SepFormer for source separation
+# https://arxiv.org/abs/2010.13154
+# Dataset : Libri2mix
+# ################################
+#
+# Basic parameters
+# Seed needs to be set at top of yaml, before objects with parameters are made
+#
+seed: 1234
+__set_seed: !apply:torch.manual_seed [1234]
+
+# Data params
+
+# e.g. '/yourpath/Libri2Mix/train-clean-360/'
+# the data folder is needed even if dynamic mixing is applied
+data_folder: /miniscratch/subakany/LibriMixData_new/Libri2Mix/
+
+# this is the base folder for dynamic mixing
+base_folder_dm: /miniscratch/subakany/LibriMixData_new/LibriSpeech/train-clean-360_processed/
+
+experiment_name: sepformer-libri2mix
+output_folder: results/sepformer-libri2mix/1234
+train_log: results/sepformer-libri2mix/1234/train_log.txt
+save_folder: results/sepformer-libri2mix/1234/save
+train_data: results/sepformer-libri2mix/1234/save/libri2mix_train-360.csv
+valid_data: results/sepformer-libri2mix/1234/save/libri2mix_dev.csv
+test_data: results/sepformer-libri2mix/1234/save/libri2mix_test.csv
+skip_prep: false
+
+ckpt_interval_minutes: 60
+
+# Experiment params
+auto_mix_prec: true # Set it to True for mixed precision
+test_only: true
+num_spks: 2
+progressbar: true
+save_audio: false # Save estimated sources on disk
+sample_rate: 8000
+
+# Training parameters
+N_epochs: 200
+batch_size: 1
+lr: 0.00015
+clip_grad_norm: 5
+loss_upper_lim: 999999 # this is the upper limit for an acceptable loss
+# if True, the training sequences are cut to a specified length
+limit_training_signal_len: false
+# this is the length of sequences if we choose to limit
+# the signal length of training sequences
+training_signal_len: 32000000
+
+# Set it to True to dynamically create mixtures at training time
+dynamic_mixing: true
+use_wham_noise: false
+
+# Parameters for data augmentation
+use_wavedrop: false
+use_speedperturb: true
+use_speedperturb_sameforeachsource: false
+use_rand_shift: false
+min_shift: -8000
+max_shift: 8000
+
+speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
+    perturb_prob: 1.0
+    drop_freq_prob: 0.0
+    drop_chunk_prob: 0.0
+    sample_rate: 8000
+    speeds: [95, 100, 105]
+
+wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
+    perturb_prob: 0.0
+    drop_freq_prob: 1.0
+    drop_chunk_prob: 1.0
+    sample_rate: 8000
+
+# loss thresholding -- this thresholds the training loss
+threshold_byloss: true
+threshold: -30
+
+# Encoder parameters
+N_encoder_out: 256
+out_channels: 256
+kernel_size: 16
+kernel_stride: 8
+
+# Dataloader options
+dataloader_opts:
+    batch_size: 1
+    num_workers: 0
+
+
+# Specifying the network
+Encoder: &id003 !new:speechbrain.lobes.models.dual_path.Encoder
+    kernel_size: 16
+    out_channels: 256
+
+
+SBtfintra: &id001 !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
+    num_layers: 8
+    d_model: 256
+    nhead: 8
+    d_ffn: 1024
+    dropout: 0
+    use_positional_encoding: true
+    norm_before: true
+
+SBtfinter: &id002 !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
+    num_layers: 8
+    d_model: 256
+    nhead: 8
+    d_ffn: 1024
+    dropout: 0
+    use_positional_encoding: true
+    norm_before: true
+
+MaskNet: &id005 !new:speechbrain.lobes.models.dual_path.Dual_Path_Model
+
+    num_spks: 2
+    in_channels: 256
+    out_channels: 256
+    num_layers: 2
+    K: 250
+    intra_model: *id001
+    inter_model: *id002
+    norm: ln
+    linear_layer_after_inter_intra: false
+    skip_around_intra: true
+
+Decoder: &id004 !new:speechbrain.lobes.models.dual_path.Decoder
+    in_channels: 256
+    out_channels: 1
+    kernel_size: 16
+    stride: 8
+    bias: false
+
+optimizer: !name:torch.optim.Adam
+    lr: 0.00015
+    weight_decay: 0
+
+loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper
+
+lr_scheduler: !new:speechbrain.nnet.schedulers.ReduceLROnPlateau
+    factor: 0.5
+    patience: 2
+    dont_halve_until_epoch: 5
+
+epoch_counter: &id006 !new:speechbrain.utils.epoch_loop.EpochCounter
+    # lr_scheduler: !ref <lr_scheduler>
+
+    limit: 200
+
+modules:
+    encoder: *id003
+    decoder: *id004
+    masknet: *id005
+checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
+    checkpoints_dir: results/sepformer-libri2mix/1234/save
+    recoverables:
+        encoder: *id003
+        decoder: *id004
+        masknet: *id005
+        counter: *id006
+train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
+    save_file: results/sepformer-libri2mix/1234/train_log.txt
+
+
+pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
+    loadables:
+        encoder: !ref <Encoder>
+        masknet: !ref <MaskNet>
+        decoder: !ref <Decoder>
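The HyperPyYAML file doubles as a loading recipe: !new: tags instantiate the modules on load, and the pretrainer block maps each loadable to one of the .ckpt files in this commit. A sketch of building the pipeline directly from it, assuming speechbrain and hyperpyyaml are installed and the file is local; the forward-pass wiring mirrors the LibriMix separation recipe and is an assumption about intended use, not code from this commit:

import torch
from hyperpyyaml import load_hyperpyyaml

with open("hyperparams.yaml") as f:
    hparams = load_hyperpyyaml(f)  # !new: tags construct the modules here

encoder = hparams["Encoder"]  # conv front-end: kernel 16, stride 8, 256 filters
masknet = hparams["MaskNet"]  # dual-path SepFormer mask estimator
decoder = hparams["Decoder"]  # transposed-conv resynthesis

mix = torch.randn(1, 16000)            # e.g. 2 s of 8 kHz audio
mix_w = encoder(mix)                   # [B, N, L] latent mixture
est_mask = masknet(mix_w)              # [num_spks, B, N, L] masks
sep_h = torch.stack([mix_w] * hparams["num_spks"]) * est_mask
est_sources = torch.cat(
    [decoder(sep_h[i]).unsqueeze(-1) for i in range(hparams["num_spks"])],
    dim=-1,
)                                      # [B, T, num_spks] waveform estimates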
masknet.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f54762647082a8fe75a1cacc6c3d463b967223fc77c836cc4fb8ca2a272d9a75
+size 113112646
optimizer.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:287477b926bece97fd348decd6a43a9f74fed365d372484756daf06f1e5ae78a
+size 205694713
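Of the checkpoints in this commit, encoder.ckpt, masknet.ckpt, and decoder.ckpt hold module weights (the Pretrainer's loadables), while brain.ckpt, counter.ckpt, and optimizer.ckpt carry trainer state kept by the Checkpointer for resuming training; the sizes are consistent with that split (optimizer.ckpt is roughly twice masknet.ckpt, as Adam keeps two moment tensors per parameter, and counter.ckpt is apparently just a 2-byte epoch count). The module files are plain torch state_dicts saved by SpeechBrain's default checkpoint hooks; a hedged sketch of inspecting one locally:

import torch

# masknet.ckpt (once resolved from LFS) is a state_dict; listing a few
# entries shows the parameter layout of the Dual_Path_Model.
state = torch.load("masknet.ckpt", map_location="cpu")
for name, tensor in list(state.items())[:5]:
    print(name, tuple(tensor.shape))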