ftshijt committed on
Commit
259cae9
1 Parent(s): e5027f5

Update model

Browse files
Files changed (29) hide show
  1. README.md +344 -0
  2. exp/codec_train_dac_large_v1.6_raw_fs16000/120epoch.pth +3 -0
  3. exp/codec_train_dac_large_v1.6_raw_fs16000/config.yaml +269 -0
  4. exp/codec_train_dac_large_v1.6_raw_fs16000/images/adv_loss.png +0 -0
  5. exp/codec_train_dac_large_v1.6_raw_fs16000/images/codec_commit_loss.png +0 -0
  6. exp/codec_train_dac_large_v1.6_raw_fs16000/images/codec_loss.png +0 -0
  7. exp/codec_train_dac_large_v1.6_raw_fs16000/images/codec_quantization_loss.png +0 -0
  8. exp/codec_train_dac_large_v1.6_raw_fs16000/images/discriminator_backward_time.png +0 -0
  9. exp/codec_train_dac_large_v1.6_raw_fs16000/images/discriminator_forward_time.png +0 -0
  10. exp/codec_train_dac_large_v1.6_raw_fs16000/images/discriminator_loss.png +0 -0
  11. exp/codec_train_dac_large_v1.6_raw_fs16000/images/discriminator_optim_step_time.png +0 -0
  12. exp/codec_train_dac_large_v1.6_raw_fs16000/images/discriminator_train_time.png +0 -0
  13. exp/codec_train_dac_large_v1.6_raw_fs16000/images/fake_loss.png +0 -0
  14. exp/codec_train_dac_large_v1.6_raw_fs16000/images/feat_match_loss.png +0 -0
  15. exp/codec_train_dac_large_v1.6_raw_fs16000/images/generator_backward_time.png +0 -0
  16. exp/codec_train_dac_large_v1.6_raw_fs16000/images/generator_forward_time.png +0 -0
  17. exp/codec_train_dac_large_v1.6_raw_fs16000/images/generator_optim_step_time.png +0 -0
  18. exp/codec_train_dac_large_v1.6_raw_fs16000/images/generator_train_time.png +0 -0
  19. exp/codec_train_dac_large_v1.6_raw_fs16000/images/gpu_max_cached_mem_GB.png +0 -0
  20. exp/codec_train_dac_large_v1.6_raw_fs16000/images/iter_time.png +0 -0
  21. exp/codec_train_dac_large_v1.6_raw_fs16000/images/loss.png +0 -0
  22. exp/codec_train_dac_large_v1.6_raw_fs16000/images/mel_loss.png +0 -0
  23. exp/codec_train_dac_large_v1.6_raw_fs16000/images/mel_loss_real.png +0 -0
  24. exp/codec_train_dac_large_v1.6_raw_fs16000/images/optim0_lr0.png +0 -0
  25. exp/codec_train_dac_large_v1.6_raw_fs16000/images/optim1_lr0.png +0 -0
  26. exp/codec_train_dac_large_v1.6_raw_fs16000/images/real_loss.png +0 -0
  27. exp/codec_train_dac_large_v1.6_raw_fs16000/images/reconstruct_loss.png +0 -0
  28. exp/codec_train_dac_large_v1.6_raw_fs16000/images/train_time.png +0 -0
  29. meta.yaml +8 -0
README.md ADDED
@@ -0,0 +1,344 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - espnet
4
+ - audio
5
+ - codec
6
+ language: multilingual
7
+ datasets:
8
+ - amuse
9
+ license: cc-by-4.0
10
+ ---
11
+
12
+ ## ESPnet2 Codec model
13
+
14
+ ### `ftshijt/espnet_codec_dac_large_v1.6_120epoch`
15
+
16
+ This model was trained by ftshijt using amuse recipe in [espnet](https://github.com/espnet/espnet/).
17
+
18
+ ### Demo: How to use in ESPnet2
19
+
20
+ Follow the [ESPnet installation instructions](https://espnet.github.io/espnet/installation.html)
21
+ if you haven't done that already.
22
+
23
+ ```bash
24
+ cd espnet
25
+ git checkout 9baec3a7b10b784cb721849e19caed19e8ac45bc
26
+ pip install -e .
27
+ cd egs2/amuse/codec1
28
+ ./run.sh --skip_data_prep false --skip_train true --download_model ftshijt/espnet_codec_dac_large_v1.6_120epoch
29
+ ```
30
+
31
+
32
+
33
+ ## Codec config
34
+
35
+ <details><summary>expand</summary>
36
+
37
+ ```
38
+ config: conf/train_dac_large_v1.6.yaml
39
+ print_config: false
40
+ log_level: INFO
41
+ drop_last_iter: false
42
+ dry_run: false
43
+ iterator_type: chunk
44
+ valid_iterator_type: null
45
+ output_dir: exp/codec_train_dac_large_v1.6_raw_fs16000
46
+ ngpu: 1
47
+ seed: 777
48
+ num_workers: 1
49
+ num_att_plot: 0
50
+ dist_backend: nccl
51
+ dist_init_method: env://
52
+ dist_world_size: 2
53
+ dist_rank: 0
54
+ local_rank: 0
55
+ dist_master_addr: localhost
56
+ dist_master_port: 60663
57
+ dist_launcher: null
58
+ multiprocessing_distributed: true
59
+ unused_parameters: true
60
+ sharded_ddp: false
61
+ use_deepspeed: false
62
+ deepspeed_config: null
63
+ cudnn_enabled: true
64
+ cudnn_benchmark: false
65
+ cudnn_deterministic: false
66
+ use_tf32: false
67
+ collect_stats: false
68
+ write_collected_feats: false
69
+ max_epoch: 120
70
+ patience: null
71
+ val_scheduler_criterion:
72
+ - valid
73
+ - loss
74
+ early_stopping_criterion:
75
+ - valid
76
+ - loss
77
+ - min
78
+ best_model_criterion:
79
+ - - valid
80
+ - mel_loss
81
+ - min
82
+ - - train
83
+ - mel_loss
84
+ - min
85
+ - - train
86
+ - total_count
87
+ - max
88
+ keep_nbest_models: 5
89
+ nbest_averaging_interval: 0
90
+ grad_clip: -1
91
+ grad_clip_type: 2.0
92
+ grad_noise: false
93
+ accum_grad: 1
94
+ no_forward_run: false
95
+ resume: true
96
+ train_dtype: float32
97
+ use_amp: false
98
+ log_interval: 50
99
+ use_matplotlib: true
100
+ use_tensorboard: true
101
+ create_graph_in_tensorboard: false
102
+ use_wandb: false
103
+ wandb_project: null
104
+ wandb_id: null
105
+ wandb_entity: null
106
+ wandb_name: null
107
+ wandb_model_log_interval: -1
108
+ detect_anomaly: false
109
+ use_adapter: false
110
+ adapter: lora
111
+ save_strategy: all
112
+ adapter_conf: {}
113
+ pretrain_path: null
114
+ init_param: []
115
+ ignore_init_mismatch: false
116
+ freeze_param: []
117
+ num_iters_per_epoch: 5000
118
+ batch_size: 128
119
+ valid_batch_size: null
120
+ batch_bins: 1000000
121
+ valid_batch_bins: null
122
+ category_sample_size: 10
123
+ train_shape_file:
124
+ - exp/codec_stats_raw/train/audio_shape
125
+ valid_shape_file:
126
+ - exp/codec_stats_raw/valid/audio_shape
127
+ batch_type: unsorted
128
+ valid_batch_type: null
129
+ fold_length:
130
+ - 256000
131
+ sort_in_batch: descending
132
+ shuffle_within_batch: false
133
+ sort_batch: descending
134
+ multiple_iterator: false
135
+ chunk_length: 32000
136
+ chunk_shift_ratio: 0.5
137
+ num_cache_chunks: 256
138
+ chunk_excluded_key_prefixes: []
139
+ chunk_default_fs: null
140
+ chunk_max_abs_length: null
141
+ chunk_discard_short_samples: true
142
+ train_data_path_and_name_and_type:
143
+ - - dump/raw/owsm_all_temp/wav.scp
144
+ - audio
145
+ - kaldi_ark
146
+ valid_data_path_and_name_and_type:
147
+ - - dump/raw/dev-small/wav.scp
148
+ - audio
149
+ - kaldi_ark
150
+ multi_task_dataset: false
151
+ allow_variable_data_keys: false
152
+ max_cache_size: 0.0
153
+ max_cache_fd: 32
154
+ allow_multi_rates: false
155
+ valid_max_cache_size: null
156
+ exclude_weight_decay: false
157
+ exclude_weight_decay_conf: {}
158
+ optim: adamw
159
+ optim_conf:
160
+ lr: 0.0002
161
+ betas:
162
+ - 0.5
163
+ - 0.9
164
+ eps: 1.0e-09
165
+ weight_decay: 0.0
166
+ scheduler: exponentiallr
167
+ scheduler_conf:
168
+ gamma: 0.999875
169
+ optim2: adamw
170
+ optim2_conf:
171
+ lr: 0.0002
172
+ betas:
173
+ - 0.5
174
+ - 0.9
175
+ eps: 1.0e-09
176
+ weight_decay: 0.0
177
+ scheduler2: exponentiallr
178
+ scheduler2_conf:
179
+ gamma: 0.999875
180
+ generator_first: true
181
+ skip_discriminator_prob: 0.0
182
+ model_conf: {}
183
+ use_preprocessor: true
184
+ codec: dac
185
+ codec_conf:
186
+ sampling_rate: 16000
187
+ generator_params:
188
+ hidden_dim: 512
189
+ codebook_dim: 512
190
+ encdec_channels: 1
191
+ encdec_n_filters: 32
192
+ encdec_n_residual_layers: 3
193
+ encdec_ratios:
194
+ - 8
195
+ - 5
196
+ - 4
197
+ - 2
198
+ encdec_activation: Snake
199
+ encdec_norm: weight_norm
200
+ encdec_kernel_size: 7
201
+ encdec_residual_kernel_size: 7
202
+ encdec_last_kernel_size: 7
203
+ encdec_dilation_base: 2
204
+ encdec_causal: false
205
+ encdec_pad_mode: reflect
206
+ encdec_true_skip: false
207
+ encdec_compress: 2
208
+ encdec_lstm: 2
209
+ decoder_trim_right_ratio: 1.0
210
+ decoder_final_activation: null
211
+ decoder_final_activation_params: null
212
+ quantizer_n_q: 8
213
+ quantizer_bins: 1024
214
+ quantizer_decay: 0.99
215
+ quantizer_kmeans_init: true
216
+ quantizer_kmeans_iters: 50
217
+ quantizer_threshold_ema_dead_code: 2
218
+ quantizer_target_bandwidth:
219
+ - 0.5
220
+ - 1
221
+ - 1.5
222
+ - 2
223
+ - 4
224
+ quantizer_dropout: true
225
+ sample_rate: 16000
226
+ discriminator_params:
227
+ msmpmb_discriminator_params:
228
+ rates: []
229
+ sample_rate: 24000
230
+ fft_sizes:
231
+ - 2048
232
+ - 1024
233
+ - 512
234
+ periods:
235
+ - 2
236
+ - 3
237
+ - 5
238
+ - 7
239
+ - 11
240
+ period_discriminator_params:
241
+ in_channels: 1
242
+ out_channels: 1
243
+ kernel_sizes:
244
+ - 5
245
+ - 3
246
+ channels: 32
247
+ downsample_scales:
248
+ - 3
249
+ - 3
250
+ - 3
251
+ - 3
252
+ - 1
253
+ max_downsample_channels: 1024
254
+ bias: true
255
+ nonlinear_activation: LeakyReLU
256
+ nonlinear_activation_params:
257
+ negative_slope: 0.1
258
+ use_weight_norm: true
259
+ use_spectral_norm: false
260
+ band_discriminator_params:
261
+ hop_factor: 0.25
262
+ sample_rate: 24000
263
+ bands:
264
+ - - 0.0
265
+ - 0.1
266
+ - - 0.1
267
+ - 0.25
268
+ - - 0.25
269
+ - 0.5
270
+ - - 0.5
271
+ - 0.75
272
+ - - 0.75
273
+ - 1.0
274
+ channel: 32
275
+ generator_adv_loss_params:
276
+ average_by_discriminators: false
277
+ loss_type: mse
278
+ discriminator_adv_loss_params:
279
+ average_by_discriminators: false
280
+ loss_type: mse
281
+ use_feat_match_loss: true
282
+ feat_match_loss_params:
283
+ average_by_discriminators: false
284
+ average_by_layers: false
285
+ include_final_outputs: true
286
+ use_mel_loss: true
287
+ mel_loss_params:
288
+ range_start: 6
289
+ range_end: 11
290
+ window: hann
291
+ n_mels: 80
292
+ fmin: 0
293
+ fmax: null
294
+ log_base: null
295
+ fs: 16000
296
+ lambda_quantization: 0.25
297
+ lambda_commit: 1.0
298
+ lambda_reconstruct: 1.0
299
+ lambda_adv: 1.0
300
+ lambda_mel: 45.0
301
+ lambda_feat_match: 2.0
302
+ cache_generator_outputs: true
303
+ required:
304
+ - output_dir
305
+ version: '202402'
306
+ distributed: true
307
+ ```
308
+
309
+ </details>
310
+
311
+
312
+
313
+ ### Citing ESPnet
314
+
315
+ ```BibTex
316
+ @inproceedings{watanabe2018espnet,
317
+ author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
318
+ title={{ESPnet}: End-to-End Speech Processing Toolkit},
319
+ year={2018},
320
+ booktitle={Proceedings of Interspeech},
321
+ pages={2207--2211},
322
+ doi={10.21437/Interspeech.2018-1456},
323
+ url={http://dx.doi.org/10.21437/Interspeech.2018-1456}
324
+ }
325
+
326
+
327
+
328
+
329
+
330
+
331
+ ```
332
+
333
+ or arXiv:
334
+
335
+ ```bibtex
336
+ @misc{watanabe2018espnet,
337
+ title={ESPnet: End-to-End Speech Processing Toolkit},
338
+ author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
339
+ year={2018},
340
+ eprint={1804.00015},
341
+ archivePrefix={arXiv},
342
+ primaryClass={cs.CL}
343
+ }
344
+ ```
exp/codec_train_dac_large_v1.6_raw_fs16000/120epoch.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:431e3afd96655bb08395058bd7ca9176dcff04e0fd7c441944f96c47d0c80469
3
+ size 283100815
exp/codec_train_dac_large_v1.6_raw_fs16000/config.yaml ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ config: conf/train_dac_large_v1.6.yaml
2
+ print_config: false
3
+ log_level: INFO
4
+ drop_last_iter: false
5
+ dry_run: false
6
+ iterator_type: chunk
7
+ valid_iterator_type: null
8
+ output_dir: exp/codec_train_dac_large_v1.6_raw_fs16000
9
+ ngpu: 1
10
+ seed: 777
11
+ num_workers: 1
12
+ num_att_plot: 0
13
+ dist_backend: nccl
14
+ dist_init_method: env://
15
+ dist_world_size: 2
16
+ dist_rank: 0
17
+ local_rank: 0
18
+ dist_master_addr: localhost
19
+ dist_master_port: 60663
20
+ dist_launcher: null
21
+ multiprocessing_distributed: true
22
+ unused_parameters: true
23
+ sharded_ddp: false
24
+ use_deepspeed: false
25
+ deepspeed_config: null
26
+ cudnn_enabled: true
27
+ cudnn_benchmark: false
28
+ cudnn_deterministic: false
29
+ use_tf32: false
30
+ collect_stats: false
31
+ write_collected_feats: false
32
+ max_epoch: 120
33
+ patience: null
34
+ val_scheduler_criterion:
35
+ - valid
36
+ - loss
37
+ early_stopping_criterion:
38
+ - valid
39
+ - loss
40
+ - min
41
+ best_model_criterion:
42
+ - - valid
43
+ - mel_loss
44
+ - min
45
+ - - train
46
+ - mel_loss
47
+ - min
48
+ - - train
49
+ - total_count
50
+ - max
51
+ keep_nbest_models: 5
52
+ nbest_averaging_interval: 0
53
+ grad_clip: -1
54
+ grad_clip_type: 2.0
55
+ grad_noise: false
56
+ accum_grad: 1
57
+ no_forward_run: false
58
+ resume: true
59
+ train_dtype: float32
60
+ use_amp: false
61
+ log_interval: 50
62
+ use_matplotlib: true
63
+ use_tensorboard: true
64
+ create_graph_in_tensorboard: false
65
+ use_wandb: false
66
+ wandb_project: null
67
+ wandb_id: null
68
+ wandb_entity: null
69
+ wandb_name: null
70
+ wandb_model_log_interval: -1
71
+ detect_anomaly: false
72
+ use_adapter: false
73
+ adapter: lora
74
+ save_strategy: all
75
+ adapter_conf: {}
76
+ pretrain_path: null
77
+ init_param: []
78
+ ignore_init_mismatch: false
79
+ freeze_param: []
80
+ num_iters_per_epoch: 5000
81
+ batch_size: 128
82
+ valid_batch_size: null
83
+ batch_bins: 1000000
84
+ valid_batch_bins: null
85
+ category_sample_size: 10
86
+ train_shape_file:
87
+ - exp/codec_stats_raw/train/audio_shape
88
+ valid_shape_file:
89
+ - exp/codec_stats_raw/valid/audio_shape
90
+ batch_type: unsorted
91
+ valid_batch_type: null
92
+ fold_length:
93
+ - 256000
94
+ sort_in_batch: descending
95
+ shuffle_within_batch: false
96
+ sort_batch: descending
97
+ multiple_iterator: false
98
+ chunk_length: 32000
99
+ chunk_shift_ratio: 0.5
100
+ num_cache_chunks: 256
101
+ chunk_excluded_key_prefixes: []
102
+ chunk_default_fs: null
103
+ chunk_max_abs_length: null
104
+ chunk_discard_short_samples: true
105
+ train_data_path_and_name_and_type:
106
+ - - dump/raw/owsm_all_temp/wav.scp
107
+ - audio
108
+ - kaldi_ark
109
+ valid_data_path_and_name_and_type:
110
+ - - dump/raw/dev-small/wav.scp
111
+ - audio
112
+ - kaldi_ark
113
+ multi_task_dataset: false
114
+ allow_variable_data_keys: false
115
+ max_cache_size: 0.0
116
+ max_cache_fd: 32
117
+ allow_multi_rates: false
118
+ valid_max_cache_size: null
119
+ exclude_weight_decay: false
120
+ exclude_weight_decay_conf: {}
121
+ optim: adamw
122
+ optim_conf:
123
+ lr: 0.0002
124
+ betas:
125
+ - 0.5
126
+ - 0.9
127
+ eps: 1.0e-09
128
+ weight_decay: 0.0
129
+ scheduler: exponentiallr
130
+ scheduler_conf:
131
+ gamma: 0.999875
132
+ optim2: adamw
133
+ optim2_conf:
134
+ lr: 0.0002
135
+ betas:
136
+ - 0.5
137
+ - 0.9
138
+ eps: 1.0e-09
139
+ weight_decay: 0.0
140
+ scheduler2: exponentiallr
141
+ scheduler2_conf:
142
+ gamma: 0.999875
143
+ generator_first: true
144
+ skip_discriminator_prob: 0.0
145
+ model_conf: {}
146
+ use_preprocessor: true
147
+ codec: dac
148
+ codec_conf:
149
+ sampling_rate: 16000
150
+ generator_params:
151
+ hidden_dim: 512
152
+ codebook_dim: 512
153
+ encdec_channels: 1
154
+ encdec_n_filters: 32
155
+ encdec_n_residual_layers: 3
156
+ encdec_ratios:
157
+ - 8
158
+ - 5
159
+ - 4
160
+ - 2
161
+ encdec_activation: Snake
162
+ encdec_norm: weight_norm
163
+ encdec_kernel_size: 7
164
+ encdec_residual_kernel_size: 7
165
+ encdec_last_kernel_size: 7
166
+ encdec_dilation_base: 2
167
+ encdec_causal: false
168
+ encdec_pad_mode: reflect
169
+ encdec_true_skip: false
170
+ encdec_compress: 2
171
+ encdec_lstm: 2
172
+ decoder_trim_right_ratio: 1.0
173
+ decoder_final_activation: null
174
+ decoder_final_activation_params: null
175
+ quantizer_n_q: 8
176
+ quantizer_bins: 1024
177
+ quantizer_decay: 0.99
178
+ quantizer_kmeans_init: true
179
+ quantizer_kmeans_iters: 50
180
+ quantizer_threshold_ema_dead_code: 2
181
+ quantizer_target_bandwidth:
182
+ - 0.5
183
+ - 1
184
+ - 1.5
185
+ - 2
186
+ - 4
187
+ quantizer_dropout: true
188
+ sample_rate: 16000
189
+ discriminator_params:
190
+ msmpmb_discriminator_params:
191
+ rates: []
192
+ sample_rate: 24000
193
+ fft_sizes:
194
+ - 2048
195
+ - 1024
196
+ - 512
197
+ periods:
198
+ - 2
199
+ - 3
200
+ - 5
201
+ - 7
202
+ - 11
203
+ period_discriminator_params:
204
+ in_channels: 1
205
+ out_channels: 1
206
+ kernel_sizes:
207
+ - 5
208
+ - 3
209
+ channels: 32
210
+ downsample_scales:
211
+ - 3
212
+ - 3
213
+ - 3
214
+ - 3
215
+ - 1
216
+ max_downsample_channels: 1024
217
+ bias: true
218
+ nonlinear_activation: LeakyReLU
219
+ nonlinear_activation_params:
220
+ negative_slope: 0.1
221
+ use_weight_norm: true
222
+ use_spectral_norm: false
223
+ band_discriminator_params:
224
+ hop_factor: 0.25
225
+ sample_rate: 24000
226
+ bands:
227
+ - - 0.0
228
+ - 0.1
229
+ - - 0.1
230
+ - 0.25
231
+ - - 0.25
232
+ - 0.5
233
+ - - 0.5
234
+ - 0.75
235
+ - - 0.75
236
+ - 1.0
237
+ channel: 32
238
+ generator_adv_loss_params:
239
+ average_by_discriminators: false
240
+ loss_type: mse
241
+ discriminator_adv_loss_params:
242
+ average_by_discriminators: false
243
+ loss_type: mse
244
+ use_feat_match_loss: true
245
+ feat_match_loss_params:
246
+ average_by_discriminators: false
247
+ average_by_layers: false
248
+ include_final_outputs: true
249
+ use_mel_loss: true
250
+ mel_loss_params:
251
+ range_start: 6
252
+ range_end: 11
253
+ window: hann
254
+ n_mels: 80
255
+ fmin: 0
256
+ fmax: null
257
+ log_base: null
258
+ fs: 16000
259
+ lambda_quantization: 0.25
260
+ lambda_commit: 1.0
261
+ lambda_reconstruct: 1.0
262
+ lambda_adv: 1.0
263
+ lambda_mel: 45.0
264
+ lambda_feat_match: 2.0
265
+ cache_generator_outputs: true
266
+ required:
267
+ - output_dir
268
+ version: '202402'
269
+ distributed: true
exp/codec_train_dac_large_v1.6_raw_fs16000/images/adv_loss.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/codec_commit_loss.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/codec_loss.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/codec_quantization_loss.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/discriminator_backward_time.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/discriminator_forward_time.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/discriminator_loss.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/discriminator_optim_step_time.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/discriminator_train_time.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/fake_loss.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/feat_match_loss.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/generator_backward_time.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/generator_forward_time.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/generator_optim_step_time.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/generator_train_time.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/gpu_max_cached_mem_GB.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/iter_time.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/loss.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/mel_loss.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/mel_loss_real.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/optim0_lr0.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/optim1_lr0.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/real_loss.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/reconstruct_loss.png ADDED
exp/codec_train_dac_large_v1.6_raw_fs16000/images/train_time.png ADDED
meta.yaml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ espnet: '202402'
2
+ files:
3
+ model_file: exp/codec_train_dac_large_v1.6_raw_fs16000/120epoch.pth
4
+ python: 3.10.13 | packaged by conda-forge | (main, Dec 23 2023, 15:26:55) [GCC 12.3.0]
5
+ timestamp: 1730000832.234379
6
+ torch: 2.5.0.dev20240825+cu124
7
+ yaml_files:
8
+ train_config: exp/codec_train_dac_large_v1.6_raw_fs16000/config.yaml