haoxiangsnr committed on
Commit
6ee97f1
1 Parent(s): cb972f2

Upload 79 files

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. swin_default_LR1e-2_addEOSToken/config__2024_01_17--11_16_51.toml +89 -0
  2. swin_default_LR1e-2_addEOSToken/config__2024_01_17--12_14_37.toml +89 -0
  3. swin_default_LR1e-2_addEOSToken/config__2024_01_17--17_16_44.toml +89 -0
  4. swin_default_LR1e-2_addEOSToken/swin_default_LR1e-2_addEOSToken_2024_01_17--11_16_21.log +142 -0
  5. swin_default_LR1e-2_addEOSToken/swin_default_LR1e-2_addEOSToken_2024_01_17--12_14_33.log +130 -0
  6. swin_default_LR1e-2_addEOSToken/swin_default_LR1e-2_addEOSToken_2024_01_17--17_15_22.log +203 -0
  7. swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705450617.gina1.1498943.0 +3 -0
  8. swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705450617.gina1.1498944.0 +3 -0
  9. swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705450617.gina1.1498945.0 +3 -0
  10. swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705450617.gina1.1498946.0 +3 -0
  11. swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705454078.gina301.610519.0 +3 -0
  12. swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705454078.gina301.610520.0 +3 -0
  13. swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705454078.gina301.610521.0 +3 -0
  14. swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705454078.gina301.610522.0 +3 -0
  15. swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705472223.gina1.1576669.0 +3 -0
  16. swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705472223.gina1.1576670.0 +3 -0
  17. swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705472223.gina1.1576671.0 +3 -0
  18. swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705472223.gina1.1576672.0 +3 -0
  19. swin_default_LR1e-3_AR-NAR/config__2024_01_18--00_22_05.toml +89 -0
  20. swin_default_LR1e-3_AR-NAR/config__2024_01_18--00_23_20.toml +89 -0
  21. swin_default_LR1e-3_AR-NAR/config__2024_01_18--00_33_25.toml +89 -0
  22. swin_default_LR1e-3_AR-NAR/config__2024_01_18--13_13_46.toml +89 -0
  23. swin_default_LR1e-3_AR-NAR/config__2024_01_18--13_14_43.toml +89 -0
  24. swin_default_LR1e-3_AR-NAR/swin_default_LR1e-2_AR-NAR_2024_01_18--00_21_17.log +85 -0
  25. swin_default_LR1e-3_AR-NAR/swin_default_LR1e-2_AR-NAR_2024_01_18--00_23_16.log +85 -0
  26. swin_default_LR1e-3_AR-NAR/swin_default_LR1e-2_AR-NAR_2024_01_18--00_33_02.log +515 -0
  27. swin_default_LR1e-3_AR-NAR/swin_default_LR1e-3_AR-NAR_2024_01_18--13_13_05.log +94 -0
  28. swin_default_LR1e-3_AR-NAR/swin_default_LR1e-3_AR-NAR_2024_01_18--13_14_39.log +121 -0
  29. swin_default_LR1e-3_AR-NAR/swin_default_LR1e-3_AR-NAR_2024_01_18--13_14_40.log +1 -0
  30. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497738.gina1.1685133.0 +3 -0
  31. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497738.gina1.1685134.0 +3 -0
  32. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497738.gina1.1685135.0 +3 -0
  33. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497738.gina1.1685136.0 +3 -0
  34. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497802.gina1.1688371.0 +3 -0
  35. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497802.gina1.1688372.0 +3 -0
  36. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497802.gina1.1688373.0 +3 -0
  37. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497802.gina1.1688374.0 +3 -0
  38. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705498412.gina1.1693065.0 +3 -0
  39. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705498412.gina1.1693066.0 +3 -0
  40. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705498412.gina1.1693067.0 +3 -0
  41. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705498412.gina1.1693068.0 +3 -0
  42. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544038.gina1.1902904.0 +3 -0
  43. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544038.gina1.1902905.0 +3 -0
  44. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544038.gina1.1902906.0 +3 -0
  45. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544038.gina1.1902907.0 +3 -0
  46. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544084.gina1.1906280.0 +3 -0
  47. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544085.gina1.1906278.0 +3 -0
  48. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544085.gina1.1906279.0 +3 -0
  49. swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544085.gina1.1906281.0 +3 -0
  50. swin_default_LR1e-4_AR-NAR/config__2024_01_18--13_07_07.toml +89 -0
swin_default_LR1e-2_addEOSToken/config__2024_01_17--11_16_51.toml ADDED
@@ -0,0 +1,89 @@
+ [[validate_dataset]]
+ path = "dataloader.Dataset"
+
+ [validate_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+ [validate_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
+
+ [meta]
+ save_dir = "exp"
+ description = "Train a model using Generative Adversarial Networks (GANs)"
+ seed = 20220815
+ exp_id = "swin_default_LR1e-2_addEOSToken"
+ config_path = "/fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/conf/swin_default_LR1e-2_addEOSToken.toml"
+
+ [trainer]
+ path = "trainer.Trainer"
+
+ [loss_function]
+ path = "torch.nn.MSELoss"
+
+ [optimizer]
+ path = "torch.optim.AdamW"
+
+ [model]
+ path = "model.Model"
+
+ [acoustics]
+ n_fft = 512
+ hop_length = 128
+ win_length = 512
+ sr = 24000
+
+ [train_dataset]
+ path = "dataloader.Dataset"
+
+ [test_dataset]
+ path = "dataloader.Dataset"
+
+ [trainer.args]
+ debug = false
+ max_steps = 0
+ max_epochs = 1000
+ max_grad_norm = 1.0
+ save_max_score = true
+ save_ckpt_interval = 5
+ max_patience = 200
+ plot_norm = true
+ validation_interval = 200
+ max_num_checkpoints = 100
+ scheduler_name = "constant_schedule_with_warmup"
+ warmup_steps = 1000
+ warmup_ratio = 0.0
+ gradient_accumulation_steps = 1
+
+ [loss_function.args]
+
+ [optimizer.args]
+ lr = 0.0001
+
+ [model.args]
+
+ [train_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+
+ [train_dataset.dataloader]
+ batch_size = 20
+ num_workers = 10
+ drop_last = true
+ pin_memory = true
+
+ [test_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+
+ [test_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
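
Note on the config layout: each component table ([trainer], [optimizer], [model], ...) carries a dotted `path` string plus a sibling `args` table, which presumably lets the recipe build components dynamically. A minimal sketch of that pattern, assuming Python 3.11+ for `tomllib` (the environment logged below runs 3.10.13, where the third-party `tomli` package provides the same API); this is an illustration of the idea, not audiozen's actual loader:

# Sketch (assumption: not audiozen's real loader): parse the TOML, resolve each
# dotted `path` string into a class, and construct it from the matching `args` table.
import importlib
import tomllib  # Python 3.11+; on 3.10 use the `tomli` package instead

def resolve(dotted: str):
    """Turn 'torch.optim.AdamW' into the AdamW class via a module import."""
    module, _, attr = dotted.rpartition(".")
    return getattr(importlib.import_module(module), attr)

with open("config__2024_01_17--11_16_51.toml", "rb") as f:  # tomllib requires binary mode
    cfg = tomllib.load(f)

optimizer_cls = resolve(cfg["optimizer"]["path"])  # torch.optim.AdamW
# optimizer = optimizer_cls(model.parameters(), **cfg["optimizer"]["args"])  # lr = 0.0001
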
swin_default_LR1e-2_addEOSToken/config__2024_01_17--12_14_37.toml ADDED
@@ -0,0 +1,89 @@
+ [[validate_dataset]]
+ path = "dataloader.Dataset"
+
+ [validate_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+ [validate_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
+
+ [meta]
+ save_dir = "exp"
+ description = "Train a model using Generative Adversarial Networks (GANs)"
+ seed = 20220815
+ exp_id = "swin_default_LR1e-2_addEOSToken"
+ config_path = "/fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/conf/swin_default_LR1e-2_addEOSToken.toml"
+
+ [trainer]
+ path = "trainer.Trainer"
+
+ [loss_function]
+ path = "torch.nn.MSELoss"
+
+ [optimizer]
+ path = "torch.optim.AdamW"
+
+ [model]
+ path = "model.Model"
+
+ [acoustics]
+ n_fft = 512
+ hop_length = 128
+ win_length = 512
+ sr = 24000
+
+ [train_dataset]
+ path = "dataloader.Dataset"
+
+ [test_dataset]
+ path = "dataloader.Dataset"
+
+ [trainer.args]
+ debug = false
+ max_steps = 0
+ max_epochs = 1000
+ max_grad_norm = 1.0
+ save_max_score = true
+ save_ckpt_interval = 5
+ max_patience = 200
+ plot_norm = true
+ validation_interval = 200
+ max_num_checkpoints = 100
+ scheduler_name = "constant_schedule_with_warmup"
+ warmup_steps = 1000
+ warmup_ratio = 0.0
+ gradient_accumulation_steps = 1
+
+ [loss_function.args]
+
+ [optimizer.args]
+ lr = 0.0001
+
+ [model.args]
+
+ [train_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+
+ [train_dataset.dataloader]
+ batch_size = 20
+ num_workers = 10
+ drop_last = true
+ pin_memory = true
+
+ [test_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+
+ [test_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
swin_default_LR1e-2_addEOSToken/config__2024_01_17--17_16_44.toml ADDED
@@ -0,0 +1,89 @@
+ [[validate_dataset]]
+ path = "dataloader.Dataset"
+
+ [validate_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+ [validate_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
+
+ [meta]
+ save_dir = "exp"
+ description = "Train a model using Generative Adversarial Networks (GANs)"
+ seed = 20220815
+ exp_id = "swin_default_LR1e-2_addEOSToken"
+ config_path = "/fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/conf/swin_default_LR1e-2_addEOSToken.toml"
+
+ [trainer]
+ path = "trainer.Trainer"
+
+ [loss_function]
+ path = "torch.nn.MSELoss"
+
+ [optimizer]
+ path = "torch.optim.AdamW"
+
+ [model]
+ path = "model.Model"
+
+ [acoustics]
+ n_fft = 512
+ hop_length = 128
+ win_length = 512
+ sr = 24000
+
+ [train_dataset]
+ path = "dataloader.Dataset"
+
+ [test_dataset]
+ path = "dataloader.Dataset"
+
+ [trainer.args]
+ debug = false
+ max_steps = 0
+ max_epochs = 1000
+ max_grad_norm = 1.0
+ save_max_score = true
+ save_ckpt_interval = 5
+ max_patience = 200
+ plot_norm = true
+ validation_interval = 200
+ max_num_checkpoints = 100
+ scheduler_name = "constant_schedule_with_warmup"
+ warmup_steps = 1000
+ warmup_ratio = 0.0
+ gradient_accumulation_steps = 1
+
+ [loss_function.args]
+
+ [optimizer.args]
+ lr = 0.0001
+
+ [model.args]
+
+ [train_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+
+ [train_dataset.dataloader]
+ batch_size = 20
+ num_workers = 10
+ drop_last = true
+ pin_memory = true
+
+ [test_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+
+ [test_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
swin_default_LR1e-2_addEOSToken/swin_default_LR1e-2_addEOSToken_2024_01_17--11_16_21.log ADDED
@@ -0,0 +1,142 @@
+ 01-17 11:16:21 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken.
+ 01-17 11:16:21 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken.
+ 01-17 11:16:21 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken.
+ 01-17 11:16:21 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken.
+ 01-17 11:16:57 INFO [logging.py:61]: Configuration file is saved to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/config__2024_01_17--11_16_51.toml.
+ 01-17 11:16:57 INFO [logging.py:61]: Environment information:
+ - `Accelerate` version: 0.26.1
+ - Platform: Linux-5.14.0-362.13.1.el9_3.x86_64-x86_64-with-glibc2.34
+ - Python version: 3.10.13
+ - Numpy version: 1.26.3
+ - PyTorch version (GPU?): 2.1.2 (True)
+ - System RAM: 503.48 GB
+ - GPU Available: True
+ - GPU IDs: 4
+ - GPU type: NVIDIA A100-SXM4-80GB
+ 01-17 11:16:57 INFO [logging.py:61]:
+ ===============================================================================================
+ Layer (type:depth-idx) Param #
+ ===============================================================================================
+ DistributedDataParallel --
+ ├─Model: 1-1 --
+ │ └─EncodecModel: 2-1 --
+ │ │ └─EncodecEncoder: 3-1 (7,425,792)
+ │ │ └─EncodecDecoder: 3-2 (7,426,018)
+ │ │ └─EncodecResidualVectorQuantizer: 3-3 --
+ │ └─ModuleList: 2-2 --
+ │ │ └─TokenEmbedding: 3-4 1,049,600
+ │ │ └─TokenEmbedding: 3-5 1,049,600
+ │ │ └─TokenEmbedding: 3-6 1,049,600
+ │ │ └─TokenEmbedding: 3-7 1,049,600
+ │ │ └─TokenEmbedding: 3-8 1,049,600
+ │ │ └─TokenEmbedding: 3-9 1,049,600
+ │ │ └─TokenEmbedding: 3-10 1,049,600
+ │ │ └─TokenEmbedding: 3-11 1,049,600
+ │ └─PreNet: 2-3 --
+ │ │ └─Sequential: 3-12 591,360
+ │ └─SinePositionalEmbedding: 2-4 1
+ │ │ └─Dropout: 3-13 --
+ │ └─TransformerEncoder: 2-5 --
+ │ │ └─ModuleList: 3-14 201,535,488
+ │ │ └─AdaptiveLayerNorm: 3-15 2,101,248
+ │ └─ModuleList: 2-6 --
+ │ │ └─Linear: 3-16 1,049,600
+ │ │ └─Linear: 3-17 1,049,600
+ │ │ └─Linear: 3-18 1,049,600
+ │ │ └─Linear: 3-19 1,049,600
+ │ │ └─Linear: 3-20 1,049,600
+ │ │ └─Linear: 3-21 1,049,600
+ │ │ └─Linear: 3-22 1,049,600
+ │ │ └─Linear: 3-23 1,049,600
+ │ └─ModuleList: 2-7 --
+ │ │ └─TokenEmbedding: 3-24 1,024
+ │ │ └─TokenEmbedding: 3-25 1,024
+ │ │ └─TokenEmbedding: 3-26 1,024
+ │ │ └─TokenEmbedding: 3-27 1,024
+ │ │ └─TokenEmbedding: 3-28 1,024
+ │ │ └─TokenEmbedding: 3-29 1,024
+ │ │ └─TokenEmbedding: 3-30 1,024
+ │ │ └─TokenEmbedding: 3-31 1,024
+ │ └─MulticlassAccuracy: 2-8 --
+ ===============================================================================================
+ Total params: 235,881,699
+ Trainable params: 221,029,888
+ Non-trainable params: 14,851,811
+ ===============================================================================================
+ 01-17 11:16:57 INFO [logging.py:61]: Training control variables:
+ 01-17 11:16:57 INFO [logging.py:61]: `steps_per_epoch`: 500
+ 01-17 11:16:57 INFO [logging.py:61]: Gradient accumulation steps: 1
+ 01-17 11:16:57 INFO [logging.py:61]: `update_steps_per_epoch`: 500
+ 01-17 11:16:57 INFO [logging.py:61]: `max_steps`: 500000
+ 01-17 11:16:57 INFO [logging.py:61]: `max_epochs`: 1000
+ 01-17 11:16:57 INFO [logging.py:61]: warmup_steps=1000. warmup_ratio will be ignored.
+ 01-17 11:16:57 INFO [logging.py:61]: ========= Epoch 1 out of 1000 =========
+ 01-17 11:16:57 INFO [logging.py:61]: Begin training...
+ 01-17 11:34:44 INFO [logging.py:61]: Loss 'loss' on epoch 1: 5.376262664794922
+ 01-17 11:34:44 INFO [logging.py:61]: Loss 'acc' on epoch 1: 0.29953059554100037
+ 01-17 11:34:44 INFO [logging.py:61]: ========= Epoch 2 out of 1000 =========
+ 01-17 11:34:44 INFO [logging.py:61]: Begin training...
+ 01-17 11:52:25 INFO [logging.py:61]: Loss 'loss' on epoch 2: 4.830622673034668
+ 01-17 11:52:25 INFO [logging.py:61]: Loss 'acc' on epoch 2: 0.35284850001335144
+ 01-17 11:52:25 INFO [logging.py:61]: ========= Epoch 3 out of 1000 =========
+ 01-17 11:52:25 INFO [logging.py:61]: Begin training...
+ 01-17 12:10:07 INFO [logging.py:61]: Loss 'loss' on epoch 3: 4.957332134246826
+ 01-17 12:10:07 INFO [logging.py:61]: Loss 'acc' on epoch 3: 0.3474164605140686
+ 01-17 12:10:07 INFO [logging.py:61]: ========= Epoch 4 out of 1000 =========
+ 01-17 12:10:07 INFO [logging.py:61]: Begin training...
+ 01-17 12:27:48 INFO [logging.py:61]: Loss 'loss' on epoch 4: 4.950679779052734
+ 01-17 12:27:48 INFO [logging.py:61]: Loss 'acc' on epoch 4: 0.3592258095741272
+ 01-17 12:27:48 INFO [logging.py:61]: ========= Epoch 5 out of 1000 =========
+ 01-17 12:27:48 INFO [logging.py:61]: Begin training...
+ 01-17 12:45:29 INFO [logging.py:61]: Loss 'loss' on epoch 5: 4.960457801818848
+ 01-17 12:45:29 INFO [logging.py:61]: Loss 'acc' on epoch 5: 0.3581017851829529
+ 01-17 12:45:29 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005
+ 01-17 12:45:30 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005/pytorch_model.bin
+ 01-17 12:45:33 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005/optimizer.bin
+ 01-17 12:45:33 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005/scheduler.bin
+ 01-17 12:45:33 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005/sampler.bin
+ 01-17 12:45:33 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005/sampler_1.bin
+ 01-17 12:45:33 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005/random_states_0.pkl
+ 01-17 12:45:33 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005/custom_checkpoint_0.pkl
+ 01-17 12:45:33 INFO [logging.py:61]: ========= Epoch 6 out of 1000 =========
+ 01-17 12:45:33 INFO [logging.py:61]: Begin training...
+ 01-17 13:03:13 INFO [logging.py:61]: Loss 'loss' on epoch 6: 5.089817523956299
+ 01-17 13:03:13 INFO [logging.py:61]: Loss 'acc' on epoch 6: 0.35066384077072144
+ 01-17 13:03:13 INFO [logging.py:61]: ========= Epoch 7 out of 1000 =========
+ 01-17 13:03:13 INFO [logging.py:61]: Begin training...
+ 01-17 13:20:54 INFO [logging.py:61]: Loss 'loss' on epoch 7: 5.060431480407715
+ 01-17 13:20:54 INFO [logging.py:61]: Loss 'acc' on epoch 7: 0.3525404930114746
+ 01-17 13:20:54 INFO [logging.py:61]: ========= Epoch 8 out of 1000 =========
+ 01-17 13:20:54 INFO [logging.py:61]: Begin training...
+ 01-17 13:38:35 INFO [logging.py:61]: Loss 'loss' on epoch 8: 5.10891056060791
+ 01-17 13:38:35 INFO [logging.py:61]: Loss 'acc' on epoch 8: 0.349361389875412
+ 01-17 13:38:35 INFO [logging.py:61]: ========= Epoch 9 out of 1000 =========
+ 01-17 13:38:35 INFO [logging.py:61]: Begin training...
+ 01-17 13:56:16 INFO [logging.py:61]: Loss 'loss' on epoch 9: 5.178280830383301
+ 01-17 13:56:16 INFO [logging.py:61]: Loss 'acc' on epoch 9: 0.34400177001953125
+ 01-17 13:56:16 INFO [logging.py:61]: ========= Epoch 10 out of 1000 =========
+ 01-17 13:56:16 INFO [logging.py:61]: Begin training...
+ 01-17 14:13:57 INFO [logging.py:61]: Loss 'loss' on epoch 10: 5.152740001678467
+ 01-17 14:13:57 INFO [logging.py:61]: Loss 'acc' on epoch 10: 0.34986352920532227
+ 01-17 14:13:57 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010
+ 01-17 14:13:58 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/pytorch_model.bin
+ 01-17 14:14:00 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/optimizer.bin
+ 01-17 14:14:00 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/scheduler.bin
+ 01-17 14:14:00 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/sampler.bin
+ 01-17 14:14:00 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/sampler_1.bin
+ 01-17 14:14:00 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/random_states_0.pkl
+ 01-17 14:14:00 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/custom_checkpoint_0.pkl
+ 01-17 14:14:01 INFO [logging.py:61]: ========= Epoch 11 out of 1000 =========
+ 01-17 14:14:01 INFO [logging.py:61]: Begin training...
+ 01-17 14:31:41 INFO [logging.py:61]: Loss 'loss' on epoch 11: 5.189244270324707
+ 01-17 14:31:41 INFO [logging.py:61]: Loss 'acc' on epoch 11: 0.3463723957538605
+ 01-17 14:31:41 INFO [logging.py:61]: ========= Epoch 12 out of 1000 =========
+ 01-17 14:31:41 INFO [logging.py:61]: Begin training...
+ 01-17 14:49:22 INFO [logging.py:61]: Loss 'loss' on epoch 12: 5.161355495452881
+ 01-17 14:49:22 INFO [logging.py:61]: Loss 'acc' on epoch 12: 0.347272664308548
+ 01-17 14:49:22 INFO [logging.py:61]: ========= Epoch 13 out of 1000 =========
+ 01-17 14:49:22 INFO [logging.py:61]: Begin training...
+ 01-17 15:07:03 INFO [logging.py:61]: Loss 'loss' on epoch 13: 5.166537761688232
+ 01-17 15:07:03 INFO [logging.py:61]: Loss 'acc' on epoch 13: 0.34788310527801514
+ 01-17 15:07:03 INFO [logging.py:61]: ========= Epoch 14 out of 1000 =========
+ 01-17 15:07:03 INFO [logging.py:61]: Begin training...
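
The training control variables in this log are internally consistent: with `steps_per_epoch` = 500 and `max_epochs` = 1000, the derived `max_steps` is 500 × 1000 = 500,000 (the configured `max_steps = 0` apparently means "derive from epochs"). For quick inspection outside TensorBoard, the per-epoch metrics can be scraped from a log like the one above; a small sketch, with the filename and regex assumed from the lines shown in this commit:

# Sketch: recover the per-epoch training curve from a log in the format above.
import re

EPOCH_LOSS = re.compile(r"Loss 'loss' on epoch (\d+): ([\d.]+)")

losses: dict[int, float] = {}
with open("swin_default_LR1e-2_addEOSToken_2024_01_17--11_16_21.log") as f:
    for line in f:
        m = EPOCH_LOSS.search(line)
        if m:
            losses[int(m.group(1))] = float(m.group(2))

print(losses)  # {1: 5.376262664794922, 2: 4.830622673034668, ...}
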
swin_default_LR1e-2_addEOSToken/swin_default_LR1e-2_addEOSToken_2024_01_17--12_14_33.log ADDED
@@ -0,0 +1,130 @@
+ 01-17 12:14:33 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken.
+ 01-17 12:14:33 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken.
+ 01-17 12:14:33 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken.
+ 01-17 12:14:33 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken.
+ 01-17 12:14:38 INFO [logging.py:61]: Configuration file is saved to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/config__2024_01_17--12_14_37.toml.
+ 01-17 12:14:38 INFO [logging.py:61]: Environment information:
+ - `Accelerate` version: 0.26.1
+ - Platform: Linux-5.14.0-362.13.1.el9_3.x86_64-x86_64-with-glibc2.34
+ - Python version: 3.10.13
+ - Numpy version: 1.26.3
+ - PyTorch version (GPU?): 2.1.2 (True)
+ - System RAM: 1007.48 GB
+ - GPU Available: True
+ - GPU IDs: 4
+ - GPU type: NVIDIA A100-SXM4-80GB
+ 01-17 12:14:38 INFO [logging.py:61]:
+ ===============================================================================================
+ Layer (type:depth-idx) Param #
+ ===============================================================================================
+ DistributedDataParallel --
+ ├─Model: 1-1 --
+ │ └─EncodecModel: 2-1 --
+ │ │ └─EncodecEncoder: 3-1 (7,425,792)
+ │ │ └─EncodecDecoder: 3-2 (7,426,018)
+ │ │ └─EncodecResidualVectorQuantizer: 3-3 --
+ │ └─ModuleList: 2-2 --
+ │ │ └─TokenEmbedding: 3-4 1,049,600
+ │ │ └─TokenEmbedding: 3-5 1,049,600
+ │ │ └─TokenEmbedding: 3-6 1,049,600
+ │ │ └─TokenEmbedding: 3-7 1,049,600
+ │ │ └─TokenEmbedding: 3-8 1,049,600
+ │ │ └─TokenEmbedding: 3-9 1,049,600
+ │ │ └─TokenEmbedding: 3-10 1,049,600
+ │ │ └─TokenEmbedding: 3-11 1,049,600
+ │ └─PreNet: 2-3 --
+ │ │ └─Sequential: 3-12 591,360
+ │ └─SinePositionalEmbedding: 2-4 1
+ │ │ └─Dropout: 3-13 --
+ │ └─TransformerEncoder: 2-5 --
+ │ │ └─ModuleList: 3-14 201,535,488
+ │ │ └─AdaptiveLayerNorm: 3-15 2,101,248
+ │ └─ModuleList: 2-6 --
+ │ │ └─Linear: 3-16 1,049,600
+ │ │ └─Linear: 3-17 1,049,600
+ │ │ └─Linear: 3-18 1,049,600
+ │ │ └─Linear: 3-19 1,049,600
+ │ │ └─Linear: 3-20 1,049,600
+ │ │ └─Linear: 3-21 1,049,600
+ │ │ └─Linear: 3-22 1,049,600
+ │ │ └─Linear: 3-23 1,049,600
+ │ └─ModuleList: 2-7 --
+ │ │ └─TokenEmbedding: 3-24 1,024
+ │ │ └─TokenEmbedding: 3-25 1,024
+ │ │ └─TokenEmbedding: 3-26 1,024
+ │ │ └─TokenEmbedding: 3-27 1,024
+ │ │ └─TokenEmbedding: 3-28 1,024
+ │ │ └─TokenEmbedding: 3-29 1,024
+ │ │ └─TokenEmbedding: 3-30 1,024
+ │ │ └─TokenEmbedding: 3-31 1,024
+ │ └─MulticlassAccuracy: 2-8 --
+ ===============================================================================================
+ Total params: 235,881,699
+ Trainable params: 221,029,888
+ Non-trainable params: 14,851,811
+ ===============================================================================================
+ 01-17 12:14:38 INFO [logging.py:61]: Training control variables:
+ 01-17 12:14:38 INFO [logging.py:61]: `steps_per_epoch`: 500
+ 01-17 12:14:38 INFO [logging.py:61]: Gradient accumulation steps: 1
+ 01-17 12:14:38 INFO [logging.py:61]: `update_steps_per_epoch`: 500
+ 01-17 12:14:38 INFO [logging.py:61]: `max_steps`: 500000
+ 01-17 12:14:38 INFO [logging.py:61]: `max_epochs`: 1000
+ 01-17 12:14:38 INFO [logging.py:61]: warmup_steps=1000. warmup_ratio will be ignored.
+ 01-17 12:14:38 INFO [logging.py:61]: ========= Epoch 1 out of 1000 =========
+ 01-17 12:14:38 INFO [logging.py:61]: Begin training...
+ 01-17 12:32:23 INFO [logging.py:61]: Loss 'loss' on epoch 1: 5.352734088897705
+ 01-17 12:32:23 INFO [logging.py:61]: Loss 'acc' on epoch 1: 0.30075228214263916
+ 01-17 12:32:23 INFO [logging.py:61]: ========= Epoch 2 out of 1000 =========
+ 01-17 12:32:23 INFO [logging.py:61]: Begin training...
+ 01-17 12:50:06 INFO [logging.py:61]: Loss 'loss' on epoch 2: 4.763156414031982
+ 01-17 12:50:06 INFO [logging.py:61]: Loss 'acc' on epoch 2: 0.35356950759887695
+ 01-17 12:50:06 INFO [logging.py:61]: ========= Epoch 3 out of 1000 =========
+ 01-17 12:50:06 INFO [logging.py:61]: Begin training...
+ 01-17 13:07:51 INFO [logging.py:61]: Loss 'loss' on epoch 3: 4.902694225311279
+ 01-17 13:07:51 INFO [logging.py:61]: Loss 'acc' on epoch 3: 0.35302937030792236
+ 01-17 13:07:51 INFO [logging.py:61]: ========= Epoch 4 out of 1000 =========
+ 01-17 13:07:51 INFO [logging.py:61]: Begin training...
+ 01-17 13:28:14 INFO [logging.py:61]: Loss 'loss' on epoch 4: 4.946804046630859
+ 01-17 13:28:14 INFO [logging.py:61]: Loss 'acc' on epoch 4: 0.3474726378917694
+ 01-17 13:28:14 INFO [logging.py:61]: ========= Epoch 5 out of 1000 =========
+ 01-17 13:28:14 INFO [logging.py:61]: Begin training...
+ 01-17 13:46:01 INFO [logging.py:61]: Loss 'loss' on epoch 5: 5.042134761810303
+ 01-17 13:46:01 INFO [logging.py:61]: Loss 'acc' on epoch 5: 0.3472442626953125
+ 01-17 13:46:01 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005
+ 01-17 13:46:02 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005/pytorch_model.bin
+ 01-17 13:46:05 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005/optimizer.bin
+ 01-17 13:46:05 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005/scheduler.bin
+ 01-17 13:46:05 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005/sampler.bin
+ 01-17 13:46:05 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005/sampler_1.bin
+ 01-17 13:46:05 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005/random_states_0.pkl
+ 01-17 13:46:05 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0005/custom_checkpoint_0.pkl
+ 01-17 13:46:05 INFO [logging.py:61]: ========= Epoch 6 out of 1000 =========
+ 01-17 13:46:05 INFO [logging.py:61]: Begin training...
+ 01-17 14:03:48 INFO [logging.py:61]: Loss 'loss' on epoch 6: 5.154253959655762
+ 01-17 14:03:48 INFO [logging.py:61]: Loss 'acc' on epoch 6: 0.3447455167770386
+ 01-17 14:03:48 INFO [logging.py:61]: ========= Epoch 7 out of 1000 =========
+ 01-17 14:03:48 INFO [logging.py:61]: Begin training...
+ 01-17 14:21:30 INFO [logging.py:61]: Loss 'loss' on epoch 7: 5.2775559425354
+ 01-17 14:21:30 INFO [logging.py:61]: Loss 'acc' on epoch 7: 0.3431953191757202
+ 01-17 14:21:30 INFO [logging.py:61]: ========= Epoch 8 out of 1000 =========
+ 01-17 14:21:30 INFO [logging.py:61]: Begin training...
+ 01-17 14:39:10 INFO [logging.py:61]: Loss 'loss' on epoch 8: 5.302144527435303
+ 01-17 14:39:10 INFO [logging.py:61]: Loss 'acc' on epoch 8: 0.3417191207408905
+ 01-17 14:39:10 INFO [logging.py:61]: ========= Epoch 9 out of 1000 =========
+ 01-17 14:39:10 INFO [logging.py:61]: Begin training...
+ 01-17 14:56:49 INFO [logging.py:61]: Loss 'loss' on epoch 9: 5.384851932525635
+ 01-17 14:56:49 INFO [logging.py:61]: Loss 'acc' on epoch 9: 0.33888909220695496
+ 01-17 14:56:49 INFO [logging.py:61]: ========= Epoch 10 out of 1000 =========
+ 01-17 14:56:49 INFO [logging.py:61]: Begin training...
+ 01-17 15:14:29 INFO [logging.py:61]: Loss 'loss' on epoch 10: 5.374612808227539
+ 01-17 15:14:29 INFO [logging.py:61]: Loss 'acc' on epoch 10: 0.3413293659687042
+ 01-17 15:14:29 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010
+ 01-17 15:14:30 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/pytorch_model.bin
+ 01-17 15:14:32 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/optimizer.bin
+ 01-17 15:14:32 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/scheduler.bin
+ 01-17 15:14:32 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/sampler.bin
+ 01-17 15:14:32 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/sampler_1.bin
+ 01-17 15:14:32 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/random_states_0.pkl
+ 01-17 15:14:32 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/custom_checkpoint_0.pkl
+ 01-17 15:14:32 INFO [logging.py:61]: ========= Epoch 11 out of 1000 =========
+ 01-17 15:14:32 INFO [logging.py:61]: Begin training...
swin_default_LR1e-2_addEOSToken/swin_default_LR1e-2_addEOSToken_2024_01_17--17_15_22.log ADDED
@@ -0,0 +1,203 @@
+ 01-17 17:15:22 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken.
+ 01-17 17:15:22 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken.
+ 01-17 17:15:22 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken.
+ 01-17 17:15:22 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken.
+ 01-17 17:17:03 INFO [logging.py:61]: Configuration file is saved to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/config__2024_01_17--17_16_44.toml.
+ 01-17 17:17:03 INFO [logging.py:61]: Environment information:
+ - `Accelerate` version: 0.26.1
+ - Platform: Linux-5.14.0-362.13.1.el9_3.x86_64-x86_64-with-glibc2.34
+ - Python version: 3.10.13
+ - Numpy version: 1.26.3
+ - PyTorch version (GPU?): 2.1.2 (True)
+ - System RAM: 503.48 GB
+ - GPU Available: True
+ - GPU IDs: 4
+ - GPU type: NVIDIA A100-SXM4-80GB
+ 01-17 17:17:03 INFO [logging.py:61]:
+ ===============================================================================================
+ Layer (type:depth-idx) Param #
+ ===============================================================================================
+ DistributedDataParallel --
+ ├─Model: 1-1 --
+ │ └─EncodecModel: 2-1 --
+ │ │ └─EncodecEncoder: 3-1 (7,425,792)
+ │ │ └─EncodecDecoder: 3-2 (7,426,018)
+ │ │ └─EncodecResidualVectorQuantizer: 3-3 --
+ │ └─ModuleList: 2-2 --
+ │ │ └─TokenEmbedding: 3-4 1,049,600
+ │ │ └─TokenEmbedding: 3-5 1,049,600
+ │ │ └─TokenEmbedding: 3-6 1,049,600
+ │ │ └─TokenEmbedding: 3-7 1,049,600
+ │ │ └─TokenEmbedding: 3-8 1,049,600
+ │ │ └─TokenEmbedding: 3-9 1,049,600
+ │ │ └─TokenEmbedding: 3-10 1,049,600
+ │ │ └─TokenEmbedding: 3-11 1,049,600
+ │ └─PreNet: 2-3 --
+ │ │ └─Sequential: 3-12 591,360
+ │ └─SinePositionalEmbedding: 2-4 1
+ │ │ └─Dropout: 3-13 --
+ │ └─TransformerEncoder: 2-5 --
+ │ │ └─ModuleList: 3-14 201,535,488
+ │ │ └─AdaptiveLayerNorm: 3-15 2,101,248
+ │ └─ModuleList: 2-6 --
+ │ │ └─Linear: 3-16 1,049,600
+ │ │ └─Linear: 3-17 1,049,600
+ │ │ └─Linear: 3-18 1,049,600
+ │ │ └─Linear: 3-19 1,049,600
+ │ │ └─Linear: 3-20 1,049,600
+ │ │ └─Linear: 3-21 1,049,600
+ │ │ └─Linear: 3-22 1,049,600
+ │ │ └─Linear: 3-23 1,049,600
+ │ └─ModuleList: 2-7 --
+ │ │ └─TokenEmbedding: 3-24 1,024
+ │ │ └─TokenEmbedding: 3-25 1,024
+ │ │ └─TokenEmbedding: 3-26 1,024
+ │ │ └─TokenEmbedding: 3-27 1,024
+ │ │ └─TokenEmbedding: 3-28 1,024
+ │ │ └─TokenEmbedding: 3-29 1,024
+ │ │ └─TokenEmbedding: 3-30 1,024
+ │ │ └─TokenEmbedding: 3-31 1,024
+ │ └─MulticlassAccuracy: 2-8 --
+ ===============================================================================================
+ Total params: 235,881,699
+ Trainable params: 221,029,888
+ Non-trainable params: 14,851,811
+ ===============================================================================================
+ 01-17 17:17:03 INFO [logging.py:61]: Training control variables:
+ 01-17 17:17:03 INFO [logging.py:61]: `steps_per_epoch`: 500
+ 01-17 17:17:03 INFO [logging.py:61]: Gradient accumulation steps: 1
+ 01-17 17:17:03 INFO [logging.py:61]: `update_steps_per_epoch`: 500
+ 01-17 17:17:03 INFO [logging.py:61]: `max_steps`: 500000
+ 01-17 17:17:03 INFO [logging.py:61]: `max_epochs`: 1000
+ 01-17 17:17:03 INFO [logging.py:61]: warmup_steps=1000. warmup_ratio will be ignored.
+ 01-17 17:17:03 INFO [logging.py:61]: Loading states from /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010
+ 01-17 17:17:07 INFO [logging.py:61]: All model weights loaded successfully
+ 01-17 17:17:13 INFO [logging.py:61]: All optimizer states loaded successfully
+ 01-17 17:17:13 INFO [logging.py:61]: All scheduler states loaded successfully
+ 01-17 17:17:13 INFO [logging.py:61]: All dataloader sampler states loaded successfully
+ 01-17 17:17:13 INFO [logging.py:61]: All random states loaded successfully
+ 01-17 17:17:13 INFO [logging.py:61]: Loading in 1 custom states
+ 01-17 17:17:13 INFO [logging.py:61]: Loading the state of TrainerState from /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0010/custom_checkpoint_0.pkl
+ 01-17 17:17:13 INFO [logging.py:61]: Checkpoint on epoch 10 is loaded.
+ 01-17 17:17:13 INFO [logging.py:61]: ========= Epoch 11 out of 1000 =========
+ 01-17 17:17:13 INFO [logging.py:61]: Begin training...
+ 01-17 17:34:58 INFO [logging.py:61]: Loss 'loss' on epoch 11: 5.385993003845215
+ 01-17 17:34:58 INFO [logging.py:61]: Loss 'acc' on epoch 11: 0.339417040348053
+ 01-17 17:34:58 INFO [logging.py:61]: ========= Epoch 12 out of 1000 =========
+ 01-17 17:34:58 INFO [logging.py:61]: Begin training...
+ 01-17 17:53:05 INFO [logging.py:61]: Loss 'loss' on epoch 12: 5.376588344573975
+ 01-17 17:53:06 INFO [logging.py:61]: Loss 'acc' on epoch 12: 0.33914294838905334
+ 01-17 17:53:06 INFO [logging.py:61]: ========= Epoch 13 out of 1000 =========
+ 01-17 17:53:06 INFO [logging.py:61]: Begin training...
+ 01-17 18:11:23 INFO [logging.py:61]: Loss 'loss' on epoch 13: 5.388041973114014
+ 01-17 18:11:24 INFO [logging.py:61]: Loss 'acc' on epoch 13: 0.339685320854187
+ 01-17 18:11:24 INFO [logging.py:61]: ========= Epoch 14 out of 1000 =========
+ 01-17 18:11:24 INFO [logging.py:61]: Begin training...
+ 01-17 18:30:04 INFO [logging.py:61]: Loss 'loss' on epoch 14: 5.3795671463012695
+ 01-17 18:30:04 INFO [logging.py:61]: Loss 'acc' on epoch 14: 0.3413822054862976
+ 01-17 18:30:04 INFO [logging.py:61]: ========= Epoch 15 out of 1000 =========
+ 01-17 18:30:04 INFO [logging.py:61]: Begin training...
+ 01-17 18:48:00 INFO [logging.py:61]: Loss 'loss' on epoch 15: 5.390796661376953
+ 01-17 18:48:00 INFO [logging.py:61]: Loss 'acc' on epoch 15: 0.33877530694007874
+ 01-17 18:48:00 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0015
+ 01-17 18:48:02 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0015/pytorch_model.bin
+ 01-17 18:48:05 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0015/optimizer.bin
+ 01-17 18:48:05 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0015/scheduler.bin
+ 01-17 18:48:05 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0015/sampler.bin
+ 01-17 18:48:05 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0015/sampler_1.bin
+ 01-17 18:48:05 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0015/random_states_0.pkl
+ 01-17 18:48:05 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0015/custom_checkpoint_0.pkl
+ 01-17 18:48:05 INFO [logging.py:61]: ========= Epoch 16 out of 1000 =========
+ 01-17 18:48:05 INFO [logging.py:61]: Begin training...
+ 01-17 19:06:17 INFO [logging.py:61]: Loss 'loss' on epoch 16: 5.389437198638916
+ 01-17 19:06:17 INFO [logging.py:61]: Loss 'acc' on epoch 16: 0.33847111463546753
+ 01-17 19:06:17 INFO [logging.py:61]: ========= Epoch 17 out of 1000 =========
+ 01-17 19:06:17 INFO [logging.py:61]: Begin training...
+ 01-17 19:24:08 INFO [logging.py:61]: Loss 'loss' on epoch 17: 5.380032539367676
+ 01-17 19:24:08 INFO [logging.py:61]: Loss 'acc' on epoch 17: 0.339590847492218
+ 01-17 19:24:08 INFO [logging.py:61]: ========= Epoch 18 out of 1000 =========
+ 01-17 19:24:08 INFO [logging.py:61]: Begin training...
+ 01-17 19:52:50 INFO [logging.py:61]: Loss 'loss' on epoch 18: 5.37690544128418
+ 01-17 19:52:50 INFO [logging.py:61]: Loss 'acc' on epoch 18: 0.33943551778793335
+ 01-17 19:52:50 INFO [logging.py:61]: ========= Epoch 19 out of 1000 =========
+ 01-17 19:52:50 INFO [logging.py:61]: Begin training...
+ 01-17 20:10:43 INFO [logging.py:61]: Loss 'loss' on epoch 19: 5.3855366706848145
+ 01-17 20:10:43 INFO [logging.py:61]: Loss 'acc' on epoch 19: 0.33942481875419617
+ 01-17 20:10:43 INFO [logging.py:61]: ========= Epoch 20 out of 1000 =========
+ 01-17 20:10:43 INFO [logging.py:61]: Begin training...
+ 01-17 20:29:32 INFO [logging.py:61]: Loss 'loss' on epoch 20: 5.374676704406738
+ 01-17 20:29:32 INFO [logging.py:61]: Loss 'acc' on epoch 20: 0.33983924984931946
+ 01-17 20:29:32 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0020
+ 01-17 20:29:33 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0020/pytorch_model.bin
+ 01-17 20:29:36 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0020/optimizer.bin
+ 01-17 20:29:36 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0020/scheduler.bin
+ 01-17 20:29:36 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0020/sampler.bin
+ 01-17 20:29:36 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0020/sampler_1.bin
+ 01-17 20:29:36 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0020/random_states_0.pkl
+ 01-17 20:29:36 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0020/custom_checkpoint_0.pkl
+ 01-17 20:29:36 INFO [logging.py:61]: ========= Epoch 21 out of 1000 =========
+ 01-17 20:29:36 INFO [logging.py:61]: Begin training...
+ 01-17 20:47:57 INFO [logging.py:61]: Loss 'loss' on epoch 21: 5.386965274810791
+ 01-17 20:47:57 INFO [logging.py:61]: Loss 'acc' on epoch 21: 0.3395291566848755
+ 01-17 20:47:57 INFO [logging.py:61]: ========= Epoch 22 out of 1000 =========
+ 01-17 20:47:57 INFO [logging.py:61]: Begin training...
+ 01-17 21:05:45 INFO [logging.py:61]: Loss 'loss' on epoch 22: 5.365380764007568
+ 01-17 21:05:45 INFO [logging.py:61]: Loss 'acc' on epoch 22: 0.34075096249580383
+ 01-17 21:05:45 INFO [logging.py:61]: ========= Epoch 23 out of 1000 =========
+ 01-17 21:05:45 INFO [logging.py:61]: Begin training...
+ 01-17 21:23:33 INFO [logging.py:61]: Loss 'loss' on epoch 23: 5.38892936706543
+ 01-17 21:23:33 INFO [logging.py:61]: Loss 'acc' on epoch 23: 0.33941057324409485
+ 01-17 21:23:33 INFO [logging.py:61]: ========= Epoch 24 out of 1000 =========
+ 01-17 21:23:33 INFO [logging.py:61]: Begin training...
+ 01-17 21:41:21 INFO [logging.py:61]: Loss 'loss' on epoch 24: 5.370124340057373
+ 01-17 21:41:21 INFO [logging.py:61]: Loss 'acc' on epoch 24: 0.3396085798740387
+ 01-17 21:41:21 INFO [logging.py:61]: ========= Epoch 25 out of 1000 =========
+ 01-17 21:41:21 INFO [logging.py:61]: Begin training...
+ 01-17 21:59:09 INFO [logging.py:61]: Loss 'loss' on epoch 25: 5.369869232177734
+ 01-17 21:59:09 INFO [logging.py:61]: Loss 'acc' on epoch 25: 0.3406093716621399
+ 01-17 21:59:09 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0025
+ 01-17 21:59:11 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0025/pytorch_model.bin
+ 01-17 21:59:14 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0025/optimizer.bin
+ 01-17 21:59:14 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0025/scheduler.bin
+ 01-17 21:59:14 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0025/sampler.bin
+ 01-17 21:59:14 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0025/sampler_1.bin
+ 01-17 21:59:14 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0025/random_states_0.pkl
+ 01-17 21:59:14 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0025/custom_checkpoint_0.pkl
+ 01-17 21:59:16 INFO [logging.py:61]: ========= Epoch 26 out of 1000 =========
+ 01-17 21:59:16 INFO [logging.py:61]: Begin training...
+ 01-17 22:17:25 INFO [logging.py:61]: Loss 'loss' on epoch 26: 5.375258445739746
+ 01-17 22:17:25 INFO [logging.py:61]: Loss 'acc' on epoch 26: 0.33873483538627625
+ 01-17 22:17:25 INFO [logging.py:61]: ========= Epoch 27 out of 1000 =========
+ 01-17 22:17:25 INFO [logging.py:61]: Begin training...
+ 01-17 22:35:12 INFO [logging.py:61]: Loss 'loss' on epoch 27: 5.3775506019592285
+ 01-17 22:35:12 INFO [logging.py:61]: Loss 'acc' on epoch 27: 0.33919477462768555
+ 01-17 22:35:12 INFO [logging.py:61]: ========= Epoch 28 out of 1000 =========
+ 01-17 22:35:12 INFO [logging.py:61]: Begin training...
+ 01-17 22:53:00 INFO [logging.py:61]: Loss 'loss' on epoch 28: 5.390237331390381
+ 01-17 22:53:00 INFO [logging.py:61]: Loss 'acc' on epoch 28: 0.33835938572883606
+ 01-17 22:53:00 INFO [logging.py:61]: ========= Epoch 29 out of 1000 =========
+ 01-17 22:53:00 INFO [logging.py:61]: Begin training...
+ 01-17 23:10:48 INFO [logging.py:61]: Loss 'loss' on epoch 29: 5.371942520141602
+ 01-17 23:10:48 INFO [logging.py:61]: Loss 'acc' on epoch 29: 0.3405751883983612
+ 01-17 23:10:48 INFO [logging.py:61]: ========= Epoch 30 out of 1000 =========
+ 01-17 23:10:48 INFO [logging.py:61]: Begin training...
+ 01-17 23:28:35 INFO [logging.py:61]: Loss 'loss' on epoch 30: 5.369765281677246
+ 01-17 23:28:35 INFO [logging.py:61]: Loss 'acc' on epoch 30: 0.34033069014549255
+ 01-17 23:28:35 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0030
+ 01-17 23:28:36 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0030/pytorch_model.bin
+ 01-17 23:28:45 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0030/optimizer.bin
+ 01-17 23:28:45 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0030/scheduler.bin
+ 01-17 23:28:45 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0030/sampler.bin
+ 01-17 23:28:45 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0030/sampler_1.bin
+ 01-17 23:28:45 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0030/random_states_0.pkl
+ 01-17 23:28:45 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_addEOSToken/checkpoints/epoch_0030/custom_checkpoint_0.pkl
+ 01-17 23:28:45 INFO [logging.py:61]: ========= Epoch 31 out of 1000 =========
+ 01-17 23:28:45 INFO [logging.py:61]: Begin training...
+ 01-17 23:47:05 INFO [logging.py:61]: Loss 'loss' on epoch 31: 5.372854232788086
+ 01-17 23:47:05 INFO [logging.py:61]: Loss 'acc' on epoch 31: 0.3400099575519562
+ 01-17 23:47:05 INFO [logging.py:61]: ========= Epoch 32 out of 1000 =========
+ 01-17 23:47:05 INFO [logging.py:61]: Begin training...
+ 01-18 00:04:54 INFO [logging.py:61]: Loss 'loss' on epoch 32: 5.364051342010498
+ 01-18 00:04:54 INFO [logging.py:61]: Loss 'acc' on epoch 32: 0.3399272561073303
+ 01-18 00:04:54 INFO [logging.py:61]: ========= Epoch 33 out of 1000 =========
+ 01-18 00:04:54 INFO [logging.py:61]: Begin training...
swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705450617.gina1.1498943.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7788705d2ca707cbbd2b5ff1645866ddac63ebf122ee6a449e18cc4eb54983d
+ size 368795
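
The tb_log event files in this commit are stored as Git LFS pointers: three-line stubs recording the spec version, the SHA-256 of the real object, and its size in bytes. After `git lfs pull` (or downloading from the Hub), the payload can be checked against its pointer; a small sketch, with the local filename assumed:

# Sketch: verify a downloaded LFS object against the pointer above.
import hashlib

EXPECTED_OID = "e7788705d2ca707cbbd2b5ff1645866ddac63ebf122ee6a449e18cc4eb54983d"
EXPECTED_SIZE = 368795

with open("tb_log/events.out.tfevents.1705450617.gina1.1498943.0", "rb") as f:
    data = f.read()

assert len(data) == EXPECTED_SIZE, "size mismatch with LFS pointer"
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID, "oid mismatch with LFS pointer"
print("pointer verified")
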
swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705450617.gina1.1498944.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34a26980b52523672ca790ef1d4aa6d2f2a9718cfd2764aacb44a47eeb891c8c
+ size 2170
swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705450617.gina1.1498945.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b68c141fba1b5301e8a56b4b075d598bb61910225e149e39870d88cbec923b4e
+ size 2170
swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705450617.gina1.1498946.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33d56a85c87367dba2524854103cb0fa917e5a87b3dc9bfce8b167701fc1d23c
+ size 2170
swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705454078.gina301.610519.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7edc77f10fdec037eaf9b43db84eb42657bbffa59d6250094ba607ffeea0e9a
+ size 276026
swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705454078.gina301.610520.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0bdcf98a8c49f14de2ea16c197cc1bcc5a1a9c5d32f629ed617d4ca4e537ca6
+ size 2170
swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705454078.gina301.610521.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d0a55d85d50d9778f17cf404b752dc21123dbed4e1185acb5160704e2a7b7b5f
3
+ size 2170
swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705454078.gina301.610522.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5fbf5ffdcceff08c5a8b229bf89f47ef363ea6906f06a3b3a180b03a2f96d261
3
+ size 2170
swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705472223.gina1.1576669.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ae26f96b329133f7afaa11b57d23661d4ccac43a2904a37ca5ff92189d11143
3
+ size 620690
swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705472223.gina1.1576670.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af00695e81116da6aa753e531f6fefec091fdefc20ba5e0ad103bbd53362cb35
3
+ size 2170
swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705472223.gina1.1576671.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c494d29f5efe6acd4c545e367b5ff638ecf6d1b05344a5459db6b1ad76a7bf0f
3
+ size 2170
swin_default_LR1e-2_addEOSToken/tb_log/events.out.tfevents.1705472223.gina1.1576672.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a4ec1e9390f233230307f57f7ec4d8cb489d80f77a7642c0d0b7cfba2143cca
3
+ size 2170
swin_default_LR1e-3_AR-NAR/config__2024_01_18--00_22_05.toml ADDED
@@ -0,0 +1,89 @@
+ [[validate_dataset]]
+ path = "dataloader.Dataset"
+
+ [validate_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+ [validate_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
+
+ [meta]
+ save_dir = "exp"
+ description = "Train a model using Generative Adversarial Networks (GANs)"
+ seed = 20220815
+ exp_id = "swin_default_LR1e-2_AR-NAR"
+ config_path = "/fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/conf/swin_default_LR1e-2_AR-NAR.toml"
+
+ [trainer]
+ path = "trainer.Trainer"
+
+ [loss_function]
+ path = "torch.nn.MSELoss"
+
+ [optimizer]
+ path = "torch.optim.AdamW"
+
+ [model]
+ path = "model_ar_nar.Model"
+
+ [acoustics]
+ n_fft = 512
+ hop_length = 128
+ win_length = 512
+ sr = 24000
+
+ [train_dataset]
+ path = "dataloader.Dataset"
+
+ [test_dataset]
+ path = "dataloader.Dataset"
+
+ [trainer.args]
+ debug = false
+ max_steps = 0
+ max_epochs = 1000
+ max_grad_norm = 1.0
+ save_max_score = true
+ save_ckpt_interval = 5
+ max_patience = 200
+ plot_norm = true
+ validation_interval = 200
+ max_num_checkpoints = 100
+ scheduler_name = "constant_schedule_with_warmup"
+ warmup_steps = 1000
+ warmup_ratio = 0.0
+ gradient_accumulation_steps = 1
+
+ [loss_function.args]
+
+ [optimizer.args]
+ lr = 0.001
+
+ [model.args]
+
+ [train_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+
+ [train_dataset.dataloader]
+ batch_size = 20
+ num_workers = 10
+ drop_last = true
+ pin_memory = true
+
+ [test_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+
+ [test_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
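Every component in this config follows the same declaration pattern: a dotted `path` naming the class to use plus an `args` table of keyword arguments (e.g. `path = "torch.optim.AdamW"` with `lr = 0.001`). A minimal sketch of how such a pair could be resolved and instantiated, assuming a plain dynamic import; this is an illustrative reading of the convention, not the recipe's actual loader:

import importlib
import tomli  # the logs show Python 3.10, so tomli stands in for 3.11's tomllib

def instantiate(path: str, args: dict):
    # Resolve a dotted path such as "torch.optim.AdamW" and call it with args.
    module_name, _, attr = path.rpartition(".")
    cls = getattr(importlib.import_module(module_name), attr)
    return cls(**args)

with open("config__2024_01_18--00_22_05.toml", "rb") as f:
    config = tomli.load(f)

# [loss_function] has an empty args table, so this yields torch.nn.MSELoss().
loss_fn = instantiate(config["loss_function"]["path"],
                      config["loss_function"].get("args", {}))
# The optimizer would additionally need model parameters at call time, e.g.
# instantiate(config["optimizer"]["path"],
#             {"params": model.parameters(), **config["optimizer"]["args"]})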
swin_default_LR1e-3_AR-NAR/config__2024_01_18--00_23_20.toml ADDED
@@ -0,0 +1,89 @@
+ [[validate_dataset]]
+ path = "dataloader.Dataset"
+
+ [validate_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+ [validate_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
+
+ [meta]
+ save_dir = "exp"
+ description = "Train a model using Generative Adversarial Networks (GANs)"
+ seed = 20220815
+ exp_id = "swin_default_LR1e-2_AR-NAR"
+ config_path = "/fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/conf/swin_default_LR1e-2_AR-NAR.toml"
+
+ [trainer]
+ path = "trainer.Trainer"
+
+ [loss_function]
+ path = "torch.nn.MSELoss"
+
+ [optimizer]
+ path = "torch.optim.AdamW"
+
+ [model]
+ path = "model_ar_nar.Model"
+
+ [acoustics]
+ n_fft = 512
+ hop_length = 128
+ win_length = 512
+ sr = 24000
+
+ [train_dataset]
+ path = "dataloader.Dataset"
+
+ [test_dataset]
+ path = "dataloader.Dataset"
+
+ [trainer.args]
+ debug = false
+ max_steps = 0
+ max_epochs = 1000
+ max_grad_norm = 1.0
+ save_max_score = true
+ save_ckpt_interval = 5
+ max_patience = 200
+ plot_norm = true
+ validation_interval = 200
+ max_num_checkpoints = 100
+ scheduler_name = "constant_schedule_with_warmup"
+ warmup_steps = 1000
+ warmup_ratio = 0.0
+ gradient_accumulation_steps = 1
+
+ [loss_function.args]
+
+ [optimizer.args]
+ lr = 0.001
+
+ [model.args]
+
+ [train_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+
+ [train_dataset.dataloader]
+ batch_size = 20
+ num_workers = 10
+ drop_last = true
+ pin_memory = true
+
+ [test_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+
+ [test_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
swin_default_LR1e-3_AR-NAR/config__2024_01_18--00_33_25.toml ADDED
@@ -0,0 +1,89 @@
+ [[validate_dataset]]
+ path = "dataloader.Dataset"
+
+ [validate_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+ [validate_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
+
+ [meta]
+ save_dir = "exp"
+ description = "Train a model using Generative Adversarial Networks (GANs)"
+ seed = 20220815
+ exp_id = "swin_default_LR1e-2_AR-NAR"
+ config_path = "/fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/conf/swin_default_LR1e-2_AR-NAR.toml"
+
+ [trainer]
+ path = "trainer.Trainer"
+
+ [loss_function]
+ path = "torch.nn.MSELoss"
+
+ [optimizer]
+ path = "torch.optim.AdamW"
+
+ [model]
+ path = "model_ar_nar.Model"
+
+ [acoustics]
+ n_fft = 512
+ hop_length = 128
+ win_length = 512
+ sr = 24000
+
+ [train_dataset]
+ path = "dataloader.Dataset"
+
+ [test_dataset]
+ path = "dataloader.Dataset"
+
+ [trainer.args]
+ debug = false
+ max_steps = 0
+ max_epochs = 1000
+ max_grad_norm = 1.0
+ save_max_score = true
+ save_ckpt_interval = 5
+ max_patience = 200
+ plot_norm = true
+ validation_interval = 200
+ max_num_checkpoints = 100
+ scheduler_name = "constant_schedule_with_warmup"
+ warmup_steps = 1000
+ warmup_ratio = 0.0
+ gradient_accumulation_steps = 1
+
+ [loss_function.args]
+
+ [optimizer.args]
+ lr = 0.001
+
+ [model.args]
+
+ [train_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+
+ [train_dataset.dataloader]
+ batch_size = 20
+ num_workers = 10
+ drop_last = true
+ pin_memory = true
+
+ [test_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+
+ [test_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
swin_default_LR1e-3_AR-NAR/config__2024_01_18--13_13_46.toml ADDED
@@ -0,0 +1,89 @@
+ [[validate_dataset]]
+ path = "dataloader.Dataset"
+
+ [validate_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+ [validate_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
+
+ [meta]
+ save_dir = "exp"
+ description = "Train a model using Generative Adversarial Networks (GANs)"
+ seed = 20220815
+ exp_id = "swin_default_LR1e-3_AR-NAR"
+ config_path = "/fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/conf/swin_default_LR1e-3_AR-NAR.toml"
+
+ [trainer]
+ path = "trainer.Trainer"
+
+ [loss_function]
+ path = "torch.nn.MSELoss"
+
+ [optimizer]
+ path = "torch.optim.AdamW"
+
+ [model]
+ path = "model_ar_nar.Model"
+
+ [acoustics]
+ n_fft = 512
+ hop_length = 128
+ win_length = 512
+ sr = 24000
+
+ [train_dataset]
+ path = "dataloader.Dataset"
+
+ [test_dataset]
+ path = "dataloader.Dataset"
+
+ [trainer.args]
+ debug = false
+ max_steps = 0
+ max_epochs = 1000
+ max_grad_norm = 1.0
+ save_max_score = true
+ save_ckpt_interval = 5
+ max_patience = 200
+ plot_norm = true
+ validation_interval = 200
+ max_num_checkpoints = 100
+ scheduler_name = "constant_schedule_with_warmup"
+ warmup_steps = 1000
+ warmup_ratio = 0.0
+ gradient_accumulation_steps = 1
+
+ [loss_function.args]
+
+ [optimizer.args]
+ lr = 0.001
+
+ [model.args]
+
+ [train_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+
+ [train_dataset.dataloader]
+ batch_size = 20
+ num_workers = 10
+ drop_last = true
+ pin_memory = true
+
+ [test_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+
+ [test_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
swin_default_LR1e-3_AR-NAR/config__2024_01_18--13_14_43.toml ADDED
@@ -0,0 +1,89 @@
+ [[validate_dataset]]
+ path = "dataloader.Dataset"
+
+ [validate_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+ [validate_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
+
+ [meta]
+ save_dir = "exp"
+ description = "Train a model using Generative Adversarial Networks (GANs)"
+ seed = 20220815
+ exp_id = "swin_default_LR1e-3_AR-NAR"
+ config_path = "/fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/conf/swin_default_LR1e-3_AR-NAR.toml"
+
+ [trainer]
+ path = "trainer.Trainer"
+
+ [loss_function]
+ path = "torch.nn.MSELoss"
+
+ [optimizer]
+ path = "torch.optim.AdamW"
+
+ [model]
+ path = "model_ar_nar.Model"
+
+ [acoustics]
+ n_fft = 512
+ hop_length = 128
+ win_length = 512
+ sr = 24000
+
+ [train_dataset]
+ path = "dataloader.Dataset"
+
+ [test_dataset]
+ path = "dataloader.Dataset"
+
+ [trainer.args]
+ debug = false
+ max_steps = 0
+ max_epochs = 1000
+ max_grad_norm = 1.0
+ save_max_score = true
+ save_ckpt_interval = 5
+ max_patience = 200
+ plot_norm = true
+ validation_interval = 200
+ max_num_checkpoints = 100
+ scheduler_name = "constant_schedule_with_warmup"
+ warmup_steps = 1000
+ warmup_ratio = 0.0
+ gradient_accumulation_steps = 1
+
+ [loss_function.args]
+
+ [optimizer.args]
+ lr = 0.001
+
+ [model.args]
+
+ [train_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+
+ [train_dataset.dataloader]
+ batch_size = 20
+ num_workers = 10
+ drop_last = true
+ pin_memory = true
+
+ [test_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+
+ [test_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
swin_default_LR1e-3_AR-NAR/swin_default_LR1e-2_AR-NAR_2024_01_18--00_21_17.log ADDED
@@ -0,0 +1,85 @@
+ 01-18 00:21:17 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR.
+ 01-18 00:21:17 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR.
+ 01-18 00:21:17 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR.
+ 01-18 00:21:17 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR.
+ 01-18 00:22:18 INFO [logging.py:61]: Configuration file is saved to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/config__2024_01_18--00_22_05.toml.
+ 01-18 00:22:18 INFO [logging.py:61]: Environment information:
+ - `Accelerate` version: 0.26.1
+ - Platform: Linux-5.14.0-362.13.1.el9_3.x86_64-x86_64-with-glibc2.34
+ - Python version: 3.10.13
+ - Numpy version: 1.26.3
+ - PyTorch version (GPU?): 2.1.2 (True)
+ - System RAM: 503.48 GB
+ - GPU Available: True
+ - GPU IDs: 4
+ - GPU type: NVIDIA A100-SXM4-80GB
+ 01-18 00:22:18 INFO [logging.py:61]:
+ ===============================================================================================
+ Layer (type:depth-idx) Param #
+ ===============================================================================================
+ DistributedDataParallel --
+ ├─Model: 1-1 --
+ │ └─EncodecModel: 2-1 --
+ │ │ └─EncodecEncoder: 3-1 (7,425,792)
+ │ │ └─EncodecDecoder: 3-2 (7,426,018)
+ │ │ └─EncodecResidualVectorQuantizer: 3-3 --
+ │ └─TokenEmbedding: 2-2 --
+ │ │ └─Dropout: 3-4 --
+ │ │ └─Embedding: 3-5 1,049,600
+ │ └─Identity: 2-3 --
+ │ └─SinePositionalEmbedding: 2-4 1
+ │ │ └─Dropout: 3-6 --
+ │ └─TransformerEncoder: 2-5 --
+ │ │ └─ModuleList: 3-7 151,154,688
+ │ │ └─LayerNorm: 3-8 2,048
+ │ └─Linear: 2-6 1,049,600
+ │ └─MulticlassAccuracy: 2-7 --
+ │ └─TokenEmbedding: 2-8 --
+ │ │ └─Dropout: 3-9 --
+ │ │ └─Embedding: 3-10 1,048,576
+ │ └─ModuleList: 2-9 --
+ │ │ └─TokenEmbedding: 3-11 1,049,600
+ │ │ └─TokenEmbedding: 3-12 1,048,576
+ │ │ └─TokenEmbedding: 3-13 1,048,576
+ │ │ └─TokenEmbedding: 3-14 1,048,576
+ │ │ └─TokenEmbedding: 3-15 1,048,576
+ │ │ └─TokenEmbedding: 3-16 1,048,576
+ │ │ └─TokenEmbedding: 3-17 1,048,576
+ │ │ └─TokenEmbedding: 3-18 1,048,576
+ │ └─Identity: 2-10 --
+ │ └─SinePositionalEmbedding: 2-11 1
+ │ │ └─Dropout: 3-19 --
+ │ └─TransformerEncoder: 2-12 --
+ │ │ └─ModuleList: 3-20 201,535,488
+ │ │ └─AdaptiveLayerNorm: 3-21 2,101,248
+ │ └─ModuleList: 2-13 --
+ │ │ └─Linear: 3-22 1,048,576
+ │ │ └─Linear: 3-23 1,048,576
+ │ │ └─Linear: 3-24 1,048,576
+ │ │ └─Linear: 3-25 1,048,576
+ │ │ └─Linear: 3-26 1,048,576
+ │ │ └─Linear: 3-27 1,048,576
+ │ │ └─Linear: 3-28 1,048,576
+ │ └─ModuleList: 2-14 --
+ │ │ └─TokenEmbedding: 3-29 1,024
+ │ │ └─TokenEmbedding: 3-30 1,024
+ │ │ └─TokenEmbedding: 3-31 1,024
+ │ │ └─TokenEmbedding: 3-32 1,024
+ │ │ └─TokenEmbedding: 3-33 1,024
+ │ │ └─TokenEmbedding: 3-34 1,024
+ │ │ └─TokenEmbedding: 3-35 1,024
+ │ └─MulticlassAccuracy: 2-15 --
+ ===============================================================================================
+ Total params: 388,529,892
+ Trainable params: 373,678,081
+ Non-trainable params: 14,851,811
+ ===============================================================================================
+ 01-18 00:22:18 INFO [logging.py:61]: Training control variables:
+ 01-18 00:22:18 INFO [logging.py:61]: `steps_per_epoch`: 500
+ 01-18 00:22:18 INFO [logging.py:61]: Gradient accumulation steps: 1
+ 01-18 00:22:18 INFO [logging.py:61]: `update_steps_per_epoch`: 500
+ 01-18 00:22:18 INFO [logging.py:61]: `max_steps`: 500000
+ 01-18 00:22:18 INFO [logging.py:61]: `max_epochs`: 1000
+ 01-18 00:22:18 INFO [logging.py:61]: warmup_steps=1000. warmup_ratio will be ignored.
+ 01-18 00:22:18 INFO [logging.py:61]: ========= Epoch 1 out of 1000 =========
+ 01-18 00:22:18 INFO [logging.py:61]: Begin training...
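The control variables in the log above are mutually consistent: with `max_steps = 0` in the config, the trainer appears to derive the step budget as `update_steps_per_epoch × max_epochs = 500 × 1000 = 500000`, and a nonzero `warmup_steps` takes precedence over `warmup_ratio` (hence the "will be ignored" notice). A small sketch of that bookkeeping, inferred from the logged values rather than taken from the trainer source:

# Inferred from the logged values above; not the trainer's actual code.
steps_per_epoch = 500
gradient_accumulation_steps = 1
max_epochs = 1000
max_steps = 0  # 0 in the config: derive the budget from epochs instead

update_steps_per_epoch = steps_per_epoch // gradient_accumulation_steps  # 500
if max_steps == 0:
    max_steps = update_steps_per_epoch * max_epochs  # 500 * 1000 = 500000

warmup_steps, warmup_ratio = 1000, 0.0
warmup = warmup_steps if warmup_steps > 0 else int(warmup_ratio * max_steps)
assert (max_steps, warmup) == (500_000, 1000)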
swin_default_LR1e-3_AR-NAR/swin_default_LR1e-2_AR-NAR_2024_01_18--00_23_16.log ADDED
@@ -0,0 +1,85 @@
+ 01-18 00:23:16 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR.
+ 01-18 00:23:16 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR.
+ 01-18 00:23:16 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR.
+ 01-18 00:23:16 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR.
+ 01-18 00:23:22 INFO [logging.py:61]: Configuration file is saved to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/config__2024_01_18--00_23_20.toml.
+ 01-18 00:23:22 INFO [logging.py:61]: Environment information:
+ - `Accelerate` version: 0.26.1
+ - Platform: Linux-5.14.0-362.13.1.el9_3.x86_64-x86_64-with-glibc2.34
+ - Python version: 3.10.13
+ - Numpy version: 1.26.3
+ - PyTorch version (GPU?): 2.1.2 (True)
+ - System RAM: 503.48 GB
+ - GPU Available: True
+ - GPU IDs: 4
+ - GPU type: NVIDIA A100-SXM4-80GB
+ 01-18 00:23:22 INFO [logging.py:61]:
+ ===============================================================================================
+ Layer (type:depth-idx) Param #
+ ===============================================================================================
+ DistributedDataParallel --
+ ├─Model: 1-1 --
+ │ └─EncodecModel: 2-1 --
+ │ │ └─EncodecEncoder: 3-1 (7,425,792)
+ │ │ └─EncodecDecoder: 3-2 (7,426,018)
+ │ │ └─EncodecResidualVectorQuantizer: 3-3 --
+ │ └─TokenEmbedding: 2-2 --
+ │ │ └─Dropout: 3-4 --
+ │ │ └─Embedding: 3-5 524,800
+ │ └─Identity: 2-3 --
+ │ └─SinePositionalEmbedding: 2-4 1
+ │ │ └─Dropout: 3-6 --
+ │ └─TransformerEncoder: 2-5 --
+ │ │ └─ModuleList: 3-7 37,828,608
+ │ │ └─LayerNorm: 3-8 1,024
+ │ └─Linear: 2-6 524,800
+ │ └─MulticlassAccuracy: 2-7 --
+ │ └─TokenEmbedding: 2-8 --
+ │ │ └─Dropout: 3-9 --
+ │ │ └─Embedding: 3-10 524,288
+ │ └─ModuleList: 2-9 --
+ │ │ └─TokenEmbedding: 3-11 524,800
+ │ │ └─TokenEmbedding: 3-12 524,288
+ │ │ └─TokenEmbedding: 3-13 524,288
+ │ │ └─TokenEmbedding: 3-14 524,288
+ │ │ └─TokenEmbedding: 3-15 524,288
+ │ │ └─TokenEmbedding: 3-16 524,288
+ │ │ └─TokenEmbedding: 3-17 524,288
+ │ │ └─TokenEmbedding: 3-18 524,288
+ │ └─Identity: 2-10 --
+ │ └─SinePositionalEmbedding: 2-11 1
+ │ │ └─Dropout: 3-19 --
+ │ └─TransformerEncoder: 2-12 --
+ │ │ └─ModuleList: 3-20 50,436,096
+ │ │ └─AdaptiveLayerNorm: 3-21 526,336
+ │ └─ModuleList: 2-13 --
+ │ │ └─Linear: 3-22 524,288
+ │ │ └─Linear: 3-23 524,288
+ │ │ └─Linear: 3-24 524,288
+ │ │ └─Linear: 3-25 524,288
+ │ │ └─Linear: 3-26 524,288
+ │ │ └─Linear: 3-27 524,288
+ │ │ └─Linear: 3-28 524,288
+ │ └─ModuleList: 2-14 --
+ │ │ └─TokenEmbedding: 3-29 512
+ │ │ └─TokenEmbedding: 3-30 512
+ │ │ └─TokenEmbedding: 3-31 512
+ │ │ └─TokenEmbedding: 3-32 512
+ │ │ └─TokenEmbedding: 3-33 512
+ │ │ └─TokenEmbedding: 3-34 512
+ │ │ └─TokenEmbedding: 3-35 512
+ │ └─MulticlassAccuracy: 2-15 --
+ ===============================================================================================
+ Total params: 113,086,180
+ Trainable params: 98,234,369
+ Non-trainable params: 14,851,811
+ ===============================================================================================
+ 01-18 00:23:22 INFO [logging.py:61]: Training control variables:
+ 01-18 00:23:22 INFO [logging.py:61]: `steps_per_epoch`: 500
+ 01-18 00:23:22 INFO [logging.py:61]: Gradient accumulation steps: 1
+ 01-18 00:23:22 INFO [logging.py:61]: `update_steps_per_epoch`: 500
+ 01-18 00:23:22 INFO [logging.py:61]: `max_steps`: 500000
+ 01-18 00:23:22 INFO [logging.py:61]: `max_epochs`: 1000
+ 01-18 00:23:22 INFO [logging.py:61]: warmup_steps=1000. warmup_ratio will be ignored.
+ 01-18 00:23:22 INFO [logging.py:61]: ========= Epoch 1 out of 1000 =========
+ 01-18 00:23:22 INFO [logging.py:61]: Begin training...
swin_default_LR1e-3_AR-NAR/swin_default_LR1e-2_AR-NAR_2024_01_18--00_33_02.log ADDED
@@ -0,0 +1,515 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 01-18 00:33:02 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR.
2
+ 01-18 00:33:02 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR.
3
+ 01-18 00:33:02 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR.
4
+ 01-18 00:33:02 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR.
5
+ 01-18 00:33:32 INFO [logging.py:61]: Configuration file is saved to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/config__2024_01_18--00_33_25.toml.
6
+ 01-18 00:33:32 INFO [logging.py:61]: Environment information:
7
+ - `Accelerate` version: 0.26.1
8
+ - Platform: Linux-5.14.0-362.13.1.el9_3.x86_64-x86_64-with-glibc2.34
9
+ - Python version: 3.10.13
10
+ - Numpy version: 1.26.3
11
+ - PyTorch version (GPU?): 2.1.2 (True)
12
+ - System RAM: 503.48 GB
13
+ - GPU Available: True
14
+ - GPU IDs: 4
15
+ - GPU type: NVIDIA A100-SXM4-80GB
16
+ 01-18 00:33:32 INFO [logging.py:61]:
17
+ ===============================================================================================
18
+ Layer (type:depth-idx) Param #
19
+ ===============================================================================================
20
+ DistributedDataParallel --
21
+ ├─Model: 1-1 --
22
+ │ └─EncodecModel: 2-1 --
23
+ │ │ └─EncodecEncoder: 3-1 (7,425,792)
24
+ │ │ └─EncodecDecoder: 3-2 (7,426,018)
25
+ │ │ └─EncodecResidualVectorQuantizer: 3-3 --
26
+ │ └─TokenEmbedding: 2-2 --
27
+ │ │ └─Dropout: 3-4 --
28
+ │ │ └─Embedding: 3-5 524,800
29
+ │ └─Identity: 2-3 --
30
+ │ └─SinePositionalEmbedding: 2-4 1
31
+ │ │ └─Dropout: 3-6 --
32
+ │ └─TransformerEncoder: 2-5 --
33
+ │ │ └─ModuleList: 3-7 37,828,608
34
+ │ │ └─LayerNorm: 3-8 1,024
35
+ │ └─Linear: 2-6 524,800
36
+ │ └─MulticlassAccuracy: 2-7 --
37
+ │ └─TokenEmbedding: 2-8 --
38
+ │ │ └─Dropout: 3-9 --
39
+ │ │ └─Embedding: 3-10 524,288
40
+ │ └─ModuleList: 2-9 --
41
+ │ │ └─TokenEmbedding: 3-11 524,800
42
+ │ │ └─TokenEmbedding: 3-12 524,288
43
+ │ │ └─TokenEmbedding: 3-13 524,288
44
+ │ │ └─TokenEmbedding: 3-14 524,288
45
+ │ │ └─TokenEmbedding: 3-15 524,288
46
+ │ │ └─TokenEmbedding: 3-16 524,288
47
+ │ │ └─TokenEmbedding: 3-17 524,288
48
+ │ │ └─TokenEmbedding: 3-18 524,288
49
+ │ └─Identity: 2-10 --
50
+ │ └─SinePositionalEmbedding: 2-11 1
51
+ │ │ └─Dropout: 3-19 --
52
+ │ └─TransformerEncoder: 2-12 --
53
+ │ │ └─ModuleList: 3-20 50,436,096
54
+ │ │ └─AdaptiveLayerNorm: 3-21 526,336
55
+ │ └─ModuleList: 2-13 --
56
+ │ │ └─Linear: 3-22 524,288
57
+ │ │ └─Linear: 3-23 524,288
58
+ │ │ └─Linear: 3-24 524,288
59
+ │ │ └─Linear: 3-25 524,288
60
+ │ │ └─Linear: 3-26 524,288
61
+ │ │ └─Linear: 3-27 524,288
62
+ │ │ └─Linear: 3-28 524,288
63
+ │ └─ModuleList: 2-14 --
64
+ │ │ └─TokenEmbedding: 3-29 512
65
+ │ │ └─TokenEmbedding: 3-30 512
66
+ │ │ └─TokenEmbedding: 3-31 512
67
+ │ │ └─TokenEmbedding: 3-32 512
68
+ │ │ └─TokenEmbedding: 3-33 512
69
+ │ │ └─TokenEmbedding: 3-34 512
70
+ │ │ └─TokenEmbedding: 3-35 512
71
+ │ └─MulticlassAccuracy: 2-15 --
72
+ ===============================================================================================
73
+ Total params: 113,086,180
74
+ Trainable params: 98,234,369
75
+ Non-trainable params: 14,851,811
76
+ ===============================================================================================
77
+ 01-18 00:33:32 INFO [logging.py:61]: Training control variables:
78
+ 01-18 00:33:32 INFO [logging.py:61]: `steps_per_epoch`: 500
79
+ 01-18 00:33:32 INFO [logging.py:61]: Gradient accumulation steps: 1
80
+ 01-18 00:33:32 INFO [logging.py:61]: `update_steps_per_epoch`: 500
81
+ 01-18 00:33:32 INFO [logging.py:61]: `max_steps`: 500000
82
+ 01-18 00:33:32 INFO [logging.py:61]: `max_epochs`: 1000
83
+ 01-18 00:33:32 INFO [logging.py:61]: warmup_steps=1000. warmup_ratio will be ignored.
84
+ 01-18 00:33:32 INFO [logging.py:61]: ========= Epoch 1 out of 1000 =========
85
+ 01-18 00:33:32 INFO [logging.py:61]: Begin training...
86
+ 01-18 00:48:19 INFO [logging.py:61]: Loss 'loss' on epoch 1: 41.1013298034668
87
+ 01-18 00:48:19 INFO [logging.py:61]: Loss 'ar_loss' on epoch 1: 3.8041999340057373
88
+ 01-18 00:48:19 INFO [logging.py:61]: Loss 'nar_loss' on epoch 1: 37.2971305847168
89
+ 01-18 00:48:19 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 1: 0.5283006429672241
90
+ 01-18 00:48:19 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 1: 0.14399056136608124
91
+ 01-18 00:48:19 INFO [logging.py:61]: ========= Epoch 2 out of 1000 =========
92
+ 01-18 00:48:19 INFO [logging.py:61]: Begin training...
93
+ 01-18 01:03:02 INFO [logging.py:61]: Loss 'loss' on epoch 2: 71.26728057861328
94
+ 01-18 01:03:02 INFO [logging.py:61]: Loss 'ar_loss' on epoch 2: 3.0147104263305664
95
+ 01-18 01:03:02 INFO [logging.py:61]: Loss 'nar_loss' on epoch 2: 68.25257110595703
96
+ 01-18 01:03:02 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 2: 0.6516079306602478
97
+ 01-18 01:03:02 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 2: 0.14630988240242004
98
+ 01-18 01:03:02 INFO [logging.py:61]: ========= Epoch 3 out of 1000 =========
99
+ 01-18 01:03:02 INFO [logging.py:61]: Begin training...
100
+ 01-18 01:17:44 INFO [logging.py:61]: Loss 'loss' on epoch 3: 80.09443664550781
101
+ 01-18 01:17:44 INFO [logging.py:61]: Loss 'ar_loss' on epoch 3: 2.8742563724517822
102
+ 01-18 01:17:44 INFO [logging.py:61]: Loss 'nar_loss' on epoch 3: 77.22017669677734
103
+ 01-18 01:17:44 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 3: 0.6769989132881165
104
+ 01-18 01:17:44 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 3: 0.1532195657491684
105
+ 01-18 01:17:44 INFO [logging.py:61]: ========= Epoch 4 out of 1000 =========
106
+ 01-18 01:17:44 INFO [logging.py:61]: Begin training...
107
+ 01-18 01:32:28 INFO [logging.py:61]: Loss 'loss' on epoch 4: 64.45104217529297
108
+ 01-18 01:32:28 INFO [logging.py:61]: Loss 'ar_loss' on epoch 4: 2.756040096282959
109
+ 01-18 01:32:28 INFO [logging.py:61]: Loss 'nar_loss' on epoch 4: 61.69499969482422
110
+ 01-18 01:32:28 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 4: 0.6978757381439209
111
+ 01-18 01:32:28 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 4: 0.1739574521780014
112
+ 01-18 01:32:28 INFO [logging.py:61]: ========= Epoch 5 out of 1000 =========
113
+ 01-18 01:32:28 INFO [logging.py:61]: Begin training...
114
+ 01-18 01:47:11 INFO [logging.py:61]: Loss 'loss' on epoch 5: 50.534027099609375
115
+ 01-18 01:47:11 INFO [logging.py:61]: Loss 'ar_loss' on epoch 5: 2.4202704429626465
116
+ 01-18 01:47:11 INFO [logging.py:61]: Loss 'nar_loss' on epoch 5: 48.1137580871582
117
+ 01-18 01:47:11 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 5: 0.7520415186882019
118
+ 01-18 01:47:11 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 5: 0.2114027887582779
119
+ 01-18 01:47:11 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0005
120
+ 01-18 01:47:12 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0005/pytorch_model.bin
121
+ 01-18 01:47:13 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0005/optimizer.bin
122
+ 01-18 01:47:13 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0005/scheduler.bin
123
+ 01-18 01:47:13 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0005/sampler.bin
124
+ 01-18 01:47:13 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0005/sampler_1.bin
125
+ 01-18 01:47:13 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0005/random_states_0.pkl
126
+ 01-18 01:47:13 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0005/custom_checkpoint_0.pkl
127
+ 01-18 01:47:14 INFO [logging.py:61]: ========= Epoch 6 out of 1000 =========
128
+ 01-18 01:47:14 INFO [logging.py:61]: Begin training...
129
+ 01-18 02:02:24 INFO [logging.py:61]: Loss 'loss' on epoch 6: 37.49298858642578
130
+ 01-18 02:02:24 INFO [logging.py:61]: Loss 'ar_loss' on epoch 6: 1.8649616241455078
131
+ 01-18 02:02:24 INFO [logging.py:61]: Loss 'nar_loss' on epoch 6: 35.628028869628906
132
+ 01-18 02:02:24 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 6: 0.8278111219406128
133
+ 01-18 02:02:24 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 6: 0.22959543764591217
134
+ 01-18 02:02:24 INFO [logging.py:61]: ========= Epoch 7 out of 1000 =========
135
+ 01-18 02:02:24 INFO [logging.py:61]: Begin training...
136
+ 01-18 02:17:03 INFO [logging.py:61]: Loss 'loss' on epoch 7: 28.96417236328125
137
+ 01-18 02:17:03 INFO [logging.py:61]: Loss 'ar_loss' on epoch 7: 1.677205204963684
138
+ 01-18 02:17:03 INFO [logging.py:61]: Loss 'nar_loss' on epoch 7: 27.286962509155273
139
+ 01-18 02:17:03 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 7: 0.8506313562393188
140
+ 01-18 02:17:03 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 7: 0.2341448813676834
141
+ 01-18 02:17:03 INFO [logging.py:61]: ========= Epoch 8 out of 1000 =========
142
+ 01-18 02:17:03 INFO [logging.py:61]: Begin training...
143
+ 01-18 02:31:48 INFO [logging.py:61]: Loss 'loss' on epoch 8: 28.844099044799805
144
+ 01-18 02:31:48 INFO [logging.py:61]: Loss 'ar_loss' on epoch 8: 1.6051831245422363
145
+ 01-18 02:31:48 INFO [logging.py:61]: Loss 'nar_loss' on epoch 8: 27.238914489746094
146
+ 01-18 02:31:48 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 8: 0.85962975025177
147
+ 01-18 02:31:48 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 8: 0.2343209981918335
148
+ 01-18 02:31:48 INFO [logging.py:61]: ========= Epoch 9 out of 1000 =========
149
+ 01-18 02:31:48 INFO [logging.py:61]: Begin training...
150
+ 01-18 02:46:32 INFO [logging.py:61]: Loss 'loss' on epoch 9: 18.423540115356445
151
+ 01-18 02:46:32 INFO [logging.py:61]: Loss 'ar_loss' on epoch 9: 1.5607959032058716
152
+ 01-18 02:46:32 INFO [logging.py:61]: Loss 'nar_loss' on epoch 9: 16.862743377685547
153
+ 01-18 02:46:32 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 9: 0.8656294345855713
154
+ 01-18 02:46:32 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 9: 0.268953800201416
155
+ 01-18 02:46:32 INFO [logging.py:61]: ========= Epoch 10 out of 1000 =========
156
+ 01-18 02:46:32 INFO [logging.py:61]: Begin training...
157
+ 01-18 03:01:15 INFO [logging.py:61]: Loss 'loss' on epoch 10: 17.509979248046875
158
+ 01-18 03:01:15 INFO [logging.py:61]: Loss 'ar_loss' on epoch 10: 1.516535997390747
159
+ 01-18 03:01:15 INFO [logging.py:61]: Loss 'nar_loss' on epoch 10: 15.993441581726074
160
+ 01-18 03:01:15 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 10: 0.8709884881973267
161
+ 01-18 03:01:15 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 10: 0.27383264899253845
162
+ 01-18 03:01:15 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0010
163
+ 01-18 03:01:16 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0010/pytorch_model.bin
164
+ 01-18 03:01:18 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0010/optimizer.bin
165
+ 01-18 03:01:18 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0010/scheduler.bin
166
+ 01-18 03:01:18 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0010/sampler.bin
167
+ 01-18 03:01:18 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0010/sampler_1.bin
168
+ 01-18 03:01:18 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0010/random_states_0.pkl
169
+ 01-18 03:01:18 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0010/custom_checkpoint_0.pkl
170
+ 01-18 03:01:18 INFO [logging.py:61]: ========= Epoch 11 out of 1000 =========
171
+ 01-18 03:01:18 INFO [logging.py:61]: Begin training...
172
+ 01-18 03:16:19 INFO [logging.py:61]: Loss 'loss' on epoch 11: 20.488168716430664
173
+ 01-18 03:16:19 INFO [logging.py:61]: Loss 'ar_loss' on epoch 11: 1.492824673652649
174
+ 01-18 03:16:19 INFO [logging.py:61]: Loss 'nar_loss' on epoch 11: 18.995346069335938
175
+ 01-18 03:16:19 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 11: 0.8740840554237366
176
+ 01-18 03:16:19 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 11: 0.2761929929256439
177
+ 01-18 03:16:19 INFO [logging.py:61]: ========= Epoch 12 out of 1000 =========
178
+ 01-18 03:16:19 INFO [logging.py:61]: Begin training...
179
+ 01-18 03:31:06 INFO [logging.py:61]: Loss 'loss' on epoch 12: 12.799497604370117
180
+ 01-18 03:31:06 INFO [logging.py:61]: Loss 'ar_loss' on epoch 12: 1.4752053022384644
181
+ 01-18 03:31:06 INFO [logging.py:61]: Loss 'nar_loss' on epoch 12: 11.324292182922363
182
+ 01-18 03:31:06 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 12: 0.8769260048866272
183
+ 01-18 03:31:06 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 12: 0.29501354694366455
184
+ 01-18 03:31:06 INFO [logging.py:61]: ========= Epoch 13 out of 1000 =========
185
+ 01-18 03:31:06 INFO [logging.py:61]: Begin training...
186
+ 01-18 03:45:51 INFO [logging.py:61]: Loss 'loss' on epoch 13: 9.103281021118164
187
+ 01-18 03:45:51 INFO [logging.py:61]: Loss 'ar_loss' on epoch 13: 1.456277847290039
188
+ 01-18 03:45:51 INFO [logging.py:61]: Loss 'nar_loss' on epoch 13: 7.647003650665283
189
+ 01-18 03:45:51 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 13: 0.8797268867492676
190
+ 01-18 03:45:51 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 13: 0.3158681094646454
191
+ 01-18 03:45:51 INFO [logging.py:61]: ========= Epoch 14 out of 1000 =========
192
+ 01-18 03:45:51 INFO [logging.py:61]: Begin training...
193
+ 01-18 04:00:36 INFO [logging.py:61]: Loss 'loss' on epoch 14: 13.451669692993164
194
+ 01-18 04:00:36 INFO [logging.py:61]: Loss 'ar_loss' on epoch 14: 1.4416464567184448
195
+ 01-18 04:00:36 INFO [logging.py:61]: Loss 'nar_loss' on epoch 14: 12.01002311706543
196
+ 01-18 04:00:36 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 14: 0.8816895484924316
197
+ 01-18 04:00:36 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 14: 0.2968295216560364
198
+ 01-18 04:00:36 INFO [logging.py:61]: ========= Epoch 15 out of 1000 =========
199
+ 01-18 04:00:36 INFO [logging.py:61]: Begin training...
200
+ 01-18 04:15:15 INFO [logging.py:61]: Loss 'loss' on epoch 15: 10.637478828430176
201
+ 01-18 04:15:15 INFO [logging.py:61]: Loss 'ar_loss' on epoch 15: 1.4250620603561401
202
+ 01-18 04:15:15 INFO [logging.py:61]: Loss 'nar_loss' on epoch 15: 9.21241569519043
203
+ 01-18 04:15:15 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 15: 0.8841312527656555
204
+ 01-18 04:15:15 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 15: 0.3035244345664978
205
+ 01-18 04:15:15 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0015
206
+ 01-18 04:15:16 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0015/pytorch_model.bin
207
+ 01-18 04:15:17 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0015/optimizer.bin
208
+ 01-18 04:15:17 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0015/scheduler.bin
209
+ 01-18 04:15:17 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0015/sampler.bin
210
+ 01-18 04:15:17 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0015/sampler_1.bin
211
+ 01-18 04:15:17 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0015/random_states_0.pkl
212
+ 01-18 04:15:17 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0015/custom_checkpoint_0.pkl
213
+ 01-18 04:15:17 INFO [logging.py:61]: ========= Epoch 16 out of 1000 =========
214
+ 01-18 04:15:17 INFO [logging.py:61]: Begin training...
215
+ 01-18 04:30:27 INFO [logging.py:61]: Loss 'loss' on epoch 16: 7.80450439453125
216
+ 01-18 04:30:27 INFO [logging.py:61]: Loss 'ar_loss' on epoch 16: 1.4189400672912598
217
+ 01-18 04:30:27 INFO [logging.py:61]: Loss 'nar_loss' on epoch 16: 6.385564804077148
218
+ 01-18 04:30:27 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 16: 0.8853126764297485
219
+ 01-18 04:30:27 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 16: 0.3228910267353058
220
+ 01-18 04:30:27 INFO [logging.py:61]: ========= Epoch 17 out of 1000 =========
221
+ 01-18 04:30:27 INFO [logging.py:61]: Begin training...
222
+ 01-18 04:45:12 INFO [logging.py:61]: Loss 'loss' on epoch 17: 7.98758602142334
223
+ 01-18 04:45:12 INFO [logging.py:61]: Loss 'ar_loss' on epoch 17: 1.402923583984375
224
+ 01-18 04:45:12 INFO [logging.py:61]: Loss 'nar_loss' on epoch 17: 6.584662914276123
225
+ 01-18 04:45:12 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 17: 0.8873386383056641
226
+ 01-18 04:45:12 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 17: 0.3134208917617798
227
+ 01-18 04:45:12 INFO [logging.py:61]: ========= Epoch 18 out of 1000 =========
228
+ 01-18 04:45:12 INFO [logging.py:61]: Begin training...
229
+ 01-18 04:59:55 INFO [logging.py:61]: Loss 'loss' on epoch 18: 8.954412460327148
230
+ 01-18 04:59:55 INFO [logging.py:61]: Loss 'ar_loss' on epoch 18: 1.3915390968322754
231
+ 01-18 04:59:55 INFO [logging.py:61]: Loss 'nar_loss' on epoch 18: 7.562872886657715
232
+ 01-18 04:59:55 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 18: 0.8887759447097778
233
+ 01-18 04:59:55 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 18: 0.3137945532798767
234
+ 01-18 04:59:55 INFO [logging.py:61]: ========= Epoch 19 out of 1000 =========
235
+ 01-18 04:59:55 INFO [logging.py:61]: Begin training...
236
+ 01-18 05:14:30 INFO [logging.py:61]: Loss 'loss' on epoch 19: 6.779003620147705
237
+ 01-18 05:14:30 INFO [logging.py:61]: Loss 'ar_loss' on epoch 19: 1.3836216926574707
238
+ 01-18 05:14:30 INFO [logging.py:61]: Loss 'nar_loss' on epoch 19: 5.395382404327393
239
+ 01-18 05:14:30 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 19: 0.8901618719100952
240
+ 01-18 05:14:30 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 19: 0.3347846567630768
241
+ 01-18 05:14:30 INFO [logging.py:61]: ========= Epoch 20 out of 1000 =========
242
+ 01-18 05:14:30 INFO [logging.py:61]: Begin training...
243
+ 01-18 05:29:19 INFO [logging.py:61]: Loss 'loss' on epoch 20: 8.779007911682129
244
+ 01-18 05:29:19 INFO [logging.py:61]: Loss 'ar_loss' on epoch 20: 1.3702212572097778
245
+ 01-18 05:29:19 INFO [logging.py:61]: Loss 'nar_loss' on epoch 20: 7.408785343170166
246
+ 01-18 05:29:19 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 20: 0.8919013738632202
247
+ 01-18 05:29:19 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 20: 0.3212190866470337
248
+ 01-18 05:29:19 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0020
249
+ 01-18 05:29:19 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0020/pytorch_model.bin
250
+ 01-18 05:29:21 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0020/optimizer.bin
251
+ 01-18 05:29:21 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0020/scheduler.bin
252
+ 01-18 05:29:21 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0020/sampler.bin
253
+ 01-18 05:29:21 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0020/sampler_1.bin
254
+ 01-18 05:29:21 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0020/random_states_0.pkl
255
+ 01-18 05:29:21 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0020/custom_checkpoint_0.pkl
256
+ 01-18 05:29:21 INFO [logging.py:61]: ========= Epoch 21 out of 1000 =========
257
+ 01-18 05:29:21 INFO [logging.py:61]: Begin training...
258
+ 01-18 05:44:34 INFO [logging.py:61]: Loss 'loss' on epoch 21: 6.515992164611816
259
+ 01-18 05:44:34 INFO [logging.py:61]: Loss 'ar_loss' on epoch 21: 1.363883376121521
260
+ 01-18 05:44:34 INFO [logging.py:61]: Loss 'nar_loss' on epoch 21: 5.152108669281006
261
+ 01-18 05:44:34 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 21: 0.8928155899047852
262
+ 01-18 05:44:34 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 21: 0.3437054753303528
263
+ 01-18 05:44:34 INFO [logging.py:61]: ========= Epoch 22 out of 1000 =========
264
+ 01-18 05:44:34 INFO [logging.py:61]: Begin training...
265
+ 01-18 05:59:19 INFO [logging.py:61]: Loss 'loss' on epoch 22: 6.411158084869385
266
+ 01-18 05:59:19 INFO [logging.py:61]: Loss 'ar_loss' on epoch 22: 1.3568339347839355
267
+ 01-18 05:59:19 INFO [logging.py:61]: Loss 'nar_loss' on epoch 22: 5.054324150085449
268
+ 01-18 05:59:19 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 22: 0.8938457369804382
269
+ 01-18 05:59:19 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 22: 0.3443514406681061
270
+ 01-18 05:59:19 INFO [logging.py:61]: ========= Epoch 23 out of 1000 =========
271
+ 01-18 05:59:19 INFO [logging.py:61]: Begin training...
272
+ 01-18 06:14:01 INFO [logging.py:61]: Loss 'loss' on epoch 23: 6.536004066467285
273
+ 01-18 06:14:01 INFO [logging.py:61]: Loss 'ar_loss' on epoch 23: 1.3483623266220093
274
+ 01-18 06:14:01 INFO [logging.py:61]: Loss 'nar_loss' on epoch 23: 5.187641620635986
275
+ 01-18 06:14:01 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 23: 0.8951892256736755
276
+ 01-18 06:14:01 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 23: 0.3462466299533844
277
+ 01-18 06:14:01 INFO [logging.py:61]: ========= Epoch 24 out of 1000 =========
278
+ 01-18 06:14:01 INFO [logging.py:61]: Begin training...
279
+ 01-18 06:28:46 INFO [logging.py:61]: Loss 'loss' on epoch 24: 6.32097053527832
280
+ 01-18 06:28:46 INFO [logging.py:61]: Loss 'ar_loss' on epoch 24: 1.3559930324554443
281
+ 01-18 06:28:46 INFO [logging.py:61]: Loss 'nar_loss' on epoch 24: 4.9649786949157715
282
+ 01-18 06:28:46 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 24: 0.8945856094360352
283
+ 01-18 06:28:46 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 24: 0.3438716530799866
284
+ 01-18 06:28:46 INFO [logging.py:61]: ========= Epoch 25 out of 1000 =========
285
+ 01-18 06:28:46 INFO [logging.py:61]: Begin training...
286
+ 01-18 06:43:30 INFO [logging.py:61]: Loss 'loss' on epoch 25: 6.4575934410095215
287
+ 01-18 06:43:30 INFO [logging.py:61]: Loss 'ar_loss' on epoch 25: 1.3440008163452148
288
+ 01-18 06:43:30 INFO [logging.py:61]: Loss 'nar_loss' on epoch 25: 5.113592147827148
289
+ 01-18 06:43:30 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 25: 0.8960744142532349
290
+ 01-18 06:43:30 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 25: 0.3424939811229706
291
+ 01-18 06:43:30 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0025
292
+ 01-18 06:43:31 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0025/pytorch_model.bin
293
+ 01-18 06:43:32 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0025/optimizer.bin
294
+ 01-18 06:43:32 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0025/scheduler.bin
295
+ 01-18 06:43:32 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0025/sampler.bin
296
+ 01-18 06:43:32 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0025/sampler_1.bin
297
+ 01-18 06:43:32 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0025/random_states_0.pkl
298
+ 01-18 06:43:32 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0025/custom_checkpoint_0.pkl
299
+ 01-18 06:43:32 INFO [logging.py:61]: ========= Epoch 26 out of 1000 =========
300
+ 01-18 06:43:32 INFO [logging.py:61]: Begin training...
+ 01-18 06:59:08 INFO [logging.py:61]: Loss 'loss' on epoch 26: 5.9814558029174805
+ 01-18 06:59:08 INFO [logging.py:61]: Loss 'ar_loss' on epoch 26: 1.3335331678390503
+ 01-18 06:59:08 INFO [logging.py:61]: Loss 'nar_loss' on epoch 26: 4.647922992706299
+ 01-18 06:59:08 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 26: 0.8972698450088501
+ 01-18 06:59:08 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 26: 0.3597102165222168
+ 01-18 06:59:08 INFO [logging.py:61]: ========= Epoch 27 out of 1000 =========
+ 01-18 06:59:08 INFO [logging.py:61]: Begin training...
+ 01-18 07:13:48 INFO [logging.py:61]: Loss 'loss' on epoch 27: 5.9211649894714355
+ 01-18 07:13:48 INFO [logging.py:61]: Loss 'ar_loss' on epoch 27: 1.3315753936767578
+ 01-18 07:13:48 INFO [logging.py:61]: Loss 'nar_loss' on epoch 27: 4.589588642120361
+ 01-18 07:13:48 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 27: 0.8978341817855835
+ 01-18 07:13:48 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 27: 0.3617432415485382
+ 01-18 07:13:48 INFO [logging.py:61]: ========= Epoch 28 out of 1000 =========
+ 01-18 07:13:48 INFO [logging.py:61]: Begin training...
+ 01-18 07:28:33 INFO [logging.py:61]: Loss 'loss' on epoch 28: 5.8742170333862305
+ 01-18 07:28:33 INFO [logging.py:61]: Loss 'ar_loss' on epoch 28: 1.3296784162521362
+ 01-18 07:28:33 INFO [logging.py:61]: Loss 'nar_loss' on epoch 28: 4.544538497924805
+ 01-18 07:28:33 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 28: 0.8983495831489563
+ 01-18 07:28:33 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 28: 0.366417795419693
+ 01-18 07:28:33 INFO [logging.py:61]: ========= Epoch 29 out of 1000 =========
+ 01-18 07:28:33 INFO [logging.py:61]: Begin training...
+ 01-18 07:43:17 INFO [logging.py:61]: Loss 'loss' on epoch 29: 5.860039710998535
+ 01-18 07:43:17 INFO [logging.py:61]: Loss 'ar_loss' on epoch 29: 1.3166179656982422
+ 01-18 07:43:17 INFO [logging.py:61]: Loss 'nar_loss' on epoch 29: 4.543421268463135
+ 01-18 07:43:17 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 29: 0.8997495174407959
+ 01-18 07:43:17 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 29: 0.3649654686450958
+ 01-18 07:43:17 INFO [logging.py:61]: ========= Epoch 30 out of 1000 =========
+ 01-18 07:43:17 INFO [logging.py:61]: Begin training...
+ 01-18 07:58:02 INFO [logging.py:61]: Loss 'loss' on epoch 30: 5.796247959136963
+ 01-18 07:58:02 INFO [logging.py:61]: Loss 'ar_loss' on epoch 30: 1.3036959171295166
+ 01-18 07:58:02 INFO [logging.py:61]: Loss 'nar_loss' on epoch 30: 4.492552280426025
+ 01-18 07:58:02 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 30: 0.90115886926651
+ 01-18 07:58:02 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 30: 0.37101566791534424
+ 01-18 07:58:02 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0030
+ 01-18 07:58:03 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0030/pytorch_model.bin
+ 01-18 07:58:04 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0030/optimizer.bin
+ 01-18 07:58:04 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0030/scheduler.bin
+ 01-18 07:58:04 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0030/sampler.bin
+ 01-18 07:58:04 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0030/sampler_1.bin
+ 01-18 07:58:04 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0030/random_states_0.pkl
+ 01-18 07:58:04 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0030/custom_checkpoint_0.pkl
+ 01-18 07:58:04 INFO [logging.py:61]: ========= Epoch 31 out of 1000 =========
+ 01-18 07:58:04 INFO [logging.py:61]: Begin training...
+ 01-18 08:12:58 INFO [logging.py:61]: Loss 'loss' on epoch 31: 5.9744038581848145
+ 01-18 08:12:58 INFO [logging.py:61]: Loss 'ar_loss' on epoch 31: 1.311854362487793
+ 01-18 08:12:58 INFO [logging.py:61]: Loss 'nar_loss' on epoch 31: 4.662549018859863
+ 01-18 08:12:58 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 31: 0.9008342623710632
+ 01-18 08:12:58 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 31: 0.366784006357193
+ 01-18 08:12:58 INFO [logging.py:61]: ========= Epoch 32 out of 1000 =========
+ 01-18 08:12:58 INFO [logging.py:61]: Begin training...
+ 01-18 08:27:46 INFO [logging.py:61]: Loss 'loss' on epoch 32: 5.735446453094482
+ 01-18 08:27:46 INFO [logging.py:61]: Loss 'ar_loss' on epoch 32: 1.3046653270721436
+ 01-18 08:27:46 INFO [logging.py:61]: Loss 'nar_loss' on epoch 32: 4.43078088760376
+ 01-18 08:27:46 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 32: 0.9014154076576233
+ 01-18 08:27:46 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 32: 0.37591466307640076
+ 01-18 08:27:46 INFO [logging.py:61]: ========= Epoch 33 out of 1000 =========
+ 01-18 08:27:46 INFO [logging.py:61]: Begin training...
+ 01-18 08:42:30 INFO [logging.py:61]: Loss 'loss' on epoch 33: 5.872527122497559
+ 01-18 08:42:30 INFO [logging.py:61]: Loss 'ar_loss' on epoch 33: 1.2990329265594482
+ 01-18 08:42:30 INFO [logging.py:61]: Loss 'nar_loss' on epoch 33: 4.5734944343566895
+ 01-18 08:42:30 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 33: 0.9022935032844543
+ 01-18 08:42:30 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 33: 0.37081286311149597
+ 01-18 08:42:30 INFO [logging.py:61]: ========= Epoch 34 out of 1000 =========
+ 01-18 08:42:30 INFO [logging.py:61]: Begin training...
+ 01-18 08:57:15 INFO [logging.py:61]: Loss 'loss' on epoch 34: 5.6954474449157715
+ 01-18 08:57:15 INFO [logging.py:61]: Loss 'ar_loss' on epoch 34: 1.3018174171447754
+ 01-18 08:57:15 INFO [logging.py:61]: Loss 'nar_loss' on epoch 34: 4.393630504608154
+ 01-18 08:57:15 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 34: 0.9023982882499695
+ 01-18 08:57:15 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 34: 0.38097846508026123
+ 01-18 08:57:15 INFO [logging.py:61]: ========= Epoch 35 out of 1000 =========
+ 01-18 08:57:15 INFO [logging.py:61]: Begin training...
+ 01-18 09:11:52 INFO [logging.py:61]: Loss 'loss' on epoch 35: 5.682955741882324
+ 01-18 09:11:52 INFO [logging.py:61]: Loss 'ar_loss' on epoch 35: 1.2978262901306152
+ 01-18 09:11:52 INFO [logging.py:61]: Loss 'nar_loss' on epoch 35: 4.385129451751709
+ 01-18 09:11:52 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 35: 0.9027104377746582
+ 01-18 09:11:52 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 35: 0.3820667564868927
+ 01-18 09:11:52 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0035
+ 01-18 09:11:53 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0035/pytorch_model.bin
+ 01-18 09:11:54 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0035/optimizer.bin
+ 01-18 09:11:54 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0035/scheduler.bin
+ 01-18 09:11:54 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0035/sampler.bin
+ 01-18 09:11:54 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0035/sampler_1.bin
+ 01-18 09:11:54 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0035/random_states_0.pkl
+ 01-18 09:11:54 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0035/custom_checkpoint_0.pkl
+ 01-18 09:11:54 INFO [logging.py:61]: ========= Epoch 36 out of 1000 =========
+ 01-18 09:11:54 INFO [logging.py:61]: Begin training...
+ 01-18 09:27:23 INFO [logging.py:61]: Loss 'loss' on epoch 36: 5.689979553222656
+ 01-18 09:27:23 INFO [logging.py:61]: Loss 'ar_loss' on epoch 36: 1.2983156442642212
+ 01-18 09:27:23 INFO [logging.py:61]: Loss 'nar_loss' on epoch 36: 4.391663551330566
+ 01-18 09:27:23 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 36: 0.90284264087677
+ 01-18 09:27:23 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 36: 0.3806702196598053
+ 01-18 09:27:23 INFO [logging.py:61]: ========= Epoch 37 out of 1000 =========
+ 01-18 09:27:23 INFO [logging.py:61]: Begin training...
+ 01-18 09:42:07 INFO [logging.py:61]: Loss 'loss' on epoch 37: 5.661297798156738
+ 01-18 09:42:07 INFO [logging.py:61]: Loss 'ar_loss' on epoch 37: 1.2922143936157227
+ 01-18 09:42:07 INFO [logging.py:61]: Loss 'nar_loss' on epoch 37: 4.369083404541016
+ 01-18 09:42:07 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 37: 0.9038951396942139
+ 01-18 09:42:07 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 37: 0.3834979236125946
+ 01-18 09:42:07 INFO [logging.py:61]: ========= Epoch 38 out of 1000 =========
+ 01-18 09:42:07 INFO [logging.py:61]: Begin training...
+ 01-18 09:57:28 INFO [logging.py:61]: Loss 'loss' on epoch 38: 5.638166427612305
+ 01-18 09:57:28 INFO [logging.py:61]: Loss 'ar_loss' on epoch 38: 1.2806264162063599
+ 01-18 09:57:28 INFO [logging.py:61]: Loss 'nar_loss' on epoch 38: 4.357540130615234
+ 01-18 09:57:28 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 38: 0.9050056338310242
+ 01-18 09:57:28 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 38: 0.38519880175590515
+ 01-18 09:57:28 INFO [logging.py:61]: ========= Epoch 39 out of 1000 =========
+ 01-18 09:57:28 INFO [logging.py:61]: Begin training...
+ 01-18 10:12:01 INFO [logging.py:61]: Loss 'loss' on epoch 39: 5.627877712249756
+ 01-18 10:12:01 INFO [logging.py:61]: Loss 'ar_loss' on epoch 39: 1.2748196125030518
+ 01-18 10:12:01 INFO [logging.py:61]: Loss 'nar_loss' on epoch 39: 4.353058338165283
+ 01-18 10:12:01 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 39: 0.9056810140609741
+ 01-18 10:12:01 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 39: 0.3871024549007416
+ 01-18 10:12:01 INFO [logging.py:61]: ========= Epoch 40 out of 1000 =========
+ 01-18 10:12:01 INFO [logging.py:61]: Begin training...
+ 01-18 10:26:42 INFO [logging.py:61]: Loss 'loss' on epoch 40: 5.619797229766846
+ 01-18 10:26:42 INFO [logging.py:61]: Loss 'ar_loss' on epoch 40: 1.2779885530471802
+ 01-18 10:26:42 INFO [logging.py:61]: Loss 'nar_loss' on epoch 40: 4.341809272766113
+ 01-18 10:26:42 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 40: 0.9054821729660034
+ 01-18 10:26:42 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 40: 0.38915959000587463
+ 01-18 10:26:42 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0040
+ 01-18 10:26:43 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0040/pytorch_model.bin
+ 01-18 10:26:45 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0040/optimizer.bin
+ 01-18 10:26:45 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0040/scheduler.bin
+ 01-18 10:26:45 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0040/sampler.bin
+ 01-18 10:26:45 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0040/sampler_1.bin
+ 01-18 10:26:45 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0040/random_states_0.pkl
+ 01-18 10:26:45 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0040/custom_checkpoint_0.pkl
+ 01-18 10:26:45 INFO [logging.py:61]: ========= Epoch 41 out of 1000 =========
+ 01-18 10:26:45 INFO [logging.py:61]: Begin training...
+ 01-18 10:42:20 INFO [logging.py:61]: Loss 'loss' on epoch 41: 5.582706928253174
+ 01-18 10:42:20 INFO [logging.py:61]: Loss 'ar_loss' on epoch 41: 1.2725111246109009
+ 01-18 10:42:20 INFO [logging.py:61]: Loss 'nar_loss' on epoch 41: 4.3101959228515625
+ 01-18 10:42:20 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 41: 0.9060384035110474
+ 01-18 10:42:20 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 41: 0.39349165558815
+ 01-18 10:42:20 INFO [logging.py:61]: ========= Epoch 42 out of 1000 =========
+ 01-18 10:42:20 INFO [logging.py:61]: Begin training...
+ 01-18 10:57:24 INFO [logging.py:61]: Loss 'loss' on epoch 42: 5.580837726593018
+ 01-18 10:57:24 INFO [logging.py:61]: Loss 'ar_loss' on epoch 42: 1.2710316181182861
+ 01-18 10:57:24 INFO [logging.py:61]: Loss 'nar_loss' on epoch 42: 4.309806823730469
+ 01-18 10:57:24 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 42: 0.9061719179153442
+ 01-18 10:57:24 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 42: 0.39457419514656067
+ 01-18 10:57:24 INFO [logging.py:61]: ========= Epoch 43 out of 1000 =========
+ 01-18 10:57:24 INFO [logging.py:61]: Begin training...
+ 01-18 11:13:05 INFO [logging.py:61]: Loss 'loss' on epoch 43: 5.569582939147949
+ 01-18 11:13:05 INFO [logging.py:61]: Loss 'ar_loss' on epoch 43: 1.2762430906295776
+ 01-18 11:13:05 INFO [logging.py:61]: Loss 'nar_loss' on epoch 43: 4.293339252471924
+ 01-18 11:13:05 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 43: 0.9061148166656494
+ 01-18 11:13:05 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 43: 0.39825910329818726
+ 01-18 11:13:05 INFO [logging.py:61]: ========= Epoch 44 out of 1000 =========
+ 01-18 11:13:05 INFO [logging.py:61]: Begin training...
+ 01-18 11:27:38 INFO [logging.py:61]: Loss 'loss' on epoch 44: 5.717195987701416
+ 01-18 11:27:38 INFO [logging.py:61]: Loss 'ar_loss' on epoch 44: 1.2707628011703491
+ 01-18 11:27:38 INFO [logging.py:61]: Loss 'nar_loss' on epoch 44: 4.446432590484619
+ 01-18 11:27:38 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 44: 0.9066540598869324
+ 01-18 11:27:38 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 44: 0.3859277665615082
+ 01-18 11:27:38 INFO [logging.py:61]: ========= Epoch 45 out of 1000 =========
+ 01-18 11:27:38 INFO [logging.py:61]: Begin training...
+ 01-18 11:42:11 INFO [logging.py:61]: Loss 'loss' on epoch 45: 5.571887969970703
+ 01-18 11:42:11 INFO [logging.py:61]: Loss 'ar_loss' on epoch 45: 1.2563157081604004
+ 01-18 11:42:11 INFO [logging.py:61]: Loss 'nar_loss' on epoch 45: 4.315572261810303
+ 01-18 11:42:11 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 45: 0.9081159830093384
+ 01-18 11:42:11 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 45: 0.39590778946876526
+ 01-18 11:42:11 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0045
+ 01-18 11:42:12 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0045/pytorch_model.bin
+ 01-18 11:42:13 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0045/optimizer.bin
+ 01-18 11:42:13 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0045/scheduler.bin
+ 01-18 11:42:13 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0045/sampler.bin
+ 01-18 11:42:13 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0045/sampler_1.bin
+ 01-18 11:42:13 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0045/random_states_0.pkl
+ 01-18 11:42:13 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0045/custom_checkpoint_0.pkl
+ 01-18 11:42:13 INFO [logging.py:61]: ========= Epoch 46 out of 1000 =========
+ 01-18 11:42:13 INFO [logging.py:61]: Begin training...
+ 01-18 11:56:47 INFO [logging.py:61]: Loss 'loss' on epoch 46: 5.5488386154174805
+ 01-18 11:56:47 INFO [logging.py:61]: Loss 'ar_loss' on epoch 46: 1.270042061805725
+ 01-18 11:56:47 INFO [logging.py:61]: Loss 'nar_loss' on epoch 46: 4.278796195983887
+ 01-18 11:56:47 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 46: 0.9069695472717285
+ 01-18 11:56:47 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 46: 0.40045443177223206
+ 01-18 11:56:47 INFO [logging.py:61]: ========= Epoch 47 out of 1000 =========
+ 01-18 11:56:47 INFO [logging.py:61]: Begin training...
+ 01-18 12:11:19 INFO [logging.py:61]: Loss 'loss' on epoch 47: 5.518341541290283
+ 01-18 12:11:19 INFO [logging.py:61]: Loss 'ar_loss' on epoch 47: 1.2568832635879517
+ 01-18 12:11:19 INFO [logging.py:61]: Loss 'nar_loss' on epoch 47: 4.261458396911621
+ 01-18 12:11:19 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 47: 0.9085185527801514
+ 01-18 12:11:19 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 47: 0.4032013416290283
+ 01-18 12:11:19 INFO [logging.py:61]: ========= Epoch 48 out of 1000 =========
+ 01-18 12:11:19 INFO [logging.py:61]: Begin training...
+ 01-18 12:25:53 INFO [logging.py:61]: Loss 'loss' on epoch 48: 5.5060343742370605
+ 01-18 12:25:54 INFO [logging.py:61]: Loss 'ar_loss' on epoch 48: 1.258938193321228
+ 01-18 12:25:54 INFO [logging.py:61]: Loss 'nar_loss' on epoch 48: 4.247096538543701
+ 01-18 12:25:54 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 48: 0.9083271622657776
+ 01-18 12:25:54 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 48: 0.40573224425315857
+ 01-18 12:25:54 INFO [logging.py:61]: ========= Epoch 49 out of 1000 =========
+ 01-18 12:25:54 INFO [logging.py:61]: Begin training...
+ 01-18 12:40:27 INFO [logging.py:61]: Loss 'loss' on epoch 49: 5.473796844482422
+ 01-18 12:40:27 INFO [logging.py:61]: Loss 'ar_loss' on epoch 49: 1.2464219331741333
+ 01-18 12:40:27 INFO [logging.py:61]: Loss 'nar_loss' on epoch 49: 4.22737455368042
+ 01-18 12:40:27 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 49: 0.9096682071685791
+ 01-18 12:40:27 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 49: 0.40877988934516907
+ 01-18 12:40:27 INFO [logging.py:61]: ========= Epoch 50 out of 1000 =========
+ 01-18 12:40:27 INFO [logging.py:61]: Begin training...
+ 01-18 12:54:59 INFO [logging.py:61]: Loss 'loss' on epoch 50: 5.809966564178467
+ 01-18 12:54:59 INFO [logging.py:61]: Loss 'ar_loss' on epoch 50: 1.2504818439483643
+ 01-18 12:54:59 INFO [logging.py:61]: Loss 'nar_loss' on epoch 50: 4.559484958648682
+ 01-18 12:54:59 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 50: 0.9094463586807251
+ 01-18 12:54:59 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 50: 0.38511520624160767
+ 01-18 12:54:59 INFO [logging.py:61]: Saving current state to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0050
+ 01-18 12:55:00 INFO [logging.py:61]: Model weights saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0050/pytorch_model.bin
+ 01-18 12:55:01 INFO [logging.py:61]: Optimizer state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0050/optimizer.bin
+ 01-18 12:55:01 INFO [logging.py:61]: Scheduler state saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0050/scheduler.bin
+ 01-18 12:55:01 INFO [logging.py:61]: Sampler state for dataloader 0 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0050/sampler.bin
+ 01-18 12:55:01 INFO [logging.py:61]: Sampler state for dataloader 1 saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0050/sampler_1.bin
+ 01-18 12:55:01 INFO [logging.py:61]: Random states saved in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0050/random_states_0.pkl
+ 01-18 12:55:01 INFO [logging.py:61]: Saving the state of TrainerState to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-2_AR-NAR/checkpoints/epoch_0050/custom_checkpoint_0.pkl
+ 01-18 12:55:01 INFO [logging.py:61]: ========= Epoch 51 out of 1000 =========
+ 01-18 12:55:01 INFO [logging.py:61]: Begin training...
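Note: across these epochs the aggregate 'loss' is consistent with the sum of the two objectives (e.g., epoch 26: 1.3335 + 4.6479 ≈ 5.9815), so the recipe evidently optimizes ar_loss + nar_loss jointly; between epochs 26 and 49, AR accuracy rises from ~0.897 to ~0.910 and NAR accuracy from ~0.360 to ~0.409.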
swin_default_LR1e-3_AR-NAR/swin_default_LR1e-3_AR-NAR_2024_01_18--13_13_05.log ADDED
@@ -0,0 +1,94 @@
+ 01-18 13:13:05 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-3_AR-NAR.
+ 01-18 13:13:05 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-3_AR-NAR.
+ 01-18 13:13:05 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-3_AR-NAR.
+ 01-18 13:13:05 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-3_AR-NAR.
+ 01-18 13:13:58 INFO [logging.py:61]: Configuration file is saved to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-3_AR-NAR/config__2024_01_18--13_13_46.toml.
+ 01-18 13:13:58 INFO [logging.py:61]: Environment information:
+ - `Accelerate` version: 0.26.1
+ - Platform: Linux-5.14.0-362.13.1.el9_3.x86_64-x86_64-with-glibc2.34
+ - Python version: 3.10.13
+ - Numpy version: 1.26.3
+ - PyTorch version (GPU?): 2.1.2 (True)
+ - System RAM: 503.48 GB
+ - GPU Available: True
+ - GPU IDs: 4
+ - GPU type: NVIDIA A100-SXM4-80GB
+ 01-18 13:13:58 INFO [logging.py:61]:
+ ===============================================================================================
+ Layer (type:depth-idx) Param #
+ ===============================================================================================
+ DistributedDataParallel --
+ ├─Model: 1-1 --
+ │ └─EncodecModel: 2-1 --
+ │ │ └─EncodecEncoder: 3-1 (7,425,792)
+ │ │ └─EncodecDecoder: 3-2 (7,426,018)
+ │ │ └─EncodecResidualVectorQuantizer: 3-3 --
+ │ └─TokenEmbedding: 2-2 --
+ │ │ └─Dropout: 3-4 --
+ │ │ └─Embedding: 3-5 524,800
+ │ └─Identity: 2-3 --
+ │ └─SinePositionalEmbedding: 2-4 1
+ │ │ └─Dropout: 3-6 --
+ │ └─TransformerEncoder: 2-5 --
+ │ │ └─ModuleList: 3-7 37,828,608
+ │ │ └─LayerNorm: 3-8 1,024
+ │ └─Linear: 2-6 524,800
+ │ └─MulticlassAccuracy: 2-7 --
+ │ └─TokenEmbedding: 2-8 --
+ │ │ └─Dropout: 3-9 --
+ │ │ └─Embedding: 3-10 524,288
+ │ └─ModuleList: 2-9 --
+ │ │ └─TokenEmbedding: 3-11 524,800
+ │ │ └─TokenEmbedding: 3-12 524,288
+ │ │ └─TokenEmbedding: 3-13 524,288
+ │ │ └─TokenEmbedding: 3-14 524,288
+ │ │ └─TokenEmbedding: 3-15 524,288
+ │ │ └─TokenEmbedding: 3-16 524,288
+ │ │ └─TokenEmbedding: 3-17 524,288
+ │ │ └─TokenEmbedding: 3-18 524,288
+ │ └─Identity: 2-10 --
+ │ └─SinePositionalEmbedding: 2-11 1
+ │ │ └─Dropout: 3-19 --
+ │ └─TransformerEncoder: 2-12 --
+ │ │ └─ModuleList: 3-20 50,436,096
+ │ │ └─AdaptiveLayerNorm: 3-21 526,336
+ │ └─ModuleList: 2-13 --
+ │ │ └─Linear: 3-22 524,288
+ │ │ └─Linear: 3-23 524,288
+ │ │ └─Linear: 3-24 524,288
+ │ │ └─Linear: 3-25 524,288
+ │ │ └─Linear: 3-26 524,288
+ │ │ └─Linear: 3-27 524,288
+ │ │ └─Linear: 3-28 524,288
+ │ └─ModuleList: 2-14 --
+ │ │ └─TokenEmbedding: 3-29 512
+ │ │ └─TokenEmbedding: 3-30 512
+ │ │ └─TokenEmbedding: 3-31 512
+ │ │ └─TokenEmbedding: 3-32 512
+ │ │ └─TokenEmbedding: 3-33 512
+ │ │ └─TokenEmbedding: 3-34 512
+ │ │ └─TokenEmbedding: 3-35 512
+ │ └─MulticlassAccuracy: 2-15 --
+ ===============================================================================================
+ Total params: 113,086,180
+ Trainable params: 98,234,369
+ Non-trainable params: 14,851,811
+ ===============================================================================================
+ 01-18 13:13:58 INFO [logging.py:61]: Training control variables:
+ 01-18 13:13:58 INFO [logging.py:61]: `steps_per_epoch`: 500
+ 01-18 13:13:58 INFO [logging.py:61]: Gradient accumulation steps: 1
+ 01-18 13:13:58 INFO [logging.py:61]: `update_steps_per_epoch`: 500
+ 01-18 13:13:58 INFO [logging.py:61]: `max_steps`: 500000
+ 01-18 13:13:58 INFO [logging.py:61]: `max_epochs`: 1000
+ 01-18 13:13:58 INFO [logging.py:61]: warmup_steps=1000. warmup_ratio will be ignored.
+ 01-18 13:13:58 INFO [logging.py:61]: Loading states from /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-3_AR-NAR/checkpoints/epoch_0050
+ 01-18 13:13:58 INFO [logging.py:61]: All model weights loaded successfully
+ 01-18 13:13:59 INFO [logging.py:61]: All optimizer states loaded successfully
+ 01-18 13:13:59 INFO [logging.py:61]: All scheduler states loaded successfully
+ 01-18 13:13:59 INFO [logging.py:61]: All dataloader sampler states loaded successfully
+ 01-18 13:13:59 INFO [logging.py:61]: All random states loaded successfully
+ 01-18 13:14:00 INFO [logging.py:61]: Loading in 1 custom states
+ 01-18 13:14:00 INFO [logging.py:61]: Loading the state of TrainerState from /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-3_AR-NAR/checkpoints/epoch_0050/custom_checkpoint_0.pkl
+ 01-18 13:14:00 INFO [logging.py:61]: Checkpoint on epoch 50 is loaded.
+ 01-18 13:14:00 INFO [logging.py:61]: ========= Epoch 51 out of 1000 =========
+ 01-18 13:14:00 INFO [logging.py:61]: Begin training...
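The save/resume sequence in these logs (model → optimizer → scheduler → sampler → random states → custom TrainerState) matches Accelerate's `save_state`/`load_state` checkpointing. A minimal sketch of that flow, assuming the Trainer delegates to `accelerate.Accelerator` (the toy model, scheduler, and path below are illustrative stand-ins, not the recipe's own objects):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(8, 8)                               # stand-in for the AR-NAR model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: 1.0)
loader = DataLoader(TensorDataset(torch.randn(4, 8)), batch_size=2)
model, optimizer, scheduler, loader = accelerator.prepare(model, optimizer, scheduler, loader)

ckpt_dir = "checkpoints/epoch_0050"                          # illustrative path
# Writes model weights, optimizer.bin, scheduler.bin, sampler.bin, and
# random_states_0.pkl; custom_checkpoint_0.pkl would come from an object
# registered via accelerator.register_for_checkpointing(...).
accelerator.save_state(ckpt_dir)
# Emits the "All ... loaded successfully" sequence seen in the log above.
accelerator.load_state(ckpt_dir)
```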
swin_default_LR1e-3_AR-NAR/swin_default_LR1e-3_AR-NAR_2024_01_18--13_14_39.log ADDED
@@ -0,0 +1,121 @@
+ 01-18 13:14:39 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-3_AR-NAR.
+ 01-18 13:14:39 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-3_AR-NAR.
+ 01-18 13:14:39 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-3_AR-NAR.
+ 01-18 13:14:45 INFO [logging.py:61]: Configuration file is saved to /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-3_AR-NAR/config__2024_01_18--13_14_43.toml.
+ 01-18 13:14:45 INFO [logging.py:61]: Environment information:
+ - `Accelerate` version: 0.26.1
+ - Platform: Linux-5.14.0-362.13.1.el9_3.x86_64-x86_64-with-glibc2.34
+ - Python version: 3.10.13
+ - Numpy version: 1.26.3
+ - PyTorch version (GPU?): 2.1.2 (True)
+ - System RAM: 503.48 GB
+ - GPU Available: True
+ - GPU IDs: 4
+ - GPU type: NVIDIA A100-SXM4-80GB
+ 01-18 13:14:45 INFO [logging.py:61]:
+ ===============================================================================================
+ Layer (type:depth-idx) Param #
+ ===============================================================================================
+ DistributedDataParallel --
+ ├─Model: 1-1 --
+ │ └─EncodecModel: 2-1 --
+ │ │ └─EncodecEncoder: 3-1 (7,425,792)
+ │ │ └─EncodecDecoder: 3-2 (7,426,018)
+ │ │ └─EncodecResidualVectorQuantizer: 3-3 --
+ │ └─TokenEmbedding: 2-2 --
+ │ │ └─Dropout: 3-4 --
+ │ │ └─Embedding: 3-5 524,800
+ │ └─Identity: 2-3 --
+ │ └─SinePositionalEmbedding: 2-4 1
+ │ │ └─Dropout: 3-6 --
+ │ └─TransformerEncoder: 2-5 --
+ │ │ └─ModuleList: 3-7 37,828,608
+ │ │ └─LayerNorm: 3-8 1,024
+ │ └─Linear: 2-6 524,800
+ │ └─MulticlassAccuracy: 2-7 --
+ │ └─TokenEmbedding: 2-8 --
+ │ │ └─Dropout: 3-9 --
+ │ │ └─Embedding: 3-10 524,288
+ │ └─ModuleList: 2-9 --
+ │ │ └─TokenEmbedding: 3-11 524,800
+ │ │ └─TokenEmbedding: 3-12 524,288
+ │ │ └─TokenEmbedding: 3-13 524,288
+ │ │ └─TokenEmbedding: 3-14 524,288
+ │ │ └─TokenEmbedding: 3-15 524,288
+ │ │ └─TokenEmbedding: 3-16 524,288
+ │ │ └─TokenEmbedding: 3-17 524,288
+ │ │ └─TokenEmbedding: 3-18 524,288
+ │ └─Identity: 2-10 --
+ │ └─SinePositionalEmbedding: 2-11 1
+ │ │ └─Dropout: 3-19 --
+ │ └─TransformerEncoder: 2-12 --
+ │ │ └─ModuleList: 3-20 50,436,096
+ │ │ └─AdaptiveLayerNorm: 3-21 526,336
+ │ └─ModuleList: 2-13 --
+ │ │ └─Linear: 3-22 524,288
+ │ │ └─Linear: 3-23 524,288
+ │ │ └─Linear: 3-24 524,288
+ │ │ └─Linear: 3-25 524,288
+ │ │ └─Linear: 3-26 524,288
+ │ │ └─Linear: 3-27 524,288
+ │ │ └─Linear: 3-28 524,288
+ │ └─ModuleList: 2-14 --
+ │ │ └─TokenEmbedding: 3-29 512
+ │ │ └─TokenEmbedding: 3-30 512
+ │ │ └─TokenEmbedding: 3-31 512
+ │ │ └─TokenEmbedding: 3-32 512
+ │ │ └─TokenEmbedding: 3-33 512
+ │ │ └─TokenEmbedding: 3-34 512
+ │ │ └─TokenEmbedding: 3-35 512
+ │ └─MulticlassAccuracy: 2-15 --
+ ===============================================================================================
+ Total params: 113,086,180
+ Trainable params: 98,234,369
+ Non-trainable params: 14,851,811
+ ===============================================================================================
+ 01-18 13:14:45 INFO [logging.py:61]: Training control variables:
+ 01-18 13:14:45 INFO [logging.py:61]: `steps_per_epoch`: 500
+ 01-18 13:14:45 INFO [logging.py:61]: Gradient accumulation steps: 1
+ 01-18 13:14:45 INFO [logging.py:61]: `update_steps_per_epoch`: 500
+ 01-18 13:14:45 INFO [logging.py:61]: `max_steps`: 500000
+ 01-18 13:14:45 INFO [logging.py:61]: `max_epochs`: 1000
+ 01-18 13:14:45 INFO [logging.py:61]: warmup_steps=1000. warmup_ratio will be ignored.
+ 01-18 13:14:45 INFO [logging.py:61]: Loading states from /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-3_AR-NAR/checkpoints/epoch_0050
+ 01-18 13:14:45 INFO [logging.py:61]: All model weights loaded successfully
+ 01-18 13:14:46 INFO [logging.py:61]: All optimizer states loaded successfully
+ 01-18 13:14:46 INFO [logging.py:61]: All scheduler states loaded successfully
+ 01-18 13:14:46 INFO [logging.py:61]: All dataloader sampler states loaded successfully
+ 01-18 13:14:46 INFO [logging.py:61]: All random states loaded successfully
+ 01-18 13:14:46 INFO [logging.py:61]: Loading in 1 custom states
+ 01-18 13:14:46 INFO [logging.py:61]: Loading the state of TrainerState from /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-3_AR-NAR/checkpoints/epoch_0050/custom_checkpoint_0.pkl
+ 01-18 13:14:46 INFO [logging.py:61]: Checkpoint on epoch 50 is loaded.
+ 01-18 13:14:46 INFO [logging.py:61]: ========= Epoch 51 out of 1000 =========
+ 01-18 13:14:46 INFO [logging.py:61]: Begin training...
+ 01-18 13:29:20 INFO [logging.py:61]: Loss 'loss' on epoch 51: 5.497116565704346
+ 01-18 13:29:20 INFO [logging.py:61]: Loss 'ar_loss' on epoch 51: 1.253153920173645
+ 01-18 13:29:20 INFO [logging.py:61]: Loss 'nar_loss' on epoch 51: 4.243962287902832
+ 01-18 13:29:20 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 51: 0.9091926217079163
+ 01-18 13:29:20 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 51: 0.40614935755729675
+ 01-18 13:29:20 INFO [logging.py:61]: ========= Epoch 52 out of 1000 =========
+ 01-18 13:29:20 INFO [logging.py:61]: Begin training...
+ 01-18 13:43:53 INFO [logging.py:61]: Loss 'loss' on epoch 52: 5.452788829803467
+ 01-18 13:43:53 INFO [logging.py:61]: Loss 'ar_loss' on epoch 52: 1.245349645614624
+ 01-18 13:43:53 INFO [logging.py:61]: Loss 'nar_loss' on epoch 52: 4.207438945770264
+ 01-18 13:43:53 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 52: 0.9099061489105225
+ 01-18 13:43:53 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 52: 0.41280823945999146
+ 01-18 13:43:53 INFO [logging.py:61]: ========= Epoch 53 out of 1000 =========
+ 01-18 13:43:53 INFO [logging.py:61]: Begin training...
+ 01-18 13:58:30 INFO [logging.py:61]: Loss 'loss' on epoch 53: 5.5037336349487305
+ 01-18 13:58:30 INFO [logging.py:61]: Loss 'ar_loss' on epoch 53: 1.2453527450561523
+ 01-18 13:58:30 INFO [logging.py:61]: Loss 'nar_loss' on epoch 53: 4.25838041305542
+ 01-18 13:58:30 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 53: 0.9102101922035217
+ 01-18 13:58:30 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 53: 0.40620309114456177
+ 01-18 13:58:30 INFO [logging.py:61]: ========= Epoch 54 out of 1000 =========
+ 01-18 13:58:30 INFO [logging.py:61]: Begin training...
+ 01-18 14:13:06 INFO [logging.py:61]: Loss 'loss' on epoch 54: 5.462146282196045
+ 01-18 14:13:06 INFO [logging.py:61]: Loss 'ar_loss' on epoch 54: 1.2472673654556274
+ 01-18 14:13:06 INFO [logging.py:61]: Loss 'nar_loss' on epoch 54: 4.214879512786865
+ 01-18 14:13:06 INFO [logging.py:61]: Loss 'ar_accuracy_metric' on epoch 54: 0.9099826216697693
+ 01-18 14:13:06 INFO [logging.py:61]: Loss 'nar_acc_metric' on epoch 54: 0.4112021327018738
+ 01-18 14:13:06 INFO [logging.py:61]: ========= Epoch 55 out of 1000 =========
+ 01-18 14:13:06 INFO [logging.py:61]: Begin training...
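The `constant_schedule_with_warmup` named in the control variables above (with `warmup_steps=1000`, so the ratio is ignored) is presumably the usual constant-after-linear-warmup rule. A sketch of one equivalent formulation, not necessarily the trainer's own factory:

```python
from torch.optim.lr_scheduler import LambdaLR

def constant_schedule_with_warmup(optimizer, warmup_steps: int) -> LambdaLR:
    # LR ramps linearly from 0 to the base LR over `warmup_steps` updates,
    # then stays constant; an explicit step count makes warmup_ratio redundant.
    return LambdaLR(optimizer, lambda step: min(1.0, step / max(1, warmup_steps)))
```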
swin_default_LR1e-3_AR-NAR/swin_default_LR1e-3_AR-NAR_2024_01_18--13_14_40.log ADDED
@@ -0,0 +1 @@
+ 01-18 13:14:40 INFO [logger.py:80]: Initialized logger with log file in /fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/exp/swin_default_LR1e-3_AR-NAR.
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497738.gina1.1685133.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0db7b9d9f6a021d3820b3843c34b06bc51d57a21200540d977ae644206471754
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497738.gina1.1685134.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c455eb1988f60f1059a5d69e417cc6f3bfd31662340202195fcfd908dd2b4517
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497738.gina1.1685135.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5589a30e959b9cc127b91c8fd0778037d7efcd6c7d84e5de3df1c9de60d79f6a
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497738.gina1.1685136.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc2cf6cf8a87551e6cdaa13aaccd57b997c5523d5fcf74189141c71117e8a2b2
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497802.gina1.1688371.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad2246b8f994474bf05d7f34f921f5191105657fff5ab6f1c1ea723c76b824d7
+ size 16886
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497802.gina1.1688372.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76f9a5e4dfe677d7a51b782cfcf642d1119c3d9ddd55f61a7d4a0fb71dc6e09e
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497802.gina1.1688373.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f5f21e4c295d18007c964da32d7b1a8ca5173b09af0ce62d0032c2d204c5390
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705497802.gina1.1688374.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa97cba9197532f36bf3d5c158492e5793eac1bc64d0d3f4efbcecafd97c6854
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705498412.gina1.1693065.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8f451a4e9894799899b82c2ba35bb8330502198e1ad3cdd9e6d6e0990667497
+ size 1392147
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705498412.gina1.1693066.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d9c9627dc4c32e365485f25015c3e55e198e23cd38ae374c22c50bbd71f2fe2
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705498412.gina1.1693067.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45b2fa15d31fe66ac3e0ddb66e2b906fb54281a5dd9aa5f4f8fdd2631668ec0c
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705498412.gina1.1693068.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ae622a26435c40efe71de00c971eb5022a6d469d2187073028ef5d338368ae9
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544038.gina1.1902904.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78c031ae9f3d415f23c57dbcf2f45c8a2971a83dad389a2c3c1bcbf750c29eda
+ size 3266
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544038.gina1.1902905.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:731d7d744187d5c858d68909f304294a1215bf76660c0a33bf0c481bbae2a6b8
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544038.gina1.1902906.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d84abd50de308ddc97b1d78aaf91cec87d6d38e3b771b6aeb05c922a61fd56b
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544038.gina1.1902907.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9d207613129eb1b3e74702a3e039ed92d6a2af1d52c45653aff2c278cb14d0a
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544084.gina1.1906280.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c2fd6ce73bd363b25ad60f086960873063a6fc723aeb8443f56ddbaf34caade
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544085.gina1.1906278.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6014bdba79c64effbc71783539675bb5cffdf946d22ecbad3082683ae55c32bc
+ size 137240
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544085.gina1.1906279.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6a1da535825cde6ff0bd5b73f77dbc9daba972a296229ec8caf86877bbe1ccb
+ size 2166
swin_default_LR1e-3_AR-NAR/tb_log/events.out.tfevents.1705544085.gina1.1906281.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e2cf4c3e804063dc307695f33ebb3484035892070eb43b4cfd9026081fa18d0
+ size 2166
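Each `events.out.tfevents.*` entry above is a Git LFS pointer (spec version, SHA-256 oid, byte size) rather than the TensorBoard data itself. The repeated 2,166-byte pointers likely correspond to non-logging ranks or runs that restarted before writing scalars, while the larger payloads (16,886; 1,392,147; 3,266; 137,240 bytes) hold the actual training curves.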
swin_default_LR1e-4_AR-NAR/config__2024_01_18--13_07_07.toml ADDED
@@ -0,0 +1,89 @@
+ [[validate_dataset]]
+ path = "dataloader.Dataset"
+
+ [validate_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+ [validate_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
+
+ [meta]
+ save_dir = "exp"
+ description = "Train a model using Generative Adversarial Networks (GANs)"
+ seed = 20220815
+ exp_id = "swin_default_LR1e-4_AR-NAR"
+ config_path = "/fred/oz325/xhao/proj/audiozen/recipes/librimix_sot/tokenizer_separation/conf/swin_default_LR1e-4_AR-NAR.toml"
+
+ [trainer]
+ path = "trainer.Trainer"
+
+ [loss_function]
+ path = "torch.nn.MSELoss"
+
+ [optimizer]
+ path = "torch.optim.AdamW"
+
+ [model]
+ path = "model_ar_nar.Model"
+
+ [acoustics]
+ n_fft = 512
+ hop_length = 128
+ win_length = 512
+ sr = 24000
+
+ [train_dataset]
+ path = "dataloader.Dataset"
+
+ [test_dataset]
+ path = "dataloader.Dataset"
+
+ [trainer.args]
+ debug = false
+ max_steps = 0
+ max_epochs = 1000
+ max_grad_norm = 1.0
+ save_max_score = true
+ save_ckpt_interval = 5
+ max_patience = 200
+ plot_norm = true
+ validation_interval = 200
+ max_num_checkpoints = 100
+ scheduler_name = "constant_schedule_with_warmup"
+ warmup_steps = 1000
+ warmup_ratio = 0.0
+ gradient_accumulation_steps = 1
+
+ [loss_function.args]
+
+ [optimizer.args]
+ lr = 0.0001
+
+ [model.args]
+
+ [train_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+
+ [train_dataset.dataloader]
+ batch_size = 20
+ num_workers = 10
+ drop_last = true
+ pin_memory = true
+
+ [test_dataset.args]
+ librispeech_dir = "~/data/LibriSpeech/LibriSpeech"
+ librispeech_metadata_fpath = "/home/xhao/proj/audiozen/recipes/librimix_sot/local/metadata/LibriSpeech/train-clean-100-24K.csv"
+ duration = 6.0
+ sr = 24000
+ num_samples = 10
+
+ [test_dataset.dataloader]
+ batch_size = 1
+ num_workers = 1
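These recipe configs pair each component's `path` with an `[*.args]` table, which suggests classes are resolved by dynamic import and constructed from their args. A minimal sketch of that pattern under those assumptions (`instantiate` is a hypothetical helper, not audiozen's actual loader):

```python
import importlib
import tomllib  # stdlib on Python 3.11+; the logged 3.10 environment would use `tomli`

def instantiate(section: dict, **extra):
    """Resolve `path = "module.Class"` and construct it from the `[*.args]` table."""
    module_name, _, cls_name = section["path"].rpartition(".")
    cls = getattr(importlib.import_module(module_name), cls_name)
    return cls(**{**section.get("args", {}), **extra})

with open("conf/swin_default_LR1e-4_AR-NAR.toml", "rb") as f:
    cfg = tomllib.load(f)

loss_function = instantiate(cfg["loss_function"])  # -> torch.nn.MSELoss()
# Components with runtime dependencies would take them as extra kwargs, e.g.
# optimizer = instantiate(cfg["optimizer"], params=model.parameters())
```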