diff --git "a/TW3_F_Yennefer_Rus/TW3_F_Yennefer_Rus (train).log" "b/TW3_F_Yennefer_Rus/TW3_F_Yennefer_Rus (train).log" deleted file mode 100644--- "a/TW3_F_Yennefer_Rus/TW3_F_Yennefer_Rus (train).log" +++ /dev/null @@ -1,1985 +0,0 @@ -2023-07-20 16:52:11,431 44k INFO {'train': {'log_interval': 200, 'eval_interval': 1000, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'contentvec_final_proj': False}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256, 'ssl_dim': 768, 'n_speakers': 200}, 'spk': {'TW3_F_Yennefer_Rus': 0}, 'model_dir': './logs/44k', 'reset': False} -2023-07-20 16:52:21,822 44k INFO emb_g.weight is not in the checkpoint -2023-07-20 16:52:21,822 44k INFO pre.weight is not in the checkpoint -2023-07-20 16:52:21,930 44k INFO Loaded checkpoint './logs/44k/G_0.pth' (iteration 0) -2023-07-20 16:52:22,414 44k INFO Loaded checkpoint './logs/44k/D_0.pth' (iteration 0) -2023-07-20 16:52:54,226 44k INFO Train Epoch: 1 [0%] -2023-07-20 16:52:54,229 44k INFO Losses: [2.6549651622772217, 2.3108129501342773, 8.000635147094727, 24.94369125366211, 17.321544647216797], step: 0, lr: 0.0001 -2023-07-20 16:53:12,140 44k INFO Saving model and optimizer state at iteration 1 to ./logs/44k/G_0.pth -2023-07-20 16:53:17,197 44k INFO Saving model and optimizer state at iteration 1 to ./logs/44k/D_0.pth -2023-07-20 16:57:31,174 44k INFO ====> Epoch: 1, cost 319.75 s -2023-07-20 16:57:47,267 44k INFO Train Epoch: 2 [2%] -2023-07-20 16:57:47,270 44k INFO Losses: [2.3624231815338135, 2.3843722343444824, 12.372735977172852, 23.657684326171875, 1.9778012037277222], step: 200, lr: 9.99875e-05 -2023-07-20 17:00:31,520 44k INFO ====> Epoch: 2, cost 180.35 s -2023-07-20 17:00:50,087 44k INFO Train Epoch: 3 [3%] -2023-07-20 17:00:50,089 44k INFO Losses: [2.688209295272827, 2.1808934211730957, 9.827109336853027, 23.379369735717773, 1.6149280071258545], step: 400, lr: 9.99750015625e-05 -2023-07-20 17:03:32,852 44k INFO ====> Epoch: 3, cost 181.33 s -2023-07-20 17:03:54,551 44k INFO Train Epoch: 4 [5%] -2023-07-20 17:03:54,552 44k INFO Losses: [2.551358461380005, 2.061535120010376, 12.522313117980957, 22.428821563720703, 1.8768309354782104], step: 600, lr: 9.996250468730469e-05 -2023-07-20 17:06:34,541 44k INFO ====> Epoch: 4, cost 181.69 s -2023-07-20 17:06:57,945 44k INFO Train Epoch: 5 [6%] -2023-07-20 17:06:57,947 44k INFO Losses: [2.5714869499206543, 2.049767255783081, 10.183616638183594, 19.14073371887207, 1.6126247644424438], step: 800, lr: 9.995000937421877e-05 -2023-07-20 17:09:37,121 44k INFO ====> Epoch: 5, cost 182.58 s -2023-07-20 17:10:04,402 44k INFO Train Epoch: 6 [8%] -2023-07-20 17:10:04,405 44k INFO Losses: 
-2023-07-20 17:10:04,402 44k INFO Train Epoch: 6 [8%]
-2023-07-20 17:10:04,405 44k INFO Losses: [2.4035518169403076, 2.2511117458343506, 11.09814453125, 23.102121353149414, 1.9460796117782593], step: 1000, lr: 9.993751562304699e-05
-2023-07-20 17:10:15,043 44k INFO Saving model and optimizer state at iteration 6 to ./logs/44k/G_1000.pth
-2023-07-20 17:10:21,622 44k INFO Saving model and optimizer state at iteration 6 to ./logs/44k/D_1000.pth
-2023-07-20 17:13:01,409 44k INFO ====> Epoch: 6, cost 204.29 s
-2023-07-20 17:13:25,994 44k INFO Train Epoch: 7 [9%]
-2023-07-20 17:13:25,997 44k INFO Losses: [2.3630359172821045, 2.6367716789245605, 10.272412300109863, 21.564157485961914, 1.4057141542434692], step: 1200, lr: 9.99250234335941e-05
-2023-07-20 17:15:59,963 44k INFO ====> Epoch: 7, cost 178.55 s
-2023-07-20 17:16:27,397 44k INFO Train Epoch: 8 [11%]
-2023-07-20 17:16:27,398 44k INFO Losses: [2.4454617500305176, 2.1671462059020996, 10.42436695098877, 21.347150802612305, 1.9645624160766602], step: 1400, lr: 9.991253280566489e-05
-2023-07-20 17:18:58,605 44k INFO ====> Epoch: 8, cost 178.64 s
-2023-07-20 17:19:31,604 44k INFO Train Epoch: 9 [12%]
-2023-07-20 17:19:31,606 44k INFO Losses: [2.4612560272216797, 2.541487693786621, 9.758258819580078, 20.94875717163086, 1.6553128957748413], step: 1600, lr: 9.990004373906418e-05
-2023-07-20 17:21:59,240 44k INFO ====> Epoch: 9, cost 180.63 s
-2023-07-20 17:22:33,691 44k INFO Train Epoch: 10 [14%]
-2023-07-20 17:22:33,695 44k INFO Losses: [2.3402395248413086, 2.4466073513031006, 10.257102012634277, 21.61166763305664, 2.1383450031280518], step: 1800, lr: 9.98875562335968e-05
-2023-07-20 17:24:57,607 44k INFO ====> Epoch: 10, cost 178.37 s
-2023-07-20 17:25:35,347 44k INFO Train Epoch: 11 [15%]
-2023-07-20 17:25:35,349 44k INFO Losses: [2.6206185817718506, 2.117933511734009, 9.797964096069336, 21.2149715423584, 1.6357755661010742], step: 2000, lr: 9.987507028906759e-05
-2023-07-20 17:25:45,132 44k INFO Saving model and optimizer state at iteration 11 to ./logs/44k/G_2000.pth
-2023-07-20 17:25:52,504 44k INFO Saving model and optimizer state at iteration 11 to ./logs/44k/D_2000.pth
-2023-07-20 17:28:26,240 44k INFO ====> Epoch: 11, cost 208.63 s
-2023-07-20 17:29:11,443 44k INFO Train Epoch: 12 [17%]
-2023-07-20 17:29:11,445 44k INFO Losses: [2.535332441329956, 2.13535213470459, 7.600330829620361, 18.68032455444336, 1.471911907196045], step: 2200, lr: 9.986258590528146e-05
-2023-07-20 17:31:32,896 44k INFO ====> Epoch: 12, cost 186.66 s
-2023-07-20 17:32:18,392 44k INFO Train Epoch: 13 [18%]
-2023-07-20 17:32:18,393 44k INFO Losses: [2.259878635406494, 2.269017219543457, 8.744736671447754, 16.518293380737305, 1.017276406288147], step: 2400, lr: 9.98501030820433e-05
-2023-07-20 17:34:37,478 44k INFO ====> Epoch: 13, cost 184.58 s
-2023-07-20 17:35:21,775 44k INFO Train Epoch: 14 [20%]
-2023-07-20 17:35:21,776 44k INFO Losses: [2.44956374168396, 2.3762001991271973, 12.020590782165527, 21.302967071533203, 1.4892634153366089], step: 2600, lr: 9.983762181915804e-05
-2023-07-20 17:37:37,096 44k INFO ====> Epoch: 14, cost 179.62 s
-2023-07-20 17:38:23,721 44k INFO Train Epoch: 15 [21%]
-2023-07-20 17:38:23,722 44k INFO Losses: [2.5595412254333496, 2.1226553916931152, 9.608453750610352, 22.171175003051758, 1.4607454538345337], step: 2800, lr: 9.982514211643064e-05
-2023-07-20 17:40:38,127 44k INFO ====> Epoch: 15, cost 181.03 s
-2023-07-20 17:41:27,090 44k INFO Train Epoch: 16 [23%]
-2023-07-20 17:41:27,092 44k INFO Losses: [2.4680440425872803, 2.1165032386779785, 9.113836288452148, 19.4493465423584, 1.5319935083389282], step: 3000, lr: 9.981266397366609e-05
-2023-07-20 17:41:36,300 44k INFO Saving model and optimizer state at iteration 16 to ./logs/44k/G_3000.pth
-2023-07-20 17:41:43,557 44k INFO Saving model and optimizer state at iteration 16 to ./logs/44k/D_3000.pth
-2023-07-20 17:43:57,488 44k INFO ====> Epoch: 16, cost 199.36 s
-2023-07-20 17:44:51,046 44k INFO Train Epoch: 17 [24%]
-2023-07-20 17:44:51,051 44k INFO Losses: [2.3981950283050537, 2.328207015991211, 10.652934074401855, 22.471691131591797, 1.244320034980774], step: 3200, lr: 9.980018739066937e-05
-2023-07-20 17:46:57,847 44k INFO ====> Epoch: 17, cost 180.36 s
-2023-07-20 17:47:55,860 44k INFO Train Epoch: 18 [26%]
-2023-07-20 17:47:55,861 44k INFO Losses: [2.635838508605957, 2.4212026596069336, 9.357858657836914, 20.838699340820312, 1.6241799592971802], step: 3400, lr: 9.978771236724554e-05
-2023-07-20 17:50:00,200 44k INFO ====> Epoch: 18, cost 182.35 s
-2023-07-20 17:50:59,082 44k INFO Train Epoch: 19 [27%]
-2023-07-20 17:50:59,085 44k INFO Losses: [2.487027168273926, 2.3703718185424805, 8.314800262451172, 21.625612258911133, 1.476224422454834], step: 3600, lr: 9.977523890319963e-05
-2023-07-20 17:52:59,796 44k INFO ====> Epoch: 19, cost 179.60 s
-2023-07-20 17:54:02,228 44k INFO Train Epoch: 20 [29%]
-2023-07-20 17:54:02,230 44k INFO Losses: [2.546788215637207, 2.0312247276306152, 6.288949012756348, 15.490553855895996, 1.448472023010254], step: 3800, lr: 9.976276699833672e-05
-2023-07-20 17:56:01,626 44k INFO ====> Epoch: 20, cost 181.83 s
-2023-07-20 17:57:04,991 44k INFO Train Epoch: 21 [30%]
-2023-07-20 17:57:04,992 44k INFO Losses: [2.457071304321289, 2.4178550243377686, 9.359325408935547, 20.69144630432129, 1.4918889999389648], step: 4000, lr: 9.975029665246193e-05
-2023-07-20 17:57:16,508 44k INFO Saving model and optimizer state at iteration 21 to ./logs/44k/G_4000.pth
-2023-07-20 17:57:19,250 44k INFO Saving model and optimizer state at iteration 21 to ./logs/44k/D_4000.pth
-2023-07-20 17:59:27,240 44k INFO ====> Epoch: 21, cost 205.61 s
-2023-07-20 18:00:37,508 44k INFO Train Epoch: 22 [32%]
-2023-07-20 18:00:37,510 44k INFO Losses: [2.3810906410217285, 1.9600815773010254, 13.146063804626465, 21.63344955444336, 1.1862664222717285], step: 4200, lr: 9.973782786538036e-05
-2023-07-20 18:02:30,679 44k INFO ====> Epoch: 22, cost 183.44 s
-2023-07-20 18:03:40,364 44k INFO Train Epoch: 23 [34%]
-2023-07-20 18:03:40,366 44k INFO Losses: [2.610821485519409, 2.3236095905303955, 7.279092788696289, 16.65282440185547, 1.300463080406189], step: 4400, lr: 9.972536063689719e-05
-2023-07-20 18:05:31,618 44k INFO ====> Epoch: 23, cost 180.94 s
-2023-07-20 18:06:44,178 44k INFO Train Epoch: 24 [35%]
-2023-07-20 18:06:44,180 44k INFO Losses: [2.620581865310669, 2.0982961654663086, 8.767879486083984, 19.68589210510254, 1.731434941291809], step: 4600, lr: 9.971289496681757e-05
-2023-07-20 18:08:33,348 44k INFO ====> Epoch: 24, cost 181.73 s
-2023-07-20 18:09:47,510 44k INFO Train Epoch: 25 [37%]
-2023-07-20 18:09:47,512 44k INFO Losses: [2.2483267784118652, 2.559251308441162, 10.548751831054688, 21.469959259033203, 1.529847264289856], step: 4800, lr: 9.970043085494672e-05
-2023-07-20 18:11:33,549 44k INFO ====> Epoch: 25, cost 180.20 s
-2023-07-20 18:12:49,035 44k INFO Train Epoch: 26 [38%]
-2023-07-20 18:12:49,038 44k INFO Losses: [2.4070584774017334, 2.1705639362335205, 13.381940841674805, 21.176677703857422, 1.3552178144454956], step: 5000, lr: 9.968796830108985e-05
-2023-07-20 18:12:59,638 44k INFO Saving model and optimizer state at iteration 26 to ./logs/44k/G_5000.pth
-2023-07-20 18:13:03,242 44k INFO Saving model and optimizer state at iteration 26 to ./logs/44k/D_5000.pth
-2023-07-20 18:14:52,857 44k INFO ====> Epoch: 26, cost 199.31 s
-2023-07-20 18:16:13,671 44k INFO Train Epoch: 27 [40%]
-2023-07-20 18:16:13,672 44k INFO Losses: [2.6511778831481934, 2.140805244445801, 9.702326774597168, 17.00576400756836, 1.5217686891555786], step: 5200, lr: 9.967550730505221e-05
-2023-07-20 18:17:55,668 44k INFO ====> Epoch: 27, cost 182.81 s
-2023-07-20 18:19:16,442 44k INFO Train Epoch: 28 [41%]
-2023-07-20 18:19:16,445 44k INFO Losses: [2.408966064453125, 2.1780498027801514, 11.126025199890137, 21.064002990722656, 1.7257667779922485], step: 5400, lr: 9.966304786663908e-05
-2023-07-20 18:20:53,898 44k INFO ====> Epoch: 28, cost 178.23 s
-2023-07-20 18:22:18,732 44k INFO Train Epoch: 29 [43%]
-2023-07-20 18:22:18,735 44k INFO Losses: [2.865290880203247, 2.348569393157959, 7.310464859008789, 17.829967498779297, 1.1030519008636475], step: 5600, lr: 9.965058998565574e-05
-2023-07-20 18:23:53,670 44k INFO ====> Epoch: 29, cost 179.77 s
-2023-07-20 18:25:21,017 44k INFO Train Epoch: 30 [44%]
-2023-07-20 18:25:21,018 44k INFO Losses: [2.559297561645508, 2.170029401779175, 10.837512016296387, 19.910043716430664, 1.1427851915359497], step: 5800, lr: 9.963813366190753e-05
-2023-07-20 18:26:54,067 44k INFO ====> Epoch: 30, cost 180.40 s
-2023-07-20 18:28:23,951 44k INFO Train Epoch: 31 [46%]
-2023-07-20 18:28:23,954 44k INFO Losses: [2.5197255611419678, 2.2416508197784424, 7.561230659484863, 19.51876449584961, 1.4098842144012451], step: 6000, lr: 9.962567889519979e-05
-2023-07-20 18:28:34,285 44k INFO Saving model and optimizer state at iteration 31 to ./logs/44k/G_6000.pth
-2023-07-20 18:28:43,891 44k INFO Saving model and optimizer state at iteration 31 to ./logs/44k/D_6000.pth
-2023-07-20 18:30:23,445 44k INFO ====> Epoch: 31, cost 209.38 s
-2023-07-20 18:31:56,157 44k INFO Train Epoch: 32 [47%]
-2023-07-20 18:31:56,160 44k INFO Losses: [2.6133909225463867, 1.998877763748169, 9.98355770111084, 20.10099983215332, 1.7230534553527832], step: 6200, lr: 9.961322568533789e-05
-2023-07-20 18:33:23,291 44k INFO ====> Epoch: 32, cost 179.85 s
-2023-07-20 18:34:58,308 44k INFO Train Epoch: 33 [49%]
-2023-07-20 18:34:58,312 44k INFO Losses: [2.4696717262268066, 2.3913185596466064, 9.63505744934082, 17.75992774963379, 1.2508512735366821], step: 6400, lr: 9.960077403212722e-05
-2023-07-20 18:36:24,862 44k INFO ====> Epoch: 33, cost 181.57 s
-2023-07-20 18:38:02,268 44k INFO Train Epoch: 34 [50%]
-2023-07-20 18:38:02,269 44k INFO Losses: [2.3060879707336426, 2.3532767295837402, 10.936504364013672, 20.691280364990234, 1.3514916896820068], step: 6600, lr: 9.95883239353732e-05
-2023-07-20 18:39:26,078 44k INFO ====> Epoch: 34, cost 181.22 s
-2023-07-20 18:41:05,336 44k INFO Train Epoch: 35 [52%]
-2023-07-20 18:41:05,337 44k INFO Losses: [2.2477316856384277, 2.561652421951294, 10.706422805786133, 20.910585403442383, 1.1016647815704346], step: 6800, lr: 9.957587539488128e-05
-2023-07-20 18:42:26,744 44k INFO ====> Epoch: 35, cost 180.67 s
-2023-07-20 18:44:09,381 44k INFO Train Epoch: 36 [53%]
-2023-07-20 18:44:09,383 44k INFO Losses: [2.4391300678253174, 2.5204854011535645, 8.152429580688477, 19.27252960205078, 1.6379839181900024], step: 7000, lr: 9.956342841045691e-05
-2023-07-20 18:44:21,870 44k INFO Saving model and optimizer state at iteration 36 to ./logs/44k/G_7000.pth
-2023-07-20 18:44:30,315 44k INFO Saving model and optimizer state at iteration 36 to ./logs/44k/D_7000.pth
-2023-07-20 18:45:59,973 44k INFO ====> Epoch: 36, cost 213.23 s
-2023-07-20 18:47:44,691 44k INFO Train Epoch: 37 [55%]
-2023-07-20 18:47:44,693 44k INFO Losses: [2.518939256668091, 2.3384523391723633, 10.867809295654297, 22.37885856628418, 1.1197550296783447], step: 7200, lr: 9.95509829819056e-05
-2023-07-20 18:49:02,342 44k INFO ====> Epoch: 37, cost 182.37 s
-2023-07-20 18:50:51,263 44k INFO Train Epoch: 38 [56%]
-2023-07-20 18:50:51,265 44k INFO Losses: [2.396836996078491, 2.8353517055511475, 10.1766939163208, 19.798337936401367, 1.609784722328186], step: 7400, lr: 9.953853910903285e-05
-2023-07-20 18:52:06,137 44k INFO ====> Epoch: 38, cost 183.80 s
-2023-07-20 18:53:54,396 44k INFO Train Epoch: 39 [58%]
-2023-07-20 18:53:54,398 44k INFO Losses: [2.6037540435791016, 2.5809521675109863, 9.13779354095459, 20.047903060913086, 0.9823673963546753], step: 7600, lr: 9.952609679164422e-05
-2023-07-20 18:55:07,189 44k INFO ====> Epoch: 39, cost 181.05 s
-2023-07-20 18:57:02,785 44k INFO Train Epoch: 40 [59%]
-2023-07-20 18:57:02,787 44k INFO Losses: [2.487211227416992, 2.284109354019165, 11.341534614562988, 21.855871200561523, 1.3385928869247437], step: 7800, lr: 9.951365602954526e-05
-2023-07-20 18:58:10,688 44k INFO ====> Epoch: 40, cost 183.50 s
-2023-07-20 19:00:05,064 44k INFO Train Epoch: 41 [61%]
-2023-07-20 19:00:05,065 44k INFO Losses: [2.5608906745910645, 2.4043595790863037, 8.62250804901123, 20.063457489013672, 1.2857465744018555], step: 8000, lr: 9.950121682254156e-05
-2023-07-20 19:00:14,533 44k INFO Saving model and optimizer state at iteration 41 to ./logs/44k/G_8000.pth
-2023-07-20 19:00:17,475 44k INFO Saving model and optimizer state at iteration 41 to ./logs/44k/D_8000.pth
-2023-07-20 19:01:36,356 44k INFO ====> Epoch: 41, cost 205.67 s
-2023-07-20 19:03:40,271 44k INFO Train Epoch: 42 [62%]
-2023-07-20 19:03:40,274 44k INFO Losses: [2.3923001289367676, 2.2270140647888184, 11.20695686340332, 21.13703727722168, 1.7822784185409546], step: 8200, lr: 9.948877917043875e-05
-2023-07-20 19:04:44,932 44k INFO ====> Epoch: 42, cost 188.58 s
-2023-07-20 19:06:44,743 44k INFO Train Epoch: 43 [64%]
-2023-07-20 19:06:44,746 44k INFO Losses: [2.726840019226074, 2.062354326248169, 9.93214225769043, 18.027080535888672, 1.3484158515930176], step: 8400, lr: 9.947634307304244e-05
-2023-07-20 19:07:45,356 44k INFO ====> Epoch: 43, cost 180.42 s
-2023-07-20 19:09:48,540 44k INFO Train Epoch: 44 [65%]
-2023-07-20 19:09:48,545 44k INFO Losses: [2.4501028060913086, 2.310039520263672, 11.187196731567383, 20.839324951171875, 1.3646860122680664], step: 8600, lr: 9.94639085301583e-05
-2023-07-20 19:10:48,529 44k INFO ====> Epoch: 44, cost 183.17 s
-2023-07-20 19:12:54,022 44k INFO Train Epoch: 45 [67%]
-2023-07-20 19:12:54,023 44k INFO Losses: [2.725592851638794, 2.7419092655181885, 7.474781036376953, 18.503427505493164, 1.3519213199615479], step: 8800, lr: 9.945147554159202e-05
-2023-07-20 19:13:51,170 44k INFO ====> Epoch: 45, cost 182.64 s
-2023-07-20 19:15:57,428 44k INFO Train Epoch: 46 [69%]
-2023-07-20 19:15:57,430 44k INFO Losses: [2.544055461883545, 2.253089189529419, 9.716707229614258, 17.171669006347656, 1.4851804971694946], step: 9000, lr: 9.943904410714931e-05
-2023-07-20 19:16:08,113 44k INFO Saving model and optimizer state at iteration 46 to ./logs/44k/G_9000.pth
-2023-07-20 19:16:10,871 44k INFO Saving model and optimizer state at iteration 46 to ./logs/44k/D_9000.pth
-2023-07-20 19:17:13,763 44k INFO ====> Epoch: 46, cost 202.59 s
-2023-07-20 19:19:25,824 44k INFO Train Epoch: 47 [70%]
-2023-07-20 19:19:25,826 44k INFO Losses: [2.511774778366089, 2.173487901687622, 9.844861030578613, 21.387577056884766, 1.3677271604537964], step: 9200, lr: 9.942661422663591e-05
-2023-07-20 19:20:16,219 44k INFO ====> Epoch: 47, cost 182.46 s
-2023-07-20 19:22:30,885 44k INFO Train Epoch: 48 [72%]
-2023-07-20 19:22:30,886 44k INFO Losses: [2.3168516159057617, 2.079436779022217, 12.120068550109863, 21.091732025146484, 0.9466084241867065], step: 9400, lr: 9.941418589985758e-05
-2023-07-20 19:23:20,976 44k INFO ====> Epoch: 48, cost 184.76 s
-2023-07-20 19:25:38,479 44k INFO Train Epoch: 49 [73%]
-2023-07-20 19:25:38,480 44k INFO Losses: [2.7579941749572754, 2.0550801753997803, 7.796679496765137, 17.33649444580078, 1.1473761796951294], step: 9600, lr: 9.940175912662009e-05
-2023-07-20 19:26:25,419 44k INFO ====> Epoch: 49, cost 184.44 s
-2023-07-20 19:28:46,090 44k INFO Train Epoch: 50 [75%]
-2023-07-20 19:28:46,092 44k INFO Losses: [2.4937164783477783, 2.1466667652130127, 9.037463188171387, 19.573293685913086, 1.1084288358688354], step: 9800, lr: 9.938933390672926e-05
-2023-07-20 19:29:32,334 44k INFO ====> Epoch: 50, cost 186.92 s
-2023-07-20 19:32:00,406 44k INFO Train Epoch: 51 [76%]
-2023-07-20 19:32:00,408 44k INFO Losses: [2.6483333110809326, 2.1424872875213623, 10.12969970703125, 19.484716415405273, 1.0031901597976685], step: 10000, lr: 9.937691023999092e-05
-2023-07-20 19:32:10,666 44k INFO Saving model and optimizer state at iteration 51 to ./logs/44k/G_10000.pth
-2023-07-20 19:32:20,519 44k INFO Saving model and optimizer state at iteration 51 to ./logs/44k/D_10000.pth
-2023-07-20 19:33:10,204 44k INFO ====> Epoch: 51, cost 217.87 s
-2023-07-20 19:35:35,096 44k INFO Train Epoch: 52 [78%]
-2023-07-20 19:35:35,099 44k INFO Losses: [2.549329996109009, 2.0796213150024414, 9.507514953613281, 21.033971786499023, 1.4102563858032227], step: 10200, lr: 9.936448812621091e-05
-2023-07-20 19:36:13,179 44k INFO ====> Epoch: 52, cost 182.97 s
-2023-07-20 19:38:41,131 44k INFO Train Epoch: 53 [79%]
-2023-07-20 19:38:41,132 44k INFO Losses: [2.293226480484009, 2.0719127655029297, 12.765713691711426, 21.664472579956055, 1.6839107275009155], step: 10400, lr: 9.935206756519513e-05
-2023-07-20 19:39:21,044 44k INFO ====> Epoch: 53, cost 187.87 s
-2023-07-20 19:41:54,258 44k INFO Train Epoch: 54 [81%]
-2023-07-20 19:41:54,261 44k INFO Losses: [2.4242868423461914, 2.2546489238739014, 9.098361015319824, 20.135488510131836, 1.1017811298370361], step: 10600, lr: 9.933964855674948e-05
-2023-07-20 19:42:28,285 44k INFO ====> Epoch: 54, cost 187.24 s
-2023-07-20 19:45:01,469 44k INFO Train Epoch: 55 [82%]
-2023-07-20 19:45:01,470 44k INFO Losses: [2.4447293281555176, 2.2201459407806396, 13.112979888916016, 21.553321838378906, 1.6533279418945312], step: 10800, lr: 9.932723110067987e-05
-2023-07-20 19:45:33,770 44k INFO ====> Epoch: 55, cost 185.49 s
-2023-07-20 19:48:07,061 44k INFO Train Epoch: 56 [84%]
-2023-07-20 19:48:07,063 44k INFO Losses: [2.4058241844177246, 2.370492935180664, 10.695283889770508, 21.62225341796875, 2.0016796588897705], step: 11000, lr: 9.931481519679228e-05
-2023-07-20 19:48:19,142 44k INFO Saving model and optimizer state at iteration 56 to ./logs/44k/G_11000.pth
-2023-07-20 19:48:25,034 44k INFO Saving model and optimizer state at iteration 56 to ./logs/44k/D_11000.pth
-2023-07-20 19:48:56,452 44k INFO ====> Epoch: 56, cost 202.68 s
-2023-07-20 19:51:32,894 44k INFO Train Epoch: 57 [85%]
-2023-07-20 19:51:32,895 44k INFO Losses: [2.480071544647217, 2.5406851768493652, 10.775320053100586, 20.303232192993164, 1.592663049697876], step: 11200, lr: 9.930240084489267e-05
-2023-07-20 19:51:59,170 44k INFO ====> Epoch: 57, cost 182.72 s
-2023-07-20 19:54:38,760 44k INFO Train Epoch: 58 [87%]
-2023-07-20 19:54:38,764 44k INFO Losses: [2.695178508758545, 2.409956455230713, 10.142857551574707, 19.425067901611328, 1.0137999057769775], step: 11400, lr: 9.928998804478705e-05
-2023-07-20 19:55:01,507 44k INFO ====> Epoch: 58, cost 182.34 s
-2023-07-20 19:57:43,149 44k INFO Train Epoch: 59 [88%]
-2023-07-20 19:57:43,153 44k INFO Losses: [2.5947000980377197, 2.0249171257019043, 9.915077209472656, 19.04536247253418, 1.5586957931518555], step: 11600, lr: 9.927757679628145e-05
-2023-07-20 19:58:03,319 44k INFO ====> Epoch: 59, cost 181.81 s
-2023-07-20 20:00:47,602 44k INFO Train Epoch: 60 [90%]
-2023-07-20 20:00:47,604 44k INFO Losses: [2.5249814987182617, 2.3657820224761963, 10.11502456665039, 21.197704315185547, 1.2149767875671387], step: 11800, lr: 9.926516709918191e-05
-2023-07-20 20:01:05,370 44k INFO ====> Epoch: 60, cost 182.05 s
-2023-07-20 20:03:52,879 44k INFO Train Epoch: 61 [91%]
-2023-07-20 20:03:52,881 44k INFO Losses: [2.660614490509033, 2.445931911468506, 8.372563362121582, 18.114715576171875, 0.9959828853607178], step: 12000, lr: 9.92527589532945e-05
-2023-07-20 20:04:03,783 44k INFO Saving model and optimizer state at iteration 61 to ./logs/44k/G_12000.pth
-2023-07-20 20:04:13,823 44k INFO Saving model and optimizer state at iteration 61 to ./logs/44k/D_12000.pth
-2023-07-20 20:04:15,992 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_9000.pth
-2023-07-20 20:04:16,000 44k INFO .. Free up space by deleting ckpt ./logs/44k/D_9000.pth
-2023-07-20 20:04:33,039 44k INFO ====> Epoch: 61, cost 207.67 s
-2023-07-20 20:07:22,166 44k INFO Train Epoch: 62 [93%]
-2023-07-20 20:07:22,167 44k INFO Losses: [2.5235397815704346, 1.9715110063552856, 8.818636894226074, 19.917375564575195, 1.2675663232803345], step: 12200, lr: 9.924035235842533e-05
-2023-07-20 20:07:35,213 44k INFO ====> Epoch: 62, cost 182.17 s
-2023-07-20 20:10:25,173 44k INFO Train Epoch: 63 [94%]
-2023-07-20 20:10:25,174 44k INFO Losses: [2.637273073196411, 2.286999225616455, 8.441672325134277, 18.83425521850586, 1.426032304763794], step: 12400, lr: 9.922794731438052e-05
-2023-07-20 20:10:38,408 44k INFO ====> Epoch: 63, cost 183.19 s
-2023-07-20 20:13:30,417 44k INFO Train Epoch: 64 [96%]
-2023-07-20 20:13:30,420 44k INFO Losses: [2.4999122619628906, 2.188037872314453, 9.207612991333008, 19.73558807373047, 1.3413097858428955], step: 12600, lr: 9.921554382096622e-05
-2023-07-20 20:13:38,874 44k INFO ====> Epoch: 64, cost 180.47 s
-2023-07-20 20:16:37,058 44k INFO Train Epoch: 65 [97%]
-2023-07-20 20:16:37,061 44k INFO Losses: [2.330841541290283, 2.149240255355835, 11.622438430786133, 20.801254272460938, 1.0015431642532349], step: 12800, lr: 9.92031418779886e-05
-2023-07-20 20:16:42,581 44k INFO ====> Epoch: 65, cost 183.71 s
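Across this first run (epochs 1 through 65) the logged lr decays geometrically: learning_rate=0.0001 from the config, scaled by lr_decay=0.999875 once per epoch. A sketch (assuming a plain per-epoch exponential schedule, not code from the repo) that reproduces the logged values; note that the resumed run below restarts one decay step further along (epoch 61 resumes at 9.9240e-05, the pre-restart epoch-62 value), consistent with how PyTorch's ExponentialLR advances last_epoch on reload.

```python
# Reproduce the per-epoch lr values printed in this log (a sketch under
# the assumption of plain exponential decay; constants from the config dump).
base_lr, lr_decay = 1e-4, 0.999875

def lr_at_epoch(epoch: int) -> float:
    # Epoch 1 trains at base_lr; each later epoch applies one decay factor.
    return base_lr * lr_decay ** (epoch - 1)

assert abs(lr_at_epoch(2) - 9.99875e-05) < 1e-12            # "Train Epoch: 2" above
assert abs(lr_at_epoch(65) - 9.92031418779886e-05) < 1e-12  # "Train Epoch: 65" above
```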
-2023-07-21 07:11:33,608 44k INFO {'train': {'log_interval': 200, 'eval_interval': 1000, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'contentvec_final_proj': False}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256, 'ssl_dim': 768, 'n_speakers': 200}, 'spk': {'TW3_F_Yennefer_Rus': 0}, 'model_dir': './logs/44k', 'reset': False}
-2023-07-21 07:11:50,381 44k INFO Loaded checkpoint './logs/44k/G_12000.pth' (iteration 61)
-2023-07-21 07:11:55,646 44k INFO Loaded checkpoint './logs/44k/D_12000.pth' (iteration 61)
-2023-07-21 07:16:20,290 44k INFO Train Epoch: 61 [91%]
-2023-07-21 07:16:20,291 44k INFO Losses: [2.4005236625671387, 2.143214464187622, 12.11289119720459, 21.10793685913086, 0.9817655086517334], step: 12000, lr: 9.924035235842533e-05
-2023-07-21 07:16:43,089 44k INFO Saving model and optimizer state at iteration 61 to ./logs/44k/G_12000.pth
-2023-07-21 07:16:45,832 44k INFO Saving model and optimizer state at iteration 61 to ./logs/44k/D_12000.pth
-2023-07-21 07:17:16,605 44k INFO ====> Epoch: 61, cost 343.00 s
-2023-07-21 07:20:08,203 44k INFO Train Epoch: 62 [93%]
-2023-07-21 07:20:08,204 44k INFO Losses: [2.315915584564209, 2.008122205734253, 14.244099617004395, 20.633392333984375, 1.051470398902893], step: 12200, lr: 9.922794731438052e-05
-2023-07-21 07:20:21,449 44k INFO ====> Epoch: 62, cost 184.84 s
-2023-07-21 07:23:17,710 44k INFO Train Epoch: 63 [94%]
-2023-07-21 07:23:17,713 44k INFO Losses: [2.7832040786743164, 2.2605013847351074, 6.238687992095947, 16.273372650146484, 1.4567804336547852], step: 12400, lr: 9.921554382096622e-05
-2023-07-21 07:23:28,543 44k INFO ====> Epoch: 63, cost 187.09 s
-2023-07-21 07:26:25,013 44k INFO Train Epoch: 64 [96%]
-2023-07-21 07:26:25,015 44k INFO Losses: [2.5455474853515625, 2.232299327850342, 8.378844261169434, 18.464094161987305, 1.2586220502853394], step: 12600, lr: 9.92031418779886e-05
-2023-07-21 07:26:34,019 44k INFO ====> Epoch: 64, cost 185.48 s
-2023-07-21 07:29:32,844 44k INFO Train Epoch: 65 [97%]
-2023-07-21 07:29:32,846 44k INFO Losses: [2.3087987899780273, 2.3974392414093018, 13.688126564025879, 21.9514217376709, 1.3426589965820312], step: 12800, lr: 9.919074148525384e-05
-2023-07-21 07:29:39,109 44k INFO ====> Epoch: 65, cost 185.09 s
-2023-07-21 07:32:42,616 44k INFO Train Epoch: 66 [99%]
-2023-07-21 07:32:42,618 44k INFO Losses: [2.603152275085449, 2.2124340534210205, 9.031440734863281, 18.85213279724121, 1.2399928569793701], step: 13000, lr: 9.917834264256819e-05
-2023-07-21 07:32:57,095 44k INFO Saving model and optimizer state at iteration 66 to ./logs/44k/G_13000.pth
-2023-07-21 07:33:08,034 44k INFO Saving model and optimizer state at iteration 66 to ./logs/44k/D_13000.pth
-2023-07-21 07:33:12,664 44k INFO ====> Epoch: 66, cost 213.56 s
-2023-07-21 07:36:20,880 44k INFO ====> Epoch: 67, cost 188.22 s
-2023-07-21 07:36:34,953 44k INFO Train Epoch: 68 [1%]
-2023-07-21 07:36:34,955 44k INFO Losses: [2.4635391235351562, 2.1485116481781006, 10.260381698608398, 19.720998764038086, 1.4313279390335083], step: 13200, lr: 9.915354960656915e-05
-2023-07-21 07:39:27,263 44k INFO ====> Epoch: 68, cost 186.38 s
-2023-07-21 07:39:44,809 44k INFO Train Epoch: 69 [2%]
-2023-07-21 07:39:44,811 44k INFO Losses: [2.362434148788452, 2.473827838897705, 10.082340240478516, 18.129188537597656, 1.0489836931228638], step: 13400, lr: 9.914115541286833e-05
-2023-07-21 07:42:34,671 44k INFO ====> Epoch: 69, cost 187.41 s
-2023-07-21 07:42:53,005 44k INFO Train Epoch: 70 [4%]
-2023-07-21 07:42:53,008 44k INFO Losses: [2.4519541263580322, 2.2244043350219727, 7.669000625610352, 19.440624237060547, 1.5487991571426392], step: 13600, lr: 9.912876276844171e-05
-2023-07-21 07:45:39,056 44k INFO ====> Epoch: 70, cost 184.38 s
-2023-07-21 07:45:58,606 44k INFO Train Epoch: 71 [5%]
-2023-07-21 07:45:58,609 44k INFO Losses: [2.5354197025299072, 2.350837230682373, 9.387038230895996, 20.047504425048828, 1.1029280424118042], step: 13800, lr: 9.911637167309565e-05
-2023-07-21 07:48:41,955 44k INFO ====> Epoch: 71, cost 182.90 s
-2023-07-21 07:49:03,933 44k INFO Train Epoch: 72 [7%]
-2023-07-21 07:49:03,936 44k INFO Losses: [2.4976425170898438, 2.2353854179382324, 9.130773544311523, 17.117338180541992, 1.0447603464126587], step: 14000, lr: 9.910398212663652e-05
-2023-07-21 07:49:13,078 44k INFO Saving model and optimizer state at iteration 72 to ./logs/44k/G_14000.pth
-2023-07-21 07:49:18,013 44k INFO Saving model and optimizer state at iteration 72 to ./logs/44k/D_14000.pth
-2023-07-21 07:52:09,591 44k INFO ====> Epoch: 72, cost 207.64 s
-2023-07-21 07:52:39,835 44k INFO Train Epoch: 73 [8%]
-2023-07-21 07:52:39,839 44k INFO Losses: [2.339995861053467, 2.481045961380005, 11.3228178024292, 20.247177124023438, 1.0861921310424805], step: 14200, lr: 9.909159412887068e-05
-2023-07-21 07:55:16,337 44k INFO ====> Epoch: 73, cost 186.75 s
-2023-07-21 07:55:45,409 44k INFO Train Epoch: 74 [10%]
-2023-07-21 07:55:45,412 44k INFO Losses: [2.4676647186279297, 2.208507776260376, 9.17160701751709, 19.046796798706055, 1.2951756715774536], step: 14400, lr: 9.907920767960457e-05
-2023-07-21 07:58:19,489 44k INFO ====> Epoch: 74, cost 183.15 s
-2023-07-21 07:58:49,232 44k INFO Train Epoch: 75 [11%]
-2023-07-21 07:58:49,235 44k INFO Losses: [2.2827744483947754, 2.4766008853912354, 10.772512435913086, 21.550439834594727, 1.1508170366287231], step: 14600, lr: 9.906682277864462e-05
-2023-07-21 08:01:21,443 44k INFO ====> Epoch: 75, cost 181.95 s
-2023-07-21 08:01:52,901 44k INFO Train Epoch: 76 [13%]
-2023-07-21 08:01:52,904 44k INFO Losses: [2.392402410507202, 2.4708542823791504, 12.453817367553711, 19.718006134033203, 1.2657043933868408], step: 14800, lr: 9.905443942579728e-05
-2023-07-21 08:04:22,593 44k INFO ====> Epoch: 76, cost 181.15 s
-2023-07-21 08:04:59,054 44k INFO Train Epoch: 77 [14%]
-2023-07-21 08:04:59,055 44k INFO Losses: [2.4960827827453613, 2.1346449851989746, 10.354548454284668, 21.352693557739258, 1.412553071975708], step: 15000, lr: 9.904205762086905e-05
-2023-07-21 08:05:08,162 44k INFO Saving model and optimizer state at iteration 77 to ./logs/44k/G_15000.pth
-2023-07-21 08:05:12,827 44k INFO Saving model and optimizer state at iteration 77 to ./logs/44k/D_15000.pth
-2023-07-21 08:07:50,060 44k INFO ====> Epoch: 77, cost 207.47 s
-2023-07-21 08:08:27,209 44k INFO Train Epoch: 78 [16%]
-2023-07-21 08:08:27,211 44k INFO Losses: [2.558884859085083, 2.155639410018921, 9.468905448913574, 19.232311248779297, 1.0511982440948486], step: 15200, lr: 9.902967736366644e-05
-2023-07-21 08:10:53,685 44k INFO ====> Epoch: 78, cost 183.63 s
-2023-07-21 08:11:35,115 44k INFO Train Epoch: 79 [17%]
-2023-07-21 08:11:35,117 44k INFO Losses: [2.6648879051208496, 2.3893754482269287, 9.10502815246582, 19.24029541015625, 1.170854926109314], step: 15400, lr: 9.901729865399597e-05
-2023-07-21 08:14:01,550 44k INFO ====> Epoch: 79, cost 187.87 s
-2023-07-21 08:14:43,911 44k INFO Train Epoch: 80 [19%]
-2023-07-21 08:14:43,913 44k INFO Losses: [2.405710220336914, 2.2100954055786133, 11.107368469238281, 20.497608184814453, 1.1133354902267456], step: 15600, lr: 9.900492149166423e-05
-2023-07-21 08:17:05,935 44k INFO ====> Epoch: 80, cost 184.38 s
-2023-07-21 08:17:50,825 44k INFO Train Epoch: 81 [20%]
-2023-07-21 08:17:50,826 44k INFO Losses: [2.551543712615967, 2.3236334323883057, 8.283823013305664, 19.89794921875, 1.2640385627746582], step: 15800, lr: 9.899254587647776e-05
-2023-07-21 08:20:09,202 44k INFO ====> Epoch: 81, cost 183.27 s
-2023-07-21 08:20:59,578 44k INFO Train Epoch: 82 [22%]
-2023-07-21 08:20:59,580 44k INFO Losses: [2.581618309020996, 2.5768370628356934, 11.515137672424316, 22.027008056640625, 1.239376425743103], step: 16000, lr: 9.89801718082432e-05
-2023-07-21 08:21:09,898 44k INFO Saving model and optimizer state at iteration 82 to ./logs/44k/G_16000.pth
-2023-07-21 08:21:16,038 44k INFO Saving model and optimizer state at iteration 82 to ./logs/44k/D_16000.pth
-2023-07-21 08:23:37,715 44k INFO ====> Epoch: 82, cost 208.51 s
-2023-07-21 08:24:33,313 44k INFO Train Epoch: 83 [23%]
-2023-07-21 08:24:33,315 44k INFO Losses: [2.302760601043701, 2.4519166946411133, 11.319392204284668, 20.25299072265625, 1.1131806373596191], step: 16200, lr: 9.896779928676716e-05
-2023-07-21 08:26:46,212 44k INFO ====> Epoch: 83, cost 188.50 s
-2023-07-21 08:27:42,094 44k INFO Train Epoch: 84 [25%]
-2023-07-21 08:27:42,095 44k INFO Losses: [2.4844181537628174, 2.14253830909729, 12.555291175842285, 19.883747100830078, 1.4185903072357178], step: 16400, lr: 9.895542831185631e-05
-2023-07-21 08:29:53,338 44k INFO ====> Epoch: 84, cost 187.13 s
-2023-07-21 08:30:51,556 44k INFO Train Epoch: 85 [26%]
-2023-07-21 08:30:51,557 44k INFO Losses: [2.2371504306793213, 2.281780481338501, 13.914836883544922, 19.661624908447266, 1.0883585214614868], step: 16600, lr: 9.894305888331732e-05
-2023-07-21 08:33:00,097 44k INFO ====> Epoch: 85, cost 186.76 s
-2023-07-21 08:33:58,011 44k INFO Train Epoch: 86 [28%]
-2023-07-21 08:33:58,012 44k INFO Losses: [2.5654637813568115, 2.1231164932250977, 13.189398765563965, 19.56635856628418, 0.8093919157981873], step: 16800, lr: 9.89306910009569e-05
-2023-07-21 08:36:03,230 44k INFO ====> Epoch: 86, cost 183.13 s
-2023-07-21 08:37:04,550 44k INFO Train Epoch: 87 [29%]
-2023-07-21 08:37:04,552 44k INFO Losses: [2.4546806812286377, 2.509476661682129, 11.329051971435547, 21.740802764892578, 1.178739070892334], step: 17000, lr: 9.891832466458178e-05
-2023-07-21 08:37:15,905 44k INFO Saving model and optimizer state at iteration 87 to ./logs/44k/G_17000.pth
-2023-07-21 08:37:18,906 44k INFO Saving model and optimizer state at iteration 87 to ./logs/44k/D_17000.pth
-2023-07-21 08:39:25,599 44k INFO ====> Epoch: 87, cost 202.37 s
-2023-07-21 08:40:34,081 44k INFO Train Epoch: 88 [31%]
-2023-07-21 08:40:34,083 44k INFO Losses: [2.495065212249756, 2.1265201568603516, 10.297574043273926, 20.086381912231445, 1.7630232572555542], step: 17200, lr: 9.89059598739987e-05
-2023-07-21 08:42:35,653 44k INFO ====> Epoch: 88, cost 190.05 s
-2023-07-21 08:43:45,275 44k INFO Train Epoch: 89 [32%]
-2023-07-21 08:43:45,276 44k INFO Losses: [2.354419708251953, 2.121298313140869, 10.789497375488281, 20.463909149169922, 1.2040244340896606], step: 17400, lr: 9.889359662901445e-05
-2023-07-21 08:45:42,004 44k INFO ====> Epoch: 89, cost 186.35 s
-2023-07-21 08:46:50,606 44k INFO Train Epoch: 90 [34%]
-2023-07-21 08:46:50,608 44k INFO Losses: [2.7292897701263428, 2.508078098297119, 9.306083679199219, 16.741544723510742, 0.7141510844230652], step: 17600, lr: 9.888123492943583e-05
-2023-07-21 08:48:47,036 44k INFO ====> Epoch: 90, cost 185.03 s
-2023-07-21 08:49:59,260 44k INFO Train Epoch: 91 [36%]
-2023-07-21 08:49:59,262 44k INFO Losses: [2.589515209197998, 2.3951096534729004, 11.230073928833008, 21.27693748474121, 1.0459965467453003], step: 17800, lr: 9.886887477506964e-05
-2023-07-21 08:51:51,141 44k INFO ====> Epoch: 91, cost 184.11 s
-2023-07-21 08:53:05,279 44k INFO Train Epoch: 92 [37%]
-2023-07-21 08:53:05,280 44k INFO Losses: [2.564547538757324, 2.056434154510498, 10.065702438354492, 19.531475067138672, 1.4304977655410767], step: 18000, lr: 9.885651616572276e-05
-2023-07-21 08:53:15,142 44k INFO Saving model and optimizer state at iteration 92 to ./logs/44k/G_18000.pth
-2023-07-21 08:53:22,105 44k INFO Saving model and optimizer state at iteration 92 to ./logs/44k/D_18000.pth
-2023-07-21 08:55:17,675 44k INFO ====> Epoch: 92, cost 206.53 s
-2023-07-21 08:56:40,848 44k INFO Train Epoch: 93 [39%]
-2023-07-21 08:56:40,849 44k INFO Losses: [2.9818739891052246, 2.018566370010376, 9.191640853881836, 18.265018463134766, 1.4165199995040894], step: 18200, lr: 9.884415910120204e-05
-2023-07-21 08:58:28,143 44k INFO ====> Epoch: 93, cost 190.47 s
-2023-07-21 08:59:51,264 44k INFO Train Epoch: 94 [40%]
-2023-07-21 08:59:51,266 44k INFO Losses: [2.393101692199707, 2.2086567878723145, 10.372150421142578, 20.040912628173828, 1.142297625541687], step: 18400, lr: 9.883180358131438e-05
-2023-07-21 09:01:34,929 44k INFO ====> Epoch: 94, cost 186.79 s
-2023-07-21 09:02:59,440 44k INFO Train Epoch: 95 [42%]
-2023-07-21 09:02:59,441 44k INFO Losses: [2.4298272132873535, 2.220283269882202, 12.545902252197266, 19.67744255065918, 1.2557722330093384], step: 18600, lr: 9.881944960586671e-05
-2023-07-21 09:04:41,021 44k INFO ====> Epoch: 95, cost 186.09 s
-2023-07-21 09:06:07,733 44k INFO Train Epoch: 96 [43%]
-2023-07-21 09:06:07,734 44k INFO Losses: [2.5173728466033936, 2.3312416076660156, 11.402571678161621, 20.092973709106445, 1.186493992805481], step: 18800, lr: 9.880709717466598e-05
-2023-07-21 09:07:46,637 44k INFO ====> Epoch: 96, cost 185.62 s
-2023-07-21 09:09:13,646 44k INFO Train Epoch: 97 [45%]
-2023-07-21 09:09:13,649 44k INFO Losses: [2.4996426105499268, 2.151503562927246, 12.562758445739746, 20.056133270263672, 1.1889969110488892], step: 19000, lr: 9.879474628751914e-05
-2023-07-21 09:09:24,626 44k INFO Saving model and optimizer state at iteration 97 to ./logs/44k/G_19000.pth
-2023-07-21 09:09:35,724 44k INFO Saving model and optimizer state at iteration 97 to ./logs/44k/D_19000.pth
-2023-07-21 09:11:14,445 44k INFO ====> Epoch: 97, cost 207.81 s
-2023-07-21 09:12:50,337 44k INFO Train Epoch: 98 [46%]
-2023-07-21 09:12:50,340 44k INFO Losses: [2.2745842933654785, 2.269577741622925, 9.733717918395996, 19.522037506103516, 1.0672245025634766], step: 19200, lr: 9.87823969442332e-05
-2023-07-21 09:14:24,074 44k INFO ====> Epoch: 98, cost 189.63 s
-2023-07-21 09:16:02,511 44k INFO Train Epoch: 99 [48%]
-2023-07-21 09:16:02,512 44k INFO Losses: [2.7394399642944336, 2.0541954040527344, 5.252060413360596, 18.16535186767578, 0.8769844174385071], step: 19400, lr: 9.877004914461517e-05
-2023-07-21 09:17:32,421 44k INFO ====> Epoch: 99, cost 188.35 s
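The five numbers in each "Losses" line are unlabeled in the log. In VITS-style trainers such as so-vits-svc they conventionally follow the order [discriminator, generator-adversarial, feature-matching, mel-reconstruction, KL], with the mel term already multiplied by c_mel=45 and the KL term by c_kl=1.0 from the config; that reading fits the magnitudes here, where the fourth value (~15-24) dominates. A hedged sketch using the epoch-99 entry just above:

```python
# Hedged reading of an unlabeled "Losses" line (conventional VITS order;
# the log itself does not name the fields).
loss_disc, loss_gen, loss_fm, loss_mel, loss_kl = (
    2.7394399642944336, 2.0541954040527344, 5.252060413360596,
    18.16535186767578, 0.8769844174385071)  # step 19400, epoch 99 above

# Under this reading the generator minimizes the sum of its four terms,
# so the c_mel-weighted reconstruction term drives most of the total.
loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl
print(round(loss_gen_all, 3))  # ~26.349
```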
-2023-07-21 15:30:26,749 44k INFO {'train': {'log_interval': 200, 'eval_interval': 1000, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'contentvec_final_proj': False}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256, 'ssl_dim': 768, 'n_speakers': 200}, 'spk': {'TW3_F_Yennefer_Rus': 0}, 'model_dir': './logs/44k', 'reset': False}
-2023-07-21 15:30:43,661 44k INFO Loaded checkpoint './logs/44k/G_19000.pth' (iteration 97)
-2023-07-21 15:30:48,594 44k INFO Loaded checkpoint './logs/44k/D_19000.pth' (iteration 97)
-2023-07-21 15:33:15,118 44k INFO Train Epoch: 97 [45%]
-2023-07-21 15:33:15,120 44k INFO Losses: [2.6667346954345703, 2.285155773162842, 6.80302095413208, 15.60183334350586, 1.1260244846343994], step: 19000, lr: 9.87823969442332e-05
-2023-07-21 15:33:35,180 44k INFO Saving model and optimizer state at iteration 97 to ./logs/44k/G_19000.pth
-2023-07-21 15:33:40,809 44k INFO Saving model and optimizer state at iteration 97 to ./logs/44k/D_19000.pth
-2023-07-21 15:36:18,436 44k INFO ====> Epoch: 97, cost 351.69 s
-2023-07-21 15:37:57,262 44k INFO Train Epoch: 98 [46%]
-2023-07-21 15:37:57,264 44k INFO Losses: [2.3216423988342285, 2.2843780517578125, 9.189483642578125, 19.40732192993164, 1.1140297651290894], step: 19200, lr: 9.877004914461517e-05
-2023-07-21 15:39:33,231 44k INFO ====> Epoch: 98, cost 194.80 s
-2023-07-21 15:41:12,891 44k INFO Train Epoch: 99 [48%]
-2023-07-21 15:41:12,892 44k INFO Losses: [2.442420482635498, 2.1055383682250977, 8.507013320922852, 15.822084426879883, 1.17451810836792], step: 19400, lr: 9.875770288847208e-05
-2023-07-21 15:42:46,584 44k INFO ====> Epoch: 99, cost 193.35 s
-2023-07-21 15:44:29,835 44k INFO Train Epoch: 100 [49%]
-2023-07-21 15:44:29,837 44k INFO Losses: [2.402589797973633, 2.245296001434326, 10.374245643615723, 20.717180252075195, 1.2909756898880005], step: 19600, lr: 9.874535817561101e-05
-2023-07-21 15:46:00,049 44k INFO ====> Epoch: 100, cost 193.47 s
-2023-07-21 15:47:45,029 44k INFO Train Epoch: 101 [51%]
-2023-07-21 15:47:45,033 44k INFO Losses: [2.416675090789795, 2.3001060485839844, 11.332229614257812, 18.723390579223633, 1.3238000869750977], step: 19800, lr: 9.873301500583906e-05
-2023-07-21 15:49:13,400 44k INFO ====> Epoch: 101, cost 193.35 s
-2023-07-21 15:51:01,931 44k INFO Train Epoch: 102 [52%]
-2023-07-21 15:51:01,933 44k INFO Losses: [2.4810502529144287, 2.1506435871124268, 10.206113815307617, 19.574739456176758, 0.9150567650794983], step: 20000, lr: 9.872067337896332e-05
-2023-07-21 15:51:12,217 44k INFO Saving model and optimizer state at iteration 102 to ./logs/44k/G_20000.pth
-2023-07-21 15:51:15,681 44k INFO Saving model and optimizer state at iteration 102 to ./logs/44k/D_20000.pth
-2023-07-21 15:52:47,239 44k INFO ====> Epoch: 102, cost 213.84 s
-2023-07-21 15:54:42,008 44k INFO Train Epoch: 103 [54%]
-2023-07-21 15:54:42,010 44k INFO Losses: [2.1730079650878906, 2.431161642074585, 11.148210525512695, 18.98563003540039, 1.362691879272461], step: 20200, lr: 9.870833329479095e-05
-2023-07-21 15:56:03,750 44k INFO ====> Epoch: 103, cost 196.51 s
-2023-07-21 15:57:57,262 44k INFO Train Epoch: 104 [55%]
-2023-07-21 15:57:57,265 44k INFO Losses: [2.4425392150878906, 2.2819862365722656, 12.210150718688965, 19.898584365844727, 0.45925813913345337], step: 20400, lr: 9.86959947531291e-05
-2023-07-21 15:59:17,227 44k INFO ====> Epoch: 104, cost 193.48 s
-2023-07-21 16:01:12,799 44k INFO Train Epoch: 105 [57%]
-2023-07-21 16:01:12,800 44k INFO Losses: [2.386535167694092, 2.5515527725219727, 11.482751846313477, 18.822919845581055, 0.9690454006195068], step: 20600, lr: 9.868365775378495e-05
-2023-07-21 16:02:29,927 44k INFO ====> Epoch: 105, cost 192.70 s
-2023-07-21 16:04:27,558 44k INFO Train Epoch: 106 [58%]
-2023-07-21 16:04:27,560 44k INFO Losses: [2.2356696128845215, 2.4252524375915527, 11.9246187210083, 20.95952033996582, 0.9231759905815125], step: 20800, lr: 9.867132229656573e-05
-2023-07-21 16:05:42,174 44k INFO ====> Epoch: 106, cost 192.25 s
-2023-07-21 16:07:43,903 44k INFO Train Epoch: 107 [60%]
-2023-07-21 16:07:43,904 44k INFO Losses: [2.499807834625244, 2.2573041915893555, 10.586657524108887, 19.15555191040039, 1.0701605081558228], step: 21000, lr: 9.865898838127865e-05
-2023-07-21 16:07:54,352 44k INFO Saving model and optimizer state at iteration 107 to ./logs/44k/G_21000.pth
-2023-07-21 16:08:01,445 44k INFO Saving model and optimizer state at iteration 107 to ./logs/44k/D_21000.pth
-2023-07-21 16:09:21,888 44k INFO ====> Epoch: 107, cost 219.71 s
-2023-07-21 16:11:27,922 44k INFO Train Epoch: 108 [61%]
-2023-07-21 16:11:27,924 44k INFO Losses: [2.4869942665100098, 2.3175716400146484, 8.57555103302002, 18.184423446655273, 0.9401000142097473], step: 21200, lr: 9.864665600773098e-05
-2023-07-21 16:12:40,509 44k INFO ====> Epoch: 108, cost 198.62 s
-2023-07-21 16:14:51,119 44k INFO Train Epoch: 109 [63%]
-2023-07-21 16:14:51,122 44k INFO Losses: [2.4567947387695312, 2.5130372047424316, 11.324175834655762, 19.903196334838867, 1.294120192527771], step: 21400, lr: 9.863432517573002e-05
-2023-07-21 16:15:57,881 44k INFO ====> Epoch: 109, cost 197.37 s
-2023-07-21 16:18:09,561 44k INFO Train Epoch: 110 [64%]
-2023-07-21 16:18:09,562 44k INFO Losses: [2.5494439601898193, 2.223268747329712, 10.10909652709961, 20.695697784423828, 1.1217299699783325], step: 21600, lr: 9.862199588508305e-05
-2023-07-21 16:19:13,292 44k INFO ====> Epoch: 110, cost 195.41 s
-2023-07-21 16:21:26,511 44k INFO Train Epoch: 111 [66%]
-2023-07-21 16:21:26,513 44k INFO Losses: [2.549898862838745, 2.207927703857422, 14.676955223083496, 20.802814483642578, 0.5788459777832031], step: 21800, lr: 9.86096681355974e-05
-2023-07-21 16:22:28,292 44k INFO ====> Epoch: 111, cost 195.00 s
-2023-07-21 16:24:43,621 44k INFO Train Epoch: 112 [68%]
-2023-07-21 16:24:43,623 44k INFO Losses: [2.4726691246032715, 2.4312119483947754, 12.334954261779785, 20.125089645385742, 0.5599287748336792], step: 22000, lr: 9.859734192708044e-05
-2023-07-21 16:24:55,109 44k INFO Saving model and optimizer state at iteration 112 to ./logs/44k/G_22000.pth
-2023-07-21 16:25:00,937 44k INFO Saving model and optimizer state at iteration 112 to ./logs/44k/D_22000.pth
-2023-07-21 16:25:08,237 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_19000.pth
-2023-07-21 16:25:08,240 44k INFO .. Free up space by deleting ckpt ./logs/44k/D_19000.pth
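The ".. Free up space by deleting ckpt" pairs above show keep_ckpts: 3 in action: once G_22000/D_22000 are written, the oldest surviving pair (G_19000/D_19000) is removed so only the three newest remain. A sketch of that pruning policy (illustrative; prune_checkpoints is a hypothetical helper, not the project's actual function):

```python
# Illustrative keep-newest-N checkpoint pruning matching the behavior
# logged above (keep_ckpts=3, checkpoints named G_<step>.pth / D_<step>.pth).
import glob
import os
import re

def prune_checkpoints(model_dir: str, keep_ckpts: int = 3) -> None:
    for prefix in ("G", "D"):
        paths = glob.glob(os.path.join(model_dir, f"{prefix}_*.pth"))
        # Sort by the global step encoded in the filename, oldest first.
        paths.sort(key=lambda p: int(re.search(r"_(\d+)\.pth$", p).group(1)))
        for old in paths[:-keep_ckpts]:
            os.remove(old)  # e.g. G_19000.pth once G_20000/21000/22000 exist
```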
-2023-07-21 16:26:07,125 44k INFO ====> Epoch: 112, cost 218.83 s
-2023-07-21 16:28:25,889 44k INFO Train Epoch: 113 [69%]
-2023-07-21 16:28:25,890 44k INFO Losses: [2.575185775756836, 2.4204142093658447, 8.832566261291504, 15.648338317871094, 1.2017974853515625], step: 22200, lr: 9.858501725933955e-05
-2023-07-21 16:29:21,262 44k INFO ====> Epoch: 113, cost 194.14 s
-2023-07-21 16:31:43,228 44k INFO Train Epoch: 114 [71%]
-2023-07-21 16:31:43,229 44k INFO Losses: [2.367739200592041, 2.139310359954834, 9.185691833496094, 17.317838668823242, 0.18838606774806976], step: 22400, lr: 9.857269413218213e-05
-2023-07-21 16:32:40,067 44k INFO ====> Epoch: 114, cost 198.80 s
-2023-07-21 16:35:09,513 44k INFO Train Epoch: 115 [72%]
-2023-07-21 16:35:09,515 44k INFO Losses: [2.462178945541382, 2.2144768238067627, 11.6893949508667, 20.14139175415039, 0.6953765749931335], step: 22600, lr: 9.85603725454156e-05
-2023-07-21 16:36:01,955 44k INFO ====> Epoch: 115, cost 201.89 s
-2023-07-21 16:38:32,814 44k INFO Train Epoch: 116 [74%]
-2023-07-21 16:38:32,815 44k INFO Losses: [2.6289303302764893, 2.5551493167877197, 9.782899856567383, 20.110610961914062, 0.8511331677436829], step: 22800, lr: 9.854805249884741e-05
-2023-07-21 16:39:20,585 44k INFO ====> Epoch: 116, cost 198.63 s
-2023-07-21 16:41:47,508 44k INFO Train Epoch: 117 [75%]
-2023-07-21 16:41:47,510 44k INFO Losses: [2.735032081604004, 2.325071096420288, 8.52432918548584, 20.389137268066406, 1.0641735792160034], step: 23000, lr: 9.853573399228505e-05
-2023-07-21 16:41:58,614 44k INFO Saving model and optimizer state at iteration 117 to ./logs/44k/G_23000.pth
-2023-07-21 16:42:05,895 44k INFO Saving model and optimizer state at iteration 117 to ./logs/44k/D_23000.pth
-2023-07-21 16:43:04,005 44k INFO ====> Epoch: 117, cost 223.42 s
-2023-07-21 16:45:37,907 44k INFO Train Epoch: 118 [77%]
-2023-07-21 16:45:37,909 44k INFO Losses: [2.4117939472198486, 2.415827512741089, 10.839245796203613, 17.546489715576172, 1.4093621969223022], step: 23200, lr: 9.8523417025536e-05
-2023-07-21 16:46:22,043 44k INFO ====> Epoch: 118, cost 198.04 s
-2023-07-21 16:48:55,485 44k INFO Train Epoch: 119 [78%]
-2023-07-21 16:48:55,487 44k INFO Losses: [2.332998752593994, 2.465550661087036, 10.334614753723145, 18.212646484375, 1.3598270416259766], step: 23400, lr: 9.851110159840781e-05
-2023-07-21 16:49:34,190 44k INFO ====> Epoch: 119, cost 192.15 s
-2023-07-21 16:52:09,705 44k INFO Train Epoch: 120 [80%]
-2023-07-21 16:52:09,707 44k INFO Losses: [2.304168462753296, 2.559504508972168, 10.118978500366211, 20.66158103942871, 0.9032669067382812], step: 23600, lr: 9.8498787710708e-05
-2023-07-21 16:52:46,588 44k INFO ====> Epoch: 120, cost 192.40 s
-2023-07-21 16:55:23,387 44k INFO Train Epoch: 121 [81%]
-2023-07-21 16:55:23,390 44k INFO Losses: [2.7152490615844727, 2.1467156410217285, 6.936227321624756, 14.56566333770752, 1.2746107578277588], step: 23800, lr: 9.848647536224416e-05
-2023-07-21 16:55:57,517 44k INFO ====> Epoch: 121, cost 190.93 s
-2023-07-21 16:58:43,460 44k INFO Train Epoch: 122 [83%]
-2023-07-21 16:58:43,461 44k INFO Losses: [2.461012125015259, 2.343348264694214, 9.520187377929688, 18.30912971496582, 1.119667887687683], step: 24000, lr: 9.847416455282387e-05
-2023-07-21 16:58:57,216 44k INFO Saving model and optimizer state at iteration 122 to ./logs/44k/G_24000.pth
-2023-07-21 16:59:07,230 44k INFO Saving model and optimizer state at iteration 122 to ./logs/44k/D_24000.pth
-2023-07-21 16:59:42,825 44k INFO ====> Epoch: 122, cost 225.31 s
-2023-07-21 17:02:25,679 44k INFO Train Epoch: 123 [84%]
-2023-07-21 17:02:25,681 44k INFO Losses: [2.526350498199463, 2.605272054672241, 10.41413402557373, 20.973079681396484, 1.1124430894851685], step: 24200, lr: 9.846185528225477e-05
-2023-07-21 17:02:58,005 44k INFO ====> Epoch: 123, cost 195.18 s
-2023-07-21 17:05:45,666 44k INFO Train Epoch: 124 [86%]
-2023-07-21 17:05:45,669 44k INFO Losses: [2.6690456867218018, 1.9728515148162842, 7.9757490158081055, 18.855947494506836, 1.1572319269180298], step: 24400, lr: 9.84495475503445e-05
-2023-07-21 17:06:12,954 44k INFO ====> Epoch: 124, cost 194.95 s
-2023-07-21 17:08:59,624 44k INFO Train Epoch: 125 [87%]
-2023-07-21 17:08:59,626 44k INFO Losses: [2.617051124572754, 2.222356081008911, 8.140710830688477, 20.03601837158203, 1.26643705368042], step: 24600, lr: 9.84372413569007e-05
-2023-07-21 17:09:23,383 44k INFO ====> Epoch: 125, cost 190.43 s
-2023-07-21 17:12:15,176 44k INFO Train Epoch: 126 [89%]
-2023-07-21 17:12:15,178 44k INFO Losses: [2.7547807693481445, 2.1496028900146484, 10.172414779663086, 18.232770919799805, 1.333817481994629], step: 24800, lr: 9.842493670173108e-05
-2023-07-21 17:12:36,187 44k INFO ====> Epoch: 126, cost 192.80 s
-2023-07-21 17:15:32,226 44k INFO Train Epoch: 127 [90%]
-2023-07-21 17:15:32,230 44k INFO Losses: [2.462491989135742, 2.3374414443969727, 9.395182609558105, 19.67860221862793, 0.20602381229400635], step: 25000, lr: 9.841263358464336e-05
-2023-07-21 17:15:50,197 44k INFO Saving model and optimizer state at iteration 127 to ./logs/44k/G_25000.pth
-2023-07-21 17:15:54,882 44k INFO Saving model and optimizer state at iteration 127 to ./logs/44k/D_25000.pth
-2023-07-21 17:16:18,292 44k INFO ====> Epoch: 127, cost 222.11 s
-2023-07-21 17:19:12,926 44k INFO Train Epoch: 128 [92%]
-2023-07-21 17:19:12,930 44k INFO Losses: [2.421919107437134, 2.2275290489196777, 9.994449615478516, 19.664825439453125, 0.7534552216529846], step: 25200, lr: 9.840033200544528e-05
-2023-07-21 17:19:30,496 44k INFO ====> Epoch: 128, cost 192.20 s
-2023-07-21 17:22:32,138 44k INFO Train Epoch: 129 [93%]
-2023-07-21 17:22:32,139 44k INFO Losses: [2.548980474472046, 2.1678566932678223, 9.816315650939941, 18.385934829711914, 1.2947406768798828], step: 25400, lr: 9.838803196394459e-05
-2023-07-21 17:22:45,304 44k INFO ====> Epoch: 129, cost 194.81 s
-2023-07-21 17:25:45,956 44k INFO Train Epoch: 130 [95%]
-2023-07-21 17:25:45,958 44k INFO Losses: [2.3946595191955566, 2.3280417919158936, 12.204469680786133, 19.71204948425293, 0.9284786581993103], step: 25600, lr: 9.837573345994909e-05
-2023-07-21 17:25:56,358 44k INFO ====> Epoch: 130, cost 191.05 s
-2023-07-21 17:29:03,813 44k INFO Train Epoch: 131 [96%]
-2023-07-21 17:29:03,814 44k INFO Losses: [2.5545730590820312, 2.2363829612731934, 12.559056282043457, 19.758169174194336, 1.530608892440796], step: 25800, lr: 9.836343649326659e-05
-2023-07-21 17:29:11,537 44k INFO ====> Epoch: 131, cost 195.18 s
-2023-07-21 17:32:17,733 44k INFO Train Epoch: 132 [98%]
-2023-07-21 17:32:17,735 44k INFO Losses: [2.522730827331543, 2.305175542831421, 9.701803207397461, 19.935274124145508, 1.6927341222763062], step: 26000, lr: 9.835114106370493e-05
-2023-07-21 17:32:32,857 44k INFO Saving model and optimizer state at iteration 132 to ./logs/44k/G_26000.pth
-2023-07-21 17:32:38,418 44k INFO Saving model and optimizer state at iteration 132 to ./logs/44k/D_26000.pth
-2023-07-21 17:32:46,424 44k INFO ====> Epoch: 132, cost 214.89 s
-2023-07-21 17:36:02,166 44k INFO Train Epoch: 133 [99%]
-2023-07-21 17:36:02,169 44k INFO Losses: [2.457596778869629, 2.41813325881958, 8.44925308227539, 19.715219497680664, 1.0681700706481934], step: 26200, lr: 9.833884717107196e-05
-2023-07-21 17:36:07,375 44k INFO ====> Epoch: 133, cost 200.95 s
-2023-07-21 17:39:21,908 44k INFO ====> Epoch: 134, cost 194.53 s
-2023-07-21 17:39:37,507 44k INFO Train Epoch: 135 [1%]
-2023-07-21 17:39:37,510 44k INFO Losses: [2.4319474697113037, 2.3722245693206787, 8.892995834350586, 20.178388595581055, 0.7872319221496582], step: 26400, lr: 9.831426399582366e-05
-2023-07-21 17:42:34,422 44k INFO ====> Epoch: 135, cost 192.51 s
-2023-07-21 17:42:52,168 44k INFO Train Epoch: 136 [3%]
-2023-07-21 17:42:52,169 44k INFO Losses: [2.744927167892456, 2.060849189758301, 11.300294876098633, 20.54011344909668, 1.0647633075714111], step: 26600, lr: 9.830197471282419e-05
-2023-07-21 17:45:48,399 44k INFO ====> Epoch: 136, cost 193.98 s
-2023-07-21 17:46:07,967 44k INFO Train Epoch: 137 [4%]
-2023-07-21 17:46:07,969 44k INFO Losses: [2.8440122604370117, 1.939738154411316, 6.539734840393066, 16.02583885192871, 1.1885879039764404], step: 26800, lr: 9.828968696598508e-05
-2023-07-21 17:49:00,525 44k INFO ====> Epoch: 137, cost 192.13 s
-2023-07-21 17:49:23,562 44k INFO Train Epoch: 138 [6%]
-2023-07-21 17:49:23,563 44k INFO Losses: [2.773386001586914, 2.2041497230529785, 8.719069480895996, 19.60767936706543, 1.6085671186447144], step: 27000, lr: 9.827740075511432e-05
-2023-07-21 17:49:35,463 44k INFO Saving model and optimizer state at iteration 138 to ./logs/44k/G_27000.pth
-2023-07-21 17:49:45,985 44k INFO Saving model and optimizer state at iteration 138 to ./logs/44k/D_27000.pth
-2023-07-21 17:49:48,351 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_24000.pth
-2023-07-21 17:49:48,356 44k INFO .. Free up space by deleting ckpt ./logs/44k/D_24000.pth
-2023-07-21 17:52:42,402 44k INFO ====> Epoch: 138, cost 221.88 s
-2023-07-21 17:53:08,384 44k INFO Train Epoch: 139 [7%]
-2023-07-21 17:53:08,385 44k INFO Losses: [2.387085199356079, 2.3535380363464355, 11.334831237792969, 20.683361053466797, 1.3246866464614868], step: 27200, lr: 9.826511608001993e-05
-2023-07-21 17:55:56,530 44k INFO ====> Epoch: 139, cost 194.13 s
-2023-07-21 17:56:24,334 44k INFO Train Epoch: 140 [9%]
-2023-07-21 17:56:24,337 44k INFO Losses: [2.4610371589660645, 2.463737964630127, 9.792011260986328, 17.615875244140625, 0.990350604057312], step: 27400, lr: 9.825283294050992e-05
-2023-07-21 17:59:09,802 44k INFO ====> Epoch: 140, cost 193.27 s
-2023-07-21 17:59:42,618 44k INFO Train Epoch: 141 [10%]
-2023-07-21 17:59:42,620 44k INFO Losses: [2.381868839263916, 2.3638408184051514, 10.328003883361816, 19.237380981445312, 0.5003438591957092], step: 27600, lr: 9.824055133639235e-05
-2023-07-21 18:02:27,803 44k INFO ====> Epoch: 141, cost 198.00 s
-2023-07-21 18:03:01,081 44k INFO Train Epoch: 142 [12%]
-2023-07-21 18:03:01,084 44k INFO Losses: [2.47285532951355, 1.9715862274169922, 11.930777549743652, 20.132404327392578, 1.26872718334198], step: 27800, lr: 9.822827126747529e-05
-2023-07-21 18:05:41,366 44k INFO ====> Epoch: 142, cost 193.56 s
-2023-07-21 18:06:15,844 44k INFO Train Epoch: 143 [13%]
-2023-07-21 18:06:15,847 44k INFO Losses: [2.311788558959961, 2.4879229068756104, 13.119593620300293, 19.357288360595703, 0.9590017199516296], step: 28000, lr: 9.821599273356685e-05
-2023-07-21 18:06:25,791 44k INFO Saving model and optimizer state at iteration 143 to ./logs/44k/G_28000.pth
-2023-07-21 18:06:35,489 44k INFO Saving model and optimizer state at iteration 143 to ./logs/44k/D_28000.pth
-2023-07-21 18:09:16,691 44k INFO ====> Epoch: 143, cost 215.33 s
-2023-07-21 18:09:58,818 44k INFO Train Epoch: 144 [15%]
-2023-07-21 18:09:58,821 44k INFO Losses: [2.425577163696289, 2.4195539951324463, 13.453057289123535, 19.275527954101562, 1.0691722631454468], step: 28200, lr: 9.820371573447515e-05
-2023-07-21 18:12:30,700 44k INFO ====> Epoch: 144, cost 194.01 s
-2023-07-21 18:13:18,962 44k INFO Train Epoch: 145 [16%]
-2023-07-21 18:13:18,965 44k INFO Losses: [2.459174633026123, 2.3881382942199707, 9.774147987365723, 19.26771354675293, 1.0676015615463257], step: 28400, lr: 9.819144027000834e-05
-2023-07-21 18:15:48,536 44k INFO ====> Epoch: 145, cost 197.84 s
-2023-07-21 18:16:34,855 44k INFO Train Epoch: 146 [18%]
-2023-07-21 18:16:34,856 44k INFO Losses: [2.7603838443756104, 2.105320930480957, 8.29627513885498, 16.036434173583984, 1.258935570716858], step: 28600, lr: 9.817916633997459e-05
-2023-07-21 18:19:01,817 44k INFO ====> Epoch: 146, cost 193.28 s
-2023-07-21 18:19:50,747 44k INFO Train Epoch: 147 [19%]
-2023-07-21 18:19:50,749 44k INFO Losses: [2.388951063156128, 2.2558794021606445, 13.342424392700195, 19.6672420501709, 1.1756566762924194], step: 28800, lr: 9.816689394418209e-05
-2023-07-21 18:22:16,275 44k INFO ====> Epoch: 147, cost 194.46 s
-2023-07-21 18:23:06,279 44k INFO Train Epoch: 148 [21%]
-2023-07-21 18:23:06,281 44k INFO Losses: [2.7740745544433594, 2.15147066116333, 10.475726127624512, 19.8051815032959, 1.1482189893722534], step: 29000, lr: 9.815462308243906e-05
-2023-07-21 18:23:16,609 44k INFO Saving model and optimizer state at iteration 148 to ./logs/44k/G_29000.pth
-2023-07-21 18:23:22,657 44k INFO Saving model and optimizer state at iteration 148 to ./logs/44k/D_29000.pth
-2023-07-21 18:25:48,924 44k INFO ====> Epoch: 148, cost 212.65 s
44k INFO ====> Epoch: 148, cost 212.65 s -2023-07-21 18:26:44,836 44k INFO Train Epoch: 149 [22%] -2023-07-21 18:26:44,837 44k INFO Losses: [2.616018772125244, 2.205622434616089, 11.079109191894531, 20.302108764648438, 1.065405249595642], step: 29200, lr: 9.814235375455375e-05 -2023-07-21 18:29:04,902 44k INFO ====> Epoch: 149, cost 195.98 s -2023-07-21 18:30:02,360 44k INFO Train Epoch: 150 [24%] -2023-07-21 18:30:02,361 44k INFO Losses: [2.5923447608947754, 2.2311201095581055, 10.188232421875, 18.72749137878418, 0.7336432337760925], step: 29400, lr: 9.813008596033443e-05 -2023-07-21 18:32:19,385 44k INFO ====> Epoch: 150, cost 194.48 s -2023-07-21 18:33:21,586 44k INFO Train Epoch: 151 [25%] -2023-07-21 18:33:21,588 44k INFO Losses: [2.408165693283081, 2.288963794708252, 10.249737739562988, 21.004695892333984, 0.9634822010993958], step: 29600, lr: 9.811781969958938e-05 -2023-07-21 18:35:37,010 44k INFO ====> Epoch: 151, cost 197.62 s -2023-07-21 18:36:37,239 44k INFO Train Epoch: 152 [27%] -2023-07-21 18:36:37,241 44k INFO Losses: [2.421182870864868, 2.534841537475586, 8.831451416015625, 19.441137313842773, 1.2992007732391357], step: 29800, lr: 9.810555497212693e-05 -2023-07-21 18:38:51,302 44k INFO ====> Epoch: 152, cost 194.29 s -2023-07-21 18:39:55,137 44k INFO Train Epoch: 153 [28%] -2023-07-21 18:39:55,140 44k INFO Losses: [2.559748649597168, 2.2781600952148438, 9.738786697387695, 19.182235717773438, 0.8010022640228271], step: 30000, lr: 9.809329177775541e-05 -2023-07-21 18:40:05,871 44k INFO Saving model and optimizer state at iteration 153 to ./logs/44k/G_30000.pth -2023-07-21 18:40:15,644 44k INFO Saving model and optimizer state at iteration 153 to ./logs/44k/D_30000.pth -2023-07-21 18:40:22,025 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_27000.pth -2023-07-21 18:40:22,027 44k INFO .. 
Free up space by deleting ckpt ./logs/44k/D_27000.pth -2023-07-21 18:42:31,380 44k INFO ====> Epoch: 153, cost 220.08 s -2023-07-21 18:43:39,856 44k INFO Train Epoch: 154 [30%] -2023-07-21 18:43:39,857 44k INFO Losses: [2.3989877700805664, 2.2629611492156982, 10.62449836730957, 18.90825080871582, 0.9505196809768677], step: 30200, lr: 9.808103011628319e-05 -2023-07-21 18:45:46,358 44k INFO ====> Epoch: 154, cost 194.98 s -2023-07-21 18:47:00,951 44k INFO Train Epoch: 155 [31%] -2023-07-21 18:47:00,953 44k INFO Losses: [2.6765365600585938, 1.9810618162155151, 10.38754653930664, 20.22066879272461, 0.9741595983505249], step: 30400, lr: 9.806876998751865e-05 -2023-07-21 18:49:05,378 44k INFO ====> Epoch: 155, cost 199.02 s -2023-07-21 18:50:19,500 44k INFO Train Epoch: 156 [33%] -2023-07-21 18:50:19,502 44k INFO Losses: [2.3128530979156494, 2.2194900512695312, 13.159067153930664, 19.166492462158203, 1.0481305122375488], step: 30600, lr: 9.80565113912702e-05 -2023-07-21 18:52:20,488 44k INFO ====> Epoch: 156, cost 195.11 s -2023-07-21 18:53:37,412 44k INFO Train Epoch: 157 [35%] -2023-07-21 18:53:37,414 44k INFO Losses: [2.392058849334717, 2.4387149810791016, 8.455172538757324, 20.425487518310547, 1.242039680480957], step: 30800, lr: 9.804425432734629e-05 -2023-07-21 18:55:36,378 44k INFO ====> Epoch: 157, cost 195.89 s -2023-07-21 18:56:54,679 44k INFO Train Epoch: 158 [36%] -2023-07-21 18:56:54,682 44k INFO Losses: [2.683244228363037, 2.346961259841919, 10.223226547241211, 19.01651382446289, 0.7395071983337402], step: 31000, lr: 9.803199879555537e-05 -2023-07-21 18:57:04,189 44k INFO Saving model and optimizer state at iteration 158 to ./logs/44k/G_31000.pth -2023-07-21 18:57:15,375 44k INFO Saving model and optimizer state at iteration 158 to ./logs/44k/D_31000.pth -2023-07-21 18:59:24,430 44k INFO ====> Epoch: 158, cost 228.05 s -2023-07-21 19:00:48,935 44k INFO Train Epoch: 159 [38%] -2023-07-21 19:00:48,937 44k INFO Losses: [2.697953701019287, 1.9758442640304565, 10.477890968322754, 19.391162872314453, 0.6272150874137878], step: 31200, lr: 9.801974479570593e-05 -2023-07-21 19:02:40,771 44k INFO ====> Epoch: 159, cost 196.34 s -2023-07-21 19:04:08,404 44k INFO Train Epoch: 160 [39%] -2023-07-21 19:04:08,407 44k INFO Losses: [2.528224468231201, 2.0778298377990723, 10.73872184753418, 14.877276420593262, 1.1427303552627563], step: 31400, lr: 9.800749232760646e-05 -2023-07-21 19:05:58,750 44k INFO ====> Epoch: 160, cost 197.98 s -2023-07-21 19:07:27,603 44k INFO Train Epoch: 161 [41%] -2023-07-21 19:07:27,606 44k INFO Losses: [2.325450897216797, 2.343747615814209, 13.327208518981934, 19.331987380981445, 1.2686046361923218], step: 31600, lr: 9.79952413910655e-05 -2023-07-21 19:09:13,868 44k INFO ====> Epoch: 161, cost 195.12 s -2023-07-21 19:10:44,412 44k INFO Train Epoch: 162 [42%] -2023-07-21 19:10:44,414 44k INFO Losses: [2.249293804168701, 2.4865951538085938, 13.026719093322754, 21.153087615966797, 1.149781584739685], step: 31800, lr: 9.798299198589162e-05 -2023-07-21 19:12:27,023 44k INFO ====> Epoch: 162, cost 193.15 s -2023-07-21 19:14:00,739 44k INFO Train Epoch: 163 [44%] -2023-07-21 19:14:00,740 44k INFO Losses: [2.6316452026367188, 2.191751003265381, 8.334941864013672, 16.73215675354004, 0.7187716364860535], step: 32000, lr: 9.797074411189339e-05 -2023-07-21 19:14:14,180 44k INFO Saving model and optimizer state at iteration 163 to ./logs/44k/G_32000.pth -2023-07-21 19:14:21,402 44k INFO Saving model and optimizer state at iteration 163 to ./logs/44k/D_32000.pth -2023-07-21 
19:16:09,480 44k INFO ====> Epoch: 163, cost 222.46 s -2023-07-21 19:17:41,926 44k INFO Train Epoch: 164 [45%] -2023-07-21 19:17:41,928 44k INFO Losses: [2.4647040367126465, 2.5417134761810303, 9.12618637084961, 17.586902618408203, 1.3511015176773071], step: 32200, lr: 9.795849776887939e-05 -2023-07-21 19:19:20,398 44k INFO ====> Epoch: 164, cost 190.92 s -2023-07-22 06:59:31,070 44k INFO {'train': {'log_interval': 200, 'eval_interval': 1000, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'contentvec_final_proj': False}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256, 'ssl_dim': 768, 'n_speakers': 200}, 'spk': {'TW3_F_Yennefer_Rus': 0}, 'model_dir': './logs/44k', 'reset': False} -2023-07-22 06:59:46,077 44k INFO Loaded checkpoint './logs/44k/G_32000.pth' (iteration 163) -2023-07-22 06:59:53,239 44k INFO Loaded checkpoint './logs/44k/D_32000.pth' (iteration 163) -2023-07-22 07:02:15,779 44k INFO Train Epoch: 163 [44%] -2023-07-22 07:02:15,781 44k INFO Losses: [2.585324287414551, 2.2421796321868896, 11.93289852142334, 19.693323135375977, 0.8644950985908508], step: 32000, lr: 9.795849776887939e-05 -2023-07-22 07:02:36,097 44k INFO Saving model and optimizer state at iteration 163 to ./logs/44k/G_32000.pth -2023-07-22 07:02:38,663 44k INFO Saving model and optimizer state at iteration 163 to ./logs/44k/D_32000.pth -2023-07-22 07:05:08,140 44k INFO ====> Epoch: 163, cost 337.07 s -2023-07-22 07:06:42,163 44k INFO Train Epoch: 164 [45%] -2023-07-22 07:06:42,164 44k INFO Losses: [2.402092456817627, 2.9058008193969727, 5.901551246643066, 14.02944278717041, 1.2172725200653076], step: 32200, lr: 9.794625295665828e-05 -2023-07-22 07:08:18,957 44k INFO ====> Epoch: 164, cost 190.82 s -2023-07-22 07:09:52,563 44k INFO Train Epoch: 165 [47%] -2023-07-22 07:09:52,564 44k INFO Losses: [2.4914743900299072, 2.4218010902404785, 12.468343734741211, 18.963159561157227, 1.0232659578323364], step: 32400, lr: 9.79340096750387e-05 -2023-07-22 07:11:27,494 44k INFO ====> Epoch: 165, cost 188.54 s -2023-07-22 07:13:05,650 44k INFO Train Epoch: 166 [48%] -2023-07-22 07:13:05,652 44k INFO Losses: [2.542128562927246, 2.0251944065093994, 8.841829299926758, 18.33026123046875, 1.1472913026809692], step: 32600, lr: 9.792176792382932e-05 -2023-07-22 07:14:36,596 44k INFO ====> Epoch: 166, cost 189.10 s -2023-07-22 07:16:18,462 44k INFO Train Epoch: 167 [50%] -2023-07-22 07:16:18,464 44k INFO Losses: [2.54712176322937, 2.241243839263916, 8.657332420349121, 18.359033584594727, 1.0978599786758423], step: 32800, lr: 9.790952770283884e-05 -2023-07-22 07:17:46,934 44k INFO ====> Epoch: 167, cost 190.34 s -2023-07-22 07:19:30,327 44k 
INFO Train Epoch: 168 [51%] -2023-07-22 07:19:30,331 44k INFO Losses: [2.5301692485809326, 2.289682388305664, 11.795625686645508, 19.78899574279785, 1.1156961917877197], step: 33000, lr: 9.789728901187598e-05 -2023-07-22 07:19:39,748 44k INFO Saving model and optimizer state at iteration 168 to ./logs/44k/G_33000.pth -2023-07-22 07:19:46,299 44k INFO Saving model and optimizer state at iteration 168 to ./logs/44k/D_33000.pth -2023-07-22 07:21:18,444 44k INFO ====> Epoch: 168, cost 211.51 s -2023-07-22 07:23:06,312 44k INFO Train Epoch: 169 [53%] -2023-07-22 07:23:06,314 44k INFO Losses: [2.234825611114502, 2.4606504440307617, 11.232003211975098, 19.83521270751953, 0.8921003937721252], step: 33200, lr: 9.78850518507495e-05 -2023-07-22 07:24:29,495 44k INFO ====> Epoch: 169, cost 191.05 s -2023-07-22 07:26:20,268 44k INFO Train Epoch: 170 [54%] -2023-07-22 07:26:20,271 44k INFO Losses: [2.500972270965576, 2.4165501594543457, 11.314021110534668, 19.45246696472168, 1.1679878234863281], step: 33400, lr: 9.787281621926815e-05 -2023-07-22 07:27:39,993 44k INFO ====> Epoch: 170, cost 190.50 s -2023-07-22 07:29:31,825 44k INFO Train Epoch: 171 [56%] -2023-07-22 07:29:31,829 44k INFO Losses: [2.5938193798065186, 2.1618199348449707, 13.51310920715332, 20.7421932220459, 0.6043803095817566], step: 33600, lr: 9.786058211724074e-05 -2023-07-22 07:30:49,280 44k INFO ====> Epoch: 171, cost 189.29 s -2023-07-22 07:32:42,189 44k INFO Train Epoch: 172 [57%] -2023-07-22 07:32:42,190 44k INFO Losses: [2.4377405643463135, 2.368424654006958, 10.224843978881836, 16.9254207611084, 1.125497817993164], step: 33800, lr: 9.784834954447608e-05 -2023-07-22 07:33:56,880 44k INFO ====> Epoch: 172, cost 187.60 s -2023-07-22 07:35:54,230 44k INFO Train Epoch: 173 [59%] -2023-07-22 07:35:54,231 44k INFO Losses: [2.4037399291992188, 2.3424618244171143, 10.579471588134766, 19.59169578552246, 0.8090623021125793], step: 34000, lr: 9.783611850078301e-05 -2023-07-22 07:36:05,198 44k INFO Saving model and optimizer state at iteration 173 to ./logs/44k/G_34000.pth -2023-07-22 07:36:08,156 44k INFO Saving model and optimizer state at iteration 173 to ./logs/44k/D_34000.pth -2023-07-22 07:37:24,320 44k INFO ====> Epoch: 173, cost 207.44 s -2023-07-22 07:39:23,471 44k INFO Train Epoch: 174 [60%] -2023-07-22 07:39:23,474 44k INFO Losses: [2.278329849243164, 2.302506685256958, 12.002521514892578, 19.518491744995117, 0.9009906649589539], step: 34200, lr: 9.782388898597041e-05 -2023-07-22 07:40:32,701 44k INFO ====> Epoch: 174, cost 188.38 s -2023-07-22 07:42:35,863 44k INFO Train Epoch: 175 [62%] -2023-07-22 07:42:35,865 44k INFO Losses: [2.7949326038360596, 1.9939422607421875, 8.745769500732422, 16.489070892333984, 0.8996574878692627], step: 34400, lr: 9.781166099984716e-05 -2023-07-22 07:43:43,362 44k INFO ====> Epoch: 175, cost 190.66 s -2023-07-22 07:45:50,465 44k INFO Train Epoch: 176 [63%] -2023-07-22 07:45:50,467 44k INFO Losses: [2.420475959777832, 2.113380193710327, 11.868936538696289, 19.097858428955078, 1.3120217323303223], step: 34600, lr: 9.779943454222217e-05 -2023-07-22 07:46:55,442 44k INFO ====> Epoch: 176, cost 192.08 s -2023-07-22 07:49:00,196 44k INFO Train Epoch: 177 [65%] -2023-07-22 07:49:00,199 44k INFO Losses: [2.419018268585205, 2.098923444747925, 11.873685836791992, 18.537317276000977, 0.9861211776733398], step: 34800, lr: 9.778720961290439e-05 -2023-07-22 07:50:01,908 44k INFO ====> Epoch: 177, cost 186.47 s -2023-07-22 07:52:11,946 44k INFO Train Epoch: 178 [66%] -2023-07-22 07:52:11,949 44k INFO Losses: 
[2.4365410804748535, 2.216414213180542, 12.816457748413086, 18.669607162475586, 0.8110028505325317], step: 35000, lr: 9.777498621170277e-05 -2023-07-22 07:52:23,046 44k INFO Saving model and optimizer state at iteration 178 to ./logs/44k/G_35000.pth -2023-07-22 07:52:25,758 44k INFO Saving model and optimizer state at iteration 178 to ./logs/44k/D_35000.pth -2023-07-22 07:52:27,802 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_32000.pth -2023-07-22 07:52:27,805 44k INFO .. Free up space by deleting ckpt ./logs/44k/D_32000.pth -2023-07-22 07:53:28,590 44k INFO ====> Epoch: 178, cost 206.68 s -2023-07-22 07:55:42,309 44k INFO Train Epoch: 179 [68%] -2023-07-22 07:55:42,310 44k INFO Losses: [2.3464760780334473, 2.2969608306884766, 13.042510986328125, 18.74628257751465, 1.164148211479187], step: 35200, lr: 9.776276433842631e-05 -2023-07-22 07:56:38,743 44k INFO ====> Epoch: 179, cost 190.15 s -2023-07-22 07:58:53,944 44k INFO Train Epoch: 180 [70%] -2023-07-22 07:58:53,948 44k INFO Losses: [2.3933627605438232, 2.236266851425171, 11.958230018615723, 20.431270599365234, 0.9583703875541687], step: 35400, lr: 9.7750543992884e-05 -2023-07-22 07:59:46,388 44k INFO ====> Epoch: 180, cost 187.65 s -2023-07-22 08:02:04,182 44k INFO Train Epoch: 181 [71%] -2023-07-22 08:02:04,184 44k INFO Losses: [2.22413969039917, 2.6679093837738037, 13.00373363494873, 21.472909927368164, 1.2171555757522583], step: 35600, lr: 9.773832517488488e-05 -2023-07-22 08:02:55,162 44k INFO ====> Epoch: 181, cost 188.77 s -2023-07-22 08:05:14,206 44k INFO Train Epoch: 182 [73%] -2023-07-22 08:05:14,208 44k INFO Losses: [2.269204616546631, 2.6259496212005615, 13.318050384521484, 20.243640899658203, 1.1521800756454468], step: 35800, lr: 9.772610788423802e-05 -2023-07-22 08:06:03,239 44k INFO ====> Epoch: 182, cost 188.08 s -2023-07-22 08:08:23,647 44k INFO Train Epoch: 183 [74%] -2023-07-22 08:08:23,651 44k INFO Losses: [2.4962010383605957, 2.2254912853240967, 8.213802337646484, 20.58279800415039, 0.9232741594314575], step: 36000, lr: 9.771389212075249e-05 -2023-07-22 08:08:32,476 44k INFO Saving model and optimizer state at iteration 183 to ./logs/44k/G_36000.pth -2023-07-22 08:08:38,911 44k INFO Saving model and optimizer state at iteration 183 to ./logs/44k/D_36000.pth -2023-07-22 08:09:31,675 44k INFO ====> Epoch: 183, cost 208.44 s -2023-07-22 08:11:59,499 44k INFO Train Epoch: 184 [76%] -2023-07-22 08:11:59,501 44k INFO Losses: [2.2151060104370117, 2.2734124660491943, 13.114251136779785, 22.474565505981445, 0.848603367805481], step: 36200, lr: 9.77016778842374e-05 -2023-07-22 08:12:42,356 44k INFO ====> Epoch: 184, cost 190.68 s -2023-07-22 08:15:11,252 44k INFO Train Epoch: 185 [77%] -2023-07-22 08:15:11,255 44k INFO Losses: [2.2168984413146973, 2.6019043922424316, 12.84189224243164, 20.628704071044922, 0.9228880405426025], step: 36400, lr: 9.768946517450186e-05 -2023-07-22 08:15:52,344 44k INFO ====> Epoch: 185, cost 189.99 s -2023-07-22 08:18:21,437 44k INFO Train Epoch: 186 [79%] -2023-07-22 08:18:21,438 44k INFO Losses: [2.570892572402954, 2.361739158630371, 11.260383605957031, 18.764596939086914, 1.344403862953186], step: 36600, lr: 9.767725399135504e-05 -2023-07-22 08:18:59,508 44k INFO ====> Epoch: 186, cost 187.16 s -2023-07-22 08:21:31,251 44k INFO Train Epoch: 187 [80%] -2023-07-22 08:21:31,252 44k INFO Losses: [2.4719996452331543, 2.602984666824341, 10.32966136932373, 19.67409324645996, 0.8235208988189697], step: 36800, lr: 9.766504433460612e-05 -2023-07-22 08:22:06,942 44k INFO ====> Epoch: 187, cost 
187.43 s -2023-07-22 08:24:42,030 44k INFO Train Epoch: 188 [82%] -2023-07-22 08:24:42,033 44k INFO Losses: [2.429318904876709, 2.268805503845215, 12.827041625976562, 21.937902450561523, 1.3552038669586182], step: 37000, lr: 9.765283620406429e-05 -2023-07-22 08:24:51,180 44k INFO Saving model and optimizer state at iteration 188 to ./logs/44k/G_37000.pth -2023-07-22 08:24:58,464 44k INFO Saving model and optimizer state at iteration 188 to ./logs/44k/D_37000.pth -2023-07-22 08:25:37,208 44k INFO ====> Epoch: 188, cost 210.27 s -2023-07-22 08:28:17,365 44k INFO Train Epoch: 189 [83%] -2023-07-22 08:28:17,367 44k INFO Losses: [2.5878653526306152, 2.374795436859131, 12.22877311706543, 19.799243927001953, 0.7193838953971863], step: 37200, lr: 9.764062959953878e-05 -2023-07-22 08:28:48,808 44k INFO ====> Epoch: 189, cost 191.60 s -2023-07-22 08:31:27,456 44k INFO Train Epoch: 190 [85%] -2023-07-22 08:31:27,459 44k INFO Losses: [2.2830705642700195, 2.6535918712615967, 12.441581726074219, 20.73936653137207, 1.4427928924560547], step: 37400, lr: 9.762842452083883e-05 -2023-07-22 15:03:24,465 44k INFO {'train': {'log_interval': 200, 'eval_interval': 1000, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'contentvec_final_proj': False}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256, 'ssl_dim': 768, 'n_speakers': 200}, 'spk': {'TW3_F_Yennefer_Rus': 0}, 'model_dir': './logs/44k', 'reset': False} -2023-07-22 15:03:47,378 44k INFO Loaded checkpoint './logs/44k/G_37000.pth' (iteration 188) -2023-07-22 15:03:55,373 44k INFO Loaded checkpoint './logs/44k/D_37000.pth' (iteration 188) -2023-07-22 15:07:48,636 44k INFO Train Epoch: 188 [82%] -2023-07-22 15:07:48,637 44k INFO Losses: [2.441403388977051, 2.3106958866119385, 10.996516227722168, 20.52176284790039, 1.2344295978546143], step: 37000, lr: 9.764062959953878e-05 -2023-07-22 15:08:08,756 44k INFO Saving model and optimizer state at iteration 188 to ./logs/44k/G_37000.pth -2023-07-22 15:08:11,327 44k INFO Saving model and optimizer state at iteration 188 to ./logs/44k/D_37000.pth -2023-07-22 15:09:02,402 44k INFO ====> Epoch: 188, cost 337.94 s -2023-07-22 15:11:37,280 44k INFO Train Epoch: 189 [83%] -2023-07-22 15:11:37,282 44k INFO Losses: [2.5089426040649414, 2.2769877910614014, 9.403998374938965, 18.835556030273438, 0.6499198079109192], step: 37200, lr: 9.762842452083883e-05 -2023-07-22 15:12:07,323 44k INFO ====> Epoch: 189, cost 184.92 s -2023-07-22 15:14:45,290 44k INFO Train Epoch: 190 [85%] -2023-07-22 15:14:45,294 44k INFO Losses: [2.3101742267608643, 2.4599995613098145, 12.435778617858887, 20.43679428100586, 0.7300646901130676], step: 37400, lr: 
9.761622096777372e-05 -2023-07-22 15:15:12,912 44k INFO ====> Epoch: 190, cost 185.59 s -2023-07-22 15:17:53,126 44k INFO Train Epoch: 191 [86%] -2023-07-22 15:17:53,130 44k INFO Losses: [2.658201217651367, 2.078291177749634, 10.143412590026855, 16.351102828979492, 1.2943898439407349], step: 37600, lr: 9.760401894015275e-05 -2023-07-22 15:18:18,086 44k INFO ====> Epoch: 191, cost 185.17 s -2023-07-22 15:21:00,960 44k INFO Train Epoch: 192 [88%] -2023-07-22 15:21:00,962 44k INFO Losses: [2.500812530517578, 2.188133955001831, 12.519804000854492, 20.014394760131836, 0.9091939926147461], step: 37800, lr: 9.759181843778522e-05 -2023-07-22 15:21:23,534 44k INFO ====> Epoch: 192, cost 185.45 s -2023-07-22 15:24:09,146 44k INFO Train Epoch: 193 [89%] -2023-07-22 15:24:09,148 44k INFO Losses: [2.255077362060547, 2.3533365726470947, 11.840723037719727, 19.09269142150879, 1.1694731712341309], step: 38000, lr: 9.757961946048049e-05 -2023-07-22 15:24:20,575 44k INFO Saving model and optimizer state at iteration 193 to ./logs/44k/G_38000.pth -2023-07-22 15:24:28,142 44k INFO Saving model and optimizer state at iteration 193 to ./logs/44k/D_38000.pth -2023-07-22 15:24:54,532 44k INFO ====> Epoch: 193, cost 211.00 s -2023-07-22 15:27:41,921 44k INFO Train Epoch: 194 [91%] -2023-07-22 15:27:41,922 44k INFO Losses: [2.414470672607422, 2.2293601036071777, 11.56885051727295, 20.629003524780273, 0.44519442319869995], step: 38200, lr: 9.756742200804793e-05 -2023-07-22 15:27:59,897 44k INFO ====> Epoch: 194, cost 185.36 s -2023-07-22 15:30:50,427 44k INFO Train Epoch: 195 [92%] -2023-07-22 15:30:50,430 44k INFO Losses: [2.58298921585083, 2.2688815593719482, 10.780206680297852, 19.878589630126953, 1.2501468658447266], step: 38400, lr: 9.755522608029692e-05 -2023-07-22 15:31:03,874 44k INFO ====> Epoch: 195, cost 183.98 s -2023-07-22 15:33:59,800 44k INFO Train Epoch: 196 [94%] -2023-07-22 15:33:59,801 44k INFO Losses: [2.3408610820770264, 2.40810227394104, 11.823299407958984, 18.417362213134766, 0.5994876623153687], step: 38600, lr: 9.754303167703689e-05 -2023-07-22 15:34:11,463 44k INFO ====> Epoch: 196, cost 187.59 s -2023-07-22 15:37:07,863 44k INFO Train Epoch: 197 [95%] -2023-07-22 15:37:07,866 44k INFO Losses: [2.5536317825317383, 2.2395989894866943, 9.684464454650879, 18.660524368286133, 0.8477760553359985], step: 38800, lr: 9.753083879807726e-05 -2023-07-22 15:37:16,307 44k INFO ====> Epoch: 197, cost 184.84 s -2023-07-22 15:40:15,331 44k INFO Train Epoch: 198 [97%] -2023-07-22 15:40:15,332 44k INFO Losses: [2.4982998371124268, 2.4224164485931396, 12.448868751525879, 20.01620864868164, 1.143965482711792], step: 39000, lr: 9.75186474432275e-05 -2023-07-22 15:40:26,592 44k INFO Saving model and optimizer state at iteration 198 to ./logs/44k/G_39000.pth -2023-07-22 15:40:29,397 44k INFO Saving model and optimizer state at iteration 198 to ./logs/44k/D_39000.pth -2023-07-22 15:40:36,822 44k INFO ====> Epoch: 198, cost 200.52 s -2023-07-22 15:43:40,441 44k INFO Train Epoch: 199 [98%] -2023-07-22 15:43:40,442 44k INFO Losses: [2.49290132522583, 2.4873545169830322, 10.080567359924316, 19.668354034423828, 1.369390845298767], step: 39200, lr: 9.750645761229709e-05 -2023-07-22 15:43:44,939 44k INFO ====> Epoch: 199, cost 188.12 s -2023-07-22 15:46:49,342 44k INFO ====> Epoch: 200, cost 184.40 s -2023-07-22 15:47:03,201 44k INFO Train Epoch: 201 [0%] -2023-07-22 15:47:03,204 44k INFO Losses: [2.4995694160461426, 2.029576063156128, 12.433341026306152, 18.397254943847656, 0.8050822019577026], step: 39400, lr: 
9.748208252143241e-05 -2023-07-22 15:49:54,794 44k INFO ====> Epoch: 201, cost 185.45 s -2023-07-22 15:50:11,022 44k INFO Train Epoch: 202 [2%] -2023-07-22 15:50:11,025 44k INFO Losses: [2.456756114959717, 2.381274461746216, 10.963191032409668, 19.281002044677734, 1.0571413040161133], step: 39600, lr: 9.746989726111722e-05 -2023-07-22 15:53:01,344 44k INFO ====> Epoch: 202, cost 186.55 s -2023-07-22 15:53:19,730 44k INFO Train Epoch: 203 [3%] -2023-07-22 15:53:19,731 44k INFO Losses: [2.484696388244629, 2.141167163848877, 10.566902160644531, 20.36380958557129, 0.9494445323944092], step: 39800, lr: 9.745771352395957e-05 -2023-07-22 15:56:07,162 44k INFO ====> Epoch: 203, cost 185.82 s -2023-07-22 15:56:25,420 44k INFO Train Epoch: 204 [5%] -2023-07-22 15:56:25,422 44k INFO Losses: [2.530027389526367, 2.152061939239502, 10.492026329040527, 19.46674156188965, 1.0050216913223267], step: 40000, lr: 9.744553130976908e-05 -2023-07-22 15:56:35,079 44k INFO Saving model and optimizer state at iteration 204 to ./logs/44k/G_40000.pth -2023-07-22 15:56:41,935 44k INFO Saving model and optimizer state at iteration 204 to ./logs/44k/D_40000.pth -2023-07-22 15:59:28,566 44k INFO ====> Epoch: 204, cost 201.40 s -2023-07-22 15:59:50,990 44k INFO Train Epoch: 205 [6%] -2023-07-22 15:59:50,993 44k INFO Losses: [2.711050271987915, 2.008875846862793, 10.762093544006348, 15.85610294342041, 0.7585859298706055], step: 40200, lr: 9.743335061835535e-05 -2023-07-22 16:02:33,506 44k INFO ====> Epoch: 205, cost 184.94 s -2023-07-22 16:02:57,436 44k INFO Train Epoch: 206 [8%] -2023-07-22 16:02:57,438 44k INFO Losses: [2.3877711296081543, 2.394460916519165, 9.700550079345703, 19.43191146850586, 1.2458809614181519], step: 40400, lr: 9.742117144952805e-05 -2023-07-22 16:05:37,744 44k INFO ====> Epoch: 206, cost 184.24 s -2023-07-22 16:06:05,868 44k INFO Train Epoch: 207 [9%] -2023-07-22 16:06:05,878 44k INFO Losses: [2.419792413711548, 2.511079788208008, 10.309126853942871, 19.038393020629883, 0.6269144415855408], step: 40600, lr: 9.740899380309685e-05 -2023-07-22 16:08:44,127 44k INFO ====> Epoch: 207, cost 186.38 s -2023-07-22 16:09:14,185 44k INFO Train Epoch: 208 [11%] -2023-07-22 16:09:14,187 44k INFO Losses: [2.436455249786377, 2.1928141117095947, 6.075944900512695, 16.90185546875, 1.1212495565414429], step: 40800, lr: 9.739681767887146e-05 -2023-07-22 16:11:49,793 44k INFO ====> Epoch: 208, cost 185.67 s -2023-07-22 16:12:22,073 44k INFO Train Epoch: 209 [12%] -2023-07-22 16:12:22,075 44k INFO Losses: [2.5166592597961426, 2.160179376602173, 10.601025581359863, 19.084491729736328, 0.98102205991745], step: 41000, lr: 9.73846430766616e-05 -2023-07-22 16:12:31,169 44k INFO Saving model and optimizer state at iteration 209 to ./logs/44k/G_41000.pth -2023-07-22 16:12:41,919 44k INFO Saving model and optimizer state at iteration 209 to ./logs/44k/D_41000.pth -2023-07-22 16:15:16,892 44k INFO ====> Epoch: 209, cost 207.10 s -2023-07-22 16:15:56,965 44k INFO Train Epoch: 210 [14%] -2023-07-22 16:15:56,966 44k INFO Losses: [2.5167527198791504, 2.343341827392578, 12.279075622558594, 19.347774505615234, 0.9018924832344055], step: 41200, lr: 9.7372469996277e-05 -2023-07-22 16:18:25,502 44k INFO ====> Epoch: 210, cost 188.61 s -2023-07-22 16:19:04,591 44k INFO Train Epoch: 211 [15%] -2023-07-22 16:19:04,594 44k INFO Losses: [2.4049477577209473, 2.3347959518432617, 10.576948165893555, 19.303022384643555, 0.9534285664558411], step: 41400, lr: 9.736029843752747e-05 -2023-07-22 16:21:32,016 44k INFO ====> Epoch: 211, cost 186.51 s 
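The learning-rate trail in the records above is internally consistent with the exponential decay declared in the config dump that opens each run ('learning_rate': 0.0001, 'lr_decay': 0.999875): every epoch multiplies the previous lr by 0.999875, as a per-epoch scheduler such as torch.optim.lr_scheduler.ExponentialLR would. The five bracketed Losses values appear to follow the upstream VITS training-script order (discriminator, generator, feature matching, mel reconstruction, KL); the mel term is scaled by 'c_mel': 45, which is why it dominates at roughly 15 to 22. A minimal sketch, not taken from the repository, that checks the decay against two consecutive values logged just above:

    # Verify lr(epoch 203) == lr(epoch 202) * lr_decay, with both values
    # copied verbatim from the log records above.
    gamma = 0.999875                       # 'lr_decay' from the config dump
    lr_epoch_202 = 9.746989726111722e-05   # logged at step 39600
    lr_epoch_203 = 9.745771352395957e-05   # logged at step 39800
    assert abs(lr_epoch_202 * gamma - lr_epoch_203) < 1e-16

    # Closed form after n scheduler steps. This is only exact for an
    # uninterrupted run: every resume in this log repeats (and re-decays)
    # one epoch, so epoch number and exponent drift apart over time.
    def lr_after(n: int, base_lr: float = 1e-4) -> float:
        return base_lr * gamma ** n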
-2023-07-22 16:22:13,793 44k INFO Train Epoch: 212 [17%] -2023-07-22 16:22:13,794 44k INFO Losses: [2.3647708892822266, 2.36202335357666, 11.445732116699219, 20.470346450805664, 0.907575249671936], step: 41600, lr: 9.734812840022278e-05 -2023-07-22 16:24:38,160 44k INFO ====> Epoch: 212, cost 186.14 s -2023-07-22 16:25:22,010 44k INFO Train Epoch: 213 [18%] -2023-07-22 16:25:22,013 44k INFO Losses: [2.3928990364074707, 2.317216157913208, 12.156888961791992, 17.86039161682129, 0.537847638130188], step: 41800, lr: 9.733595988417275e-05 -2023-07-22 16:27:43,806 44k INFO ====> Epoch: 213, cost 185.65 s -2023-07-22 16:28:28,732 44k INFO Train Epoch: 214 [20%] -2023-07-22 16:28:28,734 44k INFO Losses: [2.154418468475342, 2.488471269607544, 13.228680610656738, 20.660818099975586, 0.6959773302078247], step: 42000, lr: 9.732379288918723e-05 -2023-07-22 16:28:40,533 44k INFO Saving model and optimizer state at iteration 214 to ./logs/44k/G_42000.pth -2023-07-22 16:28:43,631 44k INFO Saving model and optimizer state at iteration 214 to ./logs/44k/D_42000.pth -2023-07-22 16:31:16,465 44k INFO ====> Epoch: 214, cost 212.66 s -2023-07-22 16:32:11,261 44k INFO Train Epoch: 215 [21%] -2023-07-22 16:32:11,262 44k INFO Losses: [2.3985595703125, 2.5150277614593506, 11.173113822937012, 20.342042922973633, 1.201341152191162], step: 42200, lr: 9.731162741507607e-05 -2023-07-22 16:34:28,872 44k INFO ====> Epoch: 215, cost 192.41 s -2023-07-22 16:35:21,774 44k INFO Train Epoch: 216 [23%] -2023-07-22 16:35:21,775 44k INFO Losses: [2.4011378288269043, 2.3120017051696777, 10.122314453125, 16.92221450805664, 0.7077859044075012], step: 42400, lr: 9.729946346164919e-05 -2023-07-22 16:37:37,300 44k INFO ====> Epoch: 216, cost 188.43 s -2023-07-22 16:38:32,211 44k INFO Train Epoch: 217 [24%] -2023-07-22 16:38:32,212 44k INFO Losses: [2.5303049087524414, 2.2790040969848633, 11.278924942016602, 19.099878311157227, 1.0323985815048218], step: 42600, lr: 9.728730102871649e-05 -2023-07-22 16:40:44,761 44k INFO ====> Epoch: 217, cost 187.46 s -2023-07-22 16:41:42,402 44k INFO Train Epoch: 218 [26%] -2023-07-22 16:41:42,403 44k INFO Losses: [2.523988723754883, 2.4846980571746826, 11.461652755737305, 18.920991897583008, 0.9722663760185242], step: 42800, lr: 9.727514011608789e-05 -2023-07-22 16:43:52,483 44k INFO ====> Epoch: 218, cost 187.72 s -2023-07-22 16:44:54,600 44k INFO Train Epoch: 219 [27%] -2023-07-22 16:44:54,603 44k INFO Losses: [2.443206310272217, 2.361900806427002, 11.394880294799805, 21.86053466796875, 1.2349188327789307], step: 43000, lr: 9.726298072357337e-05 -2023-07-22 16:45:06,257 44k INFO Saving model and optimizer state at iteration 219 to ./logs/44k/G_43000.pth -2023-07-22 16:45:09,707 44k INFO Saving model and optimizer state at iteration 219 to ./logs/44k/D_43000.pth -2023-07-22 16:45:23,120 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_40000.pth -2023-07-22 16:45:23,122 44k INFO .. 
Free up space by deleting ckpt ./logs/44k/D_40000.pth -2023-07-22 16:47:33,580 44k INFO ====> Epoch: 219, cost 221.10 s -2023-07-22 16:48:40,429 44k INFO Train Epoch: 220 [29%] -2023-07-22 16:48:40,433 44k INFO Losses: [2.4845993518829346, 2.2424733638763428, 11.186004638671875, 18.393198013305664, 1.3454376459121704], step: 43200, lr: 9.725082285098293e-05 -2023-07-22 16:50:42,664 44k INFO ====> Epoch: 220, cost 189.08 s -2023-07-22 16:51:52,223 44k INFO Train Epoch: 221 [30%] -2023-07-22 16:51:52,224 44k INFO Losses: [2.4311628341674805, 2.1261823177337646, 12.015487670898438, 20.132461547851562, 0.8797117471694946], step: 43400, lr: 9.723866649812655e-05 -2023-07-22 16:53:53,820 44k INFO ====> Epoch: 221, cost 191.16 s -2023-07-22 16:55:00,854 44k INFO Train Epoch: 222 [32%] -2023-07-22 16:55:00,856 44k INFO Losses: [2.16326904296875, 2.511178970336914, 12.403450965881348, 19.414657592773438, 0.944162905216217], step: 43600, lr: 9.722651166481428e-05 -2023-07-22 16:56:59,612 44k INFO ====> Epoch: 222, cost 185.79 s -2023-07-22 16:58:09,087 44k INFO Train Epoch: 223 [34%] -2023-07-22 16:58:09,090 44k INFO Losses: [2.591031074523926, 2.408923387527466, 5.865946292877197, 15.766362190246582, 0.8638672232627869], step: 43800, lr: 9.721435835085619e-05 -2023-07-22 17:00:05,535 44k INFO ====> Epoch: 223, cost 185.92 s -2023-07-22 17:01:17,279 44k INFO Train Epoch: 224 [35%] -2023-07-22 17:01:17,282 44k INFO Losses: [2.5083813667297363, 2.1478679180145264, 9.007478713989258, 16.14285659790039, 1.1415948867797852], step: 44000, lr: 9.720220655606233e-05 -2023-07-22 17:01:28,312 44k INFO Saving model and optimizer state at iteration 224 to ./logs/44k/G_44000.pth -2023-07-22 17:01:31,214 44k INFO Saving model and optimizer state at iteration 224 to ./logs/44k/D_44000.pth -2023-07-22 17:01:34,788 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_41000.pth -2023-07-22 17:01:34,796 44k INFO .. 
Free up space by deleting ckpt ./logs/44k/D_41000.pth -2023-07-22 17:03:28,012 44k INFO ====> Epoch: 224, cost 202.48 s -2023-07-22 17:04:46,556 44k INFO Train Epoch: 225 [37%] -2023-07-22 17:04:46,559 44k INFO Losses: [2.5711352825164795, 1.9236774444580078, 7.92902135848999, 19.299884796142578, 0.924669623374939], step: 44200, lr: 9.719005628024282e-05 -2023-07-22 17:06:37,329 44k INFO ====> Epoch: 225, cost 189.32 s -2023-07-22 17:07:55,954 44k INFO Train Epoch: 226 [38%] -2023-07-22 17:07:55,957 44k INFO Losses: [2.370633363723755, 2.186004400253296, 10.88974380493164, 19.4685115814209, 1.135475993156433], step: 44400, lr: 9.717790752320778e-05 -2023-07-22 17:09:43,783 44k INFO ====> Epoch: 226, cost 186.45 s -2023-07-22 17:11:03,875 44k INFO Train Epoch: 227 [40%] -2023-07-22 17:11:03,876 44k INFO Losses: [2.485462188720703, 2.478276014328003, 11.953454971313477, 18.34847068786621, 0.9287052154541016], step: 44600, lr: 9.716576028476738e-05 -2023-07-22 17:12:49,204 44k INFO ====> Epoch: 227, cost 185.42 s -2023-07-22 17:14:14,301 44k INFO Train Epoch: 228 [41%] -2023-07-22 17:14:14,304 44k INFO Losses: [2.2572944164276123, 2.318941116333008, 12.266435623168945, 18.96845817565918, 0.8677745461463928], step: 44800, lr: 9.715361456473177e-05 -2023-07-22 17:15:57,597 44k INFO ====> Epoch: 228, cost 188.39 s -2023-07-22 17:17:25,878 44k INFO Train Epoch: 229 [43%] -2023-07-22 17:17:25,880 44k INFO Losses: [2.428697347640991, 2.6221110820770264, 9.482841491699219, 19.40119743347168, 0.8764947652816772], step: 45000, lr: 9.714147036291117e-05 -2023-07-22 17:17:37,708 44k INFO Saving model and optimizer state at iteration 229 to ./logs/44k/G_45000.pth -2023-07-22 17:17:44,646 44k INFO Saving model and optimizer state at iteration 229 to ./logs/44k/D_45000.pth -2023-07-22 17:17:50,348 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_42000.pth -2023-07-22 17:17:50,351 44k INFO .. 
Free up space by deleting ckpt ./logs/44k/D_42000.pth -2023-07-22 17:19:34,098 44k INFO ====> Epoch: 229, cost 216.50 s -2023-07-22 17:21:08,606 44k INFO Train Epoch: 230 [44%] -2023-07-22 17:21:08,608 44k INFO Losses: [2.2845940589904785, 2.5569605827331543, 12.381278038024902, 19.600616455078125, 0.9426952004432678], step: 45200, lr: 9.71293276791158e-05 -2023-07-22 17:22:46,128 44k INFO ====> Epoch: 230, cost 192.03 s -2023-07-22 17:24:20,546 44k INFO Train Epoch: 231 [46%] -2023-07-22 17:24:20,548 44k INFO Losses: [2.385017156600952, 2.581902265548706, 11.410624504089355, 21.031883239746094, 0.8205609321594238], step: 45400, lr: 9.711718651315591e-05 -2023-07-22 17:25:56,315 44k INFO ====> Epoch: 231, cost 190.19 s -2023-07-22 17:27:31,208 44k INFO Train Epoch: 232 [47%] -2023-07-22 17:27:31,210 44k INFO Losses: [2.327714681625366, 2.5418074131011963, 11.55052661895752, 19.26805305480957, 1.1349209547042847], step: 45600, lr: 9.710504686484176e-05 -2023-07-22 17:29:03,691 44k INFO ====> Epoch: 232, cost 187.38 s -2023-07-22 17:30:42,888 44k INFO Train Epoch: 233 [49%] -2023-07-22 17:30:42,890 44k INFO Losses: [2.4742331504821777, 2.116865873336792, 11.416472434997559, 18.47442626953125, 1.0985156297683716], step: 45800, lr: 9.709290873398365e-05 -2023-07-22 17:32:10,701 44k INFO ====> Epoch: 233, cost 187.01 s -2023-07-22 17:33:52,755 44k INFO Train Epoch: 234 [50%] -2023-07-22 17:33:52,757 44k INFO Losses: [2.5581369400024414, 2.1077880859375, 9.726001739501953, 18.922183990478516, 1.2231109142303467], step: 46000, lr: 9.70807721203919e-05 -2023-07-22 17:34:03,898 44k INFO Saving model and optimizer state at iteration 234 to ./logs/44k/G_46000.pth -2023-07-22 17:34:07,822 44k INFO Saving model and optimizer state at iteration 234 to ./logs/44k/D_46000.pth -2023-07-22 17:34:16,950 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_43000.pth -2023-07-22 17:34:16,952 44k INFO .. 
Free up space by deleting ckpt ./logs/44k/D_43000.pth -2023-07-22 17:35:44,333 44k INFO ====> Epoch: 234, cost 213.63 s -2023-07-22 19:14:09,345 44k INFO {'train': {'log_interval': 200, 'eval_interval': 1000, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'contentvec_final_proj': False}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256, 'ssl_dim': 768, 'n_speakers': 200}, 'spk': {'TW3_F_Yennefer_Rus': 0}, 'model_dir': './logs/44k', 'reset': False} -2023-07-22 19:14:28,990 44k INFO Loaded checkpoint './logs/44k/G_45000.pth' (iteration 229) -2023-07-22 19:14:40,655 44k INFO Loaded checkpoint './logs/44k/D_45000.pth' (iteration 229) -2023-07-22 19:17:00,487 44k INFO Train Epoch: 229 [43%] -2023-07-22 19:17:00,491 44k INFO Losses: [2.332531452178955, 2.410524845123291, 10.516853332519531, 19.430788040161133, 0.8549007773399353], step: 45000, lr: 9.71293276791158e-05 -2023-07-22 19:17:20,494 44k INFO Saving model and optimizer state at iteration 229 to ./logs/44k/G_45000.pth -2023-07-22 19:17:23,058 44k INFO Saving model and optimizer state at iteration 229 to ./logs/44k/D_45000.pth -2023-07-22 19:19:52,538 44k INFO ====> Epoch: 229, cost 343.19 s -2023-07-22 19:21:22,406 44k INFO Train Epoch: 230 [44%] -2023-07-22 19:21:22,409 44k INFO Losses: [2.516045093536377, 2.1217212677001953, 14.028848648071289, 19.244049072265625, 0.788071870803833], step: 45200, lr: 9.711718651315591e-05 -2023-07-22 19:23:00,401 44k INFO ====> Epoch: 230, cost 187.86 s -2023-07-22 19:24:32,471 44k INFO Train Epoch: 231 [46%] -2023-07-22 19:24:32,472 44k INFO Losses: [2.546809434890747, 2.627643346786499, 8.196196556091309, 18.192941665649414, 0.367060124874115], step: 45400, lr: 9.710504686484176e-05 -2023-07-22 19:26:07,766 44k INFO ====> Epoch: 231, cost 187.36 s -2023-07-22 19:27:44,557 44k INFO Train Epoch: 232 [47%] -2023-07-22 19:27:44,558 44k INFO Losses: [2.511570453643799, 2.464445114135742, 10.40671157836914, 19.27495002746582, 1.0986320972442627], step: 45600, lr: 9.709290873398365e-05 -2023-07-22 19:29:17,256 44k INFO ====> Epoch: 232, cost 189.49 s -2023-07-22 19:30:53,480 44k INFO Train Epoch: 233 [49%] -2023-07-22 19:30:53,481 44k INFO Losses: [2.427959680557251, 2.4580941200256348, 10.981380462646484, 19.50925064086914, 1.0708677768707275], step: 45800, lr: 9.70807721203919e-05 -2023-07-22 19:32:21,538 44k INFO ====> Epoch: 233, cost 184.28 s -2023-07-22 19:34:04,389 44k INFO Train Epoch: 234 [50%] -2023-07-22 19:34:04,393 44k INFO Losses: [2.283668041229248, 2.632579803466797, 11.054332733154297, 17.951215744018555, 0.9859746694564819], step: 46000, lr: 9.706863702387684e-05 -2023-07-22 19:34:17,005 44k 
INFO Saving model and optimizer state at iteration 234 to ./logs/44k/G_46000.pth -2023-07-22 19:34:22,898 44k INFO Saving model and optimizer state at iteration 234 to ./logs/44k/D_46000.pth -2023-07-22 19:35:59,880 44k INFO ====> Epoch: 234, cost 218.34 s -2023-07-22 19:37:49,775 44k INFO Train Epoch: 235 [52%] -2023-07-22 19:37:49,776 44k INFO Losses: [2.3438494205474854, 2.310753345489502, 11.02319622039795, 19.387592315673828, 1.388058066368103], step: 46200, lr: 9.705650344424885e-05 -2023-07-22 19:39:11,731 44k INFO ====> Epoch: 235, cost 191.85 s -2023-07-22 19:40:59,042 44k INFO Train Epoch: 236 [53%] -2023-07-22 19:40:59,044 44k INFO Losses: [2.4551491737365723, 2.5523247718811035, 12.950431823730469, 19.717992782592773, 1.2555534839630127], step: 46400, lr: 9.704437138131832e-05 -2023-07-22 19:42:22,283 44k INFO ====> Epoch: 236, cost 190.55 s -2023-07-22 19:44:11,875 44k INFO Train Epoch: 237 [55%] -2023-07-22 19:44:11,876 44k INFO Losses: [2.3444249629974365, 2.499401807785034, 11.061455726623535, 18.392974853515625, 0.6710307002067566], step: 46600, lr: 9.703224083489565e-05 -2023-07-22 19:45:29,665 44k INFO ====> Epoch: 237, cost 187.38 s -2023-07-22 19:47:20,462 44k INFO Train Epoch: 238 [56%] -2023-07-22 19:47:20,464 44k INFO Losses: [2.4489123821258545, 2.726597309112549, 13.66533374786377, 19.95982551574707, 0.30094969272613525], step: 46800, lr: 9.702011180479129e-05 -2023-07-22 19:48:35,996 44k INFO ====> Epoch: 238, cost 186.33 s -2023-07-22 19:50:26,780 44k INFO Train Epoch: 239 [58%] -2023-07-22 19:50:26,781 44k INFO Losses: [2.6077497005462646, 2.781548023223877, 9.624807357788086, 16.21816635131836, 0.8973683714866638], step: 47000, lr: 9.700798429081568e-05 -2023-07-22 19:50:36,452 44k INFO Saving model and optimizer state at iteration 239 to ./logs/44k/G_47000.pth -2023-07-22 19:50:42,364 44k INFO Saving model and optimizer state at iteration 239 to ./logs/44k/D_47000.pth -2023-07-22 19:51:58,367 44k INFO ====> Epoch: 239, cost 202.37 s -2023-07-22 19:53:52,134 44k INFO Train Epoch: 240 [59%] -2023-07-22 19:53:52,137 44k INFO Losses: [2.3579788208007812, 2.5397770404815674, 8.642542839050293, 18.22545051574707, 0.7177121043205261], step: 47200, lr: 9.699585829277933e-05 -2023-07-22 19:55:02,182 44k INFO ====> Epoch: 240, cost 183.82 s -2023-07-22 19:57:03,436 44k INFO Train Epoch: 241 [61%] -2023-07-22 19:57:03,437 44k INFO Losses: [2.340689182281494, 2.2796244621276855, 12.01513385772705, 19.040964126586914, 1.1761600971221924], step: 47400, lr: 9.698373381049272e-05 -2023-07-22 19:58:12,050 44k INFO ====> Epoch: 241, cost 189.87 s -2023-07-22 20:00:12,049 44k INFO Train Epoch: 242 [62%] -2023-07-22 20:00:12,051 44k INFO Losses: [2.357215642929077, 2.233599901199341, 11.707725524902344, 18.539146423339844, 0.9447231292724609], step: 47600, lr: 9.69716108437664e-05 -2023-07-22 20:01:19,278 44k INFO ====> Epoch: 242, cost 187.23 s -2023-07-22 20:03:23,323 44k INFO Train Epoch: 243 [64%] -2023-07-22 20:03:23,325 44k INFO Losses: [2.3822154998779297, 2.3974502086639404, 11.432353019714355, 18.59490394592285, 0.7261757254600525], step: 47800, lr: 9.695948939241093e-05 -2023-07-22 20:04:26,726 44k INFO ====> Epoch: 243, cost 187.45 s -2023-07-22 20:06:35,049 44k INFO Train Epoch: 244 [65%] -2023-07-22 20:06:35,052 44k INFO Losses: [2.6240665912628174, 2.125138759613037, 9.709309577941895, 17.323101043701172, 1.0964633226394653], step: 48000, lr: 9.694736945623688e-05 -2023-07-22 20:06:45,996 44k INFO Saving model and optimizer state at iteration 244 to 
./logs/44k/G_48000.pth -2023-07-22 20:06:57,557 44k INFO Saving model and optimizer state at iteration 244 to ./logs/44k/D_48000.pth -2023-07-22 20:07:03,139 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_45000.pth -2023-07-22 20:07:03,143 44k INFO .. Free up space by deleting ckpt ./logs/44k/D_45000.pth -2023-07-22 20:08:03,086 44k INFO ====> Epoch: 244, cost 216.36 s -2023-07-22 20:10:17,072 44k INFO Train Epoch: 245 [67%] -2023-07-22 20:10:17,073 44k INFO Losses: [2.351764678955078, 2.9082162380218506, 9.533535957336426, 18.760215759277344, 1.2309482097625732], step: 48200, lr: 9.693525103505484e-05 -2023-07-22 20:11:16,032 44k INFO ====> Epoch: 245, cost 192.95 s -2023-07-22 20:13:27,574 44k INFO Train Epoch: 246 [69%] -2023-07-22 20:13:27,577 44k INFO Losses: [2.4004456996917725, 2.260373830795288, 9.922468185424805, 16.73210906982422, 0.8661314845085144], step: 48400, lr: 9.692313412867544e-05 -2023-07-22 20:14:21,015 44k INFO ====> Epoch: 246, cost 184.98 s -2023-07-22 20:16:36,206 44k INFO Train Epoch: 247 [70%] -2023-07-22 20:16:36,209 44k INFO Losses: [2.506882429122925, 2.19392466545105, 9.842905044555664, 15.13754940032959, 0.7189900279045105], step: 48600, lr: 9.691101873690936e-05 -2023-07-22 20:17:28,943 44k INFO ====> Epoch: 247, cost 187.93 s -2023-07-22 20:19:46,931 44k INFO Train Epoch: 248 [72%] -2023-07-22 20:19:46,934 44k INFO Losses: [2.436012029647827, 2.3019487857818604, 13.48349666595459, 19.49954605102539, 0.9199798107147217], step: 48800, lr: 9.689890485956725e-05 -2023-07-22 20:20:36,144 44k INFO ====> Epoch: 248, cost 187.20 s -2023-07-22 20:22:55,154 44k INFO Train Epoch: 249 [73%] -2023-07-22 20:22:55,156 44k INFO Losses: [2.479963779449463, 2.363328218460083, 10.17467212677002, 18.699806213378906, 1.152662754058838], step: 49000, lr: 9.68867924964598e-05 -2023-07-22 20:23:05,436 44k INFO Saving model and optimizer state at iteration 249 to ./logs/44k/G_49000.pth -2023-07-22 20:23:08,425 44k INFO Saving model and optimizer state at iteration 249 to ./logs/44k/D_49000.pth -2023-07-22 20:23:13,855 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_46000.pth -2023-07-22 20:23:13,864 44k INFO .. 
Free up space by deleting ckpt ./logs/44k/D_46000.pth -2023-07-22 20:24:04,639 44k INFO ====> Epoch: 249, cost 208.50 s -2023-07-22 20:26:32,593 44k INFO Train Epoch: 250 [75%] -2023-07-22 20:26:32,595 44k INFO Losses: [2.367183208465576, 2.364567279815674, 13.024526596069336, 19.218908309936523, 0.9040659070014954], step: 49200, lr: 9.687468164739773e-05 -2023-07-22 20:27:16,498 44k INFO ====> Epoch: 250, cost 191.86 s -2023-07-22 20:29:43,576 44k INFO Train Epoch: 251 [76%] -2023-07-22 20:29:43,578 44k INFO Losses: [2.4787638187408447, 2.440725803375244, 8.510250091552734, 17.77101707458496, 0.9614638090133667], step: 49400, lr: 9.68625723121918e-05 -2023-07-22 20:30:25,303 44k INFO ====> Epoch: 251, cost 188.80 s -2023-07-22 20:32:52,345 44k INFO Train Epoch: 252 [78%] -2023-07-22 20:32:52,346 44k INFO Losses: [2.583603620529175, 2.71201229095459, 11.708261489868164, 19.696733474731445, 1.340529441833496], step: 49600, lr: 9.685046449065278e-05 -2023-07-22 20:33:34,682 44k INFO ====> Epoch: 252, cost 189.38 s -2023-07-22 20:36:03,177 44k INFO Train Epoch: 253 [79%] -2023-07-22 20:36:03,178 44k INFO Losses: [2.3613569736480713, 2.120302200317383, 12.228474617004395, 19.414560317993164, 1.1441636085510254], step: 49800, lr: 9.683835818259144e-05 -2023-07-22 20:36:39,673 44k INFO ====> Epoch: 253, cost 184.99 s -2023-07-22 20:39:11,638 44k INFO Train Epoch: 254 [81%] -2023-07-22 20:39:11,642 44k INFO Losses: [2.6259520053863525, 2.3337035179138184, 10.151969909667969, 18.860790252685547, 0.8806833624839783], step: 50000, lr: 9.68262533878186e-05 -2023-07-22 20:39:24,203 44k INFO Saving model and optimizer state at iteration 254 to ./logs/44k/G_50000.pth -2023-07-22 20:39:34,229 44k INFO Saving model and optimizer state at iteration 254 to ./logs/44k/D_50000.pth -2023-07-22 20:40:13,663 44k INFO ====> Epoch: 254, cost 213.99 s -2023-07-23 15:30:48,010 44k INFO {'train': {'log_interval': 200, 'eval_interval': 1000, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'contentvec_final_proj': False}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256, 'ssl_dim': 768, 'n_speakers': 200}, 'spk': {'TW3_F_Yennefer_Rus': 0}, 'model_dir': './logs/44k', 'reset': False} -2023-07-23 15:31:02,895 44k INFO Loaded checkpoint './logs/44k/G_50000.pth' (iteration 254) -2023-07-23 15:31:10,965 44k INFO Loaded checkpoint './logs/44k/D_50000.pth' (iteration 254) -2023-07-23 15:34:49,036 44k INFO {'train': {'log_interval': 200, 'eval_interval': 1000, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 
'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'contentvec_final_proj': False}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256, 'ssl_dim': 768, 'n_speakers': 200}, 'spk': {'TW3_F_Yennefer_Rus': 0}, 'model_dir': './logs/44k', 'reset': False} -2023-07-23 15:35:02,464 44k INFO Loaded checkpoint './logs/44k/G_50000.pth' (iteration 254) -2023-07-23 15:35:06,092 44k INFO Loaded checkpoint './logs/44k/D_50000.pth' (iteration 254) -2023-07-23 15:38:57,114 44k INFO Train Epoch: 254 [81%] -2023-07-23 15:38:57,117 44k INFO Losses: [2.5006282329559326, 2.451428174972534, 12.801855087280273, 18.32100486755371, 0.9596075415611267], step: 50000, lr: 9.681415010614512e-05 -2023-07-23 15:39:16,091 44k INFO Saving model and optimizer state at iteration 254 to ./logs/44k/G_50000.pth -2023-07-23 15:39:18,904 44k INFO Saving model and optimizer state at iteration 254 to ./logs/44k/D_50000.pth -2023-07-23 15:40:14,257 44k INFO ====> Epoch: 254, cost 325.22 s -2023-07-23 15:42:46,726 44k INFO Train Epoch: 255 [82%] -2023-07-23 15:42:46,729 44k INFO Losses: [2.0732908248901367, 2.7142882347106934, 13.900521278381348, 19.20340347290039, 1.497022032737732], step: 50200, lr: 9.680204833738185e-05 -2023-07-23 15:43:17,880 44k INFO ====> Epoch: 255, cost 183.62 s -2023-07-23 15:45:56,022 44k INFO Train Epoch: 256 [84%] -2023-07-23 15:45:56,023 44k INFO Losses: [2.3569393157958984, 2.2816436290740967, 11.247642517089844, 19.90302848815918, 0.8953431844711304], step: 50400, lr: 9.678994808133967e-05 -2023-07-23 15:46:25,805 44k INFO ====> Epoch: 256, cost 187.92 s -2023-07-23 15:49:05,067 44k INFO Train Epoch: 257 [85%] -2023-07-23 15:49:05,069 44k INFO Losses: [2.4117980003356934, 2.393082618713379, 9.436841011047363, 19.112468719482422, 1.3574036359786987], step: 50600, lr: 9.67778493378295e-05 -2023-07-23 15:49:33,503 44k INFO ====> Epoch: 257, cost 187.70 s -2023-07-23 15:52:15,833 44k INFO Train Epoch: 258 [87%] -2023-07-23 15:52:15,836 44k INFO Losses: [2.328404664993286, 2.066641330718994, 8.805981636047363, 19.201705932617188, 0.6685563921928406], step: 50800, lr: 9.676575210666227e-05 -2023-07-23 15:52:39,808 44k INFO ====> Epoch: 258, cost 186.30 s -2023-07-23 15:55:25,504 44k INFO Train Epoch: 259 [88%] -2023-07-23 15:55:25,505 44k INFO Losses: [2.549481153488159, 2.269237995147705, 11.78179931640625, 19.138822555541992, 0.8916083574295044], step: 51000, lr: 9.675365638764893e-05 -2023-07-23 15:55:37,066 44k INFO Saving model and optimizer state at iteration 259 to ./logs/44k/G_51000.pth -2023-07-23 15:55:43,493 44k INFO Saving model and optimizer state at iteration 259 to ./logs/44k/D_51000.pth -2023-07-23 15:56:07,694 44k INFO ====> Epoch: 259, cost 207.89 s -2023-07-23 15:58:56,000 44k INFO Train Epoch: 260 [90%] -2023-07-23 15:58:56,004 44k INFO Losses: [2.397326707839966, 
2.2677500247955322, 8.892960548400879, 19.83380126953125, 0.8841097354888916], step: 51200, lr: 9.674156218060047e-05 -2023-07-23 15:59:13,963 44k INFO ====> Epoch: 260, cost 186.27 s -2023-07-23 16:02:03,849 44k INFO Train Epoch: 261 [91%] -2023-07-23 16:02:03,853 44k INFO Losses: [2.349165439605713, 2.439957618713379, 9.336631774902344, 19.867923736572266, 0.9570019245147705], step: 51400, lr: 9.67294694853279e-05 -2023-07-23 16:02:20,274 44k INFO ====> Epoch: 261, cost 186.31 s -2023-07-23 16:05:19,031 44k INFO Train Epoch: 262 [93%] -2023-07-23 16:05:19,032 44k INFO Losses: [2.3482487201690674, 2.2750866413116455, 11.509903907775879, 18.676836013793945, 0.7260769009590149], step: 51600, lr: 9.671737830164223e-05 -2023-07-23 16:05:32,752 44k INFO ====> Epoch: 262, cost 192.48 s -2023-07-23 16:08:27,425 44k INFO Train Epoch: 263 [94%] -2023-07-23 16:08:27,426 44k INFO Losses: [2.5269551277160645, 2.195035219192505, 11.456549644470215, 18.409690856933594, 0.07022977620363235], step: 51800, lr: 9.670528862935451e-05 -2023-07-23 16:08:37,948 44k INFO ====> Epoch: 263, cost 185.20 s -2023-07-23 16:11:35,538 44k INFO Train Epoch: 264 [96%] -2023-07-23 16:11:35,541 44k INFO Losses: [2.4118752479553223, 2.0938668251037598, 11.937652587890625, 18.606487274169922, 1.5529723167419434], step: 52000, lr: 9.669320046827584e-05 -2023-07-23 16:11:44,778 44k INFO Saving model and optimizer state at iteration 264 to ./logs/44k/G_52000.pth -2023-07-23 16:11:52,154 44k INFO Saving model and optimizer state at iteration 264 to ./logs/44k/D_52000.pth -2023-07-23 16:12:06,246 44k INFO ====> Epoch: 264, cost 208.30 s -2023-07-23 16:15:04,949 44k INFO Train Epoch: 265 [97%] -2023-07-23 16:15:04,951 44k INFO Losses: [2.2772390842437744, 2.206277370452881, 13.549243927001953, 20.840330123901367, 0.6336231231689453], step: 52200, lr: 9.668111381821731e-05 -2023-07-23 16:15:13,519 44k INFO ====> Epoch: 265, cost 187.27 s -2023-07-23 16:18:21,328 44k INFO Train Epoch: 266 [99%] -2023-07-23 16:18:21,330 44k INFO Losses: [2.538515090942383, 2.3972225189208984, 11.052191734313965, 17.775854110717773, 0.9715489745140076], step: 52400, lr: 9.666902867899003e-05 -2023-07-23 16:18:25,649 44k INFO ====> Epoch: 266, cost 192.13 s -2023-07-23 16:21:28,575 44k INFO ====> Epoch: 267, cost 182.93 s -2023-07-23 16:21:43,521 44k INFO Train Epoch: 268 [1%] -2023-07-23 16:21:43,522 44k INFO Losses: [2.470780372619629, 2.0563454627990723, 13.056387901306152, 19.07166290283203, 1.1480662822723389], step: 52600, lr: 9.664486293227385e-05 -2023-07-23 16:24:36,670 44k INFO ====> Epoch: 268, cost 188.10 s -2023-07-23 16:24:51,269 44k INFO Train Epoch: 269 [2%] -2023-07-23 16:24:51,270 44k INFO Losses: [2.503633975982666, 2.5406832695007324, 10.520054817199707, 18.005332946777344, 0.3653826415538788], step: 52800, lr: 9.663278232440732e-05 -2023-07-23 16:27:40,378 44k INFO ====> Epoch: 269, cost 183.71 s -2023-07-23 16:27:58,832 44k INFO Train Epoch: 270 [4%] -2023-07-23 16:27:58,834 44k INFO Losses: [2.5618903636932373, 2.510833740234375, 11.053813934326172, 21.011423110961914, 1.1412519216537476], step: 53000, lr: 9.662070322661676e-05 -2023-07-23 16:28:10,414 44k INFO Saving model and optimizer state at iteration 270 to ./logs/44k/G_53000.pth -2023-07-23 16:28:17,677 44k INFO Saving model and optimizer state at iteration 270 to ./logs/44k/D_53000.pth -2023-07-23 16:28:22,999 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_50000.pth -2023-07-23 16:28:23,006 44k INFO .. 
Free up space by deleting ckpt ./logs/44k/D_50000.pth -2023-07-23 16:31:14,803 44k INFO ====> Epoch: 270, cost 214.42 s -2023-07-23 16:31:40,891 44k INFO Train Epoch: 271 [5%] -2023-07-23 16:31:40,892 44k INFO Losses: [2.3730411529541016, 2.4882729053497314, 10.385974884033203, 18.174209594726562, 0.8806889057159424], step: 53200, lr: 9.660862563871342e-05 -2023-07-23 16:34:27,388 44k INFO ====> Epoch: 271, cost 192.58 s -2023-07-23 16:34:50,819 44k INFO Train Epoch: 272 [7%] -2023-07-23 16:34:50,821 44k INFO Losses: [2.276527166366577, 2.34232234954834, 11.215876579284668, 17.755990982055664, 0.36383286118507385], step: 53400, lr: 9.659654956050859e-05 -2023-07-23 16:37:32,969 44k INFO ====> Epoch: 272, cost 185.58 s -2023-07-23 16:37:59,770 44k INFO Train Epoch: 273 [8%] -2023-07-23 16:37:59,772 44k INFO Losses: [2.4131433963775635, 2.6605451107025146, 10.041913986206055, 19.46611785888672, 0.6789529323577881], step: 53600, lr: 9.658447499181352e-05 -2023-07-23 16:40:40,627 44k INFO ====> Epoch: 273, cost 187.66 s -2023-07-23 16:41:09,864 44k INFO Train Epoch: 274 [10%] -2023-07-23 16:41:09,867 44k INFO Losses: [2.303886651992798, 2.6071486473083496, 12.69161605834961, 21.8099308013916, 0.8257055878639221], step: 53800, lr: 9.657240193243954e-05 -2023-07-23 16:43:46,761 44k INFO ====> Epoch: 274, cost 186.13 s -2023-07-23 16:44:18,058 44k INFO Train Epoch: 275 [11%] -2023-07-23 16:44:18,061 44k INFO Losses: [2.499335289001465, 2.5340914726257324, 12.226036071777344, 19.94944190979004, 0.736632227897644], step: 54000, lr: 9.656033038219798e-05 -2023-07-23 16:44:28,897 44k INFO Saving model and optimizer state at iteration 275 to ./logs/44k/G_54000.pth -2023-07-23 16:44:35,695 44k INFO Saving model and optimizer state at iteration 275 to ./logs/44k/D_54000.pth -2023-07-23 16:47:13,561 44k INFO ====> Epoch: 275, cost 206.80 s -2023-07-23 16:47:51,423 44k INFO Train Epoch: 276 [13%] -2023-07-23 16:47:51,425 44k INFO Losses: [2.271847724914551, 2.473914861679077, 11.780071258544922, 19.351085662841797, 0.8396559357643127], step: 54200, lr: 9.65482603409002e-05 -2023-07-23 16:50:25,687 44k INFO ====> Epoch: 276, cost 192.13 s -2023-07-23 16:51:04,432 44k INFO Train Epoch: 277 [14%] -2023-07-23 16:51:04,434 44k INFO Losses: [2.4319257736206055, 2.1110024452209473, 9.59428882598877, 18.469379425048828, 0.9526942372322083], step: 54400, lr: 9.653619180835758e-05 -2023-07-23 16:53:32,129 44k INFO ====> Epoch: 277, cost 186.44 s -2023-07-23 16:54:12,551 44k INFO Train Epoch: 278 [16%] -2023-07-23 16:54:12,554 44k INFO Losses: [2.393279790878296, 2.3607327938079834, 10.10796070098877, 17.79863166809082, 0.6621531844139099], step: 54600, lr: 9.652412478438153e-05 -2023-07-23 16:56:38,322 44k INFO ====> Epoch: 278, cost 186.19 s -2023-07-23 16:57:20,104 44k INFO Train Epoch: 279 [17%] -2023-07-23 16:57:20,105 44k INFO Losses: [2.6009931564331055, 2.1398704051971436, 10.4327392578125, 17.937559127807617, 0.9320712685585022], step: 54800, lr: 9.651205926878348e-05 -2023-07-23 16:59:43,664 44k INFO ====> Epoch: 279, cost 185.34 s -2023-07-23 17:00:27,412 44k INFO Train Epoch: 280 [19%] -2023-07-23 17:00:27,414 44k INFO Losses: [2.324124336242676, 2.5362303256988525, 13.014823913574219, 19.69860076904297, 0.9871938228607178], step: 55000, lr: 9.649999526137489e-05 -2023-07-23 17:00:37,273 44k INFO Saving model and optimizer state at iteration 280 to ./logs/44k/G_55000.pth -2023-07-23 17:00:42,797 44k INFO Saving model and optimizer state at iteration 280 to ./logs/44k/D_55000.pth -2023-07-23 17:03:09,419 
-2023-07-23 17:03:09,419 44k INFO ====> Epoch: 280, cost 205.75 s -2023-07-23 17:04:01,635 44k INFO Train Epoch: 281 [20%] -2023-07-23 17:04:01,637 44k INFO Losses: [2.3632121086120605, 2.2445068359375, 9.341973304748535, 18.388296127319336, 0.9140627384185791], step: 55200, lr: 9.64879327619672e-05 -2023-07-23 17:06:20,214 44k INFO ====> Epoch: 281, cost 190.80 s -2023-07-23 17:07:10,322 44k INFO Train Epoch: 282 [22%] -2023-07-23 17:07:10,323 44k INFO Losses: [2.4476706981658936, 3.1955933570861816, 9.605997085571289, 18.07845687866211, 1.0145922899246216], step: 55400, lr: 9.647587177037196e-05 -2023-07-23 17:09:26,610 44k INFO ====> Epoch: 282, cost 186.40 s -2023-07-23 17:10:19,027 44k INFO Train Epoch: 283 [23%] -2023-07-23 17:10:19,029 44k INFO Losses: [2.517185926437378, 2.374234437942505, 9.517387390136719, 16.80596351623535, 0.6682974696159363], step: 55600, lr: 9.646381228640066e-05 -2023-07-23 17:12:31,527 44k INFO ====> Epoch: 283, cost 184.92 s -2023-07-23 17:13:26,789 44k INFO Train Epoch: 284 [25%] -2023-07-23 17:13:26,791 44k INFO Losses: [2.336336135864258, 2.5222280025482178, 10.33954906463623, 18.73029136657715, 1.232483148574829], step: 55800, lr: 9.645175430986486e-05 -2023-07-23 17:15:36,763 44k INFO ====> Epoch: 284, cost 185.24 s -2023-07-23 17:16:34,742 44k INFO Train Epoch: 285 [26%] -2023-07-23 17:16:34,745 44k INFO Losses: [2.329878568649292, 2.2614991664886475, 12.796177864074707, 19.20319175720215, 1.0678331851959229], step: 56000, lr: 9.643969784057613e-05 -2023-07-23 17:16:45,416 44k INFO Saving model and optimizer state at iteration 285 to ./logs/44k/G_56000.pth -2023-07-23 17:16:52,056 44k INFO Saving model and optimizer state at iteration 285 to ./logs/44k/D_56000.pth -2023-07-23 17:19:13,752 44k INFO ====> Epoch: 285, cost 216.99 s -2023-07-23 17:20:16,204 44k INFO Train Epoch: 286 [28%] -2023-07-23 17:20:16,206 44k INFO Losses: [2.382997512817383, 2.627403974533081, 12.796072959899902, 20.543331146240234, 0.9305292963981628], step: 56200, lr: 9.642764287834605e-05 -2023-07-23 17:22:20,857 44k INFO ====> Epoch: 286, cost 187.10 s -2023-07-23 17:23:26,048 44k INFO Train Epoch: 287 [29%] -2023-07-23 17:23:26,049 44k INFO Losses: [2.3564612865448, 2.378355026245117, 13.136396408081055, 21.580181121826172, 1.1696916818618774], step: 56400, lr: 9.641558942298625e-05 -2023-07-23 17:25:29,301 44k INFO ====> Epoch: 287, cost 188.44 s -2023-07-23 17:26:35,344 44k INFO Train Epoch: 288 [31%] -2023-07-23 17:26:35,346 44k INFO Losses: [2.3599703311920166, 2.266298532485962, 12.428869247436523, 19.423311233520508, 0.6412020325660706], step: 56600, lr: 9.640353747430838e-05 -2023-07-23 17:28:35,560 44k INFO ====> Epoch: 288, cost 186.26 s -2023-07-23 17:29:43,685 44k INFO Train Epoch: 289 [32%] -2023-07-23 17:29:43,686 44k INFO Losses: [2.49898624420166, 2.3795692920684814, 11.180850982666016, 19.291969299316406, 0.9009244441986084], step: 56800, lr: 9.639148703212408e-05 -2023-07-23 17:31:41,176 44k INFO ====> Epoch: 289, cost 185.62 s -2023-07-23 17:32:52,432 44k INFO Train Epoch: 290 [34%] -2023-07-23 17:32:52,435 44k INFO Losses: [2.3224451541900635, 2.252474308013916, 10.999974250793457, 19.144880294799805, 1.1482415199279785], step: 57000, lr: 9.637943809624507e-05 -2023-07-23 17:33:01,650 44k INFO Saving model and optimizer state at iteration 290 to ./logs/44k/G_57000.pth -2023-07-23 17:33:12,279 44k INFO Saving model and optimizer state at iteration 290 to ./logs/44k/D_57000.pth -2023-07-23 17:35:09,951 44k INFO ====> Epoch: 290, cost 208.78 s -2023-07-23 17:36:23,675 44k INFO Train
Epoch: 291 [36%] -2023-07-23 17:36:23,676 44k INFO Losses: [2.507538318634033, 2.3391826152801514, 9.28809642791748, 19.44198226928711, 1.0695492029190063], step: 57200, lr: 9.636739066648303e-05 -2023-07-23 17:38:16,305 44k INFO ====> Epoch: 291, cost 186.35 s -2023-07-23 17:39:30,746 44k INFO Train Epoch: 292 [37%] -2023-07-23 17:39:30,749 44k INFO Losses: [2.474524736404419, 2.1062235832214355, 9.697952270507812, 18.27392578125, 1.0124585628509521], step: 57400, lr: 9.635534474264972e-05 -2023-07-23 17:41:20,696 44k INFO ====> Epoch: 292, cost 184.39 s -2023-07-23 17:42:39,146 44k INFO Train Epoch: 293 [39%] -2023-07-23 17:42:39,147 44k INFO Losses: [2.671983480453491, 2.0111818313598633, 11.529107093811035, 19.126672744750977, 0.7990759015083313], step: 57600, lr: 9.634330032455689e-05 -2023-07-23 17:44:26,380 44k INFO ====> Epoch: 293, cost 185.68 s -2023-07-23 17:45:47,576 44k INFO Train Epoch: 294 [40%] -2023-07-23 17:45:47,579 44k INFO Losses: [2.416952610015869, 2.4344735145568848, 9.91821575164795, 16.681102752685547, 1.1379313468933105], step: 57800, lr: 9.633125741201631e-05 -2023-07-23 17:47:32,029 44k INFO ====> Epoch: 294, cost 185.65 s -2023-07-23 17:48:57,823 44k INFO Train Epoch: 295 [42%] -2023-07-23 17:48:57,826 44k INFO Losses: [2.5347707271575928, 2.1520907878875732, 11.146931648254395, 15.859087944030762, 1.0514731407165527], step: 58000, lr: 9.631921600483981e-05 -2023-07-23 17:49:08,172 44k INFO Saving model and optimizer state at iteration 295 to ./logs/44k/G_58000.pth -2023-07-23 17:49:15,368 44k INFO Saving model and optimizer state at iteration 295 to ./logs/44k/D_58000.pth -2023-07-23 17:51:01,501 44k INFO ====> Epoch: 295, cost 209.47 s -2023-07-23 17:52:31,683 44k INFO Train Epoch: 296 [43%] -2023-07-23 17:52:31,685 44k INFO Losses: [2.2614707946777344, 2.4410526752471924, 10.95527172088623, 17.196128845214844, 0.9657965302467346], step: 58200, lr: 9.63071761028392e-05 -2023-07-23 17:54:11,930 44k INFO ====> Epoch: 296, cost 190.43 s -2023-07-23 17:55:46,021 44k INFO Train Epoch: 297 [45%] -2023-07-23 17:55:46,022 44k INFO Losses: [2.526811122894287, 2.174917697906494, 12.177001953125, 19.455265045166016, 0.7942889928817749], step: 58400, lr: 9.629513770582634e-05 -2023-07-23 17:57:23,925 44k INFO ====> Epoch: 297, cost 192.00 s -2023-07-23 17:58:57,797 44k INFO Train Epoch: 298 [46%] -2023-07-23 17:58:57,798 44k INFO Losses: [2.372215747833252, 2.6117076873779297, 10.249738693237305, 20.46584129333496, 0.9704165458679199], step: 58600, lr: 9.628310081361311e-05 -2023-07-23 18:00:32,096 44k INFO ====> Epoch: 298, cost 188.17 s -2023-07-23 18:02:05,983 44k INFO Train Epoch: 299 [48%] -2023-07-23 18:02:05,987 44k INFO Losses: [2.385315179824829, 2.2807679176330566, 8.228663444519043, 18.46454620361328, 1.0351980924606323], step: 58800, lr: 9.627106542601141e-05 -2023-07-23 18:03:36,541 44k INFO ====> Epoch: 299, cost 184.45 s -2023-07-23 18:05:12,975 44k INFO Train Epoch: 300 [49%] -2023-07-23 18:05:12,979 44k INFO Losses: [2.3049826622009277, 2.4110960960388184, 12.713878631591797, 19.67588233947754, 1.248907446861267], step: 59000, lr: 9.625903154283315e-05 -2023-07-23 18:05:22,610 44k INFO Saving model and optimizer state at iteration 300 to ./logs/44k/G_59000.pth -2023-07-23 18:05:29,070 44k INFO Saving model and optimizer state at iteration 300 to ./logs/44k/D_59000.pth -2023-07-23 18:06:59,758 44k INFO ====> Epoch: 300, cost 203.22 s -2023-07-23 18:08:45,039 44k INFO Train Epoch: 301 [51%] -2023-07-23 18:08:45,040 44k INFO Losses: [2.662311553955078, 
2.4250283241271973, 9.681180000305176, 19.57198715209961, 0.9785237908363342], step: 59200, lr: 9.62469991638903e-05 -2023-07-23 18:10:12,539 44k INFO ====> Epoch: 301, cost 192.78 s -2023-07-23 18:12:02,738 44k INFO Train Epoch: 302 [52%] -2023-07-23 18:12:02,740 44k INFO Losses: [2.379378318786621, 2.3786263465881348, 9.8785982131958, 19.426830291748047, 1.153516173362732], step: 59400, lr: 9.62349682889948e-05 -2023-07-23 18:13:26,479 44k INFO ====> Epoch: 302, cost 193.94 s -2023-07-23 18:15:14,386 44k INFO Train Epoch: 303 [54%] -2023-07-23 18:15:14,389 44k INFO Losses: [2.4583048820495605, 2.278900623321533, 12.888322830200195, 18.916818618774414, 0.6694754958152771], step: 59600, lr: 9.622293891795867e-05 -2023-07-23 18:16:34,076 44k INFO ====> Epoch: 303, cost 187.60 s -2023-07-23 18:18:23,522 44k INFO Train Epoch: 304 [55%] -2023-07-23 18:18:23,525 44k INFO Losses: [2.2822322845458984, 2.509164810180664, 14.435027122497559, 18.952836990356445, 0.8608156442642212], step: 59800, lr: 9.621091105059392e-05 -2023-07-23 18:19:41,019 44k INFO ====> Epoch: 304, cost 186.94 s -2023-07-23 18:21:33,357 44k INFO Train Epoch: 305 [57%] -2023-07-23 18:21:33,358 44k INFO Losses: [2.193953275680542, 2.5723915100097656, 9.681042671203613, 17.258028030395508, 0.9067174196243286], step: 60000, lr: 9.619888468671259e-05 -2023-07-23 18:21:44,457 44k INFO Saving model and optimizer state at iteration 305 to ./logs/44k/G_60000.pth -2023-07-23 18:21:50,621 44k INFO Saving model and optimizer state at iteration 305 to ./logs/44k/D_60000.pth -2023-07-23 18:23:11,055 44k INFO ====> Epoch: 305, cost 210.04 s -2023-07-23 18:25:09,832 44k INFO Train Epoch: 306 [58%] -2023-07-23 18:25:09,836 44k INFO Losses: [2.5451149940490723, 2.296966791152954, 15.23921012878418, 21.432958602905273, 0.7961567044258118], step: 60200, lr: 9.618685982612675e-05 -2023-07-23 18:26:22,031 44k INFO ====> Epoch: 306, cost 190.98 s -2023-07-23 18:28:21,020 44k INFO Train Epoch: 307 [60%] -2023-07-23 18:28:21,022 44k INFO Losses: [2.492791175842285, 2.277843475341797, 6.934319972991943, 16.606689453125, 0.5684162974357605], step: 60400, lr: 9.617483646864849e-05 -2023-07-23 18:29:31,551 44k INFO ====> Epoch: 307, cost 189.52 s -2023-07-23 18:31:31,005 44k INFO Train Epoch: 308 [61%] -2023-07-23 18:31:31,008 44k INFO Losses: [2.605201482772827, 2.1228814125061035, 10.944991111755371, 18.399911880493164, 1.541266918182373], step: 60600, lr: 9.61628146140899e-05 -2023-07-23 18:32:39,452 44k INFO ====> Epoch: 308, cost 187.90 s -2023-07-23 18:34:40,398 44k INFO Train Epoch: 309 [63%] -2023-07-23 18:34:40,399 44k INFO Losses: [2.6264264583587646, 2.317626714706421, 8.517193794250488, 16.11817169189453, 0.6265290379524231], step: 60800, lr: 9.615079426226314e-05 -2023-07-23 18:35:46,449 44k INFO ====> Epoch: 309, cost 187.00 s -2023-07-23 18:37:50,266 44k INFO Train Epoch: 310 [64%] -2023-07-23 18:37:50,268 44k INFO Losses: [2.4602866172790527, 2.329652786254883, 13.169464111328125, 18.86207389831543, 0.9761765599250793], step: 61000, lr: 9.613877541298036e-05 -2023-07-23 18:38:01,923 44k INFO Saving model and optimizer state at iteration 310 to ./logs/44k/G_61000.pth -2023-07-23 18:38:11,402 44k INFO Saving model and optimizer state at iteration 310 to ./logs/44k/D_61000.pth -2023-07-23 18:39:16,510 44k INFO ====> Epoch: 310, cost 210.06 s -2023-07-23 18:41:25,651 44k INFO Train Epoch: 311 [66%] -2023-07-23 18:41:25,652 44k INFO Losses: [2.4237661361694336, 2.331591844558716, 11.513187408447266, 19.05890464782715, 1.230372428894043], step: 
61200, lr: 9.612675806605373e-05 -2023-07-23 18:42:27,146 44k INFO ====> Epoch: 311, cost 190.64 s -2023-07-23 18:44:39,501 44k INFO Train Epoch: 312 [68%] -2023-07-23 18:44:39,503 44k INFO Losses: [2.485503673553467, 2.5254502296447754, 11.545964241027832, 18.926048278808594, 0.8070792555809021], step: 61400, lr: 9.611474222129547e-05 -2023-07-23 18:45:35,335 44k INFO ====> Epoch: 312, cost 188.19 s -2023-07-23 18:47:49,191 44k INFO Train Epoch: 313 [69%] -2023-07-23 18:47:49,194 44k INFO Losses: [2.629530906677246, 2.1251657009124756, 8.076020240783691, 16.048036575317383, 1.1012743711471558], step: 61600, lr: 9.61027278785178e-05 -2023-07-23 18:48:43,448 44k INFO ====> Epoch: 313, cost 188.11 s -2023-07-23 18:50:57,655 44k INFO Train Epoch: 314 [71%] -2023-07-23 18:50:57,658 44k INFO Losses: [2.5445034503936768, 1.9853423833847046, 9.355672836303711, 18.2261962890625, 0.7159317135810852], step: 61800, lr: 9.609071503753299e-05 -2023-07-23 18:51:48,560 44k INFO ====> Epoch: 314, cost 185.11 s -2023-07-23 18:54:06,711 44k INFO Train Epoch: 315 [72%] -2023-07-23 18:54:06,714 44k INFO Losses: [2.3226876258850098, 2.618192672729492, 10.755167961120605, 19.49469566345215, 0.8190866708755493], step: 62000, lr: 9.60787036981533e-05 -2023-07-23 18:54:17,537 44k INFO Saving model and optimizer state at iteration 315 to ./logs/44k/G_62000.pth -2023-07-23 18:54:26,898 44k INFO Saving model and optimizer state at iteration 315 to ./logs/44k/D_62000.pth -2023-07-23 18:55:20,499 44k INFO ====> Epoch: 315, cost 211.94 s -2023-07-23 18:57:43,125 44k INFO Train Epoch: 316 [74%] -2023-07-23 18:57:43,127 44k INFO Losses: [2.4613702297210693, 2.139224052429199, 9.471819877624512, 16.565290451049805, 0.5542193651199341], step: 62200, lr: 9.606669386019102e-05 -2023-07-23 18:58:31,226 44k INFO ====> Epoch: 316, cost 190.73 s -2023-07-23 19:00:57,730 44k INFO Train Epoch: 317 [75%] -2023-07-23 19:00:57,732 44k INFO Losses: [2.595567226409912, 2.2232491970062256, 9.563693046569824, 17.914854049682617, 1.3369160890579224], step: 62400, lr: 9.60546855234585e-05 -2023-07-23 19:01:41,203 44k INFO ====> Epoch: 317, cost 189.98 s -2023-07-23 19:04:06,852 44k INFO Train Epoch: 318 [77%] -2023-07-23 19:04:06,854 44k INFO Losses: [2.50290584564209, 2.418386220932007, 9.415340423583984, 17.688642501831055, 0.464738667011261], step: 62600, lr: 9.604267868776807e-05 -2023-07-23 19:04:47,672 44k INFO ====> Epoch: 318, cost 186.47 s -2023-07-23 19:07:14,929 44k INFO Train Epoch: 319 [78%] -2023-07-23 19:07:14,931 44k INFO Losses: [2.540738582611084, 2.3488609790802, 10.086684226989746, 19.241172790527344, 0.7099358439445496], step: 62800, lr: 9.603067335293209e-05 -2023-07-23 19:07:53,679 44k INFO ====> Epoch: 319, cost 186.01 s -2023-07-23 19:10:23,705 44k INFO Train Epoch: 320 [80%] -2023-07-23 19:10:23,708 44k INFO Losses: [2.3135669231414795, 2.6069653034210205, 12.499679565429688, 20.924720764160156, 1.0498158931732178], step: 63000, lr: 9.601866951876297e-05 -2023-07-23 19:10:34,093 44k INFO Saving model and optimizer state at iteration 320 to ./logs/44k/G_63000.pth -2023-07-23 19:10:39,822 44k INFO Saving model and optimizer state at iteration 320 to ./logs/44k/D_63000.pth -2023-07-23 19:10:42,044 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_60000.pth -2023-07-23 19:10:42,046 44k INFO .. 
Free up space by deleting ckpt ./logs/44k/D_60000.pth -2023-07-23 19:11:20,052 44k INFO ====> Epoch: 320, cost 206.37 s -2023-07-25 04:32:28,581 44k INFO {'train': {'log_interval': 200, 'eval_interval': 1000, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'contentvec_final_proj': False}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256, 'ssl_dim': 768, 'n_speakers': 200}, 'spk': {'TW3_F_Yennefer_Rus': 0}, 'model_dir': './logs/44k', 'reset': False} -2023-07-25 04:32:58,905 44k INFO Loaded checkpoint './logs/44k/G_63000.pth' (iteration 320) -2023-07-25 04:33:20,554 44k INFO Loaded checkpoint './logs/44k/D_63000.pth' (iteration 320) -2023-07-25 04:37:12,451 44k INFO Train Epoch: 320 [80%] -2023-07-25 04:37:12,454 44k INFO Losses: [2.4755308628082275, 2.560807228088379, 10.522757530212402, 20.165172576904297, 0.4830493628978729], step: 63000, lr: 9.600666718507311e-05 -2023-07-25 04:37:31,987 44k INFO Saving model and optimizer state at iteration 320 to ./logs/44k/G_63000.pth -2023-07-25 04:37:38,602 44k INFO Saving model and optimizer state at iteration 320 to ./logs/44k/D_63000.pth -2023-07-25 04:38:36,434 44k INFO ====> Epoch: 320, cost 367.86 s -2023-07-25 04:41:09,566 44k INFO Train Epoch: 321 [81%] -2023-07-25 04:41:09,567 44k INFO Losses: [2.6450843811035156, 2.4151852130889893, 11.320324897766113, 19.250890731811523, 0.9683637619018555], step: 63200, lr: 9.599466635167497e-05 -2023-07-25 04:41:42,832 44k INFO ====> Epoch: 321, cost 186.40 s -2023-07-25 04:44:16,534 44k INFO Train Epoch: 322 [83%] -2023-07-25 04:44:16,535 44k INFO Losses: [2.456009864807129, 2.3000876903533936, 10.76156234741211, 18.032917022705078, 0.5682522058486938], step: 63400, lr: 9.5982667018381e-05 -2023-07-25 04:44:47,991 44k INFO ====> Epoch: 322, cost 185.16 s -2023-07-25 04:47:25,966 44k INFO Train Epoch: 323 [84%] -2023-07-25 04:47:25,969 44k INFO Losses: [2.4321789741516113, 2.59066104888916, 8.620502471923828, 17.63128089904785, 0.6238230466842651], step: 63600, lr: 9.59706691850037e-05 -2023-07-25 04:47:56,299 44k INFO ====> Epoch: 323, cost 188.31 s -2023-07-25 04:50:36,256 44k INFO Train Epoch: 324 [86%] -2023-07-25 04:50:36,260 44k INFO Losses: [2.469851016998291, 2.3987767696380615, 9.618183135986328, 19.413127899169922, 0.9780266284942627], step: 63800, lr: 9.595867285135558e-05 -2023-07-25 04:51:01,514 44k INFO ====> Epoch: 324, cost 185.21 s -2023-07-25 04:53:48,532 44k INFO Train Epoch: 325 [87%] -2023-07-25 04:53:48,534 44k INFO Losses: [2.3864035606384277, 2.409243583679199, 10.887566566467285, 19.46685218811035, 1.2898247241973877], step: 64000, lr: 9.594667801724916e-05 -2023-07-25 04:54:01,883 
44k INFO Saving model and optimizer state at iteration 325 to ./logs/44k/G_64000.pth -2023-07-25 04:54:07,495 44k INFO Saving model and optimizer state at iteration 325 to ./logs/44k/D_64000.pth -2023-07-25 04:54:35,947 44k INFO ====> Epoch: 325, cost 214.43 s -2023-07-25 04:57:23,761 44k INFO Train Epoch: 326 [89%] -2023-07-25 04:57:23,764 44k INFO Losses: [2.668358087539673, 2.3168489933013916, 8.468618392944336, 17.917695999145508, 0.9560359716415405], step: 64200, lr: 9.5934684682497e-05 -2023-07-25 04:57:43,388 44k INFO ====> Epoch: 326, cost 187.44 s -2023-07-25 05:00:32,806 44k INFO Train Epoch: 327 [90%] -2023-07-25 05:00:32,808 44k INFO Losses: [2.4702389240264893, 2.217118978500366, 9.208762168884277, 17.866024017333984, 0.7953405976295471], step: 64400, lr: 9.592269284691169e-05 -2023-07-25 05:00:51,547 44k INFO ====> Epoch: 327, cost 188.16 s -2023-07-25 05:03:43,614 44k INFO Train Epoch: 328 [92%] -2023-07-25 05:03:43,615 44k INFO Losses: [2.358940362930298, 2.4201741218566895, 9.801713943481445, 17.272668838500977, 1.1166034936904907], step: 64600, lr: 9.591070251030582e-05 -2023-07-25 05:03:58,724 44k INFO ====> Epoch: 328, cost 187.18 s -2023-07-25 05:06:54,329 44k INFO Train Epoch: 329 [93%] -2023-07-25 05:06:54,330 44k INFO Losses: [2.10361909866333, 2.469362258911133, 13.051758766174316, 19.3937931060791, 0.8368887305259705], step: 64800, lr: 9.589871367249203e-05 -2023-07-25 05:07:06,774 44k INFO ====> Epoch: 329, cost 188.05 s -2023-07-25 05:10:04,033 44k INFO Train Epoch: 330 [95%] -2023-07-25 05:10:04,034 44k INFO Losses: [2.5062103271484375, 2.2453105449676514, 6.781197547912598, 18.1358585357666, 1.1564602851867676], step: 65000, lr: 9.588672633328296e-05 -2023-07-25 05:10:14,606 44k INFO Saving model and optimizer state at iteration 330 to ./logs/44k/G_65000.pth -2023-07-25 05:10:17,302 44k INFO Saving model and optimizer state at iteration 330 to ./logs/44k/D_65000.pth -2023-07-25 05:10:30,678 44k INFO ====> Epoch: 330, cost 203.90 s -2023-07-25 05:13:31,893 44k INFO Train Epoch: 331 [96%] -2023-07-25 05:13:31,896 44k INFO Losses: [2.6072094440460205, 2.2643566131591797, 7.527828693389893, 17.33403778076172, 0.6983505487442017], step: 65200, lr: 9.58747404924913e-05 -2023-07-25 05:13:39,867 44k INFO ====> Epoch: 331, cost 189.19 s -2023-07-25 05:16:41,347 44k INFO Train Epoch: 332 [98%] -2023-07-25 05:16:41,348 44k INFO Losses: [2.306040048599243, 2.525367021560669, 12.983407974243164, 19.410629272460938, 1.332772970199585], step: 65400, lr: 9.586275614992974e-05 -2023-07-25 05:16:46,102 44k INFO ====> Epoch: 332, cost 186.24 s -2023-07-25 05:19:50,555 44k INFO Train Epoch: 333 [99%] -2023-07-25 05:19:50,558 44k INFO Losses: [2.4494409561157227, 2.6265485286712646, 12.475065231323242, 17.362764358520508, 0.7863194346427917], step: 65600, lr: 9.5850773305411e-05 -2023-07-25 05:19:54,500 44k INFO ====> Epoch: 333, cost 188.40 s -2023-07-25 05:22:59,584 44k INFO ====> Epoch: 334, cost 185.08 s -2023-07-25 05:23:11,921 44k INFO Train Epoch: 335 [1%] -2023-07-25 05:23:11,922 44k INFO Losses: [2.4836840629577637, 2.477837085723877, 12.025009155273438, 19.844097137451172, 0.963062047958374], step: 65800, lr: 9.582681210975297e-05 -2023-07-25 05:26:04,091 44k INFO ====> Epoch: 335, cost 184.51 s -2023-07-25 05:26:23,648 44k INFO Train Epoch: 336 [3%] -2023-07-25 05:26:23,651 44k INFO Losses: [2.4119462966918945, 2.3300843238830566, 9.911319732666016, 21.856515884399414, 0.9191834926605225], step: 66000, lr: 9.581483375823925e-05 -2023-07-25 05:26:34,867 44k INFO Saving 
model and optimizer state at iteration 336 to ./logs/44k/G_66000.pth -2023-07-25 05:26:37,439 44k INFO Saving model and optimizer state at iteration 336 to ./logs/44k/D_66000.pth -2023-07-25 05:26:39,952 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_63000.pth -2023-07-25 05:26:39,956 44k INFO .. Free up space by deleting ckpt ./logs/44k/D_63000.pth -2023-07-25 05:29:29,917 44k INFO ====> Epoch: 336, cost 205.83 s -2023-07-25 05:29:55,244 44k INFO Train Epoch: 337 [4%] -2023-07-25 05:29:55,248 44k INFO Losses: [2.344330310821533, 2.5253913402557373, 10.763432502746582, 18.028865814208984, 1.0815895795822144], step: 66200, lr: 9.580285690401946e-05 -2023-07-25 05:32:40,972 44k INFO ====> Epoch: 337, cost 191.06 s -2023-07-25 05:33:04,475 44k INFO Train Epoch: 338 [6%] -2023-07-25 05:33:04,478 44k INFO Losses: [2.3504371643066406, 2.7320613861083984, 11.902974128723145, 20.223026275634766, 0.8907226920127869], step: 66400, lr: 9.579088154690645e-05 -2023-07-25 05:35:48,938 44k INFO ====> Epoch: 338, cost 187.97 s -2023-07-25 05:36:12,431 44k INFO Train Epoch: 339 [7%] -2023-07-25 05:36:12,432 44k INFO Losses: [2.150118827819824, 2.484757423400879, 17.15068817138672, 19.631168365478516, 0.7204747200012207], step: 66600, lr: 9.577890768671308e-05 -2023-07-25 05:38:56,085 44k INFO ====> Epoch: 339, cost 187.15 s -2023-07-25 05:39:20,793 44k INFO Train Epoch: 340 [9%] -2023-07-25 05:39:20,796 44k INFO Losses: [2.3851585388183594, 2.600836992263794, 10.298863410949707, 18.88875389099121, 0.5256041884422302], step: 66800, lr: 9.576693532325224e-05 -2023-07-25 05:42:01,828 44k INFO ====> Epoch: 340, cost 185.74 s -2023-07-25 05:42:31,455 44k INFO Train Epoch: 341 [10%] -2023-07-25 05:42:31,459 44k INFO Losses: [2.291593074798584, 2.549273729324341, 13.476722717285156, 18.71889305114746, 1.328331470489502], step: 67000, lr: 9.575496445633683e-05 -2023-07-25 05:42:41,738 44k INFO Saving model and optimizer state at iteration 341 to ./logs/44k/G_67000.pth -2023-07-25 05:42:44,453 44k INFO Saving model and optimizer state at iteration 341 to ./logs/44k/D_67000.pth -2023-07-25 05:42:46,932 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_64000.pth
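The paired '.. Free up space by deleting ckpt' entries above and below are checkpoint rotation at work: the config sets 'keep_ckpts': 3, so saving G_67000/D_67000 deletes G_64000/D_64000 and leaves only the three newest step-numbered pairs on disk. A minimal sketch of such a rotation, assuming step-numbered filenames like G_67000.pth (the function name and messages are illustrative, not the repo's exact utilities):

```python
import os
import re
from glob import glob

def clean_checkpoints(ckpt_dir: str = "./logs/44k", keep: int = 3) -> None:
    """Delete all but the `keep` newest step-numbered G_/D_ checkpoints."""
    step = lambda path: int(re.search(r"_(\d+)\.pth$", path).group(1))
    for prefix in ("G", "D"):
        ckpts = sorted(glob(os.path.join(ckpt_dir, f"{prefix}_*.pth")), key=step)
        for old in ckpts[:-keep]:  # everything except the `keep` newest
            print(f".. Free up space by deleting ckpt {old}")
            os.remove(old)
```

Called after each save, this reproduces the cadence seen here: one G_/D_ deletion pair immediately after each 'Saving model and optimizer state' pair, once more than three checkpoints exist.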
-2023-07-25 05:42:46,942 44k INFO .. Free up space by deleting ckpt ./logs/44k/D_64000.pth -2023-07-25 05:45:24,707 44k INFO ====> Epoch: 341, cost 202.88 s -2023-07-25 05:45:55,231 44k INFO Train Epoch: 342 [12%] -2023-07-25 05:45:55,232 44k INFO Losses: [2.2476789951324463, 2.5529556274414062, 10.07763671875, 18.930885314941406, 0.42706063389778137], step: 67200, lr: 9.574299508577979e-05 -2023-07-25 05:48:30,246 44k INFO ====> Epoch: 342, cost 185.54 s -2023-07-25 05:49:05,023 44k INFO Train Epoch: 343 [13%] -2023-07-25 05:49:05,026 44k INFO Losses: [2.280576705932617, 2.378415584564209, 10.447707176208496, 17.203716278076172, 0.9522852897644043], step: 67400, lr: 9.573102721139406e-05 -2023-07-25 05:51:36,926 44k INFO ====> Epoch: 343, cost 186.68 s -2023-07-25 05:52:16,085 44k INFO Train Epoch: 344 [15%] -2023-07-25 05:52:16,089 44k INFO Losses: [2.2930469512939453, 2.6356635093688965, 11.134135246276855, 16.61655616760254, 0.8131645917892456], step: 67600, lr: 9.571906083299264e-05 -2023-07-25 05:54:44,337 44k INFO ====> Epoch: 344, cost 187.41 s -2023-07-25 05:55:24,732 44k INFO Train Epoch: 345 [16%] -2023-07-25 05:55:24,736 44k INFO Losses: [2.2068958282470703, 2.3613553047180176, 10.962515830993652, 18.869834899902344, 0.593727171421051], step: 67800, lr: 9.570709595038851e-05 -2023-07-25 05:57:50,588 44k INFO ====> Epoch: 345, cost 186.25 s -2023-07-25 05:58:33,807 44k INFO Train Epoch: 346 [18%] -2023-07-25 05:58:33,808 44k INFO Losses: [2.4339630603790283, 2.423616886138916, 12.73266315460205, 17.957508087158203, 0.9371153116226196], step: 68000, lr: 9.569513256339471e-05 -2023-07-25 05:58:42,229 44k INFO Saving model and optimizer state at iteration 346 to ./logs/44k/G_68000.pth -2023-07-25 05:58:46,368 44k INFO Saving model and optimizer state at iteration 346 to ./logs/44k/D_68000.pth -2023-07-25 05:58:52,382 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_65000.pth
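On the five-element 'Losses' arrays that appear throughout: in so-vits-svc-style VITS training these are, in order, the discriminator loss, the generator adversarial loss, the feature-matching loss, the weighted mel reconstruction loss, and the weighted KL loss, with 'c_mel': 45 and 'c_kl': 1.0 from the config applied before logging. Under that (assumed) reading, the generator objective is the sum of the last four logged values, as in this illustrative sketch:

```python
# Sketch of the generator objective implied by the config weights.
# Assumed logging order: [loss_disc, loss_gen, loss_fm, loss_mel, loss_kl],
# with loss_mel and loss_kl already scaled by c_mel=45 and c_kl=1.0.
def generator_total(loss_gen: float, loss_fm: float,
                    loss_mel: float, loss_kl: float) -> float:
    return loss_gen + loss_fm + loss_mel + loss_kl

# Example with the 'Train Epoch: 341' entry above (first value is loss_disc):
print(generator_total(2.549273729324341, 13.476722717285156,
                      18.71889305114746, 1.328331470489502))
```

The mel term hovering around 15-22 while the KL term sits near 1 is consistent with the 45x mel weighting.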
-2023-07-25 05:58:52,386 44k INFO .. Free up space by deleting ckpt ./logs/44k/D_65000.pth -2023-07-25 06:01:16,970 44k INFO ====> Epoch: 346, cost 206.38 s -2023-07-25 06:02:07,515 44k INFO Train Epoch: 347 [19%] -2023-07-25 06:02:07,518 44k INFO Losses: [2.4101009368896484, 2.304386615753174, 11.058842658996582, 18.896120071411133, 0.8654371500015259], step: 68200, lr: 9.568317067182427e-05 -2023-07-25 06:04:27,945 44k INFO ====> Epoch: 347, cost 190.97 s -2023-07-25 06:05:17,585 44k INFO Train Epoch: 348 [21%] -2023-07-25 06:05:17,586 44k INFO Losses: [2.3713455200195312, 2.050567150115967, 12.743279457092285, 15.604907989501953, 0.22549527883529663], step: 68400, lr: 9.56712102754903e-05 -2023-07-25 06:07:36,806 44k INFO ====> Epoch: 348, cost 188.86 s -2023-07-25 06:08:28,105 44k INFO Train Epoch: 349 [22%] -2023-07-25 06:08:28,106 44k INFO Losses: [2.5697216987609863, 2.192610025405884, 9.771305084228516, 17.90059471130371, 1.240525484085083], step: 68600, lr: 9.565925137420586e-05 -2023-07-25 06:10:43,527 44k INFO ====> Epoch: 349, cost 186.72 s -2023-07-25 06:11:36,559 44k INFO Train Epoch: 350 [24%] -2023-07-25 06:11:36,561 44k INFO Losses: [2.4415194988250732, 2.2942707538604736, 10.788424491882324, 19.233196258544922, 1.1866183280944824], step: 68800, lr: 9.564729396778408e-05 -2023-07-25 06:13:48,516 44k INFO ====> Epoch: 350, cost 184.99 s -2023-07-25 06:14:44,917 44k INFO Train Epoch: 351 [25%] -2023-07-25 06:14:44,920 44k INFO Losses: [2.4401888847351074, 2.145780563354492, 10.189545631408691, 19.390884399414062, 0.8166384100914001], step: 69000, lr: 9.56353380560381e-05 -2023-07-25 06:14:53,564 44k INFO Saving model and optimizer state at iteration 351 to ./logs/44k/G_69000.pth -2023-07-25 06:14:59,813 44k INFO Saving model and optimizer state at iteration 351 to ./logs/44k/D_69000.pth -2023-07-25 06:17:23,104 44k INFO ====> Epoch: 351, cost 214.59 s -2023-07-25 06:18:26,469 44k INFO Train Epoch: 352 [27%] -2023-07-25 06:18:26,470 44k INFO Losses: [2.2746634483337402, 2.681729316711426, 13.39730453491211, 17.034122467041016, 1.0289024114608765], step: 69200, lr: 9.562338363878108e-05 -2023-07-25 06:20:35,982 44k INFO ====> Epoch: 352, cost 192.88 s -2023-07-25 06:21:36,807 44k INFO Train Epoch: 353 [28%] -2023-07-25 06:21:36,808 44k INFO Losses: [2.3481128215789795, 2.3594095706939697, 12.693611145019531, 19.1501522064209, 0.7412102222442627], step: 69400, lr: 9.561143071582622e-05 -2023-07-25 06:23:42,673 44k INFO ====> Epoch: 353, cost 186.69 s -2023-07-25 06:24:47,822 44k INFO Train Epoch: 354 [30%] -2023-07-25 06:24:47,825 44k INFO Losses: [2.5662434101104736, 2.3190135955810547, 7.882725238800049, 15.24366569519043, 0.9431986212730408], step: 69600, lr: 9.559947928698674e-05 -2023-07-25 06:26:49,956 44k INFO ====> Epoch: 354, cost 187.28 s -2023-07-25 06:27:57,055 44k INFO Train Epoch: 355 [31%] -2023-07-25 06:27:57,058 44k INFO Losses: [2.4100828170776367, 2.5168848037719727, 12.037881851196289, 19.430583953857422, 1.2886271476745605], step: 69800, lr: 9.558752935207586e-05 -2023-07-25 06:29:56,337 44k INFO ====> Epoch: 355, cost 186.38 s -2023-07-25 06:31:06,530 44k INFO Train Epoch: 356 [33%] -2023-07-25 06:31:06,531 44k INFO Losses: [2.412853717803955, 2.2133233547210693, 11.89167594909668, 17.058992385864258, 0.7370908856391907], step: 70000, lr: 9.557558091090685e-05 -2023-07-25 06:31:15,411 44k INFO Saving model and optimizer state at iteration 356 to ./logs/44k/G_70000.pth -2023-07-25 06:31:22,638 44k INFO Saving model and optimizer state at iteration 356 to ./logs/44k/D_70000.pth -2023-07-25 
06:33:28,438 44k INFO ====> Epoch: 356, cost 212.10 s -2023-07-25 06:34:44,381 44k INFO Train Epoch: 357 [35%] -2023-07-25 06:34:44,384 44k INFO Losses: [2.3527073860168457, 2.5826430320739746, 9.227700233459473, 15.25343132019043, 0.7002070546150208], step: 70200, lr: 9.556363396329299e-05 -2023-07-25 06:36:39,477 44k INFO ====> Epoch: 357, cost 191.04 s -2023-07-25 06:37:54,206 44k INFO Train Epoch: 358 [36%] -2023-07-25 06:37:54,208 44k INFO Losses: [2.3272156715393066, 2.4262313842773438, 10.491996765136719, 18.83709144592285, 1.0133968591690063], step: 70400, lr: 9.555168850904757e-05 -2023-07-25 06:39:47,439 44k INFO ====> Epoch: 358, cost 187.96 s -2023-07-25 06:41:05,874 44k INFO Train Epoch: 359 [38%] -2023-07-25 06:41:05,876 44k INFO Losses: [2.479978561401367, 2.5771331787109375, 12.192023277282715, 19.01509666442871, 1.0618306398391724], step: 70600, lr: 9.553974454798393e-05 -2023-07-25 06:42:54,068 44k INFO ====> Epoch: 359, cost 186.63 s -2023-07-25 06:44:17,181 44k INFO Train Epoch: 360 [39%] -2023-07-25 06:44:17,185 44k INFO Losses: [2.3728456497192383, 2.445255994796753, 12.724091529846191, 17.893760681152344, 1.0049232244491577], step: 70800, lr: 9.552780207991543e-05 -2023-07-25 06:46:02,754 44k INFO ====> Epoch: 360, cost 188.69 s -2023-07-25 06:47:25,844 44k INFO Train Epoch: 361 [41%] -2023-07-25 06:47:25,846 44k INFO Losses: [2.54620099067688, 2.375173807144165, 10.275221824645996, 16.74363899230957, 0.9074703454971313], step: 71000, lr: 9.551586110465545e-05 -2023-07-25 06:47:35,239 44k INFO Saving model and optimizer state at iteration 361 to ./logs/44k/G_71000.pth -2023-07-25 06:47:43,231 44k INFO Saving model and optimizer state at iteration 361 to ./logs/44k/D_71000.pth -2023-07-25 06:49:38,979 44k INFO ====> Epoch: 361, cost 216.22 s -2023-07-25 06:51:04,868 44k INFO Train Epoch: 362 [42%] -2023-07-25 06:51:04,872 44k INFO Losses: [2.295466661453247, 2.5344576835632324, 10.11965274810791, 18.698637008666992, 0.5215442180633545], step: 71200, lr: 9.550392162201736e-05 -2023-07-25 06:52:46,094 44k INFO ====> Epoch: 362, cost 187.12 s -2023-07-25 06:54:13,630 44k INFO Train Epoch: 363 [44%] -2023-07-25 06:54:13,632 44k INFO Losses: [2.5126471519470215, 2.4093995094299316, 11.958312034606934, 19.293842315673828, 0.9449364542961121], step: 71400, lr: 9.54919836318146e-05 -2023-07-25 06:55:51,160 44k INFO ====> Epoch: 363, cost 185.07 s -2023-07-25 06:57:24,568 44k INFO Train Epoch: 364 [45%] -2023-07-25 06:57:24,570 44k INFO Losses: [2.5758466720581055, 2.3708279132843018, 9.332837104797363, 17.976634979248047, 0.8237696290016174], step: 71600, lr: 9.548004713386062e-05 -2023-07-25 06:59:01,036 44k INFO ====> Epoch: 364, cost 189.88 s -2023-07-25 07:00:38,759 44k INFO Train Epoch: 365 [47%] -2023-07-25 07:00:38,760 44k INFO Losses: [2.7071053981781006, 2.4011707305908203, 11.147781372070312, 17.48169708251953, 1.3482880592346191], step: 71800, lr: 9.546811212796888e-05 -2023-07-25 07:02:11,414 44k INFO ====> Epoch: 365, cost 190.38 s -2023-07-25 07:03:49,298 44k INFO Train Epoch: 366 [48%] -2023-07-25 07:03:49,301 44k INFO Losses: [2.3631467819213867, 2.2876853942871094, 10.987893104553223, 17.779644012451172, 0.6389096975326538], step: 72000, lr: 9.545617861395288e-05 -2023-07-25 07:04:00,938 44k INFO Saving model and optimizer state at iteration 366 to ./logs/44k/G_72000.pth -2023-07-25 07:04:07,061 44k INFO Saving model and optimizer state at iteration 366 to ./logs/44k/D_72000.pth -2023-07-25 07:05:44,746 44k INFO ====> Epoch: 366, cost 213.33 s -2023-07-25 
07:07:22,398 44k INFO Train Epoch: 367 [50%] -2023-07-25 07:07:22,401 44k INFO Losses: [2.623826742172241, 2.2868399620056152, 8.501319885253906, 16.78904151916504, 1.2384706735610962], step: 72200, lr: 9.544424659162614e-05 -2023-07-25 07:08:49,239 44k INFO ====> Epoch: 367, cost 184.49 s -2023-07-25 07:10:29,279 44k INFO Train Epoch: 368 [51%] -2023-07-25 07:10:29,281 44k INFO Losses: [2.5172901153564453, 2.168010711669922, 10.486604690551758, 19.0184383392334, 0.9385050535202026], step: 72400, lr: 9.543231606080218e-05 -2023-07-25 07:11:53,997 44k INFO ====> Epoch: 368, cost 184.76 s -2023-07-25 07:13:40,027 44k INFO Train Epoch: 369 [53%] -2023-07-25 07:13:40,030 44k INFO Losses: [2.3425192832946777, 2.5317769050598145, 12.643417358398438, 19.631505966186523, 1.1961934566497803], step: 72600, lr: 9.542038702129457e-05 -2023-07-25 07:15:01,995 44k INFO ====> Epoch: 369, cost 188.00 s -2023-07-25 07:16:49,797 44k INFO Train Epoch: 370 [54%] -2023-07-25 07:16:49,799 44k INFO Losses: [2.5633962154388428, 2.273432493209839, 9.805071830749512, 15.981773376464844, 0.7708781957626343], step: 72800, lr: 9.540845947291691e-05 -2023-07-25 07:18:08,693 44k INFO ====> Epoch: 370, cost 186.70 s -2023-07-25 07:19:58,958 44k INFO Train Epoch: 371 [56%] -2023-07-25 07:19:58,959 44k INFO Losses: [2.295053720474243, 2.416804075241089, 12.382322311401367, 16.39413070678711, 0.7347373366355896], step: 73000, lr: 9.53965334154828e-05 -2023-07-25 07:20:09,233 44k INFO Saving model and optimizer state at iteration 371 to ./logs/44k/G_73000.pth -2023-07-25 07:20:16,348 44k INFO Saving model and optimizer state at iteration 371 to ./logs/44k/D_73000.pth -2023-07-25 07:21:45,664 44k INFO ====> Epoch: 371, cost 216.97 s -2023-07-25 07:23:38,147 44k INFO Train Epoch: 372 [57%] -2023-07-25 07:23:38,149 44k INFO Losses: [2.3918468952178955, 2.517549753189087, 12.197556495666504, 19.762638092041016, 0.7099893689155579], step: 73200, lr: 9.538460884880585e-05 -2023-07-25 07:24:51,991 44k INFO ====> Epoch: 372, cost 186.33 s -2023-07-25 07:26:49,943 44k INFO Train Epoch: 373 [59%] -2023-07-25 07:26:49,944 44k INFO Losses: [2.242967367172241, 2.594575881958008, 13.283071517944336, 19.800640106201172, 1.052377700805664], step: 73400, lr: 9.537268577269974e-05 -2023-07-25 07:28:01,266 44k INFO ====> Epoch: 373, cost 189.27 s -2023-07-25 07:30:01,068 44k INFO Train Epoch: 374 [60%] -2023-07-25 07:30:01,070 44k INFO Losses: [2.4079644680023193, 2.740478992462158, 8.777030944824219, 17.964954376220703, 1.0440467596054077], step: 73600, lr: 9.536076418697815e-05 -2023-07-25 07:31:09,860 44k INFO ====> Epoch: 374, cost 188.59 s -2023-07-25 07:33:12,590 44k INFO Train Epoch: 375 [62%] -2023-07-25 07:33:12,593 44k INFO Losses: [2.283552408218384, 2.487022876739502, 10.761653900146484, 15.361309051513672, 0.7748256325721741], step: 73800, lr: 9.534884409145477e-05 -2023-07-25 07:34:20,182 44k INFO ====> Epoch: 375, cost 190.32 s -2023-07-25 07:36:24,626 44k INFO Train Epoch: 376 [63%] -2023-07-25 07:36:24,628 44k INFO Losses: [2.4786722660064697, 2.14839243888855, 11.000462532043457, 19.69455337524414, 1.0500060319900513], step: 74000, lr: 9.533692548594333e-05 -2023-07-25 07:36:35,002 44k INFO Saving model and optimizer state at iteration 376 to ./logs/44k/G_74000.pth -2023-07-25 07:36:41,582 44k INFO Saving model and optimizer state at iteration 376 to ./logs/44k/D_74000.pth -2023-07-25 07:37:47,943 44k INFO ====> Epoch: 376, cost 207.76 s -2023-07-25 07:39:55,179 44k INFO Train Epoch: 377 [65%] -2023-07-25 07:39:55,182 44k 
INFO Losses: [2.4232406616210938, 2.3825604915618896, 9.54179573059082, 17.841821670532227, 1.085039734840393], step: 74200, lr: 9.532500837025758e-05 -2023-07-25 07:40:56,355 44k INFO ====> Epoch: 377, cost 188.41 s -2023-07-26 08:01:58,751 44k INFO {'train': {'log_interval': 200, 'eval_interval': 1000, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'contentvec_final_proj': False}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256, 'ssl_dim': 768, 'n_speakers': 200}, 'spk': {'TW3_F_Yennefer_Rus': 0}, 'model_dir': './logs/44k', 'reset': False} -2023-07-26 08:02:17,305 44k INFO Loaded checkpoint './logs/44k/G_74000.pth' (iteration 376) -2023-07-26 08:02:25,016 44k INFO Loaded checkpoint './logs/44k/D_74000.pth' (iteration 376) -2023-07-26 08:05:45,462 44k INFO Train Epoch: 376 [63%] -2023-07-26 08:05:45,464 44k INFO Losses: [2.3638358116149902, 2.3754262924194336, 10.034878730773926, 17.8461971282959, 1.448283076286316], step: 74000, lr: 9.532500837025758e-05 -2023-07-26 08:06:07,572 44k INFO Saving model and optimizer state at iteration 376 to ./logs/44k/G_74000.pth -2023-07-26 08:06:13,607 44k INFO Saving model and optimizer state at iteration 376 to ./logs/44k/D_74000.pth -2023-07-26 08:08:03,183 44k INFO ====> Epoch: 376, cost 364.44 s -2023-07-26 08:10:14,929 44k INFO Train Epoch: 377 [65%] -2023-07-26 08:10:14,932 44k INFO Losses: [2.3450469970703125, 2.51840877532959, 10.93001937866211, 19.003936767578125, 0.9009534120559692], step: 74200, lr: 9.53130927442113e-05 -2023-07-26 08:11:18,656 44k INFO ====> Epoch: 377, cost 195.47 s -2023-07-26 08:13:33,963 44k INFO Train Epoch: 378 [66%] -2023-07-26 08:13:33,965 44k INFO Losses: [2.1338295936584473, 2.514827013015747, 13.825456619262695, 18.869230270385742, 1.2399251461029053], step: 74400, lr: 9.530117860761828e-05 -2023-07-26 08:14:35,706 44k INFO ====> Epoch: 378, cost 197.05 s -2023-07-26 08:16:53,214 44k INFO Train Epoch: 379 [68%] -2023-07-26 08:16:53,215 44k INFO Losses: [2.3942079544067383, 2.3390238285064697, 13.977034568786621, 19.0222110748291, 0.9333906769752502], step: 74600, lr: 9.528926596029232e-05 -2023-07-26 08:17:50,988 44k INFO ====> Epoch: 379, cost 195.28 s -2023-07-26 08:20:11,894 44k INFO Train Epoch: 380 [70%] -2023-07-26 08:20:11,895 44k INFO Losses: [2.3517441749572754, 2.456390380859375, 13.00299072265625, 18.95575523376465, 0.7457544803619385], step: 74800, lr: 9.527735480204728e-05 -2023-07-26 08:21:08,583 44k INFO ====> Epoch: 380, cost 197.59 s -2023-07-26 08:23:32,636 44k INFO Train Epoch: 381 [71%] -2023-07-26 08:23:32,638 44k INFO Losses: [2.2392730712890625, 2.3343048095703125, 11.798629760742188, 
18.45540428161621, 0.5148274898529053], step: 75000, lr: 9.526544513269702e-05 -2023-07-26 08:23:49,743 44k INFO Saving model and optimizer state at iteration 381 to ./logs/44k/G_75000.pth -2023-07-26 08:23:57,226 44k INFO Saving model and optimizer state at iteration 381 to ./logs/44k/D_75000.pth -2023-07-26 08:24:58,210 44k INFO ====> Epoch: 381, cost 229.63 s -2023-07-26 08:27:26,186 44k INFO Train Epoch: 382 [73%] -2023-07-26 08:27:26,188 44k INFO Losses: [2.277895927429199, 2.632558584213257, 11.635198593139648, 16.588829040527344, 0.6267083883285522], step: 75200, lr: 9.525353695205543e-05 -2023-07-26 08:28:16,594 44k INFO ====> Epoch: 382, cost 198.38 s -2023-07-26 08:30:46,158 44k INFO Train Epoch: 383 [74%] -2023-07-26 08:30:46,160 44k INFO Losses: [2.3193912506103516, 2.5529026985168457, 9.73926830291748, 18.459184646606445, 1.0458101034164429], step: 75400, lr: 9.524163025993642e-05 -2023-07-26 08:31:36,461 44k INFO ====> Epoch: 383, cost 199.87 s -2023-07-26 08:34:11,651 44k INFO Train Epoch: 384 [76%] -2023-07-26 08:34:11,653 44k INFO Losses: [2.059798002243042, 2.5544281005859375, 12.06481647491455, 19.02806282043457, 0.37545275688171387], step: 75600, lr: 9.522972505615393e-05 -2023-07-26 08:34:56,325 44k INFO ====> Epoch: 384, cost 199.86 s -2023-07-26 08:37:31,204 44k INFO Train Epoch: 385 [77%] -2023-07-26 08:37:31,206 44k INFO Losses: [2.2649776935577393, 2.7230381965637207, 13.847853660583496, 19.81093978881836, 0.9529355764389038], step: 75800, lr: 9.52178213405219e-05 -2023-07-26 08:38:14,175 44k INFO ====> Epoch: 385, cost 197.85 s -2023-07-26 08:40:50,451 44k INFO Train Epoch: 386 [79%] -2023-07-26 08:40:50,453 44k INFO Losses: [2.3942623138427734, 2.2140557765960693, 9.678873062133789, 15.956775665283203, 0.8609799742698669], step: 76000, lr: 9.520591911285433e-05 -2023-07-26 08:41:03,189 44k INFO Saving model and optimizer state at iteration 386 to ./logs/44k/G_76000.pth -2023-07-26 08:41:14,572 44k INFO Saving model and optimizer state at iteration 386 to ./logs/44k/D_76000.pth -2023-07-26 08:42:03,907 44k INFO ====> Epoch: 386, cost 229.73 s -2023-07-26 08:44:42,703 44k INFO Train Epoch: 387 [80%] -2023-07-26 08:44:42,704 44k INFO Losses: [2.1539833545684814, 2.635955333709717, 11.767909049987793, 18.495405197143555, 0.7559518218040466], step: 76200, lr: 9.519401837296521e-05 -2023-07-26 08:45:20,141 44k INFO ====> Epoch: 387, cost 196.23 s -2023-07-26 08:48:04,830 44k INFO Train Epoch: 388 [82%] -2023-07-26 08:48:04,832 44k INFO Losses: [2.123274803161621, 2.40275502204895, 12.962637901306152, 20.919008255004883, 0.7387451529502869], step: 76400, lr: 9.51821191206686e-05 -2023-07-26 08:48:40,476 44k INFO ====> Epoch: 388, cost 200.34 s -2023-07-26 08:51:24,742 44k INFO Train Epoch: 389 [83%] -2023-07-26 08:51:24,744 44k INFO Losses: [2.5036110877990723, 2.6306521892547607, 13.168987274169922, 19.309648513793945, 0.5111571550369263], step: 76600, lr: 9.517022135577851e-05 -2023-07-26 08:51:56,602 44k INFO ====> Epoch: 389, cost 196.13 s -2023-07-26 08:54:44,703 44k INFO Train Epoch: 390 [85%] -2023-07-26 08:54:44,704 44k INFO Losses: [2.0840861797332764, 2.5199503898620605, 14.506942749023438, 19.734844207763672, 0.6727297306060791], step: 76800, lr: 9.515832507810904e-05 -2023-07-26 08:55:14,148 44k INFO ====> Epoch: 390, cost 197.55 s -2023-07-26 08:58:03,799 44k INFO Train Epoch: 391 [86%] -2023-07-26 08:58:03,800 44k INFO Losses: [2.4027998447418213, 2.341224431991577, 14.025223731994629, 17.66230583190918, 1.1469920873641968], step: 77000, lr: 
9.514643028747427e-05 -2023-07-26 08:58:14,816 44k INFO Saving model and optimizer state at iteration 391 to ./logs/44k/G_77000.pth -2023-07-26 08:58:22,214 44k INFO Saving model and optimizer state at iteration 391 to ./logs/44k/D_77000.pth -2023-07-26 08:58:24,795 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_74000.pth -2023-07-26 08:58:24,808 44k INFO .. Free up space by deleting ckpt ./logs/44k/D_74000.pth -2023-07-26 08:58:56,376 44k INFO ====> Epoch: 391, cost 222.23 s -2023-07-26 09:01:52,459 44k INFO Train Epoch: 392 [88%] -2023-07-26 09:01:52,461 44k INFO Losses: [2.4997217655181885, 2.330348491668701, 10.354461669921875, 18.0952205657959, 0.9968717098236084], step: 77200, lr: 9.513453698368834e-05 -2023-07-26 09:02:20,202 44k INFO ====> Epoch: 392, cost 203.83 s -2023-07-26 09:05:20,552 44k INFO Train Epoch: 393 [89%] -2023-07-26 09:05:20,556 44k INFO Losses: [2.1763527393341064, 2.7814338207244873, 12.128959655761719, 18.629077911376953, 1.0919145345687866], step: 77400, lr: 9.512264516656537e-05 -2023-07-26 09:05:42,833 44k INFO ====> Epoch: 393, cost 202.63 s -2023-07-26 09:08:42,559 44k INFO Train Epoch: 394 [91%] -2023-07-26 09:08:42,561 44k INFO Losses: [2.079793930053711, 2.4538357257843018, 11.63211727142334, 19.26938819885254, 1.1916217803955078], step: 77600, lr: 9.511075483591955e-05 -2023-07-26 09:09:01,628 44k INFO ====> Epoch: 394, cost 198.80 s -2023-07-26 09:12:03,905 44k INFO Train Epoch: 395 [92%] -2023-07-26 09:12:03,907 44k INFO Losses: [2.3260679244995117, 2.4173195362091064, 11.886085510253906, 19.080217361450195, 0.81728595495224], step: 77800, lr: 9.509886599156505e-05 -2023-07-26 09:12:18,908 44k INFO ====> Epoch: 395, cost 197.28 s -2023-07-26 09:15:23,312 44k INFO Train Epoch: 396 [94%] -2023-07-26 09:15:23,313 44k INFO Losses: [2.3237197399139404, 2.540574312210083, 12.213521957397461, 19.58467674255371, 1.7184557914733887], step: 78000, lr: 9.508697863331611e-05 -2023-07-26 09:15:39,652 44k INFO Saving model and optimizer state at iteration 396 to ./logs/44k/G_78000.pth -2023-07-26 09:15:46,828 44k INFO Saving model and optimizer state at iteration 396 to ./logs/44k/D_78000.pth -2023-07-26 09:15:56,553 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_75000.pth -2023-07-26 09:15:56,556 44k INFO .. 
Free up space by deleting ckpt ./logs/44k/D_75000.pth -2023-07-26 09:16:12,190 44k INFO ====> Epoch: 396, cost 233.28 s -2023-07-26 09:19:15,379 44k INFO Train Epoch: 397 [95%] -2023-07-26 09:19:15,381 44k INFO Losses: [2.6478800773620605, 2.5472280979156494, 12.878994941711426, 17.613691329956055, 0.8839368224143982], step: 78200, lr: 9.507509276098694e-05 -2023-07-26 09:19:25,712 44k INFO ====> Epoch: 397, cost 193.52 s -2023-07-26 09:22:36,865 44k INFO Train Epoch: 398 [97%] -2023-07-26 09:22:36,867 44k INFO Losses: [2.5595476627349854, 2.3454742431640625, 13.633504867553711, 18.741374969482422, 0.7028172612190247], step: 78400, lr: 9.506320837439182e-05 -2023-07-26 09:22:47,513 44k INFO ====> Epoch: 398, cost 201.80 s -2023-07-26 09:26:00,591 44k INFO Train Epoch: 399 [98%] -2023-07-26 09:26:00,593 44k INFO Losses: [2.419861316680908, 2.3645153045654297, 9.50949764251709, 19.181337356567383, 0.8217363357543945], step: 78600, lr: 9.505132547334502e-05 -2023-07-26 09:26:06,556 44k INFO ====> Epoch: 399, cost 199.04 s -2023-07-26 09:29:22,771 44k INFO ====> Epoch: 400, cost 196.22 s -2023-07-26 09:29:38,886 44k INFO Train Epoch: 401 [0%] -2023-07-26 09:29:38,887 44k INFO Losses: [2.4541432857513428, 2.14528489112854, 11.626108169555664, 16.99962615966797, 0.6729657053947449], step: 78800, lr: 9.502756412715364e-05 -2023-07-26 09:32:41,638 44k INFO ====> Epoch: 401, cost 198.87 s -2023-07-26 09:32:59,889 44k INFO Train Epoch: 402 [2%] -2023-07-26 09:32:59,891 44k INFO Losses: [2.057875871658325, 2.7987701892852783, 13.864038467407227, 18.785083770751953, 0.759579062461853], step: 79000, lr: 9.501568568163774e-05 -2023-07-26 09:33:11,125 44k INFO Saving model and optimizer state at iteration 402 to ./logs/44k/G_79000.pth -2023-07-26 09:33:18,302 44k INFO Saving model and optimizer state at iteration 402 to ./logs/44k/D_79000.pth -2023-07-26 09:33:23,334 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_76000.pth -2023-07-26 09:33:23,340 44k INFO .. 
Free up space by deleting ckpt ./logs/44k/D_76000.pth -2023-07-26 09:36:24,176 44k INFO ====> Epoch: 402, cost 222.54 s -2023-07-26 09:36:45,293 44k INFO Train Epoch: 403 [3%] -2023-07-26 09:36:45,295 44k INFO Losses: [2.5432558059692383, 2.2326560020446777, 8.265777587890625, 16.89855194091797, 1.089661717414856], step: 79200, lr: 9.500380872092753e-05 -2023-07-26 09:39:41,561 44k INFO ====> Epoch: 403, cost 197.38 s -2023-07-26 09:40:05,838 44k INFO Train Epoch: 404 [5%] -2023-07-26 09:40:05,840 44k INFO Losses: [2.162412166595459, 2.5375633239746094, 13.11090087890625, 20.09492301940918, 1.1947343349456787], step: 79400, lr: 9.49919332448374e-05 -2023-07-26 09:42:58,309 44k INFO ====> Epoch: 404, cost 196.75 s -2023-07-26 09:43:23,918 44k INFO Train Epoch: 405 [6%] -2023-07-26 09:43:23,921 44k INFO Losses: [1.978492259979248, 2.6988372802734375, 13.868794441223145, 18.894588470458984, 0.9806001782417297], step: 79600, lr: 9.498005925318179e-05 -2023-07-26 09:46:17,840 44k INFO ====> Epoch: 405, cost 199.53 s -2023-07-26 09:46:46,409 44k INFO Train Epoch: 406 [8%] -2023-07-26 09:46:46,411 44k INFO Losses: [2.22659969329834, 2.4341418743133545, 12.980076789855957, 20.288345336914062, 0.6408030986785889], step: 79800, lr: 9.496818674577514e-05 -2023-07-26 09:49:37,082 44k INFO ====> Epoch: 406, cost 199.24 s -2023-07-26 09:50:11,461 44k INFO Train Epoch: 407 [9%] -2023-07-26 09:50:11,463 44k INFO Losses: [2.387986421585083, 2.3532207012176514, 9.817323684692383, 18.135372161865234, 0.24290831387043], step: 80000, lr: 9.495631572243191e-05 -2023-07-28 05:57:12,769 44k INFO {'train': {'log_interval': 200, 'eval_interval': 1000, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'contentvec_final_proj': False}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256, 'ssl_dim': 768, 'n_speakers': 200}, 'spk': {'TW3_F_Yennefer_Rus': 0}, 'model_dir': './logs/44k', 'reset': False} -2023-07-28 05:57:14,468 44k WARNING git hash values are different. 
93a70435(saved) != 2cf4c79a(current) -2023-07-28 05:57:32,491 44k INFO Loaded checkpoint './logs/44k/G_79000.pth' (iteration 402) -2023-07-28 05:57:43,368 44k INFO Loaded checkpoint './logs/44k/D_79000.pth' (iteration 402) -2023-07-28 05:58:17,257 44k INFO Train Epoch: 402 [2%] -2023-07-28 05:58:17,261 44k INFO Losses: [2.3646788597106934, 2.3277909755706787, 10.228991508483887, 15.950904846191406, 1.029319405555725], step: 79000, lr: 9.500380872092753e-05 -2023-07-28 05:58:34,237 44k INFO Saving model and optimizer state at iteration 402 to ./logs/44k/G_79000.pth -2023-07-28 05:58:37,081 44k INFO Saving model and optimizer state at iteration 402 to ./logs/44k/D_79000.pth -2023-07-28 06:02:54,087 44k INFO ====> Epoch: 402, cost 341.32 s -2023-07-28 06:03:10,160 44k INFO Train Epoch: 403 [3%] -2023-07-28 06:03:10,162 44k INFO Losses: [2.309328079223633, 2.516190528869629, 13.13507080078125, 20.0684871673584, 1.100438117980957], step: 79200, lr: 9.49919332448374e-05 -2023-07-28 06:06:04,780 44k INFO ====> Epoch: 403, cost 190.69 s -2023-07-28 06:06:26,253 44k INFO Train Epoch: 404 [5%] -2023-07-28 06:06:26,255 44k INFO Losses: [2.20246958732605, 2.3394761085510254, 15.926359176635742, 20.387004852294922, 0.7945383787155151], step: 79400, lr: 9.498005925318179e-05 -2023-07-28 06:09:17,925 44k INFO ====> Epoch: 404, cost 193.14 s -2023-07-28 06:09:42,228 44k INFO Train Epoch: 405 [6%] -2023-07-28 06:09:42,229 44k INFO Losses: [2.3675951957702637, 2.3735220432281494, 9.484576225280762, 16.420528411865234, 0.8646402955055237], step: 79600, lr: 9.496818674577514e-05 -2023-07-28 06:12:31,071 44k INFO ====> Epoch: 405, cost 193.15 s -2023-07-28 06:12:57,932 44k INFO Train Epoch: 406 [8%] -2023-07-28 06:12:57,933 44k INFO Losses: [2.3567769527435303, 2.619617223739624, 10.972625732421875, 18.835201263427734, 0.6962006092071533], step: 79800, lr: 9.495631572243191e-05 -2023-07-28 06:15:45,374 44k INFO ====> Epoch: 406, cost 194.30 s -2023-07-28 06:16:14,480 44k INFO Train Epoch: 407 [9%] -2023-07-28 06:16:14,482 44k INFO Losses: [2.2937328815460205, 2.4200222492218018, 9.106892585754395, 15.953207969665527, 0.8502748012542725], step: 80000, lr: 9.494444618296661e-05 -2023-07-28 06:16:24,194 44k INFO Saving model and optimizer state at iteration 407 to ./logs/44k/G_80000.pth -2023-07-28 06:16:26,926 44k INFO Saving model and optimizer state at iteration 407 to ./logs/44k/D_80000.pth -2023-07-28 06:19:17,429 44k INFO ====> Epoch: 407, cost 212.06 s -2023-07-28 06:19:48,263 44k INFO Train Epoch: 408 [11%] -2023-07-28 06:19:48,264 44k INFO Losses: [2.2249510288238525, 2.8071789741516113, 11.122891426086426, 18.027328491210938, 0.9554635882377625], step: 80200, lr: 9.493257812719373e-05 -2023-07-28 06:22:30,075 44k INFO ====> Epoch: 408, cost 192.65 s -2023-07-28 06:23:04,013 44k INFO Train Epoch: 409 [12%] -2023-07-28 06:23:04,015 44k INFO Losses: [2.4603757858276367, 2.3792428970336914, 8.852936744689941, 17.154415130615234, 1.0631310939788818], step: 80400, lr: 9.492071155492783e-05 -2023-07-28 06:25:42,718 44k INFO ====> Epoch: 409, cost 192.64 s -2023-07-28 06:26:20,696 44k INFO Train Epoch: 410 [14%] -2023-07-28 06:26:20,697 44k INFO Losses: [2.275728225708008, 2.794625759124756, 13.019681930541992, 19.306549072265625, 0.9673046469688416], step: 80600, lr: 9.490884646598347e-05 -2023-07-28 06:28:56,507 44k INFO ====> Epoch: 410, cost 193.79 s
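The 2023-07-28 block above is a cold restart: the config banner is re-printed, the newest pair G_79000/D_79000 is re-loaded at its saved iteration (402), the interrupted epoch 402 is re-run (costing 341.32 s against the usual ~190 s), and a WARNING flags that the git hash stored in the checkpoint (93a70435) no longer matches the working tree (2cf4c79a). A minimal sketch of the load-and-warn pattern, assuming a torch checkpoint dict holding 'model', 'optimizer', 'iteration' and a saved git hash (key names here are illustrative):

```python
import subprocess
import torch

def load_checkpoint(path: str, model, optimizer) -> int:
    """Restore model/optimizer state; warn if the code changed since saving."""
    ckpt = torch.load(path, map_location="cpu")
    saved = ckpt.get("git_hash")
    current = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode().strip()
    if saved and not current.startswith(saved[:8]):
        print(f"WARNING git hash values are different. "
              f"{saved[:8]}(saved) != {current[:8]}(current)")
    model.load_state_dict(ckpt["model"])
    optimizer.load_state_dict(ckpt["optimizer"])
    return ckpt["iteration"]  # e.g. 402 for G_79000.pth
```

One side effect visible in this log: each restart advances the LR scheduler by one extra decay step (compare epoch 320 at lr 9.601866951876297e-05 before the 2023-07-25 restart with 9.600666718507311e-05 after it), which is why epoch numbers and learning rates slowly de-align across sessions.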
-2023-07-28 06:29:36,488 44k INFO Train Epoch: 411 [15%] -2023-07-28 06:29:36,494 44k INFO Losses: [2.4295473098754883, 2.470752477645874, 11.238961219787598, 18.333789825439453, 0.7245648503303528], step: 80800, lr: 9.489698286017521e-05 -2023-07-28 06:32:09,499 44k INFO ====> Epoch: 411, cost 192.99 s -2023-07-28 06:32:52,270 44k INFO Train Epoch: 412 [17%] -2023-07-28 06:32:52,271 44k INFO Losses: [2.4203245639801025, 2.305345296859741, 10.346842765808105, 16.719438552856445, 0.9858787655830383], step: 81000, lr: 9.488512073731768e-05 -2023-07-28 06:33:02,585 44k INFO Saving model and optimizer state at iteration 412 to ./logs/44k/G_81000.pth -2023-07-28 06:33:05,210 44k INFO Saving model and optimizer state at iteration 412 to ./logs/44k/D_81000.pth -2023-07-28 06:35:40,308 44k INFO ====> Epoch: 412, cost 210.81 s -2023-07-28 06:36:28,516 44k INFO Train Epoch: 413 [18%] -2023-07-28 06:36:28,517 44k INFO Losses: [2.3486685752868652, 2.323007583618164, 10.52485179901123, 14.380027770996094, 0.14341972768306732], step: 81200, lr: 9.487326009722552e-05 -2023-07-28 06:38:55,639 44k INFO ====> Epoch: 413, cost 195.33 s -2023-07-28 06:39:43,095 44k INFO Train Epoch: 414 [20%] -2023-07-28 06:39:43,097 44k INFO Losses: [2.274632215499878, 2.9559762477874756, 11.93787670135498, 17.240310668945312, 0.9564627408981323], step: 81400, lr: 9.486140093971337e-05 -2023-07-28 06:42:08,799 44k INFO ====> Epoch: 414, cost 193.16 s -2023-07-28 06:42:59,361 44k INFO Train Epoch: 415 [21%] -2023-07-28 06:42:59,362 44k INFO Losses: [2.0304994583129883, 2.910217523574829, 13.536666870117188, 21.711145401000977, 0.6272156238555908], step: 81600, lr: 9.484954326459589e-05 -2023-07-28 06:45:21,700 44k INFO ====> Epoch: 415, cost 192.90 s -2023-07-28 06:46:15,647 44k INFO Train Epoch: 416 [23%] -2023-07-28 06:46:15,649 44k INFO Losses: [2.226191520690918, 2.5841333866119385, 13.43307113647461, 18.58431625366211, 0.5850363969802856], step: 81800, lr: 9.483768707168782e-05 -2023-07-28 06:48:34,749 44k INFO ====> Epoch: 416, cost 193.05 s -2023-07-28 06:49:31,573 44k INFO Train Epoch: 417 [24%] -2023-07-28 06:49:31,574 44k INFO Losses: [2.17295503616333, 2.421325445175171, 13.637075424194336, 17.33139991760254, 0.5363914370536804], step: 82000, lr: 9.482583236080386e-05 -2023-07-28 06:49:41,901 44k INFO Saving model and optimizer state at iteration 417 to ./logs/44k/G_82000.pth -2023-07-28 06:49:45,752 44k INFO Saving model and optimizer state at iteration 417 to ./logs/44k/D_82000.pth -2023-07-28 06:52:07,362 44k INFO ====> Epoch: 417, cost 212.61 s -2023-07-28 06:53:10,088 44k INFO Train Epoch: 418 [26%] -2023-07-28 06:53:10,092 44k INFO Losses: [2.3566322326660156, 2.79107666015625, 10.070869445800781, 18.931732177734375, 0.5892783999443054], step: 82200, lr: 9.481397913175876e-05 -2023-07-28 06:55:22,996 44k INFO ====> Epoch: 418, cost 195.63 s -2023-07-28 06:56:24,519 44k INFO Train Epoch: 419 [27%] -2023-07-28 06:56:24,520 44k INFO Losses: [2.2419166564941406, 2.8732523918151855, 12.770025253295898, 18.93949317932129, 0.8715353012084961], step: 82400, lr: 9.480212738436729e-05 -2023-07-28 06:58:34,424 44k INFO ====> Epoch: 419, cost 191.43 s -2023-07-28 06:59:39,293 44k INFO Train Epoch: 420 [29%] -2023-07-28 06:59:39,296 44k INFO Losses: [2.265590190887451, 2.5823957920074463, 12.942420959472656, 18.88718032836914, 1.0877387523651123], step: 82600, lr: 9.479027711844423e-05 -2023-07-28 07:01:48,316 44k INFO ====> Epoch: 420, cost 193.89 s -2023-07-28 07:02:54,129 44k INFO Train Epoch: 421 [30%] -2023-07-28 07:02:54,132 44k INFO Losses: [2.4849987030029297, 2.515474319458008, 12.661136627197266, 17.473934173583984, 0.35996386408805847], step: 82800, lr: 
9.477842833380443e-05 -2023-07-28 07:04:58,663 44k INFO ====> Epoch: 421, cost 190.35 s -2023-07-28 07:06:08,863 44k INFO Train Epoch: 422 [32%] -2023-07-28 07:06:08,864 44k INFO Losses: [2.1083486080169678, 2.644257068634033, 14.738850593566895, 17.19365692138672, 1.0472246408462524], step: 83000, lr: 9.47665810302627e-05 -2023-07-28 07:06:18,987 44k INFO Saving model and optimizer state at iteration 422 to ./logs/44k/G_83000.pth -2023-07-28 07:06:22,170 44k INFO Saving model and optimizer state at iteration 422 to ./logs/44k/D_83000.pth -2023-07-28 07:08:34,487 44k INFO ====> Epoch: 422, cost 215.82 s -2023-07-28 07:09:50,102 44k INFO Train Epoch: 423 [34%] -2023-07-28 07:09:50,106 44k INFO Losses: [2.516263484954834, 2.6018571853637695, 8.840169906616211, 17.87189483642578, 0.6123842597007751], step: 83200, lr: 9.475473520763392e-05 -2023-07-28 07:11:49,619 44k INFO ====> Epoch: 423, cost 195.13 s -2023-07-28 07:13:05,179 44k INFO Train Epoch: 424 [35%] -2023-07-28 07:13:05,182 44k INFO Losses: [2.254316806793213, 2.6325087547302246, 10.701687812805176, 16.47757339477539, 0.49666357040405273], step: 83400, lr: 9.474289086573296e-05 -2023-07-28 07:15:02,059 44k INFO ====> Epoch: 424, cost 192.44 s -2023-07-28 07:16:21,281 44k INFO Train Epoch: 425 [37%] -2023-07-28 07:16:21,283 44k INFO Losses: [2.2570111751556396, 2.58046817779541, 11.325643539428711, 19.037317276000977, 1.1894712448120117], step: 83600, lr: 9.473104800437474e-05 -2023-07-28 07:18:14,440 44k INFO ====> Epoch: 425, cost 192.38 s -2023-07-28 07:19:36,554 44k INFO Train Epoch: 426 [38%] -2023-07-28 07:19:36,556 44k INFO Losses: [2.3077170848846436, 2.5390539169311523, 14.170886993408203, 18.045394897460938, 0.03026140294969082], step: 83800, lr: 9.471920662337418e-05 -2023-07-28 07:21:28,935 44k INFO ====> Epoch: 426, cost 194.49 s -2023-07-28 07:22:50,277 44k INFO Train Epoch: 427 [40%] -2023-07-28 07:22:50,281 44k INFO Losses: [2.245270252227783, 2.508060932159424, 13.210451126098633, 17.91596221923828, 0.875553548336029], step: 84000, lr: 9.470736672254626e-05 -2023-07-28 07:23:00,719 44k INFO Saving model and optimizer state at iteration 427 to ./logs/44k/G_84000.pth -2023-07-28 07:23:10,428 44k INFO Saving model and optimizer state at iteration 427 to ./logs/44k/D_84000.pth -2023-07-28 07:25:04,810 44k INFO ====> Epoch: 427, cost 215.88 s -2023-07-28 07:26:35,115 44k INFO Train Epoch: 428 [41%] -2023-07-28 07:26:35,116 44k INFO Losses: [2.3524842262268066, 2.610440492630005, 13.777667045593262, 18.274595260620117, 0.8822522759437561], step: 84200, lr: 9.469552830170594e-05 -2023-07-28 07:28:19,855 44k INFO ====> Epoch: 428, cost 195.04 s -2023-07-28 07:29:50,966 44k INFO Train Epoch: 429 [43%] -2023-07-28 07:29:50,969 44k INFO Losses: [2.2023518085479736, 2.7657692432403564, 9.577252388000488, 18.677215576171875, 0.9726400971412659], step: 84400, lr: 9.468369136066823e-05 -2023-07-28 07:31:35,231 44k INFO ====> Epoch: 429, cost 195.38 s -2023-07-28 07:33:09,771 44k INFO Train Epoch: 430 [44%] -2023-07-28 07:33:09,772 44k INFO Losses: [2.3371498584747314, 2.5151326656341553, 11.495080947875977, 18.267637252807617, 0.7551075220108032], step: 84600, lr: 9.467185589924815e-05 -2023-07-28 07:34:50,371 44k INFO ====> Epoch: 430, cost 195.14 s -2023-07-28 07:36:25,271 44k INFO Train Epoch: 431 [46%] -2023-07-28 07:36:25,274 44k INFO Losses: [2.3749217987060547, 2.368912935256958, 8.640846252441406, 17.06429672241211, 1.317742109298706], step: 84800, lr: 9.466002191726074e-05 -2023-07-28 07:38:01,449 44k INFO ====> Epoch: 
431, cost 191.08 s -2023-07-28 07:39:39,813 44k INFO Train Epoch: 432 [47%] -2023-07-28 07:39:39,816 44k INFO Losses: [2.266174793243408, 2.3652031421661377, 14.710149765014648, 18.804729461669922, 1.1003684997558594], step: 85000, lr: 9.464818941452107e-05 -2023-07-28 07:39:50,768 44k INFO Saving model and optimizer state at iteration 432 to ./logs/44k/G_85000.pth -2023-07-28 07:39:58,528 44k INFO Saving model and optimizer state at iteration 432 to ./logs/44k/D_85000.pth -2023-07-28 07:40:04,821 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_82000.pth -2023-07-28 07:40:04,823 44k INFO .. Free up space by deleting ckpt ./logs/44k/D_82000.pth -2023-07-28 07:41:42,480 44k INFO ====> Epoch: 432, cost 221.03 s -2023-07-28 07:43:27,001 44k INFO Train Epoch: 433 [49%] -2023-07-28 07:43:27,005 44k INFO Losses: [2.1866841316223145, 2.6345996856689453, 12.23606014251709, 17.89522933959961, 0.44463151693344116], step: 85200, lr: 9.463635839084426e-05 -2023-07-28 07:44:57,993 44k INFO ====> Epoch: 433, cost 195.51 s -2023-07-28 07:46:42,359 44k INFO Train Epoch: 434 [50%] -2023-07-28 07:46:42,360 44k INFO Losses: [2.446272850036621, 2.278015375137329, 9.75872802734375, 18.56068229675293, 0.7677353024482727], step: 85400, lr: 9.46245288460454e-05 -2023-07-28 07:48:11,269 44k INFO ====> Epoch: 434, cost 193.28 s -2023-07-28 07:49:57,598 44k INFO Train Epoch: 435 [52%] -2023-07-28 07:49:57,601 44k INFO Losses: [2.2852048873901367, 2.480663299560547, 12.202441215515137, 19.782150268554688, 0.5450642704963684], step: 85600, lr: 9.461270077993965e-05 -2023-07-28 07:51:24,201 44k INFO ====> Epoch: 435, cost 192.93 s -2023-07-28 07:53:13,868 44k INFO Train Epoch: 436 [53%] -2023-07-28 07:53:13,871 44k INFO Losses: [2.5053205490112305, 2.556654930114746, 11.76624584197998, 18.179704666137695, 1.0059512853622437], step: 85800, lr: 9.460087419234215e-05 -2023-07-28 07:54:37,023 44k INFO ====> Epoch: 436, cost 192.82 s -2023-07-28 07:56:29,487 44k INFO Train Epoch: 437 [55%] -2023-07-28 07:56:29,488 44k INFO Losses: [2.0061988830566406, 2.8680078983306885, 14.546449661254883, 19.76282501220703, 0.26728275418281555], step: 86000, lr: 9.458904908306811e-05 -2023-07-28 07:56:41,572 44k INFO Saving model and optimizer state at iteration 437 to ./logs/44k/G_86000.pth -2023-07-28 07:56:47,492 44k INFO Saving model and optimizer state at iteration 437 to ./logs/44k/D_86000.pth -2023-07-28 07:58:17,420 44k INFO ====> Epoch: 437, cost 220.40 s -2023-07-28 08:00:14,852 44k INFO Train Epoch: 438 [56%] -2023-07-28 08:00:14,853 44k INFO Losses: [2.401583671569824, 2.7985446453094482, 13.377863883972168, 18.803077697753906, 1.0017095804214478], step: 86200, lr: 9.457722545193272e-05 -2023-07-28 08:01:34,848 44k INFO ====> Epoch: 438, cost 197.43 s -2023-07-28 08:03:36,279 44k INFO Train Epoch: 439 [58%] -2023-07-28 08:03:36,280 44k INFO Losses: [2.430826187133789, 2.512950897216797, 10.585251808166504, 17.824419021606445, 0.8333525657653809], step: 86400, lr: 9.456540329875122e-05 -2023-07-28 08:04:52,223 44k INFO ====> Epoch: 439, cost 197.37 s -2023-07-28 08:06:54,329 44k INFO Train Epoch: 440 [59%] -2023-07-28 08:06:54,332 44k INFO Losses: [2.3255021572113037, 2.6545562744140625, 14.349471092224121, 19.117088317871094, 0.9800734519958496], step: 86600, lr: 9.455358262333887e-05 -2023-07-28 08:08:07,604 44k INFO ====> Epoch: 440, cost 195.38 s -2023-07-28 08:10:11,342 44k INFO Train Epoch: 441 [61%] -2023-07-28 08:10:11,345 44k INFO Losses: [2.314946413040161, 2.3298795223236084, 11.044219970703125, 
19.784481048583984, 1.2769718170166016], step: 86800, lr: 9.454176342551095e-05 -2023-07-28 08:11:20,970 44k INFO ====> Epoch: 441, cost 193.37 s -2023-07-28 08:13:27,473 44k INFO Train Epoch: 442 [62%] -2023-07-28 08:13:27,474 44k INFO Losses: [2.3929009437561035, 2.3985536098480225, 14.130040168762207, 19.847349166870117, 1.0819814205169678], step: 87000, lr: 9.452994570508276e-05 -2023-07-28 08:13:40,143 44k INFO Saving model and optimizer state at iteration 442 to ./logs/44k/G_87000.pth -2023-07-28 08:13:46,156 44k INFO Saving model and optimizer state at iteration 442 to ./logs/44k/D_87000.pth -2023-07-28 08:15:01,334 44k INFO ====> Epoch: 442, cost 220.36 s -2023-07-28 08:17:13,250 44k INFO Train Epoch: 443 [64%] -2023-07-28 08:17:13,253 44k INFO Losses: [2.2393710613250732, 2.589975357055664, 11.348123550415039, 18.376754760742188, 0.5163195133209229], step: 87200, lr: 9.451812946186962e-05 -2023-07-28 08:18:17,416 44k INFO ====> Epoch: 443, cost 196.08 s -2023-07-28 08:20:31,485 44k INFO Train Epoch: 444 [65%] -2023-07-28 08:20:31,487 44k INFO Losses: [2.429718255996704, 2.431854486465454, 10.854389190673828, 19.300045013427734, 0.8893839716911316], step: 87400, lr: 9.450631469568687e-05 -2023-07-28 08:21:34,918 44k INFO ====> Epoch: 444, cost 197.50 s -2023-07-28 08:23:49,056 44k INFO Train Epoch: 445 [67%] -2023-07-28 08:23:49,059 44k INFO Losses: [2.399972915649414, 2.6890292167663574, 9.096315383911133, 17.21070671081543, 0.847396969795227], step: 87600, lr: 9.44945014063499e-05 -2023-07-28 08:24:47,564 44k INFO ====> Epoch: 445, cost 192.65 s -2023-07-28 08:27:05,263 44k INFO Train Epoch: 446 [69%] -2023-07-28 08:27:05,266 44k INFO Losses: [2.5121119022369385, 2.519871234893799, 11.088659286499023, 19.152971267700195, 0.9878566265106201], step: 87800, lr: 9.448268959367411e-05 -2023-07-28 08:28:01,925 44k INFO ====> Epoch: 446, cost 194.36 s -2023-07-28 08:30:20,321 44k INFO Train Epoch: 447 [70%] -2023-07-28 08:30:20,322 44k INFO Losses: [2.4151227474212646, 2.597712755203247, 13.873629570007324, 19.714265823364258, 1.1465305089950562], step: 88000, lr: 9.44708792574749e-05 -2023-07-28 08:30:35,494 44k INFO Saving model and optimizer state at iteration 447 to ./logs/44k/G_88000.pth -2023-07-28 08:30:46,061 44k INFO Saving model and optimizer state at iteration 447 to ./logs/44k/D_88000.pth -2023-07-28 08:30:51,586 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_85000.pth -2023-07-28 08:30:51,588 44k INFO .. 
Free up space by deleting ckpt ./logs/44k/D_85000.pth -2023-07-28 08:31:47,048 44k INFO ====> Epoch: 447, cost 225.12 s -2023-07-28 08:34:12,708 44k INFO Train Epoch: 448 [72%] -2023-07-28 08:34:12,710 44k INFO Losses: [2.4499430656433105, 2.45424747467041, 13.729780197143555, 18.48137092590332, 0.6432726383209229], step: 88200, lr: 9.445907039756771e-05 -2023-07-28 08:35:07,373 44k INFO ====> Epoch: 448, cost 200.33 s -2023-07-28 08:37:35,536 44k INFO Train Epoch: 449 [73%] -2023-07-28 08:37:35,538 44k INFO Losses: [2.295522928237915, 2.585463047027588, 11.352367401123047, 19.170785903930664, 0.9424953460693359], step: 88400, lr: 9.4447263013768e-05 -2023-07-28 08:38:25,486 44k INFO ====> Epoch: 449, cost 198.11 s -2023-07-28 08:40:51,662 44k INFO Train Epoch: 450 [75%] -2023-07-28 08:40:51,666 44k INFO Losses: [2.287156343460083, 2.238318920135498, 10.232880592346191, 17.418333053588867, 0.5693546533584595], step: 88600, lr: 9.443545710589128e-05 -2023-07-28 08:41:36,908 44k INFO ====> Epoch: 450, cost 191.42 s -2023-07-28 08:44:06,207 44k INFO Train Epoch: 451 [76%] -2023-07-28 08:44:06,208 44k INFO Losses: [2.347543716430664, 2.488084316253662, 12.9260892868042, 19.424997329711914, 1.0144562721252441], step: 88800, lr: 9.442365267375304e-05 -2023-07-28 08:44:49,467 44k INFO ====> Epoch: 451, cost 192.56 s -2023-07-28 08:47:24,960 44k INFO Train Epoch: 452 [78%] -2023-07-28 08:47:24,963 44k INFO Losses: [2.5469064712524414, 2.223968029022217, 10.941459655761719, 16.36357307434082, 1.018877387046814], step: 89000, lr: 9.441184971716882e-05 -2023-07-28 08:47:38,259 44k INFO Saving model and optimizer state at iteration 452 to ./logs/44k/G_89000.pth -2023-07-28 08:47:48,783 44k INFO Saving model and optimizer state at iteration 452 to ./logs/44k/D_89000.pth -2023-07-28 08:48:34,257 44k INFO ====> Epoch: 452, cost 224.79 s -2023-07-28 08:51:08,520 44k INFO Train Epoch: 453 [79%] -2023-07-28 08:51:08,521 44k INFO Losses: [2.045888662338257, 2.737135648727417, 15.314550399780273, 19.712059020996094, 0.7758307456970215], step: 89200, lr: 9.440004823595418e-05 -2023-07-28 08:51:47,432 44k INFO ====> Epoch: 453, cost 193.18 s -2023-07-28 08:54:27,849 44k INFO Train Epoch: 454 [81%] -2023-07-28 08:54:27,852 44k INFO Losses: [2.3151865005493164, 2.663576126098633, 11.803759574890137, 19.671321868896484, 0.7507362961769104], step: 89400, lr: 9.438824822992467e-05 -2023-07-28 08:55:06,814 44k INFO ====> Epoch: 454, cost 199.38 s -2023-07-28 08:57:52,167 44k INFO Train Epoch: 455 [82%] -2023-07-28 08:57:52,168 44k INFO Losses: [1.918971061706543, 2.9590015411376953, 15.693469047546387, 19.557626724243164, 1.024475336074829], step: 89600, lr: 9.437644969889592e-05 -2023-07-28 08:58:26,362 44k INFO ====> Epoch: 455, cost 199.55 s -2023-07-28 09:01:09,401 44k INFO Train Epoch: 456 [84%] -2023-07-28 09:01:09,405 44k INFO Losses: [2.409163236618042, 2.382859706878662, 10.933839797973633, 19.539426803588867, 0.9539042115211487], step: 89800, lr: 9.436465264268356e-05 -2023-07-28 09:01:39,383 44k INFO ====> Epoch: 456, cost 193.02 s -2023-07-28 09:04:25,127 44k INFO Train Epoch: 457 [85%] -2023-07-28 09:04:25,129 44k INFO Losses: [2.3299756050109863, 2.293668508529663, 12.80317497253418, 18.75678253173828, 1.0395828485488892], step: 90000, lr: 9.435285706110322e-05 -2023-07-28 09:04:35,698 44k INFO Saving model and optimizer state at iteration 457 to ./logs/44k/G_90000.pth -2023-07-28 09:04:42,844 44k INFO Saving model and optimizer state at iteration 457 to ./logs/44k/D_90000.pth -2023-07-28 09:05:16,689 
44k INFO ====> Epoch: 457, cost 217.31 s -2023-07-28 09:08:04,867 44k INFO Train Epoch: 458 [87%] -2023-07-28 09:08:04,870 44k INFO Losses: [2.2295949459075928, 2.397033452987671, 13.360812187194824, 19.51348304748535, 0.7340725660324097], step: 90200, lr: 9.434106295397058e-05 -2023-07-28 09:08:31,903 44k INFO ====> Epoch: 458, cost 195.21 s -2023-07-28 09:11:29,815 44k INFO Train Epoch: 459 [88%] -2023-07-28 09:11:29,818 44k INFO Losses: [2.521055221557617, 2.2494287490844727, 10.09217357635498, 15.471075057983398, 0.9883989691734314], step: 90400, lr: 9.432927032110133e-05 -2023-07-28 09:11:51,449 44k INFO ====> Epoch: 459, cost 199.55 s -2023-07-28 09:14:48,381 44k INFO Train Epoch: 460 [90%] -2023-07-28 09:14:48,383 44k INFO Losses: [2.494269609451294, 2.399876594543457, 11.405051231384277, 18.191387176513672, 1.0077555179595947], step: 90600, lr: 9.431747916231119e-05 -2023-07-28 09:15:09,024 44k INFO ====> Epoch: 460, cost 197.57 s -2023-07-28 09:18:06,859 44k INFO Train Epoch: 461 [91%] -2023-07-28 09:18:06,861 44k INFO Losses: [2.4349865913391113, 2.430562973022461, 11.630430221557617, 19.459186553955078, 1.1481024026870728], step: 90800, lr: 9.430568947741589e-05 -2023-07-28 09:18:23,035 44k INFO ====> Epoch: 461, cost 194.01 s -2023-07-28 09:21:23,708 44k INFO Train Epoch: 462 [93%] -2023-07-28 09:21:23,711 44k INFO Losses: [2.360874652862549, 2.2167580127716064, 14.48666763305664, 19.500951766967773, 0.6773166656494141], step: 91000, lr: 9.42939012662312e-05 -2023-07-28 09:21:33,635 44k INFO Saving model and optimizer state at iteration 462 to ./logs/44k/G_91000.pth -2023-07-28 09:21:40,869 44k INFO Saving model and optimizer state at iteration 462 to ./logs/44k/D_91000.pth -2023-07-28 09:22:00,926 44k INFO ====> Epoch: 462, cost 217.89 s -2023-07-28 09:25:06,023 44k INFO Train Epoch: 463 [94%] -2023-07-28 09:25:06,026 44k INFO Losses: [2.3978419303894043, 2.422466993331909, 12.129443168640137, 18.44054412841797, 1.1533154249191284], step: 91200, lr: 9.428211452857292e-05 -2023-07-28 09:25:17,560 44k INFO ====> Epoch: 463, cost 196.63 s -2023-07-29 06:04:54,080 44k INFO {'train': {'log_interval': 200, 'eval_interval': 1000, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'contentvec_final_proj': False}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256, 'ssl_dim': 768, 'n_speakers': 200}, 'spk': {'TW3_F_Yennefer_Rus': 0}, 'model_dir': './logs/44k', 'reset': False} -2023-07-29 06:04:55,323 44k WARNING git hash values are different. 
93a70435(saved) != 2cf4c79a(current) -2023-07-29 06:05:16,663 44k INFO Loaded checkpoint './logs/44k/G_91000.pth' (iteration 462) -2023-07-29 06:05:32,518 44k INFO Loaded checkpoint './logs/44k/D_91000.pth' (iteration 462) -2023-07-29 06:09:56,522 44k INFO Train Epoch: 462 [93%] -2023-07-29 06:09:56,525 44k INFO Losses: [2.359058380126953, 2.3062074184417725, 11.424729347229004, 17.276315689086914, 1.0585967302322388], step: 91000, lr: 9.428211452857292e-05 -2023-07-29 06:10:16,167 44k INFO Saving model and optimizer state at iteration 462 to ./logs/44k/G_91000.pth -2023-07-29 06:10:18,885 44k INFO Saving model and optimizer state at iteration 462 to ./logs/44k/D_91000.pth -2023-07-29 06:10:42,789 44k INFO ====> Epoch: 462, cost 348.71 s -2023-07-29 06:13:37,831 44k INFO Train Epoch: 463 [94%] -2023-07-29 06:13:37,835 44k INFO Losses: [2.429429054260254, 2.556804895401001, 10.890604019165039, 16.786766052246094, 0.6478838324546814], step: 91200, lr: 9.427032926425684e-05 -2023-07-29 06:13:48,686 44k INFO ====> Epoch: 463, cost 185.90 s -2023-07-29 06:16:47,596 44k INFO Train Epoch: 464 [96%] -2023-07-29 06:16:47,599 44k INFO Losses: [2.2896108627319336, 2.4141130447387695, 13.685308456420898, 19.64841651916504, 0.67793869972229], step: 91400, lr: 9.425854547309881e-05 -2023-07-29 06:16:56,360 44k INFO ====> Epoch: 464, cost 187.67 s -2023-07-29 06:19:59,585 44k INFO Train Epoch: 465 [97%] -2023-07-29 06:19:59,587 44k INFO Losses: [2.1358537673950195, 2.7821218967437744, 13.900437355041504, 19.74587631225586, 0.9101913571357727], step: 91600, lr: 9.424676315491467e-05 -2023-07-29 06:20:05,776 44k INFO ====> Epoch: 465, cost 189.42 s -2023-07-29 06:23:12,508 44k INFO Train Epoch: 466 [99%] -2023-07-29 06:23:12,510 44k INFO Losses: [2.3200955390930176, 2.6373257637023926, 13.471185684204102, 19.572195053100586, 0.32119402289390564], step: 91800, lr: 9.423498230952031e-05 -2023-07-29 06:23:15,893 44k INFO ====> Epoch: 466, cost 190.12 s -2023-07-29 06:26:26,458 44k INFO ====> Epoch: 467, cost 190.56 s -2023-07-29 06:26:40,384 44k INFO Train Epoch: 468 [1%] -2023-07-29 06:26:40,386 44k INFO Losses: [2.502150774002075, 2.1541805267333984, 7.640766143798828, 17.53664779663086, 1.3182235956192017], step: 92000, lr: 9.421142503636453e-05 -2023-07-29 06:26:51,574 44k INFO Saving model and optimizer state at iteration 468 to ./logs/44k/G_92000.pth -2023-07-29 06:26:54,325 44k INFO Saving model and optimizer state at iteration 468 to ./logs/44k/D_92000.pth -2023-07-29 06:29:50,323 44k INFO ====> Epoch: 468, cost 203.87 s -2023-07-29 06:30:07,126 44k INFO Train Epoch: 469 [2%] -2023-07-29 06:30:07,129 44k INFO Losses: [2.2770071029663086, 2.443047523498535, 12.300792694091797, 18.64503288269043, 1.0920207500457764], step: 92200, lr: 9.419964860823498e-05 -2023-07-29 06:32:59,066 44k INFO ====> Epoch: 469, cost 188.74 s -2023-07-29 06:33:18,768 44k INFO Train Epoch: 470 [4%] -2023-07-29 06:33:18,770 44k INFO Losses: [1.979555368423462, 3.0458714962005615, 13.553513526916504, 18.766643524169922, 1.2888392210006714], step: 92400, lr: 9.418787365215894e-05 -2023-07-29 06:36:10,011 44k INFO ====> Epoch: 470, cost 190.94 s -2023-07-29 06:36:31,300 44k INFO Train Epoch: 471 [5%] -2023-07-29 06:36:31,302 44k INFO Losses: [2.387843370437622, 2.5950827598571777, 10.43812370300293, 17.627351760864258, 0.05076530575752258], step: 92600, lr: 9.417610016795242e-05 -2023-07-29 06:39:18,196 44k INFO ====> Epoch: 471, cost 188.18 s -2023-07-29 06:39:39,803 44k INFO Train Epoch: 472 [7%] -2023-07-29 06:39:39,807 44k 
INFO Losses: [2.231309413909912, 2.635280132293701, 11.860804557800293, 17.924911499023438, 0.8245717883110046], step: 92800, lr: 9.416432815543143e-05 -2023-07-29 06:42:22,468 44k INFO ====> Epoch: 472, cost 184.27 s -2023-07-29 06:42:46,142 44k INFO Train Epoch: 473 [8%] -2023-07-29 06:42:46,143 44k INFO Losses: [2.4783294200897217, 2.218870162963867, 10.148725509643555, 19.019182205200195, 0.5546180605888367], step: 93000, lr: 9.4152557614412e-05 -2023-07-29 06:42:56,647 44k INFO Saving model and optimizer state at iteration 473 to ./logs/44k/G_93000.pth -2023-07-29 06:43:00,545 44k INFO Saving model and optimizer state at iteration 473 to ./logs/44k/D_93000.pth -2023-07-29 06:45:43,893 44k INFO ====> Epoch: 473, cost 201.42 s -2023-07-29 06:46:16,776 44k INFO Train Epoch: 474 [10%] -2023-07-29 06:46:16,778 44k INFO Losses: [2.3018336296081543, 2.6661698818206787, 9.84494400024414, 17.75308609008789, 0.8628433346748352], step: 93200, lr: 9.41407885447102e-05 -2023-07-29 06:48:56,351 44k INFO ====> Epoch: 474, cost 192.46 s -2023-07-29 06:49:25,949 44k INFO Train Epoch: 475 [11%] -2023-07-29 06:49:25,951 44k INFO Losses: [2.5535879135131836, 2.5535898208618164, 9.844694137573242, 18.103036880493164, 0.6207296848297119], step: 93400, lr: 9.412902094614211e-05 -2023-07-29 06:52:02,226 44k INFO ====> Epoch: 475, cost 185.87 s -2023-07-29 06:52:33,349 44k INFO Train Epoch: 476 [13%] -2023-07-29 06:52:33,352 44k INFO Losses: [2.323481321334839, 2.592643976211548, 16.896461486816406, 19.568025588989258, 1.069192886352539], step: 93600, lr: 9.411725481852385e-05 -2023-07-29 06:55:05,863 44k INFO ====> Epoch: 476, cost 183.64 s -2023-07-29 06:55:40,548 44k INFO Train Epoch: 477 [14%] -2023-07-29 06:55:40,551 44k INFO Losses: [2.4433412551879883, 2.484210968017578, 10.41572093963623, 18.40603256225586, 0.5250674486160278], step: 93800, lr: 9.410549016167153e-05 -2023-07-29 06:58:10,929 44k INFO ====> Epoch: 477, cost 185.07 s -2023-07-29 06:58:49,629 44k INFO Train Epoch: 478 [16%] -2023-07-29 06:58:49,630 44k INFO Losses: [2.464003324508667, 2.3006505966186523, 11.242310523986816, 18.118051528930664, 0.8479373455047607], step: 94000, lr: 9.409372697540131e-05 -2023-07-29 06:59:00,170 44k INFO Saving model and optimizer state at iteration 478 to ./logs/44k/G_94000.pth -2023-07-29 06:59:03,013 44k INFO Saving model and optimizer state at iteration 478 to ./logs/44k/D_94000.pth -2023-07-29 07:01:35,224 44k INFO ====> Epoch: 478, cost 204.30 s -2023-07-29 07:02:19,686 44k INFO Train Epoch: 479 [17%] -2023-07-29 07:02:19,689 44k INFO Losses: [2.5225343704223633, 2.420818328857422, 10.201705932617188, 19.83951759338379, 0.964104950428009], step: 94200, lr: 9.408196525952938e-05 -2023-07-29 07:04:43,347 44k INFO ====> Epoch: 479, cost 188.12 s -2023-07-29 07:05:31,857 44k INFO Train Epoch: 480 [19%] -2023-07-29 07:05:31,859 44k INFO Losses: [2.5317275524139404, 2.8082141876220703, 8.935391426086426, 18.335010528564453, 0.7307987809181213], step: 94400, lr: 9.407020501387194e-05 -2023-07-29 07:07:52,911 44k INFO ====> Epoch: 480, cost 189.56 s -2023-07-29 07:08:38,389 44k INFO Train Epoch: 481 [20%] -2023-07-29 07:08:38,393 44k INFO Losses: [2.304030179977417, 2.373994827270508, 12.892603874206543, 18.517311096191406, 0.8902214765548706], step: 94600, lr: 9.405844623824521e-05 -2023-07-29 07:10:57,383 44k INFO ====> Epoch: 481, cost 184.47 s -2023-07-29 07:11:46,984 44k INFO Train Epoch: 482 [22%] -2023-07-29 07:11:46,989 44k INFO Losses: [2.3441669940948486, 2.4371771812438965, 10.732386589050293, 
19.453670501708984, 0.990674614906311], step: 94800, lr: 9.404668893246542e-05 -2023-07-29 07:14:04,005 44k INFO ====> Epoch: 482, cost 186.62 s -2023-07-29 07:14:57,200 44k INFO Train Epoch: 483 [23%] -2023-07-29 07:14:57,202 44k INFO Losses: [2.5241644382476807, 2.3117222785949707, 14.629537582397461, 17.955894470214844, 1.371857762336731], step: 95000, lr: 9.403493309634886e-05 -2023-07-29 07:15:06,840 44k INFO Saving model and optimizer state at iteration 483 to ./logs/44k/G_95000.pth -2023-07-29 07:15:10,120 44k INFO Saving model and optimizer state at iteration 483 to ./logs/44k/D_95000.pth -2023-07-29 07:17:28,168 44k INFO ====> Epoch: 483, cost 204.16 s -2023-07-29 07:18:24,528 44k INFO Train Epoch: 484 [25%] -2023-07-29 07:18:24,531 44k INFO Losses: [2.1422297954559326, 2.5713958740234375, 11.43370246887207, 17.289098739624023, 0.2901522219181061], step: 95200, lr: 9.402317872971181e-05 -2023-07-29 07:20:36,118 44k INFO ====> Epoch: 484, cost 187.95 s -2023-07-29 07:21:32,435 44k INFO Train Epoch: 485 [26%] -2023-07-29 07:21:32,436 44k INFO Losses: [2.1757330894470215, 2.6625590324401855, 14.614097595214844, 18.319562911987305, 0.2097359448671341], step: 95400, lr: 9.401142583237059e-05 -2023-07-29 07:23:41,365 44k INFO ====> Epoch: 485, cost 185.25 s -2023-07-29 07:24:42,319 44k INFO Train Epoch: 486 [28%] -2023-07-29 07:24:42,322 44k INFO Losses: [2.3474345207214355, 2.487673044204712, 14.58984088897705, 19.254648208618164, 1.3415447473526], step: 95600, lr: 9.399967440414155e-05 -2023-07-29 07:26:48,262 44k INFO ====> Epoch: 486, cost 186.90 s -2023-07-29 07:27:52,394 44k INFO Train Epoch: 487 [29%] -2023-07-29 07:27:52,395 44k INFO Losses: [2.0807275772094727, 3.0709803104400635, 15.885435104370117, 19.568531036376953, 0.9527596235275269], step: 95800, lr: 9.398792444484102e-05 -2023-07-29 07:29:57,816 44k INFO ====> Epoch: 487, cost 189.55 s -2023-07-29 07:31:02,225 44k INFO Train Epoch: 488 [31%] -2023-07-29 07:31:02,227 44k INFO Losses: [2.8095226287841797, 2.177865743637085, 10.786866188049316, 19.04127311706543, 1.1003053188323975], step: 96000, lr: 9.397617595428541e-05 -2023-07-29 07:31:12,239 44k INFO Saving model and optimizer state at iteration 488 to ./logs/44k/G_96000.pth -2023-07-29 07:31:16,429 44k INFO Saving model and optimizer state at iteration 488 to ./logs/44k/D_96000.pth -2023-07-29 07:33:21,362 44k INFO ====> Epoch: 488, cost 203.55 s -2023-07-29 07:34:32,575 44k INFO Train Epoch: 489 [32%] -2023-07-29 07:34:32,576 44k INFO Losses: [2.3444316387176514, 2.524317741394043, 12.047869682312012, 19.28728675842285, 0.8741477131843567], step: 96200, lr: 9.396442893229112e-05 -2023-07-29 07:36:30,175 44k INFO ====> Epoch: 489, cost 188.81 s -2023-07-29 07:37:41,900 44k INFO Train Epoch: 490 [34%] -2023-07-29 07:37:41,902 44k INFO Losses: [2.365452527999878, 2.2726073265075684, 11.11032485961914, 17.116016387939453, 1.0989903211593628], step: 96400, lr: 9.395268337867458e-05 -2023-07-29 07:39:36,903 44k INFO ====> Epoch: 490, cost 186.73 s -2023-07-29 07:40:51,133 44k INFO Train Epoch: 491 [36%] -2023-07-29 07:40:51,137 44k INFO Losses: [2.279456377029419, 2.526348114013672, 9.942840576171875, 18.922710418701172, 0.9357972741127014], step: 96600, lr: 9.394093929325224e-05 -2023-07-29 07:42:44,540 44k INFO ====> Epoch: 491, cost 187.64 s -2023-07-29 07:44:00,725 44k INFO Train Epoch: 492 [37%] -2023-07-29 07:44:00,727 44k INFO Losses: [2.4531726837158203, 2.1283164024353027, 8.788697242736816, 14.448691368103027, 0.7233993411064148], step: 96800, lr: 
9.392919667584057e-05 -2023-07-29 07:45:52,493 44k INFO ====> Epoch: 492, cost 187.95 s -2023-07-29 07:47:10,515 44k INFO Train Epoch: 493 [39%] -2023-07-29 07:47:10,519 44k INFO Losses: [2.7059006690979004, 2.207681655883789, 10.598915100097656, 17.92115020751953, 1.1902127265930176], step: 97000, lr: 9.39174555262561e-05 -2023-07-29 07:47:19,411 44k INFO Saving model and optimizer state at iteration 493 to ./logs/44k/G_97000.pth -2023-07-29 07:47:26,150 44k INFO Saving model and optimizer state at iteration 493 to ./logs/44k/D_97000.pth -2023-07-29 07:47:28,448 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_94000.pth -2023-07-29 07:47:28,450 44k INFO .. Free up space by deleting ckpt ./logs/44k/D_94000.pth -2023-07-29 07:49:16,926 44k INFO ====> Epoch: 493, cost 204.43 s -2023-07-29 07:50:39,990 44k INFO Train Epoch: 494 [40%] -2023-07-29 07:50:39,991 44k INFO Losses: [2.274976968765259, 2.575018882751465, 14.317551612854004, 18.378517150878906, 0.5586758852005005], step: 97200, lr: 9.39057158443153e-05 -2023-07-29 07:52:27,136 44k INFO ====> Epoch: 494, cost 190.21 s -2023-07-29 07:53:54,127 44k INFO Train Epoch: 495 [42%] -2023-07-29 07:53:54,130 44k INFO Losses: [2.454768657684326, 2.426847219467163, 9.697604179382324, 18.767757415771484, 0.8175158500671387], step: 97400, lr: 9.389397762983476e-05 -2023-07-29 07:55:37,295 44k INFO ====> Epoch: 495, cost 190.16 s -2023-07-29 07:57:04,290 44k INFO Train Epoch: 496 [43%] -2023-07-29 07:57:04,292 44k INFO Losses: [2.259951591491699, 2.2923882007598877, 11.38487720489502, 18.39510154724121, 1.342573881149292], step: 97600, lr: 9.388224088263103e-05 -2023-07-29 07:58:45,024 44k INFO ====> Epoch: 496, cost 187.73 s -2023-07-29 08:00:17,479 44k INFO Train Epoch: 497 [45%] -2023-07-29 08:00:17,482 44k INFO Losses: [2.2939939498901367, 2.914663314819336, 13.024429321289062, 19.267654418945312, 0.7281192541122437], step: 97800, lr: 9.38705056025207e-05 -2023-07-29 08:01:54,950 44k INFO ====> Epoch: 497, cost 189.93 s -2023-07-29 08:03:28,981 44k INFO Train Epoch: 498 [46%] -2023-07-29 08:03:28,983 44k INFO Losses: [2.4851033687591553, 2.200305461883545, 11.758586883544922, 17.96369171142578, 0.6846098899841309], step: 98000, lr: 9.385877178932038e-05 -2023-07-29 08:03:39,383 44k INFO Saving model and optimizer state at iteration 498 to ./logs/44k/G_98000.pth -2023-07-29 08:03:42,198 44k INFO Saving model and optimizer state at iteration 498 to ./logs/44k/D_98000.pth -2023-07-29 08:05:21,188 44k INFO ====> Epoch: 498, cost 206.24 s -2023-07-29 08:07:02,991 44k INFO Train Epoch: 499 [48%] -2023-07-29 08:07:02,994 44k INFO Losses: [2.4583020210266113, 2.2870469093322754, 8.537273406982422, 16.776464462280273, 1.1709691286087036], step: 98200, lr: 9.384703944284672e-05 -2023-07-29 08:08:34,657 44k INFO ====> Epoch: 499, cost 193.47 s -2023-07-29 08:10:17,989 44k INFO Train Epoch: 500 [49%] -2023-07-29 08:10:17,990 44k INFO Losses: [2.3295741081237793, 2.3626601696014404, 11.646297454833984, 18.836034774780273, 0.937366783618927], step: 98400, lr: 9.383530856291636e-05 -2023-07-29 08:11:47,178 44k INFO ====> Epoch: 500, cost 192.52 s -2023-07-29 08:13:29,078 44k INFO Train Epoch: 501 [51%] -2023-07-29 08:13:29,081 44k INFO Losses: [2.3988912105560303, 2.2088944911956787, 9.747764587402344, 18.276832580566406, 1.202313780784607], step: 98600, lr: 9.382357914934599e-05 -2023-07-29 08:14:55,092 44k INFO ====> Epoch: 501, cost 187.91 s -2023-07-29 08:16:38,348 44k INFO Train Epoch: 502 [52%] -2023-07-29 08:16:38,349 44k INFO Losses: 
[2.675215721130371, 2.275815010070801, 7.638833522796631, 17.61923599243164, 0.6178475022315979], step: 98800, lr: 9.381185120195232e-05 -2023-07-29 08:18:01,963 44k INFO ====> Epoch: 502, cost 186.87 s -2023-07-29 08:19:49,805 44k INFO Train Epoch: 503 [54%] -2023-07-29 08:19:49,809 44k INFO Losses: [2.2797296047210693, 2.5554049015045166, 10.821635246276855, 17.281736373901367, 0.732896089553833], step: 99000, lr: 9.380012472055207e-05 -2023-07-29 08:19:58,965 44k INFO Saving model and optimizer state at iteration 503 to ./logs/44k/G_99000.pth -2023-07-29 08:20:02,723 44k INFO Saving model and optimizer state at iteration 503 to ./logs/44k/D_99000.pth -2023-07-29 08:21:26,368 44k INFO ====> Epoch: 503, cost 204.41 s -2023-07-29 08:23:16,593 44k INFO Train Epoch: 504 [55%] -2023-07-29 08:23:16,594 44k INFO Losses: [2.3617162704467773, 2.517270565032959, 12.366140365600586, 18.312759399414062, 0.6240535974502563], step: 99200, lr: 9.3788399704962e-05 -2023-07-29 08:24:35,822 44k INFO ====> Epoch: 504, cost 189.45 s -2023-07-29 08:26:26,038 44k INFO Train Epoch: 505 [57%] -2023-07-29 08:26:26,041 44k INFO Losses: [2.3941922187805176, 2.6487207412719727, 8.679996490478516, 16.00930404663086, 1.0080832242965698], step: 99400, lr: 9.377667615499888e-05 -2023-07-29 08:27:40,307 44k INFO ====> Epoch: 505, cost 184.49 s -2023-07-29 08:29:32,556 44k INFO Train Epoch: 506 [58%] -2023-07-29 08:29:32,560 44k INFO Losses: [2.438903570175171, 2.42946720123291, 11.141715049743652, 16.786720275878906, 0.5609295964241028], step: 99600, lr: 9.376495407047951e-05 -2023-07-29 08:30:45,130 44k INFO ====> Epoch: 506, cost 184.82 s -2023-07-29 08:32:42,455 44k INFO Train Epoch: 507 [60%] -2023-07-29 08:32:42,459 44k INFO Losses: [2.1920292377471924, 2.643498420715332, 10.68832015991211, 18.920818328857422, 0.7133192420005798], step: 99800, lr: 9.37532334512207e-05 -2023-07-29 08:33:52,099 44k INFO ====> Epoch: 507, cost 186.97 s -2023-07-29 08:35:51,549 44k INFO Train Epoch: 508 [61%] -2023-07-29 08:35:51,553 44k INFO Losses: [2.333817481994629, 2.5226809978485107, 9.92951488494873, 18.93316650390625, 0.6512612104415894], step: 100000, lr: 9.374151429703929e-05 -2023-07-29 08:36:02,123 44k INFO Saving model and optimizer state at iteration 508 to ./logs/44k/G_100000.pth -2023-07-29 08:36:04,751 44k INFO Saving model and optimizer state at iteration 508 to ./logs/44k/D_100000.pth -2023-07-29 08:37:16,763 44k INFO ====> Epoch: 508, cost 204.66 s -2023-07-29 08:39:19,836 44k INFO Train Epoch: 509 [63%] -2023-07-29 08:39:19,837 44k INFO Losses: [2.218674659729004, 2.710069417953491, 11.66411304473877, 20.19742202758789, 1.3467005491256714], step: 100200, lr: 9.372979660775216e-05 -2023-07-29 08:40:25,176 44k INFO ====> Epoch: 509, cost 188.41 s -2023-07-29 08:42:30,419 44k INFO Train Epoch: 510 [64%] -2023-07-29 08:42:30,422 44k INFO Losses: [2.5715761184692383, 2.125577449798584, 8.32373332977295, 16.195297241210938, 1.0494028329849243], step: 100400, lr: 9.371808038317619e-05 -2023-07-29 08:43:32,874 44k INFO ====> Epoch: 510, cost 187.70 s -2023-07-29 08:45:41,455 44k INFO Train Epoch: 511 [66%] -2023-07-29 08:45:41,457 44k INFO Losses: [2.0724270343780518, 2.5686287879943848, 14.677064895629883, 18.104671478271484, 0.7256052494049072], step: 100600, lr: 9.370636562312829e-05 -2023-07-29 08:46:42,544 44k INFO ====> Epoch: 511, cost 189.67 s -2023-07-29 08:48:50,764 44k INFO Train Epoch: 512 [68%] -2023-07-29 08:48:50,768 44k INFO Losses: [2.415742874145508, 2.4852118492126465, 11.185688018798828, 
15.635108947753906, 0.9893898367881775], step: 100800, lr: 9.36946523274254e-05 -2023-07-29 08:49:47,354 44k INFO ====> Epoch: 512, cost 184.81 s -2023-07-29 08:51:58,977 44k INFO Train Epoch: 513 [69%] -2023-07-29 08:51:58,978 44k INFO Losses: [2.419904947280884, 2.609632730484009, 12.236265182495117, 19.339128494262695, 1.1527421474456787], step: 101000, lr: 9.368294049588446e-05 -2023-07-29 08:52:08,572 44k INFO Saving model and optimizer state at iteration 513 to ./logs/44k/G_101000.pth -2023-07-29 08:52:11,950 44k INFO Saving model and optimizer state at iteration 513 to ./logs/44k/D_101000.pth -2023-07-29 08:53:11,865 44k INFO ====> Epoch: 513, cost 204.51 s -2023-07-29 08:55:27,635 44k INFO Train Epoch: 514 [71%] -2023-07-29 08:55:27,639 44k INFO Losses: [2.3763246536254883, 2.435695171356201, 12.005317687988281, 19.11176300048828, 0.48565050959587097], step: 101200, lr: 9.367123012832248e-05 -2023-07-29 08:56:18,951 44k INFO ====> Epoch: 514, cost 187.09 s -2023-07-29 08:58:37,467 44k INFO Train Epoch: 515 [72%] -2023-07-29 08:58:37,468 44k INFO Losses: [2.0858707427978516, 2.8389172554016113, 12.872121810913086, 18.464635848999023, 0.7605320811271667], step: 101400, lr: 9.365952122455643e-05 -2023-07-29 08:59:26,659 44k INFO ====> Epoch: 515, cost 187.71 s -2023-07-29 09:01:47,146 44k INFO Train Epoch: 516 [74%] -2023-07-29 09:01:47,150 44k INFO Losses: [2.4845402240753174, 2.2584855556488037, 9.590155601501465, 17.956769943237305, 0.7024598121643066], step: 101600, lr: 9.364781378440336e-05 -2023-07-29 09:02:32,975 44k INFO ====> Epoch: 516, cost 186.32 s -2023-07-29 09:04:55,787 44k INFO Train Epoch: 517 [75%] -2023-07-29 09:04:55,791 44k INFO Losses: [2.21860671043396, 2.68434739112854, 12.146842956542969, 19.53103256225586, 1.036292314529419], step: 101800, lr: 9.36361078076803e-05 -2023-07-29 09:05:39,551 44k INFO ====> Epoch: 517, cost 186.58 s -2023-07-29 09:08:06,827 44k INFO Train Epoch: 518 [77%] -2023-07-29 09:08:06,830 44k INFO Losses: [2.24428653717041, 2.6867775917053223, 9.58912181854248, 18.310714721679688, 0.8947120308876038], step: 102000, lr: 9.362440329420433e-05 -2023-07-29 09:08:15,749 44k INFO Saving model and optimizer state at iteration 518 to ./logs/44k/G_102000.pth -2023-07-29 09:08:23,092 44k INFO Saving model and optimizer state at iteration 518 to ./logs/44k/D_102000.pth -2023-07-29 09:09:09,012 44k INFO ====> Epoch: 518, cost 209.46 s -2023-07-29 09:11:36,406 44k INFO Train Epoch: 519 [78%] -2023-07-29 09:11:36,407 44k INFO Losses: [2.41986346244812, 2.3427658081054688, 13.704694747924805, 19.08769416809082, 0.7277838587760925], step: 102200, lr: 9.361270024379255e-05 -2023-07-29 09:12:15,573 44k INFO ====> Epoch: 519, cost 186.56 s -2023-07-29 09:14:46,270 44k INFO Train Epoch: 520 [80%] -2023-07-29 09:14:46,272 44k INFO Losses: [2.2185113430023193, 2.6033527851104736, 13.599997520446777, 21.364591598510742, 0.9149141311645508], step: 102400, lr: 9.360099865626208e-05 -2023-07-29 09:15:22,350 44k INFO ====> Epoch: 520, cost 186.78 s -2023-07-29 09:17:57,644 44k INFO Train Epoch: 521 [81%] -2023-07-29 09:17:57,648 44k INFO Losses: [2.4976935386657715, 2.136601209640503, 8.551946640014648, 18.592308044433594, 1.0273525714874268], step: 102600, lr: 9.358929853143005e-05 -2023-07-29 09:18:31,402 44k INFO ====> Epoch: 521, cost 189.05 s -2023-07-29 09:21:08,277 44k INFO Train Epoch: 522 [83%] -2023-07-29 09:21:08,279 44k INFO Losses: [2.557133436203003, 2.4242615699768066, 14.014547348022461, 18.722856521606445, 0.6345592141151428], step: 102800, lr: 
9.357759986911361e-05 -2023-07-29 09:21:40,322 44k INFO ====> Epoch: 522, cost 188.92 s -2023-07-29 09:24:17,697 44k INFO Train Epoch: 523 [84%] -2023-07-29 09:24:17,701 44k INFO Losses: [2.379854917526245, 2.537074089050293, 12.245451927185059, 18.825767517089844, 0.9281562566757202], step: 103000, lr: 9.356590266912997e-05 -2023-07-29 09:24:28,453 44k INFO Saving model and optimizer state at iteration 523 to ./logs/44k/G_103000.pth -2023-07-29 09:24:35,855 44k INFO Saving model and optimizer state at iteration 523 to ./logs/44k/D_103000.pth -2023-07-29 09:25:07,013 44k INFO ====> Epoch: 523, cost 206.69 s -2023-07-29 09:27:50,995 44k INFO Train Epoch: 524 [86%] -2023-07-29 09:27:50,996 44k INFO Losses: [2.36579966545105, 2.376593589782715, 9.096076011657715, 17.455917358398438, 1.0532230138778687], step: 103200, lr: 9.355420693129632e-05 -2023-07-29 09:28:17,091 44k INFO ====> Epoch: 524, cost 190.08 s -2023-07-29 09:31:06,083 44k INFO Train Epoch: 525 [87%] -2023-07-29 09:31:06,086 44k INFO Losses: [2.5361883640289307, 2.424555778503418, 9.041167259216309, 19.01300621032715, 0.9492159485816956], step: 103400, lr: 9.35425126554299e-05 -2023-07-29 09:31:29,729 44k INFO ====> Epoch: 525, cost 192.64 s -2023-07-29 09:34:18,726 44k INFO Train Epoch: 526 [89%] -2023-07-29 09:34:18,729 44k INFO Losses: [2.4192519187927246, 2.454631805419922, 9.981739044189453, 18.31422996520996, 0.44240543246269226], step: 103600, lr: 9.353081984134796e-05 -2023-07-29 09:34:38,571 44k INFO ====> Epoch: 526, cost 188.84 s -2023-07-29 09:37:29,818 44k INFO Train Epoch: 527 [90%] -2023-07-29 09:37:29,821 44k INFO Losses: [2.175581216812134, 2.685429096221924, 12.230260848999023, 18.915597915649414, 1.1387109756469727], step: 103800, lr: 9.351912848886779e-05 -2023-07-29 09:37:47,198 44k INFO ====> Epoch: 527, cost 188.63 s -2023-07-29 09:40:40,849 44k INFO Train Epoch: 528 [92%] -2023-07-29 09:40:40,852 44k INFO Losses: [2.413193464279175, 2.6871767044067383, 9.382375717163086, 19.37618064880371, 1.1960912942886353], step: 104000, lr: 9.350743859780667e-05 -2023-07-29 09:40:51,887 44k INFO Saving model and optimizer state at iteration 528 to ./logs/44k/G_104000.pth -2023-07-29 09:40:54,645 44k INFO Saving model and optimizer state at iteration 528 to ./logs/44k/D_104000.pth -2023-07-29 09:41:00,626 44k INFO .. Free up space by deleting ckpt ./logs/44k/G_101000.pth -2023-07-29 09:41:00,631 44k INFO .. Free up space by deleting ckpt ./logs/44k/D_101000.pth -2023-07-29 09:41:17,738 44k INFO ====> Epoch: 528, cost 210.54 s -2023-07-29 09:44:17,247 44k INFO Train Epoch: 529 [93%] -2023-07-29 09:44:17,250 44k INFO Losses: [2.4894115924835205, 2.3306732177734375, 10.043404579162598, 18.840389251708984, 0.6949144601821899], step: 104200, lr: 9.349575016798194e-05 -2023-07-29 09:44:32,333 44k INFO ====> Epoch: 529, cost 194.59 s -2023-07-29 09:47:32,174 44k INFO Train Epoch: 530 [95%] -2023-07-29 09:47:32,177 44k INFO Losses: [2.2610368728637695, 2.877363920211792, 10.683180809020996, 19.87116241455078, 0.986940324306488], step: 104400, lr: 9.348406319921095e-05 -2023-07-29 09:47:43,369 44k INFO ====> Epoch: 530, cost 191.04 s