{
  "best_metric": 0.23106186091899872,
  "best_model_checkpoint": "ai-light-dance_singing4_ft_wav2vec2-large-xlsr-53-5gram-v4-2/checkpoint-1872",
  "epoch": 99.99653979238754,
  "global_step": 7200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "learning_rate": 7.2e-07,
      "loss": 1.2632,
      "step": 10
    },
    {
      "epoch": 0.28,
      "learning_rate": 1.44e-06,
      "loss": 1.3592,
      "step": 20
    },
    {
      "epoch": 0.42,
      "learning_rate": 2.16e-06,
      "loss": 1.1299,
      "step": 30
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.96e-06,
      "loss": 1.1977,
      "step": 40
    },
    {
      "epoch": 0.69,
      "learning_rate": 3.7600000000000004e-06,
      "loss": 1.0437,
      "step": 50
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.56e-06,
      "loss": 1.1255,
      "step": 60
    },
    {
      "epoch": 0.97,
      "learning_rate": 5.36e-06,
      "loss": 0.9329,
      "step": 70
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.4334475100040436,
      "eval_runtime": 125.7994,
      "eval_samples_per_second": 4.229,
      "eval_steps_per_second": 0.533,
      "eval_wer": 0.1348589059587324,
      "step": 72
    },
    {
      "epoch": 1.11,
      "learning_rate": 6.16e-06,
      "loss": 0.9882,
      "step": 80
    },
    {
      "epoch": 1.25,
      "learning_rate": 6.96e-06,
      "loss": 0.9202,
      "step": 90
    },
    {
      "epoch": 1.39,
      "learning_rate": 7.76e-06,
      "loss": 0.8906,
      "step": 100
    },
    {
      "epoch": 1.53,
      "learning_rate": 8.560000000000001e-06,
      "loss": 0.859,
      "step": 110
    },
    {
      "epoch": 1.66,
      "learning_rate": 9.360000000000002e-06,
      "loss": 0.8122,
      "step": 120
    },
    {
      "epoch": 1.8,
      "learning_rate": 1.008e-05,
      "loss": 0.8647,
      "step": 130
    },
    {
      "epoch": 1.94,
      "learning_rate": 1.0880000000000001e-05,
      "loss": 0.7631,
      "step": 140
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.3389911651611328,
      "eval_runtime": 125.9823,
      "eval_samples_per_second": 4.223,
      "eval_steps_per_second": 0.532,
      "eval_wer": 0.13183775792247862,
      "step": 144
    },
    {
      "epoch": 2.08,
      "learning_rate": 1.168e-05,
      "loss": 0.9705,
      "step": 150
    },
    {
      "epoch": 2.22,
      "learning_rate": 1.248e-05,
      "loss": 0.7539,
      "step": 160
    },
    {
      "epoch": 2.36,
      "learning_rate": 1.3280000000000002e-05,
      "loss": 0.7717,
      "step": 170
    },
    {
      "epoch": 2.5,
      "learning_rate": 1.408e-05,
      "loss": 0.771,
      "step": 180
    },
    {
      "epoch": 2.64,
      "learning_rate": 1.4880000000000002e-05,
      "loss": 0.7623,
      "step": 190
    },
    {
      "epoch": 2.78,
      "learning_rate": 1.5680000000000002e-05,
      "loss": 0.7912,
      "step": 200
    },
    {
      "epoch": 2.91,
      "learning_rate": 1.648e-05,
      "loss": 0.7575,
      "step": 210
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.318192720413208,
      "eval_runtime": 126.4874,
      "eval_samples_per_second": 4.206,
      "eval_steps_per_second": 0.53,
      "eval_wer": 0.14405090955839814,
      "step": 216
    },
    {
      "epoch": 3.06,
      "learning_rate": 1.728e-05,
      "loss": 0.8294,
      "step": 220
    },
    {
      "epoch": 3.19,
      "learning_rate": 1.8080000000000003e-05,
      "loss": 0.7419,
      "step": 230
    },
    {
      "epoch": 3.33,
      "learning_rate": 1.8880000000000002e-05,
      "loss": 0.7306,
      "step": 240
    },
    {
      "epoch": 3.47,
      "learning_rate": 1.968e-05,
      "loss": 0.6948,
      "step": 250
    },
    {
      "epoch": 3.61,
      "learning_rate": 2.048e-05,
      "loss": 0.7168,
      "step": 260
    },
    {
      "epoch": 3.75,
      "learning_rate": 2.1280000000000003e-05,
      "loss": 0.6785,
      "step": 270
    },
    {
      "epoch": 3.89,
      "learning_rate": 2.2080000000000002e-05,
      "loss": 0.667,
      "step": 280
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.29954788088798523,
      "eval_runtime": 125.7745,
      "eval_samples_per_second": 4.23,
      "eval_steps_per_second": 0.533,
      "eval_wer": 0.12881660988622484,
      "step": 288
    },
    {
      "epoch": 4.03,
      "learning_rate": 2.288e-05,
      "loss": 0.7793,
      "step": 290
    },
    {
      "epoch": 4.17,
      "learning_rate": 2.368e-05,
      "loss": 0.661,
      "step": 300
    },
    {
      "epoch": 4.3,
      "learning_rate": 2.4480000000000003e-05,
      "loss": 0.7091,
      "step": 310
    },
    {
      "epoch": 4.44,
      "learning_rate": 2.5280000000000002e-05,
      "loss": 0.6231,
      "step": 320
    },
    {
      "epoch": 4.58,
      "learning_rate": 2.6080000000000004e-05,
      "loss": 0.6888,
      "step": 330
    },
    {
      "epoch": 4.72,
      "learning_rate": 2.6880000000000004e-05,
      "loss": 0.6414,
      "step": 340
    },
    {
      "epoch": 4.86,
      "learning_rate": 2.768e-05,
      "loss": 0.6548,
      "step": 350
    },
    {
      "epoch": 5.0,
      "learning_rate": 2.8480000000000002e-05,
      "loss": 0.6474,
      "step": 360
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.32113975286483765,
      "eval_runtime": 125.429,
      "eval_samples_per_second": 4.241,
      "eval_steps_per_second": 0.534,
      "eval_wer": 0.15742109661245743,
      "step": 360
    },
    {
      "epoch": 5.14,
      "learning_rate": 2.928e-05,
      "loss": 0.6143,
      "step": 370
    },
    {
      "epoch": 5.28,
      "learning_rate": 3.0080000000000003e-05,
      "loss": 0.6748,
      "step": 380
    },
    {
      "epoch": 5.42,
      "learning_rate": 3.088e-05,
      "loss": 0.627,
      "step": 390
    },
    {
      "epoch": 5.55,
      "learning_rate": 3.168e-05,
      "loss": 0.6667,
      "step": 400
    },
    {
      "epoch": 5.69,
      "learning_rate": 3.248000000000001e-05,
      "loss": 0.612,
      "step": 410
    },
    {
      "epoch": 5.83,
      "learning_rate": 3.328e-05,
      "loss": 0.6174,
      "step": 420
    },
    {
      "epoch": 5.97,
      "learning_rate": 3.408e-05,
      "loss": 0.6094,
      "step": 430
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.2943839728832245,
      "eval_runtime": 125.493,
      "eval_samples_per_second": 4.239,
      "eval_steps_per_second": 0.534,
      "eval_wer": 0.15298579417625507,
      "step": 432
    },
    {
      "epoch": 6.11,
      "learning_rate": 3.4880000000000005e-05,
      "loss": 0.722,
      "step": 440
    },
    {
      "epoch": 6.25,
      "learning_rate": 3.5680000000000004e-05,
      "loss": 0.6218,
      "step": 450
    },
    {
      "epoch": 6.39,
      "learning_rate": 3.648e-05,
      "loss": 0.6006,
      "step": 460
    },
    {
      "epoch": 6.53,
      "learning_rate": 3.728e-05,
      "loss": 0.6263,
      "step": 470
    },
    {
      "epoch": 6.66,
      "learning_rate": 3.808e-05,
      "loss": 0.5899,
      "step": 480
    },
    {
      "epoch": 6.8,
      "learning_rate": 3.888e-05,
      "loss": 0.6288,
      "step": 490
    },
    {
      "epoch": 6.94,
      "learning_rate": 3.9680000000000006e-05,
      "loss": 0.5586,
      "step": 500
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.3445891737937927,
      "eval_runtime": 125.4152,
      "eval_samples_per_second": 4.242,
      "eval_steps_per_second": 0.534,
      "eval_wer": 0.18094748344796555,
      "step": 504
    },
    {
      "epoch": 7.08,
      "learning_rate": 3.996417910447761e-05,
      "loss": 0.6473,
      "step": 510
    },
    {
      "epoch": 7.22,
      "learning_rate": 3.99044776119403e-05,
      "loss": 0.5851,
      "step": 520
    },
    {
      "epoch": 7.36,
      "learning_rate": 3.9844776119402985e-05,
      "loss": 0.6313,
      "step": 530
    },
    {
      "epoch": 7.5,
      "learning_rate": 3.9785074626865674e-05,
      "loss": 0.6304,
      "step": 540
    },
    {
      "epoch": 7.64,
      "learning_rate": 3.972537313432836e-05,
      "loss": 0.5582,
      "step": 550
    },
    {
      "epoch": 7.78,
      "learning_rate": 3.9665671641791046e-05,
      "loss": 0.5938,
      "step": 560
    },
    {
      "epoch": 7.91,
      "learning_rate": 3.9605970149253735e-05,
      "loss": 0.5728,
      "step": 570
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.2681596875190735,
      "eval_runtime": 125.0016,
      "eval_samples_per_second": 4.256,
      "eval_steps_per_second": 0.536,
      "eval_wer": 0.12457414668637912,
      "step": 576
    },
    {
      "epoch": 8.06,
      "learning_rate": 3.954626865671642e-05,
      "loss": 0.6302,
      "step": 580
    },
    {
      "epoch": 8.19,
      "learning_rate": 3.948656716417911e-05,
      "loss": 0.5609,
      "step": 590
    },
    {
      "epoch": 8.33,
      "learning_rate": 3.94268656716418e-05,
      "loss": 0.5992,
      "step": 600
    },
    {
      "epoch": 8.47,
      "learning_rate": 3.936716417910448e-05,
      "loss": 0.5509,
      "step": 610
    },
    {
      "epoch": 8.61,
      "learning_rate": 3.930746268656717e-05,
      "loss": 0.5622,
      "step": 620
    },
    {
      "epoch": 8.75,
      "learning_rate": 3.924776119402985e-05,
      "loss": 0.5663,
      "step": 630
    },
    {
      "epoch": 8.89,
      "learning_rate": 3.918805970149254e-05,
      "loss": 0.575,
      "step": 640
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.26104775071144104,
      "eval_runtime": 125.7246,
      "eval_samples_per_second": 4.231,
      "eval_steps_per_second": 0.533,
      "eval_wer": 0.1244455871954747,
      "step": 648
    },
    {
      "epoch": 9.03,
      "learning_rate": 3.912835820895523e-05,
      "loss": 0.639,
      "step": 650
    },
    {
      "epoch": 9.17,
      "learning_rate": 3.906865671641791e-05,
      "loss": 0.553,
      "step": 660
    },
    {
      "epoch": 9.3,
      "learning_rate": 3.90089552238806e-05,
      "loss": 0.5865,
      "step": 670
    },
    {
      "epoch": 9.44,
      "learning_rate": 3.894925373134329e-05,
      "loss": 0.5558,
      "step": 680
    },
    {
      "epoch": 9.58,
      "learning_rate": 3.8889552238805974e-05,
      "loss": 0.5802,
      "step": 690
    },
    {
      "epoch": 9.72,
      "learning_rate": 3.882985074626866e-05,
      "loss": 0.5578,
      "step": 700
    },
    {
      "epoch": 9.86,
      "learning_rate": 3.8770149253731346e-05,
      "loss": 0.5616,
      "step": 710
    },
    {
      "epoch": 10.0,
      "learning_rate": 3.8710447761194035e-05,
      "loss": 0.5882,
      "step": 720
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.24629366397857666,
      "eval_runtime": 125.6764,
      "eval_samples_per_second": 4.233,
      "eval_steps_per_second": 0.533,
      "eval_wer": 0.11994600501382015,
      "step": 720
    },
    {
      "epoch": 10.14,
      "learning_rate": 3.865074626865672e-05,
      "loss": 0.5583,
      "step": 730
    },
    {
      "epoch": 10.28,
      "learning_rate": 3.859104477611941e-05,
      "loss": 0.5394,
      "step": 740
    },
    {
      "epoch": 10.42,
      "learning_rate": 3.853134328358209e-05,
      "loss": 0.5381,
      "step": 750
    },
    {
      "epoch": 10.55,
      "learning_rate": 3.847164179104478e-05,
      "loss": 0.5779,
      "step": 760
    },
    {
      "epoch": 10.69,
      "learning_rate": 3.841194029850746e-05,
      "loss": 0.5287,
      "step": 770
    },
    {
      "epoch": 10.83,
      "learning_rate": 3.835223880597015e-05,
      "loss": 0.5673,
      "step": 780
    },
    {
      "epoch": 10.97,
      "learning_rate": 3.829253731343284e-05,
      "loss": 0.5367,
      "step": 790
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.2541561722755432,
      "eval_runtime": 124.9573,
      "eval_samples_per_second": 4.257,
      "eval_steps_per_second": 0.536,
      "eval_wer": 0.10927556726875362,
      "step": 792
    },
    {
      "epoch": 11.11,
      "learning_rate": 3.823283582089552e-05,
      "loss": 0.5502,
      "step": 800
    },
    {
      "epoch": 11.25,
      "learning_rate": 3.817313432835821e-05,
      "loss": 0.5654,
      "step": 810
    },
    {
      "epoch": 11.39,
      "learning_rate": 3.81134328358209e-05,
      "loss": 0.5214,
      "step": 820
    },
    {
      "epoch": 11.53,
      "learning_rate": 3.8053731343283584e-05,
      "loss": 0.57,
      "step": 830
    },
    {
      "epoch": 11.66,
      "learning_rate": 3.7994029850746274e-05,
      "loss": 0.5264,
      "step": 840
    },
    {
      "epoch": 11.8,
      "learning_rate": 3.793432835820896e-05,
      "loss": 0.5852,
      "step": 850
    },
    {
      "epoch": 11.94,
      "learning_rate": 3.7874626865671646e-05,
      "loss": 0.5261,
      "step": 860
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.2522805631160736,
      "eval_runtime": 125.7101,
      "eval_samples_per_second": 4.232,
      "eval_steps_per_second": 0.533,
      "eval_wer": 0.11306807225043389,
      "step": 864
    },
    {
      "epoch": 12.08,
      "learning_rate": 3.7814925373134335e-05,
      "loss": 0.5659,
      "step": 870
    },
    {
      "epoch": 12.22,
      "learning_rate": 3.775522388059702e-05,
      "loss": 0.5554,
      "step": 880
    },
    {
      "epoch": 12.36,
      "learning_rate": 3.769552238805971e-05,
      "loss": 0.5637,
      "step": 890
    },
    {
      "epoch": 12.5,
      "learning_rate": 3.763582089552239e-05,
      "loss": 0.5645,
      "step": 900
    },
    {
      "epoch": 12.64,
      "learning_rate": 3.757611940298508e-05,
      "loss": 0.512,
      "step": 910
    },
    {
      "epoch": 12.78,
      "learning_rate": 3.751641791044776e-05,
      "loss": 0.5351,
      "step": 920
    },
    {
      "epoch": 12.91,
      "learning_rate": 3.745671641791045e-05,
      "loss": 0.5091,
      "step": 930
    },
    {
      "epoch": 13.0,
      "eval_loss": 0.24912887811660767,
      "eval_runtime": 125.7106,
      "eval_samples_per_second": 4.232,
      "eval_steps_per_second": 0.533,
      "eval_wer": 0.11750337468663624,
      "step": 936
    },
    {
      "epoch": 13.06,
      "learning_rate": 3.7397014925373133e-05,
      "loss": 0.5542,
      "step": 940
    },
    {
      "epoch": 13.19,
      "learning_rate": 3.733731343283582e-05,
      "loss": 0.5288,
      "step": 950
    },
    {
      "epoch": 13.33,
      "learning_rate": 3.727761194029851e-05,
      "loss": 0.5333,
      "step": 960
    },
    {
      "epoch": 13.47,
      "learning_rate": 3.7217910447761195e-05,
      "loss": 0.5174,
      "step": 970
    },
    {
      "epoch": 13.61,
      "learning_rate": 3.7158208955223884e-05,
      "loss": 0.5334,
      "step": 980
    },
    {
      "epoch": 13.75,
      "learning_rate": 3.709850746268657e-05,
      "loss": 0.5249,
      "step": 990
    },
    {
      "epoch": 13.89,
      "learning_rate": 3.7038805970149256e-05,
      "loss": 0.5323,
      "step": 1000
    },
    {
      "epoch": 14.0,
      "eval_loss": 0.25354790687561035,
      "eval_runtime": 125.3694,
      "eval_samples_per_second": 4.243,
      "eval_steps_per_second": 0.534,
      "eval_wer": 0.12078164170469885,
      "step": 1008
    },
    {
      "epoch": 14.03,
      "learning_rate": 3.6979104477611945e-05,
      "loss": 0.551,
      "step": 1010
    },
    {
      "epoch": 14.17,
      "learning_rate": 3.691940298507463e-05,
      "loss": 0.5035,
      "step": 1020
    },
    {
      "epoch": 14.3,
      "learning_rate": 3.685970149253732e-05,
      "loss": 0.5039,
      "step": 1030
    },
    {
      "epoch": 14.44,
      "learning_rate": 3.680000000000001e-05,
      "loss": 0.5034,
      "step": 1040
    },
    {
      "epoch": 14.58,
      "learning_rate": 3.674029850746269e-05,
      "loss": 0.5221,
      "step": 1050
    },
    {
      "epoch": 14.72,
      "learning_rate": 3.668059701492538e-05,
      "loss": 0.5001,
      "step": 1060
    },
    {
      "epoch": 14.86,
      "learning_rate": 3.662089552238806e-05,
      "loss": 0.5308,
      "step": 1070
    },
    {
      "epoch": 15.0,
      "learning_rate": 3.656119402985075e-05,
      "loss": 0.5478,
      "step": 1080
    },
    {
      "epoch": 15.0,
      "eval_loss": 0.25416386127471924,
      "eval_runtime": 125.4981,
      "eval_samples_per_second": 4.239,
      "eval_steps_per_second": 0.534,
      "eval_wer": 0.1203959632319856,
      "step": 1080
    },
    {
      "epoch": 15.14,
      "learning_rate": 3.650149253731343e-05,
      "loss": 0.5143,
      "step": 1090
    },
    {
      "epoch": 15.28,
      "learning_rate": 3.644179104477612e-05,
      "loss": 0.4947,
      "step": 1100
    },
    {
      "epoch": 15.42,
      "learning_rate": 3.6382089552238805e-05,
      "loss": 0.502,
      "step": 1110
    },
    {
      "epoch": 15.55,
      "learning_rate": 3.6322388059701495e-05,
      "loss": 0.5333,
      "step": 1120
    },
    {
      "epoch": 15.69,
      "learning_rate": 3.626268656716418e-05,
      "loss": 0.4971,
      "step": 1130
    },
    {
      "epoch": 15.83,
      "learning_rate": 3.6202985074626867e-05,
      "loss": 0.529,
      "step": 1140
    },
    {
      "epoch": 15.97,
      "learning_rate": 3.6143283582089556e-05,
      "loss": 0.4775,
      "step": 1150
    },
    {
      "epoch": 16.0,
      "eval_loss": 0.24761441349983215,
      "eval_runtime": 125.5206,
      "eval_samples_per_second": 4.238,
      "eval_steps_per_second": 0.534,
      "eval_wer": 0.11383942919586039,
      "step": 1152
    },
    {
      "epoch": 16.11,
      "learning_rate": 3.608358208955224e-05,
      "loss": 0.5271,
      "step": 1160
    },
    {
      "epoch": 16.25,
      "learning_rate": 3.602388059701493e-05,
      "loss": 0.5068,
      "step": 1170
    },
    {
      "epoch": 16.39,
      "learning_rate": 3.596417910447762e-05,
      "loss": 0.4861,
      "step": 1180
    },
    {
      "epoch": 16.53,
      "learning_rate": 3.59044776119403e-05,
      "loss": 0.5266,
      "step": 1190
    },
    {
      "epoch": 16.66,
      "learning_rate": 3.584477611940299e-05,
      "loss": 0.4773,
      "step": 1200
    },
    {
      "epoch": 16.8,
      "learning_rate": 3.578507462686567e-05,
      "loss": 0.4999,
      "step": 1210
    },
    {
      "epoch": 16.94,
      "learning_rate": 3.572537313432836e-05,
      "loss": 0.4922,
      "step": 1220
    },
    {
      "epoch": 17.0,
      "eval_loss": 0.2534642815589905,
      "eval_runtime": 125.4558,
      "eval_samples_per_second": 4.241,
      "eval_steps_per_second": 0.534,
      "eval_wer": 0.11615350003213987,
      "step": 1224
    },
    {
      "epoch": 17.08,
      "learning_rate": 3.566567164179105e-05,
      "loss": 0.5258,
      "step": 1230
    },
    {
      "epoch": 17.22,
      "learning_rate": 3.560597014925373e-05,
      "loss": 0.4844,
      "step": 1240
    },
    {
      "epoch": 17.36,
      "learning_rate": 3.554626865671642e-05,
      "loss": 0.4963,
      "step": 1250
    },
    {
      "epoch": 17.5,
      "learning_rate": 3.548656716417911e-05,
      "loss": 0.5157,
      "step": 1260
    },
    {
      "epoch": 17.64,
      "learning_rate": 3.5426865671641794e-05,
      "loss": 0.4841,
      "step": 1270
    },
    {
      "epoch": 17.78,
      "learning_rate": 3.5367164179104484e-05,
      "loss": 0.504,
      "step": 1280
    },
    {
      "epoch": 17.91,
      "learning_rate": 3.5307462686567166e-05,
      "loss": 0.4893,
      "step": 1290
    },
    {
      "epoch": 18.0,
      "eval_loss": 0.25007733702659607,
      "eval_runtime": 125.2056,
      "eval_samples_per_second": 4.249,
      "eval_steps_per_second": 0.535,
      "eval_wer": 0.11428938741402583,
      "step": 1296
    },
    {
      "epoch": 18.06,
      "learning_rate": 3.5247761194029856e-05,
      "loss": 0.5313,
      "step": 1300
    },
    {
      "epoch": 18.19,
      "learning_rate": 3.518805970149254e-05,
      "loss": 0.4616,
      "step": 1310
    },
    {
      "epoch": 18.33,
      "learning_rate": 3.512835820895523e-05,
      "loss": 0.4936,
      "step": 1320
    },
    {
      "epoch": 18.47,
      "learning_rate": 3.506865671641791e-05,
      "loss": 0.4718,
      "step": 1330
    },
    {
      "epoch": 18.61,
      "learning_rate": 3.50089552238806e-05,
      "loss": 0.5097,
      "step": 1340
    },
    {
      "epoch": 18.75,
      "learning_rate": 3.494925373134328e-05,
      "loss": 0.5264,
      "step": 1350
    },
    {
      "epoch": 18.89,
      "learning_rate": 3.488955223880597e-05,
      "loss": 0.4672,
      "step": 1360
    },
    {
      "epoch": 19.0,
      "eval_loss": 0.27027812600135803,
      "eval_runtime": 125.7385,
      "eval_samples_per_second": 4.231,
      "eval_steps_per_second": 0.533,
      "eval_wer": 0.11621777977759208,
      "step": 1368
    },
    {
      "epoch": 19.03,
      "learning_rate": 3.482985074626866e-05,
      "loss": 0.5124,
      "step": 1370
    },
    {
      "epoch": 19.17,
      "learning_rate": 3.4770149253731344e-05,
      "loss": 0.4672,
      "step": 1380
    },
    {
      "epoch": 19.3,
      "learning_rate": 3.471044776119403e-05,
      "loss": 0.514,
      "step": 1390
    },
    {
      "epoch": 19.44,
      "learning_rate": 3.465074626865672e-05,
      "loss": 0.4728,
      "step": 1400
    },
    {
      "epoch": 19.58,
      "learning_rate": 3.4591044776119405e-05,
      "loss": 0.5109,
      "step": 1410
    },
    {
      "epoch": 19.72,
      "learning_rate": 3.4531343283582094e-05,
      "loss": 0.4927,
      "step": 1420
    },
    {
      "epoch": 19.86,
      "learning_rate": 3.4471641791044784e-05,
      "loss": 0.4889,
      "step": 1430
    },
    {
      "epoch": 20.0,
      "learning_rate": 3.4411940298507466e-05,
      "loss": 0.4764,
      "step": 1440
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.26316529512405396,
      "eval_runtime": 126.5261,
      "eval_samples_per_second": 4.205,
      "eval_steps_per_second": 0.53,
      "eval_wer": 0.1253455036318056,
      "step": 1440
    },
    {
      "epoch": 20.14,
      "learning_rate": 3.4352238805970156e-05,
      "loss": 0.4782,
      "step": 1450
    },
    {
      "epoch": 20.28,
      "learning_rate": 3.429253731343284e-05,
      "loss": 0.5151,
      "step": 1460
    },
    {
      "epoch": 20.42,
      "learning_rate": 3.423283582089553e-05,
      "loss": 0.4718,
      "step": 1470
    },
    {
      "epoch": 20.55,
      "learning_rate": 3.417313432835821e-05,
      "loss": 0.5069,
      "step": 1480
    },
    {
      "epoch": 20.69,
      "learning_rate": 3.41134328358209e-05,
      "loss": 0.4838,
      "step": 1490
    },
    {
      "epoch": 20.83,
      "learning_rate": 3.405373134328358e-05,
      "loss": 0.5045,
      "step": 1500
    },
    {
      "epoch": 20.97,
      "learning_rate": 3.399402985074627e-05,
      "loss": 0.4716,
      "step": 1510
    },
    {
      "epoch": 21.0,
      "eval_loss": 0.2533712089061737,
      "eval_runtime": 125.818,
      "eval_samples_per_second": 4.228,
      "eval_steps_per_second": 0.533,
      "eval_wer": 0.10966124574146686,
      "step": 1512
    },
    {
      "epoch": 21.11,
      "learning_rate": 3.3934328358208954e-05,
      "loss": 0.4991,
      "step": 1520
    },
    {
      "epoch": 21.25,
      "learning_rate": 3.387462686567164e-05,
      "loss": 0.4855,
      "step": 1530
    },
    {
      "epoch": 21.39,
      "learning_rate": 3.381492537313433e-05,
      "loss": 0.4672,
      "step": 1540
    },
    {
      "epoch": 21.53,
      "learning_rate": 3.3755223880597015e-05,
      "loss": 0.4734,
      "step": 1550
    },
    {
      "epoch": 21.66,
      "learning_rate": 3.3695522388059705e-05,
      "loss": 0.4573,
      "step": 1560
    },
    {
      "epoch": 21.8,
      "learning_rate": 3.363582089552239e-05,
      "loss": 0.4839,
      "step": 1570
    },
    {
      "epoch": 21.94,
      "learning_rate": 3.3576119402985077e-05,
      "loss": 0.4733,
      "step": 1580
    },
    {
      "epoch": 22.0,
      "eval_loss": 0.2495640367269516,
      "eval_runtime": 125.5122,
      "eval_samples_per_second": 4.239,
      "eval_steps_per_second": 0.534,
      "eval_wer": 0.10856849006877933,
      "step": 1584
    },
    {
      "epoch": 22.08,
      "learning_rate": 3.3516417910447766e-05,
      "loss": 0.5178,
      "step": 1590
    },
    {
      "epoch": 22.22,
      "learning_rate": 3.345671641791045e-05,
      "loss": 0.4618,
      "step": 1600
    },
    {
      "epoch": 22.36,
      "learning_rate": 3.339701492537314e-05,
      "loss": 0.4526,
      "step": 1610
    },
    {
      "epoch": 22.5,
      "learning_rate": 3.333731343283583e-05,
      "loss": 0.4851,
      "step": 1620
    },
    {
      "epoch": 22.64,
      "learning_rate": 3.327761194029851e-05,
      "loss": 0.4588,
      "step": 1630
    },
    {
      "epoch": 22.78,
      "learning_rate": 3.32179104477612e-05,
      "loss": 0.4704,
      "step": 1640
    },
    {
      "epoch": 22.91,
      "learning_rate": 3.315820895522388e-05,
      "loss": 0.4577,
      "step": 1650
    },
    {
      "epoch": 23.0,
      "eval_loss": 0.263689249753952,
      "eval_runtime": 125.7589,
      "eval_samples_per_second": 4.23,
      "eval_steps_per_second": 0.533,
      "eval_wer": 0.12245291508645625,
      "step": 1656
    },
    {
      "epoch": 23.06,
      "learning_rate": 3.309850746268657e-05,
      "loss": 0.5169,
      "step": 1660
    },
    {
      "epoch": 23.19,
      "learning_rate": 3.3038805970149254e-05,
      "loss": 0.4549,
      "step": 1670
    },
    {
      "epoch": 23.33,
      "learning_rate": 3.297910447761194e-05,
      "loss": 0.4886,
      "step": 1680
    },
    {
      "epoch": 23.47,
      "learning_rate": 3.2919402985074626e-05,
      "loss": 0.4745,
      "step": 1690
    },
    {
      "epoch": 23.61,
      "learning_rate": 3.2859701492537315e-05,
      "loss": 0.4737,
      "step": 1700
    },
    {
      "epoch": 23.75,
      "learning_rate": 3.28e-05,
      "loss": 0.476,
      "step": 1710
    },
    {
      "epoch": 23.89,
      "learning_rate": 3.274029850746269e-05,
      "loss": 0.4714,
      "step": 1720
    },
    {
      "epoch": 24.0,
      "eval_loss": 0.2489064484834671,
      "eval_runtime": 125.8703,
      "eval_samples_per_second": 4.227,
      "eval_steps_per_second": 0.532,
      "eval_wer": 0.11023976345053674,
      "step": 1728
    },
    {
      "epoch": 24.03,
      "learning_rate": 3.2680597014925376e-05,
      "loss": 0.4918,
      "step": 1730
    },
    {
      "epoch": 24.17,
      "learning_rate": 3.262089552238806e-05,
      "loss": 0.4572,
      "step": 1740
    },
    {
      "epoch": 24.3,
      "learning_rate": 3.256119402985075e-05,
      "loss": 0.4665,
      "step": 1750
    },
    {
      "epoch": 24.44,
      "learning_rate": 3.250149253731344e-05,
      "loss": 0.4504,
      "step": 1760
    },
    {
      "epoch": 24.58,
      "learning_rate": 3.244179104477612e-05,
      "loss": 0.4835,
      "step": 1770
    },
    {
      "epoch": 24.72,
      "learning_rate": 3.238208955223881e-05,
      "loss": 0.4709,
      "step": 1780
    },
    {
      "epoch": 24.86,
      "learning_rate": 3.23223880597015e-05,
      "loss": 0.477,
      "step": 1790
    },
    {
      "epoch": 25.0,
      "learning_rate": 3.226268656716418e-05,
      "loss": 0.4615,
      "step": 1800
    },
    {
      "epoch": 25.0,
      "eval_loss": 0.23991893231868744,
      "eval_runtime": 125.557,
      "eval_samples_per_second": 4.237,
      "eval_steps_per_second": 0.534,
      "eval_wer": 0.10201195603265412,
      "step": 1800
    },
    {
      "epoch": 25.14,
      "learning_rate": 3.220298507462687e-05,
      "loss": 0.4755,
      "step": 1810
    },
    {
      "epoch": 25.28,
      "learning_rate": 3.2143283582089554e-05,
      "loss": 0.4826,
      "step": 1820
    },
    {
      "epoch": 25.42,
      "learning_rate": 3.208358208955224e-05,
      "loss": 0.4455,
      "step": 1830
    },
    {
      "epoch": 25.55,
      "learning_rate": 3.202388059701493e-05,
      "loss": 0.4732,
      "step": 1840
    },
    {
      "epoch": 25.69,
      "learning_rate": 3.1964179104477615e-05,
      "loss": 0.4516,
      "step": 1850
    },
    {
      "epoch": 25.83,
      "learning_rate": 3.1904477611940304e-05,
      "loss": 0.47,
      "step": 1860
    },
    {
      "epoch": 25.97,
      "learning_rate": 3.184477611940299e-05,
      "loss": 0.4636,
      "step": 1870
    },
    {
      "epoch": 26.0,
      "eval_loss": 0.23106186091899872,
      "eval_runtime": 125.6068,
      "eval_samples_per_second": 4.235,
      "eval_steps_per_second": 0.533,
      "eval_wer": 0.10419746737802918,
      "step": 1872
    },
    {
      "epoch": 26.11,
      "learning_rate": 3.1785074626865676e-05,
      "loss": 0.4788,
      "step": 1880
    },
    {
      "epoch": 26.25,
      "learning_rate": 3.172537313432836e-05,
      "loss": 0.4535,
      "step": 1890
    },
    {
      "epoch": 26.39,
      "learning_rate": 3.166567164179105e-05,
      "loss": 0.4566,
      "step": 1900
    },
    {
      "epoch": 26.53,
      "learning_rate": 3.160597014925373e-05,
      "loss": 0.4732,
      "step": 1910
    },
    {
      "epoch": 26.66,
      "learning_rate": 3.154626865671642e-05,
      "loss": 0.4688,
      "step": 1920
    },
    {
      "epoch": 26.8,
      "learning_rate": 3.14865671641791e-05,
      "loss": 0.4602,
      "step": 1930
    },
    {
      "epoch": 26.94,
      "learning_rate": 3.142686567164179e-05,
      "loss": 0.4564,
      "step": 1940
    },
    {
      "epoch": 27.0,
      "eval_loss": 0.24130558967590332,
      "eval_runtime": 125.8942,
      "eval_samples_per_second": 4.226,
      "eval_steps_per_second": 0.532,
      "eval_wer": 0.10419746737802918,
      "step": 1944
    },
    {
      "epoch": 27.08,
      "learning_rate": 3.136716417910448e-05,
      "loss": 0.4945,
      "step": 1950
    },
    {
      "epoch": 27.22,
      "learning_rate": 3.1307462686567164e-05,
      "loss": 0.4422,
      "step": 1960
    },
    {
      "epoch": 27.36,
      "learning_rate": 3.124776119402985e-05,
      "loss": 0.4616,
      "step": 1970
    },
    {
      "epoch": 27.5,
      "learning_rate": 3.118805970149254e-05,
      "loss": 0.4741,
      "step": 1980
    },
    {
      "epoch": 27.64,
      "learning_rate": 3.1128358208955225e-05,
      "loss": 0.4571,
      "step": 1990
    },
    {
      "epoch": 27.78,
      "learning_rate": 3.1068656716417915e-05,
      "loss": 0.4607,
      "step": 2000
    },
    {
      "epoch": 27.91,
      "learning_rate": 3.1008955223880604e-05,
      "loss": 0.4452,
      "step": 2010
    },
    {
      "epoch": 28.0,
      "eval_loss": 0.2436205893754959,
      "eval_runtime": 126.071,
      "eval_samples_per_second": 4.22,
      "eval_steps_per_second": 0.531,
      "eval_wer": 0.10368322941441152,
      "step": 2016
    },
    {
      "epoch": 28.06,
      "learning_rate": 3.094925373134329e-05,
      "loss": 0.4951,
      "step": 2020
    },
    {
      "epoch": 28.19,
      "learning_rate": 3.0889552238805976e-05,
      "loss": 0.4447,
      "step": 2030
    },
    {
      "epoch": 28.33,
      "learning_rate": 3.082985074626866e-05,
      "loss": 0.4491,
      "step": 2040
    },
    {
      "epoch": 28.47,
      "learning_rate": 3.077014925373135e-05,
      "loss": 0.4479,
      "step": 2050
    },
    {
      "epoch": 28.61,
      "learning_rate": 3.071044776119403e-05,
      "loss": 0.4443,
      "step": 2060
    },
    {
      "epoch": 28.75,
      "learning_rate": 3.065074626865672e-05,
      "loss": 0.466,
      "step": 2070
    },
    {
      "epoch": 28.89,
      "learning_rate": 3.05910447761194e-05,
      "loss": 0.4336,
      "step": 2080
    },
    {
      "epoch": 29.0,
      "eval_loss": 0.2483624964952469,
      "eval_runtime": 125.3699,
      "eval_samples_per_second": 4.243,
      "eval_steps_per_second": 0.534,
      "eval_wer": 0.10702577617792633,
      "step": 2088
    },
    {
      "epoch": 29.03,
      "learning_rate": 3.053134328358209e-05,
      "loss": 0.4867,
      "step": 2090
    },
    {
      "epoch": 29.17,
      "learning_rate": 3.0471641791044778e-05,
      "loss": 0.4363,
      "step": 2100
    },
    {
      "epoch": 29.3,
      "learning_rate": 3.0411940298507464e-05,
      "loss": 0.4688,
      "step": 2110
    },
    {
      "epoch": 29.44,
      "learning_rate": 3.0352238805970153e-05,
      "loss": 0.4433,
      "step": 2120
    },
    {
      "epoch": 29.58,
      "learning_rate": 3.0292537313432836e-05,
      "loss": 0.4739,
      "step": 2130
    },
    {
      "epoch": 29.72,
      "learning_rate": 3.0232835820895525e-05,
      "loss": 0.4511,
      "step": 2140
    },
    {
      "epoch": 29.86,
      "learning_rate": 3.0173134328358214e-05,
      "loss": 0.4634,
      "step": 2150
    },
    {
      "epoch": 30.0,
      "learning_rate": 3.0113432835820897e-05,
      "loss": 0.4628,
      "step": 2160
    },
    {
      "epoch": 30.0,
      "eval_loss": 0.2384675294160843,
      "eval_runtime": 126.0807,
      "eval_samples_per_second": 4.22,
      "eval_steps_per_second": 0.531,
      "eval_wer": 0.10046924214180111,
      "step": 2160
    },
    {
      "epoch": 30.14,
      "learning_rate": 3.0053731343283586e-05,
      "loss": 0.4556,
      "step": 2170
    },
    {
      "epoch": 30.28,
      "learning_rate": 2.999402985074627e-05,
      "loss": 0.4516,
      "step": 2180
    },
    {
      "epoch": 30.42,
      "learning_rate": 2.993432835820896e-05,
      "loss": 0.4221,
      "step": 2190
    },
    {
      "epoch": 30.55,
      "learning_rate": 2.9874626865671644e-05,
      "loss": 0.4583,
      "step": 2200
    },
    {
      "epoch": 30.69,
      "learning_rate": 2.981492537313433e-05,
      "loss": 0.4468,
      "step": 2210
    },
    {
      "epoch": 30.83,
      "learning_rate": 2.9755223880597016e-05,
      "loss": 0.455,
      "step": 2220
    },
    {
      "epoch": 30.97,
      "learning_rate": 2.9695522388059706e-05,
      "loss": 0.4475,
      "step": 2230
    },
    {
      "epoch": 31.0,
      "eval_loss": 0.24459128081798553,
      "eval_runtime": 126.0753,
      "eval_samples_per_second": 4.22,
      "eval_steps_per_second": 0.531,
      "eval_wer": 0.10747573439609179,
      "step": 2232
    },
    {
      "epoch": 31.11,
      "learning_rate": 2.9635820895522388e-05,
      "loss": 0.4667,
      "step": 2240
    },
    {
      "epoch": 31.25,
      "learning_rate": 2.9576119402985078e-05,
      "loss": 0.4471,
      "step": 2250
    },
    {
      "epoch": 31.39,
      "learning_rate": 2.9516417910447767e-05,
      "loss": 0.4454,
      "step": 2260
    },
    {
      "epoch": 31.53,
      "learning_rate": 2.945671641791045e-05,
      "loss": 0.4489,
      "step": 2270
    },
    {
      "epoch": 31.66,
      "learning_rate": 2.939701492537314e-05,
      "loss": 0.4511,
      "step": 2280
    },
    {
      "epoch": 31.8,
      "learning_rate": 2.933731343283582e-05,
      "loss": 0.4664,
      "step": 2290
    },
    {
      "epoch": 31.94,
      "learning_rate": 2.927761194029851e-05,
      "loss": 0.4264,
      "step": 2300
    },
    {
      "epoch": 32.0,
      "eval_loss": 0.2548329532146454,
      "eval_runtime": 125.9996,
      "eval_samples_per_second": 4.222,
      "eval_steps_per_second": 0.532,
      "eval_wer": 0.1067686571961175,
      "step": 2304
    },
    {
      "epoch": 32.08,
      "learning_rate": 2.9217910447761197e-05,
      "loss": 0.4717,
      "step": 2310
    },
    {
      "epoch": 32.22,
      "learning_rate": 2.9158208955223883e-05,
      "loss": 0.4438,
      "step": 2320
    },
    {
      "epoch": 32.36,
      "learning_rate": 2.909850746268657e-05,
      "loss": 0.4552,
      "step": 2330
    },
    {
      "epoch": 32.5,
      "learning_rate": 2.9038805970149258e-05,
      "loss": 0.4394,
      "step": 2340
    },
    {
      "epoch": 32.64,
      "learning_rate": 2.897910447761194e-05,
      "loss": 0.4329,
      "step": 2350
    },
    {
      "epoch": 32.78,
      "learning_rate": 2.891940298507463e-05,
      "loss": 0.4666,
      "step": 2360
    },
|
{ |
|
"epoch": 32.91, |
|
"learning_rate": 2.8859701492537316e-05, |
|
"loss": 0.4417, |
|
"step": 2370 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"eval_loss": 0.24417315423488617, |
|
"eval_runtime": 125.5305, |
|
"eval_samples_per_second": 4.238, |
|
"eval_steps_per_second": 0.534, |
|
"eval_wer": 0.10464742559619464, |
|
"step": 2376 |
|
}, |
|
{ |
|
"epoch": 33.06, |
|
"learning_rate": 2.8800000000000002e-05, |
|
"loss": 0.4544, |
|
"step": 2380 |
|
}, |
|
{ |
|
"epoch": 33.19, |
|
"learning_rate": 2.8740298507462688e-05, |
|
"loss": 0.4269, |
|
"step": 2390 |
|
}, |
|
{ |
|
"epoch": 33.33, |
|
"learning_rate": 2.8680597014925377e-05, |
|
"loss": 0.4376, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 33.47, |
|
"learning_rate": 2.862089552238806e-05, |
|
"loss": 0.4531, |
|
"step": 2410 |
|
}, |
|
{ |
|
"epoch": 33.61, |
|
"learning_rate": 2.856119402985075e-05, |
|
"loss": 0.4256, |
|
"step": 2420 |
|
}, |
|
{ |
|
"epoch": 33.75, |
|
"learning_rate": 2.8501492537313432e-05, |
|
"loss": 0.4369, |
|
"step": 2430 |
|
}, |
|
{ |
|
"epoch": 33.89, |
|
"learning_rate": 2.844179104477612e-05, |
|
"loss": 0.4165, |
|
"step": 2440 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"eval_loss": 0.24577400088310242, |
|
"eval_runtime": 126.3581, |
|
"eval_samples_per_second": 4.21, |
|
"eval_steps_per_second": 0.53, |
|
"eval_wer": 0.10445458635983801, |
|
"step": 2448 |
|
}, |
|
{ |
|
"epoch": 34.03, |
|
"learning_rate": 2.838208955223881e-05, |
|
"loss": 0.4499, |
|
"step": 2450 |
|
}, |
|
{ |
|
"epoch": 34.17, |
|
"learning_rate": 2.8322388059701493e-05, |
|
"loss": 0.4431, |
|
"step": 2460 |
|
}, |
|
{ |
|
"epoch": 34.3, |
|
"learning_rate": 2.8262686567164183e-05, |
|
"loss": 0.4467, |
|
"step": 2470 |
|
}, |
|
{ |
|
"epoch": 34.44, |
|
"learning_rate": 2.820298507462687e-05, |
|
"loss": 0.4322, |
|
"step": 2480 |
|
}, |
|
{ |
|
"epoch": 34.58, |
|
"learning_rate": 2.8143283582089555e-05, |
|
"loss": 0.4598, |
|
"step": 2490 |
|
}, |
|
{ |
|
"epoch": 34.72, |
|
"learning_rate": 2.808358208955224e-05, |
|
"loss": 0.4232, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 34.86, |
|
"learning_rate": 2.8029850746268658e-05, |
|
"loss": 0.4359, |
|
"step": 2510 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 2.7970149253731348e-05, |
|
"loss": 0.4398, |
|
"step": 2520 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_loss": 0.2475011795759201, |
|
"eval_runtime": 126.4489, |
|
"eval_samples_per_second": 4.207, |
|
"eval_steps_per_second": 0.53, |
|
"eval_wer": 0.09789805232371279, |
|
"step": 2520 |
|
}, |
|
{ |
|
"epoch": 35.14, |
|
"learning_rate": 2.791044776119403e-05, |
|
"loss": 0.4436, |
|
"step": 2530 |
|
}, |
|
{ |
|
"epoch": 35.28, |
|
"learning_rate": 2.785074626865672e-05, |
|
"loss": 0.4727, |
|
"step": 2540 |
|
}, |
|
{ |
|
"epoch": 35.42, |
|
"learning_rate": 2.7791044776119406e-05, |
|
"loss": 0.4219, |
|
"step": 2550 |
|
}, |
|
{ |
|
"epoch": 35.55, |
|
"learning_rate": 2.773134328358209e-05, |
|
"loss": 0.4409, |
|
"step": 2560 |
|
}, |
|
{ |
|
"epoch": 35.69, |
|
"learning_rate": 2.7671641791044778e-05, |
|
"loss": 0.4418, |
|
"step": 2570 |
|
}, |
|
{ |
|
"epoch": 35.83, |
|
"learning_rate": 2.7611940298507467e-05, |
|
"loss": 0.4557, |
|
"step": 2580 |
|
}, |
|
{ |
|
"epoch": 35.97, |
|
"learning_rate": 2.755223880597015e-05, |
|
"loss": 0.4334, |
|
"step": 2590 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"eval_loss": 0.2375461310148239, |
|
"eval_runtime": 125.4731, |
|
"eval_samples_per_second": 4.24, |
|
"eval_steps_per_second": 0.534, |
|
"eval_wer": 0.1027190332326284, |
|
"step": 2592 |
|
}, |
|
{ |
|
"epoch": 36.11, |
|
"learning_rate": 2.749253731343284e-05, |
|
"loss": 0.4567, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 36.25, |
|
"learning_rate": 2.743283582089552e-05, |
|
"loss": 0.426, |
|
"step": 2610 |
|
}, |
|
{ |
|
"epoch": 36.39, |
|
"learning_rate": 2.737313432835821e-05, |
|
"loss": 0.4305, |
|
"step": 2620 |
|
}, |
|
{ |
|
"epoch": 36.53, |
|
"learning_rate": 2.73134328358209e-05, |
|
"loss": 0.4378, |
|
"step": 2630 |
|
}, |
|
{ |
|
"epoch": 36.66, |
|
"learning_rate": 2.7253731343283583e-05, |
|
"loss": 0.4179, |
|
"step": 2640 |
|
}, |
|
{ |
|
"epoch": 36.8, |
|
"learning_rate": 2.7194029850746272e-05, |
|
"loss": 0.4446, |
|
"step": 2650 |
|
}, |
|
{ |
|
"epoch": 36.94, |
|
"learning_rate": 2.7134328358208958e-05, |
|
"loss": 0.4279, |
|
"step": 2660 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"eval_loss": 0.24620497226715088, |
|
"eval_runtime": 125.763, |
|
"eval_samples_per_second": 4.23, |
|
"eval_steps_per_second": 0.533, |
|
"eval_wer": 0.10529022305071672, |
|
"step": 2664 |
|
}, |
|
{ |
|
"epoch": 37.08, |
|
"learning_rate": 2.7074626865671644e-05, |
|
"loss": 0.457, |
|
"step": 2670 |
|
}, |
|
{ |
|
"epoch": 37.22, |
|
"learning_rate": 2.701492537313433e-05, |
|
"loss": 0.4283, |
|
"step": 2680 |
|
}, |
|
{ |
|
"epoch": 37.36, |
|
"learning_rate": 2.695522388059702e-05, |
|
"loss": 0.4255, |
|
"step": 2690 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 2.6895522388059702e-05, |
|
"loss": 0.4277, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 37.64, |
|
"learning_rate": 2.683582089552239e-05, |
|
"loss": 0.4358, |
|
"step": 2710 |
|
}, |
|
{ |
|
"epoch": 37.78, |
|
"learning_rate": 2.6776119402985074e-05, |
|
"loss": 0.4362, |
|
"step": 2720 |
|
}, |
|
{ |
|
"epoch": 37.91, |
|
"learning_rate": 2.6716417910447763e-05, |
|
"loss": 0.4213, |
|
"step": 2730 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"eval_loss": 0.24020591378211975, |
|
"eval_runtime": 126.1172, |
|
"eval_samples_per_second": 4.218, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.1027833129780806, |
|
"step": 2736 |
|
}, |
|
{ |
|
"epoch": 38.06, |
|
"learning_rate": 2.665671641791045e-05, |
|
"loss": 0.4418, |
|
"step": 2740 |
|
}, |
|
{ |
|
"epoch": 38.19, |
|
"learning_rate": 2.6597014925373135e-05, |
|
"loss": 0.4241, |
|
"step": 2750 |
|
}, |
|
{ |
|
"epoch": 38.33, |
|
"learning_rate": 2.653731343283582e-05, |
|
"loss": 0.4432, |
|
"step": 2760 |
|
}, |
|
{ |
|
"epoch": 38.47, |
|
"learning_rate": 2.647761194029851e-05, |
|
"loss": 0.422, |
|
"step": 2770 |
|
}, |
|
{ |
|
"epoch": 38.61, |
|
"learning_rate": 2.6417910447761193e-05, |
|
"loss": 0.4215, |
|
"step": 2780 |
|
}, |
|
{ |
|
"epoch": 38.75, |
|
"learning_rate": 2.6358208955223883e-05, |
|
"loss": 0.4316, |
|
"step": 2790 |
|
}, |
|
{ |
|
"epoch": 38.89, |
|
"learning_rate": 2.6298507462686572e-05, |
|
"loss": 0.4394, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"eval_loss": 0.23846423625946045, |
|
"eval_runtime": 126.0839, |
|
"eval_samples_per_second": 4.219, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.10201195603265412, |
|
"step": 2808 |
|
}, |
|
{ |
|
"epoch": 39.03, |
|
"learning_rate": 2.6238805970149254e-05, |
|
"loss": 0.4507, |
|
"step": 2810 |
|
}, |
|
{ |
|
"epoch": 39.17, |
|
"learning_rate": 2.6179104477611944e-05, |
|
"loss": 0.4209, |
|
"step": 2820 |
|
}, |
|
{ |
|
"epoch": 39.3, |
|
"learning_rate": 2.611940298507463e-05, |
|
"loss": 0.4576, |
|
"step": 2830 |
|
}, |
|
{ |
|
"epoch": 39.44, |
|
"learning_rate": 2.6059701492537316e-05, |
|
"loss": 0.4214, |
|
"step": 2840 |
|
}, |
|
{ |
|
"epoch": 39.58, |
|
"learning_rate": 2.6000000000000002e-05, |
|
"loss": 0.423, |
|
"step": 2850 |
|
}, |
|
{ |
|
"epoch": 39.72, |
|
"learning_rate": 2.5940298507462688e-05, |
|
"loss": 0.4202, |
|
"step": 2860 |
|
}, |
|
{ |
|
"epoch": 39.86, |
|
"learning_rate": 2.5880597014925374e-05, |
|
"loss": 0.4318, |
|
"step": 2870 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 2.5820895522388063e-05, |
|
"loss": 0.4415, |
|
"step": 2880 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_loss": 0.24278585612773895, |
|
"eval_runtime": 125.8451, |
|
"eval_samples_per_second": 4.227, |
|
"eval_steps_per_second": 0.532, |
|
"eval_wer": 0.10207623577810632, |
|
"step": 2880 |
|
}, |
|
{ |
|
"epoch": 40.14, |
|
"learning_rate": 2.5761194029850746e-05, |
|
"loss": 0.4239, |
|
"step": 2890 |
|
}, |
|
{ |
|
"epoch": 40.28, |
|
"learning_rate": 2.5701492537313435e-05, |
|
"loss": 0.4266, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 40.42, |
|
"learning_rate": 2.5641791044776124e-05, |
|
"loss": 0.4243, |
|
"step": 2910 |
|
}, |
|
{ |
|
"epoch": 40.55, |
|
"learning_rate": 2.5582089552238807e-05, |
|
"loss": 0.4191, |
|
"step": 2920 |
|
}, |
|
{ |
|
"epoch": 40.69, |
|
"learning_rate": 2.5522388059701496e-05, |
|
"loss": 0.4151, |
|
"step": 2930 |
|
}, |
|
{ |
|
"epoch": 40.83, |
|
"learning_rate": 2.5462686567164182e-05, |
|
"loss": 0.4495, |
|
"step": 2940 |
|
}, |
|
{ |
|
"epoch": 40.97, |
|
"learning_rate": 2.540298507462687e-05, |
|
"loss": 0.4173, |
|
"step": 2950 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"eval_loss": 0.23555202782154083, |
|
"eval_runtime": 126.2962, |
|
"eval_samples_per_second": 4.212, |
|
"eval_steps_per_second": 0.53, |
|
"eval_wer": 0.10162627755994086, |
|
"step": 2952 |
|
}, |
|
{ |
|
"epoch": 41.11, |
|
"learning_rate": 2.5343283582089554e-05, |
|
"loss": 0.4292, |
|
"step": 2960 |
|
}, |
|
{ |
|
"epoch": 41.25, |
|
"learning_rate": 2.528358208955224e-05, |
|
"loss": 0.4236, |
|
"step": 2970 |
|
}, |
|
{ |
|
"epoch": 41.39, |
|
"learning_rate": 2.5223880597014926e-05, |
|
"loss": 0.4154, |
|
"step": 2980 |
|
}, |
|
{ |
|
"epoch": 41.53, |
|
"learning_rate": 2.5164179104477616e-05, |
|
"loss": 0.4169, |
|
"step": 2990 |
|
}, |
|
{ |
|
"epoch": 41.66, |
|
"learning_rate": 2.5104477611940298e-05, |
|
"loss": 0.4204, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 41.8, |
|
"learning_rate": 2.5044776119402988e-05, |
|
"loss": 0.4267, |
|
"step": 3010 |
|
}, |
|
{ |
|
"epoch": 41.94, |
|
"learning_rate": 2.4985074626865674e-05, |
|
"loss": 0.4006, |
|
"step": 3020 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"eval_loss": 0.2461443990468979, |
|
"eval_runtime": 126.1445, |
|
"eval_samples_per_second": 4.217, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.10066208137815774, |
|
"step": 3024 |
|
}, |
|
{ |
|
"epoch": 42.08, |
|
"learning_rate": 2.492537313432836e-05, |
|
"loss": 0.4439, |
|
"step": 3030 |
|
}, |
|
{ |
|
"epoch": 42.22, |
|
"learning_rate": 2.4865671641791045e-05, |
|
"loss": 0.4252, |
|
"step": 3040 |
|
}, |
|
{ |
|
"epoch": 42.36, |
|
"learning_rate": 2.4805970149253735e-05, |
|
"loss": 0.4325, |
|
"step": 3050 |
|
}, |
|
{ |
|
"epoch": 42.5, |
|
"learning_rate": 2.4746268656716417e-05, |
|
"loss": 0.4272, |
|
"step": 3060 |
|
}, |
|
{ |
|
"epoch": 42.64, |
|
"learning_rate": 2.4686567164179107e-05, |
|
"loss": 0.4098, |
|
"step": 3070 |
|
}, |
|
{ |
|
"epoch": 42.78, |
|
"learning_rate": 2.462686567164179e-05, |
|
"loss": 0.4486, |
|
"step": 3080 |
|
}, |
|
{ |
|
"epoch": 42.91, |
|
"learning_rate": 2.456716417910448e-05, |
|
"loss": 0.4055, |
|
"step": 3090 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"eval_loss": 0.2412189096212387, |
|
"eval_runtime": 126.0755, |
|
"eval_samples_per_second": 4.22, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.09982644468727904, |
|
"step": 3096 |
|
}, |
|
{ |
|
"epoch": 43.06, |
|
"learning_rate": 2.4507462686567168e-05, |
|
"loss": 0.4263, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 43.19, |
|
"learning_rate": 2.444776119402985e-05, |
|
"loss": 0.3997, |
|
"step": 3110 |
|
}, |
|
{ |
|
"epoch": 43.33, |
|
"learning_rate": 2.438805970149254e-05, |
|
"loss": 0.4182, |
|
"step": 3120 |
|
}, |
|
{ |
|
"epoch": 43.47, |
|
"learning_rate": 2.4328358208955226e-05, |
|
"loss": 0.3947, |
|
"step": 3130 |
|
}, |
|
{ |
|
"epoch": 43.61, |
|
"learning_rate": 2.4268656716417912e-05, |
|
"loss": 0.4137, |
|
"step": 3140 |
|
}, |
|
{ |
|
"epoch": 43.75, |
|
"learning_rate": 2.4208955223880598e-05, |
|
"loss": 0.4353, |
|
"step": 3150 |
|
}, |
|
{ |
|
"epoch": 43.89, |
|
"learning_rate": 2.4149253731343287e-05, |
|
"loss": 0.4163, |
|
"step": 3160 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"eval_loss": 0.2378450334072113, |
|
"eval_runtime": 126.3642, |
|
"eval_samples_per_second": 4.21, |
|
"eval_steps_per_second": 0.53, |
|
"eval_wer": 0.09764093334190396, |
|
"step": 3168 |
|
}, |
|
{ |
|
"epoch": 44.03, |
|
"learning_rate": 2.408955223880597e-05, |
|
"loss": 0.4417, |
|
"step": 3170 |
|
}, |
|
{ |
|
"epoch": 44.17, |
|
"learning_rate": 2.402985074626866e-05, |
|
"loss": 0.4039, |
|
"step": 3180 |
|
}, |
|
{ |
|
"epoch": 44.3, |
|
"learning_rate": 2.397014925373135e-05, |
|
"loss": 0.4352, |
|
"step": 3190 |
|
}, |
|
{ |
|
"epoch": 44.44, |
|
"learning_rate": 2.391044776119403e-05, |
|
"loss": 0.427, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 44.58, |
|
"learning_rate": 2.385074626865672e-05, |
|
"loss": 0.4411, |
|
"step": 3210 |
|
}, |
|
{ |
|
"epoch": 44.72, |
|
"learning_rate": 2.3791044776119403e-05, |
|
"loss": 0.4179, |
|
"step": 3220 |
|
}, |
|
{ |
|
"epoch": 44.86, |
|
"learning_rate": 2.3731343283582093e-05, |
|
"loss": 0.4097, |
|
"step": 3230 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 2.367164179104478e-05, |
|
"loss": 0.4143, |
|
"step": 3240 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"eval_loss": 0.24707278609275818, |
|
"eval_runtime": 125.5036, |
|
"eval_samples_per_second": 4.239, |
|
"eval_steps_per_second": 0.534, |
|
"eval_wer": 0.09963360545092241, |
|
"step": 3240 |
|
}, |
|
{ |
|
"epoch": 45.14, |
|
"learning_rate": 2.3611940298507465e-05, |
|
"loss": 0.4225, |
|
"step": 3250 |
|
}, |
|
{ |
|
"epoch": 45.28, |
|
"learning_rate": 2.355223880597015e-05, |
|
"loss": 0.4257, |
|
"step": 3260 |
|
}, |
|
{ |
|
"epoch": 45.42, |
|
"learning_rate": 2.349253731343284e-05, |
|
"loss": 0.3972, |
|
"step": 3270 |
|
}, |
|
{ |
|
"epoch": 45.55, |
|
"learning_rate": 2.3432835820895522e-05, |
|
"loss": 0.4127, |
|
"step": 3280 |
|
}, |
|
{ |
|
"epoch": 45.69, |
|
"learning_rate": 2.3373134328358212e-05, |
|
"loss": 0.4115, |
|
"step": 3290 |
|
}, |
|
{ |
|
"epoch": 45.83, |
|
"learning_rate": 2.3313432835820898e-05, |
|
"loss": 0.4254, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 45.97, |
|
"learning_rate": 2.3253731343283584e-05, |
|
"loss": 0.4132, |
|
"step": 3310 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"eval_loss": 0.24572667479515076, |
|
"eval_runtime": 126.1861, |
|
"eval_samples_per_second": 4.216, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.10040496239634891, |
|
"step": 3312 |
|
}, |
|
{ |
|
"epoch": 46.11, |
|
"learning_rate": 2.319402985074627e-05, |
|
"loss": 0.425, |
|
"step": 3320 |
|
}, |
|
{ |
|
"epoch": 46.25, |
|
"learning_rate": 2.3134328358208956e-05, |
|
"loss": 0.4294, |
|
"step": 3330 |
|
}, |
|
{ |
|
"epoch": 46.39, |
|
"learning_rate": 2.3074626865671642e-05, |
|
"loss": 0.4125, |
|
"step": 3340 |
|
}, |
|
{ |
|
"epoch": 46.53, |
|
"learning_rate": 2.301492537313433e-05, |
|
"loss": 0.4113, |
|
"step": 3350 |
|
}, |
|
{ |
|
"epoch": 46.66, |
|
"learning_rate": 2.2955223880597014e-05, |
|
"loss": 0.4102, |
|
"step": 3360 |
|
}, |
|
{ |
|
"epoch": 46.8, |
|
"learning_rate": 2.2895522388059703e-05, |
|
"loss": 0.4069, |
|
"step": 3370 |
|
}, |
|
{ |
|
"epoch": 46.94, |
|
"learning_rate": 2.2835820895522392e-05, |
|
"loss": 0.3991, |
|
"step": 3380 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"eval_loss": 0.23504526913166046, |
|
"eval_runtime": 126.0664, |
|
"eval_samples_per_second": 4.22, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.1019476762872019, |
|
"step": 3384 |
|
}, |
|
{ |
|
"epoch": 47.08, |
|
"learning_rate": 2.2776119402985075e-05, |
|
"loss": 0.4349, |
|
"step": 3390 |
|
}, |
|
{ |
|
"epoch": 47.22, |
|
"learning_rate": 2.2716417910447764e-05, |
|
"loss": 0.4032, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 47.36, |
|
"learning_rate": 2.265671641791045e-05, |
|
"loss": 0.4088, |
|
"step": 3410 |
|
}, |
|
{ |
|
"epoch": 47.5, |
|
"learning_rate": 2.2597014925373136e-05, |
|
"loss": 0.4182, |
|
"step": 3420 |
|
}, |
|
{ |
|
"epoch": 47.64, |
|
"learning_rate": 2.2537313432835822e-05, |
|
"loss": 0.4022, |
|
"step": 3430 |
|
}, |
|
{ |
|
"epoch": 47.78, |
|
"learning_rate": 2.2477611940298508e-05, |
|
"loss": 0.4224, |
|
"step": 3440 |
|
}, |
|
{ |
|
"epoch": 47.91, |
|
"learning_rate": 2.2417910447761194e-05, |
|
"loss": 0.4014, |
|
"step": 3450 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"eval_loss": 0.24001802504062653, |
|
"eval_runtime": 125.8626, |
|
"eval_samples_per_second": 4.227, |
|
"eval_steps_per_second": 0.532, |
|
"eval_wer": 0.10252619399627177, |
|
"step": 3456 |
|
}, |
|
{ |
|
"epoch": 48.06, |
|
"learning_rate": 2.2358208955223884e-05, |
|
"loss": 0.4388, |
|
"step": 3460 |
|
}, |
|
{ |
|
"epoch": 48.19, |
|
"learning_rate": 2.2298507462686566e-05, |
|
"loss": 0.3885, |
|
"step": 3470 |
|
}, |
|
{ |
|
"epoch": 48.33, |
|
"learning_rate": 2.2238805970149256e-05, |
|
"loss": 0.4108, |
|
"step": 3480 |
|
}, |
|
{ |
|
"epoch": 48.47, |
|
"learning_rate": 2.2179104477611945e-05, |
|
"loss": 0.4112, |
|
"step": 3490 |
|
}, |
|
{ |
|
"epoch": 48.61, |
|
"learning_rate": 2.2119402985074627e-05, |
|
"loss": 0.4182, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 48.75, |
|
"learning_rate": 2.2059701492537317e-05, |
|
"loss": 0.4098, |
|
"step": 3510 |
|
}, |
|
{ |
|
"epoch": 48.89, |
|
"learning_rate": 2.2000000000000003e-05, |
|
"loss": 0.416, |
|
"step": 3520 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"eval_loss": 0.23704026639461517, |
|
"eval_runtime": 125.7226, |
|
"eval_samples_per_second": 4.232, |
|
"eval_steps_per_second": 0.533, |
|
"eval_wer": 0.10104775985087099, |
|
"step": 3528 |
|
}, |
|
{ |
|
"epoch": 49.03, |
|
"learning_rate": 2.194029850746269e-05, |
|
"loss": 0.4186, |
|
"step": 3530 |
|
}, |
|
{ |
|
"epoch": 49.17, |
|
"learning_rate": 2.1880597014925375e-05, |
|
"loss": 0.4057, |
|
"step": 3540 |
|
}, |
|
{ |
|
"epoch": 49.3, |
|
"learning_rate": 2.1820895522388064e-05, |
|
"loss": 0.4123, |
|
"step": 3550 |
|
}, |
|
{ |
|
"epoch": 49.44, |
|
"learning_rate": 2.1761194029850747e-05, |
|
"loss": 0.4104, |
|
"step": 3560 |
|
}, |
|
{ |
|
"epoch": 49.58, |
|
"learning_rate": 2.1701492537313436e-05, |
|
"loss": 0.4368, |
|
"step": 3570 |
|
}, |
|
{ |
|
"epoch": 49.72, |
|
"learning_rate": 2.164179104477612e-05, |
|
"loss": 0.389, |
|
"step": 3580 |
|
}, |
|
{ |
|
"epoch": 49.86, |
|
"learning_rate": 2.1582089552238808e-05, |
|
"loss": 0.4215, |
|
"step": 3590 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 2.1522388059701494e-05, |
|
"loss": 0.4067, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_loss": 0.24442437291145325, |
|
"eval_runtime": 125.7495, |
|
"eval_samples_per_second": 4.231, |
|
"eval_steps_per_second": 0.533, |
|
"eval_wer": 0.10104775985087099, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 50.14, |
|
"learning_rate": 2.146268656716418e-05, |
|
"loss": 0.4199, |
|
"step": 3610 |
|
}, |
|
{ |
|
"epoch": 50.28, |
|
"learning_rate": 2.1402985074626866e-05, |
|
"loss": 0.4271, |
|
"step": 3620 |
|
}, |
|
{ |
|
"epoch": 50.42, |
|
"learning_rate": 2.1343283582089555e-05, |
|
"loss": 0.3991, |
|
"step": 3630 |
|
}, |
|
{ |
|
"epoch": 50.55, |
|
"learning_rate": 2.1283582089552238e-05, |
|
"loss": 0.4126, |
|
"step": 3640 |
|
}, |
|
{ |
|
"epoch": 50.69, |
|
"learning_rate": 2.1223880597014927e-05, |
|
"loss": 0.3957, |
|
"step": 3650 |
|
}, |
|
{ |
|
"epoch": 50.83, |
|
"learning_rate": 2.1164179104477617e-05, |
|
"loss": 0.4149, |
|
"step": 3660 |
|
}, |
|
{ |
|
"epoch": 50.97, |
|
"learning_rate": 2.11044776119403e-05, |
|
"loss": 0.3876, |
|
"step": 3670 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"eval_loss": 0.24910078942775726, |
|
"eval_runtime": 125.9782, |
|
"eval_samples_per_second": 4.223, |
|
"eval_steps_per_second": 0.532, |
|
"eval_wer": 0.10574018126888217, |
|
"step": 3672 |
|
}, |
|
{ |
|
"epoch": 51.11, |
|
"learning_rate": 2.104477611940299e-05, |
|
"loss": 0.4163, |
|
"step": 3680 |
|
}, |
|
{ |
|
"epoch": 51.25, |
|
"learning_rate": 2.098507462686567e-05, |
|
"loss": 0.4077, |
|
"step": 3690 |
|
}, |
|
{ |
|
"epoch": 51.39, |
|
"learning_rate": 2.092537313432836e-05, |
|
"loss": 0.3953, |
|
"step": 3700 |
|
}, |
|
{ |
|
"epoch": 51.53, |
|
"learning_rate": 2.0865671641791047e-05, |
|
"loss": 0.4068, |
|
"step": 3710 |
|
}, |
|
{ |
|
"epoch": 51.66, |
|
"learning_rate": 2.0805970149253732e-05, |
|
"loss": 0.393, |
|
"step": 3720 |
|
}, |
|
{ |
|
"epoch": 51.8, |
|
"learning_rate": 2.074626865671642e-05, |
|
"loss": 0.4173, |
|
"step": 3730 |
|
}, |
|
{ |
|
"epoch": 51.94, |
|
"learning_rate": 2.0686567164179108e-05, |
|
"loss": 0.3964, |
|
"step": 3740 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"eval_loss": 0.24512773752212524, |
|
"eval_runtime": 125.7387, |
|
"eval_samples_per_second": 4.231, |
|
"eval_steps_per_second": 0.533, |
|
"eval_wer": 0.107540014141544, |
|
"step": 3744 |
|
}, |
|
{ |
|
"epoch": 52.08, |
|
"learning_rate": 2.062686567164179e-05, |
|
"loss": 0.4127, |
|
"step": 3750 |
|
}, |
|
{ |
|
"epoch": 52.22, |
|
"learning_rate": 2.056716417910448e-05, |
|
"loss": 0.3952, |
|
"step": 3760 |
|
}, |
|
{ |
|
"epoch": 52.36, |
|
"learning_rate": 2.050746268656717e-05, |
|
"loss": 0.4053, |
|
"step": 3770 |
|
}, |
|
{ |
|
"epoch": 52.5, |
|
"learning_rate": 2.0447761194029852e-05, |
|
"loss": 0.3971, |
|
"step": 3780 |
|
}, |
|
{ |
|
"epoch": 52.64, |
|
"learning_rate": 2.038805970149254e-05, |
|
"loss": 0.3991, |
|
"step": 3790 |
|
}, |
|
{ |
|
"epoch": 52.78, |
|
"learning_rate": 2.0328358208955224e-05, |
|
"loss": 0.4185, |
|
"step": 3800 |
|
}, |
|
{ |
|
"epoch": 52.91, |
|
"learning_rate": 2.0268656716417913e-05, |
|
"loss": 0.3903, |
|
"step": 3810 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"eval_loss": 0.2395084798336029, |
|
"eval_runtime": 125.76, |
|
"eval_samples_per_second": 4.23, |
|
"eval_steps_per_second": 0.533, |
|
"eval_wer": 0.1003406826508967, |
|
"step": 3816 |
|
}, |
|
{ |
|
"epoch": 53.06, |
|
"learning_rate": 2.02089552238806e-05, |
|
"loss": 0.4163, |
|
"step": 3820 |
|
}, |
|
{ |
|
"epoch": 53.19, |
|
"learning_rate": 2.0149253731343285e-05, |
|
"loss": 0.3868, |
|
"step": 3830 |
|
}, |
|
{ |
|
"epoch": 53.33, |
|
"learning_rate": 2.008955223880597e-05, |
|
"loss": 0.4007, |
|
"step": 3840 |
|
}, |
|
{ |
|
"epoch": 53.47, |
|
"learning_rate": 2.002985074626866e-05, |
|
"loss": 0.3875, |
|
"step": 3850 |
|
}, |
|
{ |
|
"epoch": 53.61, |
|
"learning_rate": 1.9970149253731346e-05, |
|
"loss": 0.4216, |
|
"step": 3860 |
|
}, |
|
{ |
|
"epoch": 53.75, |
|
"learning_rate": 1.9910447761194032e-05, |
|
"loss": 0.3949, |
|
"step": 3870 |
|
}, |
|
{ |
|
"epoch": 53.89, |
|
"learning_rate": 1.9850746268656718e-05, |
|
"loss": 0.4036, |
|
"step": 3880 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"eval_loss": 0.24462918937206268, |
|
"eval_runtime": 126.2945, |
|
"eval_samples_per_second": 4.212, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.10156199781448866, |
|
"step": 3888 |
|
}, |
|
{ |
|
"epoch": 54.03, |
|
"learning_rate": 1.9791044776119404e-05, |
|
"loss": 0.4073, |
|
"step": 3890 |
|
}, |
|
{ |
|
"epoch": 54.17, |
|
"learning_rate": 1.973134328358209e-05, |
|
"loss": 0.3996, |
|
"step": 3900 |
|
}, |
|
{ |
|
"epoch": 54.3, |
|
"learning_rate": 1.9671641791044776e-05, |
|
"loss": 0.4012, |
|
"step": 3910 |
|
}, |
|
{ |
|
"epoch": 54.44, |
|
"learning_rate": 1.9611940298507462e-05, |
|
"loss": 0.3978, |
|
"step": 3920 |
|
}, |
|
{ |
|
"epoch": 54.58, |
|
"learning_rate": 1.955223880597015e-05, |
|
"loss": 0.4077, |
|
"step": 3930 |
|
}, |
|
{ |
|
"epoch": 54.72, |
|
"learning_rate": 1.9492537313432838e-05, |
|
"loss": 0.4047, |
|
"step": 3940 |
|
}, |
|
{ |
|
"epoch": 54.86, |
|
"learning_rate": 1.9432835820895523e-05, |
|
"loss": 0.4002, |
|
"step": 3950 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 1.937313432835821e-05, |
|
"loss": 0.3936, |
|
"step": 3960 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"eval_loss": 0.2519935965538025, |
|
"eval_runtime": 125.9606, |
|
"eval_samples_per_second": 4.224, |
|
"eval_steps_per_second": 0.532, |
|
"eval_wer": 0.09969788519637462, |
|
"step": 3960 |
|
}, |
|
{ |
|
"epoch": 55.14, |
|
"learning_rate": 1.93134328358209e-05, |
|
"loss": 0.3979, |
|
"step": 3970 |
|
}, |
|
{ |
|
"epoch": 55.28, |
|
"learning_rate": 1.9253731343283585e-05, |
|
"loss": 0.3871, |
|
"step": 3980 |
|
}, |
|
{ |
|
"epoch": 55.42, |
|
"learning_rate": 1.919402985074627e-05, |
|
"loss": 0.3952, |
|
"step": 3990 |
|
}, |
|
{ |
|
"epoch": 55.55, |
|
"learning_rate": 1.9134328358208957e-05, |
|
"loss": 0.4048, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 55.69, |
|
"learning_rate": 1.9074626865671643e-05, |
|
"loss": 0.386, |
|
"step": 4010 |
|
}, |
|
{ |
|
"epoch": 55.83, |
|
"learning_rate": 1.901492537313433e-05, |
|
"loss": 0.4128, |
|
"step": 4020 |
|
}, |
|
{ |
|
"epoch": 55.97, |
|
"learning_rate": 1.8955223880597015e-05, |
|
"loss": 0.4094, |
|
"step": 4030 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"eval_loss": 0.24009455740451813, |
|
"eval_runtime": 125.9732, |
|
"eval_samples_per_second": 4.223, |
|
"eval_steps_per_second": 0.532, |
|
"eval_wer": 0.09918364723275695, |
|
"step": 4032 |
|
}, |
|
{ |
|
"epoch": 56.11, |
|
"learning_rate": 1.8895522388059704e-05, |
|
"loss": 0.4111, |
|
"step": 4040 |
|
}, |
|
{ |
|
"epoch": 56.25, |
|
"learning_rate": 1.883582089552239e-05, |
|
"loss": 0.3947, |
|
"step": 4050 |
|
}, |
|
{ |
|
"epoch": 56.39, |
|
"learning_rate": 1.8776119402985076e-05, |
|
"loss": 0.3844, |
|
"step": 4060 |
|
}, |
|
{ |
|
"epoch": 56.53, |
|
"learning_rate": 1.8716417910447762e-05, |
|
"loss": 0.3964, |
|
"step": 4070 |
|
}, |
|
{ |
|
"epoch": 56.66, |
|
"learning_rate": 1.865671641791045e-05, |
|
"loss": 0.3786, |
|
"step": 4080 |
|
}, |
|
{ |
|
"epoch": 56.8, |
|
"learning_rate": 1.8597014925373137e-05, |
|
"loss": 0.3807, |
|
"step": 4090 |
|
}, |
|
{ |
|
"epoch": 56.94, |
|
"learning_rate": 1.8537313432835823e-05, |
|
"loss": 0.3977, |
|
"step": 4100 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"eval_loss": 0.24981184303760529, |
|
"eval_runtime": 125.5082, |
|
"eval_samples_per_second": 4.239, |
|
"eval_steps_per_second": 0.534, |
|
"eval_wer": 0.10188339654174969, |
|
"step": 4104 |
|
}, |
|
{ |
|
"epoch": 57.08, |
|
"learning_rate": 1.847761194029851e-05, |
|
"loss": 0.4155, |
|
"step": 4110 |
|
}, |
|
{ |
|
"epoch": 57.22, |
|
"learning_rate": 1.8417910447761195e-05, |
|
"loss": 0.3968, |
|
"step": 4120 |
|
}, |
|
{ |
|
"epoch": 57.36, |
|
"learning_rate": 1.835820895522388e-05, |
|
"loss": 0.3924, |
|
"step": 4130 |
|
}, |
|
{ |
|
"epoch": 57.5, |
|
"learning_rate": 1.8298507462686567e-05, |
|
"loss": 0.3905, |
|
"step": 4140 |
|
}, |
|
{ |
|
"epoch": 57.64, |
|
"learning_rate": 1.8238805970149257e-05, |
|
"loss": 0.3886, |
|
"step": 4150 |
|
}, |
|
{ |
|
"epoch": 57.78, |
|
"learning_rate": 1.8179104477611943e-05, |
|
"loss": 0.3999, |
|
"step": 4160 |
|
}, |
|
{ |
|
"epoch": 57.91, |
|
"learning_rate": 1.811940298507463e-05, |
|
"loss": 0.3942, |
|
"step": 4170 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"eval_loss": 0.24964629113674164, |
|
"eval_runtime": 125.983, |
|
"eval_samples_per_second": 4.223, |
|
"eval_steps_per_second": 0.532, |
|
"eval_wer": 0.09892652825094812, |
|
"step": 4176 |
|
}, |
|
{ |
|
"epoch": 58.06, |
|
"learning_rate": 1.8059701492537314e-05, |
|
"loss": 0.4191, |
|
"step": 4180 |
|
}, |
|
{ |
|
"epoch": 58.19, |
|
"learning_rate": 1.8e-05, |
|
"loss": 0.3811, |
|
"step": 4190 |
|
}, |
|
{ |
|
"epoch": 58.33, |
|
"learning_rate": 1.7940298507462686e-05, |
|
"loss": 0.4079, |
|
"step": 4200 |
|
}, |
|
{ |
|
"epoch": 58.47, |
|
"learning_rate": 1.7880597014925372e-05, |
|
"loss": 0.3942, |
|
"step": 4210 |
|
}, |
|
{ |
|
"epoch": 58.61, |
|
"learning_rate": 1.7820895522388062e-05, |
|
"loss": 0.412, |
|
"step": 4220 |
|
}, |
|
{ |
|
"epoch": 58.75, |
|
"learning_rate": 1.7761194029850748e-05, |
|
"loss": 0.4076, |
|
"step": 4230 |
|
}, |
|
{ |
|
"epoch": 58.89, |
|
"learning_rate": 1.7701492537313434e-05, |
|
"loss": 0.4052, |
|
"step": 4240 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"eval_loss": 0.25066983699798584, |
|
"eval_runtime": 125.8996, |
|
"eval_samples_per_second": 4.226, |
|
"eval_steps_per_second": 0.532, |
|
"eval_wer": 0.10214051552355853, |
|
"step": 4248 |
|
}, |
|
{ |
|
"epoch": 59.03, |
|
"learning_rate": 1.764179104477612e-05, |
|
"loss": 0.4133, |
|
"step": 4250 |
|
}, |
|
{ |
|
"epoch": 59.17, |
|
"learning_rate": 1.758208955223881e-05, |
|
"loss": 0.386, |
|
"step": 4260 |
|
}, |
|
{ |
|
"epoch": 59.3, |
|
"learning_rate": 1.7522388059701495e-05, |
|
"loss": 0.4076, |
|
"step": 4270 |
|
}, |
|
{ |
|
"epoch": 59.44, |
|
"learning_rate": 1.746268656716418e-05, |
|
"loss": 0.3842, |
|
"step": 4280 |
|
}, |
|
{ |
|
"epoch": 59.58, |
|
"learning_rate": 1.7402985074626867e-05, |
|
"loss": 0.3998, |
|
"step": 4290 |
|
}, |
|
{ |
|
"epoch": 59.72, |
|
"learning_rate": 1.7343283582089553e-05, |
|
"loss": 0.3767, |
|
"step": 4300 |
|
}, |
|
{ |
|
"epoch": 59.86, |
|
"learning_rate": 1.728358208955224e-05, |
|
"loss": 0.3866, |
|
"step": 4310 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 1.7223880597014925e-05, |
|
"loss": 0.3995, |
|
"step": 4320 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_loss": 0.23820430040359497, |
|
"eval_runtime": 125.7461, |
|
"eval_samples_per_second": 4.231, |
|
"eval_steps_per_second": 0.533, |
|
"eval_wer": 0.09989072443273124, |
|
"step": 4320 |
|
}, |
|
{ |
|
"epoch": 60.14, |
|
"learning_rate": 1.7164179104477614e-05, |
|
"loss": 0.4052, |
|
"step": 4330 |
|
}, |
|
{ |
|
"epoch": 60.28, |
|
"learning_rate": 1.71044776119403e-05, |
|
"loss": 0.3937, |
|
"step": 4340 |
|
}, |
|
{ |
|
"epoch": 60.42, |
|
"learning_rate": 1.7044776119402986e-05, |
|
"loss": 0.3793, |
|
"step": 4350 |
|
}, |
|
{ |
|
"epoch": 60.55, |
|
"learning_rate": 1.6985074626865676e-05, |
|
"loss": 0.3852, |
|
"step": 4360 |
|
}, |
|
{ |
|
"epoch": 60.69, |
|
"learning_rate": 1.692537313432836e-05, |
|
"loss": 0.3939, |
|
"step": 4370 |
|
}, |
|
{ |
|
"epoch": 60.83, |
|
"learning_rate": 1.6865671641791048e-05, |
|
"loss": 0.3996, |
|
"step": 4380 |
|
}, |
|
{ |
|
"epoch": 60.97, |
|
"learning_rate": 1.6805970149253734e-05, |
|
"loss": 0.407, |
|
"step": 4390 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"eval_loss": 0.25172731280326843, |
|
"eval_runtime": 126.4809, |
|
"eval_samples_per_second": 4.206, |
|
"eval_steps_per_second": 0.53, |
|
"eval_wer": 0.10374750915986372, |
|
"step": 4392 |
|
}, |
|
{ |
|
"epoch": 61.11, |
|
"learning_rate": 1.674626865671642e-05, |
|
"loss": 0.4135, |
|
"step": 4400 |
|
}, |
|
{ |
|
"epoch": 61.25, |
|
"learning_rate": 1.6686567164179105e-05, |
|
"loss": 0.3796, |
|
"step": 4410 |
|
}, |
|
{ |
|
"epoch": 61.39, |
|
"learning_rate": 1.662686567164179e-05, |
|
"loss": 0.3941, |
|
"step": 4420 |
|
}, |
|
{ |
|
"epoch": 61.53, |
|
"learning_rate": 1.6567164179104477e-05, |
|
"loss": 0.3988, |
|
"step": 4430 |
|
}, |
|
{ |
|
"epoch": 61.66, |
|
"learning_rate": 1.6507462686567167e-05, |
|
"loss": 0.3897, |
|
"step": 4440 |
|
}, |
|
{ |
|
"epoch": 61.8, |
|
"learning_rate": 1.6447761194029853e-05, |
|
"loss": 0.3962, |
|
"step": 4450 |
|
}, |
|
{ |
|
"epoch": 61.94, |
|
"learning_rate": 1.638805970149254e-05, |
|
"loss": 0.4067, |
|
"step": 4460 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"eval_loss": 0.24298657476902008, |
|
"eval_runtime": 125.7439, |
|
"eval_samples_per_second": 4.231, |
|
"eval_steps_per_second": 0.533, |
|
"eval_wer": 0.10336183068715048, |
|
"step": 4464 |
|
}, |
|
{ |
|
"epoch": 62.08, |
|
"learning_rate": 1.6328358208955225e-05, |
|
"loss": 0.4137, |
|
"step": 4470 |
|
}, |
|
{ |
|
"epoch": 62.22, |
|
"learning_rate": 1.626865671641791e-05, |
|
"loss": 0.395, |
|
"step": 4480 |
|
}, |
|
{ |
|
"epoch": 62.36, |
|
"learning_rate": 1.6208955223880597e-05, |
|
"loss": 0.3931, |
|
"step": 4490 |
|
}, |
|
{ |
|
"epoch": 62.5, |
|
"learning_rate": 1.6149253731343283e-05, |
|
"loss": 0.3991, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 62.64, |
|
"learning_rate": 1.6089552238805972e-05, |
|
"loss": 0.3887, |
|
"step": 4510 |
|
}, |
|
{ |
|
"epoch": 62.78, |
|
"learning_rate": 1.6029850746268658e-05, |
|
"loss": 0.3973, |
|
"step": 4520 |
|
}, |
|
{ |
|
"epoch": 62.91, |
|
"learning_rate": 1.5970149253731344e-05, |
|
"loss": 0.3887, |
|
"step": 4530 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"eval_loss": 0.2414666712284088, |
|
"eval_runtime": 125.7838, |
|
"eval_samples_per_second": 4.229, |
|
"eval_steps_per_second": 0.533, |
|
"eval_wer": 0.09738381436009513, |
|
"step": 4536 |
|
}, |
|
{ |
|
"epoch": 63.06, |
|
"learning_rate": 1.5910447761194033e-05, |
|
"loss": 0.4203, |
|
"step": 4540 |
|
}, |
|
{ |
|
"epoch": 63.19, |
|
"learning_rate": 1.585074626865672e-05, |
|
"loss": 0.3823, |
|
"step": 4550 |
|
}, |
|
{ |
|
"epoch": 63.33, |
|
"learning_rate": 1.5791044776119405e-05, |
|
"loss": 0.4024, |
|
"step": 4560 |
|
}, |
|
{ |
|
"epoch": 63.47, |
|
"learning_rate": 1.573134328358209e-05, |
|
"loss": 0.3916, |
|
"step": 4570 |
|
}, |
|
{ |
|
"epoch": 63.61, |
|
"learning_rate": 1.5671641791044777e-05, |
|
"loss": 0.391, |
|
"step": 4580 |
|
}, |
|
{ |
|
"epoch": 63.75, |
|
"learning_rate": 1.5611940298507463e-05, |
|
"loss": 0.405, |
|
"step": 4590 |
|
}, |
|
{ |
|
"epoch": 63.89, |
|
"learning_rate": 1.555223880597015e-05, |
|
"loss": 0.3837, |
|
"step": 4600 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"eval_loss": 0.24354642629623413, |
|
"eval_runtime": 125.7599, |
|
"eval_samples_per_second": 4.23, |
|
"eval_steps_per_second": 0.533, |
|
"eval_wer": 0.09905508774185254, |
|
"step": 4608 |
|
}, |
|
{ |
|
"epoch": 64.03, |
|
"learning_rate": 1.5492537313432835e-05, |
|
"loss": 0.4033, |
|
"step": 4610 |
|
}, |
|
{ |
|
"epoch": 64.17, |
|
"learning_rate": 1.5432835820895525e-05, |
|
"loss": 0.3812, |
|
"step": 4620 |
|
}, |
|
{ |
|
"epoch": 64.3, |
|
"learning_rate": 1.537313432835821e-05, |
|
"loss": 0.3991, |
|
"step": 4630 |
|
}, |
|
{ |
|
"epoch": 64.44, |
|
"learning_rate": 1.5313432835820896e-05, |
|
"loss": 0.373, |
|
"step": 4640 |
|
}, |
|
{ |
|
"epoch": 64.58, |
|
"learning_rate": 1.5253731343283584e-05, |
|
"loss": 0.3908, |
|
"step": 4650 |
|
}, |
|
{ |
|
"epoch": 64.72, |
|
"learning_rate": 1.519402985074627e-05, |
|
"loss": 0.3911, |
|
"step": 4660 |
|
}, |
|
{ |
|
"epoch": 64.86, |
|
"learning_rate": 1.5134328358208956e-05, |
|
"loss": 0.3842, |
|
"step": 4670 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 1.5074626865671642e-05, |
|
"loss": 0.3954, |
|
"step": 4680 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"eval_loss": 0.23839448392391205, |
|
"eval_runtime": 125.9622, |
|
"eval_samples_per_second": 4.223, |
|
"eval_steps_per_second": 0.532, |
|
"eval_wer": 0.09847657003278266, |
|
"step": 4680 |
|
}, |
|
{ |
|
"epoch": 65.14, |
|
"learning_rate": 1.501492537313433e-05, |
|
"loss": 0.3912, |
|
"step": 4690 |
|
}, |
|
{ |
|
"epoch": 65.28, |
|
"learning_rate": 1.4955223880597016e-05, |
|
"loss": 0.389, |
|
"step": 4700 |
|
}, |
|
{ |
|
"epoch": 65.42, |
|
"learning_rate": 1.4895522388059702e-05, |
|
"loss": 0.3712, |
|
"step": 4710 |
|
}, |
|
{ |
|
"epoch": 65.55, |
|
"learning_rate": 1.4835820895522391e-05, |
|
"loss": 0.3878, |
|
"step": 4720 |
|
}, |
|
{ |
|
"epoch": 65.69, |
|
"learning_rate": 1.4776119402985077e-05, |
|
"loss": 0.3931, |
|
"step": 4730 |
|
}, |
|
{ |
|
"epoch": 65.83, |
|
"learning_rate": 1.4716417910447763e-05, |
|
"loss": 0.3928, |
|
"step": 4740 |
|
}, |
|
{ |
|
"epoch": 65.97, |
|
"learning_rate": 1.4656716417910449e-05, |
|
"loss": 0.3726, |
|
"step": 4750 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"eval_loss": 0.25504812598228455, |
|
"eval_runtime": 126.2351, |
|
"eval_samples_per_second": 4.214, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.10085492061451437, |
|
"step": 4752 |
|
}, |
|
{ |
|
"epoch": 66.11, |
|
"learning_rate": 1.4597014925373137e-05, |
|
"loss": 0.4016, |
|
"step": 4760 |
|
}, |
|
{ |
|
"epoch": 66.25, |
|
"learning_rate": 1.4537313432835823e-05, |
|
"loss": 0.3882, |
|
"step": 4770 |
|
}, |
|
{ |
|
"epoch": 66.39, |
|
"learning_rate": 1.4477611940298509e-05, |
|
"loss": 0.3835, |
|
"step": 4780 |
|
}, |
|
{ |
|
"epoch": 66.53, |
|
"learning_rate": 1.4417910447761195e-05, |
|
"loss": 0.3901, |
|
"step": 4790 |
|
}, |
|
{ |
|
"epoch": 66.66, |
|
"learning_rate": 1.4358208955223882e-05, |
|
"loss": 0.3746, |
|
"step": 4800 |
|
}, |
|
{ |
|
"epoch": 66.8, |
|
"learning_rate": 1.4298507462686568e-05, |
|
"loss": 0.3787, |
|
"step": 4810 |
|
}, |
|
{ |
|
"epoch": 66.94, |
|
"learning_rate": 1.4238805970149254e-05, |
|
"loss": 0.3659, |
|
"step": 4820 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"eval_loss": 0.25230973958969116, |
|
"eval_runtime": 126.0733, |
|
"eval_samples_per_second": 4.22, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.09667673716012085, |
|
"step": 4824 |
|
}, |
|
{ |
|
"epoch": 67.08, |
|
"learning_rate": 1.4179104477611942e-05, |
|
"loss": 0.3873, |
|
"step": 4830 |
|
}, |
|
{ |
|
"epoch": 67.22, |
|
"learning_rate": 1.4119402985074628e-05, |
|
"loss": 0.3825, |
|
"step": 4840 |
|
}, |
|
{ |
|
"epoch": 67.36, |
|
"learning_rate": 1.4059701492537314e-05, |
|
"loss": 0.377, |
|
"step": 4850 |
|
}, |
|
{ |
|
"epoch": 67.5, |
|
"learning_rate": 1.4e-05, |
|
"loss": 0.3787, |
|
"step": 4860 |
|
}, |
|
{ |
|
"epoch": 67.64, |
|
"learning_rate": 1.394626865671642e-05, |
|
"loss": 0.3879, |
|
"step": 4870 |
|
}, |
|
{ |
|
"epoch": 67.78, |
|
"learning_rate": 1.3886567164179105e-05, |
|
"loss": 0.3969, |
|
"step": 4880 |
|
}, |
|
{ |
|
"epoch": 67.91, |
|
"learning_rate": 1.3826865671641791e-05, |
|
"loss": 0.376, |
|
"step": 4890 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"eval_loss": 0.2570798397064209, |
|
"eval_runtime": 126.3905, |
|
"eval_samples_per_second": 4.209, |
|
"eval_steps_per_second": 0.53, |
|
"eval_wer": 0.09731953461464292, |
|
"step": 4896 |
|
}, |
|
{ |
|
"epoch": 68.06, |
|
"learning_rate": 1.3767164179104479e-05, |
|
"loss": 0.4091, |
|
"step": 4900 |
|
}, |
|
{ |
|
"epoch": 68.19, |
|
"learning_rate": 1.3707462686567165e-05, |
|
"loss": 0.3776, |
|
"step": 4910 |
|
}, |
|
{ |
|
"epoch": 68.33, |
|
"learning_rate": 1.364776119402985e-05, |
|
"loss": 0.3937, |
|
"step": 4920 |
|
}, |
|
{ |
|
"epoch": 68.47, |
|
"learning_rate": 1.3588059701492537e-05, |
|
"loss": 0.3853, |
|
"step": 4930 |
|
}, |
|
{ |
|
"epoch": 68.61, |
|
"learning_rate": 1.3528358208955226e-05, |
|
"loss": 0.3846, |
|
"step": 4940 |
|
}, |
|
{ |
|
"epoch": 68.75, |
|
"learning_rate": 1.3468656716417912e-05, |
|
"loss": 0.3816, |
|
"step": 4950 |
|
}, |
|
{ |
|
"epoch": 68.89, |
|
"learning_rate": 1.3408955223880598e-05, |
|
"loss": 0.3759, |
|
"step": 4960 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"eval_loss": 0.25279518961906433, |
|
"eval_runtime": 125.7411, |
|
"eval_samples_per_second": 4.231, |
|
"eval_steps_per_second": 0.533, |
|
"eval_wer": 0.09809089156006942, |
|
"step": 4968 |
|
}, |
|
{ |
|
"epoch": 69.03, |
|
"learning_rate": 1.3349253731343286e-05, |
|
"loss": 0.4008, |
|
"step": 4970 |
|
}, |
|
{ |
|
"epoch": 69.17, |
|
"learning_rate": 1.3289552238805972e-05, |
|
"loss": 0.3871, |
|
"step": 4980 |
|
}, |
|
{ |
|
"epoch": 69.3, |
|
"learning_rate": 1.3229850746268658e-05, |
|
"loss": 0.3825, |
|
"step": 4990 |
|
}, |
|
{ |
|
"epoch": 69.44, |
|
"learning_rate": 1.3170149253731344e-05, |
|
"loss": 0.3718, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 69.58, |
|
"learning_rate": 1.3110447761194031e-05, |
|
"loss": 0.3856, |
|
"step": 5010 |
|
}, |
|
{ |
|
"epoch": 69.72, |
|
"learning_rate": 1.3050746268656717e-05, |
|
"loss": 0.3877, |
|
"step": 5020 |
|
}, |
|
{ |
|
"epoch": 69.86, |
|
"learning_rate": 1.2991044776119403e-05, |
|
"loss": 0.3755, |
|
"step": 5030 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 1.293134328358209e-05, |
|
"loss": 0.3862, |
|
"step": 5040 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_loss": 0.2495904266834259, |
|
"eval_runtime": 126.3909, |
|
"eval_samples_per_second": 4.209, |
|
"eval_steps_per_second": 0.53, |
|
"eval_wer": 0.09757665359645176, |
|
"step": 5040 |
|
}, |
|
{ |
|
"epoch": 70.14, |
|
"learning_rate": 1.2871641791044777e-05, |
|
"loss": 0.3821, |
|
"step": 5050 |
|
}, |
|
{ |
|
"epoch": 70.28, |
|
"learning_rate": 1.2811940298507463e-05, |
|
"loss": 0.3767, |
|
"step": 5060 |
|
}, |
|
{ |
|
"epoch": 70.42, |
|
"learning_rate": 1.2752238805970149e-05, |
|
"loss": 0.368, |
|
"step": 5070 |
|
}, |
|
{ |
|
"epoch": 70.55, |
|
"learning_rate": 1.2692537313432838e-05, |
|
"loss": 0.3875, |
|
"step": 5080 |
|
}, |
|
{ |
|
"epoch": 70.69, |
|
"learning_rate": 1.2632835820895524e-05, |
|
"loss": 0.3934, |
|
"step": 5090 |
|
}, |
|
{ |
|
"epoch": 70.83, |
|
"learning_rate": 1.257313432835821e-05, |
|
"loss": 0.3745, |
|
"step": 5100 |
|
}, |
|
{ |
|
"epoch": 70.97, |
|
"learning_rate": 1.2513432835820896e-05, |
|
"loss": 0.367, |
|
"step": 5110 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"eval_loss": 0.24647711217403412, |
|
"eval_runtime": 126.0137, |
|
"eval_samples_per_second": 4.222, |
|
"eval_steps_per_second": 0.532, |
|
"eval_wer": 0.09416982708748474, |
|
"step": 5112 |
|
}, |
|
{ |
|
"epoch": 71.11, |
|
"learning_rate": 1.2453731343283584e-05, |
|
"loss": 0.3946, |
|
"step": 5120 |
|
}, |
|
{ |
|
"epoch": 71.25, |
|
"learning_rate": 1.239402985074627e-05, |
|
"loss": 0.3764, |
|
"step": 5130 |
|
}, |
|
{ |
|
"epoch": 71.39, |
|
"learning_rate": 1.2334328358208956e-05, |
|
"loss": 0.3778, |
|
"step": 5140 |
|
}, |
|
{ |
|
"epoch": 71.53, |
|
"learning_rate": 1.2274626865671643e-05, |
|
"loss": 0.3732, |
|
"step": 5150 |
|
}, |
|
{ |
|
"epoch": 71.66, |
|
"learning_rate": 1.221492537313433e-05, |
|
"loss": 0.3759, |
|
"step": 5160 |
|
}, |
|
{ |
|
"epoch": 71.8, |
|
"learning_rate": 1.2155223880597015e-05, |
|
"loss": 0.3835, |
|
"step": 5170 |
|
}, |
|
{ |
|
"epoch": 71.94, |
|
"learning_rate": 1.2095522388059701e-05, |
|
"loss": 0.3688, |
|
"step": 5180 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"eval_loss": 0.2504793405532837, |
|
"eval_runtime": 126.1404, |
|
"eval_samples_per_second": 4.218, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.09680529665102526, |
|
"step": 5184 |
|
}, |
|
{ |
|
"epoch": 72.08, |
|
"learning_rate": 1.2035820895522389e-05, |
|
"loss": 0.3803, |
|
"step": 5190 |
|
}, |
|
{ |
|
"epoch": 72.22, |
|
"learning_rate": 1.1976119402985075e-05, |
|
"loss": 0.3717, |
|
"step": 5200 |
|
}, |
|
{ |
|
"epoch": 72.36, |
|
"learning_rate": 1.1916417910447761e-05, |
|
"loss": 0.3727, |
|
"step": 5210 |
|
}, |
|
{ |
|
"epoch": 72.5, |
|
"learning_rate": 1.1856716417910447e-05, |
|
"loss": 0.3799, |
|
"step": 5220 |
|
}, |
|
{ |
|
"epoch": 72.64, |
|
"learning_rate": 1.1797014925373136e-05, |
|
"loss": 0.3785, |
|
"step": 5230 |
|
}, |
|
{ |
|
"epoch": 72.78, |
|
"learning_rate": 1.1737313432835822e-05, |
|
"loss": 0.3892, |
|
"step": 5240 |
|
}, |
|
{ |
|
"epoch": 72.91, |
|
"learning_rate": 1.1677611940298508e-05, |
|
"loss": 0.3817, |
|
"step": 5250 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"eval_loss": 0.25254279375076294, |
|
"eval_runtime": 126.2708, |
|
"eval_samples_per_second": 4.213, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.09725525486919072, |
|
"step": 5256 |
|
}, |
|
{ |
|
"epoch": 73.06, |
|
"learning_rate": 1.1617910447761196e-05, |
|
"loss": 0.4035, |
|
"step": 5260 |
|
}, |
|
{ |
|
"epoch": 73.19, |
|
"learning_rate": 1.1558208955223882e-05, |
|
"loss": 0.3606, |
|
"step": 5270 |
|
}, |
|
{ |
|
"epoch": 73.33, |
|
"learning_rate": 1.1498507462686568e-05, |
|
"loss": 0.3862, |
|
"step": 5280 |
|
}, |
|
{ |
|
"epoch": 73.47, |
|
"learning_rate": 1.1438805970149254e-05, |
|
"loss": 0.3756, |
|
"step": 5290 |
|
}, |
|
{ |
|
"epoch": 73.61, |
|
"learning_rate": 1.1379104477611942e-05, |
|
"loss": 0.3734, |
|
"step": 5300 |
|
}, |
|
{ |
|
"epoch": 73.75, |
|
"learning_rate": 1.1319402985074628e-05, |
|
"loss": 0.3769, |
|
"step": 5310 |
|
}, |
|
{ |
|
"epoch": 73.89, |
|
"learning_rate": 1.1259701492537314e-05, |
|
"loss": 0.3675, |
|
"step": 5320 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"eval_loss": 0.2441263347864151, |
|
"eval_runtime": 126.579, |
|
"eval_samples_per_second": 4.203, |
|
"eval_steps_per_second": 0.529, |
|
"eval_wer": 0.0963553384328598, |
|
"step": 5328 |
|
}, |
|
{ |
|
"epoch": 74.03, |
|
"learning_rate": 1.1200000000000001e-05, |
|
"loss": 0.4084, |
|
"step": 5330 |
|
}, |
|
{ |
|
"epoch": 74.17, |
|
"learning_rate": 1.1140298507462687e-05, |
|
"loss": 0.3623, |
|
"step": 5340 |
|
}, |
|
{ |
|
"epoch": 74.3, |
|
"learning_rate": 1.1080597014925373e-05, |
|
"loss": 0.3865, |
|
"step": 5350 |
|
}, |
|
{ |
|
"epoch": 74.44, |
|
"learning_rate": 1.1020895522388059e-05, |
|
"loss": 0.3603, |
|
"step": 5360 |
|
}, |
|
{ |
|
"epoch": 74.58, |
|
"learning_rate": 1.0961194029850748e-05, |
|
"loss": 0.384, |
|
"step": 5370 |
|
}, |
|
{ |
|
"epoch": 74.72, |
|
"learning_rate": 1.0907462686567165e-05, |
|
"loss": 0.4013, |
|
"step": 5380 |
|
}, |
|
{ |
|
"epoch": 74.86, |
|
"learning_rate": 1.084776119402985e-05, |
|
"loss": 0.3818, |
|
"step": 5390 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 1.078805970149254e-05, |
|
"loss": 0.3727, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"eval_loss": 0.24404919147491455, |
|
"eval_runtime": 126.3208, |
|
"eval_samples_per_second": 4.211, |
|
"eval_steps_per_second": 0.53, |
|
"eval_wer": 0.09725525486919072, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 75.14, |
|
"learning_rate": 1.0728358208955226e-05, |
|
"loss": 0.3731, |
|
"step": 5410 |
|
}, |
|
{ |
|
"epoch": 75.28, |
|
"learning_rate": 1.0668656716417912e-05, |
|
"loss": 0.3824, |
|
"step": 5420 |
|
}, |
|
{ |
|
"epoch": 75.42, |
|
"learning_rate": 1.0608955223880598e-05, |
|
"loss": 0.3609, |
|
"step": 5430 |
|
}, |
|
{ |
|
"epoch": 75.55, |
|
"learning_rate": 1.0549253731343285e-05, |
|
"loss": 0.3729, |
|
"step": 5440 |
|
}, |
|
{ |
|
"epoch": 75.69, |
|
"learning_rate": 1.0489552238805971e-05, |
|
"loss": 0.3806, |
|
"step": 5450 |
|
}, |
|
{ |
|
"epoch": 75.83, |
|
"learning_rate": 1.0429850746268657e-05, |
|
"loss": 0.3742, |
|
"step": 5460 |
|
}, |
|
{ |
|
"epoch": 75.97, |
|
"learning_rate": 1.0370149253731343e-05, |
|
"loss": 0.371, |
|
"step": 5470 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"eval_loss": 0.25098714232444763, |
|
"eval_runtime": 126.6812, |
|
"eval_samples_per_second": 4.2, |
|
"eval_steps_per_second": 0.529, |
|
"eval_wer": 0.09706241563283409, |
|
"step": 5472 |
|
}, |
|
{ |
|
"epoch": 76.11, |
|
"learning_rate": 1.0310447761194031e-05, |
|
"loss": 0.378, |
|
"step": 5480 |
|
}, |
|
{ |
|
"epoch": 76.25, |
|
"learning_rate": 1.0250746268656717e-05, |
|
"loss": 0.391, |
|
"step": 5490 |
|
}, |
|
{ |
|
"epoch": 76.39, |
|
"learning_rate": 1.0191044776119403e-05, |
|
"loss": 0.3626, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 76.53, |
|
"learning_rate": 1.013134328358209e-05, |
|
"loss": 0.3842, |
|
"step": 5510 |
|
}, |
|
{ |
|
"epoch": 76.66, |
|
"learning_rate": 1.0071641791044777e-05, |
|
"loss": 0.3681, |
|
"step": 5520 |
|
}, |
|
{ |
|
"epoch": 76.8, |
|
"learning_rate": 1.0011940298507463e-05, |
|
"loss": 0.385, |
|
"step": 5530 |
|
}, |
|
{ |
|
"epoch": 76.94, |
|
"learning_rate": 9.95223880597015e-06, |
|
"loss": 0.3761, |
|
"step": 5540 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"eval_loss": 0.23980902135372162, |
|
"eval_runtime": 125.9466, |
|
"eval_samples_per_second": 4.224, |
|
"eval_steps_per_second": 0.532, |
|
"eval_wer": 0.09609821945105097, |
|
"step": 5544 |
|
}, |
|
{ |
|
"epoch": 77.08, |
|
"learning_rate": 9.892537313432836e-06, |
|
"loss": 0.3814, |
|
"step": 5550 |
|
}, |
|
{ |
|
"epoch": 77.22, |
|
"learning_rate": 9.832835820895524e-06, |
|
"loss": 0.3764, |
|
"step": 5560 |
|
}, |
|
{ |
|
"epoch": 77.36, |
|
"learning_rate": 9.77313432835821e-06, |
|
"loss": 0.3795, |
|
"step": 5570 |
|
}, |
|
{ |
|
"epoch": 77.5, |
|
"learning_rate": 9.713432835820896e-06, |
|
"loss": 0.3813, |
|
"step": 5580 |
|
}, |
|
{ |
|
"epoch": 77.64, |
|
"learning_rate": 9.653731343283584e-06, |
|
"loss": 0.3616, |
|
"step": 5590 |
|
}, |
|
{ |
|
"epoch": 77.78, |
|
"learning_rate": 9.59402985074627e-06, |
|
"loss": 0.3848, |
|
"step": 5600 |
|
}, |
|
{ |
|
"epoch": 77.91, |
|
"learning_rate": 9.534328358208955e-06, |
|
"loss": 0.358, |
|
"step": 5610 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"eval_loss": 0.2484768182039261, |
|
"eval_runtime": 126.2204, |
|
"eval_samples_per_second": 4.215, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.09558398148743331, |
|
"step": 5616 |
|
}, |
|
{ |
|
"epoch": 78.06, |
|
"learning_rate": 9.474626865671641e-06, |
|
"loss": 0.3944, |
|
"step": 5620 |
|
}, |
|
{ |
|
"epoch": 78.19, |
|
"learning_rate": 9.414925373134329e-06, |
|
"loss": 0.3633, |
|
"step": 5630 |
|
}, |
|
{ |
|
"epoch": 78.33, |
|
"learning_rate": 9.355223880597015e-06, |
|
"loss": 0.3793, |
|
"step": 5640 |
|
}, |
|
{ |
|
"epoch": 78.47, |
|
"learning_rate": 9.295522388059703e-06, |
|
"loss": 0.3791, |
|
"step": 5650 |
|
}, |
|
{ |
|
"epoch": 78.61, |
|
"learning_rate": 9.235820895522389e-06, |
|
"loss": 0.3703, |
|
"step": 5660 |
|
}, |
|
{ |
|
"epoch": 78.75, |
|
"learning_rate": 9.176119402985075e-06, |
|
"loss": 0.3694, |
|
"step": 5670 |
|
}, |
|
{ |
|
"epoch": 78.89, |
|
"learning_rate": 9.116417910447762e-06, |
|
"loss": 0.3521, |
|
"step": 5680 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"eval_loss": 0.24381594359874725, |
|
"eval_runtime": 126.0722, |
|
"eval_samples_per_second": 4.22, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.0955197017419811, |
|
"step": 5688 |
|
}, |
|
{ |
|
"epoch": 79.03, |
|
"learning_rate": 9.056716417910448e-06, |
|
"loss": 0.3747, |
|
"step": 5690 |
|
}, |
|
{ |
|
"epoch": 79.17, |
|
"learning_rate": 8.997014925373136e-06, |
|
"loss": 0.3645, |
|
"step": 5700 |
|
}, |
|
{ |
|
"epoch": 79.3, |
|
"learning_rate": 8.937313432835822e-06, |
|
"loss": 0.378, |
|
"step": 5710 |
|
}, |
|
{ |
|
"epoch": 79.44, |
|
"learning_rate": 8.877611940298508e-06, |
|
"loss": 0.3623, |
|
"step": 5720 |
|
}, |
|
{ |
|
"epoch": 79.58, |
|
"learning_rate": 8.817910447761194e-06, |
|
"loss": 0.3844, |
|
"step": 5730 |
|
}, |
|
{ |
|
"epoch": 79.72, |
|
"learning_rate": 8.758208955223882e-06, |
|
"loss": 0.3721, |
|
"step": 5740 |
|
}, |
|
{ |
|
"epoch": 79.86, |
|
"learning_rate": 8.698507462686568e-06, |
|
"loss": 0.3784, |
|
"step": 5750 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"learning_rate": 8.638805970149254e-06, |
|
"loss": 0.3722, |
|
"step": 5760 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_loss": 0.24219834804534912, |
|
"eval_runtime": 125.9055, |
|
"eval_samples_per_second": 4.225, |
|
"eval_steps_per_second": 0.532, |
|
"eval_wer": 0.09667673716012085, |
|
"step": 5760 |
|
}, |
|
{ |
|
"epoch": 80.14, |
|
"learning_rate": 8.579104477611941e-06, |
|
"loss": 0.383, |
|
"step": 5770 |
|
}, |
|
{ |
|
"epoch": 80.28, |
|
"learning_rate": 8.519402985074627e-06, |
|
"loss": 0.3729, |
|
"step": 5780 |
|
}, |
|
{ |
|
"epoch": 80.42, |
|
"learning_rate": 8.459701492537315e-06, |
|
"loss": 0.3601, |
|
"step": 5790 |
|
}, |
|
{ |
|
"epoch": 80.55, |
|
"learning_rate": 8.400000000000001e-06, |
|
"loss": 0.3643, |
|
"step": 5800 |
|
}, |
|
{ |
|
"epoch": 80.69, |
|
"learning_rate": 8.340298507462687e-06, |
|
"loss": 0.3696, |
|
"step": 5810 |
|
}, |
|
{ |
|
"epoch": 80.83, |
|
"learning_rate": 8.280597014925373e-06, |
|
"loss": 0.3732, |
|
"step": 5820 |
|
}, |
|
{ |
|
"epoch": 80.97, |
|
"learning_rate": 8.22089552238806e-06, |
|
"loss": 0.3663, |
|
"step": 5830 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"eval_loss": 0.24629493057727814, |
|
"eval_runtime": 126.4381, |
|
"eval_samples_per_second": 4.208, |
|
"eval_steps_per_second": 0.53, |
|
"eval_wer": 0.09487690428745903, |
|
"step": 5832 |
|
}, |
|
{ |
|
"epoch": 81.11, |
|
"learning_rate": 8.161194029850748e-06, |
|
"loss": 0.3728, |
|
"step": 5840 |
|
}, |
|
{ |
|
"epoch": 81.25, |
|
"learning_rate": 8.101492537313434e-06, |
|
"loss": 0.3622, |
|
"step": 5850 |
|
}, |
|
{ |
|
"epoch": 81.39, |
|
"learning_rate": 8.04179104477612e-06, |
|
"loss": 0.3677, |
|
"step": 5860 |
|
}, |
|
{ |
|
"epoch": 81.53, |
|
"learning_rate": 7.982089552238806e-06, |
|
"loss": 0.3784, |
|
"step": 5870 |
|
}, |
|
{ |
|
"epoch": 81.66, |
|
"learning_rate": 7.922388059701494e-06, |
|
"loss": 0.3666, |
|
"step": 5880 |
|
}, |
|
{ |
|
"epoch": 81.8, |
|
"learning_rate": 7.86268656716418e-06, |
|
"loss": 0.3655, |
|
"step": 5890 |
|
}, |
|
{ |
|
"epoch": 81.94, |
|
"learning_rate": 7.802985074626866e-06, |
|
"loss": 0.3716, |
|
"step": 5900 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"eval_loss": 0.24671228229999542, |
|
"eval_runtime": 126.2924, |
|
"eval_samples_per_second": 4.212, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.09648389792376422, |
|
"step": 5904 |
|
}, |
|
{ |
|
"epoch": 82.08, |
|
"learning_rate": 7.743283582089552e-06, |
|
"loss": 0.3919, |
|
"step": 5910 |
|
}, |
|
{ |
|
"epoch": 82.22, |
|
"learning_rate": 7.68358208955224e-06, |
|
"loss": 0.3622, |
|
"step": 5920 |
|
}, |
|
{ |
|
"epoch": 82.36, |
|
"learning_rate": 7.623880597014925e-06, |
|
"loss": 0.3764, |
|
"step": 5930 |
|
}, |
|
{ |
|
"epoch": 82.5, |
|
"learning_rate": 7.564179104477612e-06, |
|
"loss": 0.3775, |
|
"step": 5940 |
|
}, |
|
{ |
|
"epoch": 82.64, |
|
"learning_rate": 7.5044776119403e-06, |
|
"loss": 0.3653, |
|
"step": 5950 |
|
}, |
|
{ |
|
"epoch": 82.78, |
|
"learning_rate": 7.444776119402986e-06, |
|
"loss": 0.3765, |
|
"step": 5960 |
|
}, |
|
{ |
|
"epoch": 82.91, |
|
"learning_rate": 7.385074626865673e-06, |
|
"loss": 0.361, |
|
"step": 5970 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"eval_loss": 0.245753675699234, |
|
"eval_runtime": 126.0053, |
|
"eval_samples_per_second": 4.222, |
|
"eval_steps_per_second": 0.532, |
|
"eval_wer": 0.09571254097833773, |
|
"step": 5976 |
|
}, |
|
{ |
|
"epoch": 83.06, |
|
"learning_rate": 7.325373134328359e-06, |
|
"loss": 0.3874, |
|
"step": 5980 |
|
}, |
|
{ |
|
"epoch": 83.19, |
|
"learning_rate": 7.2656716417910454e-06, |
|
"loss": 0.3644, |
|
"step": 5990 |
|
}, |
|
{ |
|
"epoch": 83.33, |
|
"learning_rate": 7.205970149253731e-06, |
|
"loss": 0.3625, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 83.47, |
|
"learning_rate": 7.146268656716418e-06, |
|
"loss": 0.3588, |
|
"step": 6010 |
|
}, |
|
{ |
|
"epoch": 83.61, |
|
"learning_rate": 7.086567164179104e-06, |
|
"loss": 0.3606, |
|
"step": 6020 |
|
}, |
|
{ |
|
"epoch": 83.75, |
|
"learning_rate": 7.026865671641792e-06, |
|
"loss": 0.379, |
|
"step": 6030 |
|
}, |
|
{ |
|
"epoch": 83.89, |
|
"learning_rate": 6.967164179104479e-06, |
|
"loss": 0.3626, |
|
"step": 6040 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"eval_loss": 0.25005075335502625, |
|
"eval_runtime": 126.2025, |
|
"eval_samples_per_second": 4.215, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.09423410683293694, |
|
"step": 6048 |
|
}, |
|
{ |
|
"epoch": 84.03, |
|
"learning_rate": 6.907462686567165e-06, |
|
"loss": 0.3799, |
|
"step": 6050 |
|
}, |
|
{ |
|
"epoch": 84.17, |
|
"learning_rate": 6.8477611940298515e-06, |
|
"loss": 0.353, |
|
"step": 6060 |
|
}, |
|
{ |
|
"epoch": 84.3, |
|
"learning_rate": 6.7880597014925375e-06, |
|
"loss": 0.3568, |
|
"step": 6070 |
|
}, |
|
{ |
|
"epoch": 84.44, |
|
"learning_rate": 6.728358208955224e-06, |
|
"loss": 0.3574, |
|
"step": 6080 |
|
}, |
|
{ |
|
"epoch": 84.58, |
|
"learning_rate": 6.66865671641791e-06, |
|
"loss": 0.3736, |
|
"step": 6090 |
|
}, |
|
{ |
|
"epoch": 84.72, |
|
"learning_rate": 6.608955223880598e-06, |
|
"loss": 0.3596, |
|
"step": 6100 |
|
}, |
|
{ |
|
"epoch": 84.86, |
|
"learning_rate": 6.549253731343284e-06, |
|
"loss": 0.3554, |
|
"step": 6110 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"learning_rate": 6.489552238805971e-06, |
|
"loss": 0.3755, |
|
"step": 6120 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"eval_loss": 0.24857336282730103, |
|
"eval_runtime": 126.5358, |
|
"eval_samples_per_second": 4.204, |
|
"eval_steps_per_second": 0.529, |
|
"eval_wer": 0.09429838657838915, |
|
"step": 6120 |
|
}, |
|
{ |
|
"epoch": 85.14, |
|
"learning_rate": 6.4298507462686576e-06, |
|
"loss": 0.3733, |
|
"step": 6130 |
|
}, |
|
{ |
|
"epoch": 85.28, |
|
"learning_rate": 6.3701492537313435e-06, |
|
"loss": 0.3769, |
|
"step": 6140 |
|
}, |
|
{ |
|
"epoch": 85.42, |
|
"learning_rate": 6.31044776119403e-06, |
|
"loss": 0.3559, |
|
"step": 6150 |
|
}, |
|
{ |
|
"epoch": 85.55, |
|
"learning_rate": 6.250746268656716e-06, |
|
"loss": 0.3741, |
|
"step": 6160 |
|
}, |
|
{ |
|
"epoch": 85.69, |
|
"learning_rate": 6.191044776119404e-06, |
|
"loss": 0.3596, |
|
"step": 6170 |
|
}, |
|
{ |
|
"epoch": 85.83, |
|
"learning_rate": 6.13134328358209e-06, |
|
"loss": 0.3658, |
|
"step": 6180 |
|
}, |
|
{ |
|
"epoch": 85.97, |
|
"learning_rate": 6.071641791044777e-06, |
|
"loss": 0.3693, |
|
"step": 6190 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"eval_loss": 0.2496652752161026, |
|
"eval_runtime": 126.003, |
|
"eval_samples_per_second": 4.222, |
|
"eval_steps_per_second": 0.532, |
|
"eval_wer": 0.0946840650511024, |
|
"step": 6192 |
|
}, |
|
{ |
|
"epoch": 86.11, |
|
"learning_rate": 6.011940298507463e-06, |
|
"loss": 0.362, |
|
"step": 6200 |
|
}, |
|
{ |
|
"epoch": 86.25, |
|
"learning_rate": 5.95223880597015e-06, |
|
"loss": 0.3745, |
|
"step": 6210 |
|
}, |
|
{ |
|
"epoch": 86.39, |
|
"learning_rate": 5.8925373134328364e-06, |
|
"loss": 0.3474, |
|
"step": 6220 |
|
}, |
|
{ |
|
"epoch": 86.53, |
|
"learning_rate": 5.832835820895522e-06, |
|
"loss": 0.3797, |
|
"step": 6230 |
|
}, |
|
{ |
|
"epoch": 86.66, |
|
"learning_rate": 5.77313432835821e-06, |
|
"loss": 0.3523, |
|
"step": 6240 |
|
}, |
|
{ |
|
"epoch": 86.8, |
|
"learning_rate": 5.713432835820896e-06, |
|
"loss": 0.3586, |
|
"step": 6250 |
|
}, |
|
{ |
|
"epoch": 86.94, |
|
"learning_rate": 5.653731343283583e-06, |
|
"loss": 0.3548, |
|
"step": 6260 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"eval_loss": 0.25147101283073425, |
|
"eval_runtime": 126.4802, |
|
"eval_samples_per_second": 4.206, |
|
"eval_steps_per_second": 0.53, |
|
"eval_wer": 0.09584110046924214, |
|
"step": 6264 |
|
}, |
|
{ |
|
"epoch": 87.08, |
|
"learning_rate": 5.594029850746269e-06, |
|
"loss": 0.3739, |
|
"step": 6270 |
|
}, |
|
{ |
|
"epoch": 87.22, |
|
"learning_rate": 5.534328358208956e-06, |
|
"loss": 0.3522, |
|
"step": 6280 |
|
}, |
|
{ |
|
"epoch": 87.36, |
|
"learning_rate": 5.474626865671642e-06, |
|
"loss": 0.3625, |
|
"step": 6290 |
|
}, |
|
{ |
|
"epoch": 87.5, |
|
"learning_rate": 5.4149253731343285e-06, |
|
"loss": 0.3659, |
|
"step": 6300 |
|
}, |
|
{ |
|
"epoch": 87.64, |
|
"learning_rate": 5.355223880597016e-06, |
|
"loss": 0.3545, |
|
"step": 6310 |
|
}, |
|
{ |
|
"epoch": 87.78, |
|
"learning_rate": 5.295522388059702e-06, |
|
"loss": 0.3676, |
|
"step": 6320 |
|
}, |
|
{ |
|
"epoch": 87.91, |
|
"learning_rate": 5.235820895522389e-06, |
|
"loss": 0.3583, |
|
"step": 6330 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"eval_loss": 0.2432575672864914, |
|
"eval_runtime": 126.5989, |
|
"eval_samples_per_second": 4.202, |
|
"eval_steps_per_second": 0.529, |
|
"eval_wer": 0.09371986886931928, |
|
"step": 6336 |
|
}, |
|
{ |
|
"epoch": 88.06, |
|
"learning_rate": 5.176119402985075e-06, |
|
"loss": 0.3746, |
|
"step": 6340 |
|
}, |
|
{ |
|
"epoch": 88.19, |
|
"learning_rate": 5.116417910447762e-06, |
|
"loss": 0.3734, |
|
"step": 6350 |
|
}, |
|
{ |
|
"epoch": 88.33, |
|
"learning_rate": 5.056716417910448e-06, |
|
"loss": 0.359, |
|
"step": 6360 |
|
}, |
|
{ |
|
"epoch": 88.47, |
|
"learning_rate": 4.9970149253731345e-06, |
|
"loss": 0.3556, |
|
"step": 6370 |
|
}, |
|
{ |
|
"epoch": 88.61, |
|
"learning_rate": 4.937313432835821e-06, |
|
"loss": 0.3668, |
|
"step": 6380 |
|
}, |
|
{ |
|
"epoch": 88.75, |
|
"learning_rate": 4.877611940298508e-06, |
|
"loss": 0.3639, |
|
"step": 6390 |
|
}, |
|
{ |
|
"epoch": 88.89, |
|
"learning_rate": 4.817910447761194e-06, |
|
"loss": 0.3678, |
|
"step": 6400 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"eval_loss": 0.24557340145111084, |
|
"eval_runtime": 126.3231, |
|
"eval_samples_per_second": 4.211, |
|
"eval_steps_per_second": 0.53, |
|
"eval_wer": 0.09333419039660602, |
|
"step": 6408 |
|
}, |
|
{ |
|
"epoch": 89.03, |
|
"learning_rate": 4.758208955223881e-06, |
|
"loss": 0.3804, |
|
"step": 6410 |
|
}, |
|
{ |
|
"epoch": 89.17, |
|
"learning_rate": 4.698507462686567e-06, |
|
"loss": 0.3585, |
|
"step": 6420 |
|
}, |
|
{ |
|
"epoch": 89.3, |
|
"learning_rate": 4.638805970149255e-06, |
|
"loss": 0.3746, |
|
"step": 6430 |
|
}, |
|
{ |
|
"epoch": 89.44, |
|
"learning_rate": 4.579104477611941e-06, |
|
"loss": 0.3697, |
|
"step": 6440 |
|
}, |
|
{ |
|
"epoch": 89.58, |
|
"learning_rate": 4.5194029850746274e-06, |
|
"loss": 0.3686, |
|
"step": 6450 |
|
}, |
|
{ |
|
"epoch": 89.72, |
|
"learning_rate": 4.459701492537314e-06, |
|
"loss": 0.3484, |
|
"step": 6460 |
|
}, |
|
{ |
|
"epoch": 89.86, |
|
"learning_rate": 4.4e-06, |
|
"loss": 0.3654, |
|
"step": 6470 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"learning_rate": 4.340298507462687e-06, |
|
"loss": 0.3718, |
|
"step": 6480 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_loss": 0.24429138004779816, |
|
"eval_runtime": 126.5268, |
|
"eval_samples_per_second": 4.205, |
|
"eval_steps_per_second": 0.53, |
|
"eval_wer": 0.09365558912386707, |
|
"step": 6480 |
|
}, |
|
{ |
|
"epoch": 90.14, |
|
"learning_rate": 4.280597014925373e-06, |
|
"loss": 0.3564, |
|
"step": 6490 |
|
}, |
|
{ |
|
"epoch": 90.28, |
|
"learning_rate": 4.22089552238806e-06, |
|
"loss": 0.3637, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 90.42, |
|
"learning_rate": 4.161194029850747e-06, |
|
"loss": 0.3622, |
|
"step": 6510 |
|
}, |
|
{ |
|
"epoch": 90.55, |
|
"learning_rate": 4.1014925373134335e-06, |
|
"loss": 0.3715, |
|
"step": 6520 |
|
}, |
|
{ |
|
"epoch": 90.69, |
|
"learning_rate": 4.04179104477612e-06, |
|
"loss": 0.3717, |
|
"step": 6530 |
|
}, |
|
{ |
|
"epoch": 90.83, |
|
"learning_rate": 3.982089552238806e-06, |
|
"loss": 0.3726, |
|
"step": 6540 |
|
}, |
|
{ |
|
"epoch": 90.97, |
|
"learning_rate": 3.922388059701493e-06, |
|
"loss": 0.3614, |
|
"step": 6550 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"eval_loss": 0.2457391619682312, |
|
"eval_runtime": 126.5865, |
|
"eval_samples_per_second": 4.203, |
|
"eval_steps_per_second": 0.529, |
|
"eval_wer": 0.09571254097833773, |
|
"step": 6552 |
|
}, |
|
{ |
|
"epoch": 91.11, |
|
"learning_rate": 3.862686567164179e-06, |
|
"loss": 0.3646, |
|
"step": 6560 |
|
}, |
|
{ |
|
"epoch": 91.25, |
|
"learning_rate": 3.802985074626866e-06, |
|
"loss": 0.3602, |
|
"step": 6570 |
|
}, |
|
{ |
|
"epoch": 91.39, |
|
"learning_rate": 3.7432835820895523e-06, |
|
"loss": 0.3383, |
|
"step": 6580 |
|
}, |
|
{ |
|
"epoch": 91.53, |
|
"learning_rate": 3.683582089552239e-06, |
|
"loss": 0.3785, |
|
"step": 6590 |
|
}, |
|
{ |
|
"epoch": 91.66, |
|
"learning_rate": 3.6238805970149255e-06, |
|
"loss": 0.3614, |
|
"step": 6600 |
|
}, |
|
{ |
|
"epoch": 91.8, |
|
"learning_rate": 3.5641791044776124e-06, |
|
"loss": 0.3635, |
|
"step": 6610 |
|
}, |
|
{ |
|
"epoch": 91.94, |
|
"learning_rate": 3.504477611940299e-06, |
|
"loss": 0.3541, |
|
"step": 6620 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"eval_loss": 0.2450367957353592, |
|
"eval_runtime": 126.1745, |
|
"eval_samples_per_second": 4.216, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.09526258276017227, |
|
"step": 6624 |
|
}, |
|
{ |
|
"epoch": 92.08, |
|
"learning_rate": 3.4447761194029856e-06, |
|
"loss": 0.3644, |
|
"step": 6630 |
|
}, |
|
{ |
|
"epoch": 92.22, |
|
"learning_rate": 3.385074626865672e-06, |
|
"loss": 0.3576, |
|
"step": 6640 |
|
}, |
|
{ |
|
"epoch": 92.36, |
|
"learning_rate": 3.3253731343283584e-06, |
|
"loss": 0.3518, |
|
"step": 6650 |
|
}, |
|
{ |
|
"epoch": 92.5, |
|
"learning_rate": 3.265671641791045e-06, |
|
"loss": 0.3674, |
|
"step": 6660 |
|
}, |
|
{ |
|
"epoch": 92.64, |
|
"learning_rate": 3.2059701492537316e-06, |
|
"loss": 0.3541, |
|
"step": 6670 |
|
}, |
|
{ |
|
"epoch": 92.78, |
|
"learning_rate": 3.146268656716418e-06, |
|
"loss": 0.3568, |
|
"step": 6680 |
|
}, |
|
{ |
|
"epoch": 92.91, |
|
"learning_rate": 3.0865671641791044e-06, |
|
"loss": 0.3671, |
|
"step": 6690 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"eval_loss": 0.24693527817726135, |
|
"eval_runtime": 126.234, |
|
"eval_samples_per_second": 4.214, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.09436266632384135, |
|
"step": 6696 |
|
}, |
|
{ |
|
"epoch": 93.06, |
|
"learning_rate": 3.0268656716417916e-06, |
|
"loss": 0.371, |
|
"step": 6700 |
|
}, |
|
{ |
|
"epoch": 93.19, |
|
"learning_rate": 2.967164179104478e-06, |
|
"loss": 0.341, |
|
"step": 6710 |
|
}, |
|
{ |
|
"epoch": 93.33, |
|
"learning_rate": 2.9074626865671644e-06, |
|
"loss": 0.3713, |
|
"step": 6720 |
|
}, |
|
{ |
|
"epoch": 93.47, |
|
"learning_rate": 2.8477611940298513e-06, |
|
"loss": 0.3562, |
|
"step": 6730 |
|
}, |
|
{ |
|
"epoch": 93.61, |
|
"learning_rate": 2.7880597014925377e-06, |
|
"loss": 0.3602, |
|
"step": 6740 |
|
}, |
|
{ |
|
"epoch": 93.75, |
|
"learning_rate": 2.728358208955224e-06, |
|
"loss": 0.3602, |
|
"step": 6750 |
|
}, |
|
{ |
|
"epoch": 93.89, |
|
"learning_rate": 2.6686567164179105e-06, |
|
"loss": 0.3503, |
|
"step": 6760 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"eval_loss": 0.245883047580719, |
|
"eval_runtime": 126.2391, |
|
"eval_samples_per_second": 4.214, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.09558398148743331, |
|
"step": 6768 |
|
}, |
|
{ |
|
"epoch": 94.03, |
|
"learning_rate": 2.6089552238805973e-06, |
|
"loss": 0.3712, |
|
"step": 6770 |
|
}, |
|
{ |
|
"epoch": 94.17, |
|
"learning_rate": 2.5492537313432837e-06, |
|
"loss": 0.3412, |
|
"step": 6780 |
|
}, |
|
{ |
|
"epoch": 94.3, |
|
"learning_rate": 2.4895522388059705e-06, |
|
"loss": 0.3595, |
|
"step": 6790 |
|
}, |
|
{ |
|
"epoch": 94.44, |
|
"learning_rate": 2.429850746268657e-06, |
|
"loss": 0.3359, |
|
"step": 6800 |
|
}, |
|
{ |
|
"epoch": 94.58, |
|
"learning_rate": 2.3701492537313433e-06, |
|
"loss": 0.3618, |
|
"step": 6810 |
|
}, |
|
{ |
|
"epoch": 94.72, |
|
"learning_rate": 2.31044776119403e-06, |
|
"loss": 0.3746, |
|
"step": 6820 |
|
}, |
|
{ |
|
"epoch": 94.86, |
|
"learning_rate": 2.2507462686567165e-06, |
|
"loss": 0.3701, |
|
"step": 6830 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"learning_rate": 2.1910447761194033e-06, |
|
"loss": 0.3692, |
|
"step": 6840 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"eval_loss": 0.24608032405376434, |
|
"eval_runtime": 125.9468, |
|
"eval_samples_per_second": 4.224, |
|
"eval_steps_per_second": 0.532, |
|
"eval_wer": 0.09442694606929357, |
|
"step": 6840 |
|
}, |
|
{ |
|
"epoch": 95.14, |
|
"learning_rate": 2.1313432835820897e-06, |
|
"loss": 0.3589, |
|
"step": 6850 |
|
}, |
|
{ |
|
"epoch": 95.28, |
|
"learning_rate": 2.0716417910447766e-06, |
|
"loss": 0.3706, |
|
"step": 6860 |
|
}, |
|
{ |
|
"epoch": 95.42, |
|
"learning_rate": 2.011940298507463e-06, |
|
"loss": 0.3565, |
|
"step": 6870 |
|
}, |
|
{ |
|
"epoch": 95.55, |
|
"learning_rate": 1.9522388059701494e-06, |
|
"loss": 0.3724, |
|
"step": 6880 |
|
}, |
|
{ |
|
"epoch": 95.69, |
|
"learning_rate": 1.892537313432836e-06, |
|
"loss": 0.3391, |
|
"step": 6890 |
|
}, |
|
{ |
|
"epoch": 95.83, |
|
"learning_rate": 1.8328358208955224e-06, |
|
"loss": 0.3806, |
|
"step": 6900 |
|
}, |
|
{ |
|
"epoch": 95.97, |
|
"learning_rate": 1.7731343283582092e-06, |
|
"loss": 0.362, |
|
"step": 6910 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"eval_loss": 0.2430029809474945, |
|
"eval_runtime": 126.2473, |
|
"eval_samples_per_second": 4.214, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.09449122581474577, |
|
"step": 6912 |
|
}, |
|
{ |
|
"epoch": 96.11, |
|
"learning_rate": 1.7134328358208956e-06, |
|
"loss": 0.3722, |
|
"step": 6920 |
|
}, |
|
{ |
|
"epoch": 96.25, |
|
"learning_rate": 1.6537313432835822e-06, |
|
"loss": 0.356, |
|
"step": 6930 |
|
}, |
|
{ |
|
"epoch": 96.39, |
|
"learning_rate": 1.5940298507462686e-06, |
|
"loss": 0.3568, |
|
"step": 6940 |
|
}, |
|
{ |
|
"epoch": 96.53, |
|
"learning_rate": 1.5343283582089554e-06, |
|
"loss": 0.3616, |
|
"step": 6950 |
|
}, |
|
{ |
|
"epoch": 96.66, |
|
"learning_rate": 1.474626865671642e-06, |
|
"loss": 0.3552, |
|
"step": 6960 |
|
}, |
|
{ |
|
"epoch": 96.8, |
|
"learning_rate": 1.4149253731343284e-06, |
|
"loss": 0.3627, |
|
"step": 6970 |
|
}, |
|
{ |
|
"epoch": 96.94, |
|
"learning_rate": 1.355223880597015e-06, |
|
"loss": 0.3431, |
|
"step": 6980 |
|
}, |
|
{ |
|
"epoch": 97.0, |
|
"eval_loss": 0.24542047083377838, |
|
"eval_runtime": 126.1334, |
|
"eval_samples_per_second": 4.218, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.09519830301472006, |
|
"step": 6984 |
|
}, |
|
{ |
|
"epoch": 97.08, |
|
"learning_rate": 1.2955223880597015e-06, |
|
"loss": 0.375, |
|
"step": 6990 |
|
}, |
|
{ |
|
"epoch": 97.22, |
|
"learning_rate": 1.235820895522388e-06, |
|
"loss": 0.3651, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 97.36, |
|
"learning_rate": 1.1761194029850747e-06, |
|
"loss": 0.3665, |
|
"step": 7010 |
|
}, |
|
{ |
|
"epoch": 97.5, |
|
"learning_rate": 1.1164179104477613e-06, |
|
"loss": 0.3532, |
|
"step": 7020 |
|
}, |
|
{ |
|
"epoch": 97.64, |
|
"learning_rate": 1.056716417910448e-06, |
|
"loss": 0.3494, |
|
"step": 7030 |
|
}, |
|
{ |
|
"epoch": 97.78, |
|
"learning_rate": 9.970149253731345e-07, |
|
"loss": 0.3649, |
|
"step": 7040 |
|
}, |
|
{ |
|
"epoch": 97.91, |
|
"learning_rate": 9.373134328358209e-07, |
|
"loss": 0.3597, |
|
"step": 7050 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"eval_loss": 0.2453533262014389, |
|
"eval_runtime": 126.2759, |
|
"eval_samples_per_second": 4.213, |
|
"eval_steps_per_second": 0.531, |
|
"eval_wer": 0.09404126759658031, |
|
"step": 7056 |
|
}, |
|
{ |
|
"epoch": 98.06, |
|
"learning_rate": 8.776119402985075e-07, |
|
"loss": 0.3771, |
|
"step": 7060 |
|
}, |
|
{ |
|
"epoch": 98.19, |
|
"learning_rate": 8.17910447761194e-07, |
|
"loss": 0.3527, |
|
"step": 7070 |
|
}, |
|
{ |
|
"epoch": 98.33, |
|
"learning_rate": 7.582089552238807e-07, |
|
"loss": 0.3718, |
|
"step": 7080 |
|
}, |
|
{ |
|
"epoch": 98.47, |
|
"learning_rate": 6.985074626865671e-07, |
|
"loss": 0.3572, |
|
"step": 7090 |
|
}, |
|
{ |
|
"epoch": 98.61, |
|
"learning_rate": 6.388059701492539e-07, |
|
"loss": 0.3653, |
|
"step": 7100 |
|
}, |
|
{ |
|
"epoch": 98.75, |
|
"learning_rate": 5.791044776119404e-07, |
|
"loss": 0.3659, |
|
"step": 7110 |
|
}, |
|
{ |
|
"epoch": 98.89, |
|
"learning_rate": 5.194029850746269e-07, |
|
"loss": 0.354, |
|
"step": 7120 |
|
}, |
|
{ |
|
"epoch": 99.0, |
|
"eval_loss": 0.24476979672908783, |
|
"eval_runtime": 126.4792, |
|
"eval_samples_per_second": 4.206, |
|
"eval_steps_per_second": 0.53, |
|
"eval_wer": 0.0939127081056759, |
|
"step": 7128 |
|
}, |
|
{ |
|
"epoch": 99.03, |
|
"learning_rate": 4.597014925373135e-07, |
|
"loss": 0.3659, |
|
"step": 7130 |
|
}, |
|
{ |
|
"epoch": 99.17, |
|
"learning_rate": 4.0000000000000003e-07, |
|
"loss": 0.3588, |
|
"step": 7140 |
|
}, |
|
{ |
|
"epoch": 99.3, |
|
"learning_rate": 3.402985074626866e-07, |
|
"loss": 0.3592, |
|
"step": 7150 |
|
}, |
|
{ |
|
"epoch": 99.44, |
|
"learning_rate": 2.8059701492537315e-07, |
|
"loss": 0.3593, |
|
"step": 7160 |
|
}, |
|
{ |
|
"epoch": 99.58, |
|
"learning_rate": 2.208955223880597e-07, |
|
"loss": 0.3569, |
|
"step": 7170 |
|
}, |
|
{ |
|
"epoch": 99.72, |
|
"learning_rate": 1.611940298507463e-07, |
|
"loss": 0.342, |
|
"step": 7180 |
|
}, |
|
{ |
|
"epoch": 99.86, |
|
"learning_rate": 1.0149253731343284e-07, |
|
"loss": 0.3651, |
|
"step": 7190 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"learning_rate": 4.17910447761194e-08, |
|
"loss": 0.3597, |
|
"step": 7200 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"eval_loss": 0.24488836526870728, |
|
"eval_runtime": 126.5664, |
|
"eval_samples_per_second": 4.203, |
|
"eval_steps_per_second": 0.529, |
|
"eval_wer": 0.09429838657838915, |
|
"step": 7200 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"step": 7200, |
|
"total_flos": 2.7207912943242897e+20, |
|
"train_loss": 0.4461001957125134, |
|
"train_runtime": 190312.0058, |
|
"train_samples_per_second": 2.427, |
|
"train_steps_per_second": 0.038 |
|
} |
|
], |
|
"max_steps": 7200, |
|
"num_train_epochs": 100, |
|
"total_flos": 2.7207912943242897e+20, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|