{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 48.349726775956285,
  "global_step": 4400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.22, "learning_rate": 2.5e-06, "loss": 3.5867, "step": 20},
    {"epoch": 0.44, "learning_rate": 5e-06, "loss": 3.5457, "step": 40},
    {"epoch": 0.66, "learning_rate": 7.5e-06, "loss": 3.4513, "step": 60},
    {"epoch": 0.87, "learning_rate": 1e-05, "loss": 3.3432, "step": 80},
    {"epoch": 1.1, "learning_rate": 1.25e-05, "loss": 3.3533, "step": 100},
    {"epoch": 1.1, "eval_loss": 3.2806732654571533, "eval_runtime": 190.4728, "eval_samples_per_second": 25.426, "eval_steps_per_second": 0.798, "eval_wer": 1.0, "step": 100},
    {"epoch": 1.32, "learning_rate": 1.5e-05, "loss": 3.2217, "step": 120},
    {"epoch": 1.54, "learning_rate": 1.7500000000000002e-05, "loss": 3.1765, "step": 140},
    {"epoch": 1.75, "learning_rate": 2e-05, "loss": 3.1408, "step": 160},
    {"epoch": 1.97, "learning_rate": 2.2499999999999998e-05, "loss": 3.1165, "step": 180},
    {"epoch": 2.2, "learning_rate": 2.5e-05, "loss": 3.1709, "step": 200},
    {"epoch": 2.2, "eval_loss": 3.1325438022613525, "eval_runtime": 192.4978, "eval_samples_per_second": 25.159, "eval_steps_per_second": 0.79, "eval_wer": 1.0, "step": 200},
    {"epoch": 2.42, "learning_rate": 2.75e-05, "loss": 3.079, "step": 220},
    {"epoch": 2.63, "learning_rate": 3e-05, "loss": 3.0677, "step": 240},
    {"epoch": 2.85, "learning_rate": 3.2500000000000004e-05, "loss": 3.0656, "step": 260},
    {"epoch": 3.08, "learning_rate": 3.5000000000000004e-05, "loss": 3.1463, "step": 280},
    {"epoch": 3.3, "learning_rate": 3.75e-05, "loss": 3.0573, "step": 300},
    {"epoch": 3.3, "eval_loss": 3.0614514350891113, "eval_runtime": 194.36, "eval_samples_per_second": 24.918, "eval_steps_per_second": 0.782, "eval_wer": 1.0, "step": 300},
    {"epoch": 3.51, "learning_rate": 4e-05, "loss": 3.0511, "step": 320},
    {"epoch": 3.73, "learning_rate": 4.25e-05, "loss": 3.0358, "step": 340},
    {"epoch": 3.95, "learning_rate": 4.4999999999999996e-05, "loss": 3.0416, "step": 360},
    {"epoch": 4.17, "learning_rate": 4.75e-05, "loss": 3.1053, "step": 380},
    {"epoch": 4.39, "learning_rate": 5e-05, "loss": 3.0314, "step": 400},
    {"epoch": 4.39, "eval_loss": 3.0990231037139893, "eval_runtime": 198.3688, "eval_samples_per_second": 24.414, "eval_steps_per_second": 0.766, "eval_wer": 1.0, "step": 400},
    {"epoch": 4.61, "learning_rate": 5.25e-05, "loss": 3.0309, "step": 420},
    {"epoch": 4.83, "learning_rate": 5.5e-05, "loss": 3.0259, "step": 440},
    {"epoch": 5.05, "learning_rate": 5.75e-05, "loss": 3.0998, "step": 460},
    {"epoch": 5.27, "learning_rate": 6e-05, "loss": 3.0152, "step": 480},
    {"epoch": 5.49, "learning_rate": 6.25e-05, "loss": 3.0129, "step": 500},
    {"epoch": 5.49, "eval_loss": 3.039973497390747, "eval_runtime": 190.8567, "eval_samples_per_second": 25.375, "eval_steps_per_second": 0.796, "eval_wer": 1.0, "step": 500},
    {"epoch": 5.71, "learning_rate": 6.500000000000001e-05, "loss": 3.0088, "step": 520},
    {"epoch": 5.93, "learning_rate": 6.75e-05, "loss": 3.0051, "step": 540},
    {"epoch": 6.15, "learning_rate": 7.000000000000001e-05, "loss": 3.073, "step": 560},
    {"epoch": 6.37, "learning_rate": 7.25e-05, "loss": 3.0031, "step": 580},
    {"epoch": 6.59, "learning_rate": 7.5e-05, "loss": 2.9964, "step": 600},
    {"epoch": 6.59, "eval_loss": 2.998962640762329, "eval_runtime": 193.5213, "eval_samples_per_second": 25.026, "eval_steps_per_second": 0.785, "eval_wer": 1.0, "step": 600},
    {"epoch": 6.81, "learning_rate": 7.75e-05, "loss": 2.9921, "step": 620},
    {"epoch": 7.03, "learning_rate": 8e-05, "loss": 3.0665, "step": 640},
    {"epoch": 7.25, "learning_rate": 8.25e-05, "loss": 2.9826, "step": 660},
    {"epoch": 7.47, "learning_rate": 8.5e-05, "loss": 2.9689, "step": 680},
    {"epoch": 7.69, "learning_rate": 8.75e-05, "loss": 2.9602, "step": 700},
    {"epoch": 7.69, "eval_loss": 2.9620397090911865, "eval_runtime": 193.5851, "eval_samples_per_second": 25.017, "eval_steps_per_second": 0.785, "eval_wer": 1.0, "step": 700},
    {"epoch": 7.91, "learning_rate": 8.999999999999999e-05, "loss": 2.9639, "step": 720},
    {"epoch": 8.13, "learning_rate": 9.25e-05, "loss": 3.0215, "step": 740},
    {"epoch": 8.35, "learning_rate": 9.5e-05, "loss": 2.9454, "step": 760},
    {"epoch": 8.57, "learning_rate": 9.750000000000001e-05, "loss": 2.9239, "step": 780},
    {"epoch": 8.79, "learning_rate": 0.0001, "loss": 2.8756, "step": 800},
    {"epoch": 8.79, "eval_loss": 2.7302000522613525, "eval_runtime": 191.8065, "eval_samples_per_second": 25.249, "eval_steps_per_second": 0.792, "eval_wer": 1.0, "step": 800},
    {"epoch": 9.01, "learning_rate": 0.0001025, "loss": 2.8933, "step": 820},
    {"epoch": 9.23, "learning_rate": 0.000105, "loss": 2.7318, "step": 840},
    {"epoch": 9.45, "learning_rate": 0.0001075, "loss": 2.5941, "step": 860},
    {"epoch": 9.67, "learning_rate": 0.00011, "loss": 2.4441, "step": 880},
    {"epoch": 9.89, "learning_rate": 0.00011250000000000001, "loss": 2.2931, "step": 900},
    {"epoch": 9.89, "eval_loss": 1.5057899951934814, "eval_runtime": 196.4368, "eval_samples_per_second": 24.654, "eval_steps_per_second": 0.774, "eval_wer": 0.9775759296054499, "step": 900},
    {"epoch": 10.11, "learning_rate": 0.000115, "loss": 2.1999, "step": 920},
    {"epoch": 10.33, "learning_rate": 0.0001175, "loss": 2.0574, "step": 940},
    {"epoch": 10.55, "learning_rate": 0.00012, "loss": 1.9672, "step": 960},
    {"epoch": 10.77, "learning_rate": 0.0001225, "loss": 1.9015, "step": 980},
    {"epoch": 10.98, "learning_rate": 0.000125, "loss": 1.8427, "step": 1000},
    {"epoch": 10.98, "eval_loss": 0.9154536724090576, "eval_runtime": 203.1168, "eval_samples_per_second": 23.843, "eval_steps_per_second": 0.748, "eval_wer": 0.7832245245529378, "step": 1000},
    {"epoch": 11.21, "learning_rate": 0.0001275, "loss": 1.8155, "step": 1020},
    {"epoch": 11.43, "learning_rate": 0.00013000000000000002, "loss": 1.6767, "step": 1040},
    {"epoch": 11.64, "learning_rate": 0.00013250000000000002, "loss": 1.5184, "step": 1060},
    {"epoch": 11.86, "learning_rate": 0.000135, "loss": 1.4505, "step": 1080},
    {"epoch": 12.09, "learning_rate": 0.0001375, "loss": 1.4286, "step": 1100},
    {"epoch": 12.09, "eval_loss": 0.4074769914150238, "eval_runtime": 194.7798, "eval_samples_per_second": 24.864, "eval_steps_per_second": 0.78, "eval_wer": 0.37964802724950325, "step": 1100},
    {"epoch": 12.31, "learning_rate": 0.00014000000000000001, "loss": 1.3478, "step": 1120},
    {"epoch": 12.52, "learning_rate": 0.0001425, "loss": 1.3164, "step": 1140},
    {"epoch": 12.74, "learning_rate": 0.000145, "loss": 1.269, "step": 1160},
    {"epoch": 12.96, "learning_rate": 0.0001475, "loss": 1.2292, "step": 1180},
    {"epoch": 13.19, "learning_rate": 0.00015, "loss": 1.2229, "step": 1200},
    {"epoch": 13.19, "eval_loss": 0.28933778405189514, "eval_runtime": 192.6398, "eval_samples_per_second": 25.14, "eval_steps_per_second": 0.789, "eval_wer": 0.26520011353959694, "step": 1200},
    {"epoch": 13.4, "learning_rate": 0.0001525, "loss": 1.182, "step": 1220},
    {"epoch": 13.62, "learning_rate": 0.000155, "loss": 1.1768, "step": 1240},
    {"epoch": 13.84, "learning_rate": 0.0001575, "loss": 1.1534, "step": 1260},
    {"epoch": 14.07, "learning_rate": 0.00016, "loss": 1.1832, "step": 1280},
    {"epoch": 14.28, "learning_rate": 0.00016250000000000002, "loss": 1.1106, "step": 1300},
    {"epoch": 14.28, "eval_loss": 0.24685777723789215, "eval_runtime": 198.5067, "eval_samples_per_second": 24.397, "eval_steps_per_second": 0.766, "eval_wer": 0.2253760999148453, "step": 1300},
    {"epoch": 14.5, "learning_rate": 0.000165, "loss": 1.1217, "step": 1320},
    {"epoch": 14.72, "learning_rate": 0.0001675, "loss": 1.1236, "step": 1340},
    {"epoch": 14.94, "learning_rate": 0.00017, "loss": 1.0969, "step": 1360},
    {"epoch": 15.16, "learning_rate": 0.0001725, "loss": 1.0971, "step": 1380},
    {"epoch": 15.38, "learning_rate": 0.000175, "loss": 1.0663, "step": 1400},
    {"epoch": 15.38, "eval_loss": 0.22192780673503876, "eval_runtime": 189.4566, "eval_samples_per_second": 25.563, "eval_steps_per_second": 0.802, "eval_wer": 0.19727504967357365, "step": 1400},
    {"epoch": 15.6, "learning_rate": 0.0001775, "loss": 1.0733, "step": 1420},
    {"epoch": 15.82, "learning_rate": 0.00017999999999999998, "loss": 1.0635, "step": 1440},
    {"epoch": 16.04, "learning_rate": 0.0001825, "loss": 1.1079, "step": 1460},
    {"epoch": 16.26, "learning_rate": 0.000185, "loss": 1.0467, "step": 1480},
    {"epoch": 16.48, "learning_rate": 0.0001875, "loss": 1.0667, "step": 1500},
    {"epoch": 16.48, "eval_loss": 0.21288961172103882, "eval_runtime": 189.0078, "eval_samples_per_second": 25.623, "eval_steps_per_second": 0.804, "eval_wer": 0.18944081748509792, "step": 1500},
    {"epoch": 16.7, "learning_rate": 0.00019, "loss": 1.0204, "step": 1520},
    {"epoch": 16.92, "learning_rate": 0.00019250000000000002, "loss": 1.0356, "step": 1540},
    {"epoch": 17.14, "learning_rate": 0.00019500000000000002, "loss": 1.0538, "step": 1560},
    {"epoch": 17.36, "learning_rate": 0.0001975, "loss": 1.0228, "step": 1580},
    {"epoch": 17.58, "learning_rate": 0.0002, "loss": 1.0193, "step": 1600},
    {"epoch": 17.58, "eval_loss": 0.1991206556558609, "eval_runtime": 191.1717, "eval_samples_per_second": 25.333, "eval_steps_per_second": 0.795, "eval_wer": 0.17885325007096226, "step": 1600},
    {"epoch": 17.8, "learning_rate": 0.00020250000000000002, "loss": 1.0322, "step": 1620},
    {"epoch": 18.02, "learning_rate": 0.000205, "loss": 1.0176, "step": 1640},
    {"epoch": 18.24, "learning_rate": 0.0002075, "loss": 1.0272, "step": 1660},
    {"epoch": 18.46, "learning_rate": 0.00021, "loss": 0.9675, "step": 1680},
    {"epoch": 18.68, "learning_rate": 0.0002125, "loss": 0.9816, "step": 1700},
    {"epoch": 18.68, "eval_loss": 0.19402356445789337, "eval_runtime": 194.75, "eval_samples_per_second": 24.868, "eval_steps_per_second": 0.78, "eval_wer": 0.18010218563724098, "step": 1700},
    {"epoch": 18.9, "learning_rate": 0.000215, "loss": 1.0023, "step": 1720},
    {"epoch": 19.12, "learning_rate": 0.0002175, "loss": 1.013, "step": 1740},
    {"epoch": 19.34, "learning_rate": 0.00022, "loss": 0.9664, "step": 1760},
    {"epoch": 19.56, "learning_rate": 0.00022250000000000001, "loss": 0.9736, "step": 1780},
    {"epoch": 19.78, "learning_rate": 0.00022500000000000002, "loss": 0.9814, "step": 1800},
    {"epoch": 19.78, "eval_loss": 0.18596723675727844, "eval_runtime": 188.4639, "eval_samples_per_second": 25.697, "eval_steps_per_second": 0.807, "eval_wer": 0.16667612829974454, "step": 1800},
    {"epoch": 19.99, "learning_rate": 0.0002275, "loss": 1.0064, "step": 1820},
    {"epoch": 20.22, "learning_rate": 0.00023, "loss": 0.9583, "step": 1840},
    {"epoch": 20.44, "learning_rate": 0.0002325, "loss": 0.9646, "step": 1860},
    {"epoch": 20.66, "learning_rate": 0.000235, "loss": 0.9762, "step": 1880},
    {"epoch": 20.87, "learning_rate": 0.0002375, "loss": 0.9787, "step": 1900},
    {"epoch": 20.87, "eval_loss": 0.18878202140331268, "eval_runtime": 188.244, "eval_samples_per_second": 25.727, "eval_steps_per_second": 0.807, "eval_wer": 0.16420664206642066, "step": 1900},
    {"epoch": 21.1, "learning_rate": 0.00024, "loss": 1.0218, "step": 1920},
    {"epoch": 21.32, "learning_rate": 0.00024249999999999999, "loss": 0.9505, "step": 1940},
    {"epoch": 21.54, "learning_rate": 0.000245, "loss": 0.9554, "step": 1960},
    {"epoch": 21.75, "learning_rate": 0.0002475, "loss": 0.9728, "step": 1980},
    {"epoch": 21.97, "learning_rate": 0.00025, "loss": 0.9699, "step": 2000},
    {"epoch": 21.97, "eval_loss": 0.18748582899570465, "eval_runtime": 190.7875, "eval_samples_per_second": 25.384, "eval_steps_per_second": 0.797, "eval_wer": 0.17042293499858074, "step": 2000},
    {"epoch": 22.2, "learning_rate": 0.00024803921568627453, "loss": 0.9624, "step": 2020},
    {"epoch": 22.42, "learning_rate": 0.000246078431372549, "loss": 0.9419, "step": 2040},
    {"epoch": 22.63, "learning_rate": 0.00024411764705882354, "loss": 0.9563, "step": 2060},
    {"epoch": 22.85, "learning_rate": 0.00024215686274509804, "loss": 0.9643, "step": 2080},
    {"epoch": 23.08, "learning_rate": 0.00024019607843137256, "loss": 0.9616, "step": 2100},
    {"epoch": 23.08, "eval_loss": 0.18017500638961792, "eval_runtime": 191.3932, "eval_samples_per_second": 25.304, "eval_steps_per_second": 0.794, "eval_wer": 0.16173715583309678, "step": 2100},
    {"epoch": 23.3, "learning_rate": 0.00023823529411764704, "loss": 0.917, "step": 2120},
    {"epoch": 23.51, "learning_rate": 0.00023627450980392157, "loss": 0.945, "step": 2140},
    {"epoch": 23.73, "learning_rate": 0.0002343137254901961, "loss": 0.9243, "step": 2160},
    {"epoch": 23.95, "learning_rate": 0.0002323529411764706, "loss": 0.9288, "step": 2180},
    {"epoch": 24.17, "learning_rate": 0.0002303921568627451, "loss": 0.9378, "step": 2200},
    {"epoch": 24.17, "eval_loss": 0.17928896844387054, "eval_runtime": 189.7619, "eval_samples_per_second": 25.521, "eval_steps_per_second": 0.801, "eval_wer": 0.1577348850411581, "step": 2200},
    {"epoch": 24.39, "learning_rate": 0.0002284313725490196, "loss": 0.9071, "step": 2220},
    {"epoch": 24.61, "learning_rate": 0.00022647058823529412, "loss": 0.9054, "step": 2240},
    {"epoch": 24.83, "learning_rate": 0.0002246078431372549, "loss": 0.9303, "step": 2260},
    {"epoch": 25.05, "learning_rate": 0.00022264705882352943, "loss": 0.9376, "step": 2280},
    {"epoch": 25.27, "learning_rate": 0.0002206862745098039, "loss": 0.888, "step": 2300},
    {"epoch": 25.27, "eval_loss": 0.17642559111118317, "eval_runtime": 187.0437, "eval_samples_per_second": 25.892, "eval_steps_per_second": 0.813, "eval_wer": 0.15452739142776042, "step": 2300},
    {"epoch": 25.49, "learning_rate": 0.00021872549019607843, "loss": 0.9135, "step": 2320},
    {"epoch": 25.71, "learning_rate": 0.00021676470588235294, "loss": 0.9094, "step": 2340},
    {"epoch": 25.93, "learning_rate": 0.00021480392156862746, "loss": 0.8879, "step": 2360},
    {"epoch": 26.15, "learning_rate": 0.00021284313725490196, "loss": 0.929, "step": 2380},
    {"epoch": 26.37, "learning_rate": 0.00021088235294117647, "loss": 0.8942, "step": 2400},
    {"epoch": 26.37, "eval_loss": 0.16744859516620636, "eval_runtime": 190.6796, "eval_samples_per_second": 25.399, "eval_steps_per_second": 0.797, "eval_wer": 0.14916264547260857, "step": 2400},
    {"epoch": 26.59, "learning_rate": 0.000208921568627451, "loss": 0.8717, "step": 2420},
    {"epoch": 26.81, "learning_rate": 0.0002069607843137255, "loss": 0.8952, "step": 2440},
    {"epoch": 27.03, "learning_rate": 0.000205, "loss": 0.8849, "step": 2460},
    {"epoch": 27.25, "learning_rate": 0.00020313725490196078, "loss": 0.8781, "step": 2480},
    {"epoch": 27.47, "learning_rate": 0.0002011764705882353, "loss": 0.8701, "step": 2500},
    {"epoch": 27.47, "eval_loss": 0.1738910973072052, "eval_runtime": 187.7205, "eval_samples_per_second": 25.799, "eval_steps_per_second": 0.81, "eval_wer": 0.1511779733181947, "step": 2500},
    {"epoch": 27.69, "learning_rate": 0.0001992156862745098, "loss": 0.8674, "step": 2520},
    {"epoch": 27.91, "learning_rate": 0.00019725490196078433, "loss": 0.8752, "step": 2540},
    {"epoch": 28.13, "learning_rate": 0.0001952941176470588, "loss": 0.91, "step": 2560},
    {"epoch": 28.35, "learning_rate": 0.00019333333333333333, "loss": 0.8693, "step": 2580},
    {"epoch": 28.57, "learning_rate": 0.00019137254901960786, "loss": 0.8555, "step": 2600},
    {"epoch": 28.57, "eval_loss": 0.1689654141664505, "eval_runtime": 196.2665, "eval_samples_per_second": 24.676, "eval_steps_per_second": 0.774, "eval_wer": 0.14459267669599773, "step": 2600},
    {"epoch": 28.79, "learning_rate": 0.00018941176470588236, "loss": 0.8796, "step": 2620},
    {"epoch": 29.01, "learning_rate": 0.00018745098039215686, "loss": 0.9045, "step": 2640},
    {"epoch": 29.23, "learning_rate": 0.00018549019607843137, "loss": 0.8515, "step": 2660},
    {"epoch": 29.45, "learning_rate": 0.0001835294117647059, "loss": 0.861, "step": 2680},
    {"epoch": 29.67, "learning_rate": 0.0001815686274509804, "loss": 0.8513, "step": 2700},
    {"epoch": 29.67, "eval_loss": 0.16488835215568542, "eval_runtime": 189.1938, "eval_samples_per_second": 25.598, "eval_steps_per_second": 0.803, "eval_wer": 0.14774340051092819, "step": 2700},
    {"epoch": 29.89, "learning_rate": 0.0001796078431372549, "loss": 0.8573, "step": 2720},
    {"epoch": 30.11, "learning_rate": 0.00017764705882352942, "loss": 0.8686, "step": 2740},
    {"epoch": 30.33, "learning_rate": 0.00017568627450980392, "loss": 0.8462, "step": 2760},
    {"epoch": 30.55, "learning_rate": 0.00017372549019607845, "loss": 0.8419, "step": 2780},
    {"epoch": 30.77, "learning_rate": 0.00017176470588235293, "loss": 0.8659, "step": 2800},
    {"epoch": 30.77, "eval_loss": 0.1636602133512497, "eval_runtime": 190.245, "eval_samples_per_second": 25.457, "eval_steps_per_second": 0.799, "eval_wer": 0.1422367300596083, "step": 2800},
    {"epoch": 30.98, "learning_rate": 0.00016980392156862745, "loss": 0.8607, "step": 2820},
    {"epoch": 31.21, "learning_rate": 0.00016784313725490196, "loss": 0.8484, "step": 2840},
    {"epoch": 31.43, "learning_rate": 0.00016588235294117648, "loss": 0.8335, "step": 2860},
    {"epoch": 31.64, "learning_rate": 0.00016392156862745098, "loss": 0.8321, "step": 2880},
    {"epoch": 31.86, "learning_rate": 0.00016196078431372549, "loss": 0.8419, "step": 2900},
    {"epoch": 31.86, "eval_loss": 0.16137854754924774, "eval_runtime": 190.2053, "eval_samples_per_second": 25.462, "eval_steps_per_second": 0.799, "eval_wer": 0.1397104740278172, "step": 2900},
    {"epoch": 32.09, "learning_rate": 0.00016, "loss": 0.8634, "step": 2920},
    {"epoch": 32.31, "learning_rate": 0.00015803921568627451, "loss": 0.8351, "step": 2940},
    {"epoch": 32.52, "learning_rate": 0.00015607843137254901, "loss": 0.8427, "step": 2960},
    {"epoch": 32.74, "learning_rate": 0.00015411764705882352, "loss": 0.8324, "step": 2980},
    {"epoch": 32.96, "learning_rate": 0.00015215686274509804, "loss": 0.8491, "step": 3000},
    {"epoch": 32.96, "eval_loss": 0.1594749242067337, "eval_runtime": 191.4631, "eval_samples_per_second": 25.295, "eval_steps_per_second": 0.794, "eval_wer": 0.1401362475163213, "step": 3000},
    {"epoch": 33.19, "learning_rate": 0.00015019607843137257, "loss": 0.8263, "step": 3020},
    {"epoch": 33.4, "learning_rate": 0.00014823529411764705, "loss": 0.8203, "step": 3040},
    {"epoch": 33.62, "learning_rate": 0.00014627450980392157, "loss": 0.8277, "step": 3060},
    {"epoch": 33.84, "learning_rate": 0.00014431372549019607, "loss": 0.8183, "step": 3080},
    {"epoch": 34.07, "learning_rate": 0.0001423529411764706, "loss": 0.8395, "step": 3100},
    {"epoch": 34.07, "eval_loss": 0.1606692373752594, "eval_runtime": 190.8222, "eval_samples_per_second": 25.38, "eval_steps_per_second": 0.797, "eval_wer": 0.13755322168606302, "step": 3100},
    {"epoch": 34.28, "learning_rate": 0.0001403921568627451, "loss": 0.8207, "step": 3120},
    {"epoch": 34.5, "learning_rate": 0.0001384313725490196, "loss": 0.8106, "step": 3140},
    {"epoch": 34.72, "learning_rate": 0.00013647058823529413, "loss": 0.8329, "step": 3160},
    {"epoch": 34.94, "learning_rate": 0.00013450980392156863, "loss": 0.8048, "step": 3180},
    {"epoch": 35.16, "learning_rate": 0.00013254901960784313, "loss": 0.83, "step": 3200},
    {"epoch": 35.16, "eval_loss": 0.15384173393249512, "eval_runtime": 191.1912, "eval_samples_per_second": 25.331, "eval_steps_per_second": 0.795, "eval_wer": 0.1379222253760999, "step": 3200},
    {"epoch": 35.38, "learning_rate": 0.00013058823529411764, "loss": 0.7963, "step": 3220},
    {"epoch": 35.6, "learning_rate": 0.00012862745098039216, "loss": 0.7895, "step": 3240},
    {"epoch": 35.82, "learning_rate": 0.0001266666666666667, "loss": 0.7964, "step": 3260},
    {"epoch": 36.04, "learning_rate": 0.0001247058823529412, "loss": 0.7931, "step": 3280},
    {"epoch": 36.26, "learning_rate": 0.0001227450980392157, "loss": 0.7835, "step": 3300},
    {"epoch": 36.26, "eval_loss": 0.16020993888378143, "eval_runtime": 192.1405, "eval_samples_per_second": 25.206, "eval_steps_per_second": 0.791, "eval_wer": 0.1408458699971615, "step": 3300},
    {"epoch": 36.48, "learning_rate": 0.00012078431372549021, "loss": 0.772, "step": 3320},
    {"epoch": 36.7, "learning_rate": 0.00011882352941176471, "loss": 0.7818, "step": 3340},
    {"epoch": 36.92, "learning_rate": 0.00011696078431372549, "loss": 0.8016, "step": 3360},
    {"epoch": 37.14, "learning_rate": 0.000115, "loss": 0.8061, "step": 3380},
    {"epoch": 37.36, "learning_rate": 0.0001130392156862745, "loss": 0.7703, "step": 3400},
    {"epoch": 37.36, "eval_loss": 0.16011376678943634, "eval_runtime": 187.5367, "eval_samples_per_second": 25.824, "eval_steps_per_second": 0.811, "eval_wer": 0.13692875390292364, "step": 3400},
    {"epoch": 37.58, "learning_rate": 0.00011107843137254903, "loss": 0.7713, "step": 3420},
    {"epoch": 37.8, "learning_rate": 0.00010911764705882353, "loss": 0.7712, "step": 3440},
    {"epoch": 38.02, "learning_rate": 0.00010715686274509805, "loss": 0.8149, "step": 3460},
    {"epoch": 38.24, "learning_rate": 0.00010519607843137255, "loss": 0.7885, "step": 3480},
    {"epoch": 38.46, "learning_rate": 0.00010323529411764706, "loss": 0.7474, "step": 3500},
    {"epoch": 38.46, "eval_loss": 0.1514146625995636, "eval_runtime": 190.8228, "eval_samples_per_second": 25.38, "eval_steps_per_second": 0.797, "eval_wer": 0.1342321884757309, "step": 3500},
    {"epoch": 38.68, "learning_rate": 0.00010127450980392156, "loss": 0.785, "step": 3520},
    {"epoch": 38.9, "learning_rate": 9.931372549019609e-05, "loss": 0.7677, "step": 3540},
    {"epoch": 39.12, "learning_rate": 9.73529411764706e-05, "loss": 0.7849, "step": 3560},
    {"epoch": 39.34, "learning_rate": 9.539215686274511e-05, "loss": 0.7637, "step": 3580},
    {"epoch": 39.56, "learning_rate": 9.343137254901961e-05, "loss": 0.7719, "step": 3600},
    {"epoch": 39.56, "eval_loss": 0.15932896733283997, "eval_runtime": 189.6806, "eval_samples_per_second": 25.532, "eval_steps_per_second": 0.801, "eval_wer": 0.1352540448481408, "step": 3600},
    {"epoch": 39.78, "learning_rate": 9.147058823529412e-05, "loss": 0.7591, "step": 3620},
    {"epoch": 39.99, "learning_rate": 8.950980392156862e-05, "loss": 0.7706, "step": 3640},
    {"epoch": 40.22, "learning_rate": 8.754901960784314e-05, "loss": 0.7805, "step": 3660},
    {"epoch": 40.44, "learning_rate": 8.558823529411765e-05, "loss": 0.7753, "step": 3680},
    {"epoch": 40.66, "learning_rate": 8.362745098039217e-05, "loss": 0.7638, "step": 3700},
    {"epoch": 40.66, "eval_loss": 0.15362653136253357, "eval_runtime": 192.1118, "eval_samples_per_second": 25.209, "eval_steps_per_second": 0.791, "eval_wer": 0.13380641498722678, "step": 3700},
    {"epoch": 40.87, "learning_rate": 8.166666666666667e-05, "loss": 0.7791, "step": 3720},
    {"epoch": 41.1, "learning_rate": 7.970588235294118e-05, "loss": 0.7931, "step": 3740},
    {"epoch": 41.32, "learning_rate": 7.774509803921568e-05, "loss": 0.741, "step": 3760},
    {"epoch": 41.54, "learning_rate": 7.57843137254902e-05, "loss": 0.7434, "step": 3780},
    {"epoch": 41.75, "learning_rate": 7.38235294117647e-05, "loss": 0.771, "step": 3800},
    {"epoch": 41.75, "eval_loss": 0.1530592143535614, "eval_runtime": 186.414, "eval_samples_per_second": 25.98, "eval_steps_per_second": 0.815, "eval_wer": 0.13170593244393983, "step": 3800},
    {"epoch": 41.97, "learning_rate": 7.186274509803923e-05, "loss": 0.7765, "step": 3820},
    {"epoch": 42.2, "learning_rate": 6.990196078431373e-05, "loss": 0.7599, "step": 3840},
    {"epoch": 42.42, "learning_rate": 6.794117647058824e-05, "loss": 0.7782, "step": 3860},
    {"epoch": 42.63, "learning_rate": 6.598039215686274e-05, "loss": 0.7395, "step": 3880},
    {"epoch": 42.85, "learning_rate": 6.401960784313726e-05, "loss": 0.7594, "step": 3900},
    {"epoch": 42.85, "eval_loss": 0.14983513951301575, "eval_runtime": 190.4439, "eval_samples_per_second": 25.43, "eval_steps_per_second": 0.798, "eval_wer": 0.12883905762134545, "step": 3900},
    {"epoch": 43.08, "learning_rate": 6.205882352941176e-05, "loss": 0.7691, "step": 3920},
    {"epoch": 43.3, "learning_rate": 6.0098039215686274e-05, "loss": 0.728, "step": 3940},
    {"epoch": 43.51, "learning_rate": 5.813725490196078e-05, "loss": 0.7585, "step": 3960},
    {"epoch": 43.73, "learning_rate": 5.6176470588235296e-05, "loss": 0.7564, "step": 3980},
    {"epoch": 43.95, "learning_rate": 5.4215686274509804e-05, "loss": 0.7383, "step": 4000},
    {"epoch": 43.95, "eval_loss": 0.15269021689891815, "eval_runtime": 188.5816, "eval_samples_per_second": 25.681, "eval_steps_per_second": 0.806, "eval_wer": 0.13003122338915696, "step": 4000},
    {"epoch": 44.17, "learning_rate": 5.225490196078431e-05, "loss": 0.7693, "step": 4020},
    {"epoch": 44.39, "learning_rate": 5.0294117647058826e-05, "loss": 0.7347, "step": 4040},
    {"epoch": 44.61, "learning_rate": 4.8333333333333334e-05, "loss": 0.7185, "step": 4060},
    {"epoch": 44.83, "learning_rate": 4.637254901960784e-05, "loss": 0.7394, "step": 4080},
    {"epoch": 45.05, "learning_rate": 4.4411764705882356e-05, "loss": 0.7565, "step": 4100},
    {"epoch": 45.05, "eval_loss": 0.14823457598686218, "eval_runtime": 191.1254, "eval_samples_per_second": 25.339, "eval_steps_per_second": 0.795, "eval_wer": 0.12886744252057905, "step": 4100},
    {"epoch": 45.27, "learning_rate": 4.2450980392156864e-05, "loss": 0.7535, "step": 4120},
    {"epoch": 45.49, "learning_rate": 4.049019607843137e-05, "loss": 0.737, "step": 4140},
    {"epoch": 45.71, "learning_rate": 3.852941176470588e-05, "loss": 0.7501, "step": 4160},
    {"epoch": 45.93, "learning_rate": 3.6568627450980393e-05, "loss": 0.7285, "step": 4180},
    {"epoch": 46.15, "learning_rate": 3.46078431372549e-05, "loss": 0.7697, "step": 4200},
    {"epoch": 46.15, "eval_loss": 0.1494804471731186, "eval_runtime": 190.3868, "eval_samples_per_second": 25.438, "eval_steps_per_second": 0.798, "eval_wer": 0.1271927334657962, "step": 4200},
    {"epoch": 46.37, "learning_rate": 3.264705882352941e-05, "loss": 0.7283, "step": 4220},
    {"epoch": 46.59, "learning_rate": 3.0686274509803923e-05, "loss": 0.7218, "step": 4240},
    {"epoch": 46.81, "learning_rate": 2.872549019607843e-05, "loss": 0.7341, "step": 4260},
    {"epoch": 47.03, "learning_rate": 2.6764705882352942e-05, "loss": 0.7293, "step": 4280},
    {"epoch": 47.25, "learning_rate": 2.480392156862745e-05, "loss": 0.7194, "step": 4300},
    {"epoch": 47.25, "eval_loss": 0.14928147196769714, "eval_runtime": 189.625, "eval_samples_per_second": 25.54, "eval_steps_per_second": 0.802, "eval_wer": 0.12693726937269373, "step": 4300},
    {"epoch": 47.47, "learning_rate": 2.284313725490196e-05, "loss": 0.7326, "step": 4320},
    {"epoch": 47.69, "learning_rate": 2.088235294117647e-05, "loss": 0.724, "step": 4340},
    {"epoch": 47.91, "learning_rate": 1.892156862745098e-05, "loss": 0.7316, "step": 4360},
    {"epoch": 48.13, "learning_rate": 1.696078431372549e-05, "loss": 0.7566, "step": 4380},
    {"epoch": 48.35, "learning_rate": 1.5e-05, "loss": 0.7479, "step": 4400},
    {"epoch": 48.35, "eval_loss": 0.1490115076303482, "eval_runtime": 188.3681, "eval_samples_per_second": 25.71, "eval_steps_per_second": 0.807, "eval_wer": 0.12761850695430033, "step": 4400}
  ],
  "max_steps": 4550,
  "num_train_epochs": 50,
  "total_flos": 6.805564586486029e+19,
  "trial_name": null,
  "trial_params": null
}