{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 50.0,
  "global_step": 5750,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.87,
      "learning_rate": 7.35e-06,
      "loss": 11.7893,
      "step": 100
    },
    {
      "epoch": 1.74,
      "learning_rate": 1.485e-05,
      "loss": 4.6989,
      "step": 200
    },
    {
      "epoch": 2.61,
      "learning_rate": 2.2349999999999998e-05,
      "loss": 3.5677,
      "step": 300
    },
    {
      "epoch": 3.48,
      "learning_rate": 2.985e-05,
      "loss": 3.1589,
      "step": 400
    },
    {
      "epoch": 3.48,
      "eval_loss": 3.0830252170562744,
      "eval_runtime": 76.0167,
      "eval_samples_per_second": 22.363,
      "eval_steps_per_second": 1.408,
      "eval_wer": 1.0,
      "step": 400
    },
    {
      "epoch": 4.35,
      "learning_rate": 3.735e-05,
      "loss": 3.0279,
      "step": 500
    },
    {
      "epoch": 5.22,
      "learning_rate": 4.484999999999999e-05,
      "loss": 2.9807,
      "step": 600
    },
    {
      "epoch": 6.09,
      "learning_rate": 5.234999999999999e-05,
      "loss": 2.9589,
      "step": 700
    },
    {
      "epoch": 6.96,
      "learning_rate": 5.985e-05,
      "loss": 2.8921,
      "step": 800
    },
    {
      "epoch": 6.96,
      "eval_loss": 2.6604607105255127,
      "eval_runtime": 76.2841,
      "eval_samples_per_second": 22.285,
      "eval_steps_per_second": 1.403,
      "eval_wer": 0.9981590808100045,
      "step": 800
    },
    {
      "epoch": 7.83,
      "learning_rate": 6.735e-05,
      "loss": 2.346,
      "step": 900
    },
    {
      "epoch": 8.7,
      "learning_rate": 7.484999999999999e-05,
      "loss": 1.6326,
      "step": 1000
    },
    {
      "epoch": 9.57,
      "learning_rate": 7.345263157894736e-05,
      "loss": 1.4075,
      "step": 1100
    },
    {
      "epoch": 10.43,
      "learning_rate": 7.187368421052631e-05,
      "loss": 1.3049,
      "step": 1200
    },
    {
      "epoch": 10.43,
      "eval_loss": 0.5069074630737305,
      "eval_runtime": 75.7661,
      "eval_samples_per_second": 22.437,
      "eval_steps_per_second": 1.412,
      "eval_wer": 0.5706849488986225,
      "step": 1200
    },
    {
      "epoch": 11.3,
      "learning_rate": 7.029473684210525e-05,
      "loss": 1.2407,
      "step": 1300
    },
    {
      "epoch": 12.17,
      "learning_rate": 6.87157894736842e-05,
      "loss": 1.1999,
      "step": 1400
    },
    {
      "epoch": 13.04,
      "learning_rate": 6.713684210526315e-05,
      "loss": 1.167,
      "step": 1500
    },
    {
      "epoch": 13.91,
      "learning_rate": 6.55578947368421e-05,
      "loss": 1.1349,
      "step": 1600
    },
    {
      "epoch": 13.91,
      "eval_loss": 0.4158603847026825,
      "eval_runtime": 75.9244,
      "eval_samples_per_second": 22.391,
      "eval_steps_per_second": 1.409,
      "eval_wer": 0.5040944581984383,
      "step": 1600
    },
    {
      "epoch": 14.78,
      "learning_rate": 6.397894736842105e-05,
      "loss": 1.1161,
      "step": 1700
    },
    {
      "epoch": 15.65,
      "learning_rate": 6.239999999999999e-05,
      "loss": 1.0982,
      "step": 1800
    },
    {
      "epoch": 16.52,
      "learning_rate": 6.0821052631578945e-05,
      "loss": 1.0841,
      "step": 1900
    },
    {
      "epoch": 17.39,
      "learning_rate": 5.924210526315789e-05,
      "loss": 1.0686,
      "step": 2000
    },
    {
      "epoch": 17.39,
      "eval_loss": 0.38148975372314453,
      "eval_runtime": 77.84,
      "eval_samples_per_second": 21.84,
      "eval_steps_per_second": 1.375,
      "eval_wer": 0.4746397511585095,
      "step": 2000
    },
    {
      "epoch": 18.26,
      "learning_rate": 5.7663157894736836e-05,
      "loss": 1.047,
      "step": 2100
    },
    {
      "epoch": 19.13,
      "learning_rate": 5.608421052631579e-05,
      "loss": 1.03,
      "step": 2200
    },
    {
      "epoch": 20.0,
      "learning_rate": 5.450526315789473e-05,
      "loss": 1.0144,
      "step": 2300
    },
    {
      "epoch": 20.87,
      "learning_rate": 5.292631578947368e-05,
      "loss": 0.999,
      "step": 2400
    },
    {
      "epoch": 20.87,
      "eval_loss": 0.3541375696659088,
      "eval_runtime": 75.1099,
      "eval_samples_per_second": 22.633,
      "eval_steps_per_second": 1.425,
      "eval_wer": 0.4343299688948137,
      "step": 2400
    },
    {
      "epoch": 21.74,
      "learning_rate": 5.1347368421052626e-05,
      "loss": 0.993,
      "step": 2500
    },
    {
      "epoch": 22.61,
      "learning_rate": 4.976842105263157e-05,
      "loss": 0.9766,
      "step": 2600
    },
    {
      "epoch": 23.48,
      "learning_rate": 4.8189473684210524e-05,
      "loss": 0.9573,
      "step": 2700
    },
    {
      "epoch": 24.35,
      "learning_rate": 4.661052631578947e-05,
      "loss": 0.945,
      "step": 2800
    },
    {
      "epoch": 24.35,
      "eval_loss": 0.3265579342842102,
      "eval_runtime": 75.4543,
      "eval_samples_per_second": 22.53,
      "eval_steps_per_second": 1.418,
      "eval_wer": 0.4131911381958992,
      "step": 2800
    },
    {
      "epoch": 25.22,
      "learning_rate": 4.5031578947368415e-05,
      "loss": 0.9309,
      "step": 2900
    },
    {
      "epoch": 26.09,
      "learning_rate": 4.345263157894736e-05,
      "loss": 0.9151,
      "step": 3000
    },
    {
      "epoch": 26.96,
      "learning_rate": 4.1873684210526314e-05,
      "loss": 0.9138,
      "step": 3100
    },
    {
      "epoch": 27.83,
      "learning_rate": 4.031052631578947e-05,
      "loss": 0.9058,
      "step": 3200
    },
    {
      "epoch": 27.83,
      "eval_loss": 0.2969067096710205,
      "eval_runtime": 75.0182,
      "eval_samples_per_second": 22.661,
      "eval_steps_per_second": 1.426,
      "eval_wer": 0.377071034088745,
      "step": 3200
    },
    {
      "epoch": 28.7,
      "learning_rate": 3.8731578947368425e-05,
      "loss": 0.9001,
      "step": 3300
    },
    {
      "epoch": 29.57,
      "learning_rate": 3.7152631578947364e-05,
      "loss": 0.8882,
      "step": 3400
    },
    {
      "epoch": 30.43,
      "learning_rate": 3.557368421052631e-05,
      "loss": 0.867,
      "step": 3500
    },
    {
      "epoch": 31.3,
      "learning_rate": 3.399473684210526e-05,
      "loss": 0.8672,
      "step": 3600
    },
    {
      "epoch": 31.3,
      "eval_loss": 0.2802095413208008,
      "eval_runtime": 75.6097,
      "eval_samples_per_second": 22.484,
      "eval_steps_per_second": 1.415,
      "eval_wer": 0.35529740366914236,
      "step": 3600
    },
    {
      "epoch": 32.17,
      "learning_rate": 3.241578947368421e-05,
      "loss": 0.8522,
      "step": 3700
    },
    {
      "epoch": 33.04,
      "learning_rate": 3.0836842105263153e-05,
      "loss": 0.8391,
      "step": 3800
    },
    {
      "epoch": 33.91,
      "learning_rate": 2.9257894736842103e-05,
      "loss": 0.8368,
      "step": 3900
    },
    {
      "epoch": 34.78,
      "learning_rate": 2.767894736842105e-05,
      "loss": 0.8313,
      "step": 4000
    },
    {
      "epoch": 34.78,
      "eval_loss": 0.26624372601509094,
      "eval_runtime": 74.7639,
      "eval_samples_per_second": 22.738,
      "eval_steps_per_second": 1.431,
      "eval_wer": 0.3379673712943566,
      "step": 4000
    },
    {
      "epoch": 35.65,
      "learning_rate": 2.6099999999999997e-05,
      "loss": 0.8188,
      "step": 4100
    },
    {
      "epoch": 36.52,
      "learning_rate": 2.4521052631578947e-05,
      "loss": 0.8168,
      "step": 4200
    },
    {
      "epoch": 37.39,
      "learning_rate": 2.2942105263157892e-05,
      "loss": 0.8103,
      "step": 4300
    },
    {
      "epoch": 38.26,
      "learning_rate": 2.136315789473684e-05,
      "loss": 0.8068,
      "step": 4400
    },
    {
      "epoch": 38.26,
      "eval_loss": 0.25275924801826477,
      "eval_runtime": 75.264,
      "eval_samples_per_second": 22.587,
      "eval_steps_per_second": 1.422,
      "eval_wer": 0.3180981400368184,
      "step": 4400
    },
    {
      "epoch": 39.13,
      "learning_rate": 1.9784210526315787e-05,
      "loss": 0.8046,
      "step": 4500
    },
    {
      "epoch": 40.0,
      "learning_rate": 1.8205263157894736e-05,
      "loss": 0.7929,
      "step": 4600
    },
    {
      "epoch": 40.87,
      "learning_rate": 1.6626315789473682e-05,
      "loss": 0.7817,
      "step": 4700
    },
    {
      "epoch": 41.74,
      "learning_rate": 1.504736842105263e-05,
      "loss": 0.7796,
      "step": 4800
    },
    {
      "epoch": 41.74,
      "eval_loss": 0.25369659066200256,
      "eval_runtime": 74.2196,
      "eval_samples_per_second": 22.905,
      "eval_steps_per_second": 1.442,
      "eval_wer": 0.3073065447851203,
      "step": 4800
    },
    {
      "epoch": 42.61,
      "learning_rate": 1.3468421052631578e-05,
      "loss": 0.7789,
      "step": 4900
    },
    {
      "epoch": 43.48,
      "learning_rate": 1.1889473684210526e-05,
      "loss": 0.7736,
      "step": 5000
    },
    {
      "epoch": 44.35,
      "learning_rate": 1.0310526315789472e-05,
      "loss": 0.783,
      "step": 5100
    },
    {
      "epoch": 45.22,
      "learning_rate": 8.73157894736842e-06,
      "loss": 0.7621,
      "step": 5200
    },
    {
      "epoch": 45.22,
      "eval_loss": 0.2502739131450653,
      "eval_runtime": 74.1963,
      "eval_samples_per_second": 22.912,
      "eval_steps_per_second": 1.442,
      "eval_wer": 0.3035612264330604,
      "step": 5200
    },
    {
      "epoch": 46.09,
      "learning_rate": 7.168421052631579e-06,
      "loss": 0.767,
      "step": 5300
    },
    {
      "epoch": 46.96,
      "learning_rate": 5.589473684210526e-06,
      "loss": 0.7659,
      "step": 5400
    },
    {
      "epoch": 47.83,
      "learning_rate": 4.0105263157894735e-06,
      "loss": 0.7622,
      "step": 5500
    },
    {
      "epoch": 48.7,
      "learning_rate": 2.431578947368421e-06,
      "loss": 0.7611,
      "step": 5600
    },
    {
      "epoch": 48.7,
      "eval_loss": 0.24768942594528198,
      "eval_runtime": 74.3172,
      "eval_samples_per_second": 22.875,
      "eval_steps_per_second": 1.44,
      "eval_wer": 0.29905414841617467,
      "step": 5600
    },
    {
      "epoch": 49.57,
      "learning_rate": 8.526315789473684e-07,
      "loss": 0.7494,
      "step": 5700
    },
    {
      "epoch": 50.0,
      "step": 5750,
      "total_flos": 2.9609555446330925e+19,
      "train_loss": 1.4450044761326002,
      "train_runtime": 12353.9156,
      "train_samples_per_second": 14.878,
      "train_steps_per_second": 0.465
    }
  ],
  "max_steps": 5750,
  "num_train_epochs": 50,
  "total_flos": 2.9609555446330925e+19,
  "trial_name": null,
  "trial_params": null
}