{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 50.0,
  "global_step": 11100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.45, "learning_rate": 3.6e-06, "loss": 38.551, "step": 100 },
    { "epoch": 0.9, "learning_rate": 7.35e-06, "loss": 24.0076, "step": 200 },
    { "epoch": 1.35, "learning_rate": 1.1099999999999999e-05, "loss": 15.8812, "step": 300 },
    { "epoch": 1.8, "learning_rate": 1.485e-05, "loss": 12.8964, "step": 400 },
    { "epoch": 2.25, "learning_rate": 1.8599999999999998e-05, "loss": 9.9344, "step": 500 },
    { "epoch": 2.7, "learning_rate": 2.2349999999999998e-05, "loss": 7.1613, "step": 600 },
    { "epoch": 3.15, "learning_rate": 2.6099999999999997e-05, "loss": 5.3772, "step": 700 },
    { "epoch": 3.6, "learning_rate": 2.985e-05, "loss": 4.6404, "step": 800 },
    { "epoch": 4.05, "learning_rate": 3.36e-05, "loss": 4.3831, "step": 900 },
    { "epoch": 4.5, "learning_rate": 3.735e-05, "loss": 4.221, "step": 1000 },
    { "epoch": 4.5, "eval_loss": 4.1194539070129395, "eval_runtime": 239.8438, "eval_samples_per_second": 18.691, "eval_steps_per_second": 2.339, "eval_wer": 2.4023576512455516, "step": 1000 },
    { "epoch": 4.95, "learning_rate": 4.11e-05, "loss": 4.1513, "step": 1100 },
    { "epoch": 5.41, "learning_rate": 4.484999999999999e-05, "loss": 4.1055, "step": 1200 },
    { "epoch": 5.86, "learning_rate": 4.8599999999999995e-05, "loss": 4.0677, "step": 1300 },
    { "epoch": 6.31, "learning_rate": 5.234999999999999e-05, "loss": 4.0164, "step": 1400 },
    { "epoch": 6.76, "learning_rate": 5.6099999999999995e-05, "loss": 3.9636, "step": 1500 },
    { "epoch": 7.21, "learning_rate": 5.985e-05, "loss": 3.7377, "step": 1600 },
    { "epoch": 7.66, "learning_rate": 6.359999999999999e-05, "loss": 3.1744, "step": 1700 },
    { "epoch": 8.11, "learning_rate": 6.735e-05, "loss": 2.704, "step": 1800 },
    { "epoch": 8.56, "learning_rate": 7.11e-05, "loss": 2.497, "step": 1900 },
    { "epoch": 9.01, "learning_rate": 7.484999999999999e-05, "loss": 2.3597, "step": 2000 },
    { "epoch": 9.01, "eval_loss": 1.102413535118103, "eval_runtime": 237.4335, "eval_samples_per_second": 18.881, "eval_steps_per_second": 2.363, "eval_wer": 2.761788256227758, "step": 2000 },
    { "epoch": 9.46, "learning_rate": 7.42087912087912e-05, "loss": 2.2495, "step": 2100 },
    { "epoch": 9.91, "learning_rate": 7.338461538461538e-05, "loss": 2.1767, "step": 2200 },
    { "epoch": 10.36, "learning_rate": 7.256043956043956e-05, "loss": 2.1115, "step": 2300 },
    { "epoch": 10.81, "learning_rate": 7.173626373626374e-05, "loss": 2.0653, "step": 2400 },
    { "epoch": 11.26, "learning_rate": 7.09120879120879e-05, "loss": 2.0077, "step": 2500 },
    { "epoch": 11.71, "learning_rate": 7.008791208791208e-05, "loss": 1.9933, "step": 2600 },
    { "epoch": 12.16, "learning_rate": 6.926373626373626e-05, "loss": 1.9756, "step": 2700 },
    { "epoch": 12.61, "learning_rate": 6.843956043956044e-05, "loss": 1.9246, "step": 2800 },
    { "epoch": 13.06, "learning_rate": 6.761538461538461e-05, "loss": 1.9044, "step": 2900 },
    { "epoch": 13.51, "learning_rate": 6.679120879120879e-05, "loss": 1.8795, "step": 3000 },
    { "epoch": 13.51, "eval_loss": 0.749809205532074, "eval_runtime": 236.7828, "eval_samples_per_second": 18.933, "eval_steps_per_second": 2.369, "eval_wer": 2.588523131672598, "step": 3000 },
    { "epoch": 13.96, "learning_rate": 6.596703296703297e-05, "loss": 1.8683, "step": 3100 },
    { "epoch": 14.41, "learning_rate": 6.514285714285713e-05, "loss": 1.8367, "step": 3200 },
    { "epoch": 14.86, "learning_rate": 6.431868131868131e-05, "loss": 1.8273, "step": 3300 },
    { "epoch": 15.32, "learning_rate": 6.349450549450548e-05, "loss": 1.7743, "step": 3400 },
    { "epoch": 15.77, "learning_rate": 6.267032967032966e-05, "loss": 1.7832, "step": 3500 },
    { "epoch": 16.22, "learning_rate": 6.184615384615384e-05, "loss": 1.7915, "step": 3600 },
    { "epoch": 16.67, "learning_rate": 6.102197802197801e-05, "loss": 1.7488, "step": 3700 },
    { "epoch": 17.12, "learning_rate": 6.019780219780219e-05, "loss": 1.7178, "step": 3800 },
    { "epoch": 17.57, "learning_rate": 5.937362637362637e-05, "loss": 1.6903, "step": 3900 },
    { "epoch": 18.02, "learning_rate": 5.854945054945054e-05, "loss": 1.7143, "step": 4000 },
    { "epoch": 18.02, "eval_loss": 0.6538747549057007, "eval_runtime": 236.0252, "eval_samples_per_second": 18.994, "eval_steps_per_second": 2.377, "eval_wer": 2.5976423487544484, "step": 4000 },
    { "epoch": 18.47, "learning_rate": 5.772527472527472e-05, "loss": 1.7023, "step": 4100 },
    { "epoch": 18.92, "learning_rate": 5.69010989010989e-05, "loss": 1.6887, "step": 4200 },
    { "epoch": 19.37, "learning_rate": 5.607692307692307e-05, "loss": 1.6596, "step": 4300 },
    { "epoch": 19.82, "learning_rate": 5.525274725274725e-05, "loss": 1.6613, "step": 4400 },
    { "epoch": 20.27, "learning_rate": 5.442857142857143e-05, "loss": 1.6575, "step": 4500 },
    { "epoch": 20.72, "learning_rate": 5.36043956043956e-05, "loss": 1.6486, "step": 4600 },
    { "epoch": 21.17, "learning_rate": 5.278021978021977e-05, "loss": 1.6467, "step": 4700 },
    { "epoch": 21.62, "learning_rate": 5.195604395604395e-05, "loss": 1.6206, "step": 4800 },
    { "epoch": 22.07, "learning_rate": 5.1140109890109885e-05, "loss": 1.6048, "step": 4900 },
    { "epoch": 22.52, "learning_rate": 5.031593406593406e-05, "loss": 1.6025, "step": 5000 },
    { "epoch": 22.52, "eval_loss": 0.598939836025238, "eval_runtime": 237.9623, "eval_samples_per_second": 18.839, "eval_steps_per_second": 2.358, "eval_wer": 2.6034252669039146, "step": 5000 },
    { "epoch": 22.97, "learning_rate": 4.949175824175824e-05, "loss": 1.6151, "step": 5100 },
    { "epoch": 23.42, "learning_rate": 4.866758241758242e-05, "loss": 1.5959, "step": 5200 },
    { "epoch": 23.87, "learning_rate": 4.784340659340658e-05, "loss": 1.5845, "step": 5300 },
    { "epoch": 24.32, "learning_rate": 4.701923076923076e-05, "loss": 1.5662, "step": 5400 },
    { "epoch": 24.77, "learning_rate": 4.619505494505494e-05, "loss": 1.5759, "step": 5500 },
    { "epoch": 25.23, "learning_rate": 4.5370879120879115e-05, "loss": 1.5928, "step": 5600 },
    { "epoch": 25.68, "learning_rate": 4.4546703296703295e-05, "loss": 1.5576, "step": 5700 },
    { "epoch": 26.13, "learning_rate": 4.3722527472527474e-05, "loss": 1.5289, "step": 5800 },
    { "epoch": 26.58, "learning_rate": 4.289835164835164e-05, "loss": 1.5381, "step": 5900 },
    { "epoch": 27.03, "learning_rate": 4.207417582417582e-05, "loss": 1.5403, "step": 6000 },
    { "epoch": 27.03, "eval_loss": 0.6035020351409912, "eval_runtime": 237.8256, "eval_samples_per_second": 18.85, "eval_steps_per_second": 2.359, "eval_wer": 2.69461743772242, "step": 6000 },
    { "epoch": 27.48, "learning_rate": 4.125e-05, "loss": 1.5314, "step": 6100 },
    { "epoch": 27.93, "learning_rate": 4.042582417582417e-05, "loss": 1.5186, "step": 6200 },
    { "epoch": 28.38, "learning_rate": 3.960164835164835e-05, "loss": 1.5097, "step": 6300 },
    { "epoch": 28.83, "learning_rate": 3.8777472527472525e-05, "loss": 1.52, "step": 6400 },
    { "epoch": 29.28, "learning_rate": 3.79532967032967e-05, "loss": 1.51, "step": 6500 },
    { "epoch": 29.73, "learning_rate": 3.712912087912088e-05, "loss": 1.5109, "step": 6600 },
    { "epoch": 30.18, "learning_rate": 3.630494505494505e-05, "loss": 1.5219, "step": 6700 },
    { "epoch": 30.63, "learning_rate": 3.548076923076922e-05, "loss": 1.5006, "step": 6800 },
    { "epoch": 31.08, "learning_rate": 3.46565934065934e-05, "loss": 1.475, "step": 6900 },
    { "epoch": 31.53, "learning_rate": 3.383241758241758e-05, "loss": 1.4773, "step": 7000 },
    { "epoch": 31.53, "eval_loss": 0.5646877288818359, "eval_runtime": 239.6564, "eval_samples_per_second": 18.706, "eval_steps_per_second": 2.341, "eval_wer": 2.555827402135231, "step": 7000 },
    { "epoch": 31.98, "learning_rate": 3.3008241758241755e-05, "loss": 1.4921, "step": 7100 },
    { "epoch": 32.43, "learning_rate": 3.218406593406593e-05, "loss": 1.4634, "step": 7200 },
    { "epoch": 32.88, "learning_rate": 3.135989010989011e-05, "loss": 1.4716, "step": 7300 },
    { "epoch": 33.33, "learning_rate": 3.054395604395604e-05, "loss": 1.4313, "step": 7400 },
    { "epoch": 33.78, "learning_rate": 2.971978021978022e-05, "loss": 1.4702, "step": 7500 },
    { "epoch": 34.23, "learning_rate": 2.8895604395604393e-05, "loss": 1.4679, "step": 7600 },
    { "epoch": 34.68, "learning_rate": 2.807142857142857e-05, "loss": 1.4379, "step": 7700 },
    { "epoch": 35.14, "learning_rate": 2.7247252747252742e-05, "loss": 1.4211, "step": 7800 },
    { "epoch": 35.59, "learning_rate": 2.642307692307692e-05, "loss": 1.4295, "step": 7900 },
    { "epoch": 36.04, "learning_rate": 2.5598901098901098e-05, "loss": 1.4228, "step": 8000 },
    { "epoch": 36.04, "eval_loss": 0.5477184057235718, "eval_runtime": 235.9806, "eval_samples_per_second": 18.997, "eval_steps_per_second": 2.377, "eval_wer": 2.5676156583629894, "step": 8000 },
    { "epoch": 36.49, "learning_rate": 2.477472527472527e-05, "loss": 1.4199, "step": 8100 },
    { "epoch": 36.94, "learning_rate": 2.395054945054945e-05, "loss": 1.4317, "step": 8200 },
    { "epoch": 37.39, "learning_rate": 2.3126373626373626e-05, "loss": 1.4048, "step": 8300 },
    { "epoch": 37.84, "learning_rate": 2.23021978021978e-05, "loss": 1.4232, "step": 8400 },
    { "epoch": 38.29, "learning_rate": 2.147802197802198e-05, "loss": 1.4076, "step": 8500 },
    { "epoch": 38.74, "learning_rate": 2.065384615384615e-05, "loss": 1.4009, "step": 8600 },
    { "epoch": 39.19, "learning_rate": 1.9829670329670328e-05, "loss": 1.4173, "step": 8700 },
    { "epoch": 39.64, "learning_rate": 1.90054945054945e-05, "loss": 1.4028, "step": 8800 },
    { "epoch": 40.09, "learning_rate": 1.818131868131868e-05, "loss": 1.3694, "step": 8900 },
    { "epoch": 40.54, "learning_rate": 1.7357142857142856e-05, "loss": 1.3801, "step": 9000 },
    { "epoch": 40.54, "eval_loss": 0.5413390398025513, "eval_runtime": 239.6653, "eval_samples_per_second": 18.705, "eval_steps_per_second": 2.341, "eval_wer": 2.619217081850534, "step": 9000 },
    { "epoch": 40.99, "learning_rate": 1.6532967032967033e-05, "loss": 1.4054, "step": 9100 },
    { "epoch": 41.44, "learning_rate": 1.5708791208791206e-05, "loss": 1.3823, "step": 9200 },
    { "epoch": 41.89, "learning_rate": 1.4884615384615383e-05, "loss": 1.3902, "step": 9300 },
    { "epoch": 42.34, "learning_rate": 1.4060439560439558e-05, "loss": 1.3568, "step": 9400 },
    { "epoch": 42.79, "learning_rate": 1.3236263736263736e-05, "loss": 1.3766, "step": 9500 },
    { "epoch": 43.24, "learning_rate": 1.2412087912087912e-05, "loss": 1.3883, "step": 9600 },
    { "epoch": 43.69, "learning_rate": 1.1587912087912087e-05, "loss": 1.3666, "step": 9700 },
    { "epoch": 44.14, "learning_rate": 1.0763736263736263e-05, "loss": 1.3718, "step": 9800 },
    { "epoch": 44.59, "learning_rate": 9.939560439560437e-06, "loss": 1.367, "step": 9900 },
    { "epoch": 45.05, "learning_rate": 9.115384615384615e-06, "loss": 1.3558, "step": 10000 },
    { "epoch": 45.05, "eval_loss": 0.5343097448348999, "eval_runtime": 238.241, "eval_samples_per_second": 18.817, "eval_steps_per_second": 2.355, "eval_wer": 2.657473309608541, "step": 10000 },
    { "epoch": 45.5, "learning_rate": 8.29120879120879e-06, "loss": 1.3595, "step": 10100 },
    { "epoch": 45.95, "learning_rate": 7.467032967032966e-06, "loss": 1.3697, "step": 10200 },
    { "epoch": 46.4, "learning_rate": 6.6428571428571416e-06, "loss": 1.3423, "step": 10300 },
    { "epoch": 46.85, "learning_rate": 5.818681318681319e-06, "loss": 1.341, "step": 10400 },
    { "epoch": 47.3, "learning_rate": 4.994505494505494e-06, "loss": 1.3289, "step": 10500 },
    { "epoch": 47.75, "learning_rate": 4.17032967032967e-06, "loss": 1.3462, "step": 10600 },
    { "epoch": 48.2, "learning_rate": 3.3461538461538457e-06, "loss": 1.3636, "step": 10700 },
    { "epoch": 48.65, "learning_rate": 2.521978021978022e-06, "loss": 1.3366, "step": 10800 },
    { "epoch": 49.1, "learning_rate": 1.6978021978021975e-06, "loss": 1.337, "step": 10900 },
    { "epoch": 49.55, "learning_rate": 8.818681318681318e-07, "loss": 1.3298, "step": 11000 },
    { "epoch": 49.55, "eval_loss": 0.5348710417747498, "eval_runtime": 238.2007, "eval_samples_per_second": 18.82, "eval_steps_per_second": 2.355, "eval_wer": 2.627446619217082, "step": 11000 },
    { "epoch": 50.0, "learning_rate": 5.769230769230769e-08, "loss": 1.3663, "step": 11100 },
    { "epoch": 50.0, "step": 11100, "total_flos": 8.246726177897223e+19, "train_loss": 2.741275282851211, "train_runtime": 31325.7849, "train_samples_per_second": 16.956, "train_steps_per_second": 0.354 }
  ],
  "max_steps": 11100,
  "num_train_epochs": 50,
  "total_flos": 8.246726177897223e+19,
  "trial_name": null,
  "trial_params": null
}