{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 52.08311688311688,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.04,
      "learning_rate": 3.828125e-06,
      "loss": 15.104,
      "step": 100
    },
    {
      "epoch": 2.08,
      "learning_rate": 7.6953125e-06,
      "loss": 12.2149,
      "step": 200
    },
    {
      "epoch": 3.12,
      "learning_rate": 1.16015625e-05,
      "loss": 4.9885,
      "step": 300
    },
    {
      "epoch": 4.17,
      "learning_rate": 1.55078125e-05,
      "loss": 3.3788,
      "step": 400
    },
    {
      "epoch": 5.21,
      "learning_rate": 1.94140625e-05,
      "loss": 3.1705,
      "step": 500
    },
    {
      "epoch": 6.25,
      "learning_rate": 2.3320312499999995e-05,
      "loss": 3.11,
      "step": 600
    },
    {
      "epoch": 7.29,
      "learning_rate": 2.72265625e-05,
      "loss": 3.0526,
      "step": 700
    },
    {
      "epoch": 8.33,
      "learning_rate": 3.11328125e-05,
      "loss": 3.0113,
      "step": 800
    },
    {
      "epoch": 9.37,
      "learning_rate": 3.5039062499999995e-05,
      "loss": 2.9601,
      "step": 900
    },
    {
      "epoch": 10.42,
      "learning_rate": 3.89453125e-05,
      "loss": 2.9099,
      "step": 1000
    },
    {
      "epoch": 10.42,
      "eval_loss": 2.8369038105010986,
      "eval_runtime": 198.5866,
      "eval_samples_per_second": 25.405,
      "eval_steps_per_second": 0.796,
      "eval_wer": 1.0,
      "step": 1000
    },
    {
      "epoch": 11.46,
      "learning_rate": 4.28515625e-05,
      "loss": 2.6653,
      "step": 1100
    },
    {
      "epoch": 12.5,
      "learning_rate": 4.675781249999999e-05,
      "loss": 1.8958,
      "step": 1200
    },
    {
      "epoch": 13.54,
      "learning_rate": 5.0664062499999996e-05,
      "loss": 1.4812,
      "step": 1300
    },
    {
      "epoch": 14.58,
      "learning_rate": 5.45703125e-05,
      "loss": 1.3358,
      "step": 1400
    },
    {
      "epoch": 15.62,
      "learning_rate": 5.8476562499999996e-05,
      "loss": 1.2522,
      "step": 1500
    },
    {
      "epoch": 16.66,
      "learning_rate": 6.238281249999999e-05,
      "loss": 1.1919,
      "step": 1600
    },
    {
      "epoch": 17.71,
      "learning_rate": 6.62890625e-05,
      "loss": 1.1663,
      "step": 1700
    },
    {
      "epoch": 18.75,
      "learning_rate": 7.019531249999999e-05,
      "loss": 1.1289,
      "step": 1800
    },
    {
      "epoch": 19.79,
      "learning_rate": 7.410156249999999e-05,
      "loss": 1.0902,
      "step": 1900
    },
    {
      "epoch": 20.83,
      "learning_rate": 7.4248046875e-05,
      "loss": 1.0745,
      "step": 2000
    },
    {
      "epoch": 20.83,
      "eval_loss": 0.19572903215885162,
      "eval_runtime": 202.06,
      "eval_samples_per_second": 24.968,
      "eval_steps_per_second": 0.782,
      "eval_wer": 0.16725371193237237,
      "step": 2000
    },
    {
      "epoch": 21.87,
      "learning_rate": 7.327148437499999e-05,
      "loss": 1.0485,
      "step": 2100
    },
    {
      "epoch": 22.91,
      "learning_rate": 7.2294921875e-05,
      "loss": 1.0291,
      "step": 2200
    },
    {
      "epoch": 23.96,
      "learning_rate": 7.1318359375e-05,
      "loss": 1.007,
      "step": 2300
    },
    {
      "epoch": 25.0,
      "learning_rate": 7.0341796875e-05,
      "loss": 1.0008,
      "step": 2400
    },
    {
      "epoch": 26.04,
      "learning_rate": 6.9365234375e-05,
      "loss": 0.988,
      "step": 2500
    },
    {
      "epoch": 27.08,
      "learning_rate": 6.8388671875e-05,
      "loss": 0.9766,
      "step": 2600
    },
    {
      "epoch": 28.12,
      "learning_rate": 6.7412109375e-05,
      "loss": 0.9663,
      "step": 2700
    },
    {
      "epoch": 29.17,
      "learning_rate": 6.6435546875e-05,
      "loss": 0.9539,
      "step": 2800
    },
    {
      "epoch": 30.21,
      "learning_rate": 6.545898437499999e-05,
      "loss": 0.9479,
      "step": 2900
    },
    {
      "epoch": 31.25,
      "learning_rate": 6.448242187499999e-05,
      "loss": 0.934,
      "step": 3000
    },
    {
      "epoch": 31.25,
      "eval_loss": 0.1579357087612152,
      "eval_runtime": 198.7733,
      "eval_samples_per_second": 25.381,
      "eval_steps_per_second": 0.795,
      "eval_wer": 0.1388858784003468,
      "step": 3000
    },
    {
      "epoch": 32.29,
      "learning_rate": 6.3505859375e-05,
      "loss": 0.9285,
      "step": 3100
    },
    {
      "epoch": 33.33,
      "learning_rate": 6.252929687499999e-05,
      "loss": 0.9121,
      "step": 3200
    },
    {
      "epoch": 34.37,
      "learning_rate": 6.155273437499999e-05,
      "loss": 0.9016,
      "step": 3300
    },
    {
      "epoch": 35.42,
      "learning_rate": 6.0576171875e-05,
      "loss": 0.9023,
      "step": 3400
    },
    {
      "epoch": 36.46,
      "learning_rate": 5.9599609374999994e-05,
      "loss": 0.9004,
      "step": 3500
    },
    {
      "epoch": 37.5,
      "learning_rate": 5.862304687499999e-05,
      "loss": 0.8844,
      "step": 3600
    },
    {
      "epoch": 38.54,
      "learning_rate": 5.7646484375e-05,
      "loss": 0.8771,
      "step": 3700
    },
    {
      "epoch": 39.58,
      "learning_rate": 5.6669921875e-05,
      "loss": 0.876,
      "step": 3800
    },
    {
      "epoch": 40.62,
      "learning_rate": 5.569335937499999e-05,
      "loss": 0.8708,
      "step": 3900
    },
    {
      "epoch": 41.66,
      "learning_rate": 5.4716796874999997e-05,
      "loss": 0.8691,
      "step": 4000
    },
    {
      "epoch": 41.66,
      "eval_loss": 0.14571049809455872,
      "eval_runtime": 195.237,
      "eval_samples_per_second": 25.84,
      "eval_steps_per_second": 0.809,
      "eval_wer": 0.12899642353961202,
      "step": 4000
    },
    {
      "epoch": 42.71,
      "learning_rate": 5.3740234374999996e-05,
      "loss": 0.8624,
      "step": 4100
    },
    {
      "epoch": 43.75,
      "learning_rate": 5.2763671874999995e-05,
      "loss": 0.8556,
      "step": 4200
    },
    {
      "epoch": 44.79,
      "learning_rate": 5.1787109375e-05,
      "loss": 0.8607,
      "step": 4300
    },
    {
      "epoch": 45.83,
      "learning_rate": 5.0810546875e-05,
      "loss": 0.8536,
      "step": 4400
    },
    {
      "epoch": 46.87,
      "learning_rate": 4.983398437499999e-05,
      "loss": 0.8493,
      "step": 4500
    },
    {
      "epoch": 47.91,
      "learning_rate": 4.8857421875e-05,
      "loss": 0.8456,
      "step": 4600
    },
    {
      "epoch": 48.96,
      "learning_rate": 4.7880859375e-05,
      "loss": 0.8333,
      "step": 4700
    },
    {
      "epoch": 50.0,
      "learning_rate": 4.6904296874999996e-05,
      "loss": 0.8346,
      "step": 4800
    },
    {
      "epoch": 51.04,
      "learning_rate": 4.5927734375e-05,
      "loss": 0.8403,
      "step": 4900
    },
    {
      "epoch": 52.08,
      "learning_rate": 4.4951171874999995e-05,
      "loss": 0.8328,
      "step": 5000
    },
    {
      "epoch": 52.08,
      "eval_loss": 0.14348936080932617,
      "eval_runtime": 197.7739,
      "eval_samples_per_second": 25.509,
      "eval_steps_per_second": 0.799,
      "eval_wer": 0.12054297171344966,
      "step": 5000
    }
  ],
  "max_steps": 9600,
  "num_train_epochs": 100,
  "total_flos": 7.771701490085208e+19,
  "trial_name": null,
  "trial_params": null
}