{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.603072983354673,
  "global_step": 45000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.43,
      "learning_rate": 1.9569123419101614e-05,
      "loss": 336.5875,
      "step": 2000
    },
    {
      "epoch": 0.85,
      "learning_rate": 1.869690361971217e-05,
      "loss": 219.0754,
      "step": 4000
    },
    {
      "epoch": 1.07,
      "eval_loss": 164.8076629638672,
      "eval_runtime": 187.3312,
      "eval_samples_per_second": 10.644,
      "eval_steps_per_second": 10.644,
      "eval_wer": 1.0029591701024072,
      "step": 5000
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.7825119930222416e-05,
      "loss": 184.2771,
      "step": 6000
    },
    {
      "epoch": 1.71,
      "learning_rate": 1.6953336240732667e-05,
      "loss": 128.2162,
      "step": 8000
    },
    {
      "epoch": 2.13,
      "learning_rate": 1.6081116441343222e-05,
      "loss": 109.27,
      "step": 10000
    },
    {
      "epoch": 2.13,
      "eval_loss": 79.24954986572266,
      "eval_runtime": 187.7771,
      "eval_samples_per_second": 10.619,
      "eval_steps_per_second": 10.619,
      "eval_wer": 0.8314935496741588,
      "step": 10000
    },
    {
      "epoch": 2.56,
      "learning_rate": 1.5209332751853467e-05,
      "loss": 97.8815,
      "step": 12000
    },
    {
      "epoch": 2.99,
      "learning_rate": 1.4337112952464022e-05,
      "loss": 88.9743,
      "step": 14000
    },
    {
      "epoch": 3.2,
      "eval_loss": 61.599853515625,
      "eval_runtime": 188.2187,
      "eval_samples_per_second": 10.594,
      "eval_steps_per_second": 10.594,
      "eval_wer": 0.6520813938023673,
      "step": 15000
    },
    {
      "epoch": 3.41,
      "learning_rate": 1.3464893153074576e-05,
      "loss": 81.1935,
      "step": 16000
    },
    {
      "epoch": 3.84,
      "learning_rate": 1.2593109463584824e-05,
      "loss": 78.2627,
      "step": 18000
    },
    {
      "epoch": 4.27,
      "learning_rate": 1.1720889664195378e-05,
      "loss": 74.458,
      "step": 20000
    },
    {
      "epoch": 4.27,
      "eval_loss": 54.358341217041016,
      "eval_runtime": 216.5604,
      "eval_samples_per_second": 9.208,
      "eval_steps_per_second": 9.208,
      "eval_wer": 0.5824577736401118,
      "step": 20000
    },
    {
      "epoch": 4.69,
      "learning_rate": 1.0849542084605322e-05,
      "loss": 71.4817,
      "step": 22000
    },
    {
      "epoch": 5.12,
      "learning_rate": 9.977322285215875e-06,
      "loss": 66.9921,
      "step": 24000
    },
    {
      "epoch": 5.34,
      "eval_loss": 49.08004379272461,
      "eval_runtime": 189.9086,
      "eval_samples_per_second": 10.5,
      "eval_steps_per_second": 10.5,
      "eval_wer": 0.5399321718313606,
      "step": 25000
    },
    {
      "epoch": 5.55,
      "learning_rate": 9.105974705625819e-06,
      "loss": 66.8826,
      "step": 26000
    },
    {
      "epoch": 5.98,
      "learning_rate": 8.233754906236373e-06,
      "loss": 65.702,
      "step": 28000
    },
    {
      "epoch": 6.4,
      "learning_rate": 7.361535106846927e-06,
      "loss": 63.3176,
      "step": 30000
    },
    {
      "epoch": 6.4,
      "eval_loss": 47.168060302734375,
      "eval_runtime": 188.2915,
      "eval_samples_per_second": 10.59,
      "eval_steps_per_second": 10.59,
      "eval_wer": 0.5138648756483575,
      "step": 30000
    },
    {
      "epoch": 6.83,
      "learning_rate": 6.490187527256869e-06,
      "loss": 61.7128,
      "step": 32000
    },
    {
      "epoch": 7.26,
      "learning_rate": 5.6179677278674225e-06,
      "loss": 60.3547,
      "step": 34000
    },
    {
      "epoch": 7.47,
      "eval_loss": 45.36619567871094,
      "eval_runtime": 217.9659,
      "eval_samples_per_second": 9.148,
      "eval_steps_per_second": 9.148,
      "eval_wer": 0.4957108658066232,
      "step": 35000
    },
    {
      "epoch": 7.68,
      "learning_rate": 4.746184038377671e-06,
      "loss": 60.0223,
      "step": 36000
    },
    {
      "epoch": 8.11,
      "learning_rate": 3.8739642389882255e-06,
      "loss": 58.8691,
      "step": 38000
    },
    {
      "epoch": 8.54,
      "learning_rate": 3.0017444395987794e-06,
      "loss": 58.391,
      "step": 40000
    },
    {
      "epoch": 8.54,
      "eval_loss": 43.8188591003418,
      "eval_runtime": 187.5787,
      "eval_samples_per_second": 10.63,
      "eval_steps_per_second": 10.63,
      "eval_wer": 0.47882032185131,
      "step": 40000
    },
    {
      "epoch": 8.96,
      "learning_rate": 2.1303968600087223e-06,
      "loss": 57.0914,
      "step": 42000
    },
    {
      "epoch": 9.39,
      "learning_rate": 1.258177060619276e-06,
      "loss": 58.6047,
      "step": 44000
    },
    {
      "epoch": 9.6,
      "eval_loss": 43.90667724609375,
      "eval_runtime": 188.2012,
      "eval_samples_per_second": 10.595,
      "eval_steps_per_second": 10.595,
      "eval_wer": 0.4761271445670967,
      "step": 45000
    }
  ],
  "max_steps": 46860,
  "num_train_epochs": 10,
  "total_flos": 5.571669557707454e+18,
  "trial_name": null,
  "trial_params": null
}