|
{
  "best_metric": 21.278825995807125,
  "best_model_checkpoint": "./checkpoint-700",
  "epoch": 66.66666666666667,
  "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.08,
      "learning_rate": 5.081432784565141e-06,
      "loss": 0.9461,
      "step": 25
    },
    {
      "epoch": 4.17,
      "learning_rate": 6.3901594783484795e-06,
      "loss": 0.1654,
      "step": 50
    },
    {
      "epoch": 6.25,
      "learning_rate": 7.1145874994768475e-06,
      "loss": 0.0159,
      "step": 75
    },
    {
      "epoch": 8.33,
      "learning_rate": 7.618084285704403e-06,
      "loss": 0.007,
      "step": 100
    },
    {
      "epoch": 8.33,
      "eval_loss": 0.57275390625,
      "eval_runtime": 17.5098,
      "eval_samples_per_second": 3.198,
      "eval_steps_per_second": 0.228,
      "eval_wer": 21.48846960167715,
      "step": 100
    },
    {
      "epoch": 10.42,
      "learning_rate": 8.004371064686715e-06,
      "loss": 0.0035,
      "step": 125
    },
    {
      "epoch": 12.5,
      "learning_rate": 8.317843796829062e-06,
      "loss": 0.0014,
      "step": 150
    },
    {
      "epoch": 14.58,
      "learning_rate": 8.581647301884627e-06,
      "loss": 0.0009,
      "step": 175
    },
    {
      "epoch": 16.67,
      "learning_rate": 8.809389787307027e-06,
      "loss": 0.0007,
      "step": 200
    },
    {
      "epoch": 16.67,
      "eval_loss": 0.70166015625,
      "eval_runtime": 17.8855,
      "eval_samples_per_second": 3.131,
      "eval_steps_per_second": 0.224,
      "eval_wer": 22.11740041928721,
      "step": 200
    },
    {
      "epoch": 18.75,
      "learning_rate": 9.009754892271524e-06,
      "loss": 0.0006,
      "step": 225
    },
    {
      "epoch": 20.83,
      "learning_rate": 9.18862407120431e-06,
      "loss": 0.0005,
      "step": 250
    },
    {
      "epoch": 22.92,
      "learning_rate": 9.350166018396598e-06,
      "loss": 0.0004,
      "step": 275
    },
    {
      "epoch": 25.0,
      "learning_rate": 9.497443253787987e-06,
      "loss": 0.0003,
      "step": 300
    },
    {
      "epoch": 25.0,
      "eval_loss": 0.73583984375,
      "eval_runtime": 17.8146,
      "eval_samples_per_second": 3.143,
      "eval_steps_per_second": 0.225,
      "eval_wer": 21.59329140461216,
      "step": 300
    },
    {
      "epoch": 27.08,
      "learning_rate": 9.632771883655444e-06,
      "loss": 0.0003,
      "step": 325
    },
    {
      "epoch": 29.17,
      "learning_rate": 9.75794603988726e-06,
      "loss": 0.0003,
      "step": 350
    },
    {
      "epoch": 31.25,
      "learning_rate": 9.87438382734506e-06,
      "loss": 0.0002,
      "step": 375
    },
    {
      "epoch": 33.33,
      "learning_rate": 9.983225577361795e-06,
      "loss": 0.0002,
      "step": 400
    },
    {
      "epoch": 33.33,
      "eval_loss": 0.759765625,
      "eval_runtime": 17.9019,
      "eval_samples_per_second": 3.128,
      "eval_steps_per_second": 0.223,
      "eval_wer": 21.59329140461216,
      "step": 400
    },
    {
      "epoch": 35.42,
      "learning_rate": 9.5e-06,
      "loss": 0.0002,
      "step": 425
    },
    {
      "epoch": 37.5,
      "learning_rate": 8.875e-06,
      "loss": 0.0002,
      "step": 450
    },
    {
      "epoch": 39.58,
      "learning_rate": 8.25e-06,
      "loss": 0.0002,
      "step": 475
    },
    {
      "epoch": 41.67,
      "learning_rate": 7.625e-06,
      "loss": 0.0002,
      "step": 500
    },
    {
      "epoch": 41.67,
      "eval_loss": 0.779296875,
      "eval_runtime": 17.9031,
      "eval_samples_per_second": 3.128,
      "eval_steps_per_second": 0.223,
      "eval_wer": 22.0125786163522,
      "step": 500
    },
    {
      "epoch": 43.75,
      "learning_rate": 7e-06,
      "loss": 0.0001,
      "step": 525
    },
    {
      "epoch": 45.83,
      "learning_rate": 6.375e-06,
      "loss": 0.0001,
      "step": 550
    },
    {
      "epoch": 47.92,
      "learning_rate": 5.75e-06,
      "loss": 0.0001,
      "step": 575
    },
    {
      "epoch": 50.0,
      "learning_rate": 5.125e-06,
      "loss": 0.0001,
      "step": 600
    },
    {
      "epoch": 50.0,
      "eval_loss": 0.78955078125,
      "eval_runtime": 17.7601,
      "eval_samples_per_second": 3.153,
      "eval_steps_per_second": 0.225,
      "eval_wer": 22.0125786163522,
      "step": 600
    },
    {
      "epoch": 52.08,
      "learning_rate": 4.5e-06,
      "loss": 0.0001,
      "step": 625
    },
    {
      "epoch": 54.17,
      "learning_rate": 3.875e-06,
      "loss": 0.0001,
      "step": 650
    },
    {
      "epoch": 56.25,
      "learning_rate": 3.2500000000000002e-06,
      "loss": 0.0001,
      "step": 675
    },
    {
      "epoch": 58.33,
      "learning_rate": 2.6250000000000003e-06,
      "loss": 0.0001,
      "step": 700
    },
    {
      "epoch": 58.33,
      "eval_loss": 0.796875,
      "eval_runtime": 17.8587,
      "eval_samples_per_second": 3.136,
      "eval_steps_per_second": 0.224,
      "eval_wer": 21.278825995807125,
      "step": 700
    },
    {
      "epoch": 60.42,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.0001,
      "step": 725
    },
    {
      "epoch": 62.5,
      "learning_rate": 1.3750000000000002e-06,
      "loss": 0.0001,
      "step": 750
    },
    {
      "epoch": 64.58,
      "learning_rate": 7.5e-07,
      "loss": 0.0001,
      "step": 775
    },
    {
      "epoch": 66.67,
      "learning_rate": 1.2500000000000002e-07,
      "loss": 0.0001,
      "step": 800
    },
    {
      "epoch": 66.67,
      "eval_loss": 0.79931640625,
      "eval_runtime": 17.7869,
      "eval_samples_per_second": 3.148,
      "eval_steps_per_second": 0.225,
      "eval_wer": 21.278825995807125,
      "step": 800
    },
    {
      "epoch": 66.67,
      "step": 800,
      "total_flos": 2.140496152649437e+20,
      "train_loss": 0.035802456135861575,
      "train_runtime": 14846.7625,
      "train_samples_per_second": 6.897,
      "train_steps_per_second": 0.054
    }
  ],
  "max_steps": 800,
  "num_train_epochs": 67,
  "total_flos": 2.140496152649437e+20,
  "trial_name": null,
  "trial_params": null
}
|
|