{
  "best_metric": 0.0,
  "best_model_checkpoint": "D:\\development\\whisper-finetune\\output\\checkpoint-1000",
  "epoch": 15000.0,
  "eval_steps": 1000,
  "global_step": 15000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 500.0,
      "grad_norm": 0.000682959274854511,
      "learning_rate": 9.920000000000002e-06,
      "loss": 0.5312,
      "step": 500
    },
    {
      "epoch": 1000.0,
      "grad_norm": 0.00020609197963494807,
      "learning_rate": 9.65793103448276e-06,
      "loss": 0.0,
      "step": 1000
    },
    {
      "epoch": 1000.0,
      "eval_loss": 2.3341947326116497e-06,
      "eval_runtime": 0.6984,
      "eval_samples_per_second": 4.295,
      "eval_steps_per_second": 1.432,
      "eval_wer": 0.0,
      "step": 1000
    },
    {
      "epoch": 1500.0,
      "grad_norm": 0.0001350301317870617,
      "learning_rate": 9.313103448275864e-06,
      "loss": 0.0,
      "step": 1500
    },
    {
      "epoch": 2000.0,
      "grad_norm": 0.00011808969429694116,
      "learning_rate": 8.968275862068966e-06,
      "loss": 0.0,
      "step": 2000
    },
    {
      "epoch": 2000.0,
      "eval_loss": 1.0998018069585669e-06,
      "eval_runtime": 0.7217,
      "eval_samples_per_second": 4.157,
      "eval_steps_per_second": 1.386,
      "eval_wer": 0.0,
      "step": 2000
    },
    {
      "epoch": 2500.0,
      "grad_norm": 8.354395686183125e-05,
      "learning_rate": 8.623448275862069e-06,
      "loss": 0.0,
      "step": 2500
    },
    {
      "epoch": 3000.0,
      "grad_norm": 8.122230065055192e-05,
      "learning_rate": 8.278620689655173e-06,
      "loss": 0.0,
      "step": 3000
    },
    {
      "epoch": 3000.0,
      "eval_loss": 6.614192784581974e-07,
      "eval_runtime": 0.6797,
      "eval_samples_per_second": 4.414,
      "eval_steps_per_second": 1.471,
      "eval_wer": 0.0,
      "step": 3000
    },
    {
      "epoch": 3500.0,
      "grad_norm": 8.345582318725064e-05,
      "learning_rate": 7.933793103448275e-06,
      "loss": 0.0,
      "step": 3500
    },
    {
      "epoch": 4000.0,
      "grad_norm": 8.580592839280143e-05,
      "learning_rate": 7.58896551724138e-06,
      "loss": 0.0,
      "step": 4000
    },
    {
      "epoch": 4000.0,
      "eval_loss": 6.152737341835746e-07,
      "eval_runtime": 0.6701,
      "eval_samples_per_second": 4.477,
      "eval_steps_per_second": 1.492,
      "eval_wer": 0.0,
      "step": 4000
    },
    {
      "epoch": 4500.0,
      "grad_norm": 8.349263953277841e-05,
      "learning_rate": 7.244137931034483e-06,
      "loss": 0.0,
      "step": 4500
    },
    {
      "epoch": 5000.0,
      "grad_norm": 8.662583422847092e-05,
      "learning_rate": 6.899310344827586e-06,
      "loss": 0.0,
      "step": 5000
    },
    {
      "epoch": 5000.0,
      "eval_loss": 5.268281597636815e-07,
      "eval_runtime": 0.6852,
      "eval_samples_per_second": 4.378,
      "eval_steps_per_second": 1.459,
      "eval_wer": 0.0,
      "step": 5000
    },
    {
      "epoch": 5500.0,
      "grad_norm": 9.030300134327263e-05,
      "learning_rate": 6.5544827586206905e-06,
      "loss": 0.0,
      "step": 5500
    },
    {
      "epoch": 6000.0,
      "grad_norm": 9.486892668064684e-05,
      "learning_rate": 6.209655172413793e-06,
      "loss": 0.0,
      "step": 6000
    },
    {
      "epoch": 6000.0,
      "eval_loss": 5.345190743355488e-07,
      "eval_runtime": 0.6873,
      "eval_samples_per_second": 4.365,
      "eval_steps_per_second": 1.455,
      "eval_wer": 0.0,
      "step": 6000
    },
    {
      "epoch": 6500.0,
      "grad_norm": 6.878418935230002e-05,
      "learning_rate": 5.864827586206897e-06,
      "loss": 0.0,
      "step": 6500
    },
    {
      "epoch": 7000.0,
      "grad_norm": 0.0008927604649215937,
      "learning_rate": 5.5200000000000005e-06,
      "loss": 0.0,
      "step": 7000
    },
    {
      "epoch": 7000.0,
      "eval_loss": 2.384185791015625e-07,
      "eval_runtime": 0.6998,
      "eval_samples_per_second": 4.287,
      "eval_steps_per_second": 1.429,
      "eval_wer": 0.0,
      "step": 7000
    },
    {
      "epoch": 7500.0,
      "grad_norm": 0.0001970465964404866,
      "learning_rate": 5.175172413793104e-06,
      "loss": 0.0,
      "step": 7500
    },
    {
      "epoch": 8000.0,
      "grad_norm": 0.00019531552970875055,
      "learning_rate": 4.830344827586207e-06,
      "loss": 0.0,
      "step": 8000
    },
    {
      "epoch": 8000.0,
      "eval_loss": 1.5766390504268202e-07,
      "eval_runtime": 0.7272,
      "eval_samples_per_second": 4.126,
      "eval_steps_per_second": 1.375,
      "eval_wer": 0.0,
      "step": 8000
    },
    {
      "epoch": 8500.0,
      "grad_norm": 8.719138713786379e-05,
      "learning_rate": 4.4855172413793105e-06,
      "loss": 0.0,
      "step": 8500
    },
    {
      "epoch": 9000.0,
      "grad_norm": 9.791918273549527e-05,
      "learning_rate": 4.140689655172414e-06,
      "loss": 0.0,
      "step": 9000
    },
    {
      "epoch": 9000.0,
      "eval_loss": 3.076368670917873e-07,
      "eval_runtime": 0.7114,
      "eval_samples_per_second": 4.217,
      "eval_steps_per_second": 1.406,
      "eval_wer": 0.0,
      "step": 9000
    },
    {
      "epoch": 9500.0,
      "grad_norm": 0.0005698113818652928,
      "learning_rate": 3.795862068965517e-06,
      "loss": 0.0,
      "step": 9500
    },
    {
      "epoch": 10000.0,
      "grad_norm": 0.00010731745715020224,
      "learning_rate": 3.4510344827586214e-06,
      "loss": 0.0,
      "step": 10000
    },
    {
      "epoch": 10000.0,
      "eval_loss": 5.7681912579710115e-08,
      "eval_runtime": 0.7029,
      "eval_samples_per_second": 4.268,
      "eval_steps_per_second": 1.423,
      "eval_wer": 0.0,
      "step": 10000
    },
    {
      "epoch": 10500.0,
      "grad_norm": 0.00012043450260534883,
      "learning_rate": 3.1062068965517243e-06,
      "loss": 0.0,
      "step": 10500
    },
    {
      "epoch": 11000.0,
      "grad_norm": 0.00018969921802636236,
      "learning_rate": 2.7613793103448276e-06,
      "loss": 0.0,
      "step": 11000
    },
    {
      "epoch": 11000.0,
      "eval_loss": 1.3074567561943695e-07,
      "eval_runtime": 0.698,
      "eval_samples_per_second": 4.298,
      "eval_steps_per_second": 1.433,
      "eval_wer": 0.0,
      "step": 11000
    },
    {
      "epoch": 11500.0,
      "grad_norm": 0.00011485564755275846,
      "learning_rate": 2.4165517241379314e-06,
      "loss": 0.0,
      "step": 11500
    },
    {
      "epoch": 12000.0,
      "grad_norm": 9.144964133156464e-05,
      "learning_rate": 2.0717241379310347e-06,
      "loss": 0.0,
      "step": 12000
    },
    {
      "epoch": 12000.0,
      "eval_loss": 2.1150034967831743e-07,
      "eval_runtime": 0.6885,
      "eval_samples_per_second": 4.357,
      "eval_steps_per_second": 1.452,
      "eval_wer": 0.0,
      "step": 12000
    },
    {
      "epoch": 12500.0,
      "grad_norm": 8.384387183468789e-05,
      "learning_rate": 1.7268965517241382e-06,
      "loss": 0.0,
      "step": 12500
    },
    {
      "epoch": 13000.0,
      "grad_norm": 8.791466825641692e-05,
      "learning_rate": 1.3820689655172416e-06,
      "loss": 0.0,
      "step": 13000
    },
    {
      "epoch": 13000.0,
      "eval_loss": 9.998198180483087e-08,
      "eval_runtime": 0.6646,
      "eval_samples_per_second": 4.514,
      "eval_steps_per_second": 1.505,
      "eval_wer": 0.0,
      "step": 13000
    },
    {
      "epoch": 13500.0,
      "grad_norm": 6.821281567681581e-05,
      "learning_rate": 1.0372413793103449e-06,
      "loss": 0.0,
      "step": 13500
    },
    {
      "epoch": 14000.0,
      "grad_norm": 5.373560270527378e-05,
      "learning_rate": 6.924137931034483e-07,
      "loss": 0.0,
      "step": 14000
    },
    {
      "epoch": 14000.0,
      "eval_loss": 1.3459113290537061e-07,
      "eval_runtime": 0.6976,
      "eval_samples_per_second": 4.301,
      "eval_steps_per_second": 1.434,
      "eval_wer": 0.0,
      "step": 14000
    },
    {
      "epoch": 14500.0,
      "grad_norm": 8.191769302356988e-05,
      "learning_rate": 3.475862068965517e-07,
      "loss": 0.0,
      "step": 14500
    },
    {
      "epoch": 15000.0,
      "grad_norm": 5.6634093198226765e-05,
      "learning_rate": 2.758620689655173e-09,
      "loss": 0.0,
      "step": 15000
    },
    {
      "epoch": 15000.0,
      "eval_loss": 1.3843659019130428e-07,
      "eval_runtime": 0.6813,
      "eval_samples_per_second": 4.403,
      "eval_steps_per_second": 1.468,
      "eval_wer": 0.0,
      "step": 15000
    }
  ],
  "logging_steps": 500,
  "max_steps": 15000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15000,
  "save_steps": 1000,
  "total_flos": 2.9186998272e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}