{
  "best_metric": 42.72474513438369,
  "best_model_checkpoint": "/cosmos/home/sp-operator/ai/training/models/huggingface/scripts/../breeze-dsw-base-ml/checkpoint-800",
  "epoch": 29.02,
  "eval_steps": 200,
  "global_step": 1200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 5.0453611334320685e-06,
      "loss": 1.6886,
      "step": 25
    },
    {
      "epoch": 1.0,
      "learning_rate": 6.229195710491767e-06,
      "loss": 1.2987,
      "step": 50
    },
    {
      "epoch": 1.02,
      "learning_rate": 6.903829450223392e-06,
      "loss": 1.0959,
      "step": 75
    },
    {
      "epoch": 2.01,
      "learning_rate": 7.361221988663844e-06,
      "loss": 0.8535,
      "step": 100
    },
    {
      "epoch": 3.0,
      "learning_rate": 7.730207550743121e-06,
      "loss": 0.6174,
      "step": 125
    },
    {
      "epoch": 3.02,
      "learning_rate": 8.03016458599496e-06,
      "loss": 0.4687,
      "step": 150
    },
    {
      "epoch": 4.01,
      "learning_rate": 8.282894746203441e-06,
      "loss": 0.3515,
      "step": 175
    },
    {
      "epoch": 4.02,
      "learning_rate": 8.501266121799902e-06,
      "loss": 0.3151,
      "step": 200
    },
    {
      "epoch": 4.02,
      "eval_loss": 0.45166015625,
      "eval_runtime": 1605.5021,
      "eval_samples_per_second": 0.413,
      "eval_steps_per_second": 0.026,
      "eval_wer": 54.51343836886006,
      "step": 200
    },
    {
      "epoch": 5.01,
      "learning_rate": 8.693512601774437e-06,
      "loss": 0.2339,
      "step": 225
    },
    {
      "epoch": 6.0,
      "learning_rate": 8.865222471593567e-06,
      "loss": 0.1966,
      "step": 250
    },
    {
      "epoch": 6.02,
      "learning_rate": 9.020362953730323e-06,
      "loss": 0.1695,
      "step": 275
    },
    {
      "epoch": 7.01,
      "learning_rate": 9.161852281961698e-06,
      "loss": 0.134,
      "step": 300
    },
    {
      "epoch": 8.0,
      "learning_rate": 9.29189975311636e-06,
      "loss": 0.1164,
      "step": 325
    },
    {
      "epoch": 8.02,
      "learning_rate": 9.412218256259678e-06,
      "loss": 0.0968,
      "step": 350
    },
    {
      "epoch": 9.01,
      "learning_rate": 9.524162683365145e-06,
      "loss": 0.0761,
      "step": 375
    },
    {
      "epoch": 9.02,
      "learning_rate": 9.62882322733502e-06,
      "loss": 0.0703,
      "step": 400
    },
    {
      "epoch": 9.02,
      "eval_loss": 0.4560546875,
      "eval_runtime": 1612.3575,
      "eval_samples_per_second": 0.411,
      "eval_steps_per_second": 0.026,
      "eval_wer": 46.72845227062095,
      "step": 400
    },
    {
      "epoch": 10.01,
      "learning_rate": 9.727090137141168e-06,
      "loss": 0.0476,
      "step": 425
    },
    {
      "epoch": 11.01,
      "learning_rate": 9.819699807237934e-06,
      "loss": 0.0431,
      "step": 450
    },
    {
      "epoch": 11.02,
      "learning_rate": 9.907268307310855e-06,
      "loss": 0.0363,
      "step": 475
    },
    {
      "epoch": 12.01,
      "learning_rate": 9.990316248055788e-06,
      "loss": 0.0278,
      "step": 500
    },
    {
      "epoch": 13.0,
      "learning_rate": 9.86e-06,
      "loss": 0.0231,
      "step": 525
    },
    {
      "epoch": 13.02,
      "learning_rate": 9.693333333333334e-06,
      "loss": 0.0222,
      "step": 550
    },
    {
      "epoch": 14.01,
      "learning_rate": 9.526666666666668e-06,
      "loss": 0.0161,
      "step": 575
    },
    {
      "epoch": 14.02,
      "learning_rate": 9.360000000000002e-06,
      "loss": 0.0144,
      "step": 600
    },
    {
      "epoch": 14.02,
      "eval_loss": 0.5625,
      "eval_runtime": 1602.9774,
      "eval_samples_per_second": 0.414,
      "eval_steps_per_second": 0.026,
      "eval_wer": 43.76274328081557,
      "step": 600
    },
    {
      "epoch": 15.01,
      "learning_rate": 9.193333333333334e-06,
      "loss": 0.0108,
      "step": 625
    },
    {
      "epoch": 16.0,
      "learning_rate": 9.026666666666666e-06,
      "loss": 0.01,
      "step": 650
    },
    {
      "epoch": 16.02,
      "learning_rate": 8.860000000000002e-06,
      "loss": 0.0097,
      "step": 675
    },
    {
      "epoch": 17.01,
      "learning_rate": 8.693333333333334e-06,
      "loss": 0.0064,
      "step": 700
    },
    {
      "epoch": 18.0,
      "learning_rate": 8.526666666666667e-06,
      "loss": 0.0087,
      "step": 725
    },
    {
      "epoch": 18.02,
      "learning_rate": 8.36e-06,
      "loss": 0.008,
      "step": 750
    },
    {
      "epoch": 19.01,
      "learning_rate": 8.193333333333335e-06,
      "loss": 0.0073,
      "step": 775
    },
    {
      "epoch": 19.02,
      "learning_rate": 8.026666666666667e-06,
      "loss": 0.006,
      "step": 800
    },
    {
      "epoch": 19.02,
      "eval_loss": 0.6259765625,
      "eval_runtime": 1596.5675,
      "eval_samples_per_second": 0.415,
      "eval_steps_per_second": 0.026,
      "eval_wer": 42.72474513438369,
      "step": 800
    },
    {
      "epoch": 20.01,
      "learning_rate": 7.860000000000001e-06,
      "loss": 0.0041,
      "step": 825
    },
    {
      "epoch": 21.0,
      "learning_rate": 7.693333333333333e-06,
      "loss": 0.0043,
      "step": 850
    },
    {
      "epoch": 21.02,
      "learning_rate": 7.526666666666668e-06,
      "loss": 0.0043,
      "step": 875
    },
    {
      "epoch": 22.01,
      "learning_rate": 7.360000000000001e-06,
      "loss": 0.0034,
      "step": 900
    },
    {
      "epoch": 23.0,
      "learning_rate": 7.1933333333333345e-06,
      "loss": 0.0036,
      "step": 925
    },
    {
      "epoch": 23.02,
      "learning_rate": 7.0266666666666674e-06,
      "loss": 0.0033,
      "step": 950
    },
    {
      "epoch": 24.01,
      "learning_rate": 6.860000000000001e-06,
      "loss": 0.0024,
      "step": 975
    },
    {
      "epoch": 24.02,
      "learning_rate": 6.693333333333334e-06,
      "loss": 0.0024,
      "step": 1000
    },
    {
      "epoch": 24.02,
      "eval_loss": 0.69384765625,
      "eval_runtime": 1597.407,
      "eval_samples_per_second": 0.415,
      "eval_steps_per_second": 0.026,
      "eval_wer": 43.03058387395737,
      "step": 1000
    },
    {
      "epoch": 25.01,
      "learning_rate": 6.526666666666666e-06,
      "loss": 0.003,
      "step": 1025
    },
    {
      "epoch": 26.0,
      "learning_rate": 6.360000000000001e-06,
      "loss": 0.0026,
      "step": 1050
    },
    {
      "epoch": 26.02,
      "learning_rate": 6.193333333333333e-06,
      "loss": 0.0022,
      "step": 1075
    },
    {
      "epoch": 27.01,
      "learning_rate": 6.026666666666668e-06,
      "loss": 0.0017,
      "step": 1100
    },
    {
      "epoch": 28.0,
      "learning_rate": 5.86e-06,
      "loss": 0.0013,
      "step": 1125
    },
    {
      "epoch": 28.02,
      "learning_rate": 5.6933333333333344e-06,
      "loss": 0.0011,
      "step": 1150
    },
    {
      "epoch": 29.01,
      "learning_rate": 5.5266666666666666e-06,
      "loss": 0.0015,
      "step": 1175
    },
    {
      "epoch": 29.02,
      "learning_rate": 5.36e-06,
      "loss": 0.0012,
      "step": 1200
    },
    {
      "epoch": 29.02,
      "eval_loss": 0.7353515625,
      "eval_runtime": 1601.8604,
      "eval_samples_per_second": 0.414,
      "eval_steps_per_second": 0.026,
      "eval_wer": 44.21686746987952,
      "step": 1200
    },
    {
      "epoch": 29.02,
      "step": 1200,
      "total_flos": 2.443924644411998e+18,
      "train_loss": 0.0,
      "train_runtime": 41.2826,
      "train_samples_per_second": 1550.288,
      "train_steps_per_second": 48.447
    }
  ],
  "logging_steps": 25,
  "max_steps": 2000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 200,
  "total_flos": 2.443924644411998e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}