{
  "best_metric": 54.37442075996293,
  "best_model_checkpoint": "/cosmos/home/sp-operator/ai/training/models/huggingface/scripts/../breeze-dsw-tiny-ml/checkpoint-600",
  "epoch": 24.04,
  "eval_steps": 100,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 4.898977360288234e-06,
      "loss": 1.982,
      "step": 25
    },
    {
      "epoch": 1.01,
      "learning_rate": 6.160712527409633e-06,
      "loss": 1.4663,
      "step": 50
    },
    {
      "epoch": 1.03,
      "learning_rate": 6.85912902234906e-06,
      "loss": 1.2536,
      "step": 75
    },
    {
      "epoch": 2.02,
      "learning_rate": 7.344547104469332e-06,
      "loss": 1.1736,
      "step": 100
    },
    {
      "epoch": 2.02,
      "eval_loss": 1.1669921875,
      "eval_runtime": 744.9075,
      "eval_samples_per_second": 0.89,
      "eval_steps_per_second": 0.056,
      "eval_wer": 99.7775718257646,
      "step": 100
    },
    {
      "epoch": 3.0,
      "learning_rate": 7.716963756434345e-06,
      "loss": 1.0945,
      "step": 125
    },
    {
      "epoch": 3.03,
      "learning_rate": 8.019180844200955e-06,
      "loss": 1.058,
      "step": 150
    },
    {
      "epoch": 4.01,
      "learning_rate": 8.27351214279797e-06,
      "loss": 0.9986,
      "step": 175
    },
    {
      "epoch": 4.04,
      "learning_rate": 8.49307723936858e-06,
      "loss": 0.9647,
      "step": 200
    },
    {
      "epoch": 4.04,
      "eval_loss": 1.0048828125,
      "eval_runtime": 741.5505,
      "eval_samples_per_second": 0.894,
      "eval_steps_per_second": 0.057,
      "eval_wer": 95.48656163113995,
      "step": 200
    },
    {
      "epoch": 5.03,
      "learning_rate": 8.686247975778677e-06,
      "loss": 0.8708,
      "step": 225
    },
    {
      "epoch": 6.01,
      "learning_rate": 8.858694625217149e-06,
      "loss": 0.7678,
      "step": 250
    },
    {
      "epoch": 6.04,
      "learning_rate": 9.014436199608479e-06,
      "loss": 0.6767,
      "step": 275
    },
    {
      "epoch": 7.02,
      "learning_rate": 9.156425255148058e-06,
      "loss": 0.5311,
      "step": 300
    },
    {
      "epoch": 7.02,
      "eval_loss": 0.6806640625,
      "eval_runtime": 738.6797,
      "eval_samples_per_second": 0.898,
      "eval_steps_per_second": 0.057,
      "eval_wer": 74.55977757182578,
      "step": 300
    },
    {
      "epoch": 8.01,
      "learning_rate": 9.28689473531776e-06,
      "loss": 0.4551,
      "step": 325
    },
    {
      "epoch": 8.03,
      "learning_rate": 9.407574351377137e-06,
      "loss": 0.3868,
      "step": 350
    },
    {
      "epoch": 9.02,
      "learning_rate": 9.519831289296397e-06,
      "loss": 0.3221,
      "step": 375
    },
    {
      "epoch": 9.04,
      "learning_rate": 9.624764935335318e-06,
      "loss": 0.3036,
      "step": 400
    },
    {
      "epoch": 9.04,
      "eval_loss": 0.541015625,
      "eval_runtime": 740.1324,
      "eval_samples_per_second": 0.896,
      "eval_steps_per_second": 0.057,
      "eval_wer": 61.575532900834105,
      "step": 400
    },
    {
      "epoch": 10.03,
      "learning_rate": 9.723272550712454e-06,
      "loss": 0.2357,
      "step": 425
    },
    {
      "epoch": 11.01,
      "learning_rate": 9.816095971633122e-06,
      "loss": 0.2188,
      "step": 450
    },
    {
      "epoch": 11.04,
      "learning_rate": 9.90385555539545e-06,
      "loss": 0.1986,
      "step": 475
    },
    {
      "epoch": 12.02,
      "learning_rate": 9.987075336738768e-06,
      "loss": 0.1672,
      "step": 500
    },
    {
      "epoch": 12.02,
      "eval_loss": 0.5146484375,
      "eval_runtime": 740.3934,
      "eval_samples_per_second": 0.895,
      "eval_steps_per_second": 0.057,
      "eval_wer": 56.570898980537535,
      "step": 500
    },
    {
      "epoch": 13.01,
      "learning_rate": 9.600000000000001e-06,
      "loss": 0.148,
      "step": 525
    },
    {
      "epoch": 13.03,
      "learning_rate": 9.100000000000001e-06,
      "loss": 0.1238,
      "step": 550
    },
    {
      "epoch": 14.02,
      "learning_rate": 8.6e-06,
      "loss": 0.1068,
      "step": 575
    },
    {
      "epoch": 14.04,
      "learning_rate": 8.1e-06,
      "loss": 0.1006,
      "step": 600
    },
    {
      "epoch": 14.04,
      "eval_loss": 0.55029296875,
      "eval_runtime": 738.8532,
      "eval_samples_per_second": 0.897,
      "eval_steps_per_second": 0.057,
      "eval_wer": 54.37442075996293,
      "step": 600
    },
    {
      "epoch": 15.03,
      "learning_rate": 7.600000000000001e-06,
      "loss": 0.0787,
      "step": 625
    },
    {
      "epoch": 16.01,
      "learning_rate": 7.100000000000001e-06,
      "loss": 0.0677,
      "step": 650
    },
    {
      "epoch": 16.04,
      "learning_rate": 6.600000000000001e-06,
      "loss": 0.0644,
      "step": 675
    },
    {
      "epoch": 17.02,
      "learning_rate": 6.1e-06,
      "loss": 0.0484,
      "step": 700
    },
    {
      "epoch": 17.02,
      "eval_loss": 0.5859375,
      "eval_runtime": 738.8622,
      "eval_samples_per_second": 0.897,
      "eval_steps_per_second": 0.057,
      "eval_wer": 54.50417052826692,
      "step": 700
    },
    {
      "epoch": 18.0,
      "learning_rate": 5.600000000000001e-06,
      "loss": 0.0474,
      "step": 725
    },
    {
      "epoch": 18.03,
      "learning_rate": 5.1e-06,
      "loss": 0.037,
      "step": 750
    },
    {
      "epoch": 19.02,
      "learning_rate": 4.600000000000001e-06,
      "loss": 0.0315,
      "step": 775
    },
    {
      "epoch": 19.04,
      "learning_rate": 4.1e-06,
      "loss": 0.0305,
      "step": 800
    },
    {
      "epoch": 19.04,
      "eval_loss": 0.65625,
      "eval_runtime": 739.3222,
      "eval_samples_per_second": 0.897,
      "eval_steps_per_second": 0.057,
      "eval_wer": 55.41241890639481,
      "step": 800
    },
    {
      "epoch": 20.02,
      "learning_rate": 3.6000000000000003e-06,
      "loss": 0.0211,
      "step": 825
    },
    {
      "epoch": 21.01,
      "learning_rate": 3.1000000000000004e-06,
      "loss": 0.0205,
      "step": 850
    },
    {
      "epoch": 21.04,
      "learning_rate": 2.6e-06,
      "loss": 0.0181,
      "step": 875
    },
    {
      "epoch": 22.02,
      "learning_rate": 2.1000000000000002e-06,
      "loss": 0.0147,
      "step": 900
    },
    {
      "epoch": 22.02,
      "eval_loss": 0.70947265625,
      "eval_runtime": 740.6818,
      "eval_samples_per_second": 0.895,
      "eval_steps_per_second": 0.057,
      "eval_wer": 54.87488415199259,
      "step": 900
    },
    {
      "epoch": 23.0,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 0.0145,
      "step": 925
    },
    {
      "epoch": 23.03,
      "learning_rate": 1.1e-06,
      "loss": 0.0121,
      "step": 950
    },
    {
      "epoch": 24.02,
      "learning_rate": 6.000000000000001e-07,
      "loss": 0.0105,
      "step": 975
    },
    {
      "epoch": 24.04,
      "learning_rate": 1.0000000000000001e-07,
      "loss": 0.0116,
      "step": 1000
    },
    {
      "epoch": 24.04,
      "eval_loss": 0.73828125,
      "eval_runtime": 740.4643,
      "eval_samples_per_second": 0.895,
      "eval_steps_per_second": 0.057,
      "eval_wer": 55.097312326227986,
      "step": 1000
    },
    {
      "epoch": 24.04,
      "step": 1000,
      "total_flos": 7.730329851579597e+17,
      "train_loss": 0.0,
      "train_runtime": 26.9074,
      "train_samples_per_second": 2378.53,
      "train_steps_per_second": 74.329
    }
  ],
  "logging_steps": 25,
  "max_steps": 2000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 100,
  "total_flos": 7.730329851579597e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}