{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 60.0,
  "global_step": 18720,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.516245487364621,
      "eval_loss": 30.45633316040039,
      "eval_runtime": 6.9248,
      "eval_samples_per_second": 40.001,
      "eval_steps_per_second": 5.054,
      "step": 312
    },
    {
      "best_epoch": 0,
      "best_eval_accuracy": 0.516245487364621,
      "epoch": 1.0,
      "step": 312
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.0009732905982905983,
      "loss": 31.8281,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 28.268251419067383,
      "eval_runtime": 6.8798,
      "eval_samples_per_second": 40.263,
      "eval_steps_per_second": 5.087,
      "step": 624
    },
    {
      "best_epoch": 0,
      "best_eval_accuracy": 0.516245487364621,
      "epoch": 2.0,
      "step": 624
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 22.482925415039062,
      "eval_runtime": 6.8633,
      "eval_samples_per_second": 40.36,
      "eval_steps_per_second": 5.1,
      "step": 936
    },
    {
      "best_epoch": 0,
      "best_eval_accuracy": 0.516245487364621,
      "epoch": 3.0,
      "step": 936
    },
    {
      "epoch": 3.21,
      "learning_rate": 0.0009465811965811966,
      "loss": 26.6026,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 17.250818252563477,
      "eval_runtime": 6.8768,
      "eval_samples_per_second": 40.281,
      "eval_steps_per_second": 5.09,
      "step": 1248
    },
    {
      "best_epoch": 0,
      "best_eval_accuracy": 0.516245487364621,
      "epoch": 4.0,
      "step": 1248
    },
    {
      "epoch": 4.81,
      "learning_rate": 0.0009198717948717949,
      "loss": 20.7188,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 15.695619583129883,
      "eval_runtime": 6.9281,
      "eval_samples_per_second": 39.982,
      "eval_steps_per_second": 5.052,
      "step": 1560
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 5.0,
      "step": 1560
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 14.759888648986816,
      "eval_runtime": 6.832,
      "eval_samples_per_second": 40.545,
      "eval_steps_per_second": 5.123,
      "step": 1872
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 6.0,
      "step": 1872
    },
    {
      "epoch": 6.41,
      "learning_rate": 0.0008931623931623932,
      "loss": 18.7808,
      "step": 2000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 14.433088302612305,
      "eval_runtime": 6.8638,
      "eval_samples_per_second": 40.356,
      "eval_steps_per_second": 5.099,
      "step": 2184
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 7.0,
      "step": 2184
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 13.9365873336792,
      "eval_runtime": 6.8545,
      "eval_samples_per_second": 40.411,
      "eval_steps_per_second": 5.106,
      "step": 2496
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 8.0,
      "step": 2496
    },
    {
      "epoch": 8.01,
      "learning_rate": 0.0008664529914529915,
      "loss": 18.0838,
      "step": 2500
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 13.634007453918457,
      "eval_runtime": 6.7394,
      "eval_samples_per_second": 41.102,
      "eval_steps_per_second": 5.193,
      "step": 2808
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 9.0,
      "step": 2808
    },
    {
      "epoch": 9.62,
      "learning_rate": 0.0008397435897435898,
      "loss": 17.722,
      "step": 3000
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 13.437921524047852,
      "eval_runtime": 6.8897,
      "eval_samples_per_second": 40.205,
      "eval_steps_per_second": 5.08,
      "step": 3120
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 10.0,
      "step": 3120
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 13.439308166503906,
      "eval_runtime": 6.8739,
      "eval_samples_per_second": 40.297,
      "eval_steps_per_second": 5.092,
      "step": 3432
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 11.0,
      "step": 3432
    },
    {
      "epoch": 11.22,
      "learning_rate": 0.0008130341880341881,
      "loss": 17.4783,
      "step": 3500
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 13.137593269348145,
      "eval_runtime": 6.874,
      "eval_samples_per_second": 40.297,
      "eval_steps_per_second": 5.092,
      "step": 3744
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 12.0,
      "step": 3744
    },
    {
      "epoch": 12.82,
      "learning_rate": 0.0007863247863247863,
      "loss": 17.2699,
      "step": 4000
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 12.95992374420166,
      "eval_runtime": 6.8727,
      "eval_samples_per_second": 40.304,
      "eval_steps_per_second": 5.093,
      "step": 4056
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 13.0,
      "step": 4056
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 12.848048210144043,
      "eval_runtime": 6.8528,
      "eval_samples_per_second": 40.421,
      "eval_steps_per_second": 5.107,
      "step": 4368
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 14.0,
      "step": 4368
    },
    {
      "epoch": 14.42,
      "learning_rate": 0.0007596153846153846,
      "loss": 17.0966,
      "step": 4500
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 12.781341552734375,
      "eval_runtime": 6.8525,
      "eval_samples_per_second": 40.423,
      "eval_steps_per_second": 5.108,
      "step": 4680
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 15.0,
      "step": 4680
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 12.691984176635742,
      "eval_runtime": 6.9197,
      "eval_samples_per_second": 40.03,
      "eval_steps_per_second": 5.058,
      "step": 4992
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 16.0,
      "step": 4992
    },
    {
      "epoch": 16.03,
      "learning_rate": 0.0007329059829059829,
      "loss": 16.9613,
      "step": 5000
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 12.569376945495605,
      "eval_runtime": 6.837,
      "eval_samples_per_second": 40.515,
      "eval_steps_per_second": 5.119,
      "step": 5304
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 17.0,
      "step": 5304
    },
    {
      "epoch": 17.63,
      "learning_rate": 0.0007061965811965813,
      "loss": 16.848,
      "step": 5500
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 12.51942253112793,
      "eval_runtime": 6.9126,
      "eval_samples_per_second": 40.072,
      "eval_steps_per_second": 5.063,
      "step": 5616
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 18.0,
      "step": 5616
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 12.459056854248047,
      "eval_runtime": 6.8379,
      "eval_samples_per_second": 40.509,
      "eval_steps_per_second": 5.119,
      "step": 5928
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 19.0,
      "step": 5928
    },
    {
      "epoch": 19.23,
      "learning_rate": 0.0006794871794871796,
      "loss": 16.7661,
      "step": 6000
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 12.382737159729004,
      "eval_runtime": 6.8814,
      "eval_samples_per_second": 40.253,
      "eval_steps_per_second": 5.086,
      "step": 6240
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 20.0,
      "step": 6240
    },
    {
      "epoch": 20.83,
      "learning_rate": 0.0006527777777777778,
      "loss": 16.6825,
      "step": 6500
    },
    {
      "epoch": 21.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 12.341012001037598,
      "eval_runtime": 6.8719,
      "eval_samples_per_second": 40.309,
      "eval_steps_per_second": 5.093,
      "step": 6552
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 21.0,
      "step": 6552
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 12.324116706848145,
      "eval_runtime": 6.8995,
      "eval_samples_per_second": 40.148,
      "eval_steps_per_second": 5.073,
      "step": 6864
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 22.0,
      "step": 6864
    },
    {
      "epoch": 22.44,
      "learning_rate": 0.0006260683760683761,
      "loss": 16.5963,
      "step": 7000
    },
    {
      "epoch": 23.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 12.329614639282227,
      "eval_runtime": 6.8549,
      "eval_samples_per_second": 40.409,
      "eval_steps_per_second": 5.106,
      "step": 7176
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 23.0,
      "step": 7176
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 12.261089324951172,
      "eval_runtime": 6.8822,
      "eval_samples_per_second": 40.249,
      "eval_steps_per_second": 5.086,
      "step": 7488
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 24.0,
      "step": 7488
    },
    {
      "epoch": 24.04,
      "learning_rate": 0.0005993589743589744,
      "loss": 16.5513,
      "step": 7500
    },
    {
      "epoch": 25.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 12.151515007019043,
      "eval_runtime": 6.8798,
      "eval_samples_per_second": 40.263,
      "eval_steps_per_second": 5.087,
      "step": 7800
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 25.0,
      "step": 7800
    },
    {
      "epoch": 25.64,
      "learning_rate": 0.0005726495726495727,
      "loss": 16.4926,
      "step": 8000
    },
    {
      "epoch": 26.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 12.119367599487305,
      "eval_runtime": 6.8242,
      "eval_samples_per_second": 40.591,
      "eval_steps_per_second": 5.129,
      "step": 8112
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 26.0,
      "step": 8112
    },
    {
      "epoch": 27.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 12.105155944824219,
      "eval_runtime": 6.9189,
      "eval_samples_per_second": 40.035,
      "eval_steps_per_second": 5.059,
      "step": 8424
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 27.0,
      "step": 8424
    },
    {
      "epoch": 27.24,
      "learning_rate": 0.000545940170940171,
      "loss": 16.4398,
      "step": 8500
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 12.051624298095703,
      "eval_runtime": 6.8467,
      "eval_samples_per_second": 40.457,
      "eval_steps_per_second": 5.112,
      "step": 8736
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 28.0,
      "step": 8736
    },
    {
      "epoch": 28.85,
      "learning_rate": 0.0005192307692307693,
      "loss": 16.399,
      "step": 9000
    },
    {
      "epoch": 29.0,
      "eval_accuracy": 0.49458483754512633,
      "eval_loss": 12.021004676818848,
      "eval_runtime": 6.9157,
      "eval_samples_per_second": 40.054,
      "eval_steps_per_second": 5.061,
      "step": 9048
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 29.0,
      "step": 9048
    },
    {
      "epoch": 30.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 12.005374908447266,
      "eval_runtime": 6.9233,
      "eval_samples_per_second": 40.01,
      "eval_steps_per_second": 5.055,
      "step": 9360
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 30.0,
      "step": 9360
    },
    {
      "epoch": 30.45,
      "learning_rate": 0.0004925213675213676,
      "loss": 16.3657,
      "step": 9500
    },
    {
      "epoch": 31.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 11.996041297912598,
      "eval_runtime": 6.8696,
      "eval_samples_per_second": 40.323,
      "eval_steps_per_second": 5.095,
      "step": 9672
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 31.0,
      "step": 9672
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 11.954824447631836,
      "eval_runtime": 6.8336,
      "eval_samples_per_second": 40.535,
      "eval_steps_per_second": 5.122,
      "step": 9984
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 32.0,
      "step": 9984
    },
    {
      "epoch": 32.05,
      "learning_rate": 0.00046581196581196583,
      "loss": 16.3306,
      "step": 10000
    },
    {
      "epoch": 33.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 11.933246612548828,
      "eval_runtime": 6.8702,
      "eval_samples_per_second": 40.319,
      "eval_steps_per_second": 5.094,
      "step": 10296
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 33.0,
      "step": 10296
    },
    {
      "epoch": 33.65,
      "learning_rate": 0.0004391025641025641,
      "loss": 16.294,
      "step": 10500
    },
    {
      "epoch": 34.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.914789199829102,
      "eval_runtime": 6.8807,
      "eval_samples_per_second": 40.258,
      "eval_steps_per_second": 5.087,
      "step": 10608
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 34.0,
      "step": 10608
    },
    {
      "epoch": 35.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.92249870300293,
      "eval_runtime": 6.8751,
      "eval_samples_per_second": 40.29,
      "eval_steps_per_second": 5.091,
      "step": 10920
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 35.0,
      "step": 10920
    },
    {
      "epoch": 35.26,
      "learning_rate": 0.0004123931623931624,
      "loss": 16.2657,
      "step": 11000
    },
    {
      "epoch": 36.0,
      "eval_accuracy": 0.47653429602888087,
      "eval_loss": 11.87264633178711,
      "eval_runtime": 6.849,
      "eval_samples_per_second": 40.444,
      "eval_steps_per_second": 5.11,
      "step": 11232
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 36.0,
      "step": 11232
    },
    {
      "epoch": 36.86,
      "learning_rate": 0.0003856837606837607,
      "loss": 16.2465,
      "step": 11500
    },
    {
      "epoch": 37.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.845185279846191,
      "eval_runtime": 6.8882,
      "eval_samples_per_second": 40.214,
      "eval_steps_per_second": 5.081,
      "step": 11544
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 37.0,
      "step": 11544
    },
    {
      "epoch": 38.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 11.834094047546387,
      "eval_runtime": 6.8409,
      "eval_samples_per_second": 40.492,
      "eval_steps_per_second": 5.116,
      "step": 11856
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 38.0,
      "step": 11856
    },
    {
      "epoch": 38.46,
      "learning_rate": 0.000358974358974359,
      "loss": 16.208,
      "step": 12000
    },
    {
      "epoch": 39.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.823224067687988,
      "eval_runtime": 6.842,
      "eval_samples_per_second": 40.485,
      "eval_steps_per_second": 5.115,
      "step": 12168
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 39.0,
      "step": 12168
    },
    {
      "epoch": 40.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.797883033752441,
      "eval_runtime": 6.8695,
      "eval_samples_per_second": 40.323,
      "eval_steps_per_second": 5.095,
      "step": 12480
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 40.0,
      "step": 12480
    },
    {
      "epoch": 40.06,
      "learning_rate": 0.00033226495726495727,
      "loss": 16.191,
      "step": 12500
    },
    {
      "epoch": 41.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.789505004882812,
      "eval_runtime": 6.908,
      "eval_samples_per_second": 40.098,
      "eval_steps_per_second": 5.067,
      "step": 12792
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 41.0,
      "step": 12792
    },
    {
      "epoch": 41.67,
      "learning_rate": 0.0003055555555555556,
      "loss": 16.1729,
      "step": 13000
    },
    {
      "epoch": 42.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.839137077331543,
      "eval_runtime": 6.9254,
      "eval_samples_per_second": 39.998,
      "eval_steps_per_second": 5.054,
      "step": 13104
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 42.0,
      "step": 13104
    },
    {
      "epoch": 43.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 11.761892318725586,
      "eval_runtime": 3.298,
      "eval_samples_per_second": 83.991,
      "eval_steps_per_second": 10.613,
      "step": 13416
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 43.0,
      "step": 13416
    },
    {
      "epoch": 43.27,
      "learning_rate": 0.0002788461538461539,
      "loss": 16.1571,
      "step": 13500
    },
    {
      "epoch": 44.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.750166893005371,
      "eval_runtime": 3.2851,
      "eval_samples_per_second": 84.319,
      "eval_steps_per_second": 10.654,
      "step": 13728
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 44.0,
      "step": 13728
    },
    {
      "epoch": 44.87,
      "learning_rate": 0.00025213675213675216,
      "loss": 16.1268,
      "step": 14000
    },
    {
      "epoch": 45.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.751955032348633,
      "eval_runtime": 3.2892,
      "eval_samples_per_second": 84.215,
      "eval_steps_per_second": 10.641,
      "step": 14040
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 45.0,
      "step": 14040
    },
    {
      "epoch": 46.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.75387954711914,
      "eval_runtime": 3.3027,
      "eval_samples_per_second": 83.87,
      "eval_steps_per_second": 10.597,
      "step": 14352
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 46.0,
      "step": 14352
    },
    {
      "epoch": 46.47,
      "learning_rate": 0.00022542735042735044,
      "loss": 16.1194,
      "step": 14500
    },
    {
      "epoch": 47.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.754090309143066,
      "eval_runtime": 3.2928,
      "eval_samples_per_second": 84.122,
      "eval_steps_per_second": 10.629,
      "step": 14664
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 47.0,
      "step": 14664
    },
    {
      "epoch": 48.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 11.712992668151855,
      "eval_runtime": 3.2869,
      "eval_samples_per_second": 84.274,
      "eval_steps_per_second": 10.648,
      "step": 14976
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 48.0,
      "step": 14976
    },
    {
      "epoch": 48.08,
      "learning_rate": 0.0001987179487179487,
      "loss": 16.11,
      "step": 15000
    },
    {
      "epoch": 49.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 11.701953887939453,
      "eval_runtime": 3.2907,
      "eval_samples_per_second": 84.176,
      "eval_steps_per_second": 10.636,
      "step": 15288
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 49.0,
      "step": 15288
    },
    {
      "epoch": 49.68,
      "learning_rate": 0.00017200854700854702,
      "loss": 16.0989,
      "step": 15500
    },
    {
      "epoch": 50.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.694924354553223,
      "eval_runtime": 3.2959,
      "eval_samples_per_second": 84.044,
      "eval_steps_per_second": 10.619,
      "step": 15600
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 50.0,
      "step": 15600
    },
    {
      "epoch": 51.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.702590942382812,
      "eval_runtime": 3.3086,
      "eval_samples_per_second": 83.721,
      "eval_steps_per_second": 10.579,
      "step": 15912
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 51.0,
      "step": 15912
    },
    {
      "epoch": 51.28,
      "learning_rate": 0.00014529914529914532,
      "loss": 16.0802,
      "step": 16000
    },
    {
      "epoch": 52.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.70562744140625,
      "eval_runtime": 3.3021,
      "eval_samples_per_second": 83.885,
      "eval_steps_per_second": 10.599,
      "step": 16224
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 52.0,
      "step": 16224
    },
    {
      "epoch": 52.88,
      "learning_rate": 0.00011858974358974358,
      "loss": 16.0765,
      "step": 16500
    },
    {
      "epoch": 53.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 11.679302215576172,
      "eval_runtime": 3.3016,
      "eval_samples_per_second": 83.899,
      "eval_steps_per_second": 10.601,
      "step": 16536
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 53.0,
      "step": 16536
    },
    {
      "epoch": 54.0,
      "eval_accuracy": 0.5270758122743683,
      "eval_loss": 11.675891876220703,
      "eval_runtime": 3.3127,
      "eval_samples_per_second": 83.618,
      "eval_steps_per_second": 10.566,
      "step": 16848
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 54.0,
      "step": 16848
    },
    {
      "epoch": 54.49,
      "learning_rate": 9.188034188034189e-05,
      "loss": 16.0629,
      "step": 17000
    },
    {
      "epoch": 55.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.671220779418945,
      "eval_runtime": 3.2982,
      "eval_samples_per_second": 83.984,
      "eval_steps_per_second": 10.612,
      "step": 17160
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 55.0,
      "step": 17160
    },
    {
      "epoch": 56.0,
      "eval_accuracy": 0.49458483754512633,
      "eval_loss": 11.665974617004395,
      "eval_runtime": 3.3072,
      "eval_samples_per_second": 83.758,
      "eval_steps_per_second": 10.583,
      "step": 17472
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 56.0,
      "step": 17472
    },
    {
      "epoch": 56.09,
      "learning_rate": 6.517094017094018e-05,
      "loss": 16.0619,
      "step": 17500
    },
    {
      "epoch": 57.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.666152000427246,
      "eval_runtime": 3.2973,
      "eval_samples_per_second": 84.009,
      "eval_steps_per_second": 10.615,
      "step": 17784
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 57.0,
      "step": 17784
    },
    {
      "epoch": 57.69,
      "learning_rate": 3.846153846153846e-05,
      "loss": 16.0566,
      "step": 18000
    },
    {
      "epoch": 58.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.664301872253418,
      "eval_runtime": 3.2855,
      "eval_samples_per_second": 84.311,
      "eval_steps_per_second": 10.653,
      "step": 18096
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 58.0,
      "step": 18096
    },
    {
      "epoch": 59.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.661619186401367,
      "eval_runtime": 3.3182,
      "eval_samples_per_second": 83.479,
      "eval_steps_per_second": 10.548,
      "step": 18408
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 59.0,
      "step": 18408
    },
    {
      "epoch": 59.29,
      "learning_rate": 1.1752136752136752e-05,
      "loss": 16.0547,
      "step": 18500
    },
    {
      "epoch": 60.0,
      "eval_accuracy": 0.4729241877256318,
      "eval_loss": 11.661417961120605,
      "eval_runtime": 3.2798,
      "eval_samples_per_second": 84.456,
      "eval_steps_per_second": 10.671,
      "step": 18720
    },
    {
      "best_epoch": 4,
      "best_eval_accuracy": 0.5270758122743683,
      "epoch": 60.0,
      "step": 18720
    },
    {
      "epoch": 60.0,
      "step": 18720,
      "total_flos": 6.96152728406016e+16,
      "train_loss": 17.346778074085204,
      "train_runtime": 5573.2397,
      "train_samples_per_second": 26.807,
      "train_steps_per_second": 3.359
    }
  ],
  "max_steps": 18720,
  "num_train_epochs": 60,
  "total_flos": 6.96152728406016e+16,
  "trial_name": null,
  "trial_params": null
}