{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 27.0,
  "eval_steps": 500,
  "global_step": 5724,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 9.95e-05,
      "loss": 6.3597,
      "step": 212
    },
    {
      "epoch": 1.0,
      "eval_loss": 5.853069305419922,
      "eval_runtime": 3.5937,
      "eval_samples_per_second": 188.106,
      "eval_steps_per_second": 23.652,
      "step": 212
    },
    {
      "epoch": 2.0,
      "learning_rate": 9.900000000000001e-05,
      "loss": 5.5629,
      "step": 424
    },
    {
      "epoch": 2.0,
      "eval_loss": 4.915834903717041,
      "eval_runtime": 3.704,
      "eval_samples_per_second": 182.506,
      "eval_steps_per_second": 22.948,
      "step": 424
    },
    {
      "epoch": 3.0,
      "learning_rate": 9.850000000000001e-05,
      "loss": 4.8228,
      "step": 636
    },
    {
      "epoch": 3.0,
      "eval_loss": 4.177055835723877,
      "eval_runtime": 3.7104,
      "eval_samples_per_second": 182.193,
      "eval_steps_per_second": 22.909,
      "step": 636
    },
    {
      "epoch": 4.0,
      "learning_rate": 9.8e-05,
      "loss": 4.1035,
      "step": 848
    },
    {
      "epoch": 4.0,
      "eval_loss": 3.462527275085449,
      "eval_runtime": 3.1205,
      "eval_samples_per_second": 216.633,
      "eval_steps_per_second": 27.239,
      "step": 848
    },
    {
      "epoch": 5.0,
      "learning_rate": 9.75e-05,
      "loss": 3.467,
      "step": 1060
    },
    {
      "epoch": 5.0,
      "eval_loss": 2.8398332595825195,
      "eval_runtime": 3.3235,
      "eval_samples_per_second": 203.402,
      "eval_steps_per_second": 25.576,
      "step": 1060
    },
    {
      "epoch": 6.0,
      "learning_rate": 9.7e-05,
      "loss": 2.8887,
      "step": 1272
    },
    {
      "epoch": 6.0,
      "eval_loss": 2.3201420307159424,
      "eval_runtime": 3.4255,
      "eval_samples_per_second": 197.342,
      "eval_steps_per_second": 24.814,
      "step": 1272
    },
    {
      "epoch": 7.0,
      "learning_rate": 9.65e-05,
      "loss": 2.4003,
      "step": 1484
    },
    {
      "epoch": 7.0,
      "eval_loss": 1.8698177337646484,
      "eval_runtime": 3.5218,
      "eval_samples_per_second": 191.945,
      "eval_steps_per_second": 24.135,
      "step": 1484
    },
    {
      "epoch": 8.0,
      "learning_rate": 9.6e-05,
      "loss": 1.9919,
      "step": 1696
    },
    {
      "epoch": 8.0,
      "eval_loss": 1.532776951789856,
      "eval_runtime": 3.5514,
      "eval_samples_per_second": 190.345,
      "eval_steps_per_second": 23.934,
      "step": 1696
    },
    {
      "epoch": 9.0,
      "learning_rate": 9.55e-05,
      "loss": 1.6656,
      "step": 1908
    },
    {
      "epoch": 9.0,
      "eval_loss": 1.2841078042984009,
      "eval_runtime": 3.622,
      "eval_samples_per_second": 186.637,
      "eval_steps_per_second": 23.468,
      "step": 1908
    },
    {
      "epoch": 10.0,
      "learning_rate": 9.5e-05,
      "loss": 1.4036,
      "step": 2120
    },
    {
      "epoch": 10.0,
      "eval_loss": 1.107720971107483,
      "eval_runtime": 3.6471,
      "eval_samples_per_second": 185.353,
      "eval_steps_per_second": 23.306,
      "step": 2120
    },
    {
      "epoch": 11.0,
      "learning_rate": 9.449999999999999e-05,
      "loss": 1.2091,
      "step": 2332
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.9709760546684265,
      "eval_runtime": 3.7402,
      "eval_samples_per_second": 180.741,
      "eval_steps_per_second": 22.726,
      "step": 2332
    },
    {
      "epoch": 12.0,
      "learning_rate": 9.4e-05,
      "loss": 1.0379,
      "step": 2544
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.8528622984886169,
      "eval_runtime": 3.734,
      "eval_samples_per_second": 181.041,
      "eval_steps_per_second": 22.764,
      "step": 2544
    },
    {
      "epoch": 13.0,
      "learning_rate": 9.350000000000001e-05,
      "loss": 0.8994,
      "step": 2756
    },
    {
      "epoch": 13.0,
      "eval_loss": 0.7617917060852051,
      "eval_runtime": 3.7153,
      "eval_samples_per_second": 181.95,
      "eval_steps_per_second": 22.878,
      "step": 2756
    },
    {
      "epoch": 14.0,
      "learning_rate": 9.300000000000001e-05,
      "loss": 0.7828,
      "step": 2968
    },
    {
      "epoch": 14.0,
      "eval_loss": 0.6774648427963257,
      "eval_runtime": 3.6994,
      "eval_samples_per_second": 182.734,
      "eval_steps_per_second": 22.977,
      "step": 2968
    },
    {
      "epoch": 15.0,
      "learning_rate": 9.250000000000001e-05,
      "loss": 0.6788,
      "step": 3180
    },
    {
      "epoch": 15.0,
      "eval_loss": 0.6361714005470276,
      "eval_runtime": 3.6581,
      "eval_samples_per_second": 184.796,
      "eval_steps_per_second": 23.236,
      "step": 3180
    },
    {
      "epoch": 16.0,
      "learning_rate": 9.200000000000001e-05,
      "loss": 0.5819,
      "step": 3392
    },
    {
      "epoch": 16.0,
      "eval_loss": 0.5761357545852661,
      "eval_runtime": 3.7378,
      "eval_samples_per_second": 180.853,
      "eval_steps_per_second": 22.74,
      "step": 3392
    },
    {
      "epoch": 17.0,
      "learning_rate": 9.15e-05,
      "loss": 0.5104,
      "step": 3604
    },
    {
      "epoch": 17.0,
      "eval_loss": 0.5219044089317322,
      "eval_runtime": 3.7453,
      "eval_samples_per_second": 180.495,
      "eval_steps_per_second": 22.695,
      "step": 3604
    },
    {
      "epoch": 18.0,
      "learning_rate": 9.1e-05,
      "loss": 0.4343,
      "step": 3816
    },
    {
      "epoch": 18.0,
      "eval_loss": 0.4844624102115631,
      "eval_runtime": 3.7695,
      "eval_samples_per_second": 179.335,
      "eval_steps_per_second": 22.55,
      "step": 3816
    },
    {
      "epoch": 19.0,
      "learning_rate": 9.05e-05,
      "loss": 0.3784,
      "step": 4028
    },
    {
      "epoch": 19.0,
      "eval_loss": 0.44724151492118835,
      "eval_runtime": 3.7703,
      "eval_samples_per_second": 179.296,
      "eval_steps_per_second": 22.545,
      "step": 4028
    },
    {
      "epoch": 20.0,
      "learning_rate": 9e-05,
      "loss": 0.3224,
      "step": 4240
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.45848363637924194,
      "eval_runtime": 3.6822,
      "eval_samples_per_second": 183.584,
      "eval_steps_per_second": 23.084,
      "step": 4240
    },
    {
      "epoch": 21.0,
      "learning_rate": 8.950000000000001e-05,
      "loss": 0.2768,
      "step": 4452
    },
    {
      "epoch": 21.0,
      "eval_loss": 0.4251560866832733,
      "eval_runtime": 3.9348,
      "eval_samples_per_second": 171.802,
      "eval_steps_per_second": 21.602,
      "step": 4452
    },
    {
      "epoch": 22.0,
      "learning_rate": 8.900000000000001e-05,
      "loss": 0.2269,
      "step": 4664
    },
    {
      "epoch": 22.0,
      "eval_loss": 0.39140933752059937,
      "eval_runtime": 3.6907,
      "eval_samples_per_second": 183.165,
      "eval_steps_per_second": 23.031,
      "step": 4664
    },
    {
      "epoch": 23.0,
      "learning_rate": 8.850000000000001e-05,
      "loss": 0.1868,
      "step": 4876
    },
    {
      "epoch": 23.0,
      "eval_loss": 0.4142400920391083,
      "eval_runtime": 3.8669,
      "eval_samples_per_second": 174.815,
      "eval_steps_per_second": 21.981,
      "step": 4876
    },
    {
      "epoch": 24.0,
      "learning_rate": 8.800000000000001e-05,
      "loss": 0.1539,
      "step": 5088
    },
    {
      "epoch": 24.0,
      "eval_loss": 0.3828276991844177,
      "eval_runtime": 3.7873,
      "eval_samples_per_second": 178.493,
      "eval_steps_per_second": 22.444,
      "step": 5088
    },
    {
      "epoch": 25.0,
      "learning_rate": 8.75e-05,
      "loss": 0.1298,
      "step": 5300
    },
    {
      "epoch": 25.0,
      "eval_loss": 0.40518617630004883,
      "eval_runtime": 3.9459,
      "eval_samples_per_second": 171.319,
      "eval_steps_per_second": 21.542,
      "step": 5300
    },
    {
      "epoch": 26.0,
      "learning_rate": 8.7e-05,
      "loss": 0.1098,
      "step": 5512
    },
    {
      "epoch": 26.0,
      "eval_loss": 0.420820027589798,
      "eval_runtime": 3.7042,
      "eval_samples_per_second": 182.497,
      "eval_steps_per_second": 22.947,
      "step": 5512
    },
    {
      "epoch": 27.0,
      "learning_rate": 8.65e-05,
      "loss": 0.0905,
      "step": 5724
    },
    {
      "epoch": 27.0,
      "eval_loss": 0.39528989791870117,
      "eval_runtime": 3.784,
      "eval_samples_per_second": 178.645,
      "eval_steps_per_second": 22.463,
      "step": 5724
    }
  ],
  "logging_steps": 500,
  "max_steps": 42400,
  "num_train_epochs": 200,
  "save_steps": 500,
  "total_flos": 7207504134336120.0,
  "trial_name": null,
  "trial_params": null
}