|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 35.0,
  "eval_steps": 500,
  "global_step": 7420,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 9.95e-05,
      "loss": 6.3714,
      "step": 212
    },
    {
      "epoch": 1.0,
      "eval_loss": 5.850927352905273,
      "eval_runtime": 3.8282,
      "eval_samples_per_second": 176.584,
      "eval_steps_per_second": 22.204,
      "step": 212
    },
    {
      "epoch": 2.0,
      "learning_rate": 9.900000000000001e-05,
      "loss": 5.5438,
      "step": 424
    },
    {
      "epoch": 2.0,
      "eval_loss": 4.862037658691406,
      "eval_runtime": 3.7425,
      "eval_samples_per_second": 180.626,
      "eval_steps_per_second": 22.712,
      "step": 424
    },
    {
      "epoch": 3.0,
      "learning_rate": 9.850000000000001e-05,
      "loss": 4.6993,
      "step": 636
    },
    {
      "epoch": 3.0,
      "eval_loss": 4.018372058868408,
      "eval_runtime": 4.0424,
      "eval_samples_per_second": 167.229,
      "eval_steps_per_second": 21.027,
      "step": 636
    },
    {
      "epoch": 4.0,
      "learning_rate": 9.8e-05,
      "loss": 3.967,
      "step": 848
    },
    {
      "epoch": 4.0,
      "eval_loss": 3.290557384490967,
      "eval_runtime": 4.3933,
      "eval_samples_per_second": 153.872,
      "eval_steps_per_second": 19.348,
      "step": 848
    },
    {
      "epoch": 5.0,
      "learning_rate": 9.75e-05,
      "loss": 3.305,
      "step": 1060
    },
    {
      "epoch": 5.0,
      "eval_loss": 2.6497254371643066,
      "eval_runtime": 4.3562,
      "eval_samples_per_second": 155.182,
      "eval_steps_per_second": 19.513,
      "step": 1060
    },
    {
      "epoch": 6.0,
      "learning_rate": 9.7e-05,
      "loss": 2.7185,
      "step": 1272
    },
    {
      "epoch": 6.0,
      "eval_loss": 2.1158342361450195,
      "eval_runtime": 4.5634,
      "eval_samples_per_second": 148.135,
      "eval_steps_per_second": 18.626,
      "step": 1272
    },
    {
      "epoch": 7.0,
      "learning_rate": 9.65e-05,
      "loss": 2.2181,
      "step": 1484
    },
    {
      "epoch": 7.0,
      "eval_loss": 1.6882902383804321,
      "eval_runtime": 4.3232,
      "eval_samples_per_second": 156.367,
      "eval_steps_per_second": 19.662,
      "step": 1484
    },
    {
      "epoch": 8.0,
      "learning_rate": 9.6e-05,
      "loss": 1.8106,
      "step": 1696
    },
    {
      "epoch": 8.0,
      "eval_loss": 1.3730388879776,
      "eval_runtime": 4.3486,
      "eval_samples_per_second": 155.451,
      "eval_steps_per_second": 19.546,
      "step": 1696
    },
    {
      "epoch": 9.0,
      "learning_rate": 9.55e-05,
      "loss": 1.497,
      "step": 1908
    },
    {
      "epoch": 9.0,
      "eval_loss": 1.142008900642395,
      "eval_runtime": 4.2743,
      "eval_samples_per_second": 158.154,
      "eval_steps_per_second": 19.886,
      "step": 1908
    },
    {
      "epoch": 10.0,
      "learning_rate": 9.5e-05,
      "loss": 1.2526,
      "step": 2120
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.9753935933113098,
      "eval_runtime": 4.2134,
      "eval_samples_per_second": 160.44,
      "eval_steps_per_second": 20.174,
      "step": 2120
    },
    {
      "epoch": 11.0,
      "learning_rate": 9.449999999999999e-05,
      "loss": 1.0576,
      "step": 2332
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.8463678956031799,
      "eval_runtime": 4.432,
      "eval_samples_per_second": 152.527,
      "eval_steps_per_second": 19.179,
      "step": 2332
    },
    {
      "epoch": 12.0,
      "learning_rate": 9.4e-05,
      "loss": 0.8965,
      "step": 2544
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.7349700927734375,
      "eval_runtime": 4.5659,
      "eval_samples_per_second": 148.054,
      "eval_steps_per_second": 18.616,
      "step": 2544
    },
    {
      "epoch": 13.0,
      "learning_rate": 9.350000000000001e-05,
      "loss": 0.7613,
      "step": 2756
    },
    {
      "epoch": 13.0,
      "eval_loss": 0.6494072675704956,
      "eval_runtime": 4.499,
      "eval_samples_per_second": 150.254,
      "eval_steps_per_second": 18.893,
      "step": 2756
    },
    {
      "epoch": 14.0,
      "learning_rate": 9.300000000000001e-05,
      "loss": 0.645,
      "step": 2968
    },
    {
      "epoch": 14.0,
      "eval_loss": 0.5844011902809143,
      "eval_runtime": 4.7455,
      "eval_samples_per_second": 142.449,
      "eval_steps_per_second": 17.912,
      "step": 2968
    },
    {
      "epoch": 15.0,
      "learning_rate": 9.250000000000001e-05,
      "loss": 0.548,
      "step": 3180
    },
    {
      "epoch": 15.0,
      "eval_loss": 0.5139679312705994,
      "eval_runtime": 4.7986,
      "eval_samples_per_second": 140.876,
      "eval_steps_per_second": 17.714,
      "step": 3180
    },
    {
      "epoch": 16.0,
      "learning_rate": 9.200000000000001e-05,
      "loss": 0.4564,
      "step": 3392
    },
    {
      "epoch": 16.0,
      "eval_loss": 0.47241923213005066,
      "eval_runtime": 4.6502,
      "eval_samples_per_second": 145.371,
      "eval_steps_per_second": 18.279,
      "step": 3392
    },
    {
      "epoch": 17.0,
      "learning_rate": 9.15e-05,
      "loss": 0.3785,
      "step": 3604
    },
    {
      "epoch": 17.0,
      "eval_loss": 0.4442097246646881,
      "eval_runtime": 4.6864,
      "eval_samples_per_second": 144.247,
      "eval_steps_per_second": 18.138,
      "step": 3604
    },
    {
      "epoch": 18.0,
      "learning_rate": 9.1e-05,
      "loss": 0.3144,
      "step": 3816
    },
    {
      "epoch": 18.0,
      "eval_loss": 0.407890647649765,
      "eval_runtime": 4.6905,
      "eval_samples_per_second": 144.121,
      "eval_steps_per_second": 18.122,
      "step": 3816
    },
    {
      "epoch": 19.0,
      "learning_rate": 9.05e-05,
      "loss": 0.2609,
      "step": 4028
    },
    {
      "epoch": 19.0,
      "eval_loss": 0.3970014452934265,
      "eval_runtime": 4.6686,
      "eval_samples_per_second": 144.799,
      "eval_steps_per_second": 18.207,
      "step": 4028
    },
    {
      "epoch": 20.0,
      "learning_rate": 9e-05,
      "loss": 0.2106,
      "step": 4240
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.37661823630332947,
      "eval_runtime": 4.7062,
      "eval_samples_per_second": 143.641,
      "eval_steps_per_second": 18.061,
      "step": 4240
    },
    {
      "epoch": 21.0,
      "learning_rate": 8.950000000000001e-05,
      "loss": 0.1727,
      "step": 4452
    },
    {
      "epoch": 21.0,
      "eval_loss": 0.37585482001304626,
      "eval_runtime": 4.8302,
      "eval_samples_per_second": 139.953,
      "eval_steps_per_second": 17.598,
      "step": 4452
    },
    {
      "epoch": 22.0,
      "learning_rate": 8.900000000000001e-05,
      "loss": 0.1427,
      "step": 4664
    },
    {
      "epoch": 22.0,
      "eval_loss": 0.35890331864356995,
      "eval_runtime": 4.823,
      "eval_samples_per_second": 140.16,
      "eval_steps_per_second": 17.624,
      "step": 4664
    },
    {
      "epoch": 23.0,
      "learning_rate": 8.850000000000001e-05,
      "loss": 0.1115,
      "step": 4876
    },
    {
      "epoch": 23.0,
      "eval_loss": 0.35352450609207153,
      "eval_runtime": 4.9556,
      "eval_samples_per_second": 136.411,
      "eval_steps_per_second": 17.152,
      "step": 4876
    },
    {
      "epoch": 24.0,
      "learning_rate": 8.800000000000001e-05,
      "loss": 0.0949,
      "step": 5088
    },
    {
      "epoch": 24.0,
      "eval_loss": 0.3508472442626953,
      "eval_runtime": 4.4886,
      "eval_samples_per_second": 150.605,
      "eval_steps_per_second": 18.937,
      "step": 5088
    },
    {
      "epoch": 25.0,
      "learning_rate": 8.75e-05,
      "loss": 0.0779,
      "step": 5300
    },
    {
      "epoch": 25.0,
      "eval_loss": 0.35696595907211304,
      "eval_runtime": 4.5496,
      "eval_samples_per_second": 148.586,
      "eval_steps_per_second": 18.683,
      "step": 5300
    },
    {
      "epoch": 26.0,
      "learning_rate": 8.7e-05,
      "loss": 0.0652,
      "step": 5512
    },
    {
      "epoch": 26.0,
      "eval_loss": 0.3560032546520233,
      "eval_runtime": 5.7794,
      "eval_samples_per_second": 116.966,
      "eval_steps_per_second": 14.707,
      "step": 5512
    },
    {
      "epoch": 27.0,
      "learning_rate": 8.65e-05,
      "loss": 0.0549,
      "step": 5724
    },
    {
      "epoch": 27.0,
      "eval_loss": 0.355691134929657,
      "eval_runtime": 5.1711,
      "eval_samples_per_second": 130.726,
      "eval_steps_per_second": 16.437,
      "step": 5724
    },
    {
      "epoch": 28.0,
      "learning_rate": 8.6e-05,
      "loss": 0.0431,
      "step": 5936
    },
    {
      "epoch": 28.0,
      "eval_loss": 0.3634819984436035,
      "eval_runtime": 4.8044,
      "eval_samples_per_second": 140.705,
      "eval_steps_per_second": 17.692,
      "step": 5936
    },
    {
      "epoch": 29.0,
      "learning_rate": 8.55e-05,
      "loss": 0.0389,
      "step": 6148
    },
    {
      "epoch": 29.0,
      "eval_loss": 0.36610594391822815,
      "eval_runtime": 4.5861,
      "eval_samples_per_second": 147.403,
      "eval_steps_per_second": 18.534,
      "step": 6148
    },
    {
      "epoch": 30.0,
      "learning_rate": 8.5e-05,
      "loss": 0.0327,
      "step": 6360
    },
    {
      "epoch": 30.0,
      "eval_loss": 0.3725152611732483,
      "eval_runtime": 5.6392,
      "eval_samples_per_second": 119.874,
      "eval_steps_per_second": 15.073,
      "step": 6360
    },
    {
      "epoch": 31.0,
      "learning_rate": 8.450000000000001e-05,
      "loss": 0.0293,
      "step": 6572
    },
    {
      "epoch": 31.0,
      "eval_loss": 0.38449323177337646,
      "eval_runtime": 5.8725,
      "eval_samples_per_second": 115.113,
      "eval_steps_per_second": 14.474,
      "step": 6572
    },
    {
      "epoch": 32.0,
      "learning_rate": 8.4e-05,
      "loss": 0.0279,
      "step": 6784
    },
    {
      "epoch": 32.0,
      "eval_loss": 0.3865319490432739,
      "eval_runtime": 4.6302,
      "eval_samples_per_second": 145.997,
      "eval_steps_per_second": 18.358,
      "step": 6784
    },
    {
      "epoch": 33.0,
      "learning_rate": 8.35e-05,
      "loss": 0.0219,
      "step": 6996
    },
    {
      "epoch": 33.0,
      "eval_loss": 0.39288753271102905,
      "eval_runtime": 4.7491,
      "eval_samples_per_second": 142.342,
      "eval_steps_per_second": 17.898,
      "step": 6996
    },
    {
      "epoch": 34.0,
      "learning_rate": 8.3e-05,
      "loss": 0.02,
      "step": 7208
    },
    {
      "epoch": 34.0,
      "eval_loss": 0.39614447951316833,
      "eval_runtime": 4.6865,
      "eval_samples_per_second": 144.245,
      "eval_steps_per_second": 18.137,
      "step": 7208
    },
    {
      "epoch": 35.0,
      "learning_rate": 8.25e-05,
      "loss": 0.0166,
      "step": 7420
    },
    {
      "epoch": 35.0,
      "eval_loss": 0.3927687704563141,
      "eval_runtime": 4.4645,
      "eval_samples_per_second": 151.418,
      "eval_steps_per_second": 19.039,
      "step": 7420
    }
  ],
  "logging_steps": 500,
  "max_steps": 42400,
  "num_train_epochs": 200,
  "save_steps": 500,
  "total_flos": 9983766957232464.0,
  "trial_name": null,
  "trial_params": null
}