{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 36816,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "learning_rate": 5.431830526887561e-06,
      "loss": 0.9928,
      "step": 1000
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.0863661053775122e-05,
      "loss": 0.7822,
      "step": 2000
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.6295491580662687e-05,
      "loss": 0.7134,
      "step": 3000
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.9808052151868172e-05,
      "loss": 0.6789,
      "step": 4000
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.9204442566547958e-05,
      "loss": 0.647,
      "step": 5000
    },
    {
      "epoch": 0.49,
      "learning_rate": 1.8600832981227744e-05,
      "loss": 0.6252,
      "step": 6000
    },
    {
      "epoch": 0.57,
      "learning_rate": 1.7997223395907527e-05,
      "loss": 0.6084,
      "step": 7000
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.7393613810587313e-05,
      "loss": 0.5935,
      "step": 8000
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.67900042252671e-05,
      "loss": 0.5812,
      "step": 9000
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.618639463994688e-05,
      "loss": 0.5643,
      "step": 10000
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.5582785054626667e-05,
      "loss": 0.5629,
      "step": 11000
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.4979175469306453e-05,
      "loss": 0.556,
      "step": 12000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.5368458032608032,
      "eval_runtime": 2.6095,
      "eval_samples_per_second": 954.204,
      "eval_steps_per_second": 29.891,
      "step": 12272
    },
    {
      "epoch": 1.06,
      "learning_rate": 1.437556588398624e-05,
      "loss": 0.4938,
      "step": 13000
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.3771956298666025e-05,
      "loss": 0.4773,
      "step": 14000
    },
    {
      "epoch": 1.22,
      "learning_rate": 1.316834671334581e-05,
      "loss": 0.4797,
      "step": 15000
    },
    {
      "epoch": 1.3,
      "learning_rate": 1.2564737128025594e-05,
      "loss": 0.4754,
      "step": 16000
    },
    {
      "epoch": 1.39,
      "learning_rate": 1.196112754270538e-05,
      "loss": 0.4692,
      "step": 17000
    },
    {
      "epoch": 1.47,
      "learning_rate": 1.1357517957385164e-05,
      "loss": 0.4682,
      "step": 18000
    },
    {
      "epoch": 1.55,
      "learning_rate": 1.0753908372064949e-05,
      "loss": 0.4632,
      "step": 19000
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.0150298786744735e-05,
      "loss": 0.4611,
      "step": 20000
    },
    {
      "epoch": 1.71,
      "learning_rate": 9.546689201424519e-06,
      "loss": 0.4507,
      "step": 21000
    },
    {
      "epoch": 1.79,
      "learning_rate": 8.943079616104305e-06,
      "loss": 0.4487,
      "step": 22000
    },
    {
      "epoch": 1.87,
      "learning_rate": 8.33947003078409e-06,
      "loss": 0.4493,
      "step": 23000
    },
    {
      "epoch": 1.96,
      "learning_rate": 7.735860445463874e-06,
      "loss": 0.4513,
      "step": 24000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.4784027636051178,
      "eval_runtime": 2.5508,
      "eval_samples_per_second": 976.183,
      "eval_steps_per_second": 30.579,
      "step": 24544
    },
    {
      "epoch": 2.04,
      "learning_rate": 7.1322508601436605e-06,
      "loss": 0.4059,
      "step": 25000
    },
    {
      "epoch": 2.12,
      "learning_rate": 6.528641274823445e-06,
      "loss": 0.3585,
      "step": 26000
    },
    {
      "epoch": 2.2,
      "learning_rate": 5.92503168950323e-06,
      "loss": 0.3568,
      "step": 27000
    },
    {
      "epoch": 2.28,
      "learning_rate": 5.321422104183015e-06,
      "loss": 0.3566,
      "step": 28000
    },
    {
      "epoch": 2.36,
      "learning_rate": 4.7178125188628e-06,
      "loss": 0.3484,
      "step": 29000
    },
    {
      "epoch": 2.44,
      "learning_rate": 4.114202933542585e-06,
      "loss": 0.3527,
      "step": 30000
    },
    {
      "epoch": 2.53,
      "learning_rate": 3.51059334822237e-06,
      "loss": 0.3561,
      "step": 31000
    },
    {
      "epoch": 2.61,
      "learning_rate": 2.9069837629021554e-06,
      "loss": 0.3512,
      "step": 32000
    },
    {
      "epoch": 2.69,
      "learning_rate": 2.30337417758194e-06,
      "loss": 0.3549,
      "step": 33000
    },
    {
      "epoch": 2.77,
      "learning_rate": 1.6997645922617255e-06,
      "loss": 0.3555,
      "step": 34000
    },
    {
      "epoch": 2.85,
      "learning_rate": 1.0961550069415102e-06,
      "loss": 0.3396,
      "step": 35000
    },
    {
      "epoch": 2.93,
      "learning_rate": 4.925454216212955e-07,
      "loss": 0.3422,
      "step": 36000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.49958956241607666,
      "eval_runtime": 2.5304,
      "eval_samples_per_second": 984.052,
      "eval_steps_per_second": 30.826,
      "step": 36816
    }
  ],
  "max_steps": 36816,
  "num_train_epochs": 3,
  "total_flos": 7.749426953899213e+16,
  "trial_name": null,
  "trial_params": null
}