{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.465855307640297,
  "global_step": 14000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.34,
      "learning_rate": 1.9993238674780257e-05,
      "loss": 1.9928,
      "step": 500
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.9986477349560515e-05,
      "loss": 1.5804,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.5350453853607178,
      "eval_runtime": 0.0909,
      "eval_samples_per_second": 43.998,
      "eval_steps_per_second": 11.0,
      "step": 1479
    },
    {
      "epoch": 1.01,
      "learning_rate": 1.997971602434077e-05,
      "loss": 1.4417,
      "step": 1500
    },
    {
      "epoch": 1.35,
      "learning_rate": 1.997295469912103e-05,
      "loss": 1.2966,
      "step": 2000
    },
    {
      "epoch": 1.69,
      "learning_rate": 1.9966193373901284e-05,
      "loss": 1.2455,
      "step": 2500
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.411128044128418,
      "eval_runtime": 0.0866,
      "eval_samples_per_second": 46.21,
      "eval_steps_per_second": 11.552,
      "step": 2958
    },
    {
      "epoch": 2.03,
      "learning_rate": 1.9959432048681543e-05,
      "loss": 1.1971,
      "step": 3000
    },
    {
      "epoch": 2.37,
      "learning_rate": 1.99526707234618e-05,
      "loss": 1.1112,
      "step": 3500
    },
    {
      "epoch": 2.7,
      "learning_rate": 1.9945909398242057e-05,
      "loss": 1.0734,
      "step": 4000
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.3451578617095947,
      "eval_runtime": 0.0858,
      "eval_samples_per_second": 46.634,
      "eval_steps_per_second": 11.659,
      "step": 4437
    },
    {
      "epoch": 3.04,
      "learning_rate": 1.9939148073022316e-05,
      "loss": 1.0531,
      "step": 4500
    },
    {
      "epoch": 3.38,
      "learning_rate": 1.993238674780257e-05,
      "loss": 0.9843,
      "step": 5000
    },
    {
      "epoch": 3.72,
      "learning_rate": 1.992562542258283e-05,
      "loss": 0.9622,
      "step": 5500
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.2762081623077393,
      "eval_runtime": 0.0884,
      "eval_samples_per_second": 45.25,
      "eval_steps_per_second": 11.312,
      "step": 5916
    },
    {
      "epoch": 4.06,
      "learning_rate": 1.9918864097363085e-05,
      "loss": 0.9453,
      "step": 6000
    },
    {
      "epoch": 4.39,
      "learning_rate": 1.9912102772143343e-05,
      "loss": 0.8873,
      "step": 6500
    },
    {
      "epoch": 4.73,
      "learning_rate": 1.99053414469236e-05,
      "loss": 0.8723,
      "step": 7000
    },
    {
      "epoch": 5.0,
      "eval_loss": 1.2012463808059692,
      "eval_runtime": 0.0849,
      "eval_samples_per_second": 47.093,
      "eval_steps_per_second": 11.773,
      "step": 7395
    },
    {
      "epoch": 5.07,
      "learning_rate": 1.9898580121703857e-05,
      "loss": 0.8676,
      "step": 7500
    },
    {
      "epoch": 5.41,
      "learning_rate": 1.9891818796484112e-05,
      "loss": 0.7985,
      "step": 8000
    },
    {
      "epoch": 5.75,
      "learning_rate": 1.9885057471264367e-05,
      "loss": 0.8085,
      "step": 8500
    },
    {
      "epoch": 6.0,
      "eval_loss": 1.1576131582260132,
      "eval_runtime": 0.0879,
      "eval_samples_per_second": 45.526,
      "eval_steps_per_second": 11.381,
      "step": 8874
    },
    {
      "epoch": 6.09,
      "learning_rate": 1.9878296146044626e-05,
      "loss": 0.7917,
      "step": 9000
    },
    {
      "epoch": 6.42,
      "learning_rate": 1.987153482082488e-05,
      "loss": 0.7452,
      "step": 9500
    },
    {
      "epoch": 6.76,
      "learning_rate": 1.986477349560514e-05,
      "loss": 0.7447,
      "step": 10000
    },
    {
      "epoch": 7.0,
      "eval_loss": 1.1027594804763794,
      "eval_runtime": 0.0838,
      "eval_samples_per_second": 47.724,
      "eval_steps_per_second": 11.931,
      "step": 10353
    },
    {
      "epoch": 7.1,
      "learning_rate": 1.9858012170385395e-05,
      "loss": 0.7298,
      "step": 10500
    },
    {
      "epoch": 7.44,
      "learning_rate": 1.9851250845165654e-05,
      "loss": 0.681,
      "step": 11000
    },
    {
      "epoch": 7.78,
      "learning_rate": 1.9844489519945912e-05,
      "loss": 0.7038,
      "step": 11500
    },
    {
      "epoch": 8.0,
      "eval_loss": 1.0498589277267456,
      "eval_runtime": 0.086,
      "eval_samples_per_second": 46.519,
      "eval_steps_per_second": 11.63,
      "step": 11832
    },
    {
      "epoch": 8.11,
      "learning_rate": 1.9837728194726168e-05,
      "loss": 0.6726,
      "step": 12000
    },
    {
      "epoch": 8.45,
      "learning_rate": 1.9830966869506426e-05,
      "loss": 0.6405,
      "step": 12500
    },
    {
      "epoch": 8.79,
      "learning_rate": 1.982420554428668e-05,
      "loss": 0.6458,
      "step": 13000
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.9868650436401367,
      "eval_runtime": 0.0904,
      "eval_samples_per_second": 44.229,
      "eval_steps_per_second": 11.057,
      "step": 13311
    },
    {
      "epoch": 9.13,
      "learning_rate": 1.981744421906694e-05,
      "loss": 0.6234,
      "step": 13500
    },
    {
      "epoch": 9.47,
      "learning_rate": 1.9810682893847195e-05,
      "loss": 0.5984,
      "step": 14000
    }
  ],
  "max_steps": 1479000,
  "num_train_epochs": 1000,
  "total_flos": 3.051556454321357e+16,
  "trial_name": null,
  "trial_params": null
}