{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "global_step": 20365,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.25,
      "learning_rate": 2.8526884360422293e-05,
      "loss": 3.7774,
      "step": 1000
    },
    {
      "epoch": 0.25,
      "step": 1000
    },
    {
      "epoch": 0.49,
      "learning_rate": 2.705376872084459e-05,
      "loss": 1.1215,
      "step": 2000
    },
    {
      "epoch": 0.49,
      "step": 2000
    },
    {
      "epoch": 0.74,
      "learning_rate": 2.558065308126688e-05,
      "loss": 0.891,
      "step": 3000
    },
    {
      "epoch": 0.74,
      "step": 3000
    },
    {
      "epoch": 0.98,
      "learning_rate": 2.4107537441689173e-05,
      "loss": 0.7978,
      "step": 4000
    },
    {
      "epoch": 0.98,
      "step": 4000
    },
    {
      "epoch": 1.23,
      "learning_rate": 2.263442180211147e-05,
      "loss": 0.7133,
      "step": 5000
    },
    {
      "epoch": 1.23,
      "step": 5000
    },
    {
      "epoch": 1.47,
      "learning_rate": 2.116130616253376e-05,
      "loss": 0.6749,
      "step": 6000
    },
    {
      "epoch": 1.47,
      "step": 6000
    },
    {
      "epoch": 1.72,
      "learning_rate": 1.9688190522956053e-05,
      "loss": 0.6433,
      "step": 7000
    },
    {
      "epoch": 1.72,
      "step": 7000
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.8215074883378345e-05,
      "loss": 0.6125,
      "step": 8000
    },
    {
      "epoch": 1.96,
      "step": 8000
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.674195924380064e-05,
      "loss": 0.5748,
      "step": 9000
    },
    {
      "epoch": 2.21,
      "step": 9000
    },
    {
      "epoch": 2.46,
      "learning_rate": 1.5268843604222933e-05,
      "loss": 0.5683,
      "step": 10000
    },
    {
      "epoch": 2.46,
      "step": 10000
    },
    {
      "epoch": 2.7,
      "learning_rate": 1.3795727964645225e-05,
      "loss": 0.5413,
      "step": 11000
    },
    {
      "epoch": 2.7,
      "step": 11000
    },
    {
      "epoch": 2.95,
      "learning_rate": 1.2322612325067517e-05,
      "loss": 0.5412,
      "step": 12000
    },
    {
      "epoch": 2.95,
      "step": 12000
    },
    {
      "epoch": 3.19,
      "learning_rate": 1.0849496685489811e-05,
      "loss": 0.5168,
      "step": 13000
    },
    {
      "epoch": 3.19,
      "step": 13000
    },
    {
      "epoch": 3.44,
      "learning_rate": 9.376381045912103e-06,
      "loss": 0.5038,
      "step": 14000
    },
    {
      "epoch": 3.44,
      "step": 14000
    },
    {
      "epoch": 3.68,
      "learning_rate": 7.903265406334397e-06,
      "loss": 0.5042,
      "step": 15000
    },
    {
      "epoch": 3.68,
      "step": 15000
    },
    {
      "epoch": 3.93,
      "learning_rate": 6.43014976675669e-06,
      "loss": 0.4929,
      "step": 16000
    },
    {
      "epoch": 3.93,
      "step": 16000
    },
    {
      "epoch": 4.17,
      "learning_rate": 4.957034127178984e-06,
      "loss": 0.4919,
      "step": 17000
    },
    {
      "epoch": 4.17,
      "step": 17000
    },
    {
      "epoch": 4.42,
      "learning_rate": 3.483918487601277e-06,
      "loss": 0.4765,
      "step": 18000
    },
    {
      "epoch": 4.42,
      "step": 18000
    },
    {
      "epoch": 4.66,
      "learning_rate": 2.01080284802357e-06,
      "loss": 0.4772,
      "step": 19000
    },
    {
      "epoch": 4.66,
      "step": 19000
    },
    {
      "epoch": 4.91,
      "learning_rate": 5.37687208445863e-07,
      "loss": 0.4729,
      "step": 20000
    },
    {
      "epoch": 4.91,
      "step": 20000
    },
    {
      "epoch": 5.0,
      "step": 20365,
      "total_flos": 5.859693481446605e+17,
      "train_loss": 0.764641843215564,
      "train_runtime": 18256.242,
      "train_samples_per_second": 35.692,
      "train_steps_per_second": 1.116
    }
  ],
  "max_steps": 20365,
  "num_train_epochs": 5,
  "total_flos": 5.859693481446605e+17,
  "trial_name": null,
  "trial_params": null
}