{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 36.231884057971016,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 7.246376811594203,
      "grad_norm": 4.833008289337158,
      "learning_rate": 4.094202898550725e-05,
      "loss": 1.952,
      "step": 500
    },
    {
      "epoch": 14.492753623188406,
      "grad_norm": 4.0949931144714355,
      "learning_rate": 3.188405797101449e-05,
      "loss": 1.3781,
      "step": 1000
    },
    {
      "epoch": 21.73913043478261,
      "grad_norm": 2.9236912727355957,
      "learning_rate": 2.282608695652174e-05,
      "loss": 0.9801,
      "step": 1500
    },
    {
      "epoch": 28.985507246376812,
      "grad_norm": 2.521008014678955,
      "learning_rate": 1.3768115942028985e-05,
      "loss": 0.7325,
      "step": 2000
    },
    {
      "epoch": 36.231884057971016,
      "grad_norm": 2.6362154483795166,
      "learning_rate": 4.710144927536232e-06,
      "loss": 0.6039,
      "step": 2500
    }
  ],
  "logging_steps": 500,
  "max_steps": 2760,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 500,
  "total_flos": 2612920320000000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}