{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.444444444444445,
  "eval_steps": 1000,
  "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 18.646621704101562,
      "learning_rate": 1.7777777777777777e-05,
      "loss": 12.9379,
      "step": 100
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 15.787500381469727,
      "learning_rate": 1.555555555555556e-05,
      "loss": 12.4243,
      "step": 200
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 24.62742805480957,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 12.1655,
      "step": 300
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 22.622655868530273,
      "learning_rate": 1.1111111111111113e-05,
      "loss": 11.9433,
      "step": 400
    },
    {
      "epoch": 2.7777777777777777,
      "grad_norm": 24.53895378112793,
      "learning_rate": 8.888888888888888e-06,
      "loss": 11.9653,
      "step": 500
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 29.335464477539062,
      "learning_rate": 6.666666666666667e-06,
      "loss": 11.8621,
      "step": 600
    },
    {
      "epoch": 3.888888888888889,
      "grad_norm": 23.47711753845215,
      "learning_rate": 4.444444444444444e-06,
      "loss": 11.5167,
      "step": 700
    },
    {
      "epoch": 4.444444444444445,
      "grad_norm": 21.372766494750977,
      "learning_rate": 2.222222222222222e-06,
      "loss": 11.3985,
      "step": 800
    }
  ],
  "logging_steps": 100,
  "max_steps": 900,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 100,
  "total_flos": 1007165894833152.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}