{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5,
      "grad_norm": 1.06712806224823,
      "learning_rate": 0.0002,
      "loss": 2.0755,
      "step": 25
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.9299841523170471,
      "learning_rate": 0.00017777777777777779,
      "loss": 1.2517,
      "step": 50
    },
    {
      "epoch": 1.5,
      "grad_norm": 1.0018107891082764,
      "learning_rate": 0.00015555555555555556,
      "loss": 1.039,
      "step": 75
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.8971847295761108,
      "learning_rate": 0.00013333333333333334,
      "loss": 0.8388,
      "step": 100
    },
    {
      "epoch": 2.5,
      "grad_norm": 1.2010732889175415,
      "learning_rate": 0.00011111111111111112,
      "loss": 0.7692,
      "step": 125
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.9972412586212158,
      "learning_rate": 8.888888888888889e-05,
      "loss": 0.7791,
      "step": 150
    },
    {
      "epoch": 3.5,
      "grad_norm": 1.464215636253357,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.7045,
      "step": 175
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.3566559553146362,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.675,
      "step": 200
    },
    {
      "epoch": 4.5,
      "grad_norm": 1.533048391342163,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.6353,
      "step": 225
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.503666877746582,
      "learning_rate": 0.0,
      "loss": 0.6217,
      "step": 250
    }
  ],
  "logging_steps": 25,
  "max_steps": 250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 4.372977156096e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}