{
    "best_metric": null,
    "best_model_checkpoint": null,
    "epoch": 0.01999000499750125,
    "eval_steps": 500,
    "global_step": 20,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
    "log_history": [
        {
            "epoch": 0.0,
            "grad_norm": 3.3340553137590683,
            "learning_rate": 0.0,
            "loss": 11.0,
            "step": 1
        },
        {
            "epoch": 0.0,
            "grad_norm": 2.398799707355898,
            "learning_rate": 5.9999999999999995e-05,
            "loss": 10.125,
            "step": 2
        },
        {
            "epoch": 0.0,
            "grad_norm": 2.3943029297945575,
            "learning_rate": 0.00011999999999999999,
            "loss": 10.1172,
            "step": 3
        },
        {
            "epoch": 0.0,
            "grad_norm": 1.9959117709404242,
            "learning_rate": 0.00017999999999999998,
            "loss": 9.875,
            "step": 4
        },
        {
            "epoch": 0.0,
            "grad_norm": 1.8270696218303057,
            "learning_rate": 0.00023999999999999998,
            "loss": 9.6641,
            "step": 5
        },
        {
            "epoch": 0.01,
            "grad_norm": 1.7854351602113614,
            "learning_rate": 0.0003,
            "loss": 9.4844,
            "step": 6
        },
        {
            "epoch": 0.01,
            "grad_norm": 1.7194174424274788,
            "learning_rate": 0.00035999999999999997,
            "loss": 9.3281,
            "step": 7
        },
        {
            "epoch": 0.01,
            "grad_norm": 1.463772638994466,
            "learning_rate": 0.00041999999999999996,
            "loss": 9.2109,
            "step": 8
        },
        {
            "epoch": 0.01,
            "grad_norm": 1.439323678271545,
            "learning_rate": 0.00047999999999999996,
            "loss": 8.9453,
            "step": 9
        },
        {
            "epoch": 0.01,
            "grad_norm": 1.2936126396494727,
            "learning_rate": 0.00054,
            "loss": 8.7109,
            "step": 10
        },
        {
            "epoch": 0.01,
            "grad_norm": 1.0757761814549318,
            "learning_rate": 0.0005999986405514987,
            "loss": 8.4609,
            "step": 11
        },
        {
            "epoch": 0.01,
            "grad_norm": 0.9278570154341632,
            "learning_rate": 0.0005999945622196846,
            "loss": 8.2344,
            "step": 12
        },
        {
            "epoch": 0.01,
            "grad_norm": 0.8086775215724974,
            "learning_rate": 0.0005999877650456265,
            "loss": 8.125,
            "step": 13
        },
        {
            "epoch": 0.01,
            "grad_norm": 0.7630413213242441,
            "learning_rate": 0.000599978249097772,
            "loss": 7.9766,
            "step": 14
        },
        {
            "epoch": 0.01,
            "grad_norm": 0.9172017565891333,
            "learning_rate": 0.0005999660144719463,
            "loss": 7.8555,
            "step": 15
        },
        {
            "epoch": 0.02,
            "grad_norm": 0.6610052304024877,
            "learning_rate": 0.0005999510612913519,
            "loss": 7.7734,
            "step": 16
        },
        {
            "epoch": 0.02,
            "grad_norm": 0.7091485456070775,
            "learning_rate": 0.0005999333897065673,
            "loss": 7.7148,
            "step": 17
        },
        {
            "epoch": 0.02,
            "grad_norm": 16.771353248766836,
            "learning_rate": 0.0005999129998955453,
            "loss": 8.5078,
            "step": 18
        },
        {
            "epoch": 0.02,
            "grad_norm": 1.3123969082989795,
            "learning_rate": 0.0005998898920636111,
            "loss": 7.7539,
            "step": 19
        },
        {
            "epoch": 0.02,
            "grad_norm": 0.6992078172905232,
            "learning_rate": 0.00059986406644346,
            "loss": 7.75,
            "step": 20
        }
    ],
    "logging_steps": 1,
    "max_steps": 1000,
    "num_input_tokens_seen": 0,
    "num_train_epochs": 1,
    "save_steps": 10,
    "total_flos": 0.0,
    "train_batch_size": 32,
    "trial_name": null,
    "trial_params": null
}