{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.34375,
  "eval_steps": 500,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 4.996988640512931e-05,
      "loss": 0.7489,
      "step": 2
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.987961816680492e-05,
      "loss": 0.5822,
      "step": 4
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.972941274911953e-05,
      "loss": 0.5713,
      "step": 6
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.951963201008076e-05,
      "loss": 0.5457,
      "step": 8
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.9250781329863606e-05,
      "loss": 0.5473,
      "step": 10
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.892350839330522e-05,
      "loss": 0.5339,
      "step": 12
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.853860162957552e-05,
      "loss": 0.5368,
      "step": 14
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.8096988312782174e-05,
      "loss": 0.5598,
      "step": 16
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.759973232808609e-05,
      "loss": 0.517,
      "step": 18
    },
    {
      "epoch": 0.62,
      "learning_rate": 4.7048031608708876e-05,
      "loss": 0.5279,
      "step": 20
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.6443215250006806e-05,
      "loss": 0.5079,
      "step": 22
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.5786740307563636e-05,
      "loss": 0.5045,
      "step": 24
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.508018828701612e-05,
      "loss": 0.5158,
      "step": 26
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.4325261334068426e-05,
      "loss": 0.5064,
      "step": 28
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.352377813387398e-05,
      "loss": 0.5072,
      "step": 30
    },
    {
      "epoch": 1.0,
      "learning_rate": 4.267766952966369e-05,
      "loss": 0.5087,
      "step": 32
    },
    {
      "epoch": 1.06,
      "learning_rate": 4.178897387117546e-05,
      "loss": 0.3132,
      "step": 34
    },
    {
      "epoch": 1.12,
      "learning_rate": 4.085983210409114e-05,
      "loss": 0.2972,
      "step": 36
    },
    {
      "epoch": 1.19,
      "learning_rate": 3.9892482612310836e-05,
      "loss": 0.2747,
      "step": 38
    },
    {
      "epoch": 1.25,
      "learning_rate": 3.888925582549006e-05,
      "loss": 0.2399,
      "step": 40
    },
    {
      "epoch": 1.31,
      "learning_rate": 3.785256860483054e-05,
      "loss": 0.2741,
      "step": 42
    },
    {
      "epoch": 1.38,
      "learning_rate": 3.678491842064995e-05,
      "loss": 0.2622,
      "step": 44
    },
    {
      "epoch": 1.44,
      "learning_rate": 3.624028324136517e-05,
      "loss": 0.258,
      "step": 46
    },
    {
      "epoch": 1.5,
      "learning_rate": 3.513103285012475e-05,
      "loss": 0.2574,
      "step": 48
    },
    {
      "epoch": 1.56,
      "learning_rate": 3.399737591337471e-05,
      "loss": 0.2551,
      "step": 50
    },
    {
      "epoch": 1.62,
      "learning_rate": 3.284204350997229e-05,
      "loss": 0.265,
      "step": 52
    },
    {
      "epoch": 1.69,
      "learning_rate": 3.1667818936872465e-05,
      "loss": 0.2641,
      "step": 54
    },
    {
      "epoch": 1.75,
      "learning_rate": 3.0477531003921745e-05,
      "loss": 0.2588,
      "step": 56
    },
    {
      "epoch": 1.81,
      "learning_rate": 2.9274047219007534e-05,
      "loss": 0.2366,
      "step": 58
    },
    {
      "epoch": 1.88,
      "learning_rate": 2.8060266879980408e-05,
      "loss": 0.2581,
      "step": 60
    },
    {
      "epoch": 1.94,
      "learning_rate": 2.683911408999169e-05,
      "loss": 0.2658,
      "step": 62
    },
    {
      "epoch": 2.0,
      "learning_rate": 2.561353071307281e-05,
      "loss": 0.2474,
      "step": 64
    },
    {
      "epoch": 2.06,
      "learning_rate": 2.4386469286927196e-05,
      "loss": 0.1347,
      "step": 66
    },
    {
      "epoch": 2.12,
      "learning_rate": 2.3160885910008318e-05,
      "loss": 0.135,
      "step": 68
    },
    {
      "epoch": 2.19,
      "learning_rate": 2.1939733120019598e-05,
      "loss": 0.1242,
      "step": 70
    },
    {
      "epoch": 2.25,
      "learning_rate": 2.0725952780992468e-05,
      "loss": 0.1286,
      "step": 72
    },
    {
      "epoch": 2.31,
      "learning_rate": 1.9522468996078258e-05,
      "loss": 0.1252,
      "step": 74
    }
  ],
  "logging_steps": 2,
  "max_steps": 128,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "total_flos": 8.20785473084457e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}