{
  "best_metric": 10.363082885742188,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.11341083073433512,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0022682166146867026,
      "grad_norm": 0.13453303277492523,
      "learning_rate": 5e-05,
      "loss": 10.3842,
      "step": 1
    },
    {
      "epoch": 0.0022682166146867026,
      "eval_loss": 10.386616706848145,
      "eval_runtime": 3.0147,
      "eval_samples_per_second": 985.168,
      "eval_steps_per_second": 123.395,
      "step": 1
    },
    {
      "epoch": 0.004536433229373405,
      "grad_norm": 0.1353277713060379,
      "learning_rate": 0.0001,
      "loss": 10.3876,
      "step": 2
    },
    {
      "epoch": 0.006804649844060108,
      "grad_norm": 0.14166195690631866,
      "learning_rate": 9.989294616193017e-05,
      "loss": 10.3875,
      "step": 3
    },
    {
      "epoch": 0.00907286645874681,
      "grad_norm": 0.14265072345733643,
      "learning_rate": 9.957224306869053e-05,
      "loss": 10.3854,
      "step": 4
    },
    {
      "epoch": 0.011341083073433513,
      "grad_norm": 0.1489330232143402,
      "learning_rate": 9.903926402016153e-05,
      "loss": 10.3863,
      "step": 5
    },
    {
      "epoch": 0.013609299688120215,
      "grad_norm": 0.14977039396762848,
      "learning_rate": 9.829629131445342e-05,
      "loss": 10.3842,
      "step": 6
    },
    {
      "epoch": 0.01587751630280692,
      "grad_norm": 0.15355272591114044,
      "learning_rate": 9.73465064747553e-05,
      "loss": 10.3833,
      "step": 7
    },
    {
      "epoch": 0.01814573291749362,
      "grad_norm": 0.1444183886051178,
      "learning_rate": 9.619397662556435e-05,
      "loss": 10.3862,
      "step": 8
    },
    {
      "epoch": 0.020413949532180325,
      "grad_norm": 0.15106390416622162,
      "learning_rate": 9.484363707663442e-05,
      "loss": 10.3849,
      "step": 9
    },
    {
      "epoch": 0.022682166146867026,
      "grad_norm": 0.154390349984169,
      "learning_rate": 9.330127018922194e-05,
      "loss": 10.3838,
      "step": 10
    },
    {
      "epoch": 0.02495038276155373,
      "grad_norm": 0.14189912378787994,
      "learning_rate": 9.157348061512727e-05,
      "loss": 10.3832,
      "step": 11
    },
    {
      "epoch": 0.02721859937624043,
      "grad_norm": 0.16048261523246765,
      "learning_rate": 8.966766701456177e-05,
      "loss": 10.3848,
      "step": 12
    },
    {
      "epoch": 0.029486815990927135,
      "grad_norm": 0.18116344511508942,
      "learning_rate": 8.759199037394887e-05,
      "loss": 10.3773,
      "step": 13
    },
    {
      "epoch": 0.03175503260561384,
      "grad_norm": 0.1787891834974289,
      "learning_rate": 8.535533905932738e-05,
      "loss": 10.3796,
      "step": 14
    },
    {
      "epoch": 0.03402324922030054,
      "grad_norm": 0.19100786745548248,
      "learning_rate": 8.296729075500344e-05,
      "loss": 10.3789,
      "step": 15
    },
    {
      "epoch": 0.03629146583498724,
      "grad_norm": 0.19920316338539124,
      "learning_rate": 8.043807145043604e-05,
      "loss": 10.3765,
      "step": 16
    },
    {
      "epoch": 0.03855968244967394,
      "grad_norm": 0.19364210963249207,
      "learning_rate": 7.777851165098012e-05,
      "loss": 10.3789,
      "step": 17
    },
    {
      "epoch": 0.04082789906436065,
      "grad_norm": 0.1934957504272461,
      "learning_rate": 7.500000000000001e-05,
      "loss": 10.3758,
      "step": 18
    },
    {
      "epoch": 0.04309611567904735,
      "grad_norm": 0.19975155591964722,
      "learning_rate": 7.211443451095007e-05,
      "loss": 10.3772,
      "step": 19
    },
    {
      "epoch": 0.04536433229373405,
      "grad_norm": 0.20424290001392365,
      "learning_rate": 6.91341716182545e-05,
      "loss": 10.3748,
      "step": 20
    },
    {
      "epoch": 0.04763254890842075,
      "grad_norm": 0.21936988830566406,
      "learning_rate": 6.607197326515808e-05,
      "loss": 10.3752,
      "step": 21
    },
    {
      "epoch": 0.04990076552310746,
      "grad_norm": 0.19495096802711487,
      "learning_rate": 6.294095225512603e-05,
      "loss": 10.3742,
      "step": 22
    },
    {
      "epoch": 0.05216898213779416,
      "grad_norm": 0.1962108314037323,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 10.3749,
      "step": 23
    },
    {
      "epoch": 0.05443719875248086,
      "grad_norm": 0.21276497840881348,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 10.3741,
      "step": 24
    },
    {
      "epoch": 0.05670541536716756,
      "grad_norm": 0.22711583971977234,
      "learning_rate": 5.327015646150716e-05,
      "loss": 10.3694,
      "step": 25
    },
    {
      "epoch": 0.05670541536716756,
      "eval_loss": 10.370012283325195,
      "eval_runtime": 3.012,
      "eval_samples_per_second": 986.057,
      "eval_steps_per_second": 123.506,
      "step": 25
    },
    {
      "epoch": 0.05897363198185427,
      "grad_norm": 0.2256181389093399,
      "learning_rate": 5e-05,
      "loss": 10.3691,
      "step": 26
    },
    {
      "epoch": 0.06124184859654097,
      "grad_norm": 0.2604275941848755,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 10.3696,
      "step": 27
    },
    {
      "epoch": 0.06351006521122768,
      "grad_norm": 0.25977396965026855,
      "learning_rate": 4.347369038899744e-05,
      "loss": 10.3701,
      "step": 28
    },
    {
      "epoch": 0.06577828182591437,
      "grad_norm": 0.24867641925811768,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 10.3697,
      "step": 29
    },
    {
      "epoch": 0.06804649844060108,
      "grad_norm": 0.25991758704185486,
      "learning_rate": 3.705904774487396e-05,
      "loss": 10.3685,
      "step": 30
    },
    {
      "epoch": 0.07031471505528777,
      "grad_norm": 0.2578147351741791,
      "learning_rate": 3.392802673484193e-05,
      "loss": 10.3677,
      "step": 31
    },
    {
      "epoch": 0.07258293166997448,
      "grad_norm": 0.26339924335479736,
      "learning_rate": 3.086582838174551e-05,
      "loss": 10.3657,
      "step": 32
    },
    {
      "epoch": 0.07485114828466119,
      "grad_norm": 0.26088207960128784,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 10.3669,
      "step": 33
    },
    {
      "epoch": 0.07711936489934788,
      "grad_norm": 0.2547827959060669,
      "learning_rate": 2.500000000000001e-05,
      "loss": 10.3682,
      "step": 34
    },
    {
      "epoch": 0.07938758151403459,
      "grad_norm": 0.24972853064537048,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 10.3678,
      "step": 35
    },
    {
      "epoch": 0.0816557981287213,
      "grad_norm": 0.24014292657375336,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 10.3681,
      "step": 36
    },
    {
      "epoch": 0.08392401474340799,
      "grad_norm": 0.25370946526527405,
      "learning_rate": 1.703270924499656e-05,
      "loss": 10.368,
      "step": 37
    },
    {
      "epoch": 0.0861922313580947,
      "grad_norm": 0.2755444645881653,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 10.365,
      "step": 38
    },
    {
      "epoch": 0.0884604479727814,
      "grad_norm": 0.28044813871383667,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 10.3633,
      "step": 39
    },
    {
      "epoch": 0.0907286645874681,
      "grad_norm": 0.2877492308616638,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 10.3634,
      "step": 40
    },
    {
      "epoch": 0.09299688120215481,
      "grad_norm": 0.28548380732536316,
      "learning_rate": 8.426519384872733e-06,
      "loss": 10.3642,
      "step": 41
    },
    {
      "epoch": 0.0952650978168415,
      "grad_norm": 0.287057101726532,
      "learning_rate": 6.698729810778065e-06,
      "loss": 10.3644,
      "step": 42
    },
    {
      "epoch": 0.09753331443152821,
      "grad_norm": 0.27017641067504883,
      "learning_rate": 5.156362923365588e-06,
      "loss": 10.3632,
      "step": 43
    },
    {
      "epoch": 0.09980153104621492,
      "grad_norm": 0.28016337752342224,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 10.3635,
      "step": 44
    },
    {
      "epoch": 0.10206974766090161,
      "grad_norm": 0.2789553701877594,
      "learning_rate": 2.653493525244721e-06,
      "loss": 10.3652,
      "step": 45
    },
    {
      "epoch": 0.10433796427558832,
      "grad_norm": 0.26287078857421875,
      "learning_rate": 1.70370868554659e-06,
      "loss": 10.3658,
      "step": 46
    },
    {
      "epoch": 0.10660618089027502,
      "grad_norm": 0.2548418939113617,
      "learning_rate": 9.607359798384785e-07,
      "loss": 10.3657,
      "step": 47
    },
    {
      "epoch": 0.10887439750496172,
      "grad_norm": 0.2463953047990799,
      "learning_rate": 4.277569313094809e-07,
      "loss": 10.3657,
      "step": 48
    },
    {
      "epoch": 0.11114261411964843,
      "grad_norm": 0.24887269735336304,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 10.3673,
      "step": 49
    },
    {
      "epoch": 0.11341083073433512,
      "grad_norm": 0.28098398447036743,
      "learning_rate": 0.0,
      "loss": 10.3651,
      "step": 50
    },
    {
      "epoch": 0.11341083073433512,
      "eval_loss": 10.363082885742188,
      "eval_runtime": 3.0067,
      "eval_samples_per_second": 987.799,
      "eval_steps_per_second": 123.724,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 42781424615424.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}