{
  "best_metric": 2.295586347579956,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.031069167734669306,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0006213833546933862,
      "grad_norm": 20.18564796447754,
      "learning_rate": 5e-05,
      "loss": 6.7432,
      "step": 1
    },
    {
      "epoch": 0.0006213833546933862,
      "eval_loss": 8.449797630310059,
      "eval_runtime": 99.453,
      "eval_samples_per_second": 109.016,
      "eval_steps_per_second": 13.635,
      "step": 1
    },
    {
      "epoch": 0.0012427667093867724,
      "grad_norm": 33.745121002197266,
      "learning_rate": 0.0001,
      "loss": 7.6447,
      "step": 2
    },
    {
      "epoch": 0.0018641500640801585,
      "grad_norm": 34.93381881713867,
      "learning_rate": 9.989294616193017e-05,
      "loss": 7.7703,
      "step": 3
    },
    {
      "epoch": 0.0024855334187735448,
      "grad_norm": 14.993317604064941,
      "learning_rate": 9.957224306869053e-05,
      "loss": 6.4022,
      "step": 4
    },
    {
      "epoch": 0.0031069167734669306,
      "grad_norm": 13.611467361450195,
      "learning_rate": 9.903926402016153e-05,
      "loss": 6.1388,
      "step": 5
    },
    {
      "epoch": 0.003728300128160317,
      "grad_norm": 6.870720863342285,
      "learning_rate": 9.829629131445342e-05,
      "loss": 6.0277,
      "step": 6
    },
    {
      "epoch": 0.004349683482853703,
      "grad_norm": 11.345956802368164,
      "learning_rate": 9.73465064747553e-05,
      "loss": 5.8643,
      "step": 7
    },
    {
      "epoch": 0.0049710668375470895,
      "grad_norm": 5.781732559204102,
      "learning_rate": 9.619397662556435e-05,
      "loss": 5.6858,
      "step": 8
    },
    {
      "epoch": 0.005592450192240475,
      "grad_norm": 5.978038311004639,
      "learning_rate": 9.484363707663442e-05,
      "loss": 5.5732,
      "step": 9
    },
    {
      "epoch": 0.006213833546933861,
      "grad_norm": 5.235627174377441,
      "learning_rate": 9.330127018922194e-05,
      "loss": 5.2231,
      "step": 10
    },
    {
      "epoch": 0.0068352169016272476,
      "grad_norm": 6.350806713104248,
      "learning_rate": 9.157348061512727e-05,
      "loss": 5.2827,
      "step": 11
    },
    {
      "epoch": 0.007456600256320634,
      "grad_norm": 5.949211120605469,
      "learning_rate": 8.966766701456177e-05,
      "loss": 5.2801,
      "step": 12
    },
    {
      "epoch": 0.00807798361101402,
      "grad_norm": 5.284346580505371,
      "learning_rate": 8.759199037394887e-05,
      "loss": 4.3414,
      "step": 13
    },
    {
      "epoch": 0.008699366965707406,
      "grad_norm": 5.39470100402832,
      "learning_rate": 8.535533905932738e-05,
      "loss": 4.4207,
      "step": 14
    },
    {
      "epoch": 0.009320750320400792,
      "grad_norm": 5.765320777893066,
      "learning_rate": 8.296729075500344e-05,
      "loss": 4.3219,
      "step": 15
    },
    {
      "epoch": 0.009942133675094179,
      "grad_norm": 10.471980094909668,
      "learning_rate": 8.043807145043604e-05,
      "loss": 4.5284,
      "step": 16
    },
    {
      "epoch": 0.010563517029787564,
      "grad_norm": 7.325485706329346,
      "learning_rate": 7.777851165098012e-05,
      "loss": 4.4177,
      "step": 17
    },
    {
      "epoch": 0.01118490038448095,
      "grad_norm": 7.271228790283203,
      "learning_rate": 7.500000000000001e-05,
      "loss": 4.1735,
      "step": 18
    },
    {
      "epoch": 0.011806283739174337,
      "grad_norm": 6.641774654388428,
      "learning_rate": 7.211443451095007e-05,
      "loss": 4.1708,
      "step": 19
    },
    {
      "epoch": 0.012427667093867723,
      "grad_norm": 10.327823638916016,
      "learning_rate": 6.91341716182545e-05,
      "loss": 3.9731,
      "step": 20
    },
    {
      "epoch": 0.01304905044856111,
      "grad_norm": 9.504193305969238,
      "learning_rate": 6.607197326515808e-05,
      "loss": 3.6202,
      "step": 21
    },
    {
      "epoch": 0.013670433803254495,
      "grad_norm": 8.062225341796875,
      "learning_rate": 6.294095225512603e-05,
      "loss": 3.5678,
      "step": 22
    },
    {
      "epoch": 0.014291817157947882,
      "grad_norm": 10.836487770080566,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 3.3968,
      "step": 23
    },
    {
      "epoch": 0.014913200512641268,
      "grad_norm": 19.103179931640625,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 3.3893,
      "step": 24
    },
    {
      "epoch": 0.015534583867334653,
      "grad_norm": 13.378957748413086,
      "learning_rate": 5.327015646150716e-05,
      "loss": 3.4375,
      "step": 25
    },
    {
      "epoch": 0.015534583867334653,
      "eval_loss": 3.0447850227355957,
      "eval_runtime": 99.5049,
      "eval_samples_per_second": 108.96,
      "eval_steps_per_second": 13.627,
      "step": 25
    },
    {
      "epoch": 0.01615596722202804,
      "grad_norm": 6.254918575286865,
      "learning_rate": 5e-05,
      "loss": 2.7294,
      "step": 26
    },
    {
      "epoch": 0.016777350576721427,
      "grad_norm": 7.319060802459717,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 2.8813,
      "step": 27
    },
    {
      "epoch": 0.017398733931414813,
      "grad_norm": 8.43337345123291,
      "learning_rate": 4.347369038899744e-05,
      "loss": 2.8859,
      "step": 28
    },
    {
      "epoch": 0.0180201172861082,
      "grad_norm": 8.820302963256836,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 3.0777,
      "step": 29
    },
    {
      "epoch": 0.018641500640801584,
      "grad_norm": 7.4783854484558105,
      "learning_rate": 3.705904774487396e-05,
      "loss": 2.5522,
      "step": 30
    },
    {
      "epoch": 0.01926288399549497,
      "grad_norm": 7.428519248962402,
      "learning_rate": 3.392802673484193e-05,
      "loss": 2.7819,
      "step": 31
    },
    {
      "epoch": 0.019884267350188358,
      "grad_norm": 7.079680442810059,
      "learning_rate": 3.086582838174551e-05,
      "loss": 2.6707,
      "step": 32
    },
    {
      "epoch": 0.020505650704881744,
      "grad_norm": 6.271028995513916,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 2.4942,
      "step": 33
    },
    {
      "epoch": 0.02112703405957513,
      "grad_norm": 8.533285140991211,
      "learning_rate": 2.500000000000001e-05,
      "loss": 2.8131,
      "step": 34
    },
    {
      "epoch": 0.021748417414268514,
      "grad_norm": 7.770425796508789,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 2.6253,
      "step": 35
    },
    {
      "epoch": 0.0223698007689619,
      "grad_norm": 7.423771381378174,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 2.7925,
      "step": 36
    },
    {
      "epoch": 0.02299118412365529,
      "grad_norm": 8.581366539001465,
      "learning_rate": 1.703270924499656e-05,
      "loss": 2.6777,
      "step": 37
    },
    {
      "epoch": 0.023612567478348674,
      "grad_norm": 7.073851108551025,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 2.3883,
      "step": 38
    },
    {
      "epoch": 0.02423395083304206,
      "grad_norm": 6.073333740234375,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 2.2233,
      "step": 39
    },
    {
      "epoch": 0.024855334187735445,
      "grad_norm": 7.236076354980469,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 2.2824,
      "step": 40
    },
    {
      "epoch": 0.025476717542428834,
      "grad_norm": 8.7551851272583,
      "learning_rate": 8.426519384872733e-06,
      "loss": 2.2064,
      "step": 41
    },
    {
      "epoch": 0.02609810089712222,
      "grad_norm": 7.784063339233398,
      "learning_rate": 6.698729810778065e-06,
      "loss": 2.2192,
      "step": 42
    },
    {
      "epoch": 0.026719484251815605,
      "grad_norm": 7.144655227661133,
      "learning_rate": 5.156362923365588e-06,
      "loss": 2.3833,
      "step": 43
    },
    {
      "epoch": 0.02734086760650899,
      "grad_norm": 6.632129669189453,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 2.1774,
      "step": 44
    },
    {
      "epoch": 0.027962250961202376,
      "grad_norm": 6.696552276611328,
      "learning_rate": 2.653493525244721e-06,
      "loss": 2.2596,
      "step": 45
    },
    {
      "epoch": 0.028583634315895765,
      "grad_norm": 6.228443622589111,
      "learning_rate": 1.70370868554659e-06,
      "loss": 2.3508,
      "step": 46
    },
    {
      "epoch": 0.02920501767058915,
      "grad_norm": 7.7984538078308105,
      "learning_rate": 9.607359798384785e-07,
      "loss": 2.3823,
      "step": 47
    },
    {
      "epoch": 0.029826401025282535,
      "grad_norm": 9.903982162475586,
      "learning_rate": 4.277569313094809e-07,
      "loss": 2.5122,
      "step": 48
    },
    {
      "epoch": 0.03044778437997592,
      "grad_norm": 7.956427097320557,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 2.5993,
      "step": 49
    },
    {
      "epoch": 0.031069167734669306,
      "grad_norm": 8.152593612670898,
      "learning_rate": 0.0,
      "loss": 2.592,
      "step": 50
    },
    {
      "epoch": 0.031069167734669306,
      "eval_loss": 2.295586347579956,
      "eval_runtime": 99.4875,
      "eval_samples_per_second": 108.979,
      "eval_steps_per_second": 13.63,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.33416392081408e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}