{
  "best_metric": 0.9440848231315613,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.13183915622940012,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0026367831245880024,
      "grad_norm": 2.840853452682495,
      "learning_rate": 5e-05,
      "loss": 1.0702,
      "step": 1
    },
    {
      "epoch": 0.0026367831245880024,
      "eval_loss": 1.734526515007019,
      "eval_runtime": 124.8601,
      "eval_samples_per_second": 20.463,
      "eval_steps_per_second": 2.563,
      "step": 1
    },
    {
      "epoch": 0.005273566249176005,
      "grad_norm": 3.5962321758270264,
      "learning_rate": 0.0001,
      "loss": 1.3219,
      "step": 2
    },
    {
      "epoch": 0.007910349373764008,
      "grad_norm": 3.732879400253296,
      "learning_rate": 9.989294616193017e-05,
      "loss": 1.3727,
      "step": 3
    },
    {
      "epoch": 0.01054713249835201,
      "grad_norm": 4.844780921936035,
      "learning_rate": 9.957224306869053e-05,
      "loss": 1.3763,
      "step": 4
    },
    {
      "epoch": 0.013183915622940013,
      "grad_norm": 6.486969470977783,
      "learning_rate": 9.903926402016153e-05,
      "loss": 1.3032,
      "step": 5
    },
    {
      "epoch": 0.015820698747528016,
      "grad_norm": 6.895390033721924,
      "learning_rate": 9.829629131445342e-05,
      "loss": 1.2952,
      "step": 6
    },
    {
      "epoch": 0.01845748187211602,
      "grad_norm": 7.021610260009766,
      "learning_rate": 9.73465064747553e-05,
      "loss": 1.2004,
      "step": 7
    },
    {
      "epoch": 0.02109426499670402,
      "grad_norm": 8.559478759765625,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.1234,
      "step": 8
    },
    {
      "epoch": 0.023731048121292023,
      "grad_norm": 9.841951370239258,
      "learning_rate": 9.484363707663442e-05,
      "loss": 1.1619,
      "step": 9
    },
    {
      "epoch": 0.026367831245880026,
      "grad_norm": 9.573534965515137,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.0597,
      "step": 10
    },
    {
      "epoch": 0.02900461437046803,
      "grad_norm": 10.767778396606445,
      "learning_rate": 9.157348061512727e-05,
      "loss": 1.0368,
      "step": 11
    },
    {
      "epoch": 0.03164139749505603,
      "grad_norm": 17.224395751953125,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.2416,
      "step": 12
    },
    {
      "epoch": 0.034278180619644036,
      "grad_norm": 13.874789237976074,
      "learning_rate": 8.759199037394887e-05,
      "loss": 1.3775,
      "step": 13
    },
    {
      "epoch": 0.03691496374423204,
      "grad_norm": 6.476630687713623,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.0851,
      "step": 14
    },
    {
      "epoch": 0.03955174686882004,
      "grad_norm": 6.1668477058410645,
      "learning_rate": 8.296729075500344e-05,
      "loss": 1.1948,
      "step": 15
    },
    {
      "epoch": 0.04218852999340804,
      "grad_norm": 5.449673652648926,
      "learning_rate": 8.043807145043604e-05,
      "loss": 1.1321,
      "step": 16
    },
    {
      "epoch": 0.04482531311799604,
      "grad_norm": 4.996151447296143,
      "learning_rate": 7.777851165098012e-05,
      "loss": 1.0883,
      "step": 17
    },
    {
      "epoch": 0.047462096242584045,
      "grad_norm": 4.24849271774292,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.1233,
      "step": 18
    },
    {
      "epoch": 0.05009887936717205,
      "grad_norm": 4.225758075714111,
      "learning_rate": 7.211443451095007e-05,
      "loss": 1.0395,
      "step": 19
    },
    {
      "epoch": 0.05273566249176005,
      "grad_norm": 4.2424845695495605,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.9618,
      "step": 20
    },
    {
      "epoch": 0.055372445616348055,
      "grad_norm": 4.8217926025390625,
      "learning_rate": 6.607197326515808e-05,
      "loss": 0.9152,
      "step": 21
    },
    {
      "epoch": 0.05800922874093606,
      "grad_norm": 4.882667541503906,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.9311,
      "step": 22
    },
    {
      "epoch": 0.06064601186552406,
      "grad_norm": 5.357641696929932,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.7931,
      "step": 23
    },
    {
      "epoch": 0.06328279499011207,
      "grad_norm": 8.169944763183594,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.7887,
      "step": 24
    },
    {
      "epoch": 0.06591957811470006,
      "grad_norm": 17.7220516204834,
      "learning_rate": 5.327015646150716e-05,
      "loss": 0.9647,
      "step": 25
    },
    {
      "epoch": 0.06591957811470006,
      "eval_loss": 0.9688600897789001,
      "eval_runtime": 125.1137,
      "eval_samples_per_second": 20.421,
      "eval_steps_per_second": 2.558,
      "step": 25
    },
    {
      "epoch": 0.06855636123928807,
      "grad_norm": 1.7932987213134766,
      "learning_rate": 5e-05,
      "loss": 0.9886,
      "step": 26
    },
    {
      "epoch": 0.07119314436387607,
      "grad_norm": 2.4545319080352783,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 1.1235,
      "step": 27
    },
    {
      "epoch": 0.07382992748846408,
      "grad_norm": 2.206700325012207,
      "learning_rate": 4.347369038899744e-05,
      "loss": 1.088,
      "step": 28
    },
    {
      "epoch": 0.07646671061305207,
      "grad_norm": 2.3057193756103516,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 1.0664,
      "step": 29
    },
    {
      "epoch": 0.07910349373764008,
      "grad_norm": 2.116140842437744,
      "learning_rate": 3.705904774487396e-05,
      "loss": 1.0877,
      "step": 30
    },
    {
      "epoch": 0.08174027686222808,
      "grad_norm": 1.8481568098068237,
      "learning_rate": 3.392802673484193e-05,
      "loss": 1.0048,
      "step": 31
    },
    {
      "epoch": 0.08437705998681608,
      "grad_norm": 2.556462287902832,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.0342,
      "step": 32
    },
    {
      "epoch": 0.08701384311140409,
      "grad_norm": 2.689141035079956,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 0.929,
      "step": 33
    },
    {
      "epoch": 0.08965062623599208,
      "grad_norm": 3.946350336074829,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.8648,
      "step": 34
    },
    {
      "epoch": 0.0922874093605801,
      "grad_norm": 3.916400671005249,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.7134,
      "step": 35
    },
    {
      "epoch": 0.09492419248516809,
      "grad_norm": 5.732182502746582,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.7544,
      "step": 36
    },
    {
      "epoch": 0.0975609756097561,
      "grad_norm": 8.657495498657227,
      "learning_rate": 1.703270924499656e-05,
      "loss": 0.8536,
      "step": 37
    },
    {
      "epoch": 0.1001977587343441,
      "grad_norm": 9.490399360656738,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 1.0348,
      "step": 38
    },
    {
      "epoch": 0.10283454185893211,
      "grad_norm": 1.175460934638977,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 1.0276,
      "step": 39
    },
    {
      "epoch": 0.1054713249835201,
      "grad_norm": 1.179926872253418,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 1.0622,
      "step": 40
    },
    {
      "epoch": 0.10810810810810811,
      "grad_norm": 1.2721707820892334,
      "learning_rate": 8.426519384872733e-06,
      "loss": 1.0583,
      "step": 41
    },
    {
      "epoch": 0.11074489123269611,
      "grad_norm": 1.4001848697662354,
      "learning_rate": 6.698729810778065e-06,
      "loss": 1.0218,
      "step": 42
    },
    {
      "epoch": 0.1133816743572841,
      "grad_norm": 1.6390767097473145,
      "learning_rate": 5.156362923365588e-06,
      "loss": 1.0694,
      "step": 43
    },
    {
      "epoch": 0.11601845748187212,
      "grad_norm": 2.0891051292419434,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.02,
      "step": 44
    },
    {
      "epoch": 0.11865524060646011,
      "grad_norm": 2.4620070457458496,
      "learning_rate": 2.653493525244721e-06,
      "loss": 0.9383,
      "step": 45
    },
    {
      "epoch": 0.12129202373104812,
      "grad_norm": 2.9754490852355957,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.9056,
      "step": 46
    },
    {
      "epoch": 0.12392880685563612,
      "grad_norm": 4.956353664398193,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.8469,
      "step": 47
    },
    {
      "epoch": 0.12656558998022413,
      "grad_norm": 4.547539234161377,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.8408,
      "step": 48
    },
    {
      "epoch": 0.12920237310481214,
      "grad_norm": 6.952060222625732,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 0.7187,
      "step": 49
    },
    {
      "epoch": 0.13183915622940012,
      "grad_norm": 14.889754295349121,
      "learning_rate": 0.0,
      "loss": 0.9862,
      "step": 50
    },
    {
      "epoch": 0.13183915622940012,
      "eval_loss": 0.9440848231315613,
      "eval_runtime": 125.0915,
      "eval_samples_per_second": 20.425,
      "eval_steps_per_second": 2.558,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0952750720352256e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}