{
  "best_metric": 1.3278440237045288,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.0008455980280653986,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.6911960561307972e-05,
      "grad_norm": 0.5461403131484985,
      "learning_rate": 4e-05,
      "loss": 3.0343,
      "step": 1
    },
    {
      "epoch": 1.6911960561307972e-05,
      "eval_loss": 3.3437588214874268,
      "eval_runtime": 1805.366,
      "eval_samples_per_second": 13.791,
      "eval_steps_per_second": 6.896,
      "step": 1
    },
    {
      "epoch": 3.3823921122615945e-05,
      "grad_norm": 1.5352400541305542,
      "learning_rate": 8e-05,
      "loss": 2.567,
      "step": 2
    },
    {
      "epoch": 5.073588168392391e-05,
      "grad_norm": 1.80024254322052,
      "learning_rate": 0.00012,
      "loss": 2.7514,
      "step": 3
    },
    {
      "epoch": 6.764784224523189e-05,
      "grad_norm": 1.3233122825622559,
      "learning_rate": 0.00016,
      "loss": 2.6365,
      "step": 4
    },
    {
      "epoch": 8.455980280653986e-05,
      "grad_norm": 1.474558711051941,
      "learning_rate": 0.0002,
      "loss": 2.8207,
      "step": 5
    },
    {
      "epoch": 0.00010147176336784782,
      "grad_norm": 1.7245981693267822,
      "learning_rate": 0.00019994532573409262,
      "loss": 2.6809,
      "step": 6
    },
    {
      "epoch": 0.00011838372392915579,
      "grad_norm": 2.162684679031372,
      "learning_rate": 0.00019978136272187747,
      "loss": 2.6813,
      "step": 7
    },
    {
      "epoch": 0.00013529568449046378,
      "grad_norm": 1.9564881324768066,
      "learning_rate": 0.00019950829025450114,
      "loss": 2.4524,
      "step": 8
    },
    {
      "epoch": 0.00015220764505177175,
      "grad_norm": 2.0885121822357178,
      "learning_rate": 0.00019912640693269752,
      "loss": 2.0998,
      "step": 9
    },
    {
      "epoch": 0.00016911960561307972,
      "grad_norm": 3.7902536392211914,
      "learning_rate": 0.00019863613034027224,
      "loss": 2.0842,
      "step": 10
    },
    {
      "epoch": 0.00018603156617438767,
      "grad_norm": 2.2282345294952393,
      "learning_rate": 0.00019803799658748094,
      "loss": 1.9047,
      "step": 11
    },
    {
      "epoch": 0.00020294352673569564,
      "grad_norm": 1.974695086479187,
      "learning_rate": 0.0001973326597248006,
      "loss": 1.5154,
      "step": 12
    },
    {
      "epoch": 0.0002198554872970036,
      "grad_norm": 2.592761278152466,
      "learning_rate": 0.00019652089102773488,
      "loss": 1.7882,
      "step": 13
    },
    {
      "epoch": 0.00023676744785831159,
      "grad_norm": 2.0295674800872803,
      "learning_rate": 0.00019560357815343577,
      "loss": 1.6305,
      "step": 14
    },
    {
      "epoch": 0.00025367940841961956,
      "grad_norm": 1.625274658203125,
      "learning_rate": 0.00019458172417006347,
      "loss": 1.473,
      "step": 15
    },
    {
      "epoch": 0.00027059136898092756,
      "grad_norm": 1.835348129272461,
      "learning_rate": 0.0001934564464599461,
      "loss": 1.8893,
      "step": 16
    },
    {
      "epoch": 0.0002875033295422355,
      "grad_norm": 1.6969001293182373,
      "learning_rate": 0.00019222897549773848,
      "loss": 1.8415,
      "step": 17
    },
    {
      "epoch": 0.0003044152901035435,
      "grad_norm": 2.303267002105713,
      "learning_rate": 0.00019090065350491626,
      "loss": 1.3015,
      "step": 18
    },
    {
      "epoch": 0.00032132725066485145,
      "grad_norm": 1.8150877952575684,
      "learning_rate": 0.00018947293298207635,
      "loss": 1.2658,
      "step": 19
    },
    {
      "epoch": 0.00033823921122615945,
      "grad_norm": 2.3673338890075684,
      "learning_rate": 0.0001879473751206489,
      "loss": 1.2364,
      "step": 20
    },
    {
      "epoch": 0.0003551511717874674,
      "grad_norm": 2.4632089138031006,
      "learning_rate": 0.00018632564809575742,
      "loss": 1.0193,
      "step": 21
    },
    {
      "epoch": 0.00037206313234877534,
      "grad_norm": 1.3708044290542603,
      "learning_rate": 0.00018460952524209355,
      "loss": 0.9471,
      "step": 22
    },
    {
      "epoch": 0.00038897509291008334,
      "grad_norm": 2.6031060218811035,
      "learning_rate": 0.00018280088311480201,
      "loss": 1.0314,
      "step": 23
    },
    {
      "epoch": 0.0004058870534713913,
      "grad_norm": 0.9620503187179565,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.8515,
      "step": 24
    },
    {
      "epoch": 0.0004227990140326993,
      "grad_norm": 1.026154637336731,
      "learning_rate": 0.00017891405093963938,
      "loss": 0.8531,
      "step": 25
    },
    {
      "epoch": 0.0004227990140326993,
      "eval_loss": 1.3278440237045288,
      "eval_runtime": 1805.0632,
      "eval_samples_per_second": 13.793,
      "eval_steps_per_second": 6.897,
      "step": 25
    },
    {
      "epoch": 0.0004397109745940072,
      "grad_norm": 1.2110459804534912,
      "learning_rate": 0.00017684011108568592,
      "loss": 0.895,
      "step": 26
    },
    {
      "epoch": 0.0004566229351553152,
      "grad_norm": 1.1382213830947876,
      "learning_rate": 0.0001746821476984154,
      "loss": 0.9614,
      "step": 27
    },
    {
      "epoch": 0.00047353489571662317,
      "grad_norm": 1.0368095636367798,
      "learning_rate": 0.00017244252047910892,
      "loss": 0.8552,
      "step": 28
    },
    {
      "epoch": 0.0004904468562779311,
      "grad_norm": 1.323333740234375,
      "learning_rate": 0.00017012367842724887,
      "loss": 1.0692,
      "step": 29
    },
    {
      "epoch": 0.0005073588168392391,
      "grad_norm": 1.1299031972885132,
      "learning_rate": 0.00016772815716257412,
      "loss": 1.0273,
      "step": 30
    },
    {
      "epoch": 0.0005242707774005471,
      "grad_norm": 0.9993110299110413,
      "learning_rate": 0.00016525857615241687,
      "loss": 0.7862,
      "step": 31
    },
    {
      "epoch": 0.0005411827379618551,
      "grad_norm": 0.9180406332015991,
      "learning_rate": 0.0001627176358473537,
      "loss": 0.8503,
      "step": 32
    },
    {
      "epoch": 0.000558094698523163,
      "grad_norm": 0.852716863155365,
      "learning_rate": 0.00016010811472830252,
      "loss": 0.7589,
      "step": 33
    },
    {
      "epoch": 0.000575006659084471,
      "grad_norm": 1.1683015823364258,
      "learning_rate": 0.00015743286626829437,
      "loss": 0.9032,
      "step": 34
    },
    {
      "epoch": 0.000591918619645779,
      "grad_norm": 0.8834861516952515,
      "learning_rate": 0.00015469481581224272,
      "loss": 0.8227,
      "step": 35
    },
    {
      "epoch": 0.000608830580207087,
      "grad_norm": 0.788383424282074,
      "learning_rate": 0.00015189695737812152,
      "loss": 0.8611,
      "step": 36
    },
    {
      "epoch": 0.0006257425407683949,
      "grad_norm": 0.7765706777572632,
      "learning_rate": 0.00014904235038305083,
      "loss": 0.8839,
      "step": 37
    },
    {
      "epoch": 0.0006426545013297029,
      "grad_norm": 0.8954746127128601,
      "learning_rate": 0.0001461341162978688,
      "loss": 0.7209,
      "step": 38
    },
    {
      "epoch": 0.0006595664618910109,
      "grad_norm": 0.6842917799949646,
      "learning_rate": 0.00014317543523384928,
      "loss": 0.644,
      "step": 39
    },
    {
      "epoch": 0.0006764784224523189,
      "grad_norm": 0.8862132430076599,
      "learning_rate": 0.00014016954246529696,
      "loss": 0.7262,
      "step": 40
    },
    {
      "epoch": 0.0006933903830136268,
      "grad_norm": 0.8410934805870056,
      "learning_rate": 0.00013711972489182208,
      "loss": 0.7245,
      "step": 41
    },
    {
      "epoch": 0.0007103023435749348,
      "grad_norm": 1.1996475458145142,
      "learning_rate": 0.00013402931744416433,
      "loss": 0.8627,
      "step": 42
    },
    {
      "epoch": 0.0007272143041362428,
      "grad_norm": 1.1021193265914917,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.887,
      "step": 43
    },
    {
      "epoch": 0.0007441262646975507,
      "grad_norm": 1.623423457145691,
      "learning_rate": 0.00012774029087618446,
      "loss": 0.9683,
      "step": 44
    },
    {
      "epoch": 0.0007610382252588587,
      "grad_norm": 0.932873547077179,
      "learning_rate": 0.00012454854871407994,
      "loss": 0.882,
      "step": 45
    },
    {
      "epoch": 0.0007779501858201667,
      "grad_norm": 0.9381756782531738,
      "learning_rate": 0.0001213299630743747,
      "loss": 0.8824,
      "step": 46
    },
    {
      "epoch": 0.0007948621463814747,
      "grad_norm": 1.2354497909545898,
      "learning_rate": 0.000118088053433211,
      "loss": 1.093,
      "step": 47
    },
    {
      "epoch": 0.0008117741069427826,
      "grad_norm": 1.0774998664855957,
      "learning_rate": 0.0001148263647711842,
      "loss": 1.032,
      "step": 48
    },
    {
      "epoch": 0.0008286860675040906,
      "grad_norm": 0.8946607708930969,
      "learning_rate": 0.00011154846369695863,
      "loss": 0.8654,
      "step": 49
    },
    {
      "epoch": 0.0008455980280653986,
      "grad_norm": 0.8054470419883728,
      "learning_rate": 0.00010825793454723325,
      "loss": 0.8106,
      "step": 50
    },
    {
      "epoch": 0.0008455980280653986,
      "eval_loss": 1.3436920642852783,
      "eval_runtime": 1805.0607,
      "eval_samples_per_second": 13.793,
      "eval_steps_per_second": 6.897,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3604771504128000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}