{
  "best_metric": 0.6534409523010254,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.5983545250560958,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.011967090501121914,
      "grad_norm": 11.746336936950684,
      "learning_rate": 5e-05,
      "loss": 4.3107,
      "step": 1
    },
    {
      "epoch": 0.011967090501121914,
      "eval_loss": NaN,
      "eval_runtime": 4.9307,
      "eval_samples_per_second": 114.182,
      "eval_steps_per_second": 14.399,
      "step": 1
    },
    {
      "epoch": 0.02393418100224383,
      "grad_norm": 11.741243362426758,
      "learning_rate": 0.0001,
      "loss": 4.3254,
      "step": 2
    },
    {
      "epoch": 0.03590127150336574,
      "grad_norm": 10.741928100585938,
      "learning_rate": 9.989294616193017e-05,
      "loss": 4.0354,
      "step": 3
    },
    {
      "epoch": 0.04786836200448766,
      "grad_norm": 7.754953384399414,
      "learning_rate": 9.957224306869053e-05,
      "loss": 2.6626,
      "step": 4
    },
    {
      "epoch": 0.05983545250560957,
      "grad_norm": 7.887713432312012,
      "learning_rate": 9.903926402016153e-05,
      "loss": 1.8429,
      "step": 5
    },
    {
      "epoch": 0.07180254300673149,
      "grad_norm": 8.430523872375488,
      "learning_rate": 9.829629131445342e-05,
      "loss": 1.3845,
      "step": 6
    },
    {
      "epoch": 0.08376963350785341,
      "grad_norm": 4.98731803894043,
      "learning_rate": 9.73465064747553e-05,
      "loss": 1.0405,
      "step": 7
    },
    {
      "epoch": 0.09573672400897532,
      "grad_norm": 4.610218524932861,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.8455,
      "step": 8
    },
    {
      "epoch": 0.10770381451009724,
      "grad_norm": 2.6115477085113525,
      "learning_rate": 9.484363707663442e-05,
      "loss": 0.7532,
      "step": 9
    },
    {
      "epoch": 0.11967090501121914,
      "grad_norm": 4.972604751586914,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.7105,
      "step": 10
    },
    {
      "epoch": 0.13163799551234107,
      "grad_norm": 3.9826550483703613,
      "learning_rate": 9.157348061512727e-05,
      "loss": 0.7128,
      "step": 11
    },
    {
      "epoch": 0.14360508601346297,
      "grad_norm": 6.129772186279297,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.7158,
      "step": 12
    },
    {
      "epoch": 0.15557217651458488,
      "grad_norm": 4.263116836547852,
      "learning_rate": 8.759199037394887e-05,
      "loss": 0.7414,
      "step": 13
    },
    {
      "epoch": 0.16753926701570682,
      "grad_norm": 3.859707832336426,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.7533,
      "step": 14
    },
    {
      "epoch": 0.17950635751682872,
      "grad_norm": 3.044384479522705,
      "learning_rate": 8.296729075500344e-05,
      "loss": 0.725,
      "step": 15
    },
    {
      "epoch": 0.19147344801795063,
      "grad_norm": 1.5025105476379395,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.6679,
      "step": 16
    },
    {
      "epoch": 0.20344053851907254,
      "grad_norm": 1.998008131980896,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.6668,
      "step": 17
    },
    {
      "epoch": 0.21540762902019447,
      "grad_norm": 2.3586137294769287,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.6825,
      "step": 18
    },
    {
      "epoch": 0.22737471952131638,
      "grad_norm": 1.0638742446899414,
      "learning_rate": 7.211443451095007e-05,
      "loss": 0.649,
      "step": 19
    },
    {
      "epoch": 0.2393418100224383,
      "grad_norm": 1.374294638633728,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.6837,
      "step": 20
    },
    {
      "epoch": 0.2513089005235602,
      "grad_norm": 1.2036421298980713,
      "learning_rate": 6.607197326515808e-05,
      "loss": 0.6681,
      "step": 21
    },
    {
      "epoch": 0.26327599102468213,
      "grad_norm": 1.2960809469223022,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.6281,
      "step": 22
    },
    {
      "epoch": 0.275243081525804,
      "grad_norm": 0.574422299861908,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.627,
      "step": 23
    },
    {
      "epoch": 0.28721017202692595,
      "grad_norm": 0.6000377535820007,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.6426,
      "step": 24
    },
    {
      "epoch": 0.2991772625280479,
      "grad_norm": 0.9646682143211365,
      "learning_rate": 5.327015646150716e-05,
      "loss": 0.6476,
      "step": 25
    },
    {
      "epoch": 0.2991772625280479,
      "eval_loss": 0.6616689562797546,
      "eval_runtime": 4.9314,
      "eval_samples_per_second": 114.167,
      "eval_steps_per_second": 14.398,
      "step": 25
    },
    {
      "epoch": 0.31114435302916976,
      "grad_norm": 3.2467966079711914,
      "learning_rate": 5e-05,
      "loss": 0.7302,
      "step": 26
    },
    {
      "epoch": 0.3231114435302917,
      "grad_norm": 2.911585807800293,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 0.6855,
      "step": 27
    },
    {
      "epoch": 0.33507853403141363,
      "grad_norm": 1.2173607349395752,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.6575,
      "step": 28
    },
    {
      "epoch": 0.3470456245325355,
      "grad_norm": 0.9109578728675842,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.6314,
      "step": 29
    },
    {
      "epoch": 0.35901271503365745,
      "grad_norm": 2.12790584564209,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.6653,
      "step": 30
    },
    {
      "epoch": 0.3709798055347794,
      "grad_norm": 1.9173645973205566,
      "learning_rate": 3.392802673484193e-05,
      "loss": 0.6826,
      "step": 31
    },
    {
      "epoch": 0.38294689603590126,
      "grad_norm": 1.5864529609680176,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.6398,
      "step": 32
    },
    {
      "epoch": 0.3949139865370232,
      "grad_norm": 0.49075672030448914,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 0.6084,
      "step": 33
    },
    {
      "epoch": 0.4068810770381451,
      "grad_norm": 1.548568606376648,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.625,
      "step": 34
    },
    {
      "epoch": 0.418848167539267,
      "grad_norm": 1.5237716436386108,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.6102,
      "step": 35
    },
    {
      "epoch": 0.43081525804038895,
      "grad_norm": 1.6438311338424683,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.6662,
      "step": 36
    },
    {
      "epoch": 0.44278234854151083,
      "grad_norm": 0.4927654564380646,
      "learning_rate": 1.703270924499656e-05,
      "loss": 0.6654,
      "step": 37
    },
    {
      "epoch": 0.45474943904263276,
      "grad_norm": 1.176863670349121,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.6997,
      "step": 38
    },
    {
      "epoch": 0.4667165295437547,
      "grad_norm": 1.7410774230957031,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 0.6752,
      "step": 39
    },
    {
      "epoch": 0.4786836200448766,
      "grad_norm": 0.8015233874320984,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.6267,
      "step": 40
    },
    {
      "epoch": 0.4906507105459985,
      "grad_norm": 0.6555165648460388,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.6412,
      "step": 41
    },
    {
      "epoch": 0.5026178010471204,
      "grad_norm": 1.1170567274093628,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.6525,
      "step": 42
    },
    {
      "epoch": 0.5145848915482424,
      "grad_norm": 0.7687378525733948,
      "learning_rate": 5.156362923365588e-06,
      "loss": 0.6476,
      "step": 43
    },
    {
      "epoch": 0.5265519820493643,
      "grad_norm": 0.5519921183586121,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.6052,
      "step": 44
    },
    {
      "epoch": 0.5385190725504861,
      "grad_norm": 1.3455817699432373,
      "learning_rate": 2.653493525244721e-06,
      "loss": 0.6142,
      "step": 45
    },
    {
      "epoch": 0.550486163051608,
      "grad_norm": 1.2510255575180054,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.6408,
      "step": 46
    },
    {
      "epoch": 0.56245325355273,
      "grad_norm": 1.7463011741638184,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.6705,
      "step": 47
    },
    {
      "epoch": 0.5744203440538519,
      "grad_norm": 1.1176273822784424,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.5998,
      "step": 48
    },
    {
      "epoch": 0.5863874345549738,
      "grad_norm": 1.4608560800552368,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 0.6772,
      "step": 49
    },
    {
      "epoch": 0.5983545250560958,
      "grad_norm": 1.8109873533248901,
      "learning_rate": 0.0,
      "loss": 0.6619,
      "step": 50
    },
    {
      "epoch": 0.5983545250560958,
      "eval_loss": 0.6534409523010254,
      "eval_runtime": 5.1055,
      "eval_samples_per_second": 110.274,
      "eval_steps_per_second": 13.907,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.83042868150272e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}