{
  "best_metric": 0.160928413271904,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.048667721133957904,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000973354422679158,
      "grad_norm": 9.625638008117676,
      "learning_rate": 5e-05,
      "loss": 5.5684,
      "step": 1
    },
    {
      "epoch": 0.000973354422679158,
      "eval_loss": 6.539275646209717,
      "eval_runtime": 62.869,
      "eval_samples_per_second": 110.086,
      "eval_steps_per_second": 13.775,
      "step": 1
    },
    {
      "epoch": 0.001946708845358316,
      "grad_norm": 9.678377151489258,
      "learning_rate": 0.0001,
      "loss": 5.5684,
      "step": 2
    },
    {
      "epoch": 0.0029200632680374742,
      "grad_norm": 8.975667953491211,
      "learning_rate": 9.989294616193017e-05,
      "loss": 5.4059,
      "step": 3
    },
    {
      "epoch": 0.003893417690716632,
      "grad_norm": 10.13470458984375,
      "learning_rate": 9.957224306869053e-05,
      "loss": 4.861,
      "step": 4
    },
    {
      "epoch": 0.00486677211339579,
      "grad_norm": 11.194205284118652,
      "learning_rate": 9.903926402016153e-05,
      "loss": 4.2056,
      "step": 5
    },
    {
      "epoch": 0.0058401265360749485,
      "grad_norm": 11.002691268920898,
      "learning_rate": 9.829629131445342e-05,
      "loss": 3.4972,
      "step": 6
    },
    {
      "epoch": 0.006813480958754107,
      "grad_norm": 5.943538188934326,
      "learning_rate": 9.73465064747553e-05,
      "loss": 2.9091,
      "step": 7
    },
    {
      "epoch": 0.007786835381433264,
      "grad_norm": 5.13279914855957,
      "learning_rate": 9.619397662556435e-05,
      "loss": 2.3804,
      "step": 8
    },
    {
      "epoch": 0.008760189804112422,
      "grad_norm": 5.182049751281738,
      "learning_rate": 9.484363707663442e-05,
      "loss": 1.8848,
      "step": 9
    },
    {
      "epoch": 0.00973354422679158,
      "grad_norm": 4.752784729003906,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.4053,
      "step": 10
    },
    {
      "epoch": 0.010706898649470739,
      "grad_norm": 6.532019138336182,
      "learning_rate": 9.157348061512727e-05,
      "loss": 1.5355,
      "step": 11
    },
    {
      "epoch": 0.011680253072149897,
      "grad_norm": 6.032060623168945,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.2536,
      "step": 12
    },
    {
      "epoch": 0.012653607494829055,
      "grad_norm": 5.628830909729004,
      "learning_rate": 8.759199037394887e-05,
      "loss": 0.7092,
      "step": 13
    },
    {
      "epoch": 0.013626961917508213,
      "grad_norm": 4.3675689697265625,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.4724,
      "step": 14
    },
    {
      "epoch": 0.01460031634018737,
      "grad_norm": 2.804490089416504,
      "learning_rate": 8.296729075500344e-05,
      "loss": 0.3723,
      "step": 15
    },
    {
      "epoch": 0.015573670762866528,
      "grad_norm": 4.587884426116943,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.2718,
      "step": 16
    },
    {
      "epoch": 0.016547025185545686,
      "grad_norm": 3.3521015644073486,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.1409,
      "step": 17
    },
    {
      "epoch": 0.017520379608224845,
      "grad_norm": 1.9110612869262695,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.0426,
      "step": 18
    },
    {
      "epoch": 0.018493734030904003,
      "grad_norm": 0.3547068238258362,
      "learning_rate": 7.211443451095007e-05,
      "loss": 0.0101,
      "step": 19
    },
    {
      "epoch": 0.01946708845358316,
      "grad_norm": 0.1654203236103058,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.0037,
      "step": 20
    },
    {
      "epoch": 0.02044044287626232,
      "grad_norm": 0.06659155339002609,
      "learning_rate": 6.607197326515808e-05,
      "loss": 0.0013,
      "step": 21
    },
    {
      "epoch": 0.021413797298941478,
      "grad_norm": 0.028775788843631744,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.0006,
      "step": 22
    },
    {
      "epoch": 0.022387151721620636,
      "grad_norm": 4.46445894241333,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.3989,
      "step": 23
    },
    {
      "epoch": 0.023360506144299794,
      "grad_norm": 18.907028198242188,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.7332,
      "step": 24
    },
    {
      "epoch": 0.024333860566978952,
      "grad_norm": 14.680952072143555,
      "learning_rate": 5.327015646150716e-05,
      "loss": 1.3845,
      "step": 25
    },
    {
      "epoch": 0.024333860566978952,
      "eval_loss": 0.23052285611629486,
      "eval_runtime": 63.1828,
      "eval_samples_per_second": 109.539,
      "eval_steps_per_second": 13.706,
      "step": 25
    },
    {
      "epoch": 0.02530721498965811,
      "grad_norm": 0.10166118294000626,
      "learning_rate": 5e-05,
      "loss": 0.0027,
      "step": 26
    },
    {
      "epoch": 0.02628056941233727,
      "grad_norm": 0.193026602268219,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 0.0059,
      "step": 27
    },
    {
      "epoch": 0.027253923835016427,
      "grad_norm": 0.27356114983558655,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.009,
      "step": 28
    },
    {
      "epoch": 0.028227278257695585,
      "grad_norm": 0.3083343505859375,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.0108,
      "step": 29
    },
    {
      "epoch": 0.02920063268037474,
      "grad_norm": 0.2875608503818512,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.0106,
      "step": 30
    },
    {
      "epoch": 0.030173987103053898,
      "grad_norm": 0.24416722357273102,
      "learning_rate": 3.392802673484193e-05,
      "loss": 0.0095,
      "step": 31
    },
    {
      "epoch": 0.031147341525733056,
      "grad_norm": 0.193377286195755,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.0079,
      "step": 32
    },
    {
      "epoch": 0.032120695948412215,
      "grad_norm": 0.14682887494564056,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 0.0063,
      "step": 33
    },
    {
      "epoch": 0.03309405037109137,
      "grad_norm": 0.11604669690132141,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.0052,
      "step": 34
    },
    {
      "epoch": 0.03406740479377053,
      "grad_norm": 0.09484685212373734,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.0043,
      "step": 35
    },
    {
      "epoch": 0.03504075921644969,
      "grad_norm": 5.945147514343262,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.9259,
      "step": 36
    },
    {
      "epoch": 0.03601411363912885,
      "grad_norm": 5.888754367828369,
      "learning_rate": 1.703270924499656e-05,
      "loss": 0.9055,
      "step": 37
    },
    {
      "epoch": 0.036987468061808006,
      "grad_norm": 1.4646563529968262,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.2314,
      "step": 38
    },
    {
      "epoch": 0.037960822484487164,
      "grad_norm": 0.12405002862215042,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 0.0059,
      "step": 39
    },
    {
      "epoch": 0.03893417690716632,
      "grad_norm": 0.14476650953292847,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.007,
      "step": 40
    },
    {
      "epoch": 0.03990753132984548,
      "grad_norm": 0.16240093111991882,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.0078,
      "step": 41
    },
    {
      "epoch": 0.04088088575252464,
      "grad_norm": 0.17413407564163208,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.0084,
      "step": 42
    },
    {
      "epoch": 0.0418542401752038,
      "grad_norm": 0.18335355818271637,
      "learning_rate": 5.156362923365588e-06,
      "loss": 0.0089,
      "step": 43
    },
    {
      "epoch": 0.042827594597882955,
      "grad_norm": 0.18472228944301605,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.009,
      "step": 44
    },
    {
      "epoch": 0.04380094902056211,
      "grad_norm": 0.1897263079881668,
      "learning_rate": 2.653493525244721e-06,
      "loss": 0.0092,
      "step": 45
    },
    {
      "epoch": 0.04477430344324127,
      "grad_norm": 0.1893337368965149,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.0091,
      "step": 46
    },
    {
      "epoch": 0.04574765786592043,
      "grad_norm": 0.18881848454475403,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.0091,
      "step": 47
    },
    {
      "epoch": 0.04672101228859959,
      "grad_norm": 1.1086373329162598,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.1602,
      "step": 48
    },
    {
      "epoch": 0.047694366711278746,
      "grad_norm": 5.398890972137451,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 0.6574,
      "step": 49
    },
    {
      "epoch": 0.048667721133957904,
      "grad_norm": 5.379086017608643,
      "learning_rate": 0.0,
      "loss": 0.6569,
      "step": 50
    },
    {
      "epoch": 0.048667721133957904,
      "eval_loss": 0.160928413271904,
      "eval_runtime": 62.7866,
      "eval_samples_per_second": 110.231,
      "eval_steps_per_second": 13.793,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.33416392081408e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}