{ |
|
"best_metric": 0.396539568901062, |
|
"best_model_checkpoint": "miner_id_24/checkpoint-2550", |
|
"epoch": 0.966824644549763, |
|
"eval_steps": 150, |
|
"global_step": 2550, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0003791469194312796, |
|
"eval_loss": 1.7795168161392212, |
|
"eval_runtime": 209.8549, |
|
"eval_samples_per_second": 21.167, |
|
"eval_steps_per_second": 5.294, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.0037914691943127963, |
|
"grad_norm": 2.3918707370758057, |
|
"learning_rate": 2e-05, |
|
"loss": 2.1878, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.007582938388625593, |
|
"grad_norm": 1.775721788406372, |
|
"learning_rate": 4e-05, |
|
"loss": 1.257, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.011374407582938388, |
|
"grad_norm": 1.3155328035354614, |
|
"learning_rate": 6e-05, |
|
"loss": 0.7683, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.015165876777251185, |
|
"grad_norm": 1.3514695167541504, |
|
"learning_rate": 8e-05, |
|
"loss": 0.5549, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.018957345971563982, |
|
"grad_norm": 1.1452535390853882, |
|
"learning_rate": 0.0001, |
|
"loss": 0.3957, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.022748815165876776, |
|
"grad_norm": 1.4056144952774048, |
|
"learning_rate": 9.999631611658893e-05, |
|
"loss": 1.3166, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.026540284360189573, |
|
"grad_norm": 1.0476895570755005, |
|
"learning_rate": 9.998526500919558e-05, |
|
"loss": 0.6856, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.03033175355450237, |
|
"grad_norm": 1.057721495628357, |
|
"learning_rate": 9.996684830625961e-05, |
|
"loss": 0.5876, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.034123222748815164, |
|
"grad_norm": 0.8883450627326965, |
|
"learning_rate": 9.99410687215805e-05, |
|
"loss": 0.5092, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.037914691943127965, |
|
"grad_norm": 1.0792176723480225, |
|
"learning_rate": 9.990793005391757e-05, |
|
"loss": 0.3727, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.04170616113744076, |
|
"grad_norm": 0.9899026155471802, |
|
"learning_rate": 9.986743718643037e-05, |
|
"loss": 1.0962, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.04549763033175355, |
|
"grad_norm": 0.9852607250213623, |
|
"learning_rate": 9.981959608595904e-05, |
|
"loss": 0.6074, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.04928909952606635, |
|
"grad_norm": 0.8935758471488953, |
|
"learning_rate": 9.976441380214499e-05, |
|
"loss": 0.5443, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.05308056872037915, |
|
"grad_norm": 0.8601885437965393, |
|
"learning_rate": 9.970189846639224e-05, |
|
"loss": 0.4588, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.05687203791469194, |
|
"grad_norm": 0.8278557658195496, |
|
"learning_rate": 9.963205929066912e-05, |
|
"loss": 0.3571, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.05687203791469194, |
|
"eval_loss": 0.6163225173950195, |
|
"eval_runtime": 211.3771, |
|
"eval_samples_per_second": 21.015, |
|
"eval_steps_per_second": 5.256, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.06066350710900474, |
|
"grad_norm": 0.9639042019844055, |
|
"learning_rate": 9.955490656615086e-05, |
|
"loss": 0.9907, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.06445497630331753, |
|
"grad_norm": 1.0571763515472412, |
|
"learning_rate": 9.947045166170315e-05, |
|
"loss": 0.6059, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.06824644549763033, |
|
"grad_norm": 0.8203420639038086, |
|
"learning_rate": 9.937870702220684e-05, |
|
"loss": 0.5407, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.07203791469194312, |
|
"grad_norm": 0.7809204459190369, |
|
"learning_rate": 9.927968616672416e-05, |
|
"loss": 0.4592, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.07582938388625593, |
|
"grad_norm": 0.6434981226921082, |
|
"learning_rate": 9.917340368650657e-05, |
|
"loss": 0.3295, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.07962085308056872, |
|
"grad_norm": 0.8934921026229858, |
|
"learning_rate": 9.905987524284471e-05, |
|
"loss": 0.9521, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.08341232227488152, |
|
"grad_norm": 0.8254252672195435, |
|
"learning_rate": 9.89391175647606e-05, |
|
"loss": 0.5738, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.08720379146919431, |
|
"grad_norm": 0.840071976184845, |
|
"learning_rate": 9.881114844654249e-05, |
|
"loss": 0.5222, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.0909952606635071, |
|
"grad_norm": 0.8142471313476562, |
|
"learning_rate": 9.867598674512288e-05, |
|
"loss": 0.4142, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.0947867298578199, |
|
"grad_norm": 0.6625562310218811, |
|
"learning_rate": 9.853365237729976e-05, |
|
"loss": 0.3158, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.0985781990521327, |
|
"grad_norm": 0.9738134145736694, |
|
"learning_rate": 9.838416631680176e-05, |
|
"loss": 0.9377, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.1023696682464455, |
|
"grad_norm": 0.919395387172699, |
|
"learning_rate": 9.822755059119765e-05, |
|
"loss": 0.5472, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.1061611374407583, |
|
"grad_norm": 0.9126551151275635, |
|
"learning_rate": 9.806382827865035e-05, |
|
"loss": 0.4959, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.10995260663507109, |
|
"grad_norm": 0.7662134766578674, |
|
"learning_rate": 9.78930235045163e-05, |
|
"loss": 0.4471, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.11374407582938388, |
|
"grad_norm": 0.6991143226623535, |
|
"learning_rate": 9.771516143779049e-05, |
|
"loss": 0.345, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.11374407582938388, |
|
"eval_loss": 0.5615507960319519, |
|
"eval_runtime": 210.9515, |
|
"eval_samples_per_second": 21.057, |
|
"eval_steps_per_second": 5.267, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.11753554502369669, |
|
"grad_norm": 0.8847883343696594, |
|
"learning_rate": 9.753026828739756e-05, |
|
"loss": 0.9371, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.12132701421800948, |
|
"grad_norm": 0.840414822101593, |
|
"learning_rate": 9.733837129832993e-05, |
|
"loss": 0.5609, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.12511848341232226, |
|
"grad_norm": 0.8552011251449585, |
|
"learning_rate": 9.713949874763296e-05, |
|
"loss": 0.4904, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.12890995260663507, |
|
"grad_norm": 0.7504507899284363, |
|
"learning_rate": 9.693367994023828e-05, |
|
"loss": 0.4276, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.13270142180094788, |
|
"grad_norm": 0.7728025317192078, |
|
"learning_rate": 9.672094520464552e-05, |
|
"loss": 0.3153, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.13649289099526066, |
|
"grad_norm": 0.8927388191223145, |
|
"learning_rate": 9.650132588845318e-05, |
|
"loss": 0.8539, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.14028436018957346, |
|
"grad_norm": 0.9280526638031006, |
|
"learning_rate": 9.627485435373948e-05, |
|
"loss": 0.5319, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.14407582938388624, |
|
"grad_norm": 0.8443691730499268, |
|
"learning_rate": 9.604156397229367e-05, |
|
"loss": 0.4799, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.14786729857819905, |
|
"grad_norm": 0.8244546055793762, |
|
"learning_rate": 9.580148912069836e-05, |
|
"loss": 0.4255, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.15165876777251186, |
|
"grad_norm": 0.587851881980896, |
|
"learning_rate": 9.555466517526405e-05, |
|
"loss": 0.3149, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.15545023696682464, |
|
"grad_norm": 0.9294399619102478, |
|
"learning_rate": 9.53011285068163e-05, |
|
"loss": 0.8398, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.15924170616113745, |
|
"grad_norm": 0.8756105303764343, |
|
"learning_rate": 9.50409164753362e-05, |
|
"loss": 0.5178, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.16303317535545023, |
|
"grad_norm": 0.7490562796592712, |
|
"learning_rate": 9.477406742445516e-05, |
|
"loss": 0.4677, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.16682464454976303, |
|
"grad_norm": 0.8731195330619812, |
|
"learning_rate": 9.450062067580488e-05, |
|
"loss": 0.4073, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.17061611374407584, |
|
"grad_norm": 0.7838053107261658, |
|
"learning_rate": 9.422061652322298e-05, |
|
"loss": 0.2986, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.17061611374407584, |
|
"eval_loss": 0.53005450963974, |
|
"eval_runtime": 211.0482, |
|
"eval_samples_per_second": 21.047, |
|
"eval_steps_per_second": 5.264, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.17440758293838862, |
|
"grad_norm": 0.840986430644989, |
|
"learning_rate": 9.393409622681559e-05, |
|
"loss": 0.8218, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.17819905213270143, |
|
"grad_norm": 0.8100599050521851, |
|
"learning_rate": 9.364110200687738e-05, |
|
"loss": 0.5342, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.1819905213270142, |
|
"grad_norm": 0.8114942908287048, |
|
"learning_rate": 9.33416770376702e-05, |
|
"loss": 0.4595, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.18578199052132702, |
|
"grad_norm": 0.6884726285934448, |
|
"learning_rate": 9.303586544106115e-05, |
|
"loss": 0.411, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.1895734597156398, |
|
"grad_norm": 0.8265155553817749, |
|
"learning_rate": 9.272371228002091e-05, |
|
"loss": 0.3095, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.1933649289099526, |
|
"grad_norm": 0.8282197117805481, |
|
"learning_rate": 9.240526355198353e-05, |
|
"loss": 0.8212, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.1971563981042654, |
|
"grad_norm": 0.783984899520874, |
|
"learning_rate": 9.208056618206853e-05, |
|
"loss": 0.5332, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.2009478672985782, |
|
"grad_norm": 0.7088342905044556, |
|
"learning_rate": 9.174966801616603e-05, |
|
"loss": 0.459, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.204739336492891, |
|
"grad_norm": 0.7358261942863464, |
|
"learning_rate": 9.141261781388664e-05, |
|
"loss": 0.3989, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.20853080568720378, |
|
"grad_norm": 0.686541736125946, |
|
"learning_rate": 9.10694652413763e-05, |
|
"loss": 0.3132, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.2123222748815166, |
|
"grad_norm": 0.8187770247459412, |
|
"learning_rate": 9.072026086399777e-05, |
|
"loss": 0.8469, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.2161137440758294, |
|
"grad_norm": 0.8380711674690247, |
|
"learning_rate": 9.03650561388796e-05, |
|
"loss": 0.5158, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.21990521327014217, |
|
"grad_norm": 0.7505501508712769, |
|
"learning_rate": 9.000390340733353e-05, |
|
"loss": 0.4408, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.22369668246445498, |
|
"grad_norm": 0.7203567028045654, |
|
"learning_rate": 8.963685588714185e-05, |
|
"loss": 0.3953, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.22748815165876776, |
|
"grad_norm": 0.6150539517402649, |
|
"learning_rate": 8.926396766471537e-05, |
|
"loss": 0.2903, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.22748815165876776, |
|
"eval_loss": 0.5122374892234802, |
|
"eval_runtime": 211.7681, |
|
"eval_samples_per_second": 20.976, |
|
"eval_steps_per_second": 5.246, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.23127962085308057, |
|
"grad_norm": 0.8199161291122437, |
|
"learning_rate": 8.888529368712357e-05, |
|
"loss": 0.8378, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.23507109004739338, |
|
"grad_norm": 0.7916120886802673, |
|
"learning_rate": 8.850088975399781e-05, |
|
"loss": 0.5298, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.23886255924170616, |
|
"grad_norm": 0.836801290512085, |
|
"learning_rate": 8.811081250930902e-05, |
|
"loss": 0.4505, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.24265402843601896, |
|
"grad_norm": 0.688791036605835, |
|
"learning_rate": 8.771511943302079e-05, |
|
"loss": 0.4163, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.24644549763033174, |
|
"grad_norm": 0.7080439925193787, |
|
"learning_rate": 8.731386883261952e-05, |
|
"loss": 0.3036, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.2502369668246445, |
|
"grad_norm": 0.7296505570411682, |
|
"learning_rate": 8.690711983452243e-05, |
|
"loss": 0.8379, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.25402843601895736, |
|
"grad_norm": 0.7905020117759705, |
|
"learning_rate": 8.649493237536499e-05, |
|
"loss": 0.5306, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.25781990521327014, |
|
"grad_norm": 0.8950763940811157, |
|
"learning_rate": 8.60773671931689e-05, |
|
"loss": 0.46, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.2616113744075829, |
|
"grad_norm": 0.7092387080192566, |
|
"learning_rate": 8.56544858183921e-05, |
|
"loss": 0.3971, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.26540284360189575, |
|
"grad_norm": 0.7552494406700134, |
|
"learning_rate": 8.522635056486181e-05, |
|
"loss": 0.2735, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.26919431279620853, |
|
"grad_norm": 0.7642441391944885, |
|
"learning_rate": 8.479302452059238e-05, |
|
"loss": 0.7899, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.2729857819905213, |
|
"grad_norm": 0.773184061050415, |
|
"learning_rate": 8.435457153848887e-05, |
|
"loss": 0.5101, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.27677725118483415, |
|
"grad_norm": 0.8441540002822876, |
|
"learning_rate": 8.391105622693793e-05, |
|
"loss": 0.4299, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.28056872037914693, |
|
"grad_norm": 0.6840046048164368, |
|
"learning_rate": 8.346254394028754e-05, |
|
"loss": 0.3795, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.2843601895734597, |
|
"grad_norm": 0.5915653705596924, |
|
"learning_rate": 8.30091007692166e-05, |
|
"loss": 0.2805, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.2843601895734597, |
|
"eval_loss": 0.49101725220680237, |
|
"eval_runtime": 211.5623, |
|
"eval_samples_per_second": 20.996, |
|
"eval_steps_per_second": 5.251, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.2881516587677725, |
|
"grad_norm": 0.8108296394348145, |
|
"learning_rate": 8.255079353099611e-05, |
|
"loss": 0.7564, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.2919431279620853, |
|
"grad_norm": 0.778976559638977, |
|
"learning_rate": 8.208768975964338e-05, |
|
"loss": 0.5116, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.2957345971563981, |
|
"grad_norm": 0.7423689961433411, |
|
"learning_rate": 8.161985769597045e-05, |
|
"loss": 0.4358, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.2995260663507109, |
|
"grad_norm": 0.7508371472358704, |
|
"learning_rate": 8.114736627752846e-05, |
|
"loss": 0.3686, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.3033175355450237, |
|
"grad_norm": 0.5939842462539673, |
|
"learning_rate": 8.067028512844929e-05, |
|
"loss": 0.2847, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.3071090047393365, |
|
"grad_norm": 0.8010008931159973, |
|
"learning_rate": 8.018868454918627e-05, |
|
"loss": 0.7743, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.3109004739336493, |
|
"grad_norm": 0.8660693764686584, |
|
"learning_rate": 7.970263550615469e-05, |
|
"loss": 0.5334, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 0.31469194312796206, |
|
"grad_norm": 0.708128035068512, |
|
"learning_rate": 7.921220962127487e-05, |
|
"loss": 0.4534, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 0.3184834123222749, |
|
"grad_norm": 0.7724855542182922, |
|
"learning_rate": 7.871747916141808e-05, |
|
"loss": 0.3683, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 0.3222748815165877, |
|
"grad_norm": 0.6242368817329407, |
|
"learning_rate": 7.821851702775765e-05, |
|
"loss": 0.2945, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.32606635071090045, |
|
"grad_norm": 0.8044713139533997, |
|
"learning_rate": 7.771539674502667e-05, |
|
"loss": 0.7826, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 0.3298578199052133, |
|
"grad_norm": 0.7477179765701294, |
|
"learning_rate": 7.720819245068368e-05, |
|
"loss": 0.4976, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 0.33364928909952607, |
|
"grad_norm": 0.7895752191543579, |
|
"learning_rate": 7.669697888398812e-05, |
|
"loss": 0.4432, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 0.33744075829383885, |
|
"grad_norm": 0.7435291409492493, |
|
"learning_rate": 7.618183137498709e-05, |
|
"loss": 0.3796, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 0.3412322274881517, |
|
"grad_norm": 0.8892961144447327, |
|
"learning_rate": 7.56628258334151e-05, |
|
"loss": 0.2694, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.3412322274881517, |
|
"eval_loss": 0.48085248470306396, |
|
"eval_runtime": 211.2971, |
|
"eval_samples_per_second": 21.023, |
|
"eval_steps_per_second": 5.258, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.34502369668246446, |
|
"grad_norm": 0.7861023545265198, |
|
"learning_rate": 7.514003873750836e-05, |
|
"loss": 0.7591, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 0.34881516587677724, |
|
"grad_norm": 0.7269836068153381, |
|
"learning_rate": 7.461354712273526e-05, |
|
"loss": 0.502, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 0.35260663507109, |
|
"grad_norm": 0.7629136443138123, |
|
"learning_rate": 7.408342857044484e-05, |
|
"loss": 0.4215, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 0.35639810426540286, |
|
"grad_norm": 0.668658435344696, |
|
"learning_rate": 7.354976119643472e-05, |
|
"loss": 0.3744, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 0.36018957345971564, |
|
"grad_norm": 0.6049548387527466, |
|
"learning_rate": 7.301262363944035e-05, |
|
"loss": 0.2709, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 0.3639810426540284, |
|
"grad_norm": 0.8917579650878906, |
|
"learning_rate": 7.247209504954715e-05, |
|
"loss": 0.7532, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 0.36777251184834125, |
|
"grad_norm": 0.7818393111228943, |
|
"learning_rate": 7.192825507652734e-05, |
|
"loss": 0.4742, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 0.37156398104265403, |
|
"grad_norm": 0.7396854162216187, |
|
"learning_rate": 7.138118385810313e-05, |
|
"loss": 0.4385, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 0.3753554502369668, |
|
"grad_norm": 0.8227097392082214, |
|
"learning_rate": 7.083096200813794e-05, |
|
"loss": 0.3656, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 0.3791469194312796, |
|
"grad_norm": 0.6364562511444092, |
|
"learning_rate": 7.027767060475764e-05, |
|
"loss": 0.2728, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.38293838862559243, |
|
"grad_norm": 0.7405619025230408, |
|
"learning_rate": 6.972139117840307e-05, |
|
"loss": 0.7329, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 0.3867298578199052, |
|
"grad_norm": 0.761043131351471, |
|
"learning_rate": 6.91622056998163e-05, |
|
"loss": 0.4928, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 0.390521327014218, |
|
"grad_norm": 0.7283722758293152, |
|
"learning_rate": 6.860019656796163e-05, |
|
"loss": 0.4324, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 0.3943127962085308, |
|
"grad_norm": 0.6700026392936707, |
|
"learning_rate": 6.80354465978838e-05, |
|
"loss": 0.371, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 0.3981042654028436, |
|
"grad_norm": 0.6622222065925598, |
|
"learning_rate": 6.746803900850462e-05, |
|
"loss": 0.2729, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 0.3981042654028436, |
|
"eval_loss": 0.4649004638195038, |
|
"eval_runtime": 211.8681, |
|
"eval_samples_per_second": 20.966, |
|
"eval_steps_per_second": 5.244, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 0.4018957345971564, |
|
"grad_norm": 0.7516476511955261, |
|
"learning_rate": 6.689805741036036e-05, |
|
"loss": 0.7771, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 0.4056872037914692, |
|
"grad_norm": 0.7608417272567749, |
|
"learning_rate": 6.632558579328114e-05, |
|
"loss": 0.4676, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 0.409478672985782, |
|
"grad_norm": 0.6563961505889893, |
|
"learning_rate": 6.575070851401475e-05, |
|
"loss": 0.4352, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 0.4132701421800948, |
|
"grad_norm": 0.6930544376373291, |
|
"learning_rate": 6.517351028379603e-05, |
|
"loss": 0.3728, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 0.41706161137440756, |
|
"grad_norm": 0.5903217792510986, |
|
"learning_rate": 6.459407615586446e-05, |
|
"loss": 0.2605, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.4208530805687204, |
|
"grad_norm": 0.7866091728210449, |
|
"learning_rate": 6.401249151293084e-05, |
|
"loss": 0.7243, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 0.4246445497630332, |
|
"grad_norm": 0.7677907347679138, |
|
"learning_rate": 6.342884205459594e-05, |
|
"loss": 0.4912, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 0.42843601895734595, |
|
"grad_norm": 0.7357597947120667, |
|
"learning_rate": 6.284321378472204e-05, |
|
"loss": 0.4106, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 0.4322274881516588, |
|
"grad_norm": 0.6618098020553589, |
|
"learning_rate": 6.225569299875989e-05, |
|
"loss": 0.3532, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 0.43601895734597157, |
|
"grad_norm": 0.7327076196670532, |
|
"learning_rate": 6.166636627103256e-05, |
|
"loss": 0.2859, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 0.43981042654028435, |
|
"grad_norm": 0.7591701149940491, |
|
"learning_rate": 6.107532044197828e-05, |
|
"loss": 0.726, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 0.44360189573459713, |
|
"grad_norm": 0.7473875284194946, |
|
"learning_rate": 6.0482642605354075e-05, |
|
"loss": 0.4789, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 0.44739336492890996, |
|
"grad_norm": 0.7394768595695496, |
|
"learning_rate": 5.988842009540194e-05, |
|
"loss": 0.4183, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 0.45118483412322274, |
|
"grad_norm": 0.662466287612915, |
|
"learning_rate": 5.929274047397977e-05, |
|
"loss": 0.3523, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 0.4549763033175355, |
|
"grad_norm": 0.66697096824646, |
|
"learning_rate": 5.869569151765857e-05, |
|
"loss": 0.2846, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.4549763033175355, |
|
"eval_loss": 0.45658260583877563, |
|
"eval_runtime": 211.0895, |
|
"eval_samples_per_second": 21.043, |
|
"eval_steps_per_second": 5.263, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.45876777251184836, |
|
"grad_norm": 0.8443006277084351, |
|
"learning_rate": 5.809736120478817e-05, |
|
"loss": 0.7046, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 0.46255924170616114, |
|
"grad_norm": 0.7299622893333435, |
|
"learning_rate": 5.749783770253315e-05, |
|
"loss": 0.4708, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 0.4663507109004739, |
|
"grad_norm": 0.7422939538955688, |
|
"learning_rate": 5.6897209353880885e-05, |
|
"loss": 0.4123, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 0.47014218009478675, |
|
"grad_norm": 0.723355770111084, |
|
"learning_rate": 5.629556466462376e-05, |
|
"loss": 0.3656, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 0.47393364928909953, |
|
"grad_norm": 0.611301839351654, |
|
"learning_rate": 5.5692992290317366e-05, |
|
"loss": 0.2865, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 0.4777251184834123, |
|
"grad_norm": 0.7408523559570312, |
|
"learning_rate": 5.508958102321666e-05, |
|
"loss": 0.7044, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 0.4815165876777251, |
|
"grad_norm": 0.779376745223999, |
|
"learning_rate": 5.448541977919195e-05, |
|
"loss": 0.4626, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 0.48530805687203793, |
|
"grad_norm": 0.824704110622406, |
|
"learning_rate": 5.388059758462658e-05, |
|
"loss": 0.4017, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 0.4890995260663507, |
|
"grad_norm": 0.7132182717323303, |
|
"learning_rate": 5.327520356329853e-05, |
|
"loss": 0.3587, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 0.4928909952606635, |
|
"grad_norm": 0.6510176062583923, |
|
"learning_rate": 5.266932692324747e-05, |
|
"loss": 0.2709, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 0.4966824644549763, |
|
"grad_norm": 0.8185328245162964, |
|
"learning_rate": 5.206305694362959e-05, |
|
"loss": 0.6967, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 0.500473933649289, |
|
"grad_norm": 0.8479395508766174, |
|
"learning_rate": 5.1456482961561656e-05, |
|
"loss": 0.4754, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 0.5042654028436019, |
|
"grad_norm": 0.7257512211799622, |
|
"learning_rate": 5.084969435895691e-05, |
|
"loss": 0.4183, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 0.5080568720379147, |
|
"grad_norm": 0.6470847725868225, |
|
"learning_rate": 5.024278054935403e-05, |
|
"loss": 0.3574, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 0.5118483412322274, |
|
"grad_norm": 0.5835993885993958, |
|
"learning_rate": 4.963583096474159e-05, |
|
"loss": 0.2612, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 0.5118483412322274, |
|
"eval_loss": 0.4398421049118042, |
|
"eval_runtime": 211.1019, |
|
"eval_samples_per_second": 21.042, |
|
"eval_steps_per_second": 5.263, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 0.5156398104265403, |
|
"grad_norm": 0.7083640694618225, |
|
"learning_rate": 4.9028935042379894e-05, |
|
"loss": 0.7199, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 0.5194312796208531, |
|
"grad_norm": 0.7591987252235413, |
|
"learning_rate": 4.842218221162174e-05, |
|
"loss": 0.4614, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 0.5232227488151658, |
|
"grad_norm": 0.6956468820571899, |
|
"learning_rate": 4.78156618807346e-05, |
|
"loss": 0.4092, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 0.5270142180094787, |
|
"grad_norm": 0.6942465901374817, |
|
"learning_rate": 4.720946342372596e-05, |
|
"loss": 0.3517, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 0.5308056872037915, |
|
"grad_norm": 0.6259123086929321, |
|
"learning_rate": 4.660367616717337e-05, |
|
"loss": 0.258, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 0.5345971563981042, |
|
"grad_norm": 0.7933112978935242, |
|
"learning_rate": 4.599838937706183e-05, |
|
"loss": 0.6823, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 0.5383886255924171, |
|
"grad_norm": 0.8216676115989685, |
|
"learning_rate": 4.5393692245629936e-05, |
|
"loss": 0.4629, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 0.5421800947867299, |
|
"grad_norm": 0.6931918263435364, |
|
"learning_rate": 4.478967387822697e-05, |
|
"loss": 0.402, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 0.5459715639810426, |
|
"grad_norm": 0.6871898174285889, |
|
"learning_rate": 4.418642328018265e-05, |
|
"loss": 0.3666, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 0.5497630331753555, |
|
"grad_norm": 0.7060806751251221, |
|
"learning_rate": 4.3584029343691805e-05, |
|
"loss": 0.2629, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 0.5535545023696683, |
|
"grad_norm": 0.7377336025238037, |
|
"learning_rate": 4.298258083471563e-05, |
|
"loss": 0.7025, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 0.557345971563981, |
|
"grad_norm": 0.718527615070343, |
|
"learning_rate": 4.238216637990152e-05, |
|
"loss": 0.4684, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 0.5611374407582939, |
|
"grad_norm": 0.7513172626495361, |
|
"learning_rate": 4.178287445352348e-05, |
|
"loss": 0.4107, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 0.5649289099526066, |
|
"grad_norm": 0.6840953826904297, |
|
"learning_rate": 4.118479336444492e-05, |
|
"loss": 0.3441, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 0.5687203791469194, |
|
"grad_norm": 0.5901861190795898, |
|
"learning_rate": 4.058801124310595e-05, |
|
"loss": 0.2717, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 0.5687203791469194, |
|
"eval_loss": 0.43063661456108093, |
|
"eval_runtime": 211.4062, |
|
"eval_samples_per_second": 21.012, |
|
"eval_steps_per_second": 5.255, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 0.5725118483412323, |
|
"grad_norm": 0.7785443067550659, |
|
"learning_rate": 3.999261602853686e-05, |
|
"loss": 0.6699, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 0.576303317535545, |
|
"grad_norm": 0.6982720494270325, |
|
"learning_rate": 3.93986954553998e-05, |
|
"loss": 0.4564, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 0.5800947867298578, |
|
"grad_norm": 0.7283275127410889, |
|
"learning_rate": 3.880633704106066e-05, |
|
"loss": 0.4075, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 0.5838862559241706, |
|
"grad_norm": 0.7250307202339172, |
|
"learning_rate": 3.821562807269284e-05, |
|
"loss": 0.3547, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 0.5876777251184834, |
|
"grad_norm": 0.8150220513343811, |
|
"learning_rate": 3.762665559441513e-05, |
|
"loss": 0.2715, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 0.5914691943127962, |
|
"grad_norm": 0.6675704717636108, |
|
"learning_rate": 3.703950639446525e-05, |
|
"loss": 0.6556, |
|
"step": 1560 |
|
}, |
|
{ |
|
"epoch": 0.595260663507109, |
|
"grad_norm": 0.7834139466285706, |
|
"learning_rate": 3.6454266992411084e-05, |
|
"loss": 0.4595, |
|
"step": 1570 |
|
}, |
|
{ |
|
"epoch": 0.5990521327014218, |
|
"grad_norm": 0.6742093563079834, |
|
"learning_rate": 3.5871023626401605e-05, |
|
"loss": 0.3832, |
|
"step": 1580 |
|
}, |
|
{ |
|
"epoch": 0.6028436018957346, |
|
"grad_norm": 0.6419682502746582, |
|
"learning_rate": 3.5289862240459255e-05, |
|
"loss": 0.3495, |
|
"step": 1590 |
|
}, |
|
{ |
|
"epoch": 0.6066350710900474, |
|
"grad_norm": 0.6275917291641235, |
|
"learning_rate": 3.4710868471815586e-05, |
|
"loss": 0.2515, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 0.6104265402843602, |
|
"grad_norm": 0.7566835284233093, |
|
"learning_rate": 3.413412763829218e-05, |
|
"loss": 0.6413, |
|
"step": 1610 |
|
}, |
|
{ |
|
"epoch": 0.614218009478673, |
|
"grad_norm": 0.6755979657173157, |
|
"learning_rate": 3.355972472572859e-05, |
|
"loss": 0.4296, |
|
"step": 1620 |
|
}, |
|
{ |
|
"epoch": 0.6180094786729858, |
|
"grad_norm": 0.7301715612411499, |
|
"learning_rate": 3.298774437545924e-05, |
|
"loss": 0.4115, |
|
"step": 1630 |
|
}, |
|
{ |
|
"epoch": 0.6218009478672986, |
|
"grad_norm": 0.6284864544868469, |
|
"learning_rate": 3.241827087184112e-05, |
|
"loss": 0.3517, |
|
"step": 1640 |
|
}, |
|
{ |
|
"epoch": 0.6255924170616114, |
|
"grad_norm": 0.6837667226791382, |
|
"learning_rate": 3.185138812983393e-05, |
|
"loss": 0.2677, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 0.6255924170616114, |
|
"eval_loss": 0.4205625653266907, |
|
"eval_runtime": 211.4182, |
|
"eval_samples_per_second": 21.01, |
|
"eval_steps_per_second": 5.255, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 0.6293838862559241, |
|
"grad_norm": 0.7050827741622925, |
|
"learning_rate": 3.128717968263484e-05, |
|
"loss": 0.6622, |
|
"step": 1660 |
|
}, |
|
{ |
|
"epoch": 0.633175355450237, |
|
"grad_norm": 0.7003112435340881, |
|
"learning_rate": 3.072572866936939e-05, |
|
"loss": 0.4541, |
|
"step": 1670 |
|
}, |
|
{ |
|
"epoch": 0.6369668246445498, |
|
"grad_norm": 0.7300443053245544, |
|
"learning_rate": 3.016711782284058e-05, |
|
"loss": 0.4091, |
|
"step": 1680 |
|
}, |
|
{ |
|
"epoch": 0.6407582938388625, |
|
"grad_norm": 0.6804115176200867, |
|
"learning_rate": 2.9611429457337613e-05, |
|
"loss": 0.3318, |
|
"step": 1690 |
|
}, |
|
{ |
|
"epoch": 0.6445497630331753, |
|
"grad_norm": 0.6225182414054871, |
|
"learning_rate": 2.905874545650656e-05, |
|
"loss": 0.2427, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 0.6483412322274882, |
|
"grad_norm": 0.7422319650650024, |
|
"learning_rate": 2.8509147261284287e-05, |
|
"loss": 0.6641, |
|
"step": 1710 |
|
}, |
|
{ |
|
"epoch": 0.6521327014218009, |
|
"grad_norm": 0.7763936519622803, |
|
"learning_rate": 2.796271585789778e-05, |
|
"loss": 0.4526, |
|
"step": 1720 |
|
}, |
|
{ |
|
"epoch": 0.6559241706161137, |
|
"grad_norm": 0.6862651705741882, |
|
"learning_rate": 2.7419531765930324e-05, |
|
"loss": 0.3974, |
|
"step": 1730 |
|
}, |
|
{ |
|
"epoch": 0.6597156398104266, |
|
"grad_norm": 0.6718897223472595, |
|
"learning_rate": 2.6879675026456553e-05, |
|
"loss": 0.3427, |
|
"step": 1740 |
|
}, |
|
{ |
|
"epoch": 0.6635071090047393, |
|
"grad_norm": 0.5931557416915894, |
|
"learning_rate": 2.634322519024791e-05, |
|
"loss": 0.2467, |
|
"step": 1750 |
|
}, |
|
{ |
|
"epoch": 0.6672985781990521, |
|
"grad_norm": 0.8312835097312927, |
|
"learning_rate": 2.58102613060505e-05, |
|
"loss": 0.6196, |
|
"step": 1760 |
|
}, |
|
{ |
|
"epoch": 0.671090047393365, |
|
"grad_norm": 0.7139614224433899, |
|
"learning_rate": 2.5280861908936843e-05, |
|
"loss": 0.4495, |
|
"step": 1770 |
|
}, |
|
{ |
|
"epoch": 0.6748815165876777, |
|
"grad_norm": 0.7517569065093994, |
|
"learning_rate": 2.4755105008733154e-05, |
|
"loss": 0.4067, |
|
"step": 1780 |
|
}, |
|
{ |
|
"epoch": 0.6786729857819905, |
|
"grad_norm": 0.7141692638397217, |
|
"learning_rate": 2.4233068078524375e-05, |
|
"loss": 0.3508, |
|
"step": 1790 |
|
}, |
|
{ |
|
"epoch": 0.6824644549763034, |
|
"grad_norm": 0.739683985710144, |
|
"learning_rate": 2.371482804323798e-05, |
|
"loss": 0.2417, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 0.6824644549763034, |
|
"eval_loss": 0.41478267312049866, |
|
"eval_runtime": 211.5519, |
|
"eval_samples_per_second": 20.997, |
|
"eval_steps_per_second": 5.252, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 0.6862559241706161, |
|
"grad_norm": 0.7674782872200012, |
|
"learning_rate": 2.3200461268308744e-05, |
|
"loss": 0.6349, |
|
"step": 1810 |
|
}, |
|
{ |
|
"epoch": 0.6900473933649289, |
|
"grad_norm": 0.776074230670929, |
|
"learning_rate": 2.269004354842585e-05, |
|
"loss": 0.4471, |
|
"step": 1820 |
|
}, |
|
{ |
|
"epoch": 0.6938388625592417, |
|
"grad_norm": 0.7232591509819031, |
|
"learning_rate": 2.2183650096364095e-05, |
|
"loss": 0.3975, |
|
"step": 1830 |
|
}, |
|
{ |
|
"epoch": 0.6976303317535545, |
|
"grad_norm": 0.6470640897750854, |
|
"learning_rate": 2.1681355531901016e-05, |
|
"loss": 0.342, |
|
"step": 1840 |
|
}, |
|
{ |
|
"epoch": 0.7014218009478673, |
|
"grad_norm": 0.5892198085784912, |
|
"learning_rate": 2.118323387082114e-05, |
|
"loss": 0.2566, |
|
"step": 1850 |
|
}, |
|
{ |
|
"epoch": 0.70521327014218, |
|
"grad_norm": 0.7599971890449524, |
|
"learning_rate": 2.0689358514009425e-05, |
|
"loss": 0.6659, |
|
"step": 1860 |
|
}, |
|
{ |
|
"epoch": 0.7090047393364929, |
|
"grad_norm": 0.717060387134552, |
|
"learning_rate": 2.0199802236635257e-05, |
|
"loss": 0.4457, |
|
"step": 1870 |
|
}, |
|
{ |
|
"epoch": 0.7127962085308057, |
|
"grad_norm": 0.6696526408195496, |
|
"learning_rate": 1.971463717742854e-05, |
|
"loss": 0.4077, |
|
"step": 1880 |
|
}, |
|
{ |
|
"epoch": 0.7165876777251184, |
|
"grad_norm": 0.7238730192184448, |
|
"learning_rate": 1.9233934828049884e-05, |
|
"loss": 0.3367, |
|
"step": 1890 |
|
}, |
|
{ |
|
"epoch": 0.7203791469194313, |
|
"grad_norm": 0.6004287600517273, |
|
"learning_rate": 1.875776602255564e-05, |
|
"loss": 0.2508, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 0.7241706161137441, |
|
"grad_norm": 0.7491863965988159, |
|
"learning_rate": 1.828620092696038e-05, |
|
"loss": 0.6121, |
|
"step": 1910 |
|
}, |
|
{ |
|
"epoch": 0.7279620853080568, |
|
"grad_norm": 0.7434119582176208, |
|
"learning_rate": 1.781930902889741e-05, |
|
"loss": 0.4215, |
|
"step": 1920 |
|
}, |
|
{ |
|
"epoch": 0.7317535545023697, |
|
"grad_norm": 0.6918310523033142, |
|
"learning_rate": 1.735715912737946e-05, |
|
"loss": 0.3903, |
|
"step": 1930 |
|
}, |
|
{ |
|
"epoch": 0.7355450236966825, |
|
"grad_norm": 0.6157026886940002, |
|
"learning_rate": 1.6899819322660744e-05, |
|
"loss": 0.3268, |
|
"step": 1940 |
|
}, |
|
{ |
|
"epoch": 0.7393364928909952, |
|
"grad_norm": 0.757264256477356, |
|
"learning_rate": 1.6447357006202074e-05, |
|
"loss": 0.2544, |
|
"step": 1950 |
|
}, |
|
{ |
|
"epoch": 0.7393364928909952, |
|
"eval_loss": 0.40772125124931335, |
|
"eval_runtime": 211.58, |
|
"eval_samples_per_second": 20.994, |
|
"eval_steps_per_second": 5.251, |
|
"step": 1950 |
|
}, |
|
{ |
|
"epoch": 0.7431279620853081, |
|
"grad_norm": 0.7177047729492188, |
|
"learning_rate": 1.5999838850740295e-05, |
|
"loss": 0.5998, |
|
"step": 1960 |
|
}, |
|
{ |
|
"epoch": 0.7469194312796209, |
|
"grad_norm": 0.7692488431930542, |
|
"learning_rate": 1.555733080046378e-05, |
|
"loss": 0.4557, |
|
"step": 1970 |
|
}, |
|
{ |
|
"epoch": 0.7507109004739336, |
|
"grad_norm": 0.7131124138832092, |
|
"learning_rate": 1.5119898061295162e-05, |
|
"loss": 0.3787, |
|
"step": 1980 |
|
}, |
|
{ |
|
"epoch": 0.7545023696682465, |
|
"grad_norm": 0.7252326011657715, |
|
"learning_rate": 1.468760509128288e-05, |
|
"loss": 0.3477, |
|
"step": 1990 |
|
}, |
|
{ |
|
"epoch": 0.7582938388625592, |
|
"grad_norm": 0.6361256241798401, |
|
"learning_rate": 1.4260515591103002e-05, |
|
"loss": 0.2685, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.762085308056872, |
|
"grad_norm": 0.741794228553772, |
|
"learning_rate": 1.3838692494672462e-05, |
|
"loss": 0.6092, |
|
"step": 2010 |
|
}, |
|
{ |
|
"epoch": 0.7658767772511849, |
|
"grad_norm": 0.7934831976890564, |
|
"learning_rate": 1.3422197959875615e-05, |
|
"loss": 0.4335, |
|
"step": 2020 |
|
}, |
|
{ |
|
"epoch": 0.7696682464454976, |
|
"grad_norm": 0.7226603031158447, |
|
"learning_rate": 1.3011093359404725e-05, |
|
"loss": 0.3835, |
|
"step": 2030 |
|
}, |
|
{ |
|
"epoch": 0.7734597156398104, |
|
"grad_norm": 0.7291485667228699, |
|
"learning_rate": 1.2605439271716517e-05, |
|
"loss": 0.3368, |
|
"step": 2040 |
|
}, |
|
{ |
|
"epoch": 0.7772511848341233, |
|
"grad_norm": 0.47125428915023804, |
|
"learning_rate": 1.220529547210556e-05, |
|
"loss": 0.239, |
|
"step": 2050 |
|
}, |
|
{ |
|
"epoch": 0.781042654028436, |
|
"grad_norm": 0.7346218824386597, |
|
"learning_rate": 1.181072092389608e-05, |
|
"loss": 0.5992, |
|
"step": 2060 |
|
}, |
|
{ |
|
"epoch": 0.7848341232227488, |
|
"grad_norm": 0.7117639780044556, |
|
"learning_rate": 1.1421773769753386e-05, |
|
"loss": 0.4201, |
|
"step": 2070 |
|
}, |
|
{ |
|
"epoch": 0.7886255924170616, |
|
"grad_norm": 0.7014518976211548, |
|
"learning_rate": 1.1038511323116208e-05, |
|
"loss": 0.387, |
|
"step": 2080 |
|
}, |
|
{ |
|
"epoch": 0.7924170616113744, |
|
"grad_norm": 0.6080646514892578, |
|
"learning_rate": 1.066099005975132e-05, |
|
"loss": 0.3142, |
|
"step": 2090 |
|
}, |
|
{ |
|
"epoch": 0.7962085308056872, |
|
"grad_norm": 0.7223226428031921, |
|
"learning_rate": 1.0289265609431536e-05, |
|
"loss": 0.2375, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 0.7962085308056872, |
|
"eval_loss": 0.40230637788772583, |
|
"eval_runtime": 211.0367, |
|
"eval_samples_per_second": 21.048, |
|
"eval_steps_per_second": 5.264, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.7154859900474548, |
|
"learning_rate": 9.923392747738264e-06, |
|
"loss": 0.6162, |
|
"step": 2110 |
|
}, |
|
{ |
|
"epoch": 0.8037914691943128, |
|
"grad_norm": 0.7427191734313965, |
|
"learning_rate": 9.563425387990149e-06, |
|
"loss": 0.4395, |
|
"step": 2120 |
|
}, |
|
{ |
|
"epoch": 0.8075829383886256, |
|
"grad_norm": 0.6783840656280518, |
|
"learning_rate": 9.209416573298567e-06, |
|
"loss": 0.3985, |
|
"step": 2130 |
|
}, |
|
{ |
|
"epoch": 0.8113744075829384, |
|
"grad_norm": 0.7031378149986267, |
|
"learning_rate": 8.861418468751542e-06, |
|
"loss": 0.3239, |
|
"step": 2140 |
|
}, |
|
{ |
|
"epoch": 0.8151658767772512, |
|
"grad_norm": 0.7050457000732422, |
|
"learning_rate": 8.51948235372686e-06, |
|
"loss": 0.2472, |
|
"step": 2150 |
|
}, |
|
{ |
|
"epoch": 0.818957345971564, |
|
"grad_norm": 0.7675678133964539, |
|
"learning_rate": 8.183658614335754e-06, |
|
"loss": 0.6221, |
|
"step": 2160 |
|
}, |
|
{ |
|
"epoch": 0.8227488151658767, |
|
"grad_norm": 0.7096587419509888, |
|
"learning_rate": 7.853996735998353e-06, |
|
"loss": 0.404, |
|
"step": 2170 |
|
}, |
|
{ |
|
"epoch": 0.8265402843601896, |
|
"grad_norm": 0.7109266519546509, |
|
"learning_rate": 7.530545296151642e-06, |
|
"loss": 0.3859, |
|
"step": 2180 |
|
}, |
|
{ |
|
"epoch": 0.8303317535545024, |
|
"grad_norm": 0.6554428339004517, |
|
"learning_rate": 7.213351957091369e-06, |
|
"loss": 0.3142, |
|
"step": 2190 |
|
}, |
|
{ |
|
"epoch": 0.8341232227488151, |
|
"grad_norm": 0.5105351805686951, |
|
"learning_rate": 6.902463458948738e-06, |
|
"loss": 0.2497, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 0.837914691943128, |
|
"grad_norm": 0.8070971965789795, |
|
"learning_rate": 6.597925612802969e-06, |
|
"loss": 0.6438, |
|
"step": 2210 |
|
}, |
|
{ |
|
"epoch": 0.8417061611374408, |
|
"grad_norm": 0.8927067518234253, |
|
"learning_rate": 6.299783293930844e-06, |
|
"loss": 0.4307, |
|
"step": 2220 |
|
}, |
|
{ |
|
"epoch": 0.8454976303317535, |
|
"grad_norm": 0.6909337043762207, |
|
"learning_rate": 6.008080435194074e-06, |
|
"loss": 0.3953, |
|
"step": 2230 |
|
}, |
|
{ |
|
"epoch": 0.8492890995260663, |
|
"grad_norm": 0.7040910720825195, |
|
"learning_rate": 5.722860020565551e-06, |
|
"loss": 0.3385, |
|
"step": 2240 |
|
}, |
|
{ |
|
"epoch": 0.8530805687203792, |
|
"grad_norm": 0.5680156350135803, |
|
"learning_rate": 5.444164078795444e-06, |
|
"loss": 0.247, |
|
"step": 2250 |
|
}, |
|
{ |
|
"epoch": 0.8530805687203792, |
|
"eval_loss": 0.39911842346191406, |
|
"eval_runtime": 211.2832, |
|
"eval_samples_per_second": 21.024, |
|
"eval_steps_per_second": 5.258, |
|
"step": 2250 |
|
}, |
|
{ |
|
"epoch": 0.8568720379146919, |
|
"grad_norm": 0.7242756485939026, |
|
"learning_rate": 5.17203367721798e-06, |
|
"loss": 0.617, |
|
"step": 2260 |
|
}, |
|
{ |
|
"epoch": 0.8606635071090047, |
|
"grad_norm": 0.7452470660209656, |
|
"learning_rate": 4.906508915700081e-06, |
|
"loss": 0.4308, |
|
"step": 2270 |
|
}, |
|
{ |
|
"epoch": 0.8644549763033176, |
|
"grad_norm": 0.6811374425888062, |
|
"learning_rate": 4.647628920732294e-06, |
|
"loss": 0.368, |
|
"step": 2280 |
|
}, |
|
{ |
|
"epoch": 0.8682464454976303, |
|
"grad_norm": 0.6746857762336731, |
|
"learning_rate": 4.395431839663378e-06, |
|
"loss": 0.3232, |
|
"step": 2290 |
|
}, |
|
{ |
|
"epoch": 0.8720379146919431, |
|
"grad_norm": 0.6928372979164124, |
|
"learning_rate": 4.149954835079067e-06, |
|
"loss": 0.2431, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 0.875829383886256, |
|
"grad_norm": 0.6891263127326965, |
|
"learning_rate": 3.911234079325954e-06, |
|
"loss": 0.6067, |
|
"step": 2310 |
|
}, |
|
{ |
|
"epoch": 0.8796208530805687, |
|
"grad_norm": 0.7079217433929443, |
|
"learning_rate": 3.679304749181328e-06, |
|
"loss": 0.4293, |
|
"step": 2320 |
|
}, |
|
{ |
|
"epoch": 0.8834123222748815, |
|
"grad_norm": 0.7635899782180786, |
|
"learning_rate": 3.4542010206696674e-06, |
|
"loss": 0.3754, |
|
"step": 2330 |
|
}, |
|
{ |
|
"epoch": 0.8872037914691943, |
|
"grad_norm": 0.710638165473938, |
|
"learning_rate": 3.2359560640265963e-06, |
|
"loss": 0.3347, |
|
"step": 2340 |
|
}, |
|
{ |
|
"epoch": 0.8909952606635071, |
|
"grad_norm": 0.5463706851005554, |
|
"learning_rate": 3.024602038811164e-06, |
|
"loss": 0.2286, |
|
"step": 2350 |
|
}, |
|
{ |
|
"epoch": 0.8947867298578199, |
|
"grad_norm": 0.7083713412284851, |
|
"learning_rate": 2.820170089166835e-06, |
|
"loss": 0.6061, |
|
"step": 2360 |
|
}, |
|
{ |
|
"epoch": 0.8985781990521327, |
|
"grad_norm": 0.8036705255508423, |
|
"learning_rate": 2.622690339232342e-06, |
|
"loss": 0.4524, |
|
"step": 2370 |
|
}, |
|
{ |
|
"epoch": 0.9023696682464455, |
|
"grad_norm": 0.6927635073661804, |
|
"learning_rate": 2.432191888702684e-06, |
|
"loss": 0.3962, |
|
"step": 2380 |
|
}, |
|
{ |
|
"epoch": 0.9061611374407583, |
|
"grad_norm": 0.686084508895874, |
|
"learning_rate": 2.2487028085411035e-06, |
|
"loss": 0.3254, |
|
"step": 2390 |
|
}, |
|
{ |
|
"epoch": 0.909952606635071, |
|
"grad_norm": 0.559173047542572, |
|
"learning_rate": 2.0722501368427736e-06, |
|
"loss": 0.2291, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 0.909952606635071, |
|
"eval_loss": 0.39714062213897705, |
|
"eval_runtime": 211.4195, |
|
"eval_samples_per_second": 21.01, |
|
"eval_steps_per_second": 5.255, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 0.9137440758293839, |
|
"grad_norm": 0.7534606456756592, |
|
"learning_rate": 1.9028598748504767e-06, |
|
"loss": 0.631, |
|
"step": 2410 |
|
}, |
|
{ |
|
"epoch": 0.9175355450236967, |
|
"grad_norm": 0.7881170511245728, |
|
"learning_rate": 1.7405569831232704e-06, |
|
"loss": 0.4308, |
|
"step": 2420 |
|
}, |
|
{ |
|
"epoch": 0.9213270142180094, |
|
"grad_norm": 0.7743288278579712, |
|
"learning_rate": 1.5853653778583666e-06, |
|
"loss": 0.3928, |
|
"step": 2430 |
|
}, |
|
{ |
|
"epoch": 0.9251184834123223, |
|
"grad_norm": 0.6832749247550964, |
|
"learning_rate": 1.437307927366971e-06, |
|
"loss": 0.3336, |
|
"step": 2440 |
|
}, |
|
{ |
|
"epoch": 0.9289099526066351, |
|
"grad_norm": 0.6585612297058105, |
|
"learning_rate": 1.2964064487045236e-06, |
|
"loss": 0.2391, |
|
"step": 2450 |
|
}, |
|
{ |
|
"epoch": 0.9327014218009478, |
|
"grad_norm": 0.7330191135406494, |
|
"learning_rate": 1.162681704455798e-06, |
|
"loss": 0.6028, |
|
"step": 2460 |
|
}, |
|
{ |
|
"epoch": 0.9364928909952607, |
|
"grad_norm": 0.8107926249504089, |
|
"learning_rate": 1.036153399675488e-06, |
|
"loss": 0.4511, |
|
"step": 2470 |
|
}, |
|
{ |
|
"epoch": 0.9402843601895735, |
|
"grad_norm": 0.7075092792510986, |
|
"learning_rate": 9.168401789845183e-07, |
|
"loss": 0.3733, |
|
"step": 2480 |
|
}, |
|
{ |
|
"epoch": 0.9440758293838862, |
|
"grad_norm": 0.6632401347160339, |
|
"learning_rate": 8.04759623822654e-07, |
|
"loss": 0.3243, |
|
"step": 2490 |
|
}, |
|
{ |
|
"epoch": 0.9478672985781991, |
|
"grad_norm": 0.6346734166145325, |
|
"learning_rate": 6.999282498578174e-07, |
|
"loss": 0.2299, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 0.9516587677725118, |
|
"grad_norm": 0.7297029495239258, |
|
"learning_rate": 6.023615045523844e-07, |
|
"loss": 0.6426, |
|
"step": 2510 |
|
}, |
|
{ |
|
"epoch": 0.9554502369668246, |
|
"grad_norm": 0.6962762475013733, |
|
"learning_rate": 5.120737648869389e-07, |
|
"loss": 0.4379, |
|
"step": 2520 |
|
}, |
|
{ |
|
"epoch": 0.9592417061611375, |
|
"grad_norm": 0.7509403824806213, |
|
"learning_rate": 4.290783352417338e-07, |
|
"loss": 0.3845, |
|
"step": 2530 |
|
}, |
|
{ |
|
"epoch": 0.9630331753554502, |
|
"grad_norm": 0.7338268160820007, |
|
"learning_rate": 3.5338744543622627e-07, |
|
"loss": 0.332, |
|
"step": 2540 |
|
}, |
|
{ |
|
"epoch": 0.966824644549763, |
|
"grad_norm": 0.5980384945869446, |
|
"learning_rate": 2.8501224892695245e-07, |
|
"loss": 0.2342, |
|
"step": 2550 |
|
}, |
|
{ |
|
"epoch": 0.966824644549763, |
|
"eval_loss": 0.396539568901062, |
|
"eval_runtime": 211.4226, |
|
"eval_samples_per_second": 21.01, |
|
"eval_steps_per_second": 5.255, |
|
"step": 2550 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 2638, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 2, |
|
"save_steps": 150, |
|
"stateful_callbacks": { |
|
"EarlyStoppingCallback": { |
|
"args": { |
|
"early_stopping_patience": 2, |
|
"early_stopping_threshold": 0.0 |
|
}, |
|
"attributes": { |
|
"early_stopping_patience_counter": 0 |
|
} |
|
}, |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 2.2657873952762757e+18, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |