{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.019436345966958212,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 2.820549726486206,
      "learning_rate": 0.00019805941782534764,
      "loss": 4.7714,
      "step": 1
    },
    {
      "epoch": 0.0,
      "grad_norm": 2.7905406951904297,
      "learning_rate": 0.00019605881764529358,
      "loss": 4.8623,
      "step": 2
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.4746589660644531,
      "learning_rate": 0.00019405821746523957,
      "loss": 3.7993,
      "step": 3
    },
    {
      "epoch": 0.0,
      "grad_norm": 2.333172559738159,
      "learning_rate": 0.00019205761728518557,
      "loss": 4.5898,
      "step": 4
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.711241602897644,
      "learning_rate": 0.00019005701710513156,
      "loss": 3.1208,
      "step": 5
    },
    {
      "epoch": 0.0,
      "grad_norm": 2.001378059387207,
      "learning_rate": 0.00018805641692507753,
      "loss": 2.7962,
      "step": 6
    },
    {
      "epoch": 0.0,
      "grad_norm": 2.5922505855560303,
      "learning_rate": 0.00018605581674502352,
      "loss": 2.7348,
      "step": 7
    },
    {
      "epoch": 0.0,
      "grad_norm": 3.073798179626465,
      "learning_rate": 0.00018405521656496952,
      "loss": 2.4213,
      "step": 8
    },
    {
      "epoch": 0.0,
      "grad_norm": 2.494370937347412,
      "learning_rate": 0.00018205461638491548,
      "loss": 2.2847,
      "step": 9
    },
    {
      "epoch": 0.0,
      "grad_norm": 3.1938252449035645,
      "learning_rate": 0.00018005401620486148,
      "loss": 2.2266,
      "step": 10
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.0904405117034912,
      "learning_rate": 0.00017805341602480744,
      "loss": 1.7216,
      "step": 11
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.8593369126319885,
      "learning_rate": 0.00017605281584475344,
      "loss": 2.0195,
      "step": 12
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.051988959312439,
      "learning_rate": 0.00017405221566469943,
      "loss": 1.8962,
      "step": 13
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.313820242881775,
      "learning_rate": 0.00017205161548464542,
      "loss": 1.735,
      "step": 14
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.035528302192688,
      "learning_rate": 0.00017005101530459136,
      "loss": 2.0408,
      "step": 15
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.8312838077545166,
      "learning_rate": 0.00016805041512453736,
      "loss": 1.7348,
      "step": 16
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.406984806060791,
      "learning_rate": 0.00016604981494448335,
      "loss": 1.9893,
      "step": 17
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.0014063119888306,
      "learning_rate": 0.00016404921476442935,
      "loss": 1.7413,
      "step": 18
    },
    {
      "epoch": 0.0,
      "grad_norm": 1.1123204231262207,
      "learning_rate": 0.0001620486145843753,
      "loss": 1.642,
      "step": 19
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.9622805118560791,
      "learning_rate": 0.0001600480144043213,
      "loss": 1.8947,
      "step": 20
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.9021405577659607,
      "learning_rate": 0.0001580474142242673,
      "loss": 1.8771,
      "step": 21
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.7586336135864258,
      "learning_rate": 0.00015604681404421327,
      "loss": 1.7902,
      "step": 22
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.8033269643783569,
      "learning_rate": 0.00015404621386415926,
      "loss": 1.536,
      "step": 23
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.8731617331504822,
      "learning_rate": 0.00015204561368410523,
      "loss": 1.6825,
      "step": 24
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.7662222981452942,
      "learning_rate": 0.00015004501350405122,
      "loss": 1.6269,
      "step": 25
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8472873568534851,
      "learning_rate": 0.00014804441332399721,
      "loss": 1.7143,
      "step": 26
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8353914618492126,
      "learning_rate": 0.0001460438131439432,
      "loss": 0.9914,
      "step": 27
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8981612324714661,
      "learning_rate": 0.00014404321296388918,
      "loss": 1.1645,
      "step": 28
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7975760698318481,
      "learning_rate": 0.00014204261278383514,
      "loss": 1.4233,
      "step": 29
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7593973875045776,
      "learning_rate": 0.00014004201260378114,
      "loss": 1.4445,
      "step": 30
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.9403287172317505,
      "learning_rate": 0.00013804141242372713,
      "loss": 1.4302,
      "step": 31
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.9004844427108765,
      "learning_rate": 0.00013604081224367312,
      "loss": 1.5638,
      "step": 32
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8177943229675293,
      "learning_rate": 0.0001340402120636191,
      "loss": 1.3372,
      "step": 33
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.733913004398346,
      "learning_rate": 0.00013203961188356508,
      "loss": 1.2174,
      "step": 34
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7267743945121765,
      "learning_rate": 0.00013003901170351108,
      "loss": 1.0346,
      "step": 35
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.9789184331893921,
      "learning_rate": 0.00012803841152345704,
      "loss": 1.4935,
      "step": 36
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7921544909477234,
      "learning_rate": 0.000126037811343403,
      "loss": 1.2153,
      "step": 37
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.687468945980072,
      "learning_rate": 0.000124037211163349,
      "loss": 1.096,
      "step": 38
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7953901290893555,
      "learning_rate": 0.000122036610983295,
      "loss": 1.3469,
      "step": 39
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.6782249212265015,
      "learning_rate": 0.00012003601080324098,
      "loss": 1.1761,
      "step": 40
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7382201552391052,
      "learning_rate": 0.00011803541062318697,
      "loss": 1.2891,
      "step": 41
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7965229749679565,
      "learning_rate": 0.00011603481044313295,
      "loss": 1.2347,
      "step": 42
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.6263877153396606,
      "learning_rate": 0.00011403421026307892,
      "loss": 1.2815,
      "step": 43
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8494048714637756,
      "learning_rate": 0.00011203361008302491,
      "loss": 1.4406,
      "step": 44
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8479952216148376,
      "learning_rate": 0.0001100330099029709,
      "loss": 1.1149,
      "step": 45
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.6989169716835022,
      "learning_rate": 0.00010803240972291689,
      "loss": 1.3057,
      "step": 46
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8104137182235718,
      "learning_rate": 0.00010603180954286287,
      "loss": 1.1267,
      "step": 47
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8028876781463623,
      "learning_rate": 0.00010403120936280886,
      "loss": 1.6476,
      "step": 48
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7756268382072449,
      "learning_rate": 0.00010203060918275482,
      "loss": 1.2497,
      "step": 49
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7349205613136292,
      "learning_rate": 0.00010003000900270081,
      "loss": 0.9531,
      "step": 50
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.6500608325004578,
      "learning_rate": 9.802940882264679e-05,
      "loss": 1.026,
      "step": 51
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.6990982294082642,
      "learning_rate": 9.602880864259278e-05,
      "loss": 1.119,
      "step": 52
    },
    {
      "epoch": 0.01,
      "grad_norm": 1.040234923362732,
      "learning_rate": 9.402820846253876e-05,
      "loss": 1.9937,
      "step": 53
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.6553092002868652,
      "learning_rate": 9.202760828248476e-05,
      "loss": 1.1563,
      "step": 54
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7430468797683716,
      "learning_rate": 9.002700810243074e-05,
      "loss": 1.0199,
      "step": 55
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.748374342918396,
      "learning_rate": 8.802640792237672e-05,
      "loss": 1.2218,
      "step": 56
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.6465599536895752,
      "learning_rate": 8.602580774232271e-05,
      "loss": 1.3286,
      "step": 57
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8411876559257507,
      "learning_rate": 8.402520756226868e-05,
      "loss": 1.2057,
      "step": 58
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8302045464515686,
      "learning_rate": 8.202460738221467e-05,
      "loss": 1.5277,
      "step": 59
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7100818753242493,
      "learning_rate": 8.002400720216065e-05,
      "loss": 1.3326,
      "step": 60
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.6247999668121338,
      "learning_rate": 7.802340702210663e-05,
      "loss": 1.0997,
      "step": 61
    },
    {
      "epoch": 0.01,
      "grad_norm": 1.1579809188842773,
      "learning_rate": 7.602280684205261e-05,
      "loss": 1.565,
      "step": 62
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7506289482116699,
      "learning_rate": 7.402220666199861e-05,
      "loss": 1.2788,
      "step": 63
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7227886915206909,
      "learning_rate": 7.202160648194459e-05,
      "loss": 0.9065,
      "step": 64
    },
    {
      "epoch": 0.01,
      "grad_norm": 1.144627332687378,
      "learning_rate": 7.002100630189057e-05,
      "loss": 1.6695,
      "step": 65
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7828360199928284,
      "learning_rate": 6.802040612183656e-05,
      "loss": 1.0203,
      "step": 66
    },
    {
      "epoch": 0.01,
      "grad_norm": 1.2042758464813232,
      "learning_rate": 6.601980594178254e-05,
      "loss": 1.2251,
      "step": 67
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.6878300905227661,
      "learning_rate": 6.401920576172852e-05,
      "loss": 0.8927,
      "step": 68
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7350767850875854,
      "learning_rate": 6.20186055816745e-05,
      "loss": 1.0885,
      "step": 69
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7477673292160034,
      "learning_rate": 6.001800540162049e-05,
      "loss": 1.2264,
      "step": 70
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8758525848388672,
      "learning_rate": 5.801740522156648e-05,
      "loss": 1.3656,
      "step": 71
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.742152214050293,
      "learning_rate": 5.601680504151246e-05,
      "loss": 1.1293,
      "step": 72
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7166507840156555,
      "learning_rate": 5.4016204861458444e-05,
      "loss": 1.2553,
      "step": 73
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.6855165362358093,
      "learning_rate": 5.201560468140443e-05,
      "loss": 1.2659,
      "step": 74
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8550592064857483,
      "learning_rate": 5.0015004501350405e-05,
      "loss": 1.2631,
      "step": 75
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.6638922691345215,
      "learning_rate": 4.801440432129639e-05,
      "loss": 0.972,
      "step": 76
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7261601090431213,
      "learning_rate": 4.601380414124238e-05,
      "loss": 1.2276,
      "step": 77
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.7439085245132446,
      "learning_rate": 4.401320396118836e-05,
      "loss": 1.0153,
      "step": 78
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.6688580513000488,
      "learning_rate": 4.201260378113434e-05,
      "loss": 0.9473,
      "step": 79
    },
    {
      "epoch": 0.02,
      "grad_norm": 1.1068280935287476,
      "learning_rate": 4.0012003601080326e-05,
      "loss": 1.3205,
      "step": 80
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.6699922680854797,
      "learning_rate": 3.801140342102631e-05,
      "loss": 1.2403,
      "step": 81
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.8933495879173279,
      "learning_rate": 3.6010803240972294e-05,
      "loss": 1.0529,
      "step": 82
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.7752862572669983,
      "learning_rate": 3.401020306091828e-05,
      "loss": 1.1408,
      "step": 83
    },
    {
      "epoch": 0.02,
      "grad_norm": 1.043106198310852,
      "learning_rate": 3.200960288086426e-05,
      "loss": 1.0511,
      "step": 84
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.6093169450759888,
      "learning_rate": 3.0009002700810245e-05,
      "loss": 0.9798,
      "step": 85
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.9623333811759949,
      "learning_rate": 2.800840252075623e-05,
      "loss": 0.8907,
      "step": 86
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.7118967175483704,
      "learning_rate": 2.6007802340702216e-05,
      "loss": 1.224,
      "step": 87
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.7716160416603088,
      "learning_rate": 2.4007202160648196e-05,
      "loss": 1.2367,
      "step": 88
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.8782191872596741,
      "learning_rate": 2.200660198059418e-05,
      "loss": 1.2046,
      "step": 89
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.8954662084579468,
      "learning_rate": 2.0006001800540163e-05,
      "loss": 1.1141,
      "step": 90
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.7129700183868408,
      "learning_rate": 1.8005401620486147e-05,
      "loss": 1.2983,
      "step": 91
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.8731375932693481,
      "learning_rate": 1.600480144043213e-05,
      "loss": 1.1551,
      "step": 92
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.6418743133544922,
      "learning_rate": 1.4004201260378114e-05,
      "loss": 1.0709,
      "step": 93
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.5780905485153198,
      "learning_rate": 1.2003601080324098e-05,
      "loss": 0.7904,
      "step": 94
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.6729098558425903,
      "learning_rate": 1.0003000900270082e-05,
      "loss": 1.2541,
      "step": 95
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.9264962673187256,
      "learning_rate": 8.002400720216065e-06,
      "loss": 0.9846,
      "step": 96
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.6398671865463257,
      "learning_rate": 6.001800540162049e-06,
      "loss": 1.007,
      "step": 97
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.639799952507019,
      "learning_rate": 4.001200360108033e-06,
      "loss": 1.0604,
      "step": 98
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.6854999661445618,
      "learning_rate": 2.0006001800540163e-06,
      "loss": 0.8762,
      "step": 99
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.7004562020301819,
      "learning_rate": 0.0,
      "loss": 0.9948,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 1.5977891623471104e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}