{
  "best_metric": 1.06106698513031,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.07732456988208003,
  "eval_steps": 25,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0007732456988208003,
      "grad_norm": 17.36431312561035,
      "learning_rate": 2e-05,
      "loss": 4.7994,
      "step": 1
    },
    {
      "epoch": 0.0007732456988208003,
      "eval_loss": 1.5391517877578735,
      "eval_runtime": 350.2016,
      "eval_samples_per_second": 1.556,
      "eval_steps_per_second": 0.78,
      "step": 1
    },
    {
      "epoch": 0.0015464913976416005,
      "grad_norm": 16.813636779785156,
      "learning_rate": 4e-05,
      "loss": 6.5771,
      "step": 2
    },
    {
      "epoch": 0.002319737096462401,
      "grad_norm": 15.335413932800293,
      "learning_rate": 6e-05,
      "loss": 6.3871,
      "step": 3
    },
    {
      "epoch": 0.003092982795283201,
      "grad_norm": 12.222745895385742,
      "learning_rate": 8e-05,
      "loss": 5.7415,
      "step": 4
    },
    {
      "epoch": 0.0038662284941040014,
      "grad_norm": 11.978750228881836,
      "learning_rate": 0.0001,
      "loss": 5.3726,
      "step": 5
    },
    {
      "epoch": 0.004639474192924802,
      "grad_norm": 10.958758354187012,
      "learning_rate": 9.997266286704631e-05,
      "loss": 5.7592,
      "step": 6
    },
    {
      "epoch": 0.005412719891745603,
      "grad_norm": 10.371870994567871,
      "learning_rate": 9.989068136093873e-05,
      "loss": 5.0328,
      "step": 7
    },
    {
      "epoch": 0.006185965590566402,
      "grad_norm": 15.32174301147461,
      "learning_rate": 9.975414512725057e-05,
      "loss": 4.7995,
      "step": 8
    },
    {
      "epoch": 0.0069592112893872024,
      "grad_norm": 11.446320533752441,
      "learning_rate": 9.956320346634876e-05,
      "loss": 4.8366,
      "step": 9
    },
    {
      "epoch": 0.007732456988208003,
      "grad_norm": 11.212560653686523,
      "learning_rate": 9.931806517013612e-05,
      "loss": 5.5192,
      "step": 10
    },
    {
      "epoch": 0.008505702687028804,
      "grad_norm": 11.631918907165527,
      "learning_rate": 9.901899829374047e-05,
      "loss": 5.0465,
      "step": 11
    },
    {
      "epoch": 0.009278948385849604,
      "grad_norm": 10.389140129089355,
      "learning_rate": 9.86663298624003e-05,
      "loss": 4.9063,
      "step": 12
    },
    {
      "epoch": 0.010052194084670405,
      "grad_norm": 11.703014373779297,
      "learning_rate": 9.826044551386744e-05,
      "loss": 5.2112,
      "step": 13
    },
    {
      "epoch": 0.010825439783491205,
      "grad_norm": 10.6223726272583,
      "learning_rate": 9.780178907671789e-05,
      "loss": 4.7938,
      "step": 14
    },
    {
      "epoch": 0.011598685482312004,
      "grad_norm": 9.967108726501465,
      "learning_rate": 9.729086208503174e-05,
      "loss": 4.8199,
      "step": 15
    },
    {
      "epoch": 0.012371931181132804,
      "grad_norm": 9.973040580749512,
      "learning_rate": 9.672822322997305e-05,
      "loss": 5.1272,
      "step": 16
    },
    {
      "epoch": 0.013145176879953605,
      "grad_norm": 10.061216354370117,
      "learning_rate": 9.611448774886924e-05,
      "loss": 4.5049,
      "step": 17
    },
    {
      "epoch": 0.013918422578774405,
      "grad_norm": 9.44239616394043,
      "learning_rate": 9.545032675245813e-05,
      "loss": 4.7982,
      "step": 18
    },
    {
      "epoch": 0.014691668277595205,
      "grad_norm": 9.862997055053711,
      "learning_rate": 9.473646649103818e-05,
      "loss": 4.9277,
      "step": 19
    },
    {
      "epoch": 0.015464913976416006,
      "grad_norm": 8.347469329833984,
      "learning_rate": 9.397368756032445e-05,
      "loss": 4.4154,
      "step": 20
    },
    {
      "epoch": 0.016238159675236806,
      "grad_norm": 8.445976257324219,
      "learning_rate": 9.316282404787871e-05,
      "loss": 3.718,
      "step": 21
    },
    {
      "epoch": 0.017011405374057608,
      "grad_norm": 9.758785247802734,
      "learning_rate": 9.230476262104677e-05,
      "loss": 4.5808,
      "step": 22
    },
    {
      "epoch": 0.017784651072878407,
      "grad_norm": 11.981024742126465,
      "learning_rate": 9.140044155740101e-05,
      "loss": 6.4283,
      "step": 23
    },
    {
      "epoch": 0.01855789677169921,
      "grad_norm": 12.591474533081055,
      "learning_rate": 9.045084971874738e-05,
      "loss": 5.034,
      "step": 24
    },
    {
      "epoch": 0.019331142470520007,
      "grad_norm": 10.587797164916992,
      "learning_rate": 8.945702546981969e-05,
      "loss": 5.3043,
      "step": 25
    },
    {
      "epoch": 0.019331142470520007,
      "eval_loss": 1.1092556715011597,
      "eval_runtime": 351.8518,
      "eval_samples_per_second": 1.549,
      "eval_steps_per_second": 0.776,
      "step": 25
    },
    {
      "epoch": 0.02010438816934081,
      "grad_norm": 8.1387300491333,
      "learning_rate": 8.842005554284296e-05,
      "loss": 5.3438,
      "step": 26
    },
    {
      "epoch": 0.020877633868161608,
      "grad_norm": 8.583553314208984,
      "learning_rate": 8.73410738492077e-05,
      "loss": 3.7688,
      "step": 27
    },
    {
      "epoch": 0.02165087956698241,
      "grad_norm": 8.653087615966797,
      "learning_rate": 8.622126023955446e-05,
      "loss": 4.184,
      "step": 28
    },
    {
      "epoch": 0.02242412526580321,
      "grad_norm": 9.537445068359375,
      "learning_rate": 8.506183921362443e-05,
      "loss": 3.9987,
      "step": 29
    },
    {
      "epoch": 0.023197370964624008,
      "grad_norm": 10.528961181640625,
      "learning_rate": 8.386407858128706e-05,
      "loss": 5.8944,
      "step": 30
    },
    {
      "epoch": 0.02397061666344481,
      "grad_norm": 11.14555549621582,
      "learning_rate": 8.262928807620843e-05,
      "loss": 4.382,
      "step": 31
    },
    {
      "epoch": 0.02474386236226561,
      "grad_norm": 9.45686149597168,
      "learning_rate": 8.135881792367686e-05,
      "loss": 4.9518,
      "step": 32
    },
    {
      "epoch": 0.02551710806108641,
      "grad_norm": 8.43744945526123,
      "learning_rate": 8.005405736415126e-05,
      "loss": 4.2156,
      "step": 33
    },
    {
      "epoch": 0.02629035375990721,
      "grad_norm": 9.362334251403809,
      "learning_rate": 7.871643313414718e-05,
      "loss": 4.2592,
      "step": 34
    },
    {
      "epoch": 0.02706359945872801,
      "grad_norm": 9.237394332885742,
      "learning_rate": 7.734740790612136e-05,
      "loss": 3.7767,
      "step": 35
    },
    {
      "epoch": 0.02783684515754881,
      "grad_norm": 10.05842399597168,
      "learning_rate": 7.594847868906076e-05,
      "loss": 4.4501,
      "step": 36
    },
    {
      "epoch": 0.028610090856369612,
      "grad_norm": 10.300761222839355,
      "learning_rate": 7.452117519152542e-05,
      "loss": 4.6419,
      "step": 37
    },
    {
      "epoch": 0.02938333655519041,
      "grad_norm": 10.16982650756836,
      "learning_rate": 7.30670581489344e-05,
      "loss": 4.0752,
      "step": 38
    },
    {
      "epoch": 0.030156582254011213,
      "grad_norm": 9.05498218536377,
      "learning_rate": 7.158771761692464e-05,
      "loss": 4.7093,
      "step": 39
    },
    {
      "epoch": 0.03092982795283201,
      "grad_norm": 9.234203338623047,
      "learning_rate": 7.008477123264848e-05,
      "loss": 4.4179,
      "step": 40
    },
    {
      "epoch": 0.03170307365165281,
      "grad_norm": 9.255125999450684,
      "learning_rate": 6.855986244591104e-05,
      "loss": 3.5948,
      "step": 41
    },
    {
      "epoch": 0.03247631935047361,
      "grad_norm": 10.639616012573242,
      "learning_rate": 6.701465872208216e-05,
      "loss": 4.5235,
      "step": 42
    },
    {
      "epoch": 0.03324956504929441,
      "grad_norm": 8.199054718017578,
      "learning_rate": 6.545084971874738e-05,
      "loss": 3.7567,
      "step": 43
    },
    {
      "epoch": 0.034022810748115216,
      "grad_norm": 14.720418930053711,
      "learning_rate": 6.387014543809223e-05,
      "loss": 5.8634,
      "step": 44
    },
    {
      "epoch": 0.034796056446936015,
      "grad_norm": 8.992387771606445,
      "learning_rate": 6.227427435703997e-05,
      "loss": 3.7691,
      "step": 45
    },
    {
      "epoch": 0.03556930214575681,
      "grad_norm": 13.752641677856445,
      "learning_rate": 6.066498153718735e-05,
      "loss": 4.0749,
      "step": 46
    },
    {
      "epoch": 0.03634254784457761,
      "grad_norm": 9.262603759765625,
      "learning_rate": 5.90440267166055e-05,
      "loss": 3.926,
      "step": 47
    },
    {
      "epoch": 0.03711579354339842,
      "grad_norm": 8.374828338623047,
      "learning_rate": 5.74131823855921e-05,
      "loss": 3.248,
      "step": 48
    },
    {
      "epoch": 0.037889039242219216,
      "grad_norm": 10.220807075500488,
      "learning_rate": 5.577423184847932e-05,
      "loss": 4.066,
      "step": 49
    },
    {
      "epoch": 0.038662284941040015,
      "grad_norm": 13.713523864746094,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 3.6488,
      "step": 50
    },
    {
      "epoch": 0.038662284941040015,
      "eval_loss": 1.0772173404693604,
      "eval_runtime": 351.8904,
      "eval_samples_per_second": 1.549,
      "eval_steps_per_second": 0.776,
      "step": 50
    },
    {
      "epoch": 0.039435530639860814,
      "grad_norm": 10.129049301147461,
      "learning_rate": 5.247918773366112e-05,
      "loss": 4.7483,
      "step": 51
    },
    {
      "epoch": 0.04020877633868162,
      "grad_norm": 8.601277351379395,
      "learning_rate": 5.0826697238317935e-05,
      "loss": 4.5312,
      "step": 52
    },
    {
      "epoch": 0.04098202203750242,
      "grad_norm": 11.301950454711914,
      "learning_rate": 4.917330276168208e-05,
      "loss": 4.7463,
      "step": 53
    },
    {
      "epoch": 0.041755267736323216,
      "grad_norm": 8.8668851852417,
      "learning_rate": 4.7520812266338885e-05,
      "loss": 5.0553,
      "step": 54
    },
    {
      "epoch": 0.042528513435144015,
      "grad_norm": 9.888030052185059,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 4.5142,
      "step": 55
    },
    {
      "epoch": 0.04330175913396482,
      "grad_norm": 9.675687789916992,
      "learning_rate": 4.4225768151520694e-05,
      "loss": 4.6986,
      "step": 56
    },
    {
      "epoch": 0.04407500483278562,
      "grad_norm": 11.324494361877441,
      "learning_rate": 4.2586817614407895e-05,
      "loss": 5.2414,
      "step": 57
    },
    {
      "epoch": 0.04484825053160642,
      "grad_norm": 10.077937126159668,
      "learning_rate": 4.095597328339452e-05,
      "loss": 4.647,
      "step": 58
    },
    {
      "epoch": 0.045621496230427216,
      "grad_norm": 9.43274211883545,
      "learning_rate": 3.933501846281267e-05,
      "loss": 4.5543,
      "step": 59
    },
    {
      "epoch": 0.046394741929248015,
      "grad_norm": 10.41069221496582,
      "learning_rate": 3.772572564296005e-05,
      "loss": 4.8732,
      "step": 60
    },
    {
      "epoch": 0.04716798762806882,
      "grad_norm": 9.759603500366211,
      "learning_rate": 3.612985456190778e-05,
      "loss": 4.7214,
      "step": 61
    },
    {
      "epoch": 0.04794123332688962,
      "grad_norm": 11.823719024658203,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 5.5216,
      "step": 62
    },
    {
      "epoch": 0.04871447902571042,
      "grad_norm": 9.14537239074707,
      "learning_rate": 3.298534127791785e-05,
      "loss": 3.9145,
      "step": 63
    },
    {
      "epoch": 0.04948772472453122,
      "grad_norm": 10.29529094696045,
      "learning_rate": 3.144013755408895e-05,
      "loss": 5.2241,
      "step": 64
    },
    {
      "epoch": 0.05026097042335202,
      "grad_norm": 8.020105361938477,
      "learning_rate": 2.991522876735154e-05,
      "loss": 4.4175,
      "step": 65
    },
    {
      "epoch": 0.05103421612217282,
      "grad_norm": 7.962001323699951,
      "learning_rate": 2.8412282383075363e-05,
      "loss": 3.8547,
      "step": 66
    },
    {
      "epoch": 0.05180746182099362,
      "grad_norm": 7.753868579864502,
      "learning_rate": 2.693294185106562e-05,
      "loss": 3.7267,
      "step": 67
    },
    {
      "epoch": 0.05258070751981442,
      "grad_norm": 8.525618553161621,
      "learning_rate": 2.547882480847461e-05,
      "loss": 4.6899,
      "step": 68
    },
    {
      "epoch": 0.053353953218635224,
      "grad_norm": 8.045459747314453,
      "learning_rate": 2.405152131093926e-05,
      "loss": 4.1387,
      "step": 69
    },
    {
      "epoch": 0.05412719891745602,
      "grad_norm": 7.087581634521484,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 4.1212,
      "step": 70
    },
    {
      "epoch": 0.05490044461627682,
      "grad_norm": 7.834628582000732,
      "learning_rate": 2.128356686585282e-05,
      "loss": 3.7453,
      "step": 71
    },
    {
      "epoch": 0.05567369031509762,
      "grad_norm": 8.538744926452637,
      "learning_rate": 1.9945942635848748e-05,
      "loss": 4.6089,
      "step": 72
    },
    {
      "epoch": 0.056446936013918425,
      "grad_norm": 7.815285682678223,
      "learning_rate": 1.8641182076323148e-05,
      "loss": 3.8365,
      "step": 73
    },
    {
      "epoch": 0.057220181712739224,
      "grad_norm": 7.645932674407959,
      "learning_rate": 1.7370711923791567e-05,
      "loss": 2.5073,
      "step": 74
    },
    {
      "epoch": 0.05799342741156002,
      "grad_norm": 8.115581512451172,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 4.0698,
      "step": 75
    },
    {
      "epoch": 0.05799342741156002,
      "eval_loss": 1.0642553567886353,
      "eval_runtime": 352.0611,
      "eval_samples_per_second": 1.548,
      "eval_steps_per_second": 0.775,
      "step": 75
    },
    {
      "epoch": 0.05876667311038082,
      "grad_norm": 7.514301776885986,
      "learning_rate": 1.4938160786375572e-05,
      "loss": 3.9285,
      "step": 76
    },
    {
      "epoch": 0.05953991880920163,
      "grad_norm": 11.112171173095703,
      "learning_rate": 1.3778739760445552e-05,
      "loss": 4.9558,
      "step": 77
    },
    {
      "epoch": 0.060313164508022425,
      "grad_norm": 7.495338439941406,
      "learning_rate": 1.2658926150792322e-05,
      "loss": 3.9271,
      "step": 78
    },
    {
      "epoch": 0.061086410206843224,
      "grad_norm": 9.140981674194336,
      "learning_rate": 1.157994445715706e-05,
      "loss": 3.571,
      "step": 79
    },
    {
      "epoch": 0.06185965590566402,
      "grad_norm": 9.3052978515625,
      "learning_rate": 1.0542974530180327e-05,
      "loss": 4.5799,
      "step": 80
    },
    {
      "epoch": 0.06263290160448483,
      "grad_norm": 8.143803596496582,
      "learning_rate": 9.549150281252633e-06,
      "loss": 3.5652,
      "step": 81
    },
    {
      "epoch": 0.06340614730330563,
      "grad_norm": 10.234214782714844,
      "learning_rate": 8.599558442598998e-06,
      "loss": 4.2623,
      "step": 82
    },
    {
      "epoch": 0.06417939300212643,
      "grad_norm": 8.836921691894531,
      "learning_rate": 7.695237378953223e-06,
      "loss": 3.5661,
      "step": 83
    },
    {
      "epoch": 0.06495263870094722,
      "grad_norm": 10.0997953414917,
      "learning_rate": 6.837175952121306e-06,
      "loss": 4.2513,
      "step": 84
    },
    {
      "epoch": 0.06572588439976802,
      "grad_norm": 9.577383995056152,
      "learning_rate": 6.026312439675552e-06,
      "loss": 4.6833,
      "step": 85
    },
    {
      "epoch": 0.06649913009858882,
      "grad_norm": 10.211189270019531,
      "learning_rate": 5.263533508961827e-06,
      "loss": 4.5139,
      "step": 86
    },
    {
      "epoch": 0.06727237579740963,
      "grad_norm": 9.4320068359375,
      "learning_rate": 4.549673247541875e-06,
      "loss": 4.1881,
      "step": 87
    },
    {
      "epoch": 0.06804562149623043,
      "grad_norm": 9.295085906982422,
      "learning_rate": 3.885512251130763e-06,
      "loss": 4.5596,
      "step": 88
    },
    {
      "epoch": 0.06881886719505123,
      "grad_norm": 7.192505836486816,
      "learning_rate": 3.271776770026963e-06,
      "loss": 3.1656,
      "step": 89
    },
    {
      "epoch": 0.06959211289387203,
      "grad_norm": 8.369428634643555,
      "learning_rate": 2.7091379149682685e-06,
      "loss": 4.5796,
      "step": 90
    },
    {
      "epoch": 0.07036535859269283,
      "grad_norm": 7.142214298248291,
      "learning_rate": 2.1982109232821178e-06,
      "loss": 3.4252,
      "step": 91
    },
    {
      "epoch": 0.07113860429151363,
      "grad_norm": 9.0031099319458,
      "learning_rate": 1.7395544861325718e-06,
      "loss": 4.4111,
      "step": 92
    },
    {
      "epoch": 0.07191184999033443,
      "grad_norm": 8.895259857177734,
      "learning_rate": 1.333670137599713e-06,
      "loss": 3.6634,
      "step": 93
    },
    {
      "epoch": 0.07268509568915522,
      "grad_norm": 8.90333080291748,
      "learning_rate": 9.810017062595322e-07,
      "loss": 4.1098,
      "step": 94
    },
    {
      "epoch": 0.07345834138797602,
      "grad_norm": 10.424738883972168,
      "learning_rate": 6.819348298638839e-07,
      "loss": 4.9226,
      "step": 95
    },
    {
      "epoch": 0.07423158708679684,
      "grad_norm": 10.640342712402344,
      "learning_rate": 4.367965336512403e-07,
      "loss": 4.4209,
      "step": 96
    },
    {
      "epoch": 0.07500483278561763,
      "grad_norm": 11.282198905944824,
      "learning_rate": 2.458548727494292e-07,
      "loss": 4.0654,
      "step": 97
    },
    {
      "epoch": 0.07577807848443843,
      "grad_norm": 9.29175853729248,
      "learning_rate": 1.0931863906127327e-07,
      "loss": 4.4543,
      "step": 98
    },
    {
      "epoch": 0.07655132418325923,
      "grad_norm": 9.734441757202148,
      "learning_rate": 2.7337132953697554e-08,
      "loss": 3.6864,
      "step": 99
    },
    {
      "epoch": 0.07732456988208003,
      "grad_norm": 9.622171401977539,
      "learning_rate": 0.0,
      "loss": 3.4783,
      "step": 100
    },
    {
      "epoch": 0.07732456988208003,
      "eval_loss": 1.06106698513031,
      "eval_runtime": 351.8934,
      "eval_samples_per_second": 1.549,
      "eval_steps_per_second": 0.776,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.406258997362688e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}