{
"best_metric": 0.17445562779903412,
"best_model_checkpoint": "/home/datta0/models/lora_final/Qwen2-7B_metamath_ortho/checkpoint-13",
"epoch": 0.9995949777237748,
"eval_steps": 13,
"global_step": 617,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0016200891049007696,
"grad_norm": 2.756908416748047,
"learning_rate": 2.3076923076923076e-05,
"loss": 0.2557,
"step": 1
},
{
"epoch": 0.011340623734305387,
"grad_norm": 1.0947959423065186,
"learning_rate": 0.00016153846153846153,
"loss": 0.1901,
"step": 7
},
{
"epoch": 0.021061158363710003,
"eval_loss": 0.17445562779903412,
"eval_runtime": 12.3542,
"eval_samples_per_second": 40.472,
"eval_steps_per_second": 5.099,
"step": 13
},
{
"epoch": 0.022681247468610773,
"grad_norm": 0.9623692631721497,
"learning_rate": 0.0002999979709808197,
"loss": 0.1637,
"step": 14
},
{
"epoch": 0.03402187120291616,
"grad_norm": 1.330176591873169,
"learning_rate": 0.0002998701612152596,
"loss": 0.1805,
"step": 21
},
{
"epoch": 0.042122316727420006,
"eval_loss": 0.2130957394838333,
"eval_runtime": 98.158,
"eval_samples_per_second": 5.094,
"eval_steps_per_second": 0.642,
"step": 26
},
{
"epoch": 0.04536249493722155,
"grad_norm": 1.3409372568130493,
"learning_rate": 0.0002995437011859465,
"loss": 0.2023,
"step": 28
},
{
"epoch": 0.056703118671526935,
"grad_norm": 1.3518779277801514,
"learning_rate": 0.00029901902360990936,
"loss": 0.2166,
"step": 35
},
{
"epoch": 0.06318347509113001,
"eval_loss": 0.2353646159172058,
"eval_runtime": 91.64,
"eval_samples_per_second": 5.456,
"eval_steps_per_second": 0.687,
"step": 39
},
{
"epoch": 0.06804374240583232,
"grad_norm": 1.306227207183838,
"learning_rate": 0.00029829682393805085,
"loss": 0.2177,
"step": 42
},
{
"epoch": 0.0793843661401377,
"grad_norm": 1.211318850517273,
"learning_rate": 0.0002973780594333385,
"loss": 0.2322,
"step": 49
},
{
"epoch": 0.08424463345484001,
"eval_loss": 0.2522711753845215,
"eval_runtime": 12.3576,
"eval_samples_per_second": 40.461,
"eval_steps_per_second": 5.098,
"step": 52
},
{
"epoch": 0.0907249898744431,
"grad_norm": 1.3756389617919922,
"learning_rate": 0.00029626394790197025,
"loss": 0.2425,
"step": 56
},
{
"epoch": 0.10206561360874848,
"grad_norm": 1.5007774829864502,
"learning_rate": 0.00029495596607919305,
"loss": 0.2419,
"step": 63
},
{
"epoch": 0.10530579181855002,
"eval_loss": 0.2641831636428833,
"eval_runtime": 90.5578,
"eval_samples_per_second": 5.521,
"eval_steps_per_second": 0.696,
"step": 65
},
{
"epoch": 0.11340623734305387,
"grad_norm": 1.4407020807266235,
"learning_rate": 0.00029345584767191685,
"loss": 0.2513,
"step": 70
},
{
"epoch": 0.12474686107735926,
"grad_norm": 1.4020073413848877,
"learning_rate": 0.0002917655810607161,
"loss": 0.2577,
"step": 77
},
{
"epoch": 0.12636695018226002,
"eval_loss": 0.2727738320827484,
"eval_runtime": 102.1463,
"eval_samples_per_second": 4.895,
"eval_steps_per_second": 0.617,
"step": 78
},
{
"epoch": 0.13608748481166463,
"grad_norm": 1.3295173645019531,
"learning_rate": 0.0002898874066642667,
"loss": 0.2586,
"step": 84
},
{
"epoch": 0.14742810854597002,
"grad_norm": 1.3159449100494385,
"learning_rate": 0.00028782381396971003,
"loss": 0.2667,
"step": 91
},
{
"epoch": 0.14742810854597002,
"eval_loss": 0.2756331264972687,
"eval_runtime": 12.3671,
"eval_samples_per_second": 40.43,
"eval_steps_per_second": 5.094,
"step": 91
},
{
"epoch": 0.1587687322802754,
"grad_norm": 1.342775583267212,
"learning_rate": 0.00028557753823288173,
"loss": 0.2586,
"step": 98
},
{
"epoch": 0.16848926690968002,
"eval_loss": 0.2776657044887543,
"eval_runtime": 95.5783,
"eval_samples_per_second": 5.231,
"eval_steps_per_second": 0.659,
"step": 104
},
{
"epoch": 0.1701093560145808,
"grad_norm": 1.3520455360412598,
"learning_rate": 0.0002831515568527781,
"loss": 0.2689,
"step": 105
},
{
"epoch": 0.1814499797488862,
"grad_norm": 1.1435012817382812,
"learning_rate": 0.00028054908542506627,
"loss": 0.2699,
"step": 112
},
{
"epoch": 0.18955042527339003,
"eval_loss": 0.28325632214546204,
"eval_runtime": 99.0949,
"eval_samples_per_second": 5.046,
"eval_steps_per_second": 0.636,
"step": 117
},
{
"epoch": 0.19279060348319157,
"grad_norm": 1.2993550300598145,
"learning_rate": 0.00027777357347986823,
"loss": 0.2675,
"step": 119
},
{
"epoch": 0.20413122721749696,
"grad_norm": 1.2573751211166382,
"learning_rate": 0.00027482869990946986,
"loss": 0.268,
"step": 126
},
{
"epoch": 0.21061158363710003,
"eval_loss": 0.28788676857948303,
"eval_runtime": 12.4001,
"eval_samples_per_second": 40.322,
"eval_steps_per_second": 5.081,
"step": 130
},
{
"epoch": 0.21547185095180235,
"grad_norm": 1.0781062841415405,
"learning_rate": 0.0002717183680920135,
"loss": 0.274,
"step": 133
},
{
"epoch": 0.22681247468610774,
"grad_norm": 1.4101282358169556,
"learning_rate": 0.00026844670071763906,
"loss": 0.2792,
"step": 140
},
{
"epoch": 0.23167274200081003,
"eval_loss": 0.28967005014419556,
"eval_runtime": 101.2635,
"eval_samples_per_second": 4.938,
"eval_steps_per_second": 0.622,
"step": 143
},
{
"epoch": 0.23815309842041313,
"grad_norm": 1.1714630126953125,
"learning_rate": 0.00026501803432393037,
"loss": 0.2703,
"step": 147
},
{
"epoch": 0.24949372215471852,
"grad_norm": 1.2528555393218994,
"learning_rate": 0.00026143691354791145,
"loss": 0.2717,
"step": 154
},
{
"epoch": 0.25273390036452004,
"eval_loss": 0.28649693727493286,
"eval_runtime": 105.229,
"eval_samples_per_second": 4.752,
"eval_steps_per_second": 0.599,
"step": 156
},
{
"epoch": 0.2608343458890239,
"grad_norm": 1.2904279232025146,
"learning_rate": 0.00025770808510220956,
"loss": 0.2749,
"step": 161
},
{
"epoch": 0.27217496962332927,
"grad_norm": 1.2408525943756104,
"learning_rate": 0.00025383649148337105,
"loss": 0.2823,
"step": 168
},
{
"epoch": 0.27379505872823007,
"eval_loss": 0.2874949276447296,
"eval_runtime": 12.4012,
"eval_samples_per_second": 40.319,
"eval_steps_per_second": 5.08,
"step": 169
},
{
"epoch": 0.28351559335763465,
"grad_norm": 1.3247877359390259,
"learning_rate": 0.0002498272644206695,
"loss": 0.2808,
"step": 175
},
{
"epoch": 0.29485621709194004,
"grad_norm": 1.1582810878753662,
"learning_rate": 0.0002456857180740884,
"loss": 0.2717,
"step": 182
},
{
"epoch": 0.29485621709194004,
"eval_loss": 0.2861660122871399,
"eval_runtime": 94.7561,
"eval_samples_per_second": 5.277,
"eval_steps_per_second": 0.665,
"step": 182
},
{
"epoch": 0.30619684082624543,
"grad_norm": 1.1963558197021484,
"learning_rate": 0.0002414173419904956,
"loss": 0.2771,
"step": 189
},
{
"epoch": 0.3159173754556501,
"eval_loss": 0.28404995799064636,
"eval_runtime": 90.8914,
"eval_samples_per_second": 5.501,
"eval_steps_per_second": 0.693,
"step": 195
},
{
"epoch": 0.3175374645605508,
"grad_norm": 1.1339807510375977,
"learning_rate": 0.00023702779382734566,
"loss": 0.271,
"step": 196
},
{
"epoch": 0.3288780882948562,
"grad_norm": 1.1551539897918701,
"learning_rate": 0.0002325228918535541,
"loss": 0.2703,
"step": 203
},
{
"epoch": 0.33697853381936005,
"eval_loss": 0.2813732922077179,
"eval_runtime": 12.359,
"eval_samples_per_second": 40.456,
"eval_steps_per_second": 5.097,
"step": 208
},
{
"epoch": 0.3402187120291616,
"grad_norm": 1.2488080263137817,
"learning_rate": 0.00022790860723748442,
"loss": 0.2742,
"step": 210
},
{
"epoch": 0.351559335763467,
"grad_norm": 1.110122561454773,
"learning_rate": 0.00022319105613226921,
"loss": 0.2684,
"step": 217
},
{
"epoch": 0.3580396921830701,
"eval_loss": 0.27897754311561584,
"eval_runtime": 102.1713,
"eval_samples_per_second": 4.894,
"eval_steps_per_second": 0.617,
"step": 221
},
{
"epoch": 0.3628999594977724,
"grad_norm": 1.0932738780975342,
"learning_rate": 0.00021837649156895706,
"loss": 0.2717,
"step": 224
},
{
"epoch": 0.37424058323207776,
"grad_norm": 1.1976110935211182,
"learning_rate": 0.00021347129516822945,
"loss": 0.2685,
"step": 231
},
{
"epoch": 0.37910085054678005,
"eval_loss": 0.27723681926727295,
"eval_runtime": 97.0594,
"eval_samples_per_second": 5.151,
"eval_steps_per_second": 0.649,
"step": 234
},
{
"epoch": 0.38558120696638315,
"grad_norm": 1.1907345056533813,
"learning_rate": 0.00020848196868167505,
"loss": 0.2661,
"step": 238
},
{
"epoch": 0.39692183070068854,
"grad_norm": 1.0816656351089478,
"learning_rate": 0.000203415125373832,
"loss": 0.2659,
"step": 245
},
{
"epoch": 0.4001620089104901,
"eval_loss": 0.2725655734539032,
"eval_runtime": 12.4173,
"eval_samples_per_second": 40.267,
"eval_steps_per_second": 5.074,
"step": 247
},
{
"epoch": 0.4082624544349939,
"grad_norm": 1.1740119457244873,
"learning_rate": 0.00019827748125642242,
"loss": 0.2665,
"step": 252
},
{
"epoch": 0.4196030781692993,
"grad_norm": 1.1028467416763306,
"learning_rate": 0.0001930758461863965,
"loss": 0.2665,
"step": 259
},
{
"epoch": 0.42122316727420006,
"eval_loss": 0.27276599407196045,
"eval_runtime": 97.9547,
"eval_samples_per_second": 5.104,
"eval_steps_per_second": 0.643,
"step": 260
},
{
"epoch": 0.4309437019036047,
"grad_norm": 1.3131815195083618,
"learning_rate": 0.0001878171148395872,
"loss": 0.2698,
"step": 266
},
{
"epoch": 0.4422843256379101,
"grad_norm": 1.3851832151412964,
"learning_rate": 0.00018250825757193848,
"loss": 0.2702,
"step": 273
},
{
"epoch": 0.4422843256379101,
"eval_loss": 0.27008697390556335,
"eval_runtime": 100.4969,
"eval_samples_per_second": 4.975,
"eval_steps_per_second": 0.627,
"step": 273
},
{
"epoch": 0.4536249493722155,
"grad_norm": 1.0187535285949707,
"learning_rate": 0.0001771563111804211,
"loss": 0.258,
"step": 280
},
{
"epoch": 0.46334548400162007,
"eval_loss": 0.26795363426208496,
"eval_runtime": 12.3721,
"eval_samples_per_second": 40.414,
"eval_steps_per_second": 5.092,
"step": 286
},
{
"epoch": 0.46496557310652087,
"grad_norm": 1.1849358081817627,
"learning_rate": 0.0001717683695758819,
"loss": 0.2585,
"step": 287
},
{
"epoch": 0.47630619684082626,
"grad_norm": 1.1577839851379395,
"learning_rate": 0.00016635157438018983,
"loss": 0.2649,
"step": 294
},
{
"epoch": 0.4844066423653301,
"eval_loss": 0.26310181617736816,
"eval_runtime": 101.5519,
"eval_samples_per_second": 4.924,
"eval_steps_per_second": 0.62,
"step": 299
},
{
"epoch": 0.48764682057513165,
"grad_norm": 1.1407397985458374,
"learning_rate": 0.0001609131054601416,
"loss": 0.2577,
"step": 301
},
{
"epoch": 0.49898744430943703,
"grad_norm": 1.1221944093704224,
"learning_rate": 0.00015546017141067432,
"loss": 0.2465,
"step": 308
},
{
"epoch": 0.5054678007290401,
"eval_loss": 0.2600298225879669,
"eval_runtime": 99.6473,
"eval_samples_per_second": 5.018,
"eval_steps_per_second": 0.632,
"step": 312
},
{
"epoch": 0.5103280680437424,
"grad_norm": 1.072572112083435,
"learning_rate": 0.00015,
"loss": 0.243,
"step": 315
},
{
"epoch": 0.5216686917780478,
"grad_norm": 1.0873061418533325,
"learning_rate": 0.0001445398285893257,
"loss": 0.2497,
"step": 322
},
{
"epoch": 0.5265289590927501,
"eval_loss": 0.25752100348472595,
"eval_runtime": 12.4003,
"eval_samples_per_second": 40.322,
"eval_steps_per_second": 5.081,
"step": 325
},
{
"epoch": 0.5330093155123532,
"grad_norm": 1.0061649084091187,
"learning_rate": 0.0001390868945398584,
"loss": 0.2455,
"step": 329
},
{
"epoch": 0.5443499392466585,
"grad_norm": 1.3215664625167847,
"learning_rate": 0.00013364842561981014,
"loss": 0.2403,
"step": 336
},
{
"epoch": 0.5475901174564601,
"eval_loss": 0.2532329857349396,
"eval_runtime": 93.6404,
"eval_samples_per_second": 5.34,
"eval_steps_per_second": 0.673,
"step": 338
},
{
"epoch": 0.555690562980964,
"grad_norm": 1.0991076231002808,
"learning_rate": 0.00012823163042411807,
"loss": 0.2418,
"step": 343
},
{
"epoch": 0.5670311867152693,
"grad_norm": 1.18295156955719,
"learning_rate": 0.0001228436888195789,
"loss": 0.2409,
"step": 350
},
{
"epoch": 0.56865127582017,
"eval_loss": 0.25006380677223206,
"eval_runtime": 95.1638,
"eval_samples_per_second": 5.254,
"eval_steps_per_second": 0.662,
"step": 351
},
{
"epoch": 0.5783718104495748,
"grad_norm": 1.1283714771270752,
"learning_rate": 0.00011749174242806152,
"loss": 0.2373,
"step": 357
},
{
"epoch": 0.5897124341838801,
"grad_norm": 1.0942596197128296,
"learning_rate": 0.00011218288516041279,
"loss": 0.2425,
"step": 364
},
{
"epoch": 0.5897124341838801,
"eval_loss": 0.24510693550109863,
"eval_runtime": 12.4067,
"eval_samples_per_second": 40.301,
"eval_steps_per_second": 5.078,
"step": 364
},
{
"epoch": 0.6010530579181855,
"grad_norm": 1.0159014463424683,
"learning_rate": 0.00010692415381360349,
"loss": 0.2357,
"step": 371
},
{
"epoch": 0.6107735925475901,
"eval_loss": 0.24082474410533905,
"eval_runtime": 89.1035,
"eval_samples_per_second": 5.611,
"eval_steps_per_second": 0.707,
"step": 377
},
{
"epoch": 0.6123936816524909,
"grad_norm": 1.2053192853927612,
"learning_rate": 0.00010172251874357757,
"loss": 0.2334,
"step": 378
},
{
"epoch": 0.6237343053867963,
"grad_norm": 1.0529735088348389,
"learning_rate": 9.658487462616794e-05,
"loss": 0.2294,
"step": 385
},
{
"epoch": 0.6318347509113001,
"eval_loss": 0.23589473962783813,
"eval_runtime": 92.596,
"eval_samples_per_second": 5.4,
"eval_steps_per_second": 0.68,
"step": 390
},
{
"epoch": 0.6350749291211016,
"grad_norm": 1.1033920049667358,
"learning_rate": 9.151803131832493e-05,
"loss": 0.2384,
"step": 392
},
{
"epoch": 0.6464155528554071,
"grad_norm": 1.0052192211151123,
"learning_rate": 8.652870483177049e-05,
"loss": 0.2306,
"step": 399
},
{
"epoch": 0.6528959092750102,
"eval_loss": 0.2337246686220169,
"eval_runtime": 12.4277,
"eval_samples_per_second": 40.233,
"eval_steps_per_second": 5.069,
"step": 403
},
{
"epoch": 0.6577561765897124,
"grad_norm": 1.006942868232727,
"learning_rate": 8.162350843104291e-05,
"loss": 0.2237,
"step": 406
},
{
"epoch": 0.6690968003240179,
"grad_norm": 1.1988211870193481,
"learning_rate": 7.680894386773072e-05,
"loss": 0.2337,
"step": 413
},
{
"epoch": 0.6739570676387201,
"eval_loss": 0.22989404201507568,
"eval_runtime": 101.3384,
"eval_samples_per_second": 4.934,
"eval_steps_per_second": 0.622,
"step": 416
},
{
"epoch": 0.6804374240583232,
"grad_norm": 1.1436697244644165,
"learning_rate": 7.209139276251558e-05,
"loss": 0.2262,
"step": 420
},
{
"epoch": 0.6917780477926286,
"grad_norm": 0.9382355809211731,
"learning_rate": 6.747710814644589e-05,
"loss": 0.2249,
"step": 427
},
{
"epoch": 0.6950182260024301,
"eval_loss": 0.22609174251556396,
"eval_runtime": 104.5487,
"eval_samples_per_second": 4.782,
"eval_steps_per_second": 0.603,
"step": 429
},
{
"epoch": 0.703118671526934,
"grad_norm": 1.0061136484146118,
"learning_rate": 6.297220617265435e-05,
"loss": 0.2168,
"step": 434
},
{
"epoch": 0.7144592952612394,
"grad_norm": 1.0379403829574585,
"learning_rate": 5.858265800950438e-05,
"loss": 0.2271,
"step": 441
},
{
"epoch": 0.7160793843661402,
"eval_loss": 0.22214940190315247,
"eval_runtime": 12.3965,
"eval_samples_per_second": 40.334,
"eval_steps_per_second": 5.082,
"step": 442
},
{
"epoch": 0.7257999189955447,
"grad_norm": 0.9516971111297607,
"learning_rate": 5.4314281925911634e-05,
"loss": 0.2132,
"step": 448
},
{
"epoch": 0.7371405427298502,
"grad_norm": 1.009210467338562,
"learning_rate": 5.0172735579330526e-05,
"loss": 0.2123,
"step": 455
},
{
"epoch": 0.7371405427298502,
"eval_loss": 0.21895556151866913,
"eval_runtime": 100.4419,
"eval_samples_per_second": 4.978,
"eval_steps_per_second": 0.627,
"step": 455
},
{
"epoch": 0.7484811664641555,
"grad_norm": 1.0561505556106567,
"learning_rate": 4.616350851662895e-05,
"loss": 0.2137,
"step": 462
},
{
"epoch": 0.7582017010935601,
"eval_loss": 0.2165665626525879,
"eval_runtime": 90.1033,
"eval_samples_per_second": 5.549,
"eval_steps_per_second": 0.699,
"step": 468
},
{
"epoch": 0.759821790198461,
"grad_norm": 0.9735592007637024,
"learning_rate": 4.229191489779047e-05,
"loss": 0.2153,
"step": 469
},
{
"epoch": 0.7711624139327663,
"grad_norm": 1.0260740518569946,
"learning_rate": 3.8563086452088506e-05,
"loss": 0.2185,
"step": 476
},
{
"epoch": 0.7792628594572701,
"eval_loss": 0.2151920646429062,
"eval_runtime": 12.4453,
"eval_samples_per_second": 40.176,
"eval_steps_per_second": 5.062,
"step": 481
},
{
"epoch": 0.7825030376670717,
"grad_norm": 0.9905771613121033,
"learning_rate": 3.498196567606959e-05,
"loss": 0.211,
"step": 483
},
{
"epoch": 0.7938436614013771,
"grad_norm": 0.9943744540214539,
"learning_rate": 3.1553299282360966e-05,
"loss": 0.2132,
"step": 490
},
{
"epoch": 0.8003240178209802,
"eval_loss": 0.21405138075351715,
"eval_runtime": 101.6391,
"eval_samples_per_second": 4.919,
"eval_steps_per_second": 0.62,
"step": 494
},
{
"epoch": 0.8051842851356824,
"grad_norm": 0.9495494961738586,
"learning_rate": 2.828163190798644e-05,
"loss": 0.2081,
"step": 497
},
{
"epoch": 0.8165249088699879,
"grad_norm": 1.0003526210784912,
"learning_rate": 2.5171300090530106e-05,
"loss": 0.2008,
"step": 504
},
{
"epoch": 0.8213851761846902,
"eval_loss": 0.21176332235336304,
"eval_runtime": 95.0781,
"eval_samples_per_second": 5.259,
"eval_steps_per_second": 0.663,
"step": 507
},
{
"epoch": 0.8278655326042932,
"grad_norm": 1.0116578340530396,
"learning_rate": 2.2226426520131734e-05,
"loss": 0.2043,
"step": 511
},
{
"epoch": 0.8392061563385986,
"grad_norm": 0.8993664979934692,
"learning_rate": 1.9450914574933725e-05,
"loss": 0.2083,
"step": 518
},
{
"epoch": 0.8424463345484001,
"eval_loss": 0.2098345309495926,
"eval_runtime": 12.4496,
"eval_samples_per_second": 40.162,
"eval_steps_per_second": 5.06,
"step": 520
},
{
"epoch": 0.850546780072904,
"grad_norm": 0.992878794670105,
"learning_rate": 1.6848443147221828e-05,
"loss": 0.1988,
"step": 525
},
{
"epoch": 0.8618874038072094,
"grad_norm": 0.9492712616920471,
"learning_rate": 1.4422461767118233e-05,
"loss": 0.2045,
"step": 532
},
{
"epoch": 0.8635074929121102,
"eval_loss": 0.2093399316072464,
"eval_runtime": 87.9109,
"eval_samples_per_second": 5.688,
"eval_steps_per_second": 0.717,
"step": 533
},
{
"epoch": 0.8732280275415147,
"grad_norm": 0.8434467315673828,
"learning_rate": 1.2176186030289936e-05,
"loss": 0.2031,
"step": 539
},
{
"epoch": 0.8845686512758202,
"grad_norm": 0.859354555606842,
"learning_rate": 1.011259333573326e-05,
"loss": 0.2011,
"step": 546
},
{
"epoch": 0.8845686512758202,
"eval_loss": 0.20857252180576324,
"eval_runtime": 100.5488,
"eval_samples_per_second": 4.973,
"eval_steps_per_second": 0.627,
"step": 546
},
{
"epoch": 0.8959092750101255,
"grad_norm": 0.8895564675331116,
"learning_rate": 8.234418939283866e-06,
"loss": 0.1969,
"step": 553
},
{
"epoch": 0.9056298096395302,
"eval_loss": 0.2077966034412384,
"eval_runtime": 12.4058,
"eval_samples_per_second": 40.304,
"eval_steps_per_second": 5.078,
"step": 559
},
{
"epoch": 0.907249898744431,
"grad_norm": 0.9060482382774353,
"learning_rate": 6.544152328083152e-06,
"loss": 0.2043,
"step": 560
},
{
"epoch": 0.9185905224787363,
"grad_norm": 0.9856771230697632,
"learning_rate": 5.044033920806933e-06,
"loss": 0.1979,
"step": 567
},
{
"epoch": 0.9266909680032401,
"eval_loss": 0.20735213160514832,
"eval_runtime": 93.0309,
"eval_samples_per_second": 5.375,
"eval_steps_per_second": 0.677,
"step": 572
},
{
"epoch": 0.9299311462130417,
"grad_norm": 0.9075601100921631,
"learning_rate": 3.7360520980297514e-06,
"loss": 0.1996,
"step": 574
},
{
"epoch": 0.9412717699473471,
"grad_norm": 0.9999048113822937,
"learning_rate": 2.6219405666614402e-06,
"loss": 0.203,
"step": 581
},
{
"epoch": 0.9477521263669502,
"eval_loss": 0.20708170533180237,
"eval_runtime": 101.6385,
"eval_samples_per_second": 4.919,
"eval_steps_per_second": 0.62,
"step": 585
},
{
"epoch": 0.9526123936816525,
"grad_norm": 1.077417016029358,
"learning_rate": 1.7031760619491353e-06,
"loss": 0.1991,
"step": 588
},
{
"epoch": 0.9639530174159578,
"grad_norm": 1.0060704946517944,
"learning_rate": 9.809763900905875e-07,
"loss": 0.2037,
"step": 595
},
{
"epoch": 0.9688132847306602,
"eval_loss": 0.2068352997303009,
"eval_runtime": 12.4417,
"eval_samples_per_second": 40.187,
"eval_steps_per_second": 5.064,
"step": 598
},
{
"epoch": 0.9752936411502633,
"grad_norm": 0.9167914986610413,
"learning_rate": 4.562988140535073e-07,
"loss": 0.2022,
"step": 602
},
{
"epoch": 0.9866342648845686,
"grad_norm": 0.9976924061775208,
"learning_rate": 1.298387847403437e-07,
"loss": 0.2047,
"step": 609
},
{
"epoch": 0.9898744430943702,
"eval_loss": 0.20685938000679016,
"eval_runtime": 100.7905,
"eval_samples_per_second": 4.961,
"eval_steps_per_second": 0.625,
"step": 611
},
{
"epoch": 0.9979748886188741,
"grad_norm": 1.0042866468429565,
"learning_rate": 2.029019180288527e-09,
"loss": 0.2052,
"step": 616
}
],
"logging_steps": 7,
"max_steps": 617,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 13,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.47773295983788e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}