{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.997349589186324,
  "eval_steps": 200,
  "global_step": 942,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010601643254704479,
      "grad_norm": 0.8797913789749146,
      "learning_rate": 5.208333333333334e-06,
      "loss": 3.3968,
      "num_input_tokens_seen": 10485760,
      "step": 5
    },
    {
      "epoch": 0.021203286509408958,
      "grad_norm": 0.5857719779014587,
      "learning_rate": 1.0416666666666668e-05,
      "loss": 3.3102,
      "num_input_tokens_seen": 20971520,
      "step": 10
    },
    {
      "epoch": 0.031804929764113435,
      "grad_norm": 0.38287150859832764,
      "learning_rate": 1.5625e-05,
      "loss": 3.3919,
      "num_input_tokens_seen": 31457280,
      "step": 15
    },
    {
      "epoch": 0.042406573018817915,
      "grad_norm": 0.2890233099460602,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 3.3448,
      "num_input_tokens_seen": 41943040,
      "step": 20
    },
    {
      "epoch": 0.053008216273522396,
      "grad_norm": 0.28800511360168457,
      "learning_rate": 2.604166666666667e-05,
      "loss": 3.247,
      "num_input_tokens_seen": 52428800,
      "step": 25
    },
    {
      "epoch": 0.06360985952822687,
      "grad_norm": 0.5624825358390808,
      "learning_rate": 3.125e-05,
      "loss": 3.3736,
      "num_input_tokens_seen": 62914560,
      "step": 30
    },
    {
      "epoch": 0.07421150278293136,
      "grad_norm": 0.2708434760570526,
      "learning_rate": 3.6458333333333336e-05,
      "loss": 3.3105,
      "num_input_tokens_seen": 73400320,
      "step": 35
    },
    {
      "epoch": 0.08481314603763583,
      "grad_norm": 0.26101428270339966,
      "learning_rate": 4.166666666666667e-05,
      "loss": 3.3523,
      "num_input_tokens_seen": 83886080,
      "step": 40
    },
    {
      "epoch": 0.09541478929234032,
      "grad_norm": 0.22077707946300507,
      "learning_rate": 4.6875e-05,
      "loss": 3.3395,
      "num_input_tokens_seen": 94371840,
      "step": 45
    },
    {
      "epoch": 0.10601643254704479,
      "grad_norm": 0.2976844608783722,
      "learning_rate": 4.9999382562611344e-05,
      "loss": 3.2379,
      "num_input_tokens_seen": 104857600,
      "step": 50
    },
    {
      "epoch": 0.11661807580174927,
      "grad_norm": 0.32657739520072937,
      "learning_rate": 4.999243674223826e-05,
      "loss": 3.2281,
      "num_input_tokens_seen": 115343360,
      "step": 55
    },
    {
      "epoch": 0.12721971905645374,
      "grad_norm": 0.2382456362247467,
      "learning_rate": 4.997777545616258e-05,
      "loss": 3.2807,
      "num_input_tokens_seen": 125829120,
      "step": 60
    },
    {
      "epoch": 0.13782136231115824,
      "grad_norm": 0.4440530836582184,
      "learning_rate": 4.99554032304996e-05,
      "loss": 3.2192,
      "num_input_tokens_seen": 136314880,
      "step": 65
    },
    {
      "epoch": 0.14842300556586271,
      "grad_norm": 0.2103557139635086,
      "learning_rate": 4.9925326971824345e-05,
      "loss": 3.3152,
      "num_input_tokens_seen": 146800640,
      "step": 70
    },
    {
      "epoch": 0.1590246488205672,
      "grad_norm": 0.252693772315979,
      "learning_rate": 4.988755596503948e-05,
      "loss": 3.2393,
      "num_input_tokens_seen": 157286400,
      "step": 75
    },
    {
      "epoch": 0.16962629207527166,
      "grad_norm": 0.21133287250995636,
      "learning_rate": 4.9842101870508904e-05,
      "loss": 3.2727,
      "num_input_tokens_seen": 167772160,
      "step": 80
    },
    {
      "epoch": 0.18022793532997614,
      "grad_norm": 0.24876847863197327,
      "learning_rate": 4.9788978720458104e-05,
      "loss": 3.2566,
      "num_input_tokens_seen": 178257920,
      "step": 85
    },
    {
      "epoch": 0.19082957858468064,
      "grad_norm": 0.2968827486038208,
      "learning_rate": 4.9728202914642183e-05,
      "loss": 3.3258,
      "num_input_tokens_seen": 188743680,
      "step": 90
    },
    {
      "epoch": 0.2014312218393851,
      "grad_norm": 0.17700955271720886,
      "learning_rate": 4.965979321528309e-05,
      "loss": 3.2355,
      "num_input_tokens_seen": 199229440,
      "step": 95
    },
    {
      "epoch": 0.21203286509408958,
      "grad_norm": 0.20748387277126312,
      "learning_rate": 4.9583770741277505e-05,
      "loss": 3.195,
      "num_input_tokens_seen": 209715200,
      "step": 100
    },
    {
      "epoch": 0.22263450834879406,
      "grad_norm": 1.2878308296203613,
      "learning_rate": 4.950015896167716e-05,
      "loss": 3.3275,
      "num_input_tokens_seen": 220200960,
      "step": 105
    },
    {
      "epoch": 0.23323615160349853,
      "grad_norm": 0.26103636622428894,
      "learning_rate": 4.9408983688443654e-05,
      "loss": 3.3154,
      "num_input_tokens_seen": 230686720,
      "step": 110
    },
    {
      "epoch": 0.24383779485820303,
      "grad_norm": 0.2692287266254425,
      "learning_rate": 4.931027306848004e-05,
      "loss": 3.2573,
      "num_input_tokens_seen": 241172480,
      "step": 115
    },
    {
      "epoch": 0.2544394381129075,
      "grad_norm": 0.2487805187702179,
      "learning_rate": 4.920405757494147e-05,
      "loss": 3.2915,
      "num_input_tokens_seen": 251658240,
      "step": 120
    },
    {
      "epoch": 0.26504108136761195,
      "grad_norm": 0.17953425645828247,
      "learning_rate": 4.9090369997827826e-05,
      "loss": 3.1941,
      "num_input_tokens_seen": 262144000,
      "step": 125
    },
    {
      "epoch": 0.2756427246223165,
      "grad_norm": 0.19957853853702545,
      "learning_rate": 4.896924543386099e-05,
      "loss": 3.3033,
      "num_input_tokens_seen": 272629760,
      "step": 130
    },
    {
      "epoch": 0.28624436787702096,
      "grad_norm": 0.22015607357025146,
      "learning_rate": 4.884072127565014e-05,
      "loss": 3.2327,
      "num_input_tokens_seen": 283115520,
      "step": 135
    },
    {
      "epoch": 0.29684601113172543,
      "grad_norm": 0.241306334733963,
      "learning_rate": 4.870483720014814e-05,
      "loss": 3.2124,
      "num_input_tokens_seen": 293601280,
      "step": 140
    },
    {
      "epoch": 0.3074476543864299,
      "grad_norm": 0.49794742465019226,
      "learning_rate": 4.85616351564028e-05,
      "loss": 3.312,
      "num_input_tokens_seen": 304087040,
      "step": 145
    },
    {
      "epoch": 0.3180492976411344,
      "grad_norm": 0.18301662802696228,
      "learning_rate": 4.8411159352606734e-05,
      "loss": 3.295,
      "num_input_tokens_seen": 314572800,
      "step": 150
    },
    {
      "epoch": 0.32865094089583885,
      "grad_norm": 0.23481640219688416,
      "learning_rate": 4.8253456242449704e-05,
      "loss": 3.2555,
      "num_input_tokens_seen": 325058560,
      "step": 155
    },
    {
      "epoch": 0.3392525841505433,
      "grad_norm": 0.2195325493812561,
      "learning_rate": 4.808857451077788e-05,
      "loss": 3.1667,
      "num_input_tokens_seen": 335544320,
      "step": 160
    },
    {
      "epoch": 0.3498542274052478,
      "grad_norm": 0.23738664388656616,
      "learning_rate": 4.7916565058564155e-05,
      "loss": 3.2499,
      "num_input_tokens_seen": 346030080,
      "step": 165
    },
    {
      "epoch": 0.36045587065995227,
      "grad_norm": 0.3056640028953552,
      "learning_rate": 4.7737480987194484e-05,
      "loss": 3.2478,
      "num_input_tokens_seen": 356515840,
      "step": 170
    },
    {
      "epoch": 0.37105751391465674,
      "grad_norm": 0.18291524052619934,
      "learning_rate": 4.755137758207479e-05,
      "loss": 3.2648,
      "num_input_tokens_seen": 367001600,
      "step": 175
    },
    {
      "epoch": 0.3816591571693613,
      "grad_norm": 0.20183615386486053,
      "learning_rate": 4.7358312295563734e-05,
      "loss": 3.1717,
      "num_input_tokens_seen": 377487360,
      "step": 180
    },
    {
      "epoch": 0.39226080042406575,
      "grad_norm": 0.19222132861614227,
      "learning_rate": 4.7158344729236454e-05,
      "loss": 3.2288,
      "num_input_tokens_seen": 387973120,
      "step": 185
    },
    {
      "epoch": 0.4028624436787702,
      "grad_norm": 0.16234181821346283,
      "learning_rate": 4.6951536615484854e-05,
      "loss": 3.2316,
      "num_input_tokens_seen": 398458880,
      "step": 190
    },
    {
      "epoch": 0.4134640869334747,
      "grad_norm": 0.21459302306175232,
      "learning_rate": 4.673795179846007e-05,
      "loss": 3.1703,
      "num_input_tokens_seen": 408944640,
      "step": 195
    },
    {
      "epoch": 0.42406573018817917,
      "grad_norm": 0.1751609593629837,
      "learning_rate": 4.651765621436303e-05,
      "loss": 3.2013,
      "num_input_tokens_seen": 419430400,
      "step": 200
    },
    {
      "epoch": 0.42406573018817917,
      "eval_accuracy": 0.44791084254003133,
      "eval_loss": 3.0653164386749268,
      "eval_runtime": 36.089,
      "eval_samples_per_second": 8.313,
      "eval_steps_per_second": 2.078,
      "num_input_tokens_seen": 419430400,
      "step": 200
    },
    {
      "epoch": 0.43466737344288364,
      "grad_norm": 0.15440785884857178,
      "learning_rate": 4.62907178710891e-05,
      "loss": 3.2764,
      "num_input_tokens_seen": 429916160,
      "step": 205
    },
    {
      "epoch": 0.4452690166975881,
      "grad_norm": 0.19598723948001862,
      "learning_rate": 4.60572068272333e-05,
      "loss": 3.2128,
      "num_input_tokens_seen": 440401920,
      "step": 210
    },
    {
      "epoch": 0.4558706599522926,
      "grad_norm": 0.21214532852172852,
      "learning_rate": 4.581719517046236e-05,
      "loss": 3.1353,
      "num_input_tokens_seen": 450887680,
      "step": 215
    },
    {
      "epoch": 0.46647230320699706,
      "grad_norm": 0.181034654378891,
      "learning_rate": 4.557075699526032e-05,
      "loss": 3.2866,
      "num_input_tokens_seen": 461373440,
      "step": 220
    },
    {
      "epoch": 0.47707394646170154,
      "grad_norm": 0.16727791726589203,
      "learning_rate": 4.531796838005477e-05,
      "loss": 3.2111,
      "num_input_tokens_seen": 471859200,
      "step": 225
    },
    {
      "epoch": 0.48767558971640607,
      "grad_norm": 0.1804332137107849,
      "learning_rate": 4.505890736373045e-05,
      "loss": 3.297,
      "num_input_tokens_seen": 482344960,
      "step": 230
    },
    {
      "epoch": 0.49827723297111054,
      "grad_norm": 0.21492359042167664,
      "learning_rate": 4.479365392153776e-05,
      "loss": 3.1203,
      "num_input_tokens_seen": 492830720,
      "step": 235
    },
    {
      "epoch": 0.508878876225815,
      "grad_norm": 0.2664985656738281,
      "learning_rate": 4.4522289940403404e-05,
      "loss": 3.1352,
      "num_input_tokens_seen": 503316480,
      "step": 240
    },
    {
      "epoch": 0.5194805194805194,
      "grad_norm": 0.18706196546554565,
      "learning_rate": 4.4244899193650933e-05,
      "loss": 3.1747,
      "num_input_tokens_seen": 513802240,
      "step": 245
    },
    {
      "epoch": 0.5300821627352239,
      "grad_norm": 0.2427309900522232,
      "learning_rate": 4.3961567315138885e-05,
      "loss": 3.1677,
      "num_input_tokens_seen": 524288000,
      "step": 250
    },
    {
      "epoch": 0.5406838059899285,
      "grad_norm": 0.17540065944194794,
      "learning_rate": 4.3672381772824615e-05,
      "loss": 3.2012,
      "num_input_tokens_seen": 534773760,
      "step": 255
    },
    {
      "epoch": 0.551285449244633,
      "grad_norm": 0.31619423627853394,
      "learning_rate": 4.3377431841761875e-05,
      "loss": 3.1498,
      "num_input_tokens_seen": 545259520,
      "step": 260
    },
    {
      "epoch": 0.5618870924993374,
      "grad_norm": 0.24162785708904266,
      "learning_rate": 4.307680857654052e-05,
      "loss": 3.1703,
      "num_input_tokens_seen": 555745280,
      "step": 265
    },
    {
      "epoch": 0.5724887357540419,
      "grad_norm": 0.22926253080368042,
      "learning_rate": 4.277060478317687e-05,
      "loss": 3.2395,
      "num_input_tokens_seen": 566231040,
      "step": 270
    },
    {
      "epoch": 0.5830903790087464,
      "grad_norm": 0.23989850282669067,
      "learning_rate": 4.245891499046338e-05,
      "loss": 3.2383,
      "num_input_tokens_seen": 576716800,
      "step": 275
    },
    {
      "epoch": 0.5936920222634509,
      "grad_norm": 0.20367790758609772,
      "learning_rate": 4.214183542078646e-05,
      "loss": 3.1489,
      "num_input_tokens_seen": 587202560,
      "step": 280
    },
    {
      "epoch": 0.6042936655181553,
      "grad_norm": 0.22007331252098083,
      "learning_rate": 4.1819463960421454e-05,
      "loss": 3.1707,
      "num_input_tokens_seen": 597688320,
      "step": 285
    },
    {
      "epoch": 0.6148953087728598,
      "grad_norm": 0.19839680194854736,
      "learning_rate": 4.149190012931402e-05,
      "loss": 3.1679,
      "num_input_tokens_seen": 608174080,
      "step": 290
    },
    {
      "epoch": 0.6254969520275643,
      "grad_norm": 0.23358359932899475,
      "learning_rate": 4.1159245050357065e-05,
      "loss": 3.1629,
      "num_input_tokens_seen": 618659840,
      "step": 295
    },
    {
      "epoch": 0.6360985952822688,
      "grad_norm": 0.16982465982437134,
      "learning_rate": 4.082160141817293e-05,
      "loss": 3.16,
      "num_input_tokens_seen": 629145600,
      "step": 300
    },
    {
      "epoch": 0.6467002385369732,
      "grad_norm": 0.3546576201915741,
      "learning_rate": 4.0479073467410286e-05,
      "loss": 3.2448,
      "num_input_tokens_seen": 639631360,
      "step": 305
    },
    {
      "epoch": 0.6573018817916777,
      "grad_norm": 0.16487599909305573,
      "learning_rate": 4.0131766940565715e-05,
      "loss": 3.1789,
      "num_input_tokens_seen": 650117120,
      "step": 310
    },
    {
      "epoch": 0.6679035250463822,
      "grad_norm": 0.26232755184173584,
      "learning_rate": 3.9779789055339656e-05,
      "loss": 3.1994,
      "num_input_tokens_seen": 660602880,
      "step": 315
    },
    {
      "epoch": 0.6785051683010866,
      "grad_norm": 0.17949418723583221,
      "learning_rate": 3.9423248471537065e-05,
      "loss": 3.1464,
      "num_input_tokens_seen": 671088640,
      "step": 320
    },
    {
      "epoch": 0.6891068115557911,
      "grad_norm": 0.22808890044689178,
      "learning_rate": 3.9062255257522794e-05,
      "loss": 3.2207,
      "num_input_tokens_seen": 681574400,
      "step": 325
    },
    {
      "epoch": 0.6997084548104956,
      "grad_norm": 0.20621077716350555,
      "learning_rate": 3.8696920856242174e-05,
      "loss": 3.1688,
      "num_input_tokens_seen": 692060160,
      "step": 330
    },
    {
      "epoch": 0.7103100980652001,
      "grad_norm": 0.20659266412258148,
      "learning_rate": 3.8327358050817234e-05,
      "loss": 3.1726,
      "num_input_tokens_seen": 702545920,
      "step": 335
    },
    {
      "epoch": 0.7209117413199045,
      "grad_norm": 0.23239997029304504,
      "learning_rate": 3.7953680929729215e-05,
      "loss": 3.2254,
      "num_input_tokens_seen": 713031680,
      "step": 340
    },
    {
      "epoch": 0.731513384574609,
      "grad_norm": 0.15853458642959595,
      "learning_rate": 3.757600485159805e-05,
      "loss": 3.1453,
      "num_input_tokens_seen": 723517440,
      "step": 345
    },
    {
      "epoch": 0.7421150278293135,
      "grad_norm": 0.28123387694358826,
      "learning_rate": 3.719444640956981e-05,
      "loss": 3.1849,
      "num_input_tokens_seen": 734003200,
      "step": 350
    },
    {
      "epoch": 0.7527166710840181,
      "grad_norm": 0.16030676662921906,
      "learning_rate": 3.680912339532296e-05,
      "loss": 3.187,
      "num_input_tokens_seen": 744488960,
      "step": 355
    },
    {
      "epoch": 0.7633183143387225,
      "grad_norm": 0.1698087602853775,
      "learning_rate": 3.6420154762704686e-05,
      "loss": 3.1603,
      "num_input_tokens_seen": 754974720,
      "step": 360
    },
    {
      "epoch": 0.773919957593427,
      "grad_norm": 0.1804792284965515,
      "learning_rate": 3.602766059100838e-05,
      "loss": 3.1501,
      "num_input_tokens_seen": 765460480,
      "step": 365
    },
    {
      "epoch": 0.7845216008481315,
      "grad_norm": 0.22060543298721313,
      "learning_rate": 3.563176204790374e-05,
      "loss": 3.2135,
      "num_input_tokens_seen": 775946240,
      "step": 370
    },
    {
      "epoch": 0.795123244102836,
      "grad_norm": 0.2411007434129715,
      "learning_rate": 3.523258135203087e-05,
      "loss": 3.1263,
      "num_input_tokens_seen": 786432000,
      "step": 375
    },
    {
      "epoch": 0.8057248873575404,
      "grad_norm": 0.17039397358894348,
      "learning_rate": 3.483024173526985e-05,
      "loss": 3.1278,
      "num_input_tokens_seen": 796917760,
      "step": 380
    },
    {
      "epoch": 0.8163265306122449,
      "grad_norm": 0.17096886038780212,
      "learning_rate": 3.442486740469766e-05,
      "loss": 3.1805,
      "num_input_tokens_seen": 807403520,
      "step": 385
    },
    {
      "epoch": 0.8269281738669494,
      "grad_norm": 0.20285804569721222,
      "learning_rate": 3.401658350424389e-05,
      "loss": 3.1093,
      "num_input_tokens_seen": 817889280,
      "step": 390
    },
    {
      "epoch": 0.8375298171216539,
      "grad_norm": 0.17684973776340485,
      "learning_rate": 3.360551607605735e-05,
      "loss": 3.1336,
      "num_input_tokens_seen": 828375040,
      "step": 395
    },
    {
      "epoch": 0.8481314603763583,
      "grad_norm": 0.16076605021953583,
      "learning_rate": 3.3191792021595316e-05,
      "loss": 3.1976,
      "num_input_tokens_seen": 838860800,
      "step": 400
    },
    {
      "epoch": 0.8481314603763583,
      "eval_accuracy": 0.4506036745406824,
      "eval_loss": 3.043421983718872,
      "eval_runtime": 36.2661,
      "eval_samples_per_second": 8.272,
      "eval_steps_per_second": 2.068,
      "num_input_tokens_seen": 838860800,
      "step": 400
    },
    {
      "epoch": 0.8587331036310628,
      "grad_norm": 0.1959790736436844,
      "learning_rate": 3.277553906244756e-05,
      "loss": 3.2013,
      "num_input_tokens_seen": 849346560,
      "step": 405
    },
    {
      "epoch": 0.8693347468857673,
      "grad_norm": 0.15397347509860992,
      "learning_rate": 3.23568857009071e-05,
      "loss": 3.124,
      "num_input_tokens_seen": 859832320,
      "step": 410
    },
    {
      "epoch": 0.8799363901404718,
      "grad_norm": 0.22286681830883026,
      "learning_rate": 3.193596118030005e-05,
      "loss": 3.1723,
      "num_input_tokens_seen": 870318080,
      "step": 415
    },
    {
      "epoch": 0.8905380333951762,
      "grad_norm": 0.22909095883369446,
      "learning_rate": 3.1512895445086636e-05,
      "loss": 3.1068,
      "num_input_tokens_seen": 880803840,
      "step": 420
    },
    {
      "epoch": 0.9011396766498807,
      "grad_norm": 0.24190495908260345,
      "learning_rate": 3.108781910074578e-05,
      "loss": 3.143,
      "num_input_tokens_seen": 891289600,
      "step": 425
    },
    {
      "epoch": 0.9117413199045852,
      "grad_norm": 0.24453699588775635,
      "learning_rate": 3.0660863373455595e-05,
      "loss": 3.1667,
      "num_input_tokens_seen": 901775360,
      "step": 430
    },
    {
      "epoch": 0.9223429631592897,
      "grad_norm": 0.1636463701725006,
      "learning_rate": 3.0232160069582332e-05,
      "loss": 3.1123,
      "num_input_tokens_seen": 912261120,
      "step": 435
    },
    {
      "epoch": 0.9329446064139941,
      "grad_norm": 0.14330972731113434,
      "learning_rate": 2.9801841534990115e-05,
      "loss": 3.1616,
      "num_input_tokens_seen": 922746880,
      "step": 440
    },
    {
      "epoch": 0.9435462496686986,
      "grad_norm": 0.14314919710159302,
      "learning_rate": 2.9370040614184245e-05,
      "loss": 3.1241,
      "num_input_tokens_seen": 933232640,
      "step": 445
    },
    {
      "epoch": 0.9541478929234031,
      "grad_norm": 0.15261518955230713,
      "learning_rate": 2.893689060930045e-05,
      "loss": 3.1573,
      "num_input_tokens_seen": 943718400,
      "step": 450
    },
    {
      "epoch": 0.9647495361781077,
      "grad_norm": 0.14767758548259735,
      "learning_rate": 2.8502525238952916e-05,
      "loss": 3.2165,
      "num_input_tokens_seen": 954204160,
      "step": 455
    },
    {
      "epoch": 0.9753511794328121,
      "grad_norm": 0.161110982298851,
      "learning_rate": 2.8067078596953796e-05,
      "loss": 3.1457,
      "num_input_tokens_seen": 964689920,
      "step": 460
    },
    {
      "epoch": 0.9859528226875166,
      "grad_norm": 0.15843427181243896,
      "learning_rate": 2.7630685110916778e-05,
      "loss": 3.143,
      "num_input_tokens_seen": 975175680,
      "step": 465
    },
    {
      "epoch": 0.9965544659422211,
      "grad_norm": 0.24187405407428741,
      "learning_rate": 2.7193479500757685e-05,
      "loss": 3.2156,
      "num_input_tokens_seen": 985661440,
      "step": 470
    },
    {
      "epoch": 1.0071561091969254,
      "grad_norm": 0.15035971999168396,
      "learning_rate": 2.675559673710485e-05,
      "loss": 3.0852,
      "num_input_tokens_seen": 996147200,
      "step": 475
    },
    {
      "epoch": 1.01775775245163,
      "grad_norm": 0.17717382311820984,
      "learning_rate": 2.631717199963199e-05,
      "loss": 3.1221,
      "num_input_tokens_seen": 1006632960,
      "step": 480
    },
    {
      "epoch": 1.0283593957063344,
      "grad_norm": 0.1390335112810135,
      "learning_rate": 2.5878340635326686e-05,
      "loss": 3.1358,
      "num_input_tokens_seen": 1017118720,
      "step": 485
    },
    {
      "epoch": 1.0389610389610389,
      "grad_norm": 0.14454010128974915,
      "learning_rate": 2.5439238116707102e-05,
      "loss": 3.1307,
      "num_input_tokens_seen": 1027604480,
      "step": 490
    },
    {
      "epoch": 1.0495626822157433,
      "grad_norm": 0.19984786212444305,
      "learning_rate": 2.5e-05,
      "loss": 3.1269,
      "num_input_tokens_seen": 1038090240,
      "step": 495
    },
    {
      "epoch": 1.0601643254704478,
      "grad_norm": 0.263656347990036,
      "learning_rate": 2.45607618832929e-05,
      "loss": 3.1076,
      "num_input_tokens_seen": 1048576000,
      "step": 500
    },
    {
      "epoch": 1.0707659687251523,
      "grad_norm": 0.15432648360729218,
      "learning_rate": 2.412165936467332e-05,
      "loss": 3.1218,
      "num_input_tokens_seen": 1059061760,
      "step": 505
    },
    {
      "epoch": 1.081367611979857,
      "grad_norm": 0.24721471965312958,
      "learning_rate": 2.368282800036801e-05,
      "loss": 3.1035,
      "num_input_tokens_seen": 1069547520,
      "step": 510
    },
    {
      "epoch": 1.0919692552345615,
      "grad_norm": 0.1441834270954132,
      "learning_rate": 2.3244403262895153e-05,
      "loss": 3.1801,
      "num_input_tokens_seen": 1080033280,
      "step": 515
    },
    {
      "epoch": 1.102570898489266,
      "grad_norm": 0.1535598337650299,
      "learning_rate": 2.280652049924232e-05,
      "loss": 3.2002,
      "num_input_tokens_seen": 1090519040,
      "step": 520
    },
    {
      "epoch": 1.1131725417439704,
      "grad_norm": 0.15322482585906982,
      "learning_rate": 2.2369314889083235e-05,
      "loss": 3.1208,
      "num_input_tokens_seen": 1101004800,
      "step": 525
    },
    {
      "epoch": 1.1237741849986749,
      "grad_norm": 0.1468251496553421,
      "learning_rate": 2.1932921403046207e-05,
      "loss": 3.1917,
      "num_input_tokens_seen": 1111490560,
      "step": 530
    },
    {
      "epoch": 1.1343758282533793,
      "grad_norm": 0.1530437469482422,
      "learning_rate": 2.1497474761047086e-05,
      "loss": 3.0797,
      "num_input_tokens_seen": 1121976320,
      "step": 535
    },
    {
      "epoch": 1.1449774715080838,
      "grad_norm": 0.1411171704530716,
      "learning_rate": 2.106310939069956e-05,
      "loss": 3.1136,
      "num_input_tokens_seen": 1132462080,
      "step": 540
    },
    {
      "epoch": 1.1555791147627883,
      "grad_norm": 0.14499180018901825,
      "learning_rate": 2.0629959385815757e-05,
      "loss": 3.1427,
      "num_input_tokens_seen": 1142947840,
      "step": 545
    },
    {
      "epoch": 1.1661807580174928,
      "grad_norm": 0.14953380823135376,
      "learning_rate": 2.019815846500988e-05,
      "loss": 3.1948,
      "num_input_tokens_seen": 1153433600,
      "step": 550
    },
    {
      "epoch": 1.1767824012721972,
      "grad_norm": 0.1349666863679886,
      "learning_rate": 1.9767839930417673e-05,
      "loss": 3.1605,
      "num_input_tokens_seen": 1163919360,
      "step": 555
    },
    {
      "epoch": 1.1873840445269017,
      "grad_norm": 0.144802987575531,
      "learning_rate": 1.9339136626544407e-05,
      "loss": 3.1857,
      "num_input_tokens_seen": 1174405120,
      "step": 560
    },
    {
      "epoch": 1.1979856877816062,
      "grad_norm": 0.16848242282867432,
      "learning_rate": 1.891218089925423e-05,
      "loss": 3.18,
      "num_input_tokens_seen": 1184890880,
      "step": 565
    },
    {
      "epoch": 1.2085873310363107,
      "grad_norm": 0.15472151339054108,
      "learning_rate": 1.8487104554913363e-05,
      "loss": 3.1939,
      "num_input_tokens_seen": 1195376640,
      "step": 570
    },
    {
      "epoch": 1.2191889742910151,
      "grad_norm": 0.1529063582420349,
      "learning_rate": 1.806403881969996e-05,
      "loss": 3.142,
      "num_input_tokens_seen": 1205862400,
      "step": 575
    },
    {
      "epoch": 1.2297906175457196,
      "grad_norm": 0.1687614619731903,
      "learning_rate": 1.764311429909291e-05,
      "loss": 3.1344,
      "num_input_tokens_seen": 1216348160,
      "step": 580
    },
    {
      "epoch": 1.240392260800424,
      "grad_norm": 0.13812561333179474,
      "learning_rate": 1.7224460937552446e-05,
      "loss": 3.1626,
      "num_input_tokens_seen": 1226833920,
      "step": 585
    },
    {
      "epoch": 1.2509939040551286,
      "grad_norm": 0.16453585028648376,
      "learning_rate": 1.6808207978404683e-05,
      "loss": 3.1616,
      "num_input_tokens_seen": 1237319680,
      "step": 590
    },
    {
      "epoch": 1.261595547309833,
      "grad_norm": 0.13337315618991852,
      "learning_rate": 1.6394483923942655e-05,
      "loss": 3.1413,
      "num_input_tokens_seen": 1247805440,
      "step": 595
    },
    {
      "epoch": 1.2721971905645375,
      "grad_norm": 0.2029571831226349,
      "learning_rate": 1.5983416495756116e-05,
      "loss": 3.1485,
      "num_input_tokens_seen": 1258291200,
      "step": 600
    },
    {
      "epoch": 1.2721971905645375,
      "eval_accuracy": 0.45126899835195017,
      "eval_loss": 3.0374794006347656,
      "eval_runtime": 36.1798,
      "eval_samples_per_second": 8.292,
      "eval_steps_per_second": 2.073,
      "num_input_tokens_seen": 1258291200,
      "step": 600
    },
    {
      "epoch": 1.282798833819242,
      "grad_norm": 0.21333394944667816,
      "learning_rate": 1.5575132595302352e-05,
      "loss": 3.1131,
      "num_input_tokens_seen": 1268776960,
      "step": 605
    },
    {
      "epoch": 1.2934004770739465,
      "grad_norm": 0.1743020862340927,
      "learning_rate": 1.516975826473015e-05,
      "loss": 3.1177,
      "num_input_tokens_seen": 1279262720,
      "step": 610
    },
    {
      "epoch": 1.304002120328651,
      "grad_norm": 0.15571139752864838,
      "learning_rate": 1.4767418647969133e-05,
      "loss": 3.1626,
      "num_input_tokens_seen": 1289748480,
      "step": 615
    },
    {
      "epoch": 1.3146037635833554,
      "grad_norm": 0.16648022830486298,
      "learning_rate": 1.4368237952096258e-05,
      "loss": 3.1987,
      "num_input_tokens_seen": 1300234240,
      "step": 620
    },
    {
      "epoch": 1.3252054068380599,
      "grad_norm": 0.2865128219127655,
      "learning_rate": 1.3972339408991626e-05,
      "loss": 3.176,
      "num_input_tokens_seen": 1310720000,
      "step": 625
    },
    {
      "epoch": 1.3358070500927643,
      "grad_norm": 0.14998364448547363,
      "learning_rate": 1.357984523729533e-05,
      "loss": 3.1568,
      "num_input_tokens_seen": 1321205760,
      "step": 630
    },
    {
      "epoch": 1.3464086933474688,
      "grad_norm": 0.14780345559120178,
      "learning_rate": 1.3190876604677043e-05,
      "loss": 3.1895,
      "num_input_tokens_seen": 1331691520,
      "step": 635
    },
    {
      "epoch": 1.3570103366021733,
      "grad_norm": 0.15796662867069244,
      "learning_rate": 1.2805553590430197e-05,
      "loss": 3.1708,
      "num_input_tokens_seen": 1342177280,
      "step": 640
    },
    {
      "epoch": 1.3676119798568778,
      "grad_norm": 0.14260126650333405,
      "learning_rate": 1.2423995148401954e-05,
      "loss": 3.1649,
      "num_input_tokens_seen": 1352663040,
      "step": 645
    },
    {
      "epoch": 1.3782136231115822,
      "grad_norm": 0.16667188704013824,
      "learning_rate": 1.2046319070270792e-05,
      "loss": 3.1345,
      "num_input_tokens_seen": 1363148800,
      "step": 650
    },
    {
      "epoch": 1.3888152663662867,
      "grad_norm": 0.13283655047416687,
      "learning_rate": 1.1672641949182769e-05,
      "loss": 3.1653,
      "num_input_tokens_seen": 1373634560,
      "step": 655
    },
    {
      "epoch": 1.3994169096209912,
      "grad_norm": 0.1479809582233429,
      "learning_rate": 1.130307914375783e-05,
      "loss": 3.2014,
      "num_input_tokens_seen": 1384120320,
      "step": 660
    },
    {
      "epoch": 1.4100185528756957,
      "grad_norm": 0.1443646103143692,
      "learning_rate": 1.093774474247721e-05,
      "loss": 3.1561,
      "num_input_tokens_seen": 1394606080,
      "step": 665
    },
    {
      "epoch": 1.4206201961304001,
      "grad_norm": 0.15595485270023346,
      "learning_rate": 1.0576751528462935e-05,
      "loss": 3.1774,
      "num_input_tokens_seen": 1405091840,
      "step": 670
    },
    {
      "epoch": 1.4312218393851046,
      "grad_norm": 0.1376049816608429,
      "learning_rate": 1.0220210944660338e-05,
      "loss": 3.1102,
      "num_input_tokens_seen": 1415577600,
      "step": 675
    },
    {
      "epoch": 1.4418234826398093,
      "grad_norm": 0.14685603976249695,
      "learning_rate": 9.868233059434288e-06,
      "loss": 3.2077,
      "num_input_tokens_seen": 1426063360,
      "step": 680
    },
    {
      "epoch": 1.4524251258945138,
      "grad_norm": 0.13503292202949524,
      "learning_rate": 9.520926532589725e-06,
      "loss": 3.1899,
      "num_input_tokens_seen": 1436549120,
      "step": 685
    },
    {
      "epoch": 1.4630267691492183,
      "grad_norm": 0.18555864691734314,
      "learning_rate": 9.178398581827085e-06,
      "loss": 3.2424,
      "num_input_tokens_seen": 1447034880,
      "step": 690
    },
    {
      "epoch": 1.4736284124039227,
      "grad_norm": 0.26128634810447693,
      "learning_rate": 8.840754949642935e-06,
      "loss": 3.1609,
      "num_input_tokens_seen": 1457520640,
      "step": 695
    },
    {
      "epoch": 1.4842300556586272,
      "grad_norm": 0.13384391367435455,
      "learning_rate": 8.50809987068598e-06,
      "loss": 3.1455,
      "num_input_tokens_seen": 1468006400,
      "step": 700
    },
    {
      "epoch": 1.4948316989133317,
      "grad_norm": 0.20566639304161072,
      "learning_rate": 8.180536039578545e-06,
      "loss": 3.1995,
      "num_input_tokens_seen": 1478492160,
      "step": 705
    },
    {
      "epoch": 1.5054333421680361,
      "grad_norm": 0.13730423152446747,
      "learning_rate": 7.858164579213547e-06,
      "loss": 3.134,
      "num_input_tokens_seen": 1488977920,
      "step": 710
    },
    {
      "epoch": 1.5160349854227406,
      "grad_norm": 0.14655794203281403,
      "learning_rate": 7.541085009536625e-06,
      "loss": 3.0949,
      "num_input_tokens_seen": 1499463680,
      "step": 715
    },
    {
      "epoch": 1.526636628677445,
      "grad_norm": 0.12526313960552216,
      "learning_rate": 7.2293952168231316e-06,
      "loss": 3.1615,
      "num_input_tokens_seen": 1509949440,
      "step": 720
    },
    {
      "epoch": 1.5372382719321496,
      "grad_norm": 0.1351662427186966,
      "learning_rate": 6.923191423459482e-06,
      "loss": 3.1805,
      "num_input_tokens_seen": 1520435200,
      "step": 725
    },
    {
      "epoch": 1.547839915186854,
      "grad_norm": 0.1686793714761734,
      "learning_rate": 6.622568158238126e-06,
      "loss": 3.1779,
      "num_input_tokens_seen": 1530920960,
      "step": 730
    },
    {
      "epoch": 1.5584415584415585,
      "grad_norm": 0.14492954313755035,
      "learning_rate": 6.327618227175389e-06,
      "loss": 3.1046,
      "num_input_tokens_seen": 1541406720,
      "step": 735
    },
    {
      "epoch": 1.569043201696263,
      "grad_norm": 0.14910686016082764,
      "learning_rate": 6.0384326848611225e-06,
      "loss": 3.0803,
      "num_input_tokens_seen": 1551892480,
      "step": 740
    },
    {
      "epoch": 1.5796448449509675,
      "grad_norm": 0.1342857927083969,
      "learning_rate": 5.755100806349076e-06,
      "loss": 3.062,
      "num_input_tokens_seen": 1562378240,
      "step": 745
    },
    {
      "epoch": 1.590246488205672,
      "grad_norm": 0.1598527878522873,
      "learning_rate": 5.4777100595965994e-06,
      "loss": 3.2036,
      "num_input_tokens_seen": 1572864000,
      "step": 750
    },
    {
      "epoch": 1.6008481314603764,
      "grad_norm": 0.1773335188627243,
      "learning_rate": 5.206346078462249e-06,
      "loss": 3.1892,
      "num_input_tokens_seen": 1583349760,
      "step": 755
    },
    {
      "epoch": 1.6114497747150809,
      "grad_norm": 0.17586533725261688,
      "learning_rate": 4.941092636269554e-06,
      "loss": 3.1995,
      "num_input_tokens_seen": 1593835520,
      "step": 760
    },
    {
      "epoch": 1.6220514179697854,
      "grad_norm": 0.16583110392093658,
      "learning_rate": 4.682031619945238e-06,
      "loss": 3.1309,
      "num_input_tokens_seen": 1604321280,
      "step": 765
    },
    {
      "epoch": 1.6326530612244898,
      "grad_norm": 0.1462288796901703,
      "learning_rate": 4.4292430047396914e-06,
      "loss": 3.1376,
      "num_input_tokens_seen": 1614807040,
      "step": 770
    },
    {
      "epoch": 1.6432547044791943,
      "grad_norm": 0.13279996812343597,
      "learning_rate": 4.182804829537654e-06,
      "loss": 3.1541,
      "num_input_tokens_seen": 1625292800,
      "step": 775
    },
    {
      "epoch": 1.6538563477338988,
      "grad_norm": 0.18102097511291504,
      "learning_rate": 3.942793172766699e-06,
      "loss": 3.1894,
      "num_input_tokens_seen": 1635778560,
      "step": 780
    },
    {
      "epoch": 1.6644579909886033,
      "grad_norm": 0.16354134678840637,
      "learning_rate": 3.709282128910907e-06,
      "loss": 3.1664,
      "num_input_tokens_seen": 1646264320,
      "step": 785
    },
    {
      "epoch": 1.6750596342433077,
      "grad_norm": 0.32594841718673706,
      "learning_rate": 3.4823437856369794e-06,
      "loss": 3.1721,
      "num_input_tokens_seen": 1656750080,
      "step": 790
    },
    {
      "epoch": 1.6856612774980122,
      "grad_norm": 0.13957597315311432,
      "learning_rate": 3.2620482015399302e-06,
      "loss": 3.2471,
      "num_input_tokens_seen": 1667235840,
      "step": 795
    },
    {
      "epoch": 1.6962629207527167,
      "grad_norm": 0.13924147188663483,
      "learning_rate": 3.0484633845151488e-06,
      "loss": 3.1871,
      "num_input_tokens_seen": 1677721600,
      "step": 800
    },
    {
      "epoch": 1.6962629207527167,
      "eval_accuracy": 0.4514018596512645,
      "eval_loss": 3.0366272926330566,
      "eval_runtime": 36.0138,
      "eval_samples_per_second": 8.33,
      "eval_steps_per_second": 2.083,
      "num_input_tokens_seen": 1677721600,
      "step": 800
    },
    {
      "epoch": 1.7068645640074211,
      "grad_norm": 0.14513356983661652,
      "learning_rate": 2.841655270763549e-06,
      "loss": 3.066,
      "num_input_tokens_seen": 1688207360,
      "step": 805
    },
    {
      "epoch": 1.7174662072621256,
      "grad_norm": 0.14862333238124847,
      "learning_rate": 2.6416877044362685e-06,
      "loss": 3.1206,
      "num_input_tokens_seen": 1698693120,
      "step": 810
    },
    {
      "epoch": 1.72806785051683,
      "grad_norm": 0.13608209788799286,
      "learning_rate": 2.448622417925214e-06,
      "loss": 3.2281,
      "num_input_tokens_seen": 1709178880,
      "step": 815
    },
    {
      "epoch": 1.7386694937715346,
      "grad_norm": 0.1472754031419754,
      "learning_rate": 2.2625190128055168e-06,
      "loss": 3.2023,
      "num_input_tokens_seen": 1719664640,
      "step": 820
    },
    {
      "epoch": 1.749271137026239,
      "grad_norm": 0.17672620713710785,
      "learning_rate": 2.0834349414358495e-06,
      "loss": 3.2217,
      "num_input_tokens_seen": 1730150400,
      "step": 825
    },
    {
      "epoch": 1.7598727802809435,
      "grad_norm": 0.14799125492572784,
      "learning_rate": 1.911425489222127e-06,
      "loss": 3.2231,
      "num_input_tokens_seen": 1740636160,
      "step": 830
    },
    {
      "epoch": 1.770474423535648,
      "grad_norm": 0.14448776841163635,
      "learning_rate": 1.7465437575502952e-06,
      "loss": 3.1126,
      "num_input_tokens_seen": 1751121920,
      "step": 835
    },
    {
      "epoch": 1.7810760667903525,
      "grad_norm": 0.14082539081573486,
      "learning_rate": 1.5888406473932692e-06,
      "loss": 3.0933,
      "num_input_tokens_seen": 1761607680,
      "step": 840
    },
    {
      "epoch": 1.791677710045057,
      "grad_norm": 0.18886087834835052,
      "learning_rate": 1.4383648435972007e-06,
      "loss": 3.2596,
      "num_input_tokens_seen": 1772093440,
      "step": 845
    },
    {
      "epoch": 1.8022793532997614,
      "grad_norm": 0.14818327128887177,
      "learning_rate": 1.2951627998518623e-06,
      "loss": 3.1292,
      "num_input_tokens_seen": 1782579200,
      "step": 850
    },
    {
      "epoch": 1.8128809965544659,
      "grad_norm": 0.14874930679798126,
      "learning_rate": 1.1592787243498631e-06,
      "loss": 3.1284,
      "num_input_tokens_seen": 1793064960,
      "step": 855
    },
    {
      "epoch": 1.8234826398091704,
      "grad_norm": 0.16616405546665192,
      "learning_rate": 1.0307545661390139e-06,
      "loss": 3.1422,
      "num_input_tokens_seen": 1803550720,
      "step": 860
    },
    {
      "epoch": 1.8340842830638748,
      "grad_norm": 0.15211746096611023,
      "learning_rate": 9.09630002172182e-07,
      "loss": 3.1793,
      "num_input_tokens_seen": 1814036480,
      "step": 865
    },
    {
      "epoch": 1.8446859263185793,
      "grad_norm": 0.14233224093914032,
      "learning_rate": 7.959424250585323e-07,
      "loss": 3.1624,
      "num_input_tokens_seen": 1824522240,
      "step": 870
    },
    {
      "epoch": 1.8552875695732838,
      "grad_norm": 0.13079190254211426,
      "learning_rate": 6.897269315199628e-07,
      "loss": 3.2029,
      "num_input_tokens_seen": 1835008000,
      "step": 875
    },
    {
      "epoch": 1.8658892128279883,
      "grad_norm": 0.1364782601594925,
      "learning_rate": 5.910163115563471e-07,
      "loss": 3.1872,
      "num_input_tokens_seen": 1845493760,
      "step": 880
    },
    {
      "epoch": 1.8764908560826927,
      "grad_norm": 0.22711250185966492,
      "learning_rate": 4.998410383228458e-07,
      "loss": 3.1676,
      "num_input_tokens_seen": 1855979520,
      "step": 885
    },
    {
      "epoch": 1.8870924993373972,
      "grad_norm": 0.21140585839748383,
      "learning_rate": 4.162292587224947e-07,
      "loss": 3.1227,
      "num_input_tokens_seen": 1866465280,
      "step": 890
    },
    {
      "epoch": 1.8976941425921017,
      "grad_norm": 0.2973221242427826,
      "learning_rate": 3.4020678471690934e-07,
      "loss": 3.1285,
      "num_input_tokens_seen": 1876951040,
      "step": 895
    },
    {
      "epoch": 1.9082957858468061,
      "grad_norm": 0.15094168484210968,
      "learning_rate": 2.7179708535781943e-07,
      "loss": 3.152,
      "num_input_tokens_seen": 1887436800,
      "step": 900
    },
    {
      "epoch": 1.9188974291015106,
      "grad_norm": 0.14399217069149017,
      "learning_rate": 2.110212795418953e-07,
      "loss": 3.1971,
      "num_input_tokens_seen": 1897922560,
      "step": 905
    },
    {
      "epoch": 1.929499072356215,
      "grad_norm": 0.15141591429710388,
      "learning_rate": 1.578981294910936e-07,
      "loss": 3.1604,
      "num_input_tokens_seen": 1908408320,
      "step": 910
    },
    {
      "epoch": 1.9401007156109196,
      "grad_norm": 0.15116803348064423,
      "learning_rate": 1.1244403496052658e-07,
      "loss": 3.0712,
      "num_input_tokens_seen": 1918894080,
      "step": 915
    },
    {
      "epoch": 1.950702358865624,
      "grad_norm": 0.1515650451183319,
      "learning_rate": 7.46730281756619e-08,
      "loss": 3.0719,
      "num_input_tokens_seen": 1929379840,
      "step": 920
    },
    {
      "epoch": 1.9613040021203285,
      "grad_norm": 0.1448003053665161,
      "learning_rate": 4.4596769500407366e-08,
      "loss": 3.2699,
      "num_input_tokens_seen": 1939865600,
      "step": 925
    },
    {
      "epoch": 1.971905645375033,
      "grad_norm": 0.18116047978401184,
      "learning_rate": 2.2224543837423562e-08,
      "loss": 3.1134,
      "num_input_tokens_seen": 1950351360,
      "step": 930
    },
    {
      "epoch": 1.9825072886297375,
      "grad_norm": 0.3333103358745575,
      "learning_rate": 7.563257761744601e-09,
      "loss": 3.1083,
      "num_input_tokens_seen": 1960837120,
      "step": 935
    },
    {
      "epoch": 1.993108931884442,
      "grad_norm": 0.15106463432312012,
      "learning_rate": 6.174373886586037e-10,
      "loss": 3.2087,
      "num_input_tokens_seen": 1971322880,
      "step": 940
    },
    {
      "epoch": 1.997349589186324,
      "num_input_tokens_seen": 1975517184,
      "step": 942,
      "total_flos": 9.732316690586272e+18,
      "train_loss": 3.186332613039928,
      "train_runtime": 34789.3198,
      "train_samples_per_second": 3.47,
      "train_steps_per_second": 0.027
    }
  ],
  "logging_steps": 5,
  "max_steps": 942,
  "num_input_tokens_seen": 1975517184,
  "num_train_epochs": 2,
  "save_steps": 100,
  "total_flos": 9.732316690586272e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}