{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.28651874218272183,
  "eval_steps": 500,
  "global_step": 26000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0016529927433618567,
      "grad_norm": 2.137389659881592,
      "learning_rate": 0.0001599141895763845,
      "loss": 6.3906,
      "num_input_tokens_seen": 872064,
      "step": 150
    },
    {
      "epoch": 0.0033059854867237134,
      "grad_norm": 1.725138783454895,
      "learning_rate": 0.000159826028182259,
      "loss": 5.6466,
      "num_input_tokens_seen": 1744032,
      "step": 300
    },
    {
      "epoch": 0.00495897823008557,
      "grad_norm": 1.9830477237701416,
      "learning_rate": 0.00015973786678813348,
      "loss": 5.4449,
      "num_input_tokens_seen": 2615552,
      "step": 450
    },
    {
      "epoch": 0.006611970973447427,
      "grad_norm": 1.8168050050735474,
      "learning_rate": 0.00015964970539400798,
      "loss": 5.3521,
      "num_input_tokens_seen": 3469792,
      "step": 600
    },
    {
      "epoch": 0.008264963716809284,
      "grad_norm": 1.9247260093688965,
      "learning_rate": 0.00015956154399988245,
      "loss": 5.2863,
      "num_input_tokens_seen": 4342304,
      "step": 750
    },
    {
      "epoch": 0.00991795646017114,
      "grad_norm": 1.9191653728485107,
      "learning_rate": 0.00015947338260575695,
      "loss": 5.2405,
      "num_input_tokens_seen": 5211616,
      "step": 900
    },
    {
      "epoch": 0.011570949203532997,
      "grad_norm": 1.748758316040039,
      "learning_rate": 0.00015938522121163143,
      "loss": 5.1998,
      "num_input_tokens_seen": 6098432,
      "step": 1050
    },
    {
      "epoch": 0.013223941946894854,
      "grad_norm": 2.057206869125366,
      "learning_rate": 0.00015929705981750593,
      "loss": 5.1706,
      "num_input_tokens_seen": 6967904,
      "step": 1200
    },
    {
      "epoch": 0.014876934690256709,
      "grad_norm": 2.0983762741088867,
      "learning_rate": 0.0001592088984233804,
      "loss": 5.1521,
      "num_input_tokens_seen": 7851072,
      "step": 1350
    },
    {
      "epoch": 0.016529927433618568,
      "grad_norm": 1.9689010381698608,
      "learning_rate": 0.0001591207370292549,
      "loss": 5.1314,
      "num_input_tokens_seen": 8719744,
      "step": 1500
    },
    {
      "epoch": 0.01818292017698042,
      "grad_norm": 1.7962827682495117,
      "learning_rate": 0.00015903257563512938,
      "loss": 5.0878,
      "num_input_tokens_seen": 9566912,
      "step": 1650
    },
    {
      "epoch": 0.01983591292034228,
      "grad_norm": 1.8615788221359253,
      "learning_rate": 0.00015894441424100389,
      "loss": 5.0739,
      "num_input_tokens_seen": 10450048,
      "step": 1800
    },
    {
      "epoch": 0.021488905663704136,
      "grad_norm": 1.8449132442474365,
      "learning_rate": 0.00015885625284687836,
      "loss": 5.0658,
      "num_input_tokens_seen": 11348640,
      "step": 1950
    },
    {
      "epoch": 0.023141898407065993,
      "grad_norm": 1.8516219854354858,
      "learning_rate": 0.00015876809145275284,
      "loss": 5.0462,
      "num_input_tokens_seen": 12226144,
      "step": 2100
    },
    {
      "epoch": 0.02479489115042785,
      "grad_norm": 1.9487632513046265,
      "learning_rate": 0.00015867993005862734,
      "loss": 5.0265,
      "num_input_tokens_seen": 13119040,
      "step": 2250
    },
    {
      "epoch": 0.026447883893789707,
      "grad_norm": 1.8343034982681274,
      "learning_rate": 0.0001585917686645018,
      "loss": 5.0056,
      "num_input_tokens_seen": 13993280,
      "step": 2400
    },
    {
      "epoch": 0.028100876637151564,
      "grad_norm": 2.002856731414795,
      "learning_rate": 0.00015850360727037632,
      "loss": 4.9993,
      "num_input_tokens_seen": 14876768,
      "step": 2550
    },
    {
      "epoch": 0.029753869380513418,
      "grad_norm": 1.981166124343872,
      "learning_rate": 0.0001584154458762508,
      "loss": 4.9943,
      "num_input_tokens_seen": 15749088,
      "step": 2700
    },
    {
      "epoch": 0.03140686212387528,
      "grad_norm": 1.9732400178909302,
      "learning_rate": 0.0001583272844821253,
      "loss": 4.9762,
      "num_input_tokens_seen": 16634176,
      "step": 2850
    },
    {
      "epoch": 0.033059854867237136,
      "grad_norm": 1.8369622230529785,
      "learning_rate": 0.00015823912308799977,
      "loss": 4.9525,
      "num_input_tokens_seen": 17509824,
      "step": 3000
    },
    {
      "epoch": 0.034712847610598986,
      "grad_norm": 1.9628883600234985,
      "learning_rate": 0.00015815096169387427,
      "loss": 4.9481,
      "num_input_tokens_seen": 18398112,
      "step": 3150
    },
    {
      "epoch": 0.03636584035396084,
      "grad_norm": 1.8523181676864624,
      "learning_rate": 0.00015806280029974874,
      "loss": 4.9306,
      "num_input_tokens_seen": 19272928,
      "step": 3300
    },
    {
      "epoch": 0.0380188330973227,
      "grad_norm": 1.9627933502197266,
      "learning_rate": 0.00015797463890562325,
      "loss": 4.9217,
      "num_input_tokens_seen": 20162880,
      "step": 3450
    },
    {
      "epoch": 0.03967182584068456,
      "grad_norm": 1.8966543674468994,
      "learning_rate": 0.00015788647751149772,
      "loss": 4.919,
      "num_input_tokens_seen": 21041888,
      "step": 3600
    },
    {
      "epoch": 0.041324818584046415,
      "grad_norm": 1.9131779670715332,
      "learning_rate": 0.00015779831611737222,
      "loss": 4.9118,
      "num_input_tokens_seen": 21914272,
      "step": 3750
    },
    {
      "epoch": 0.04297781132740827,
      "grad_norm": 1.8262194395065308,
      "learning_rate": 0.0001577101547232467,
      "loss": 4.9139,
      "num_input_tokens_seen": 22802432,
      "step": 3900
    },
    {
      "epoch": 0.04463080407077013,
      "grad_norm": 1.9549835920333862,
      "learning_rate": 0.0001576219933291212,
      "loss": 4.8919,
      "num_input_tokens_seen": 23680544,
      "step": 4050
    },
    {
      "epoch": 0.046283796814131986,
      "grad_norm": 1.9537177085876465,
      "learning_rate": 0.00015753383193499568,
      "loss": 4.8895,
      "num_input_tokens_seen": 24572928,
      "step": 4200
    },
    {
      "epoch": 0.04793678955749384,
      "grad_norm": 1.9916348457336426,
      "learning_rate": 0.00015744567054087018,
      "loss": 4.8798,
      "num_input_tokens_seen": 25458752,
      "step": 4350
    },
    {
      "epoch": 0.0495897823008557,
      "grad_norm": 1.9964395761489868,
      "learning_rate": 0.00015735750914674465,
      "loss": 4.8734,
      "num_input_tokens_seen": 26339424,
      "step": 4500
    },
    {
      "epoch": 0.05124277504421756,
      "grad_norm": 1.9595707654953003,
      "learning_rate": 0.00015726934775261916,
      "loss": 4.8654,
      "num_input_tokens_seen": 27208928,
      "step": 4650
    },
    {
      "epoch": 0.052895767787579415,
      "grad_norm": 2.002746820449829,
      "learning_rate": 0.00015718118635849363,
      "loss": 4.8536,
      "num_input_tokens_seen": 28083488,
      "step": 4800
    },
    {
      "epoch": 0.05454876053094127,
      "grad_norm": 2.014301300048828,
      "learning_rate": 0.00015709302496436813,
      "loss": 4.8513,
      "num_input_tokens_seen": 28948832,
      "step": 4950
    },
    {
      "epoch": 0.05620175327430313,
      "grad_norm": 1.82748544216156,
      "learning_rate": 0.0001570048635702426,
      "loss": 4.8477,
      "num_input_tokens_seen": 29830752,
      "step": 5100
    },
    {
      "epoch": 0.05785474601766498,
      "grad_norm": 1.907245397567749,
      "learning_rate": 0.0001569167021761171,
      "loss": 4.8445,
      "num_input_tokens_seen": 30709248,
      "step": 5250
    },
    {
      "epoch": 0.059507738761026836,
      "grad_norm": 1.9649808406829834,
      "learning_rate": 0.00015682854078199158,
      "loss": 4.8313,
      "num_input_tokens_seen": 31597856,
      "step": 5400
    },
    {
      "epoch": 0.06116073150438869,
      "grad_norm": 1.9375178813934326,
      "learning_rate": 0.00015674037938786606,
      "loss": 4.8206,
      "num_input_tokens_seen": 32485120,
      "step": 5550
    },
    {
      "epoch": 0.06281372424775056,
      "grad_norm": 1.8886380195617676,
      "learning_rate": 0.00015665221799374056,
      "loss": 4.8152,
      "num_input_tokens_seen": 33354688,
      "step": 5700
    },
    {
      "epoch": 0.06446671699111241,
      "grad_norm": 1.8993780612945557,
      "learning_rate": 0.00015656405659961504,
      "loss": 4.8155,
      "num_input_tokens_seen": 34230592,
      "step": 5850
    },
    {
      "epoch": 0.06611970973447427,
      "grad_norm": 1.8930308818817139,
      "learning_rate": 0.00015647589520548954,
      "loss": 4.8173,
      "num_input_tokens_seen": 35090336,
      "step": 6000
    },
    {
      "epoch": 0.06777270247783612,
      "grad_norm": 1.951819658279419,
      "learning_rate": 0.00015638773381136401,
      "loss": 4.8118,
      "num_input_tokens_seen": 35973024,
      "step": 6150
    },
    {
      "epoch": 0.06942569522119797,
      "grad_norm": 1.9142402410507202,
      "learning_rate": 0.00015629957241723852,
      "loss": 4.8079,
      "num_input_tokens_seen": 36855936,
      "step": 6300
    },
    {
      "epoch": 0.07107868796455984,
      "grad_norm": 1.9393310546875,
      "learning_rate": 0.000156211411023113,
      "loss": 4.7883,
      "num_input_tokens_seen": 37722848,
      "step": 6450
    },
    {
      "epoch": 0.07273168070792169,
      "grad_norm": 1.8511933088302612,
      "learning_rate": 0.00015612383737161498,
      "loss": 4.8043,
      "num_input_tokens_seen": 38597600,
      "step": 6600
    },
    {
      "epoch": 0.07438467345128355,
      "grad_norm": 1.8763892650604248,
      "learning_rate": 0.00015603567597748946,
      "loss": 4.7932,
      "num_input_tokens_seen": 39493152,
      "step": 6750
    },
    {
      "epoch": 0.0760376661946454,
      "grad_norm": 1.9806557893753052,
      "learning_rate": 0.00015594751458336396,
      "loss": 4.7813,
      "num_input_tokens_seen": 40352640,
      "step": 6900
    },
    {
      "epoch": 0.07769065893800726,
      "grad_norm": 2.001722574234009,
      "learning_rate": 0.00015585935318923843,
      "loss": 4.7941,
      "num_input_tokens_seen": 41236160,
      "step": 7050
    },
    {
      "epoch": 0.07934365168136912,
      "grad_norm": 2.1065292358398438,
      "learning_rate": 0.00015577119179511294,
      "loss": 4.7819,
      "num_input_tokens_seen": 42111296,
      "step": 7200
    },
    {
      "epoch": 0.08099664442473098,
      "grad_norm": 1.8941328525543213,
      "learning_rate": 0.0001556830304009874,
      "loss": 4.7737,
      "num_input_tokens_seen": 42992864,
      "step": 7350
    },
    {
      "epoch": 0.08264963716809283,
      "grad_norm": 1.8765467405319214,
      "learning_rate": 0.00015559486900686191,
      "loss": 4.764,
      "num_input_tokens_seen": 43871808,
      "step": 7500
    },
    {
      "epoch": 0.0843026299114547,
      "grad_norm": 1.9826706647872925,
      "learning_rate": 0.0001555067076127364,
      "loss": 4.7805,
      "num_input_tokens_seen": 44742496,
      "step": 7650
    },
    {
      "epoch": 0.08595562265481654,
      "grad_norm": 1.9296499490737915,
      "learning_rate": 0.0001554185462186109,
      "loss": 4.7585,
      "num_input_tokens_seen": 45594112,
      "step": 7800
    },
    {
      "epoch": 0.08760861539817841,
      "grad_norm": 1.9379116296768188,
      "learning_rate": 0.00015533038482448537,
      "loss": 4.7682,
      "num_input_tokens_seen": 46465952,
      "step": 7950
    },
    {
      "epoch": 0.08926160814154026,
      "grad_norm": 1.8769218921661377,
      "learning_rate": 0.00015524222343035987,
      "loss": 4.7574,
      "num_input_tokens_seen": 47325664,
      "step": 8100
    },
    {
      "epoch": 0.09091460088490212,
      "grad_norm": 1.8942108154296875,
      "learning_rate": 0.00015515406203623434,
      "loss": 4.7493,
      "num_input_tokens_seen": 48202944,
      "step": 8250
    },
    {
      "epoch": 0.09256759362826397,
      "grad_norm": 1.84010648727417,
      "learning_rate": 0.00015506590064210885,
      "loss": 4.7515,
      "num_input_tokens_seen": 49074048,
      "step": 8400
    },
    {
      "epoch": 0.09422058637162582,
      "grad_norm": 1.8978796005249023,
      "learning_rate": 0.00015497773924798332,
      "loss": 4.7512,
      "num_input_tokens_seen": 49959008,
      "step": 8550
    },
    {
      "epoch": 0.09587357911498769,
      "grad_norm": 1.9536223411560059,
      "learning_rate": 0.0001548895778538578,
      "loss": 4.758,
      "num_input_tokens_seen": 50863648,
      "step": 8700
    },
    {
      "epoch": 0.09752657185834954,
      "grad_norm": 2.0626060962677,
      "learning_rate": 0.0001548014164597323,
      "loss": 4.7434,
      "num_input_tokens_seen": 51730944,
      "step": 8850
    },
    {
      "epoch": 0.0991795646017114,
      "grad_norm": 1.9423109292984009,
      "learning_rate": 0.00015471325506560677,
      "loss": 4.736,
      "num_input_tokens_seen": 52593024,
      "step": 9000
    },
    {
      "epoch": 0.10083255734507325,
      "grad_norm": 1.9180619716644287,
      "learning_rate": 0.00015462509367148127,
      "loss": 4.7187,
      "num_input_tokens_seen": 53470528,
      "step": 9150
    },
    {
      "epoch": 0.10248555008843512,
      "grad_norm": 1.8776642084121704,
      "learning_rate": 0.00015453693227735575,
      "loss": 4.7382,
      "num_input_tokens_seen": 54362720,
      "step": 9300
    },
    {
      "epoch": 0.10413854283179697,
      "grad_norm": 1.9289714097976685,
      "learning_rate": 0.00015444877088323025,
      "loss": 4.7181,
      "num_input_tokens_seen": 55238304,
      "step": 9450
    },
    {
      "epoch": 0.10579153557515883,
      "grad_norm": 1.9489550590515137,
      "learning_rate": 0.00015436060948910473,
      "loss": 4.7287,
      "num_input_tokens_seen": 56107040,
      "step": 9600
    },
    {
      "epoch": 0.10744452831852068,
      "grad_norm": 2.01839280128479,
      "learning_rate": 0.00015427244809497923,
      "loss": 4.7097,
      "num_input_tokens_seen": 56995456,
      "step": 9750
    },
    {
      "epoch": 0.10909752106188254,
      "grad_norm": 1.9155646562576294,
      "learning_rate": 0.0001541842867008537,
      "loss": 4.7153,
      "num_input_tokens_seen": 57852640,
      "step": 9900
    },
    {
      "epoch": 0.1107505138052444,
      "grad_norm": 2.008150100708008,
      "learning_rate": 0.0001540961253067282,
      "loss": 4.7139,
      "num_input_tokens_seen": 58719840,
      "step": 10050
    },
    {
      "epoch": 0.11240350654860626,
      "grad_norm": 1.9440505504608154,
      "learning_rate": 0.00015400796391260268,
      "loss": 4.7184,
      "num_input_tokens_seen": 59594784,
      "step": 10200
    },
    {
      "epoch": 0.11405649929196811,
      "grad_norm": 1.9298348426818848,
      "learning_rate": 0.00015391980251847718,
      "loss": 4.708,
      "num_input_tokens_seen": 60451712,
      "step": 10350
    },
    {
      "epoch": 0.11570949203532996,
      "grad_norm": 1.9444379806518555,
      "learning_rate": 0.00015383164112435166,
      "loss": 4.6979,
      "num_input_tokens_seen": 61335008,
      "step": 10500
    },
    {
      "epoch": 0.11736248477869182,
      "grad_norm": 2.0216357707977295,
      "learning_rate": 0.00015374406747285365,
      "loss": 4.7055,
      "num_input_tokens_seen": 62197344,
      "step": 10650
    },
    {
      "epoch": 0.11901547752205367,
      "grad_norm": 1.9788328409194946,
      "learning_rate": 0.00015365590607872815,
      "loss": 4.6948,
      "num_input_tokens_seen": 63058464,
      "step": 10800
    },
    {
      "epoch": 0.12066847026541554,
      "grad_norm": 2.0648193359375,
      "learning_rate": 0.00015356774468460263,
      "loss": 4.7058,
      "num_input_tokens_seen": 63942112,
      "step": 10950
    },
    {
      "epoch": 0.12232146300877739,
      "grad_norm": 1.9497121572494507,
      "learning_rate": 0.00015348017103310462,
      "loss": 4.6924,
      "num_input_tokens_seen": 64815232,
      "step": 11100
    },
    {
      "epoch": 0.12397445575213925,
      "grad_norm": 1.9825148582458496,
      "learning_rate": 0.0001533920096389791,
      "loss": 4.7021,
      "num_input_tokens_seen": 65689856,
      "step": 11250
    },
    {
      "epoch": 0.12562744849550112,
      "grad_norm": 1.9766299724578857,
      "learning_rate": 0.0001533038482448536,
      "loss": 4.6921,
      "num_input_tokens_seen": 66570272,
      "step": 11400
    },
    {
      "epoch": 0.12728044123886295,
      "grad_norm": 1.9706653356552124,
      "learning_rate": 0.00015321568685072807,
      "loss": 4.6847,
      "num_input_tokens_seen": 67436096,
      "step": 11550
    },
    {
      "epoch": 0.12893343398222482,
      "grad_norm": 1.9741766452789307,
      "learning_rate": 0.00015312811319923006,
      "loss": 4.6835,
      "num_input_tokens_seen": 68326816,
      "step": 11700
    },
    {
      "epoch": 0.13058642672558668,
      "grad_norm": 1.850825548171997,
      "learning_rate": 0.00015303995180510456,
      "loss": 4.6874,
      "num_input_tokens_seen": 69203328,
      "step": 11850
    },
    {
      "epoch": 0.13223941946894854,
      "grad_norm": 2.0040206909179688,
      "learning_rate": 0.00015295179041097904,
      "loss": 4.6805,
      "num_input_tokens_seen": 70049696,
      "step": 12000
    },
    {
      "epoch": 0.13389241221231038,
      "grad_norm": 1.9326891899108887,
      "learning_rate": 0.00015286362901685354,
      "loss": 4.6781,
      "num_input_tokens_seen": 70929856,
      "step": 12150
    },
    {
      "epoch": 0.13554540495567224,
      "grad_norm": 1.8233270645141602,
      "learning_rate": 0.00015277546762272802,
      "loss": 4.6732,
      "num_input_tokens_seen": 71797184,
      "step": 12300
    },
    {
      "epoch": 0.1371983976990341,
      "grad_norm": 2.026263475418091,
      "learning_rate": 0.00015268730622860252,
      "loss": 4.672,
      "num_input_tokens_seen": 72662528,
      "step": 12450
    },
    {
      "epoch": 0.13885139044239594,
      "grad_norm": 1.8338570594787598,
      "learning_rate": 0.000152599144834477,
      "loss": 4.6718,
      "num_input_tokens_seen": 73543840,
      "step": 12600
    },
    {
      "epoch": 0.1405043831857578,
      "grad_norm": 1.934313416481018,
      "learning_rate": 0.0001525109834403515,
      "loss": 4.6595,
      "num_input_tokens_seen": 74426752,
      "step": 12750
    },
    {
      "epoch": 0.14215737592911967,
      "grad_norm": 1.861647367477417,
      "learning_rate": 0.00015242282204622597,
      "loss": 4.6692,
      "num_input_tokens_seen": 75309888,
      "step": 12900
    },
    {
      "epoch": 0.14381036867248154,
      "grad_norm": 1.9282541275024414,
      "learning_rate": 0.00015233466065210047,
      "loss": 4.6604,
      "num_input_tokens_seen": 76194976,
      "step": 13050
    },
    {
      "epoch": 0.14546336141584337,
      "grad_norm": 1.975542664527893,
      "learning_rate": 0.00015224649925797495,
      "loss": 4.662,
      "num_input_tokens_seen": 77078048,
      "step": 13200
    },
    {
      "epoch": 0.14711635415920524,
      "grad_norm": 1.8979029655456543,
      "learning_rate": 0.00015215833786384945,
      "loss": 4.6554,
      "num_input_tokens_seen": 77940000,
      "step": 13350
    },
    {
      "epoch": 0.1487693469025671,
      "grad_norm": 1.875108242034912,
      "learning_rate": 0.00015207017646972392,
      "loss": 4.6599,
      "num_input_tokens_seen": 78822336,
      "step": 13500
    },
    {
      "epoch": 0.15042233964592897,
      "grad_norm": 1.9476161003112793,
      "learning_rate": 0.0001519820150755984,
      "loss": 4.6688,
      "num_input_tokens_seen": 79708256,
      "step": 13650
    },
    {
      "epoch": 0.1520753323892908,
      "grad_norm": 1.9902242422103882,
      "learning_rate": 0.0001518938536814729,
      "loss": 4.6626,
      "num_input_tokens_seen": 80595168,
      "step": 13800
    },
    {
      "epoch": 0.15372832513265267,
      "grad_norm": 1.7958108186721802,
      "learning_rate": 0.00015180569228734738,
      "loss": 4.662,
      "num_input_tokens_seen": 81484416,
      "step": 13950
    },
    {
      "epoch": 0.15538131787601453,
      "grad_norm": 1.8727210760116577,
      "learning_rate": 0.00015171753089322188,
      "loss": 4.6579,
      "num_input_tokens_seen": 82355712,
      "step": 14100
    },
    {
      "epoch": 0.1570343106193764,
      "grad_norm": 2.0186071395874023,
      "learning_rate": 0.00015162936949909635,
      "loss": 4.6494,
      "num_input_tokens_seen": 83223136,
      "step": 14250
    },
    {
      "epoch": 0.15868730336273823,
      "grad_norm": 1.8837051391601562,
      "learning_rate": 0.00015154120810497086,
      "loss": 4.6463,
      "num_input_tokens_seen": 84097152,
      "step": 14400
    },
    {
      "epoch": 0.1603402961061001,
      "grad_norm": 1.8699517250061035,
      "learning_rate": 0.00015145304671084533,
      "loss": 4.6489,
      "num_input_tokens_seen": 84976576,
      "step": 14550
    },
    {
      "epoch": 0.16199328884946196,
      "grad_norm": 1.956932783126831,
      "learning_rate": 0.00015136488531671983,
      "loss": 4.649,
      "num_input_tokens_seen": 85868224,
      "step": 14700
    },
    {
      "epoch": 0.1636462815928238,
      "grad_norm": 2.020624876022339,
      "learning_rate": 0.0001512767239225943,
      "loss": 4.6458,
      "num_input_tokens_seen": 86746944,
      "step": 14850
    },
    {
      "epoch": 0.16529927433618566,
      "grad_norm": 1.9445135593414307,
      "learning_rate": 0.0001511885625284688,
      "loss": 4.6387,
      "num_input_tokens_seen": 87626976,
      "step": 15000
    },
    {
      "epoch": 0.16695226707954752,
      "grad_norm": 1.9843000173568726,
      "learning_rate": 0.00015110040113434328,
      "loss": 4.6481,
      "num_input_tokens_seen": 88483616,
      "step": 15150
    },
    {
      "epoch": 0.1686052598229094,
      "grad_norm": 2.0259897708892822,
      "learning_rate": 0.00015101282748284528,
      "loss": 4.6317,
      "num_input_tokens_seen": 89374048,
      "step": 15300
    },
    {
      "epoch": 0.17025825256627122,
      "grad_norm": 1.8472915887832642,
      "learning_rate": 0.00015092466608871975,
      "loss": 4.6468,
      "num_input_tokens_seen": 90265376,
      "step": 15450
    },
    {
      "epoch": 0.1719112453096331,
      "grad_norm": 1.9485039710998535,
      "learning_rate": 0.00015083650469459425,
      "loss": 4.6272,
      "num_input_tokens_seen": 91161504,
      "step": 15600
    },
    {
      "epoch": 0.17356423805299495,
      "grad_norm": 2.0340664386749268,
      "learning_rate": 0.00015074834330046873,
      "loss": 4.6252,
      "num_input_tokens_seen": 92033536,
      "step": 15750
    },
    {
      "epoch": 0.17521723079635682,
      "grad_norm": 1.8034217357635498,
      "learning_rate": 0.00015066018190634323,
      "loss": 4.6347,
      "num_input_tokens_seen": 92906464,
      "step": 15900
    },
    {
      "epoch": 0.17687022353971865,
      "grad_norm": 1.9323750734329224,
      "learning_rate": 0.0001505720205122177,
      "loss": 4.624,
      "num_input_tokens_seen": 93773664,
      "step": 16050
    },
    {
      "epoch": 0.17852321628308052,
      "grad_norm": 1.9791151285171509,
      "learning_rate": 0.00015048385911809218,
      "loss": 4.6184,
      "num_input_tokens_seen": 94646528,
      "step": 16200
    },
    {
      "epoch": 0.18017620902644238,
      "grad_norm": 2.0325284004211426,
      "learning_rate": 0.00015039569772396668,
      "loss": 4.6129,
      "num_input_tokens_seen": 95522304,
      "step": 16350
    },
    {
      "epoch": 0.18182920176980424,
      "grad_norm": 1.773972511291504,
      "learning_rate": 0.00015030753632984116,
      "loss": 4.6283,
      "num_input_tokens_seen": 96397632,
      "step": 16500
    },
    {
      "epoch": 0.18348219451316608,
      "grad_norm": 1.792601466178894,
      "learning_rate": 0.00015021937493571566,
      "loss": 4.6201,
      "num_input_tokens_seen": 97283072,
      "step": 16650
    },
    {
      "epoch": 0.18513518725652794,
      "grad_norm": 1.9488441944122314,
      "learning_rate": 0.00015013121354159013,
      "loss": 4.6174,
      "num_input_tokens_seen": 98164960,
      "step": 16800
    },
    {
      "epoch": 0.1867881799998898,
      "grad_norm": 1.8708151578903198,
      "learning_rate": 0.00015004305214746464,
      "loss": 4.6156,
      "num_input_tokens_seen": 99030464,
      "step": 16950
    },
    {
      "epoch": 0.18844117274325164,
      "grad_norm": 1.9848783016204834,
      "learning_rate": 0.0001499548907533391,
      "loss": 4.6069,
      "num_input_tokens_seen": 99913184,
      "step": 17100
    },
    {
      "epoch": 0.1900941654866135,
      "grad_norm": 1.9591269493103027,
      "learning_rate": 0.00014986672935921361,
      "loss": 4.6195,
      "num_input_tokens_seen": 100797056,
      "step": 17250
    },
    {
      "epoch": 0.19174715822997537,
      "grad_norm": 1.9400300979614258,
      "learning_rate": 0.0001497785679650881,
      "loss": 4.6167,
      "num_input_tokens_seen": 101678336,
      "step": 17400
    },
    {
      "epoch": 0.19340015097333724,
      "grad_norm": 1.9163286685943604,
      "learning_rate": 0.0001496904065709626,
      "loss": 4.6135,
      "num_input_tokens_seen": 102532640,
      "step": 17550
    },
    {
      "epoch": 0.19505314371669907,
      "grad_norm": 1.86648690700531,
      "learning_rate": 0.00014960224517683707,
      "loss": 4.6063,
      "num_input_tokens_seen": 103403264,
      "step": 17700
    },
    {
      "epoch": 0.19670613646006094,
      "grad_norm": 1.9310001134872437,
      "learning_rate": 0.00014951408378271157,
      "loss": 4.6143,
      "num_input_tokens_seen": 104304224,
      "step": 17850
    },
    {
      "epoch": 0.1983591292034228,
      "grad_norm": 1.9832515716552734,
      "learning_rate": 0.00014942592238858604,
      "loss": 4.6138,
      "num_input_tokens_seen": 105184128,
      "step": 18000
    },
    {
      "epoch": 0.20001212194678467,
      "grad_norm": 1.9453548192977905,
      "learning_rate": 0.00014933776099446055,
      "loss": 4.6143,
      "num_input_tokens_seen": 106070880,
      "step": 18150
    },
    {
      "epoch": 0.2016651146901465,
      "grad_norm": 1.8135297298431396,
      "learning_rate": 0.00014924959960033502,
      "loss": 4.6197,
      "num_input_tokens_seen": 106944960,
      "step": 18300
    },
    {
      "epoch": 0.20331810743350837,
      "grad_norm": 1.892717719078064,
      "learning_rate": 0.00014916143820620952,
      "loss": 4.6039,
      "num_input_tokens_seen": 107808512,
      "step": 18450
    },
    {
      "epoch": 0.20497110017687023,
      "grad_norm": 1.9304077625274658,
      "learning_rate": 0.000149073276812084,
      "loss": 4.6103,
      "num_input_tokens_seen": 108679584,
      "step": 18600
    },
    {
      "epoch": 0.20662409292023207,
      "grad_norm": 1.874104380607605,
      "learning_rate": 0.0001489851154179585,
      "loss": 4.6092,
      "num_input_tokens_seen": 109561440,
      "step": 18750
    },
    {
      "epoch": 0.20827708566359393,
      "grad_norm": 1.9672309160232544,
      "learning_rate": 0.00014889695402383297,
      "loss": 4.6027,
      "num_input_tokens_seen": 110445760,
      "step": 18900
    },
    {
      "epoch": 0.2099300784069558,
      "grad_norm": 1.9013960361480713,
      "learning_rate": 0.00014880879262970748,
      "loss": 4.6161,
      "num_input_tokens_seen": 111310304,
      "step": 19050
    },
    {
      "epoch": 0.21158307115031766,
      "grad_norm": 1.902948021888733,
      "learning_rate": 0.00014872063123558195,
      "loss": 4.6118,
      "num_input_tokens_seen": 112181440,
      "step": 19200
    },
    {
      "epoch": 0.2132360638936795,
      "grad_norm": 1.9160059690475464,
      "learning_rate": 0.00014863246984145645,
      "loss": 4.5929,
      "num_input_tokens_seen": 113074496,
      "step": 19350
    },
    {
      "epoch": 0.21488905663704136,
      "grad_norm": 1.843983769416809,
      "learning_rate": 0.00014854489618995845,
      "loss": 4.5982,
      "num_input_tokens_seen": 113972512,
      "step": 19500
    },
    {
      "epoch": 0.21654204938040322,
      "grad_norm": 1.83791184425354,
      "learning_rate": 0.00014845673479583292,
      "loss": 4.6046,
      "num_input_tokens_seen": 114839680,
      "step": 19650
    },
    {
      "epoch": 0.2181950421237651,
      "grad_norm": 1.8458436727523804,
      "learning_rate": 0.00014836857340170742,
      "loss": 4.5987,
      "num_input_tokens_seen": 115716000,
      "step": 19800
    },
    {
      "epoch": 0.21984803486712692,
      "grad_norm": 2.0030035972595215,
      "learning_rate": 0.0001482804120075819,
      "loss": 4.5873,
      "num_input_tokens_seen": 116581408,
      "step": 19950
    },
    {
      "epoch": 0.2215010276104888,
      "grad_norm": 1.8120313882827759,
      "learning_rate": 0.00014819225061345637,
      "loss": 4.5893,
      "num_input_tokens_seen": 117446368,
      "step": 20100
    },
    {
      "epoch": 0.22315402035385065,
      "grad_norm": 1.8799773454666138,
      "learning_rate": 0.00014810408921933087,
      "loss": 4.5746,
      "num_input_tokens_seen": 118320096,
      "step": 20250
    },
    {
      "epoch": 0.22480701309721252,
      "grad_norm": 1.9042309522628784,
      "learning_rate": 0.00014801592782520535,
      "loss": 4.5851,
      "num_input_tokens_seen": 119192128,
      "step": 20400
    },
    {
      "epoch": 0.22646000584057435,
      "grad_norm": 1.8850473165512085,
      "learning_rate": 0.00014792776643107985,
      "loss": 4.5883,
      "num_input_tokens_seen": 120065888,
      "step": 20550
    },
    {
      "epoch": 0.22811299858393622,
      "grad_norm": 1.8963854312896729,
      "learning_rate": 0.00014783960503695433,
      "loss": 4.5869,
      "num_input_tokens_seen": 120921504,
      "step": 20700
    },
    {
      "epoch": 0.22976599132729808,
      "grad_norm": 1.8145036697387695,
      "learning_rate": 0.00014775144364282883,
      "loss": 4.5857,
      "num_input_tokens_seen": 121819936,
      "step": 20850
    },
    {
      "epoch": 0.23141898407065992,
      "grad_norm": 1.8780988454818726,
      "learning_rate": 0.0001476632822487033,
      "loss": 4.5793,
      "num_input_tokens_seen": 122700576,
      "step": 21000
    },
    {
      "epoch": 0.23307197681402178,
      "grad_norm": 1.8859424591064453,
      "learning_rate": 0.00014757512085457778,
      "loss": 4.5847,
      "num_input_tokens_seen": 123578848,
      "step": 21150
    },
    {
      "epoch": 0.23472496955738364,
      "grad_norm": 1.8556190729141235,
      "learning_rate": 0.00014748695946045228,
      "loss": 4.5915,
      "num_input_tokens_seen": 124451552,
      "step": 21300
    },
    {
      "epoch": 0.2363779623007455,
      "grad_norm": 1.8445396423339844,
      "learning_rate": 0.00014739879806632676,
      "loss": 4.5857,
      "num_input_tokens_seen": 125326208,
      "step": 21450
    },
    {
      "epoch": 0.23803095504410735,
      "grad_norm": 1.903262972831726,
      "learning_rate": 0.00014731063667220126,
      "loss": 4.5811,
      "num_input_tokens_seen": 126206560,
      "step": 21600
    },
    {
      "epoch": 0.2396839477874692,
      "grad_norm": 1.7595880031585693,
      "learning_rate": 0.00014722247527807573,
      "loss": 4.567,
      "num_input_tokens_seen": 127077440,
      "step": 21750
    },
    {
      "epoch": 0.24133694053083107,
      "grad_norm": 1.8828771114349365,
      "learning_rate": 0.00014713431388395024,
      "loss": 4.5792,
      "num_input_tokens_seen": 127955584,
      "step": 21900
    },
    {
      "epoch": 0.24298993327419294,
      "grad_norm": 1.8850219249725342,
      "learning_rate": 0.0001470461524898247,
      "loss": 4.5749,
      "num_input_tokens_seen": 128807200,
      "step": 22050
    },
    {
      "epoch": 0.24464292601755477,
      "grad_norm": 1.9162580966949463,
      "learning_rate": 0.00014695799109569919,
      "loss": 4.5725,
      "num_input_tokens_seen": 129672192,
      "step": 22200
    },
    {
      "epoch": 0.24629591876091664,
      "grad_norm": 1.866351842880249,
      "learning_rate": 0.0001468698297015737,
      "loss": 4.569,
      "num_input_tokens_seen": 130541696,
      "step": 22350
    },
    {
      "epoch": 0.2479489115042785,
      "grad_norm": 1.849186658859253,
      "learning_rate": 0.00014678166830744816,
      "loss": 4.5832,
      "num_input_tokens_seen": 131420992,
      "step": 22500
    },
    {
      "epoch": 0.24960190424764037,
      "grad_norm": 1.8402087688446045,
      "learning_rate": 0.00014669350691332266,
      "loss": 4.5664,
      "num_input_tokens_seen": 132312352,
      "step": 22650
    },
    {
      "epoch": 0.25125489699100223,
      "grad_norm": 1.8887277841567993,
      "learning_rate": 0.00014660534551919714,
      "loss": 4.5771,
      "num_input_tokens_seen": 133195968,
      "step": 22800
    },
    {
      "epoch": 0.25290788973436407,
      "grad_norm": 2.029491424560547,
      "learning_rate": 0.00014651718412507164,
      "loss": 4.5718,
      "num_input_tokens_seen": 134079040,
      "step": 22950
    },
    {
      "epoch": 0.2545608824777259,
      "grad_norm": 1.9162187576293945,
      "learning_rate": 0.00014642902273094612,
      "loss": 4.5746,
      "num_input_tokens_seen": 134957440,
      "step": 23100
    },
    {
      "epoch": 0.2562138752210878,
      "grad_norm": 1.8331772089004517,
      "learning_rate": 0.00014634086133682062,
      "loss": 4.5736,
      "num_input_tokens_seen": 135835328,
      "step": 23250
    },
    {
      "epoch": 0.25786686796444963,
      "grad_norm": 1.9076178073883057,
      "learning_rate": 0.0001462526999426951,
      "loss": 4.5647,
      "num_input_tokens_seen": 136701184,
      "step": 23400
    },
    {
      "epoch": 0.25951986070781147,
      "grad_norm": 1.9407544136047363,
      "learning_rate": 0.0001461645385485696,
      "loss": 4.5607,
      "num_input_tokens_seen": 137547264,
      "step": 23550
    },
    {
      "epoch": 0.26117285345117336,
      "grad_norm": 1.9287118911743164,
      "learning_rate": 0.0001460769648970716,
      "loss": 4.5642,
      "num_input_tokens_seen": 138417888,
      "step": 23700
    },
    {
      "epoch": 0.2628258461945352,
      "grad_norm": 2.0106074810028076,
      "learning_rate": 0.00014598939124557358,
      "loss": 4.5689,
      "num_input_tokens_seen": 139297088,
      "step": 23850
    },
    {
      "epoch": 0.2644788389378971,
      "grad_norm": 1.8796470165252686,
      "learning_rate": 0.00014590122985144808,
      "loss": 4.5736,
      "num_input_tokens_seen": 140165984,
      "step": 24000
    },
    {
      "epoch": 0.2661318316812589,
      "grad_norm": 1.8495882749557495,
      "learning_rate": 0.00014581306845732256,
      "loss": 4.5613,
      "num_input_tokens_seen": 141043680,
      "step": 24150
    },
    {
      "epoch": 0.26778482442462076,
      "grad_norm": 1.8603812456130981,
      "learning_rate": 0.00014572490706319706,
      "loss": 4.5707,
      "num_input_tokens_seen": 141913088,
      "step": 24300
    },
    {
      "epoch": 0.26943781716798265,
      "grad_norm": 1.8450992107391357,
      "learning_rate": 0.00014563674566907153,
      "loss": 4.5685,
      "num_input_tokens_seen": 142780992,
      "step": 24450
    },
    {
      "epoch": 0.2710908099113445,
      "grad_norm": 1.9459301233291626,
      "learning_rate": 0.000145548584274946,
      "loss": 4.5624,
      "num_input_tokens_seen": 143655008,
      "step": 24600
    },
    {
      "epoch": 0.2727438026547063,
      "grad_norm": 1.87797212600708,
      "learning_rate": 0.0001454604228808205,
      "loss": 4.5753,
      "num_input_tokens_seen": 144533760,
      "step": 24750
    },
    {
      "epoch": 0.2743967953980682,
      "grad_norm": 1.9368420839309692,
      "learning_rate": 0.00014537226148669498,
      "loss": 4.5473,
      "num_input_tokens_seen": 145405376,
      "step": 24900
    },
    {
      "epoch": 0.27604978814143005,
      "grad_norm": 1.9425833225250244,
      "learning_rate": 0.0001452841000925695,
      "loss": 4.5621,
      "num_input_tokens_seen": 146278176,
      "step": 25050
    },
    {
      "epoch": 0.2777027808847919,
      "grad_norm": 1.8315942287445068,
      "learning_rate": 0.00014519593869844396,
      "loss": 4.5441,
      "num_input_tokens_seen": 147150880,
      "step": 25200
    },
    {
      "epoch": 0.2793557736281538,
      "grad_norm": 1.865020990371704,
      "learning_rate": 0.00014510777730431846,
      "loss": 4.5681,
      "num_input_tokens_seen": 148039488,
      "step": 25350
    },
    {
      "epoch": 0.2810087663715156,
      "grad_norm": 1.9058725833892822,
      "learning_rate": 0.00014501961591019294,
      "loss": 4.55,
      "num_input_tokens_seen": 148898720,
      "step": 25500
    },
    {
      "epoch": 0.2826617591148775,
      "grad_norm": 1.9358283281326294,
      "learning_rate": 0.00014493145451606741,
      "loss": 4.5608,
      "num_input_tokens_seen": 149776576,
      "step": 25650
    },
    {
      "epoch": 0.28431475185823935,
      "grad_norm": 1.871090292930603,
      "learning_rate": 0.00014484329312194192,
      "loss": 4.539,
      "num_input_tokens_seen": 150667296,
      "step": 25800
    },
    {
      "epoch": 0.2859677446016012,
      "grad_norm": 1.885185956954956,
      "learning_rate": 0.0001447551317278164,
      "loss": 4.5421,
      "num_input_tokens_seen": 151536512,
      "step": 25950
    }
  ],
  "logging_steps": 150,
  "max_steps": 272232,
  "num_input_tokens_seen": 151830560,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2344361017958400.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}