{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.0, "eval_steps": 500, "global_step": 1949, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0, "grad_norm": 0.06121898619475577, "learning_rate": 1.0256410256410257e-06, "loss": 0.9032, "step": 1 }, { "epoch": 0.0, "grad_norm": 0.05076189465509454, "learning_rate": 5.128205128205128e-06, "loss": 0.7741, "step": 5 }, { "epoch": 0.01, "grad_norm": 0.056309939667137404, "learning_rate": 1.0256410256410256e-05, "loss": 0.807, "step": 10 }, { "epoch": 0.01, "grad_norm": 0.08652735131470117, "learning_rate": 1.5384615384615387e-05, "loss": 0.8461, "step": 15 }, { "epoch": 0.01, "grad_norm": 0.07961187993196545, "learning_rate": 2.0512820512820512e-05, "loss": 0.7646, "step": 20 }, { "epoch": 0.01, "grad_norm": 0.09626619309485744, "learning_rate": 2.564102564102564e-05, "loss": 0.7847, "step": 25 }, { "epoch": 0.02, "grad_norm": 0.07333598901062204, "learning_rate": 3.0769230769230774e-05, "loss": 0.804, "step": 30 }, { "epoch": 0.02, "grad_norm": 0.07656413646264956, "learning_rate": 3.58974358974359e-05, "loss": 0.762, "step": 35 }, { "epoch": 0.02, "grad_norm": 0.10274445509894202, "learning_rate": 4.1025641025641023e-05, "loss": 0.7525, "step": 40 }, { "epoch": 0.02, "grad_norm": 0.06931022616141319, "learning_rate": 4.615384615384616e-05, "loss": 0.7161, "step": 45 }, { "epoch": 0.03, "grad_norm": 0.06249108360478095, "learning_rate": 5.128205128205128e-05, "loss": 0.7223, "step": 50 }, { "epoch": 0.03, "grad_norm": 0.06488575342654651, "learning_rate": 5.6410256410256414e-05, "loss": 0.7326, "step": 55 }, { "epoch": 0.03, "grad_norm": 0.07291924815316946, "learning_rate": 6.153846153846155e-05, "loss": 0.8038, "step": 60 }, { "epoch": 0.03, "grad_norm": 0.060040360077683416, "learning_rate": 6.666666666666667e-05, "loss": 0.7521, "step": 65 }, { "epoch": 0.04, "grad_norm": 0.08523783674260317, "learning_rate": 
7.17948717948718e-05, "loss": 0.709, "step": 70 }, { "epoch": 0.04, "grad_norm": 0.06703200831653133, "learning_rate": 7.692307692307693e-05, "loss": 0.7942, "step": 75 }, { "epoch": 0.04, "grad_norm": 0.05463611621688078, "learning_rate": 8.205128205128205e-05, "loss": 0.6818, "step": 80 }, { "epoch": 0.04, "grad_norm": 0.07432808646419857, "learning_rate": 8.717948717948718e-05, "loss": 0.7341, "step": 85 }, { "epoch": 0.05, "grad_norm": 0.06238952277649466, "learning_rate": 9.230769230769232e-05, "loss": 0.7342, "step": 90 }, { "epoch": 0.05, "grad_norm": 0.07425456689389622, "learning_rate": 9.743589743589744e-05, "loss": 0.7742, "step": 95 }, { "epoch": 0.05, "grad_norm": 0.0605404650874565, "learning_rate": 0.00010256410256410256, "loss": 0.7809, "step": 100 }, { "epoch": 0.05, "grad_norm": 0.0694984420567863, "learning_rate": 0.0001076923076923077, "loss": 0.7539, "step": 105 }, { "epoch": 0.06, "grad_norm": 0.06656828962914285, "learning_rate": 0.00011282051282051283, "loss": 0.7593, "step": 110 }, { "epoch": 0.06, "grad_norm": 0.05957346631757917, "learning_rate": 0.00011794871794871796, "loss": 0.7155, "step": 115 }, { "epoch": 0.06, "grad_norm": 0.05662071050147848, "learning_rate": 0.0001230769230769231, "loss": 0.7591, "step": 120 }, { "epoch": 0.06, "grad_norm": 0.06578556046938931, "learning_rate": 0.00012820512820512823, "loss": 0.7192, "step": 125 }, { "epoch": 0.07, "grad_norm": 0.051881403101396946, "learning_rate": 0.00013333333333333334, "loss": 0.7401, "step": 130 }, { "epoch": 0.07, "grad_norm": 0.042026782084728216, "learning_rate": 0.00013846153846153847, "loss": 0.7068, "step": 135 }, { "epoch": 0.07, "grad_norm": 0.0557128886654579, "learning_rate": 0.0001435897435897436, "loss": 0.6984, "step": 140 }, { "epoch": 0.07, "grad_norm": 0.05570176969330806, "learning_rate": 0.00014871794871794872, "loss": 0.6639, "step": 145 }, { "epoch": 0.08, "grad_norm": 0.06656654996242936, "learning_rate": 0.00015384615384615385, "loss": 0.7783, "step": 
150 }, { "epoch": 0.08, "grad_norm": 0.045655704141469376, "learning_rate": 0.00015897435897435896, "loss": 0.681, "step": 155 }, { "epoch": 0.08, "grad_norm": 0.05653995551779284, "learning_rate": 0.0001641025641025641, "loss": 0.6951, "step": 160 }, { "epoch": 0.08, "grad_norm": 0.06359181018131281, "learning_rate": 0.00016923076923076923, "loss": 0.7425, "step": 165 }, { "epoch": 0.09, "grad_norm": 0.05324062210646119, "learning_rate": 0.00017435897435897436, "loss": 0.7827, "step": 170 }, { "epoch": 0.09, "grad_norm": 0.05722015806927473, "learning_rate": 0.0001794871794871795, "loss": 0.8072, "step": 175 }, { "epoch": 0.09, "grad_norm": 0.057554172233914125, "learning_rate": 0.00018461538461538463, "loss": 0.6969, "step": 180 }, { "epoch": 0.09, "grad_norm": 0.06236094601265461, "learning_rate": 0.00018974358974358974, "loss": 0.7717, "step": 185 }, { "epoch": 0.1, "grad_norm": 0.04706829300587947, "learning_rate": 0.00019487179487179487, "loss": 0.6552, "step": 190 }, { "epoch": 0.1, "grad_norm": 0.05907857575128907, "learning_rate": 0.0002, "loss": 0.7665, "step": 195 }, { "epoch": 0.1, "grad_norm": 0.05666486553086404, "learning_rate": 0.00019999598996948235, "loss": 0.722, "step": 200 }, { "epoch": 0.11, "grad_norm": 0.06751342355465378, "learning_rate": 0.00019998396019953624, "loss": 0.7304, "step": 205 }, { "epoch": 0.11, "grad_norm": 0.05870214883989128, "learning_rate": 0.0001999639116549566, "loss": 0.6464, "step": 210 }, { "epoch": 0.11, "grad_norm": 0.06856502375216533, "learning_rate": 0.00019993584594364894, "loss": 0.6875, "step": 215 }, { "epoch": 0.11, "grad_norm": 0.04809000181014521, "learning_rate": 0.0001998997653165004, "loss": 0.7196, "step": 220 }, { "epoch": 0.12, "grad_norm": 0.06330068013492544, "learning_rate": 0.00019985567266719934, "loss": 0.8071, "step": 225 }, { "epoch": 0.12, "grad_norm": 0.06501358267774308, "learning_rate": 0.00019980357153200315, "loss": 0.7484, "step": 230 }, { "epoch": 0.12, "grad_norm": 
0.056442995828752306, "learning_rate": 0.00019974346608945466, "loss": 0.7425, "step": 235 }, { "epoch": 0.12, "grad_norm": 0.05136163609317852, "learning_rate": 0.00019967536116004698, "loss": 0.7365, "step": 240 }, { "epoch": 0.13, "grad_norm": 0.06539335014303507, "learning_rate": 0.00019959926220583713, "loss": 0.7225, "step": 245 }, { "epoch": 0.13, "grad_norm": 0.05450780975464187, "learning_rate": 0.00019951517533000764, "loss": 0.6874, "step": 250 }, { "epoch": 0.13, "grad_norm": 0.05390893998675368, "learning_rate": 0.00019942310727637724, "loss": 0.7312, "step": 255 }, { "epoch": 0.13, "grad_norm": 0.05549601275822287, "learning_rate": 0.00019932306542886009, "loss": 0.7505, "step": 260 }, { "epoch": 0.14, "grad_norm": 0.06035962160268456, "learning_rate": 0.00019921505781087334, "loss": 0.7648, "step": 265 }, { "epoch": 0.14, "grad_norm": 0.05407880633367139, "learning_rate": 0.00019909909308469398, "loss": 0.7745, "step": 270 }, { "epoch": 0.14, "grad_norm": 0.07099486719812642, "learning_rate": 0.0001989751805507637, "loss": 0.7364, "step": 275 }, { "epoch": 0.14, "grad_norm": 0.06961377188666044, "learning_rate": 0.00019884333014694345, "loss": 0.7675, "step": 280 }, { "epoch": 0.15, "grad_norm": 0.05666459121830388, "learning_rate": 0.00019870355244771607, "loss": 0.7478, "step": 285 }, { "epoch": 0.15, "grad_norm": 0.06243860942546019, "learning_rate": 0.00019855585866333835, "loss": 0.7405, "step": 290 }, { "epoch": 0.15, "grad_norm": 0.06614433181862764, "learning_rate": 0.00019840026063894193, "loss": 0.746, "step": 295 }, { "epoch": 0.15, "grad_norm": 0.07249007154385097, "learning_rate": 0.00019823677085358335, "loss": 0.7144, "step": 300 }, { "epoch": 0.16, "grad_norm": 0.059290083038044415, "learning_rate": 0.00019806540241924317, "loss": 0.7228, "step": 305 }, { "epoch": 0.16, "grad_norm": 0.0590976532576803, "learning_rate": 0.00019788616907977441, "loss": 0.7468, "step": 310 }, { "epoch": 0.16, "grad_norm": 0.059034355580792085, 
"learning_rate": 0.00019769908520980034, "loss": 0.7107, "step": 315 }, { "epoch": 0.16, "grad_norm": 0.06039820906529118, "learning_rate": 0.00019750416581356146, "loss": 0.7255, "step": 320 }, { "epoch": 0.17, "grad_norm": 0.07506252340754628, "learning_rate": 0.00019730142652371236, "loss": 0.6218, "step": 325 }, { "epoch": 0.17, "grad_norm": 0.06227640687325107, "learning_rate": 0.0001970908836000678, "loss": 0.7271, "step": 330 }, { "epoch": 0.17, "grad_norm": 0.059379397850922444, "learning_rate": 0.00019687255392829877, "loss": 0.6962, "step": 335 }, { "epoch": 0.17, "grad_norm": 0.06733271176615478, "learning_rate": 0.0001966464550185782, "loss": 0.6985, "step": 340 }, { "epoch": 0.18, "grad_norm": 0.06276187431993832, "learning_rate": 0.0001964126050041767, "loss": 0.6847, "step": 345 }, { "epoch": 0.18, "grad_norm": 0.06756039835970662, "learning_rate": 0.0001961710226400081, "loss": 0.7006, "step": 350 }, { "epoch": 0.18, "grad_norm": 0.058664527111491456, "learning_rate": 0.00019592172730112544, "loss": 0.7226, "step": 355 }, { "epoch": 0.18, "grad_norm": 0.05992656585552198, "learning_rate": 0.00019566473898116713, "loss": 0.7373, "step": 360 }, { "epoch": 0.19, "grad_norm": 0.059131250218324535, "learning_rate": 0.0001954000782907532, "loss": 0.7174, "step": 365 }, { "epoch": 0.19, "grad_norm": 0.05973639610049065, "learning_rate": 0.00019512776645583263, "loss": 0.7287, "step": 370 }, { "epoch": 0.19, "grad_norm": 0.07529175059087607, "learning_rate": 0.00019484782531598073, "loss": 0.7584, "step": 375 }, { "epoch": 0.19, "grad_norm": 0.05996235414061146, "learning_rate": 0.00019456027732264784, "loss": 0.7802, "step": 380 }, { "epoch": 0.2, "grad_norm": 0.0741871606600372, "learning_rate": 0.00019426514553735848, "loss": 0.7574, "step": 385 }, { "epoch": 0.2, "grad_norm": 0.05719814806544979, "learning_rate": 0.00019396245362986197, "loss": 0.7187, "step": 390 }, { "epoch": 0.2, "grad_norm": 0.062306915853923, "learning_rate": 
0.00019365222587623405, "loss": 0.6974, "step": 395 }, { "epoch": 0.21, "grad_norm": 0.0578376102759499, "learning_rate": 0.00019333448715692995, "loss": 0.7263, "step": 400 }, { "epoch": 0.21, "grad_norm": 0.05895437900134049, "learning_rate": 0.00019300926295478884, "loss": 0.7737, "step": 405 }, { "epoch": 0.21, "grad_norm": 0.06528074688398286, "learning_rate": 0.0001926765793529902, "loss": 0.7083, "step": 410 }, { "epoch": 0.21, "grad_norm": 0.06352416055439895, "learning_rate": 0.00019233646303296205, "loss": 0.7538, "step": 415 }, { "epoch": 0.22, "grad_norm": 0.05931981491215686, "learning_rate": 0.00019198894127224074, "loss": 0.7418, "step": 420 }, { "epoch": 0.22, "grad_norm": 0.06745142735169624, "learning_rate": 0.0001916340419422837, "loss": 0.7643, "step": 425 }, { "epoch": 0.22, "grad_norm": 0.05313044781100756, "learning_rate": 0.00019127179350623372, "loss": 0.7045, "step": 430 }, { "epoch": 0.22, "grad_norm": 0.058146422311574276, "learning_rate": 0.0001909022250166365, "loss": 0.7029, "step": 435 }, { "epoch": 0.23, "grad_norm": 0.06465624410270512, "learning_rate": 0.00019052536611311046, "loss": 0.7329, "step": 440 }, { "epoch": 0.23, "grad_norm": 0.07476190763880249, "learning_rate": 0.00019014124701996973, "loss": 0.7337, "step": 445 }, { "epoch": 0.23, "grad_norm": 0.06473337914713544, "learning_rate": 0.00018974989854379996, "loss": 0.7425, "step": 450 }, { "epoch": 0.23, "grad_norm": 0.06217891087939524, "learning_rate": 0.00018935135207098785, "loss": 0.7233, "step": 455 }, { "epoch": 0.24, "grad_norm": 0.06538705398075274, "learning_rate": 0.00018894563956520374, "loss": 0.7181, "step": 460 }, { "epoch": 0.24, "grad_norm": 0.06585818368422851, "learning_rate": 0.00018853279356483826, "loss": 0.7839, "step": 465 }, { "epoch": 0.24, "grad_norm": 0.07158174416486009, "learning_rate": 0.00018811284718039256, "loss": 0.6415, "step": 470 }, { "epoch": 0.24, "grad_norm": 0.07546822617864929, "learning_rate": 0.00018768583409182305, "loss": 
0.7182, "step": 475 }, { "epoch": 0.25, "grad_norm": 0.05416665482621128, "learning_rate": 0.00018725178854584007, "loss": 0.7367, "step": 480 }, { "epoch": 0.25, "grad_norm": 0.04571169273365327, "learning_rate": 0.00018681074535316125, "loss": 0.7597, "step": 485 }, { "epoch": 0.25, "grad_norm": 0.0662815718580705, "learning_rate": 0.00018636273988571991, "loss": 0.6998, "step": 490 }, { "epoch": 0.25, "grad_norm": 0.06457843383025075, "learning_rate": 0.0001859078080738279, "loss": 0.6547, "step": 495 }, { "epoch": 0.26, "grad_norm": 0.054545024268562024, "learning_rate": 0.00018544598640329432, "loss": 0.7352, "step": 500 }, { "epoch": 0.26, "grad_norm": 0.05705170468183904, "learning_rate": 0.00018497731191249894, "loss": 0.7663, "step": 505 }, { "epoch": 0.26, "grad_norm": 0.05546917332133685, "learning_rate": 0.000184501822189422, "loss": 0.7412, "step": 510 }, { "epoch": 0.26, "grad_norm": 0.061537343417600796, "learning_rate": 0.00018401955536862948, "loss": 0.7453, "step": 515 }, { "epoch": 0.27, "grad_norm": 0.05548194532215062, "learning_rate": 0.0001835305501282148, "loss": 0.7617, "step": 520 }, { "epoch": 0.27, "grad_norm": 0.05596980684672694, "learning_rate": 0.00018303484568669667, "loss": 0.7057, "step": 525 }, { "epoch": 0.27, "grad_norm": 0.05553023356923259, "learning_rate": 0.00018253248179987388, "loss": 0.666, "step": 530 }, { "epoch": 0.27, "grad_norm": 0.06815419873023405, "learning_rate": 0.0001820234987576368, "loss": 0.7085, "step": 535 }, { "epoch": 0.28, "grad_norm": 0.07008274336459665, "learning_rate": 0.00018150793738073602, "loss": 0.7008, "step": 540 }, { "epoch": 0.28, "grad_norm": 0.06075688816959689, "learning_rate": 0.00018098583901750867, "loss": 0.7204, "step": 545 }, { "epoch": 0.28, "grad_norm": 0.0740518099336573, "learning_rate": 0.00018045724554056214, "loss": 0.7168, "step": 550 }, { "epoch": 0.28, "grad_norm": 0.06931559683782222, "learning_rate": 0.0001799221993434159, "loss": 0.7513, "step": 555 }, { "epoch": 
0.29, "grad_norm": 0.0808502355660198, "learning_rate": 0.00017938074333710157, "loss": 0.7221, "step": 560 }, { "epoch": 0.29, "grad_norm": 0.057224574717575194, "learning_rate": 0.00017883292094672128, "loss": 0.7159, "step": 565 }, { "epoch": 0.29, "grad_norm": 0.06478263819099996, "learning_rate": 0.00017827877610796514, "loss": 0.7003, "step": 570 }, { "epoch": 0.3, "grad_norm": 0.06759261764339991, "learning_rate": 0.00017771835326358743, "loss": 0.7051, "step": 575 }, { "epoch": 0.3, "grad_norm": 0.054230696689679704, "learning_rate": 0.00017715169735984233, "loss": 0.6614, "step": 580 }, { "epoch": 0.3, "grad_norm": 0.05539403398423511, "learning_rate": 0.0001765788538428792, "loss": 0.7778, "step": 585 }, { "epoch": 0.3, "grad_norm": 0.1047776677529583, "learning_rate": 0.00017599986865509767, "loss": 0.7174, "step": 590 }, { "epoch": 0.31, "grad_norm": 0.054148533545801215, "learning_rate": 0.00017541478823146327, "loss": 0.7501, "step": 595 }, { "epoch": 0.31, "grad_norm": 0.062010495956553884, "learning_rate": 0.00017482365949578302, "loss": 0.7168, "step": 600 }, { "epoch": 0.31, "grad_norm": 0.06518741238538411, "learning_rate": 0.00017422652985694237, "loss": 0.7587, "step": 605 }, { "epoch": 0.31, "grad_norm": 0.05897004105933778, "learning_rate": 0.00017362344720510278, "loss": 0.7, "step": 610 }, { "epoch": 0.32, "grad_norm": 0.06674251786961966, "learning_rate": 0.00017301445990786102, "loss": 0.7656, "step": 615 }, { "epoch": 0.32, "grad_norm": 0.059907265503361756, "learning_rate": 0.00017239961680637, "loss": 0.7302, "step": 620 }, { "epoch": 0.32, "grad_norm": 0.05659492984342654, "learning_rate": 0.0001717789672114218, "loss": 0.8095, "step": 625 }, { "epoch": 0.32, "grad_norm": 0.06211544092043524, "learning_rate": 0.0001711525608994927, "loss": 0.6989, "step": 630 }, { "epoch": 0.33, "grad_norm": 0.07196474317183386, "learning_rate": 0.00017052044810875126, "loss": 0.7653, "step": 635 }, { "epoch": 0.33, "grad_norm": 0.0703844878707138, 
"learning_rate": 0.00016988267953502913, "loss": 0.7115, "step": 640 }, { "epoch": 0.33, "grad_norm": 0.06502783286863137, "learning_rate": 0.00016923930632775516, "loss": 0.6975, "step": 645 }, { "epoch": 0.33, "grad_norm": 0.06625609228888715, "learning_rate": 0.00016859038008585326, "loss": 0.7186, "step": 650 }, { "epoch": 0.34, "grad_norm": 0.060170689666281245, "learning_rate": 0.0001679359528536041, "loss": 0.6841, "step": 655 }, { "epoch": 0.34, "grad_norm": 0.06189493863054274, "learning_rate": 0.00016727607711647114, "loss": 0.7285, "step": 660 }, { "epoch": 0.34, "grad_norm": 0.06529829421352147, "learning_rate": 0.00016661080579689132, "loss": 0.7046, "step": 665 }, { "epoch": 0.34, "grad_norm": 0.07643231666581908, "learning_rate": 0.0001659401922500304, "loss": 0.712, "step": 670 }, { "epoch": 0.35, "grad_norm": 0.06356569960794868, "learning_rate": 0.00016526429025950424, "loss": 0.6761, "step": 675 }, { "epoch": 0.35, "grad_norm": 0.0657780464611706, "learning_rate": 0.00016458315403306502, "loss": 0.7623, "step": 680 }, { "epoch": 0.35, "grad_norm": 0.06528216463724903, "learning_rate": 0.0001638968381982538, "loss": 0.7035, "step": 685 }, { "epoch": 0.35, "grad_norm": 0.05768226228469023, "learning_rate": 0.0001632053977980194, "loss": 0.7592, "step": 690 }, { "epoch": 0.36, "grad_norm": 0.08439997111949711, "learning_rate": 0.000162508888286304, "loss": 0.7302, "step": 695 }, { "epoch": 0.36, "grad_norm": 0.05921155467869476, "learning_rate": 0.00016180736552359553, "loss": 0.7561, "step": 700 }, { "epoch": 0.36, "grad_norm": 0.06479164017299047, "learning_rate": 0.00016110088577244773, "loss": 0.7319, "step": 705 }, { "epoch": 0.36, "grad_norm": 0.06671590691395402, "learning_rate": 0.00016038950569296785, "loss": 0.7056, "step": 710 }, { "epoch": 0.37, "grad_norm": 0.060116735850209595, "learning_rate": 0.00015967328233827249, "loss": 0.7566, "step": 715 }, { "epoch": 0.37, "grad_norm": 0.0671946478188747, "learning_rate": 
0.00015895227314991178, "loss": 0.672, "step": 720 }, { "epoch": 0.37, "grad_norm": 0.06337938637321384, "learning_rate": 0.00015822653595326275, "loss": 0.7015, "step": 725 }, { "epoch": 0.37, "grad_norm": 0.05904487810677323, "learning_rate": 0.00015749612895289152, "loss": 0.7058, "step": 730 }, { "epoch": 0.38, "grad_norm": 0.05858071309598837, "learning_rate": 0.00015676111072788527, "loss": 0.7012, "step": 735 }, { "epoch": 0.38, "grad_norm": 0.07042516508192798, "learning_rate": 0.00015602154022715435, "loss": 0.732, "step": 740 }, { "epoch": 0.38, "grad_norm": 0.06845893159668907, "learning_rate": 0.0001552774767647043, "loss": 0.6938, "step": 745 }, { "epoch": 0.38, "grad_norm": 0.0725751629044133, "learning_rate": 0.0001545289800148789, "loss": 0.7172, "step": 750 }, { "epoch": 0.39, "grad_norm": 0.06616285707027898, "learning_rate": 0.0001537761100075744, "loss": 0.7247, "step": 755 }, { "epoch": 0.39, "grad_norm": 0.055431700566913336, "learning_rate": 0.00015301892712342482, "loss": 0.7647, "step": 760 }, { "epoch": 0.39, "grad_norm": 0.07044884291430001, "learning_rate": 0.00015225749208895968, "loss": 0.7562, "step": 765 }, { "epoch": 0.4, "grad_norm": 0.05989702445794234, "learning_rate": 0.0001514918659717335, "loss": 0.7686, "step": 770 }, { "epoch": 0.4, "grad_norm": 0.054216080405442194, "learning_rate": 0.00015072211017542813, "loss": 0.7333, "step": 775 }, { "epoch": 0.4, "grad_norm": 0.05722110654549261, "learning_rate": 0.00014994828643492827, "loss": 0.7012, "step": 780 }, { "epoch": 0.4, "grad_norm": 0.06301212828108414, "learning_rate": 0.00014917045681137026, "loss": 0.7175, "step": 785 }, { "epoch": 0.41, "grad_norm": 0.060197780995845586, "learning_rate": 0.0001483886836871646, "loss": 0.673, "step": 790 }, { "epoch": 0.41, "grad_norm": 0.06219148524987427, "learning_rate": 0.00014760302976099304, "loss": 0.7114, "step": 795 }, { "epoch": 0.41, "grad_norm": 0.07450360556471726, "learning_rate": 0.00014681355804278001, "loss": 0.7053, 
"step": 800 }, { "epoch": 0.41, "grad_norm": 0.05107293406160496, "learning_rate": 0.00014602033184863913, "loss": 0.7106, "step": 805 }, { "epoch": 0.42, "grad_norm": 0.06265788071525129, "learning_rate": 0.00014522341479579533, "loss": 0.7966, "step": 810 }, { "epoch": 0.42, "grad_norm": 0.05206720433117282, "learning_rate": 0.00014442287079748263, "loss": 0.7293, "step": 815 }, { "epoch": 0.42, "grad_norm": 0.0757239829239859, "learning_rate": 0.00014361876405781832, "loss": 0.7085, "step": 820 }, { "epoch": 0.42, "grad_norm": 0.07831803288446669, "learning_rate": 0.00014281115906665374, "loss": 0.7687, "step": 825 }, { "epoch": 0.43, "grad_norm": 0.07291838892590645, "learning_rate": 0.00014200012059440207, "loss": 0.7025, "step": 830 }, { "epoch": 0.43, "grad_norm": 0.06634284103936865, "learning_rate": 0.00014118571368684383, "loss": 0.7781, "step": 835 }, { "epoch": 0.43, "grad_norm": 0.06089133278443612, "learning_rate": 0.00014036800365991008, "loss": 0.6563, "step": 840 }, { "epoch": 0.43, "grad_norm": 0.071974124311568, "learning_rate": 0.00013954705609444404, "loss": 0.7234, "step": 845 }, { "epoch": 0.44, "grad_norm": 0.0640536251178564, "learning_rate": 0.00013872293683094152, "loss": 0.7471, "step": 850 }, { "epoch": 0.44, "grad_norm": 0.06850057235253415, "learning_rate": 0.00013789571196427055, "loss": 0.7212, "step": 855 }, { "epoch": 0.44, "grad_norm": 0.07611987427477505, "learning_rate": 0.00013706544783837022, "loss": 0.6921, "step": 860 }, { "epoch": 0.44, "grad_norm": 0.06558103857160827, "learning_rate": 0.00013623221104093025, "loss": 0.7803, "step": 865 }, { "epoch": 0.45, "grad_norm": 0.060280688322475906, "learning_rate": 0.00013539606839805036, "loss": 0.7223, "step": 870 }, { "epoch": 0.45, "grad_norm": 0.0644359499391939, "learning_rate": 0.00013455708696888085, "loss": 0.7332, "step": 875 }, { "epoch": 0.45, "grad_norm": 0.0545048505716513, "learning_rate": 0.00013371533404024438, "loss": 0.7426, "step": 880 }, { "epoch": 0.45, 
"grad_norm": 0.062408064191097595, "learning_rate": 0.00013287087712123962, "loss": 0.7533, "step": 885 }, { "epoch": 0.46, "grad_norm": 0.06389869452396484, "learning_rate": 0.00013202378393782692, "loss": 0.7179, "step": 890 }, { "epoch": 0.46, "grad_norm": 0.08333515596016716, "learning_rate": 0.00013117412242739655, "loss": 0.6805, "step": 895 }, { "epoch": 0.46, "grad_norm": 0.06605322926802382, "learning_rate": 0.00013032196073332027, "loss": 0.7626, "step": 900 }, { "epoch": 0.46, "grad_norm": 0.06438983023731973, "learning_rate": 0.00012946736719948607, "loss": 0.7285, "step": 905 }, { "epoch": 0.47, "grad_norm": 0.05317618314846494, "learning_rate": 0.000128610410364817, "loss": 0.6886, "step": 910 }, { "epoch": 0.47, "grad_norm": 0.06426168509188175, "learning_rate": 0.00012775115895777417, "loss": 0.7066, "step": 915 }, { "epoch": 0.47, "grad_norm": 0.07823316157528669, "learning_rate": 0.00012688968189084493, "loss": 0.6876, "step": 920 }, { "epoch": 0.47, "grad_norm": 0.06352925688166776, "learning_rate": 0.00012602604825501587, "loss": 0.7281, "step": 925 }, { "epoch": 0.48, "grad_norm": 0.0560358701166056, "learning_rate": 0.00012516032731423165, "loss": 0.714, "step": 930 }, { "epoch": 0.48, "grad_norm": 0.05967077285578719, "learning_rate": 0.00012429258849984014, "loss": 0.7313, "step": 935 }, { "epoch": 0.48, "grad_norm": 0.06331893233925272, "learning_rate": 0.00012342290140502388, "loss": 0.7319, "step": 940 }, { "epoch": 0.48, "grad_norm": 0.06346188515705922, "learning_rate": 0.00012255133577921868, "loss": 0.7298, "step": 945 }, { "epoch": 0.49, "grad_norm": 0.06487969791361832, "learning_rate": 0.0001216779615225197, "loss": 0.6916, "step": 950 }, { "epoch": 0.49, "grad_norm": 0.06459701736380498, "learning_rate": 0.00012080284868007541, "loss": 0.7131, "step": 955 }, { "epoch": 0.49, "grad_norm": 0.0637596839504511, "learning_rate": 0.0001199260674364699, "loss": 0.7483, "step": 960 }, { "epoch": 0.5, "grad_norm": 0.058147103713184295, 
"learning_rate": 0.00011904768811009405, "loss": 0.7546, "step": 965 }, { "epoch": 0.5, "grad_norm": 0.04960680867211613, "learning_rate": 0.00011816778114750593, "loss": 0.7271, "step": 970 }, { "epoch": 0.5, "grad_norm": 0.054982008015380404, "learning_rate": 0.00011728641711778103, "loss": 0.7482, "step": 975 }, { "epoch": 0.5, "grad_norm": 0.05707338359759536, "learning_rate": 0.00011640366670685248, "loss": 0.6697, "step": 980 }, { "epoch": 0.51, "grad_norm": 0.05927872290783747, "learning_rate": 0.00011551960071184195, "loss": 0.8193, "step": 985 }, { "epoch": 0.51, "grad_norm": 0.07725069996761502, "learning_rate": 0.00011463429003538196, "loss": 0.7432, "step": 990 }, { "epoch": 0.51, "grad_norm": 0.05725145842711473, "learning_rate": 0.000113747805679929, "loss": 0.759, "step": 995 }, { "epoch": 0.51, "grad_norm": 0.07186182485542994, "learning_rate": 0.00011286021874206952, "loss": 0.6704, "step": 1000 }, { "epoch": 0.52, "grad_norm": 0.0686861437931256, "learning_rate": 0.00011197160040681762, "loss": 0.8042, "step": 1005 }, { "epoch": 0.52, "grad_norm": 0.05601004454883831, "learning_rate": 0.0001110820219419062, "loss": 0.7338, "step": 1010 }, { "epoch": 0.52, "grad_norm": 0.05732144501117736, "learning_rate": 0.0001101915546920711, "loss": 0.7266, "step": 1015 }, { "epoch": 0.52, "grad_norm": 0.05767273887002733, "learning_rate": 0.00010930027007332923, "loss": 0.6721, "step": 1020 }, { "epoch": 0.53, "grad_norm": 0.06070633674818661, "learning_rate": 0.00010840823956725103, "loss": 0.7276, "step": 1025 }, { "epoch": 0.53, "grad_norm": 0.0608684388554378, "learning_rate": 0.00010751553471522757, "loss": 0.7308, "step": 1030 }, { "epoch": 0.53, "grad_norm": 0.058955147100918356, "learning_rate": 0.00010662222711273279, "loss": 0.6947, "step": 1035 }, { "epoch": 0.53, "grad_norm": 0.060134707447051496, "learning_rate": 0.00010572838840358168, "loss": 0.6722, "step": 1040 }, { "epoch": 0.54, "grad_norm": 0.06428460825842836, "learning_rate": 
0.00010483409027418425, "loss": 0.6918, "step": 1045 }, { "epoch": 0.54, "grad_norm": 0.06287339641207959, "learning_rate": 0.00010393940444779635, "loss": 0.6713, "step": 1050 }, { "epoch": 0.54, "grad_norm": 0.0633410011658368, "learning_rate": 0.00010304440267876727, "loss": 0.7685, "step": 1055 }, { "epoch": 0.54, "grad_norm": 0.06581703699322343, "learning_rate": 0.00010214915674678523, "loss": 0.7767, "step": 1060 }, { "epoch": 0.55, "grad_norm": 0.07413976856973425, "learning_rate": 0.00010125373845112034, "loss": 0.762, "step": 1065 }, { "epoch": 0.55, "grad_norm": 0.07167916408221033, "learning_rate": 0.00010035821960486643, "loss": 0.7376, "step": 1070 }, { "epoch": 0.55, "grad_norm": 0.06708982281673678, "learning_rate": 9.946267202918157e-05, "loss": 0.6889, "step": 1075 }, { "epoch": 0.55, "grad_norm": 0.0819220894814777, "learning_rate": 9.856716754752796e-05, "loss": 0.6908, "step": 1080 }, { "epoch": 0.56, "grad_norm": 0.06608143578295782, "learning_rate": 9.767177797991155e-05, "loss": 0.7399, "step": 1085 }, { "epoch": 0.56, "grad_norm": 0.05592575944392656, "learning_rate": 9.677657513712221e-05, "loss": 0.7138, "step": 1090 }, { "epoch": 0.56, "grad_norm": 0.0546831567691992, "learning_rate": 9.588163081497427e-05, "loss": 0.7627, "step": 1095 }, { "epoch": 0.56, "grad_norm": 0.06346039164995447, "learning_rate": 9.498701678854865e-05, "loss": 0.7236, "step": 1100 }, { "epoch": 0.57, "grad_norm": 0.0746991316854835, "learning_rate": 9.409280480643628e-05, "loss": 0.7629, "step": 1105 }, { "epoch": 0.57, "grad_norm": 0.06237683578790127, "learning_rate": 9.319906658498389e-05, "loss": 0.7416, "step": 1110 }, { "epoch": 0.57, "grad_norm": 0.06387729635810044, "learning_rate": 9.230587380254237e-05, "loss": 0.7184, "step": 1115 }, { "epoch": 0.57, "grad_norm": 0.053639300773281065, "learning_rate": 9.141329809371803e-05, "loss": 0.7293, "step": 1120 }, { "epoch": 0.58, "grad_norm": 0.055148045352961035, "learning_rate": 9.052141104362748e-05, 
"loss": 0.6874, "step": 1125 }, { "epoch": 0.58, "grad_norm": 0.06026774699282564, "learning_rate": 8.963028418215653e-05, "loss": 0.792, "step": 1130 }, { "epoch": 0.58, "grad_norm": 0.05815501167689513, "learning_rate": 8.873998897822336e-05, "loss": 0.7768, "step": 1135 }, { "epoch": 0.58, "grad_norm": 0.06092520719858454, "learning_rate": 8.785059683404672e-05, "loss": 0.6928, "step": 1140 }, { "epoch": 0.59, "grad_norm": 0.06680081897157324, "learning_rate": 8.696217907941941e-05, "loss": 0.744, "step": 1145 }, { "epoch": 0.59, "grad_norm": 0.07064620273754864, "learning_rate": 8.607480696598762e-05, "loss": 0.6606, "step": 1150 }, { "epoch": 0.59, "grad_norm": 0.06809202255793978, "learning_rate": 8.518855166153644e-05, "loss": 0.7513, "step": 1155 }, { "epoch": 0.6, "grad_norm": 0.06295987055363506, "learning_rate": 8.43034842442822e-05, "loss": 0.7085, "step": 1160 }, { "epoch": 0.6, "grad_norm": 0.0628445698799043, "learning_rate": 8.341967569717202e-05, "loss": 0.668, "step": 1165 }, { "epoch": 0.6, "grad_norm": 0.06659960321763707, "learning_rate": 8.253719690219079e-05, "loss": 0.6853, "step": 1170 }, { "epoch": 0.6, "grad_norm": 0.0688529485097757, "learning_rate": 8.165611863467644e-05, "loss": 0.7052, "step": 1175 }, { "epoch": 0.61, "grad_norm": 0.06023727493720994, "learning_rate": 8.077651155764387e-05, "loss": 0.6858, "step": 1180 }, { "epoch": 0.61, "grad_norm": 0.06220450495769832, "learning_rate": 7.98984462161175e-05, "loss": 0.7433, "step": 1185 }, { "epoch": 0.61, "grad_norm": 0.053745746371760474, "learning_rate": 7.902199303147363e-05, "loss": 0.7362, "step": 1190 }, { "epoch": 0.61, "grad_norm": 0.061298250546248934, "learning_rate": 7.814722229579264e-05, "loss": 0.7385, "step": 1195 }, { "epoch": 0.62, "grad_norm": 0.06481532056857399, "learning_rate": 7.727420416622144e-05, "loss": 0.7399, "step": 1200 }, { "epoch": 0.62, "grad_norm": 0.05841856930605358, "learning_rate": 7.640300865934687e-05, "loss": 0.6672, "step": 1205 }, { 
"epoch": 0.62, "grad_norm": 0.06847941130435668, "learning_rate": 7.553370564558032e-05, "loss": 0.73, "step": 1210 }, { "epoch": 0.62, "grad_norm": 0.053093666640836194, "learning_rate": 7.46663648435541e-05, "loss": 0.7162, "step": 1215 }, { "epoch": 0.63, "grad_norm": 0.07700372089299697, "learning_rate": 7.380105581452987e-05, "loss": 0.7027, "step": 1220 }, { "epoch": 0.63, "grad_norm": 0.06497009385059972, "learning_rate": 7.293784795681994e-05, "loss": 0.7629, "step": 1225 }, { "epoch": 0.63, "grad_norm": 0.07176927987751118, "learning_rate": 7.207681050022132e-05, "loss": 0.7009, "step": 1230 }, { "epoch": 0.63, "grad_norm": 0.06323784869312263, "learning_rate": 7.121801250046363e-05, "loss": 0.8098, "step": 1235 }, { "epoch": 0.64, "grad_norm": 0.05550749509584078, "learning_rate": 7.036152283367056e-05, "loss": 0.6892, "step": 1240 }, { "epoch": 0.64, "grad_norm": 0.06233653087775129, "learning_rate": 6.950741019083617e-05, "loss": 0.722, "step": 1245 }, { "epoch": 0.64, "grad_norm": 0.061296742852639696, "learning_rate": 6.865574307231575e-05, "loss": 0.7057, "step": 1250 }, { "epoch": 0.64, "grad_norm": 0.07218037431449478, "learning_rate": 6.780658978233199e-05, "loss": 0.7104, "step": 1255 }, { "epoch": 0.65, "grad_norm": 0.06031232298569737, "learning_rate": 6.696001842349702e-05, "loss": 0.7238, "step": 1260 }, { "epoch": 0.65, "grad_norm": 0.06261408542212123, "learning_rate": 6.611609689135056e-05, "loss": 0.7804, "step": 1265 }, { "epoch": 0.65, "grad_norm": 0.06509293878957043, "learning_rate": 6.527489286891459e-05, "loss": 0.6191, "step": 1270 }, { "epoch": 0.65, "grad_norm": 0.0637053230023183, "learning_rate": 6.443647382126509e-05, "loss": 0.7272, "step": 1275 }, { "epoch": 0.66, "grad_norm": 0.060890338775236155, "learning_rate": 6.360090699012145e-05, "loss": 0.7158, "step": 1280 }, { "epoch": 0.66, "grad_norm": 0.06390720535215949, "learning_rate": 6.27682593884535e-05, "loss": 0.6418, "step": 1285 }, { "epoch": 0.66, "grad_norm": 
0.059941265719477915, "learning_rate": 6.193859779510712e-05, "loss": 0.7396, "step": 1290 }, { "epoch": 0.66, "grad_norm": 0.057335301960845145, "learning_rate": 6.111198874944845e-05, "loss": 0.7136, "step": 1295 }, { "epoch": 0.67, "grad_norm": 0.0669688076887658, "learning_rate": 6.0288498546027536e-05, "loss": 0.7683, "step": 1300 }, { "epoch": 0.67, "grad_norm": 0.06887004197989798, "learning_rate": 5.946819322926127e-05, "loss": 0.6736, "step": 1305 }, { "epoch": 0.67, "grad_norm": 0.06281493310941981, "learning_rate": 5.865113858813673e-05, "loss": 0.7169, "step": 1310 }, { "epoch": 0.67, "grad_norm": 0.05868856490220455, "learning_rate": 5.783740015093484e-05, "loss": 0.7227, "step": 1315 }, { "epoch": 0.68, "grad_norm": 0.059464256939025885, "learning_rate": 5.702704317997492e-05, "loss": 0.6855, "step": 1320 }, { "epoch": 0.68, "grad_norm": 0.06386543918702894, "learning_rate": 5.6220132666380635e-05, "loss": 0.6664, "step": 1325 }, { "epoch": 0.68, "grad_norm": 0.06630497213003285, "learning_rate": 5.541673332486773e-05, "loss": 0.7539, "step": 1330 }, { "epoch": 0.68, "grad_norm": 0.0572397341438381, "learning_rate": 5.4616909588553674e-05, "loss": 0.8131, "step": 1335 }, { "epoch": 0.69, "grad_norm": 0.0554406728774194, "learning_rate": 5.3820725603790346e-05, "loss": 0.7787, "step": 1340 }, { "epoch": 0.69, "grad_norm": 0.05900113915228878, "learning_rate": 5.30282452250193e-05, "loss": 0.7764, "step": 1345 }, { "epoch": 0.69, "grad_norm": 0.06385527269768287, "learning_rate": 5.223953200965055e-05, "loss": 0.7301, "step": 1350 }, { "epoch": 0.7, "grad_norm": 0.06245137688685591, "learning_rate": 5.145464921296537e-05, "loss": 0.7958, "step": 1355 }, { "epoch": 0.7, "grad_norm": 0.0684043207080321, "learning_rate": 5.067365978304315e-05, "loss": 0.6871, "step": 1360 }, { "epoch": 0.7, "grad_norm": 0.06711154333394342, "learning_rate": 4.9896626355712805e-05, "loss": 0.694, "step": 1365 }, { "epoch": 0.7, "grad_norm": 0.05283989756729982, 
"learning_rate": 4.912361124952948e-05, "loss": 0.7149, "step": 1370 }, { "epoch": 0.71, "grad_norm": 0.060600777598165734, "learning_rate": 4.835467646077656e-05, "loss": 0.6808, "step": 1375 }, { "epoch": 0.71, "grad_norm": 0.05563364488701937, "learning_rate": 4.7589883658493296e-05, "loss": 0.6914, "step": 1380 }, { "epoch": 0.71, "grad_norm": 0.06530550198410516, "learning_rate": 4.682929417952939e-05, "loss": 0.6881, "step": 1385 }, { "epoch": 0.71, "grad_norm": 0.052698731359847616, "learning_rate": 4.6072969023625165e-05, "loss": 0.7082, "step": 1390 }, { "epoch": 0.72, "grad_norm": 0.05983275498927436, "learning_rate": 4.532096884851978e-05, "loss": 0.7188, "step": 1395 }, { "epoch": 0.72, "grad_norm": 0.06612287041729081, "learning_rate": 4.457335396508631e-05, "loss": 0.7189, "step": 1400 }, { "epoch": 0.72, "grad_norm": 0.05677308006082468, "learning_rate": 4.383018433249464e-05, "loss": 0.7081, "step": 1405 }, { "epoch": 0.72, "grad_norm": 0.06237896853204969, "learning_rate": 4.309151955340297e-05, "loss": 0.6921, "step": 1410 }, { "epoch": 0.73, "grad_norm": 0.05812595825532448, "learning_rate": 4.2357418869177354e-05, "loss": 0.7548, "step": 1415 }, { "epoch": 0.73, "grad_norm": 0.06741213496816625, "learning_rate": 4.162794115514078e-05, "loss": 0.7643, "step": 1420 }, { "epoch": 0.73, "grad_norm": 0.0631021793158054, "learning_rate": 4.0903144915851174e-05, "loss": 0.6469, "step": 1425 }, { "epoch": 0.73, "grad_norm": 0.05831509904707476, "learning_rate": 4.018308828040924e-05, "loss": 0.6687, "step": 1430 }, { "epoch": 0.74, "grad_norm": 0.0542248638635785, "learning_rate": 3.946782899779667e-05, "loss": 0.7034, "step": 1435 }, { "epoch": 0.74, "grad_norm": 0.06625784513238857, "learning_rate": 3.875742443224451e-05, "loss": 0.721, "step": 1440 }, { "epoch": 0.74, "grad_norm": 0.05870827407396205, "learning_rate": 3.805193155863247e-05, "loss": 0.675, "step": 1445 }, { "epoch": 0.74, "grad_norm": 0.059612966400709386, "learning_rate": 
3.7351406957919636e-05, "loss": 0.7293, "step": 1450 }, { "epoch": 0.75, "grad_norm": 0.06211176018305973, "learning_rate": 3.665590681260658e-05, "loss": 0.7181, "step": 1455 }, { "epoch": 0.75, "grad_norm": 0.05866568540322711, "learning_rate": 3.59654869022294e-05, "loss": 0.7666, "step": 1460 }, { "epoch": 0.75, "grad_norm": 0.0501465470248372, "learning_rate": 3.5280202598886324e-05, "loss": 0.7647, "step": 1465 }, { "epoch": 0.75, "grad_norm": 0.060705521344698324, "learning_rate": 3.4600108862796796e-05, "loss": 0.7136, "step": 1470 }, { "epoch": 0.76, "grad_norm": 0.06092141310578522, "learning_rate": 3.392526023789349e-05, "loss": 0.7384, "step": 1475 }, { "epoch": 0.76, "grad_norm": 0.04920843840426036, "learning_rate": 3.325571084744803e-05, "loss": 0.6823, "step": 1480 }, { "epoch": 0.76, "grad_norm": 0.06578821588742692, "learning_rate": 3.259151438973024e-05, "loss": 0.7787, "step": 1485 }, { "epoch": 0.76, "grad_norm": 0.054652544113093456, "learning_rate": 3.1932724133701344e-05, "loss": 0.6821, "step": 1490 }, { "epoch": 0.77, "grad_norm": 0.06096008553815832, "learning_rate": 3.1279392914742046e-05, "loss": 0.735, "step": 1495 }, { "epoch": 0.77, "grad_norm": 0.058962269998868645, "learning_rate": 3.06315731304148e-05, "loss": 0.7616, "step": 1500 }, { "epoch": 0.77, "grad_norm": 0.05523256517584398, "learning_rate": 2.998931673626175e-05, "loss": 0.6681, "step": 1505 }, { "epoch": 0.77, "grad_norm": 0.05959688310829463, "learning_rate": 2.935267524163774e-05, "loss": 0.7581, "step": 1510 }, { "epoch": 0.78, "grad_norm": 0.05982538851896185, "learning_rate": 2.872169970557913e-05, "loss": 0.7541, "step": 1515 }, { "epoch": 0.78, "grad_norm": 0.06494218833414687, "learning_rate": 2.8096440732709083e-05, "loss": 0.7742, "step": 1520 }, { "epoch": 0.78, "grad_norm": 0.06859915764079565, "learning_rate": 2.7476948469178887e-05, "loss": 0.7247, "step": 1525 }, { "epoch": 0.79, "grad_norm": 0.09010453444524248, "learning_rate": 2.6863272598646106e-05, 
"loss": 0.712, "step": 1530 }, { "epoch": 0.79, "grad_norm": 0.06068755169221279, "learning_rate": 2.625546233829016e-05, "loss": 0.7516, "step": 1535 }, { "epoch": 0.79, "grad_norm": 0.06062575490061368, "learning_rate": 2.5653566434864928e-05, "loss": 0.7284, "step": 1540 }, { "epoch": 0.79, "grad_norm": 0.06813879053211623, "learning_rate": 2.5057633160789184e-05, "loss": 0.7177, "step": 1545 }, { "epoch": 0.8, "grad_norm": 0.05549352383131599, "learning_rate": 2.446771031027527e-05, "loss": 0.7735, "step": 1550 }, { "epoch": 0.8, "grad_norm": 0.06167056235702363, "learning_rate": 2.3883845195495878e-05, "loss": 0.7829, "step": 1555 }, { "epoch": 0.8, "grad_norm": 0.05941849303346335, "learning_rate": 2.330608464278953e-05, "loss": 0.7349, "step": 1560 }, { "epoch": 0.8, "grad_norm": 0.059930008570328284, "learning_rate": 2.273447498890521e-05, "loss": 0.7793, "step": 1565 }, { "epoch": 0.81, "grad_norm": 0.05579818006374695, "learning_rate": 2.2169062077286075e-05, "loss": 0.6831, "step": 1570 }, { "epoch": 0.81, "grad_norm": 0.05556533610425669, "learning_rate": 2.1609891254392678e-05, "loss": 0.6766, "step": 1575 }, { "epoch": 0.81, "grad_norm": 0.07117318253440096, "learning_rate": 2.1057007366066373e-05, "loss": 0.7592, "step": 1580 }, { "epoch": 0.81, "grad_norm": 0.06880636339513395, "learning_rate": 2.0510454753932395e-05, "loss": 0.6818, "step": 1585 }, { "epoch": 0.82, "grad_norm": 0.05185758649755237, "learning_rate": 1.9970277251843862e-05, "loss": 0.7131, "step": 1590 }, { "epoch": 0.82, "grad_norm": 0.0569426799635938, "learning_rate": 1.9436518182366158e-05, "loss": 0.723, "step": 1595 }, { "epoch": 0.82, "grad_norm": 0.05730020536410127, "learning_rate": 1.8909220353302392e-05, "loss": 0.7303, "step": 1600 }, { "epoch": 0.82, "grad_norm": 0.06163290829765527, "learning_rate": 1.838842605426031e-05, "loss": 0.6702, "step": 1605 }, { "epoch": 0.83, "grad_norm": 0.05358008544806708, "learning_rate": 1.7874177053260598e-05, "loss": 0.6647, "step": 
1610 }, { "epoch": 0.83, "grad_norm": 0.08348745178054953, "learning_rate": 1.736651459338695e-05, "loss": 0.6768, "step": 1615 }, { "epoch": 0.83, "grad_norm": 0.059233304270911794, "learning_rate": 1.6865479389478545e-05, "loss": 0.7239, "step": 1620 }, { "epoch": 0.83, "grad_norm": 0.0530792952405111, "learning_rate": 1.6371111624864543e-05, "loss": 0.7146, "step": 1625 }, { "epoch": 0.84, "grad_norm": 0.07089338712434175, "learning_rate": 1.5883450948141377e-05, "loss": 0.7008, "step": 1630 }, { "epoch": 0.84, "grad_norm": 0.05743797721150877, "learning_rate": 1.540253646999299e-05, "loss": 0.7422, "step": 1635 }, { "epoch": 0.84, "grad_norm": 0.054938710195774586, "learning_rate": 1.4928406760054059e-05, "loss": 0.7121, "step": 1640 }, { "epoch": 0.84, "grad_norm": 0.06182870818089227, "learning_rate": 1.4461099843816684e-05, "loss": 0.7006, "step": 1645 }, { "epoch": 0.85, "grad_norm": 0.051352577478825545, "learning_rate": 1.4000653199580782e-05, "loss": 0.759, "step": 1650 }, { "epoch": 0.85, "grad_norm": 0.059641862900702905, "learning_rate": 1.3547103755448287e-05, "loss": 0.742, "step": 1655 }, { "epoch": 0.85, "grad_norm": 0.05397773348884262, "learning_rate": 1.3100487886361379e-05, "loss": 0.7741, "step": 1660 }, { "epoch": 0.85, "grad_norm": 0.05135238686196935, "learning_rate": 1.266084141118542e-05, "loss": 0.7079, "step": 1665 }, { "epoch": 0.86, "grad_norm": 0.057288371085909104, "learning_rate": 1.2228199589835999e-05, "loss": 0.7327, "step": 1670 }, { "epoch": 0.86, "grad_norm": 0.06493631809483912, "learning_rate": 1.1802597120451286e-05, "loss": 0.7295, "step": 1675 }, { "epoch": 0.86, "grad_norm": 0.06770101954166549, "learning_rate": 1.1384068136609105e-05, "loss": 0.7514, "step": 1680 }, { "epoch": 0.86, "grad_norm": 0.05371862338343528, "learning_rate": 1.0972646204589377e-05, "loss": 0.6835, "step": 1685 }, { "epoch": 0.87, "grad_norm": 0.05759804328762846, "learning_rate": 1.0568364320682178e-05, "loss": 0.7513, "step": 1690 }, { 
"epoch": 0.87, "grad_norm": 0.06129923897917773, "learning_rate": 1.0171254908541372e-05, "loss": 0.6705, "step": 1695 }, { "epoch": 0.87, "grad_norm": 0.053581168501307445, "learning_rate": 9.781349816584162e-06, "loss": 0.7042, "step": 1700 }, { "epoch": 0.87, "grad_norm": 0.05603939945818747, "learning_rate": 9.398680315436903e-06, "loss": 0.7258, "step": 1705 }, { "epoch": 0.88, "grad_norm": 0.06584992700683939, "learning_rate": 9.023277095427173e-06, "loss": 0.6769, "step": 1710 }, { "epoch": 0.88, "grad_norm": 0.059968649425522685, "learning_rate": 8.655170264122303e-06, "loss": 0.7463, "step": 1715 }, { "epoch": 0.88, "grad_norm": 0.05488029646190535, "learning_rate": 8.294389343914899e-06, "loss": 0.722, "step": 1720 }, { "epoch": 0.89, "grad_norm": 0.05526234423387249, "learning_rate": 7.940963269654922e-06, "loss": 0.7629, "step": 1725 }, { "epoch": 0.89, "grad_norm": 0.05556936448236485, "learning_rate": 7.594920386329252e-06, "loss": 0.7161, "step": 1730 }, { "epoch": 0.89, "grad_norm": 0.057128352068576076, "learning_rate": 7.256288446788362e-06, "loss": 0.6614, "step": 1735 }, { "epoch": 0.89, "grad_norm": 0.06678671968173212, "learning_rate": 6.925094609520455e-06, "loss": 0.7391, "step": 1740 }, { "epoch": 0.9, "grad_norm": 0.0551309681633079, "learning_rate": 6.601365436473439e-06, "loss": 0.7053, "step": 1745 }, { "epoch": 0.9, "grad_norm": 0.0660459237833797, "learning_rate": 6.2851268909245865e-06, "loss": 0.6966, "step": 1750 }, { "epoch": 0.9, "grad_norm": 0.06028870099044076, "learning_rate": 5.976404335398256e-06, "loss": 0.7425, "step": 1755 }, { "epoch": 0.9, "grad_norm": 0.04676509027459768, "learning_rate": 5.675222529631841e-06, "loss": 0.7178, "step": 1760 }, { "epoch": 0.91, "grad_norm": 0.05560159391722265, "learning_rate": 5.381605628590003e-06, "loss": 0.7285, "step": 1765 }, { "epoch": 0.91, "grad_norm": 0.06016761021331081, "learning_rate": 5.095577180527378e-06, "loss": 0.7333, "step": 1770 }, { "epoch": 0.91, "grad_norm": 
0.0607495003253662, "learning_rate": 4.817160125100106e-06, "loss": 0.7747, "step": 1775 }, { "epoch": 0.91, "grad_norm": 0.05994300070228749, "learning_rate": 4.546376791525975e-06, "loss": 0.7357, "step": 1780 }, { "epoch": 0.92, "grad_norm": 0.052705940192527105, "learning_rate": 4.2832488967935795e-06, "loss": 0.7319, "step": 1785 }, { "epoch": 0.92, "grad_norm": 0.05605139500885904, "learning_rate": 4.02779754392072e-06, "loss": 0.7196, "step": 1790 }, { "epoch": 0.92, "grad_norm": 0.06832445824454762, "learning_rate": 3.780043220261764e-06, "loss": 0.7419, "step": 1795 }, { "epoch": 0.92, "grad_norm": 0.07505662282091412, "learning_rate": 3.540005795864709e-06, "loss": 0.6612, "step": 1800 }, { "epoch": 0.93, "grad_norm": 0.06215112837654046, "learning_rate": 3.3077045218775192e-06, "loss": 0.7493, "step": 1805 }, { "epoch": 0.93, "grad_norm": 0.06733733726055094, "learning_rate": 3.0831580290041184e-06, "loss": 0.7542, "step": 1810 }, { "epoch": 0.93, "grad_norm": 0.04850593633489853, "learning_rate": 2.8663843260103074e-06, "loss": 0.7319, "step": 1815 }, { "epoch": 0.93, "grad_norm": 0.07012766389455326, "learning_rate": 2.6574007982793857e-06, "loss": 0.7144, "step": 1820 }, { "epoch": 0.94, "grad_norm": 0.06128302394909496, "learning_rate": 2.456224206417812e-06, "loss": 0.6819, "step": 1825 }, { "epoch": 0.94, "grad_norm": 0.06568885594871854, "learning_rate": 2.262870684911045e-06, "loss": 0.7466, "step": 1830 }, { "epoch": 0.94, "grad_norm": 0.05638801020800785, "learning_rate": 2.0773557408295343e-06, "loss": 0.7104, "step": 1835 }, { "epoch": 0.94, "grad_norm": 0.05606985820795626, "learning_rate": 1.8996942525850047e-06, "loss": 0.7032, "step": 1840 }, { "epoch": 0.95, "grad_norm": 0.055316558262847634, "learning_rate": 1.7299004687372665e-06, "loss": 0.6945, "step": 1845 }, { "epoch": 0.95, "grad_norm": 0.0589945616388343, "learning_rate": 1.5679880068514174e-06, "loss": 0.6811, "step": 1850 }, { "epoch": 0.95, "grad_norm": 0.0667279053464632, 
"learning_rate": 1.4139698524057165e-06, "loss": 0.7469, "step": 1855 }, { "epoch": 0.95, "grad_norm": 0.06272671335327658, "learning_rate": 1.2678583577501624e-06, "loss": 0.7279, "step": 1860 }, { "epoch": 0.96, "grad_norm": 0.06410423650132317, "learning_rate": 1.1296652411158182e-06, "loss": 0.724, "step": 1865 }, { "epoch": 0.96, "grad_norm": 0.0554322802504446, "learning_rate": 9.994015856749527e-07, "loss": 0.6884, "step": 1870 }, { "epoch": 0.96, "grad_norm": 0.05363867115392121, "learning_rate": 8.770778386522627e-07, "loss": 0.6692, "step": 1875 }, { "epoch": 0.96, "grad_norm": 0.06290861069536181, "learning_rate": 7.627038104869199e-07, "loss": 0.7241, "step": 1880 }, { "epoch": 0.97, "grad_norm": 0.058034813454390606, "learning_rate": 6.562886740457797e-07, "loss": 0.7169, "step": 1885 }, { "epoch": 0.97, "grad_norm": 0.0569479748353595, "learning_rate": 5.578409638877457e-07, "loss": 0.6471, "step": 1890 }, { "epoch": 0.97, "grad_norm": 0.06498145548324454, "learning_rate": 4.6736857557925227e-07, "loss": 0.7028, "step": 1895 }, { "epoch": 0.97, "grad_norm": 0.058277777474149445, "learning_rate": 3.8487876506106966e-07, "loss": 0.784, "step": 1900 }, { "epoch": 0.98, "grad_norm": 0.06195969485337902, "learning_rate": 3.1037814806634815e-07, "loss": 0.6927, "step": 1905 }, { "epoch": 0.98, "grad_norm": 0.05456887165409484, "learning_rate": 2.43872699590042e-07, "loss": 0.6691, "step": 1910 }, { "epoch": 0.98, "grad_norm": 0.05499949679669644, "learning_rate": 1.8536775340970425e-07, "loss": 0.7706, "step": 1915 }, { "epoch": 0.99, "grad_norm": 0.06327430290778817, "learning_rate": 1.348680016577397e-07, "loss": 0.7543, "step": 1920 }, { "epoch": 0.99, "grad_norm": 0.06143220825866882, "learning_rate": 9.237749444505062e-08, "loss": 0.7157, "step": 1925 }, { "epoch": 0.99, "grad_norm": 0.057790877621458125, "learning_rate": 5.7899639536251883e-08, "loss": 0.7225, "step": 1930 }, { "epoch": 0.99, "grad_norm": 0.05824054504791697, "learning_rate": 
3.143720207635648e-08, "loss": 0.7305, "step": 1935 }, { "epoch": 1.0, "grad_norm": 0.0708892592599129, "learning_rate": 1.299230436898613e-08, "loss": 0.7639, "step": 1940 }, { "epoch": 1.0, "grad_norm": 0.061086419354873724, "learning_rate": 2.566425706218567e-09, "loss": 0.6812, "step": 1945 }, { "epoch": 1.0, "eval_loss": null, "eval_runtime": 1970.507, "eval_samples_per_second": 3.518, "eval_steps_per_second": 0.879, "step": 1949 }, { "epoch": 1.0, "step": 1949, "total_flos": 1.2135540828143616e+16, "train_loss": 0.7257849163982182, "train_runtime": 18158.5302, "train_samples_per_second": 3.434, "train_steps_per_second": 0.107 } ], "logging_steps": 5, "max_steps": 1949, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 100, "total_flos": 1.2135540828143616e+16, "train_batch_size": 8, "trial_name": null, "trial_params": null }