{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.4150268336314848,
  "eval_steps": 500,
  "global_step": 1160,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0035778175313059034,
      "grad_norm": 0.5825825929641724,
      "learning_rate": 0.00019928443649373882,
      "loss": 1.248,
      "num_input_tokens_seen": 6646,
      "step": 10
    },
    {
      "epoch": 0.007155635062611807,
      "grad_norm": 0.5380188822746277,
      "learning_rate": 0.00019856887298747765,
      "loss": 0.5478,
      "num_input_tokens_seen": 13063,
      "step": 20
    },
    {
      "epoch": 0.01073345259391771,
      "grad_norm": 0.3872911036014557,
      "learning_rate": 0.00019785330948121648,
      "loss": 0.5135,
      "num_input_tokens_seen": 19512,
      "step": 30
    },
    {
      "epoch": 0.014311270125223614,
      "grad_norm": 0.4991438686847687,
      "learning_rate": 0.0001971377459749553,
      "loss": 0.5092,
      "num_input_tokens_seen": 26884,
      "step": 40
    },
    {
      "epoch": 0.017889087656529516,
      "grad_norm": 0.6744784116744995,
      "learning_rate": 0.0001964221824686941,
      "loss": 0.4799,
      "num_input_tokens_seen": 34831,
      "step": 50
    },
    {
      "epoch": 0.02146690518783542,
      "grad_norm": 0.5413841009140015,
      "learning_rate": 0.00019570661896243293,
      "loss": 0.4738,
      "num_input_tokens_seen": 40074,
      "step": 60
    },
    {
      "epoch": 0.025044722719141325,
      "grad_norm": 0.33517029881477356,
      "learning_rate": 0.00019499105545617174,
      "loss": 0.4907,
      "num_input_tokens_seen": 47194,
      "step": 70
    },
    {
      "epoch": 0.028622540250447227,
      "grad_norm": 0.34275758266448975,
      "learning_rate": 0.00019427549194991057,
      "loss": 0.4642,
      "num_input_tokens_seen": 53439,
      "step": 80
    },
    {
      "epoch": 0.03220035778175313,
      "grad_norm": 0.4074145257472992,
      "learning_rate": 0.00019355992844364938,
      "loss": 0.4431,
      "num_input_tokens_seen": 59366,
      "step": 90
    },
    {
      "epoch": 0.03577817531305903,
      "grad_norm": 0.372760534286499,
      "learning_rate": 0.0001928443649373882,
      "loss": 0.4824,
      "num_input_tokens_seen": 66414,
      "step": 100
    },
    {
      "epoch": 0.03935599284436494,
      "grad_norm": 0.35169002413749695,
      "learning_rate": 0.00019212880143112702,
      "loss": 0.4863,
      "num_input_tokens_seen": 73451,
      "step": 110
    },
    {
      "epoch": 0.04293381037567084,
      "grad_norm": 0.4088020324707031,
      "learning_rate": 0.00019141323792486585,
      "loss": 0.4792,
      "num_input_tokens_seen": 81934,
      "step": 120
    },
    {
      "epoch": 0.046511627906976744,
      "grad_norm": 0.40062326192855835,
      "learning_rate": 0.00019069767441860466,
      "loss": 0.4607,
      "num_input_tokens_seen": 88335,
      "step": 130
    },
    {
      "epoch": 0.05008944543828265,
      "grad_norm": 0.5044320225715637,
      "learning_rate": 0.0001899821109123435,
      "loss": 0.456,
      "num_input_tokens_seen": 96192,
      "step": 140
    },
    {
      "epoch": 0.05366726296958855,
      "grad_norm": 0.4566495418548584,
      "learning_rate": 0.0001892665474060823,
      "loss": 0.429,
      "num_input_tokens_seen": 101609,
      "step": 150
    },
    {
      "epoch": 0.057245080500894455,
      "grad_norm": 0.4657338559627533,
      "learning_rate": 0.0001885509838998211,
      "loss": 0.4445,
      "num_input_tokens_seen": 107467,
      "step": 160
    },
    {
      "epoch": 0.06082289803220036,
      "grad_norm": 0.5721924304962158,
      "learning_rate": 0.00018783542039355994,
      "loss": 0.4304,
      "num_input_tokens_seen": 113612,
      "step": 170
    },
    {
      "epoch": 0.06440071556350627,
      "grad_norm": 0.2883516848087311,
      "learning_rate": 0.00018711985688729877,
      "loss": 0.4525,
      "num_input_tokens_seen": 121416,
      "step": 180
    },
    {
      "epoch": 0.06797853309481217,
      "grad_norm": 0.5061659216880798,
      "learning_rate": 0.00018640429338103758,
      "loss": 0.4439,
      "num_input_tokens_seen": 128284,
      "step": 190
    },
    {
      "epoch": 0.07155635062611806,
      "grad_norm": 0.3323754072189331,
      "learning_rate": 0.00018568872987477638,
      "loss": 0.4489,
      "num_input_tokens_seen": 135571,
      "step": 200
    },
    {
      "epoch": 0.07513416815742398,
      "grad_norm": 0.5354058742523193,
      "learning_rate": 0.00018497316636851522,
      "loss": 0.4634,
      "num_input_tokens_seen": 141479,
      "step": 210
    },
    {
      "epoch": 0.07871198568872988,
      "grad_norm": 0.4328760504722595,
      "learning_rate": 0.00018425760286225405,
      "loss": 0.4545,
      "num_input_tokens_seen": 147677,
      "step": 220
    },
    {
      "epoch": 0.08228980322003578,
      "grad_norm": 0.28675127029418945,
      "learning_rate": 0.00018354203935599286,
      "loss": 0.4814,
      "num_input_tokens_seen": 154847,
      "step": 230
    },
    {
      "epoch": 0.08586762075134168,
      "grad_norm": 0.31572216749191284,
      "learning_rate": 0.00018282647584973166,
      "loss": 0.446,
      "num_input_tokens_seen": 162267,
      "step": 240
    },
    {
      "epoch": 0.08944543828264759,
      "grad_norm": 0.360166996717453,
      "learning_rate": 0.0001821109123434705,
      "loss": 0.4549,
      "num_input_tokens_seen": 168817,
      "step": 250
    },
    {
      "epoch": 0.09302325581395349,
      "grad_norm": 0.342385470867157,
      "learning_rate": 0.0001813953488372093,
      "loss": 0.4297,
      "num_input_tokens_seen": 174828,
      "step": 260
    },
    {
      "epoch": 0.09660107334525939,
      "grad_norm": 0.37481924891471863,
      "learning_rate": 0.00018067978533094814,
      "loss": 0.4314,
      "num_input_tokens_seen": 181578,
      "step": 270
    },
    {
      "epoch": 0.1001788908765653,
      "grad_norm": 0.28545519709587097,
      "learning_rate": 0.00017996422182468694,
      "loss": 0.4332,
      "num_input_tokens_seen": 187842,
      "step": 280
    },
    {
      "epoch": 0.1037567084078712,
      "grad_norm": 0.38877248764038086,
      "learning_rate": 0.00017924865831842578,
      "loss": 0.4248,
      "num_input_tokens_seen": 194651,
      "step": 290
    },
    {
      "epoch": 0.1073345259391771,
      "grad_norm": 0.30087631940841675,
      "learning_rate": 0.00017853309481216458,
      "loss": 0.4405,
      "num_input_tokens_seen": 202203,
      "step": 300
    },
    {
      "epoch": 0.11091234347048301,
      "grad_norm": 0.33470776677131653,
      "learning_rate": 0.00017781753130590342,
      "loss": 0.4485,
      "num_input_tokens_seen": 210364,
      "step": 310
    },
    {
      "epoch": 0.11449016100178891,
      "grad_norm": 0.44205668568611145,
      "learning_rate": 0.00017710196779964222,
      "loss": 0.4431,
      "num_input_tokens_seen": 217965,
      "step": 320
    },
    {
      "epoch": 0.11806797853309481,
      "grad_norm": 0.39270082116127014,
      "learning_rate": 0.00017638640429338106,
      "loss": 0.4475,
      "num_input_tokens_seen": 224249,
      "step": 330
    },
    {
      "epoch": 0.12164579606440072,
      "grad_norm": 0.37138304114341736,
      "learning_rate": 0.00017567084078711986,
      "loss": 0.4139,
      "num_input_tokens_seen": 230513,
      "step": 340
    },
    {
      "epoch": 0.1252236135957066,
      "grad_norm": 0.32019296288490295,
      "learning_rate": 0.00017495527728085867,
      "loss": 0.4488,
      "num_input_tokens_seen": 237103,
      "step": 350
    },
    {
      "epoch": 0.12880143112701253,
      "grad_norm": 0.42748796939849854,
      "learning_rate": 0.0001742397137745975,
      "loss": 0.4059,
      "num_input_tokens_seen": 242530,
      "step": 360
    },
    {
      "epoch": 0.13237924865831843,
      "grad_norm": 0.39997342228889465,
      "learning_rate": 0.00017352415026833634,
      "loss": 0.4069,
      "num_input_tokens_seen": 247524,
      "step": 370
    },
    {
      "epoch": 0.13595706618962433,
      "grad_norm": 0.45402902364730835,
      "learning_rate": 0.00017280858676207514,
      "loss": 0.4354,
      "num_input_tokens_seen": 252526,
      "step": 380
    },
    {
      "epoch": 0.13953488372093023,
      "grad_norm": 0.500701904296875,
      "learning_rate": 0.00017209302325581395,
      "loss": 0.4421,
      "num_input_tokens_seen": 258974,
      "step": 390
    },
    {
      "epoch": 0.14311270125223613,
      "grad_norm": 0.2604714035987854,
      "learning_rate": 0.00017137745974955278,
      "loss": 0.4338,
      "num_input_tokens_seen": 265411,
      "step": 400
    },
    {
      "epoch": 0.14669051878354203,
      "grad_norm": 0.5467566251754761,
      "learning_rate": 0.00017066189624329162,
      "loss": 0.4336,
      "num_input_tokens_seen": 271069,
      "step": 410
    },
    {
      "epoch": 0.15026833631484796,
      "grad_norm": 0.4278429448604584,
      "learning_rate": 0.00016994633273703042,
      "loss": 0.4384,
      "num_input_tokens_seen": 277913,
      "step": 420
    },
    {
      "epoch": 0.15384615384615385,
      "grad_norm": 0.38349634408950806,
      "learning_rate": 0.00016923076923076923,
      "loss": 0.445,
      "num_input_tokens_seen": 285726,
      "step": 430
    },
    {
      "epoch": 0.15742397137745975,
      "grad_norm": 0.298753559589386,
      "learning_rate": 0.00016851520572450806,
      "loss": 0.411,
      "num_input_tokens_seen": 293886,
      "step": 440
    },
    {
      "epoch": 0.16100178890876565,
      "grad_norm": 0.4590730667114258,
      "learning_rate": 0.00016779964221824687,
      "loss": 0.4441,
      "num_input_tokens_seen": 300933,
      "step": 450
    },
    {
      "epoch": 0.16457960644007155,
      "grad_norm": 0.25614652037620544,
      "learning_rate": 0.0001670840787119857,
      "loss": 0.4201,
      "num_input_tokens_seen": 306782,
      "step": 460
    },
    {
      "epoch": 0.16815742397137745,
      "grad_norm": 0.3007524013519287,
      "learning_rate": 0.0001663685152057245,
      "loss": 0.444,
      "num_input_tokens_seen": 314243,
      "step": 470
    },
    {
      "epoch": 0.17173524150268335,
      "grad_norm": 0.4292968809604645,
      "learning_rate": 0.00016565295169946334,
      "loss": 0.4089,
      "num_input_tokens_seen": 319432,
      "step": 480
    },
    {
      "epoch": 0.17531305903398928,
      "grad_norm": 0.31139102578163147,
      "learning_rate": 0.00016493738819320215,
      "loss": 0.4244,
      "num_input_tokens_seen": 325211,
      "step": 490
    },
    {
      "epoch": 0.17889087656529518,
      "grad_norm": 0.34276363253593445,
      "learning_rate": 0.00016422182468694098,
      "loss": 0.4133,
      "num_input_tokens_seen": 332100,
      "step": 500
    },
    {
      "epoch": 0.18246869409660108,
      "grad_norm": 0.39435234665870667,
      "learning_rate": 0.0001635062611806798,
      "loss": 0.4428,
      "num_input_tokens_seen": 337848,
      "step": 510
    },
    {
      "epoch": 0.18604651162790697,
      "grad_norm": 0.2763209640979767,
      "learning_rate": 0.00016279069767441862,
      "loss": 0.4464,
      "num_input_tokens_seen": 345759,
      "step": 520
    },
    {
      "epoch": 0.18962432915921287,
      "grad_norm": 0.2753828763961792,
      "learning_rate": 0.00016207513416815743,
      "loss": 0.438,
      "num_input_tokens_seen": 354078,
      "step": 530
    },
    {
      "epoch": 0.19320214669051877,
      "grad_norm": 0.3228139579296112,
      "learning_rate": 0.00016135957066189623,
      "loss": 0.4314,
      "num_input_tokens_seen": 360961,
      "step": 540
    },
    {
      "epoch": 0.1967799642218247,
      "grad_norm": 0.30050089955329895,
      "learning_rate": 0.00016064400715563507,
      "loss": 0.4364,
      "num_input_tokens_seen": 367083,
      "step": 550
    },
    {
      "epoch": 0.2003577817531306,
      "grad_norm": 0.3418981432914734,
      "learning_rate": 0.0001599284436493739,
      "loss": 0.4329,
      "num_input_tokens_seen": 373196,
      "step": 560
    },
    {
      "epoch": 0.2039355992844365,
      "grad_norm": 0.36333030462265015,
      "learning_rate": 0.0001592128801431127,
      "loss": 0.4482,
      "num_input_tokens_seen": 380449,
      "step": 570
    },
    {
      "epoch": 0.2075134168157424,
      "grad_norm": 0.2979726195335388,
      "learning_rate": 0.00015849731663685151,
      "loss": 0.4258,
      "num_input_tokens_seen": 386577,
      "step": 580
    },
    {
      "epoch": 0.2110912343470483,
      "grad_norm": 0.2969113886356354,
      "learning_rate": 0.00015778175313059035,
      "loss": 0.433,
      "num_input_tokens_seen": 392953,
      "step": 590
    },
    {
      "epoch": 0.2146690518783542,
      "grad_norm": 0.4132014811038971,
      "learning_rate": 0.00015706618962432918,
      "loss": 0.4148,
      "num_input_tokens_seen": 399368,
      "step": 600
    },
    {
      "epoch": 0.2182468694096601,
      "grad_norm": 0.35863760113716125,
      "learning_rate": 0.000156350626118068,
      "loss": 0.4105,
      "num_input_tokens_seen": 407343,
      "step": 610
    },
    {
      "epoch": 0.22182468694096602,
      "grad_norm": 0.287056028842926,
      "learning_rate": 0.0001556350626118068,
      "loss": 0.4495,
      "num_input_tokens_seen": 413867,
      "step": 620
    },
    {
      "epoch": 0.22540250447227192,
      "grad_norm": 0.41710999608039856,
      "learning_rate": 0.00015491949910554563,
      "loss": 0.426,
      "num_input_tokens_seen": 422712,
      "step": 630
    },
    {
      "epoch": 0.22898032200357782,
      "grad_norm": 0.42847341299057007,
      "learning_rate": 0.00015420393559928446,
      "loss": 0.4163,
      "num_input_tokens_seen": 428523,
      "step": 640
    },
    {
      "epoch": 0.23255813953488372,
      "grad_norm": 0.35523882508277893,
      "learning_rate": 0.00015348837209302327,
      "loss": 0.4237,
      "num_input_tokens_seen": 436283,
      "step": 650
    },
    {
      "epoch": 0.23613595706618962,
      "grad_norm": 0.32238948345184326,
      "learning_rate": 0.00015277280858676207,
      "loss": 0.4527,
      "num_input_tokens_seen": 444368,
      "step": 660
    },
    {
      "epoch": 0.23971377459749552,
      "grad_norm": 0.3075689375400543,
      "learning_rate": 0.0001520572450805009,
      "loss": 0.4057,
      "num_input_tokens_seen": 449919,
      "step": 670
    },
    {
      "epoch": 0.24329159212880144,
      "grad_norm": 0.3819845914840698,
      "learning_rate": 0.00015134168157423971,
      "loss": 0.3951,
      "num_input_tokens_seen": 455612,
      "step": 680
    },
    {
      "epoch": 0.24686940966010734,
      "grad_norm": 0.3687816560268402,
      "learning_rate": 0.00015062611806797855,
      "loss": 0.4176,
      "num_input_tokens_seen": 461846,
      "step": 690
    },
    {
      "epoch": 0.2504472271914132,
      "grad_norm": 0.32070156931877136,
      "learning_rate": 0.00014991055456171735,
      "loss": 0.4111,
      "num_input_tokens_seen": 468333,
      "step": 700
    },
    {
      "epoch": 0.25402504472271914,
      "grad_norm": 0.35491397976875305,
      "learning_rate": 0.0001491949910554562,
      "loss": 0.4215,
      "num_input_tokens_seen": 476220,
      "step": 710
    },
    {
      "epoch": 0.25760286225402507,
      "grad_norm": 0.3560994863510132,
      "learning_rate": 0.000148479427549195,
      "loss": 0.4011,
      "num_input_tokens_seen": 481867,
      "step": 720
    },
    {
      "epoch": 0.26118067978533094,
      "grad_norm": 0.3188275992870331,
      "learning_rate": 0.0001477638640429338,
      "loss": 0.4372,
      "num_input_tokens_seen": 489088,
      "step": 730
    },
    {
      "epoch": 0.26475849731663686,
      "grad_norm": 0.3422275483608246,
      "learning_rate": 0.00014704830053667263,
      "loss": 0.4178,
      "num_input_tokens_seen": 494747,
      "step": 740
    },
    {
      "epoch": 0.26833631484794274,
      "grad_norm": 0.29682761430740356,
      "learning_rate": 0.00014633273703041147,
      "loss": 0.4309,
      "num_input_tokens_seen": 501076,
      "step": 750
    },
    {
      "epoch": 0.27191413237924866,
      "grad_norm": 0.44356122612953186,
      "learning_rate": 0.00014561717352415027,
      "loss": 0.4113,
      "num_input_tokens_seen": 508236,
      "step": 760
    },
    {
      "epoch": 0.27549194991055453,
      "grad_norm": 0.4110952317714691,
      "learning_rate": 0.00014490161001788908,
      "loss": 0.4232,
      "num_input_tokens_seen": 514982,
      "step": 770
    },
    {
      "epoch": 0.27906976744186046,
      "grad_norm": 0.3558269441127777,
      "learning_rate": 0.00014418604651162791,
      "loss": 0.4076,
      "num_input_tokens_seen": 521248,
      "step": 780
    },
    {
      "epoch": 0.2826475849731664,
      "grad_norm": 0.3435330092906952,
      "learning_rate": 0.00014347048300536675,
      "loss": 0.4109,
      "num_input_tokens_seen": 527954,
      "step": 790
    },
    {
      "epoch": 0.28622540250447226,
      "grad_norm": 0.2566317021846771,
      "learning_rate": 0.00014275491949910555,
      "loss": 0.4144,
      "num_input_tokens_seen": 535308,
      "step": 800
    },
    {
      "epoch": 0.2898032200357782,
      "grad_norm": 0.24462071061134338,
      "learning_rate": 0.00014203935599284436,
      "loss": 0.3984,
      "num_input_tokens_seen": 542297,
      "step": 810
    },
    {
      "epoch": 0.29338103756708406,
      "grad_norm": 0.4308456480503082,
      "learning_rate": 0.0001413237924865832,
      "loss": 0.4142,
      "num_input_tokens_seen": 548183,
      "step": 820
    },
    {
      "epoch": 0.29695885509839,
      "grad_norm": 0.4515162408351898,
      "learning_rate": 0.00014060822898032203,
      "loss": 0.404,
      "num_input_tokens_seen": 553861,
      "step": 830
    },
    {
      "epoch": 0.3005366726296959,
      "grad_norm": 0.3883764147758484,
      "learning_rate": 0.00013989266547406083,
      "loss": 0.4159,
      "num_input_tokens_seen": 559943,
      "step": 840
    },
    {
      "epoch": 0.3041144901610018,
      "grad_norm": 0.2981751263141632,
      "learning_rate": 0.00013917710196779964,
      "loss": 0.4099,
      "num_input_tokens_seen": 566038,
      "step": 850
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 0.42181873321533203,
      "learning_rate": 0.00013846153846153847,
      "loss": 0.4365,
      "num_input_tokens_seen": 574126,
      "step": 860
    },
    {
      "epoch": 0.3112701252236136,
      "grad_norm": 0.31020838022232056,
      "learning_rate": 0.00013774597495527728,
      "loss": 0.4076,
      "num_input_tokens_seen": 581039,
      "step": 870
    },
    {
      "epoch": 0.3148479427549195,
      "grad_norm": 0.2839311361312866,
      "learning_rate": 0.00013703041144901611,
      "loss": 0.4139,
      "num_input_tokens_seen": 587384,
      "step": 880
    },
    {
      "epoch": 0.3184257602862254,
      "grad_norm": 0.30183765292167664,
      "learning_rate": 0.00013631484794275492,
      "loss": 0.4412,
      "num_input_tokens_seen": 594626,
      "step": 890
    },
    {
      "epoch": 0.3220035778175313,
      "grad_norm": 0.24405309557914734,
      "learning_rate": 0.00013559928443649375,
      "loss": 0.4217,
      "num_input_tokens_seen": 601425,
      "step": 900
    },
    {
      "epoch": 0.32558139534883723,
      "grad_norm": 0.3325401544570923,
      "learning_rate": 0.00013488372093023256,
      "loss": 0.399,
      "num_input_tokens_seen": 607033,
      "step": 910
    },
    {
      "epoch": 0.3291592128801431,
      "grad_norm": 0.4284108877182007,
      "learning_rate": 0.00013416815742397137,
      "loss": 0.4329,
      "num_input_tokens_seen": 612959,
      "step": 920
    },
    {
      "epoch": 0.33273703041144903,
      "grad_norm": 0.39697644114494324,
      "learning_rate": 0.0001334525939177102,
      "loss": 0.4062,
      "num_input_tokens_seen": 620181,
      "step": 930
    },
    {
      "epoch": 0.3363148479427549,
      "grad_norm": 0.4615568220615387,
      "learning_rate": 0.00013273703041144903,
      "loss": 0.3929,
      "num_input_tokens_seen": 625029,
      "step": 940
    },
    {
      "epoch": 0.33989266547406083,
      "grad_norm": 0.3364561200141907,
      "learning_rate": 0.00013202146690518784,
      "loss": 0.4217,
      "num_input_tokens_seen": 630571,
      "step": 950
    },
    {
      "epoch": 0.3434704830053667,
      "grad_norm": 0.3077852129936218,
      "learning_rate": 0.00013130590339892665,
      "loss": 0.4223,
      "num_input_tokens_seen": 636343,
      "step": 960
    },
    {
      "epoch": 0.3470483005366726,
      "grad_norm": 0.4563674032688141,
      "learning_rate": 0.00013059033989266548,
      "loss": 0.4093,
      "num_input_tokens_seen": 644448,
      "step": 970
    },
    {
      "epoch": 0.35062611806797855,
      "grad_norm": 0.3391217291355133,
      "learning_rate": 0.00012987477638640431,
      "loss": 0.4146,
      "num_input_tokens_seen": 651211,
      "step": 980
    },
    {
      "epoch": 0.3542039355992844,
      "grad_norm": 0.2930082082748413,
      "learning_rate": 0.00012915921288014312,
      "loss": 0.4248,
      "num_input_tokens_seen": 658984,
      "step": 990
    },
    {
      "epoch": 0.35778175313059035,
      "grad_norm": 0.3553549647331238,
      "learning_rate": 0.00012844364937388193,
      "loss": 0.417,
      "num_input_tokens_seen": 666184,
      "step": 1000
    },
    {
      "epoch": 0.3613595706618962,
      "grad_norm": 0.3744185268878937,
      "learning_rate": 0.00012772808586762076,
      "loss": 0.3988,
      "num_input_tokens_seen": 673125,
      "step": 1010
    },
    {
      "epoch": 0.36493738819320215,
      "grad_norm": 0.4000609219074249,
      "learning_rate": 0.0001270125223613596,
      "loss": 0.4301,
      "num_input_tokens_seen": 680863,
      "step": 1020
    },
    {
      "epoch": 0.3685152057245081,
      "grad_norm": 0.27482128143310547,
      "learning_rate": 0.0001262969588550984,
      "loss": 0.4122,
      "num_input_tokens_seen": 688582,
      "step": 1030
    },
    {
      "epoch": 0.37209302325581395,
      "grad_norm": 0.438516765832901,
      "learning_rate": 0.0001255813953488372,
      "loss": 0.4253,
      "num_input_tokens_seen": 695760,
      "step": 1040
    },
    {
      "epoch": 0.3756708407871199,
      "grad_norm": 0.26929447054862976,
      "learning_rate": 0.00012486583184257604,
      "loss": 0.405,
      "num_input_tokens_seen": 702561,
      "step": 1050
    },
    {
      "epoch": 0.37924865831842575,
      "grad_norm": 0.24823708832263947,
      "learning_rate": 0.00012415026833631485,
      "loss": 0.4317,
      "num_input_tokens_seen": 710659,
      "step": 1060
    },
    {
      "epoch": 0.3828264758497317,
      "grad_norm": 0.31499966979026794,
      "learning_rate": 0.00012343470483005368,
      "loss": 0.4357,
      "num_input_tokens_seen": 717932,
      "step": 1070
    },
    {
      "epoch": 0.38640429338103754,
      "grad_norm": 0.4409402012825012,
      "learning_rate": 0.0001227191413237925,
      "loss": 0.408,
      "num_input_tokens_seen": 724716,
      "step": 1080
    },
    {
      "epoch": 0.38998211091234347,
      "grad_norm": 0.35407742857933044,
      "learning_rate": 0.00012200357781753131,
      "loss": 0.4088,
      "num_input_tokens_seen": 730694,
      "step": 1090
    },
    {
      "epoch": 0.3935599284436494,
      "grad_norm": 0.36748751997947693,
      "learning_rate": 0.00012128801431127013,
      "loss": 0.4208,
      "num_input_tokens_seen": 739112,
      "step": 1100
    },
    {
      "epoch": 0.39713774597495527,
      "grad_norm": 0.3873242437839508,
      "learning_rate": 0.00012057245080500895,
      "loss": 0.3798,
      "num_input_tokens_seen": 744478,
      "step": 1110
    },
    {
      "epoch": 0.4007155635062612,
      "grad_norm": 0.31334424018859863,
      "learning_rate": 0.00011985688729874778,
      "loss": 0.4179,
      "num_input_tokens_seen": 753315,
      "step": 1120
    },
    {
      "epoch": 0.40429338103756707,
      "grad_norm": 0.42388609051704407,
      "learning_rate": 0.00011914132379248659,
      "loss": 0.3882,
      "num_input_tokens_seen": 759832,
      "step": 1130
    },
    {
      "epoch": 0.407871198568873,
      "grad_norm": 0.3108317255973816,
      "learning_rate": 0.00011842576028622541,
      "loss": 0.4247,
      "num_input_tokens_seen": 766054,
      "step": 1140
    },
    {
      "epoch": 0.41144901610017887,
      "grad_norm": 0.322694331407547,
      "learning_rate": 0.00011771019677996423,
      "loss": 0.416,
      "num_input_tokens_seen": 772192,
      "step": 1150
    },
    {
      "epoch": 0.4150268336314848,
      "grad_norm": 0.4780764877796173,
      "learning_rate": 0.00011699463327370303,
      "loss": 0.3937,
      "num_input_tokens_seen": 778619,
      "step": 1160
    }
  ],
  "logging_steps": 10,
  "max_steps": 2795,
  "num_input_tokens_seen": 778619,
  "num_train_epochs": 1,
  "save_steps": 20,
  "total_flos": 1.7508391701092352e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}