{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9954430379746837,
  "eval_steps": 500,
  "global_step": 1479,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.020253164556962026,
      "grad_norm": 2.8877771911403016,
      "learning_rate": 5e-06,
      "loss": 0.7568,
      "step": 10
    },
    {
      "epoch": 0.04050632911392405,
      "grad_norm": 2.2294834485187973,
      "learning_rate": 5e-06,
      "loss": 0.6504,
      "step": 20
    },
    {
      "epoch": 0.060759493670886074,
      "grad_norm": 2.4547588475293103,
      "learning_rate": 5e-06,
      "loss": 0.629,
      "step": 30
    },
    {
      "epoch": 0.0810126582278481,
      "grad_norm": 2.0443878280598624,
      "learning_rate": 5e-06,
      "loss": 0.6134,
      "step": 40
    },
    {
      "epoch": 0.10126582278481013,
      "grad_norm": 1.5102886423214212,
      "learning_rate": 5e-06,
      "loss": 0.6034,
      "step": 50
    },
    {
      "epoch": 0.12151898734177215,
      "grad_norm": 1.4856866018507489,
      "learning_rate": 5e-06,
      "loss": 0.5979,
      "step": 60
    },
    {
      "epoch": 0.14177215189873418,
      "grad_norm": 1.5316356840206717,
      "learning_rate": 5e-06,
      "loss": 0.5922,
      "step": 70
    },
    {
      "epoch": 0.1620253164556962,
      "grad_norm": 1.7147665128956195,
      "learning_rate": 5e-06,
      "loss": 0.5877,
      "step": 80
    },
    {
      "epoch": 0.18227848101265823,
      "grad_norm": 1.7308970189870827,
      "learning_rate": 5e-06,
      "loss": 0.5869,
      "step": 90
    },
    {
      "epoch": 0.20253164556962025,
      "grad_norm": 2.2908677489633327,
      "learning_rate": 5e-06,
      "loss": 0.5894,
      "step": 100
    },
    {
      "epoch": 0.22278481012658227,
      "grad_norm": 1.354241491190274,
      "learning_rate": 5e-06,
      "loss": 0.5815,
      "step": 110
    },
    {
      "epoch": 0.2430379746835443,
      "grad_norm": 1.3948248922862374,
      "learning_rate": 5e-06,
      "loss": 0.5792,
      "step": 120
    },
    {
      "epoch": 0.26329113924050634,
      "grad_norm": 2.2718824029357347,
      "learning_rate": 5e-06,
      "loss": 0.5781,
      "step": 130
    },
    {
      "epoch": 0.28354430379746837,
      "grad_norm": 2.1060380654628625,
      "learning_rate": 5e-06,
      "loss": 0.5682,
      "step": 140
    },
    {
      "epoch": 0.3037974683544304,
      "grad_norm": 1.6323986172543352,
      "learning_rate": 5e-06,
      "loss": 0.578,
      "step": 150
    },
    {
      "epoch": 0.3240506329113924,
      "grad_norm": 1.5001325556393612,
      "learning_rate": 5e-06,
      "loss": 0.5762,
      "step": 160
    },
    {
      "epoch": 0.34430379746835443,
      "grad_norm": 2.19381837761027,
      "learning_rate": 5e-06,
      "loss": 0.5776,
      "step": 170
    },
    {
      "epoch": 0.36455696202531646,
      "grad_norm": 2.0646731852381044,
      "learning_rate": 5e-06,
      "loss": 0.5727,
      "step": 180
    },
    {
      "epoch": 0.3848101265822785,
      "grad_norm": 1.5335478355598067,
      "learning_rate": 5e-06,
      "loss": 0.5708,
      "step": 190
    },
    {
      "epoch": 0.4050632911392405,
      "grad_norm": 1.3945582055615051,
      "learning_rate": 5e-06,
      "loss": 0.565,
      "step": 200
    },
    {
      "epoch": 0.4253164556962025,
      "grad_norm": 1.3897842852914954,
      "learning_rate": 5e-06,
      "loss": 0.5608,
      "step": 210
    },
    {
      "epoch": 0.44556962025316454,
      "grad_norm": 1.4935312375590064,
      "learning_rate": 5e-06,
      "loss": 0.5665,
      "step": 220
    },
    {
      "epoch": 0.46582278481012657,
      "grad_norm": 1.7518839161142288,
      "learning_rate": 5e-06,
      "loss": 0.5659,
      "step": 230
    },
    {
      "epoch": 0.4860759493670886,
      "grad_norm": 1.1585796963266615,
      "learning_rate": 5e-06,
      "loss": 0.5617,
      "step": 240
    },
    {
      "epoch": 0.5063291139240507,
      "grad_norm": 1.2966452791221394,
      "learning_rate": 5e-06,
      "loss": 0.5658,
      "step": 250
    },
    {
      "epoch": 0.5265822784810127,
      "grad_norm": 1.5279441262749707,
      "learning_rate": 5e-06,
      "loss": 0.5528,
      "step": 260
    },
    {
      "epoch": 0.5468354430379747,
      "grad_norm": 1.4864965256894778,
      "learning_rate": 5e-06,
      "loss": 0.5624,
      "step": 270
    },
    {
      "epoch": 0.5670886075949367,
      "grad_norm": 1.6172726228041323,
      "learning_rate": 5e-06,
      "loss": 0.5584,
      "step": 280
    },
    {
      "epoch": 0.5873417721518988,
      "grad_norm": 1.309516691642357,
      "learning_rate": 5e-06,
      "loss": 0.5584,
      "step": 290
    },
    {
      "epoch": 0.6075949367088608,
      "grad_norm": 1.3060937890001645,
      "learning_rate": 5e-06,
      "loss": 0.5579,
      "step": 300
    },
    {
      "epoch": 0.6278481012658228,
      "grad_norm": 1.2379596944740083,
      "learning_rate": 5e-06,
      "loss": 0.5647,
      "step": 310
    },
    {
      "epoch": 0.6481012658227848,
      "grad_norm": 1.2060237317202396,
      "learning_rate": 5e-06,
      "loss": 0.561,
      "step": 320
    },
    {
      "epoch": 0.6683544303797468,
      "grad_norm": 1.3300076458783476,
      "learning_rate": 5e-06,
      "loss": 0.561,
      "step": 330
    },
    {
      "epoch": 0.6886075949367089,
      "grad_norm": 1.298934921391733,
      "learning_rate": 5e-06,
      "loss": 0.5607,
      "step": 340
    },
    {
      "epoch": 0.7088607594936709,
      "grad_norm": 1.34396713861144,
      "learning_rate": 5e-06,
      "loss": 0.5619,
      "step": 350
    },
    {
      "epoch": 0.7291139240506329,
      "grad_norm": 1.230038251173589,
      "learning_rate": 5e-06,
      "loss": 0.5638,
      "step": 360
    },
    {
      "epoch": 0.7493670886075949,
      "grad_norm": 1.2916876849266508,
      "learning_rate": 5e-06,
      "loss": 0.5561,
      "step": 370
    },
    {
      "epoch": 0.769620253164557,
      "grad_norm": 1.4062727584587997,
      "learning_rate": 5e-06,
      "loss": 0.5528,
      "step": 380
    },
    {
      "epoch": 0.789873417721519,
      "grad_norm": 1.8176516191927978,
      "learning_rate": 5e-06,
      "loss": 0.5557,
      "step": 390
    },
    {
      "epoch": 0.810126582278481,
      "grad_norm": 1.1154026747296282,
      "learning_rate": 5e-06,
      "loss": 0.5557,
      "step": 400
    },
    {
      "epoch": 0.830379746835443,
      "grad_norm": 1.4178981343708785,
      "learning_rate": 5e-06,
      "loss": 0.5505,
      "step": 410
    },
    {
      "epoch": 0.850632911392405,
      "grad_norm": 1.1213120675703767,
      "learning_rate": 5e-06,
      "loss": 0.5589,
      "step": 420
    },
    {
      "epoch": 0.8708860759493671,
      "grad_norm": 1.2510408891414735,
      "learning_rate": 5e-06,
      "loss": 0.5521,
      "step": 430
    },
    {
      "epoch": 0.8911392405063291,
      "grad_norm": 1.1502044804280906,
      "learning_rate": 5e-06,
      "loss": 0.5578,
      "step": 440
    },
    {
      "epoch": 0.9113924050632911,
      "grad_norm": 1.4085741123286435,
      "learning_rate": 5e-06,
      "loss": 0.5491,
      "step": 450
    },
    {
      "epoch": 0.9316455696202531,
      "grad_norm": 1.0314952283107646,
      "learning_rate": 5e-06,
      "loss": 0.5534,
      "step": 460
    },
    {
      "epoch": 0.9518987341772152,
      "grad_norm": 1.392988455426854,
      "learning_rate": 5e-06,
      "loss": 0.5543,
      "step": 470
    },
    {
      "epoch": 0.9721518987341772,
      "grad_norm": 1.0962802919223296,
      "learning_rate": 5e-06,
      "loss": 0.5513,
      "step": 480
    },
    {
      "epoch": 0.9924050632911392,
      "grad_norm": 1.1019400606381653,
      "learning_rate": 5e-06,
      "loss": 0.5479,
      "step": 490
    },
    {
      "epoch": 0.9984810126582279,
      "eval_loss": 0.0688617154955864,
      "eval_runtime": 506.5979,
      "eval_samples_per_second": 26.261,
      "eval_steps_per_second": 0.411,
      "step": 493
    },
    {
      "epoch": 1.0126582278481013,
      "grad_norm": 1.9265333502006505,
      "learning_rate": 5e-06,
      "loss": 0.5076,
      "step": 500
    },
    {
      "epoch": 1.0329113924050632,
      "grad_norm": 1.9664694257097348,
      "learning_rate": 5e-06,
      "loss": 0.4727,
      "step": 510
    },
    {
      "epoch": 1.0531645569620254,
      "grad_norm": 1.7942783041476533,
      "learning_rate": 5e-06,
      "loss": 0.4674,
      "step": 520
    },
    {
      "epoch": 1.0734177215189873,
      "grad_norm": 2.3027454914122107,
      "learning_rate": 5e-06,
      "loss": 0.4693,
      "step": 530
    },
    {
      "epoch": 1.0936708860759494,
      "grad_norm": 1.6756627768032348,
      "learning_rate": 5e-06,
      "loss": 0.4652,
      "step": 540
    },
    {
      "epoch": 1.1139240506329113,
      "grad_norm": 1.3357692446389655,
      "learning_rate": 5e-06,
      "loss": 0.4668,
      "step": 550
    },
    {
      "epoch": 1.1341772151898735,
      "grad_norm": 1.2874290505413306,
      "learning_rate": 5e-06,
      "loss": 0.4634,
      "step": 560
    },
    {
      "epoch": 1.1544303797468354,
      "grad_norm": 1.1486343814753555,
      "learning_rate": 5e-06,
      "loss": 0.4706,
      "step": 570
    },
    {
      "epoch": 1.1746835443037975,
      "grad_norm": 1.2063227401232046,
      "learning_rate": 5e-06,
      "loss": 0.4705,
      "step": 580
    },
    {
      "epoch": 1.1949367088607594,
      "grad_norm": 1.184320963931943,
      "learning_rate": 5e-06,
      "loss": 0.4653,
      "step": 590
    },
    {
      "epoch": 1.2151898734177216,
      "grad_norm": 1.3358636951088882,
      "learning_rate": 5e-06,
      "loss": 0.4731,
      "step": 600
    },
    {
      "epoch": 1.2354430379746835,
      "grad_norm": 1.425133318797002,
      "learning_rate": 5e-06,
      "loss": 0.4689,
      "step": 610
    },
    {
      "epoch": 1.2556962025316456,
      "grad_norm": 1.6364325943594298,
      "learning_rate": 5e-06,
      "loss": 0.4729,
      "step": 620
    },
    {
      "epoch": 1.2759493670886077,
      "grad_norm": 1.318673316672248,
      "learning_rate": 5e-06,
      "loss": 0.4732,
      "step": 630
    },
    {
      "epoch": 1.2962025316455696,
      "grad_norm": 1.3122002263891996,
      "learning_rate": 5e-06,
      "loss": 0.4722,
      "step": 640
    },
    {
      "epoch": 1.3164556962025316,
      "grad_norm": 2.1508251935281733,
      "learning_rate": 5e-06,
      "loss": 0.4752,
      "step": 650
    },
    {
      "epoch": 1.3367088607594937,
      "grad_norm": 1.863930986406004,
      "learning_rate": 5e-06,
      "loss": 0.4639,
      "step": 660
    },
    {
      "epoch": 1.3569620253164558,
      "grad_norm": 1.4719212499856806,
      "learning_rate": 5e-06,
      "loss": 0.4771,
      "step": 670
    },
    {
      "epoch": 1.3772151898734177,
      "grad_norm": 1.414514809425289,
      "learning_rate": 5e-06,
      "loss": 0.4734,
      "step": 680
    },
    {
      "epoch": 1.3974683544303796,
      "grad_norm": 1.6000497017103448,
      "learning_rate": 5e-06,
      "loss": 0.4723,
      "step": 690
    },
    {
      "epoch": 1.4177215189873418,
      "grad_norm": 1.400047845824815,
      "learning_rate": 5e-06,
      "loss": 0.4708,
      "step": 700
    },
    {
      "epoch": 1.437974683544304,
      "grad_norm": 1.3238131935441257,
      "learning_rate": 5e-06,
      "loss": 0.4754,
      "step": 710
    },
    {
      "epoch": 1.4582278481012658,
      "grad_norm": 1.2381301593371368,
      "learning_rate": 5e-06,
      "loss": 0.4754,
      "step": 720
    },
    {
      "epoch": 1.4784810126582277,
      "grad_norm": 1.3586544382551995,
      "learning_rate": 5e-06,
      "loss": 0.4785,
      "step": 730
    },
    {
      "epoch": 1.4987341772151899,
      "grad_norm": 1.2020207137782775,
      "learning_rate": 5e-06,
      "loss": 0.4721,
      "step": 740
    },
    {
      "epoch": 1.518987341772152,
      "grad_norm": 1.2396166262877506,
      "learning_rate": 5e-06,
      "loss": 0.4747,
      "step": 750
    },
    {
      "epoch": 1.539240506329114,
      "grad_norm": 1.1667049104651803,
      "learning_rate": 5e-06,
      "loss": 0.4765,
      "step": 760
    },
    {
      "epoch": 1.5594936708860758,
      "grad_norm": 1.273616512026121,
      "learning_rate": 5e-06,
      "loss": 0.4759,
      "step": 770
    },
    {
      "epoch": 1.579746835443038,
      "grad_norm": 1.1889561098260564,
      "learning_rate": 5e-06,
      "loss": 0.4764,
      "step": 780
    },
    {
      "epoch": 1.6,
      "grad_norm": 1.1503189703578973,
      "learning_rate": 5e-06,
      "loss": 0.4787,
      "step": 790
    },
    {
      "epoch": 1.620253164556962,
      "grad_norm": 1.4278882590555009,
      "learning_rate": 5e-06,
      "loss": 0.4754,
      "step": 800
    },
    {
      "epoch": 1.640506329113924,
      "grad_norm": 1.183071414253031,
      "learning_rate": 5e-06,
      "loss": 0.4682,
      "step": 810
    },
    {
      "epoch": 1.660759493670886,
      "grad_norm": 1.3960016943164528,
      "learning_rate": 5e-06,
      "loss": 0.4773,
      "step": 820
    },
    {
      "epoch": 1.6810126582278482,
      "grad_norm": 1.2320788611563414,
      "learning_rate": 5e-06,
      "loss": 0.4728,
      "step": 830
    },
    {
      "epoch": 1.70126582278481,
      "grad_norm": 1.3108466956318439,
      "learning_rate": 5e-06,
      "loss": 0.4747,
      "step": 840
    },
    {
      "epoch": 1.721518987341772,
      "grad_norm": 1.181031459769793,
      "learning_rate": 5e-06,
      "loss": 0.477,
      "step": 850
    },
    {
      "epoch": 1.7417721518987341,
      "grad_norm": 1.1302662753413042,
      "learning_rate": 5e-06,
      "loss": 0.4768,
      "step": 860
    },
    {
      "epoch": 1.7620253164556963,
      "grad_norm": 1.127889601843092,
      "learning_rate": 5e-06,
      "loss": 0.4804,
      "step": 870
    },
    {
      "epoch": 1.7822784810126582,
      "grad_norm": 1.145481859966167,
      "learning_rate": 5e-06,
      "loss": 0.4817,
      "step": 880
    },
    {
      "epoch": 1.80253164556962,
      "grad_norm": 1.2363679424934566,
      "learning_rate": 5e-06,
      "loss": 0.478,
      "step": 890
    },
    {
      "epoch": 1.8227848101265822,
      "grad_norm": 1.2389482233566855,
      "learning_rate": 5e-06,
      "loss": 0.4776,
      "step": 900
    },
    {
      "epoch": 1.8430379746835444,
      "grad_norm": 1.1716310226253532,
      "learning_rate": 5e-06,
      "loss": 0.4796,
      "step": 910
    },
    {
      "epoch": 1.8632911392405065,
      "grad_norm": 1.2634817133278293,
      "learning_rate": 5e-06,
      "loss": 0.4783,
      "step": 920
    },
    {
      "epoch": 1.8835443037974684,
      "grad_norm": 1.0975216253496476,
      "learning_rate": 5e-06,
      "loss": 0.4749,
      "step": 930
    },
    {
      "epoch": 1.9037974683544303,
      "grad_norm": 1.1740097191762555,
      "learning_rate": 5e-06,
      "loss": 0.4789,
      "step": 940
    },
    {
      "epoch": 1.9240506329113924,
      "grad_norm": 1.6550569156768484,
      "learning_rate": 5e-06,
      "loss": 0.4797,
      "step": 950
    },
    {
      "epoch": 1.9443037974683546,
      "grad_norm": 1.5015715849094382,
      "learning_rate": 5e-06,
      "loss": 0.4842,
      "step": 960
    },
    {
      "epoch": 1.9645569620253165,
      "grad_norm": 1.0921513269169847,
      "learning_rate": 5e-06,
      "loss": 0.4816,
      "step": 970
    },
    {
      "epoch": 1.9848101265822784,
      "grad_norm": 1.3708525343966105,
      "learning_rate": 5e-06,
      "loss": 0.481,
      "step": 980
    },
    {
      "epoch": 1.998987341772152,
      "eval_loss": 0.06875930726528168,
      "eval_runtime": 509.0527,
      "eval_samples_per_second": 26.135,
      "eval_steps_per_second": 0.409,
      "step": 987
    },
    {
      "epoch": 2.0050632911392405,
      "grad_norm": 1.9154494703196068,
      "learning_rate": 5e-06,
      "loss": 0.4531,
      "step": 990
    },
    {
      "epoch": 2.0253164556962027,
      "grad_norm": 1.979264846486845,
      "learning_rate": 5e-06,
      "loss": 0.3866,
      "step": 1000
    },
    {
      "epoch": 2.0455696202531644,
      "grad_norm": 1.4526649203181639,
      "learning_rate": 5e-06,
      "loss": 0.3822,
      "step": 1010
    },
    {
      "epoch": 2.0658227848101265,
      "grad_norm": 1.4220994888729301,
      "learning_rate": 5e-06,
      "loss": 0.3784,
      "step": 1020
    },
    {
      "epoch": 2.0860759493670886,
      "grad_norm": 1.3393673769442962,
      "learning_rate": 5e-06,
      "loss": 0.3788,
      "step": 1030
    },
    {
      "epoch": 2.1063291139240508,
      "grad_norm": 1.277014029344912,
      "learning_rate": 5e-06,
      "loss": 0.3764,
      "step": 1040
    },
    {
      "epoch": 2.1265822784810124,
      "grad_norm": 1.5340314098900096,
      "learning_rate": 5e-06,
      "loss": 0.3868,
      "step": 1050
    },
    {
      "epoch": 2.1468354430379746,
      "grad_norm": 1.6508873941026114,
      "learning_rate": 5e-06,
      "loss": 0.3822,
      "step": 1060
    },
    {
      "epoch": 2.1670886075949367,
      "grad_norm": 1.407606446135562,
      "learning_rate": 5e-06,
      "loss": 0.3862,
      "step": 1070
    },
    {
      "epoch": 2.187341772151899,
      "grad_norm": 1.411050375863623,
      "learning_rate": 5e-06,
      "loss": 0.384,
      "step": 1080
    },
    {
      "epoch": 2.207594936708861,
      "grad_norm": 1.3248395383039557,
      "learning_rate": 5e-06,
      "loss": 0.3819,
      "step": 1090
    },
    {
      "epoch": 2.2278481012658227,
      "grad_norm": 1.3819331833896153,
      "learning_rate": 5e-06,
      "loss": 0.3912,
      "step": 1100
    },
    {
      "epoch": 2.248101265822785,
      "grad_norm": 1.2436936664526173,
      "learning_rate": 5e-06,
      "loss": 0.3874,
      "step": 1110
    },
    {
      "epoch": 2.268354430379747,
      "grad_norm": 1.2684331146489296,
      "learning_rate": 5e-06,
      "loss": 0.39,
      "step": 1120
    },
    {
      "epoch": 2.2886075949367086,
      "grad_norm": 1.5794913167245803,
      "learning_rate": 5e-06,
      "loss": 0.3915,
      "step": 1130
    },
    {
      "epoch": 2.3088607594936708,
      "grad_norm": 1.3748398729318,
      "learning_rate": 5e-06,
      "loss": 0.3875,
      "step": 1140
    },
    {
      "epoch": 2.329113924050633,
      "grad_norm": 93.67781801844193,
      "learning_rate": 5e-06,
      "loss": 0.3941,
      "step": 1150
    },
    {
      "epoch": 2.349367088607595,
      "grad_norm": 1.6658805973526587,
      "learning_rate": 5e-06,
      "loss": 0.3937,
      "step": 1160
    },
    {
      "epoch": 2.369620253164557,
      "grad_norm": 1.387380031309021,
      "learning_rate": 5e-06,
      "loss": 0.3929,
      "step": 1170
    },
    {
      "epoch": 2.389873417721519,
      "grad_norm": 1.286285420789643,
      "learning_rate": 5e-06,
      "loss": 0.3925,
      "step": 1180
    },
    {
      "epoch": 2.410126582278481,
      "grad_norm": 1.2998738300592168,
      "learning_rate": 5e-06,
      "loss": 0.3926,
      "step": 1190
    },
    {
      "epoch": 2.430379746835443,
      "grad_norm": 1.5793445810769084,
      "learning_rate": 5e-06,
      "loss": 0.3917,
      "step": 1200
    },
    {
      "epoch": 2.4506329113924052,
      "grad_norm": 1.2740544518977726,
      "learning_rate": 5e-06,
      "loss": 0.392,
      "step": 1210
    },
    {
      "epoch": 2.470886075949367,
      "grad_norm": 1.23865480155546,
      "learning_rate": 5e-06,
      "loss": 0.3945,
      "step": 1220
    },
    {
      "epoch": 2.491139240506329,
      "grad_norm": 1.3769287033825384,
      "learning_rate": 5e-06,
      "loss": 0.3948,
      "step": 1230
    },
    {
      "epoch": 2.511392405063291,
      "grad_norm": 1.4266978552229843,
      "learning_rate": 5e-06,
      "loss": 0.3994,
      "step": 1240
    },
    {
      "epoch": 2.5316455696202533,
      "grad_norm": 1.3743424703937366,
      "learning_rate": 5e-06,
      "loss": 0.3936,
      "step": 1250
    },
    {
      "epoch": 2.5518987341772155,
      "grad_norm": 1.475562880517447,
      "learning_rate": 5e-06,
      "loss": 0.3887,
      "step": 1260
    },
    {
      "epoch": 2.572151898734177,
      "grad_norm": 1.3396408438411616,
      "learning_rate": 5e-06,
      "loss": 0.3951,
      "step": 1270
    },
    {
      "epoch": 2.5924050632911393,
      "grad_norm": 1.3394788659237564,
      "learning_rate": 5e-06,
      "loss": 0.3969,
      "step": 1280
    },
    {
      "epoch": 2.6126582278481014,
      "grad_norm": 1.373851456706019,
      "learning_rate": 5e-06,
      "loss": 0.4018,
      "step": 1290
    },
    {
      "epoch": 2.632911392405063,
      "grad_norm": 1.4504759405093464,
      "learning_rate": 5e-06,
      "loss": 0.397,
      "step": 1300
    },
    {
      "epoch": 2.6531645569620252,
      "grad_norm": 1.254741242014257,
      "learning_rate": 5e-06,
      "loss": 0.3972,
      "step": 1310
    },
    {
      "epoch": 2.6734177215189874,
      "grad_norm": 1.5197421689722739,
      "learning_rate": 5e-06,
      "loss": 0.4001,
      "step": 1320
    },
    {
      "epoch": 2.6936708860759495,
      "grad_norm": 1.2789775046223264,
      "learning_rate": 5e-06,
      "loss": 0.4023,
      "step": 1330
    },
    {
      "epoch": 2.7139240506329116,
      "grad_norm": 1.4778301872403103,
      "learning_rate": 5e-06,
      "loss": 0.4004,
      "step": 1340
    },
    {
      "epoch": 2.7341772151898733,
      "grad_norm": 1.2954290946299785,
      "learning_rate": 5e-06,
      "loss": 0.3999,
      "step": 1350
    },
    {
      "epoch": 2.7544303797468355,
      "grad_norm": 1.2598940164892094,
      "learning_rate": 5e-06,
      "loss": 0.3955,
      "step": 1360
    },
    {
      "epoch": 2.7746835443037976,
      "grad_norm": 1.4352391902300061,
      "learning_rate": 5e-06,
      "loss": 0.4017,
      "step": 1370
    },
    {
      "epoch": 2.7949367088607593,
      "grad_norm": 1.3764405845983565,
      "learning_rate": 5e-06,
      "loss": 0.3994,
      "step": 1380
    },
    {
      "epoch": 2.8151898734177214,
      "grad_norm": 1.4992747448014379,
      "learning_rate": 5e-06,
      "loss": 0.4019,
      "step": 1390
    },
    {
      "epoch": 2.8354430379746836,
      "grad_norm": 1.2897539200248462,
      "learning_rate": 5e-06,
      "loss": 0.3953,
      "step": 1400
    },
    {
      "epoch": 2.8556962025316457,
      "grad_norm": 1.275567652965357,
      "learning_rate": 5e-06,
      "loss": 0.4043,
      "step": 1410
    },
    {
      "epoch": 2.875949367088608,
      "grad_norm": 1.3040786299424412,
      "learning_rate": 5e-06,
      "loss": 0.4001,
      "step": 1420
    },
    {
      "epoch": 2.8962025316455695,
      "grad_norm": 1.3229679573518474,
      "learning_rate": 5e-06,
      "loss": 0.4007,
      "step": 1430
    },
    {
      "epoch": 2.9164556962025316,
      "grad_norm": 1.3751034663278319,
      "learning_rate": 5e-06,
      "loss": 0.4032,
      "step": 1440
    },
    {
      "epoch": 2.9367088607594938,
      "grad_norm": 1.332106303867251,
      "learning_rate": 5e-06,
      "loss": 0.4118,
      "step": 1450
    },
    {
      "epoch": 2.9569620253164555,
      "grad_norm": 1.4514797774998427,
      "learning_rate": 5e-06,
      "loss": 0.3999,
      "step": 1460
    },
    {
      "epoch": 2.9772151898734176,
      "grad_norm": 1.2854548452051338,
      "learning_rate": 5e-06,
      "loss": 0.404,
      "step": 1470
    },
    {
      "epoch": 2.9954430379746837,
      "eval_loss": 0.07273153215646744,
      "eval_runtime": 508.6931,
      "eval_samples_per_second": 26.153,
      "eval_steps_per_second": 0.409,
      "step": 1479
    },
    {
      "epoch": 2.9954430379746837,
      "step": 1479,
      "total_flos": 2477170706350080.0,
      "train_loss": 0.4808535206712848,
      "train_runtime": 84482.3839,
      "train_samples_per_second": 8.976,
      "train_steps_per_second": 0.018
    }
  ],
  "logging_steps": 10,
  "max_steps": 1479,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2477170706350080.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}