{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9954430379746837,
  "eval_steps": 500,
  "global_step": 1479,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.020253164556962026,
      "grad_norm": 2.8544461931274934,
      "learning_rate": 5e-06,
      "loss": 0.7569,
      "step": 10
    },
    {
      "epoch": 0.04050632911392405,
      "grad_norm": 2.7444903942187175,
      "learning_rate": 5e-06,
      "loss": 0.6507,
      "step": 20
    },
    {
      "epoch": 0.060759493670886074,
      "grad_norm": 1.3943814336457603,
      "learning_rate": 5e-06,
      "loss": 0.6289,
      "step": 30
    },
    {
      "epoch": 0.0810126582278481,
      "grad_norm": 1.4159200277827484,
      "learning_rate": 5e-06,
      "loss": 0.614,
      "step": 40
    },
    {
      "epoch": 0.10126582278481013,
      "grad_norm": 1.8420749848935474,
      "learning_rate": 5e-06,
      "loss": 0.6034,
      "step": 50
    },
    {
      "epoch": 0.12151898734177215,
      "grad_norm": 1.447732727495403,
      "learning_rate": 5e-06,
      "loss": 0.5973,
      "step": 60
    },
    {
      "epoch": 0.14177215189873418,
      "grad_norm": 1.8401989346240508,
      "learning_rate": 5e-06,
      "loss": 0.5925,
      "step": 70
    },
    {
      "epoch": 0.1620253164556962,
      "grad_norm": 1.4380019004522187,
      "learning_rate": 5e-06,
      "loss": 0.5876,
      "step": 80
    },
    {
      "epoch": 0.18227848101265823,
      "grad_norm": 1.657450409370915,
      "learning_rate": 5e-06,
      "loss": 0.5863,
      "step": 90
    },
    {
      "epoch": 0.20253164556962025,
      "grad_norm": 2.311058324506438,
      "learning_rate": 5e-06,
      "loss": 0.5895,
      "step": 100
    },
    {
      "epoch": 0.22278481012658227,
      "grad_norm": 1.2759051791468548,
      "learning_rate": 5e-06,
      "loss": 0.5816,
      "step": 110
    },
    {
      "epoch": 0.2430379746835443,
      "grad_norm": 1.3376976540526444,
      "learning_rate": 5e-06,
      "loss": 0.5792,
      "step": 120
    },
    {
      "epoch": 0.26329113924050634,
      "grad_norm": 1.6258428242345302,
      "learning_rate": 5e-06,
      "loss": 0.578,
      "step": 130
    },
    {
      "epoch": 0.28354430379746837,
      "grad_norm": 1.5716514169659848,
      "learning_rate": 5e-06,
      "loss": 0.5677,
      "step": 140
    },
    {
      "epoch": 0.3037974683544304,
      "grad_norm": 1.5695310925399224,
      "learning_rate": 5e-06,
      "loss": 0.5779,
      "step": 150
    },
    {
      "epoch": 0.3240506329113924,
      "grad_norm": 1.5748942702615565,
      "learning_rate": 5e-06,
      "loss": 0.5762,
      "step": 160
    },
    {
      "epoch": 0.34430379746835443,
      "grad_norm": 1.739225496496722,
      "learning_rate": 5e-06,
      "loss": 0.577,
      "step": 170
    },
    {
      "epoch": 0.36455696202531646,
      "grad_norm": 1.7585144012617044,
      "learning_rate": 5e-06,
      "loss": 0.5731,
      "step": 180
    },
    {
      "epoch": 0.3848101265822785,
      "grad_norm": 1.5833716095990014,
      "learning_rate": 5e-06,
      "loss": 0.5717,
      "step": 190
    },
    {
      "epoch": 0.4050632911392405,
      "grad_norm": 1.6675521594937952,
      "learning_rate": 5e-06,
      "loss": 0.5664,
      "step": 200
    },
    {
      "epoch": 0.4253164556962025,
      "grad_norm": 1.5564420443090938,
      "learning_rate": 5e-06,
      "loss": 0.5621,
      "step": 210
    },
    {
      "epoch": 0.44556962025316454,
      "grad_norm": 1.4391864660123526,
      "learning_rate": 5e-06,
      "loss": 0.5678,
      "step": 220
    },
    {
      "epoch": 0.46582278481012657,
      "grad_norm": 1.3545531998280866,
      "learning_rate": 5e-06,
      "loss": 0.5665,
      "step": 230
    },
    {
      "epoch": 0.4860759493670886,
      "grad_norm": 1.2117242731823523,
      "learning_rate": 5e-06,
      "loss": 0.5628,
      "step": 240
    },
    {
      "epoch": 0.5063291139240507,
      "grad_norm": 1.2967375627557465,
      "learning_rate": 5e-06,
      "loss": 0.5668,
      "step": 250
    },
    {
      "epoch": 0.5265822784810127,
      "grad_norm": 1.4423833757840236,
      "learning_rate": 5e-06,
      "loss": 0.5533,
      "step": 260
    },
    {
      "epoch": 0.5468354430379747,
      "grad_norm": 1.2030292390095976,
      "learning_rate": 5e-06,
      "loss": 0.5623,
      "step": 270
    },
    {
      "epoch": 0.5670886075949367,
      "grad_norm": 1.340394632932045,
      "learning_rate": 5e-06,
      "loss": 0.5577,
      "step": 280
    },
    {
      "epoch": 0.5873417721518988,
      "grad_norm": 1.699090610982426,
      "learning_rate": 5e-06,
      "loss": 0.5579,
      "step": 290
    },
    {
      "epoch": 0.6075949367088608,
      "grad_norm": 1.2878486189890792,
      "learning_rate": 5e-06,
      "loss": 0.5576,
      "step": 300
    },
    {
      "epoch": 0.6278481012658228,
      "grad_norm": 1.2077186320339615,
      "learning_rate": 5e-06,
      "loss": 0.5629,
      "step": 310
    },
    {
      "epoch": 0.6481012658227848,
      "grad_norm": 1.3244718672832188,
      "learning_rate": 5e-06,
      "loss": 0.5594,
      "step": 320
    },
    {
      "epoch": 0.6683544303797468,
      "grad_norm": 1.417195710228401,
      "learning_rate": 5e-06,
      "loss": 0.559,
      "step": 330
    },
    {
      "epoch": 0.6886075949367089,
      "grad_norm": 1.329383308154249,
      "learning_rate": 5e-06,
      "loss": 0.5588,
      "step": 340
    },
    {
      "epoch": 0.7088607594936709,
      "grad_norm": 1.3387484647946544,
      "learning_rate": 5e-06,
      "loss": 0.56,
      "step": 350
    },
    {
      "epoch": 0.7291139240506329,
      "grad_norm": 1.2546659088358711,
      "learning_rate": 5e-06,
      "loss": 0.5622,
      "step": 360
    },
    {
      "epoch": 0.7493670886075949,
      "grad_norm": 1.2552138171718579,
      "learning_rate": 5e-06,
      "loss": 0.5547,
      "step": 370
    },
    {
      "epoch": 0.769620253164557,
      "grad_norm": 1.343049081695452,
      "learning_rate": 5e-06,
      "loss": 0.5514,
      "step": 380
    },
    {
      "epoch": 0.789873417721519,
      "grad_norm": 1.7012223994903535,
      "learning_rate": 5e-06,
      "loss": 0.5546,
      "step": 390
    },
    {
      "epoch": 0.810126582278481,
      "grad_norm": 1.1159081764354688,
      "learning_rate": 5e-06,
      "loss": 0.5549,
      "step": 400
    },
    {
      "epoch": 0.830379746835443,
      "grad_norm": 1.3266370286884424,
      "learning_rate": 5e-06,
      "loss": 0.5495,
      "step": 410
    },
    {
      "epoch": 0.850632911392405,
      "grad_norm": 1.1188079131605666,
      "learning_rate": 5e-06,
      "loss": 0.5576,
      "step": 420
    },
    {
      "epoch": 0.8708860759493671,
      "grad_norm": 1.281542150580452,
      "learning_rate": 5e-06,
      "loss": 0.5508,
      "step": 430
    },
    {
      "epoch": 0.8911392405063291,
      "grad_norm": 1.1598655378018121,
      "learning_rate": 5e-06,
      "loss": 0.5562,
      "step": 440
    },
    {
      "epoch": 0.9113924050632911,
      "grad_norm": 1.2672325826373367,
      "learning_rate": 5e-06,
      "loss": 0.5473,
      "step": 450
    },
    {
      "epoch": 0.9316455696202531,
      "grad_norm": 1.0317053256498185,
      "learning_rate": 5e-06,
      "loss": 0.5516,
      "step": 460
    },
    {
      "epoch": 0.9518987341772152,
      "grad_norm": 1.4315894237990952,
      "learning_rate": 5e-06,
      "loss": 0.5526,
      "step": 470
    },
    {
      "epoch": 0.9721518987341772,
      "grad_norm": 1.2047073476573256,
      "learning_rate": 5e-06,
      "loss": 0.5499,
      "step": 480
    },
    {
      "epoch": 0.9924050632911392,
      "grad_norm": 1.1500251693609045,
      "learning_rate": 5e-06,
      "loss": 0.5464,
      "step": 490
    },
    {
      "epoch": 0.9984810126582279,
      "eval_loss": 0.06868810206651688,
      "eval_runtime": 512.3603,
      "eval_samples_per_second": 25.966,
      "eval_steps_per_second": 0.406,
      "step": 493
    },
    {
      "epoch": 1.0126582278481013,
      "grad_norm": 1.8413916260979792,
      "learning_rate": 5e-06,
      "loss": 0.5079,
      "step": 500
    },
    {
      "epoch": 1.0329113924050632,
      "grad_norm": 1.4722173755845698,
      "learning_rate": 5e-06,
      "loss": 0.4751,
      "step": 510
    },
    {
      "epoch": 1.0531645569620254,
      "grad_norm": 1.3281419836326962,
      "learning_rate": 5e-06,
      "loss": 0.4713,
      "step": 520
    },
    {
      "epoch": 1.0734177215189873,
      "grad_norm": 1.9790378457546416,
      "learning_rate": 5e-06,
      "loss": 0.4733,
      "step": 530
    },
    {
      "epoch": 1.0936708860759494,
      "grad_norm": 1.3589991519525444,
      "learning_rate": 5e-06,
      "loss": 0.4702,
      "step": 540
    },
    {
      "epoch": 1.1139240506329113,
      "grad_norm": 1.4545027920691798,
      "learning_rate": 5e-06,
      "loss": 0.4722,
      "step": 550
    },
    {
      "epoch": 1.1341772151898735,
      "grad_norm": 1.3978013602179746,
      "learning_rate": 5e-06,
      "loss": 0.4695,
      "step": 560
    },
    {
      "epoch": 1.1544303797468354,
      "grad_norm": 1.085881264218112,
      "learning_rate": 5e-06,
      "loss": 0.4762,
      "step": 570
    },
    {
      "epoch": 1.1746835443037975,
      "grad_norm": 1.2222577696921726,
      "learning_rate": 5e-06,
      "loss": 0.475,
      "step": 580
    },
    {
      "epoch": 1.1949367088607594,
      "grad_norm": 1.1668742626981172,
      "learning_rate": 5e-06,
      "loss": 0.4689,
      "step": 590
    },
    {
      "epoch": 1.2151898734177216,
      "grad_norm": 1.2276792224748574,
      "learning_rate": 5e-06,
      "loss": 0.4762,
      "step": 600
    },
    {
      "epoch": 1.2354430379746835,
      "grad_norm": 1.453085671614249,
      "learning_rate": 5e-06,
      "loss": 0.4721,
      "step": 610
    },
    {
      "epoch": 1.2556962025316456,
      "grad_norm": 1.502759057117359,
      "learning_rate": 5e-06,
      "loss": 0.4763,
      "step": 620
    },
    {
      "epoch": 1.2759493670886077,
      "grad_norm": 1.3654526602741475,
      "learning_rate": 5e-06,
      "loss": 0.4758,
      "step": 630
    },
    {
      "epoch": 1.2962025316455696,
      "grad_norm": 1.2900057606104782,
      "learning_rate": 5e-06,
      "loss": 0.4751,
      "step": 640
    },
    {
      "epoch": 1.3164556962025316,
      "grad_norm": 1.2304140089136695,
      "learning_rate": 5e-06,
      "loss": 0.479,
      "step": 650
    },
    {
      "epoch": 1.3367088607594937,
      "grad_norm": 1.3208086599766524,
      "learning_rate": 5e-06,
      "loss": 0.4676,
      "step": 660
    },
    {
      "epoch": 1.3569620253164558,
      "grad_norm": 1.6255671967857097,
      "learning_rate": 5e-06,
      "loss": 0.4817,
      "step": 670
    },
    {
      "epoch": 1.3772151898734177,
      "grad_norm": 1.2914422612667809,
      "learning_rate": 5e-06,
      "loss": 0.4777,
      "step": 680
    },
    {
      "epoch": 1.3974683544303796,
      "grad_norm": 1.1553498242671723,
      "learning_rate": 5e-06,
      "loss": 0.4765,
      "step": 690
    },
    {
      "epoch": 1.4177215189873418,
      "grad_norm": 1.0985630409409104,
      "learning_rate": 5e-06,
      "loss": 0.4745,
      "step": 700
    },
    {
      "epoch": 1.437974683544304,
      "grad_norm": 1.1922053458640358,
      "learning_rate": 5e-06,
      "loss": 0.4796,
      "step": 710
    },
    {
      "epoch": 1.4582278481012658,
      "grad_norm": 1.3398045780563845,
      "learning_rate": 5e-06,
      "loss": 0.48,
      "step": 720
    },
    {
      "epoch": 1.4784810126582277,
      "grad_norm": 1.177617617952355,
      "learning_rate": 5e-06,
      "loss": 0.4825,
      "step": 730
    },
    {
      "epoch": 1.4987341772151899,
      "grad_norm": 1.2525034867544784,
      "learning_rate": 5e-06,
      "loss": 0.4753,
      "step": 740
    },
    {
      "epoch": 1.518987341772152,
      "grad_norm": 1.2039537283128936,
      "learning_rate": 5e-06,
      "loss": 0.4779,
      "step": 750
    },
    {
      "epoch": 1.539240506329114,
      "grad_norm": 1.1297848909075034,
      "learning_rate": 5e-06,
      "loss": 0.4789,
      "step": 760
    },
    {
      "epoch": 1.5594936708860758,
      "grad_norm": 1.3174710848931364,
      "learning_rate": 5e-06,
      "loss": 0.4776,
      "step": 770
    },
    {
      "epoch": 1.579746835443038,
      "grad_norm": 1.175160970448449,
      "learning_rate": 5e-06,
      "loss": 0.478,
      "step": 780
    },
    {
      "epoch": 1.6,
      "grad_norm": 1.2815667975924372,
      "learning_rate": 5e-06,
      "loss": 0.4807,
      "step": 790
    },
    {
      "epoch": 1.620253164556962,
      "grad_norm": 1.158977784005594,
      "learning_rate": 5e-06,
      "loss": 0.4772,
      "step": 800
    },
    {
      "epoch": 1.640506329113924,
      "grad_norm": 1.1864733415229585,
      "learning_rate": 5e-06,
      "loss": 0.4697,
      "step": 810
    },
    {
      "epoch": 1.660759493670886,
      "grad_norm": 1.2895166819237298,
      "learning_rate": 5e-06,
      "loss": 0.4797,
      "step": 820
    },
    {
      "epoch": 1.6810126582278482,
      "grad_norm": 1.300938286282075,
      "learning_rate": 5e-06,
      "loss": 0.4745,
      "step": 830
    },
    {
      "epoch": 1.70126582278481,
      "grad_norm": 1.1424310626896699,
      "learning_rate": 5e-06,
      "loss": 0.476,
      "step": 840
    },
    {
      "epoch": 1.721518987341772,
      "grad_norm": 1.1429559951896378,
      "learning_rate": 5e-06,
      "loss": 0.478,
      "step": 850
    },
    {
      "epoch": 1.7417721518987341,
      "grad_norm": 1.1739860272827067,
      "learning_rate": 5e-06,
      "loss": 0.4783,
      "step": 860
    },
    {
      "epoch": 1.7620253164556963,
      "grad_norm": 1.191849148804322,
      "learning_rate": 5e-06,
      "loss": 0.4814,
      "step": 870
    },
    {
      "epoch": 1.7822784810126582,
      "grad_norm": 1.162039130349595,
      "learning_rate": 5e-06,
      "loss": 0.4823,
      "step": 880
    },
    {
      "epoch": 1.80253164556962,
      "grad_norm": 1.2498690007156736,
      "learning_rate": 5e-06,
      "loss": 0.4791,
      "step": 890
    },
    {
      "epoch": 1.8227848101265822,
      "grad_norm": 1.2554926926315564,
      "learning_rate": 5e-06,
      "loss": 0.4785,
      "step": 900
    },
    {
      "epoch": 1.8430379746835444,
      "grad_norm": 1.1957986226654673,
      "learning_rate": 5e-06,
      "loss": 0.4804,
      "step": 910
    },
    {
      "epoch": 1.8632911392405065,
      "grad_norm": 1.1851282579533418,
      "learning_rate": 5e-06,
      "loss": 0.4792,
      "step": 920
    },
    {
      "epoch": 1.8835443037974684,
      "grad_norm": 1.1068170260766412,
      "learning_rate": 5e-06,
      "loss": 0.4757,
      "step": 930
    },
    {
      "epoch": 1.9037974683544303,
      "grad_norm": 1.2646052261464578,
      "learning_rate": 5e-06,
      "loss": 0.4801,
      "step": 940
    },
    {
      "epoch": 1.9240506329113924,
      "grad_norm": 1.333239155517521,
      "learning_rate": 5e-06,
      "loss": 0.4804,
      "step": 950
    },
    {
      "epoch": 1.9443037974683546,
      "grad_norm": 1.1435808455640823,
      "learning_rate": 5e-06,
      "loss": 0.4845,
      "step": 960
    },
    {
      "epoch": 1.9645569620253165,
      "grad_norm": 1.1466649702197023,
      "learning_rate": 5e-06,
      "loss": 0.4829,
      "step": 970
    },
    {
      "epoch": 1.9848101265822784,
      "grad_norm": 1.268837220918901,
      "learning_rate": 5e-06,
      "loss": 0.4834,
      "step": 980
    },
    {
      "epoch": 1.998987341772152,
      "eval_loss": 0.06870175153017044,
      "eval_runtime": 514.0164,
      "eval_samples_per_second": 25.882,
      "eval_steps_per_second": 0.405,
      "step": 987
    },
    {
      "epoch": 2.0050632911392405,
      "grad_norm": 2.1408625149508023,
      "learning_rate": 5e-06,
      "loss": 0.4569,
      "step": 990
    },
    {
      "epoch": 2.0253164556962027,
      "grad_norm": 1.72871232368084,
      "learning_rate": 5e-06,
      "loss": 0.3942,
      "step": 1000
    },
    {
      "epoch": 2.0455696202531644,
      "grad_norm": 1.8639823449544832,
      "learning_rate": 5e-06,
      "loss": 0.391,
      "step": 1010
    },
    {
      "epoch": 2.0658227848101265,
      "grad_norm": 1.3817144113711173,
      "learning_rate": 5e-06,
      "loss": 0.387,
      "step": 1020
    },
    {
      "epoch": 2.0860759493670886,
      "grad_norm": 1.5869361079541535,
      "learning_rate": 5e-06,
      "loss": 0.3871,
      "step": 1030
    },
    {
      "epoch": 2.1063291139240508,
      "grad_norm": 1.2460624418796462,
      "learning_rate": 5e-06,
      "loss": 0.3851,
      "step": 1040
    },
    {
      "epoch": 2.1265822784810124,
      "grad_norm": 1.7394900572434608,
      "learning_rate": 5e-06,
      "loss": 0.3943,
      "step": 1050
    },
    {
      "epoch": 2.1468354430379746,
      "grad_norm": 1.338895486927167,
      "learning_rate": 5e-06,
      "loss": 0.3897,
      "step": 1060
    },
    {
      "epoch": 2.1670886075949367,
      "grad_norm": 1.247982467499197,
      "learning_rate": 5e-06,
      "loss": 0.3931,
      "step": 1070
    },
    {
      "epoch": 2.187341772151899,
      "grad_norm": 1.3618812768218835,
      "learning_rate": 5e-06,
      "loss": 0.3916,
      "step": 1080
    },
    {
      "epoch": 2.207594936708861,
      "grad_norm": 1.3590221552926487,
      "learning_rate": 5e-06,
      "loss": 0.389,
      "step": 1090
    },
    {
      "epoch": 2.2278481012658227,
      "grad_norm": 1.3797000672458826,
      "learning_rate": 5e-06,
      "loss": 0.3983,
      "step": 1100
    },
    {
      "epoch": 2.248101265822785,
      "grad_norm": 1.2368742400666088,
      "learning_rate": 5e-06,
      "loss": 0.3934,
      "step": 1110
    },
    {
      "epoch": 2.268354430379747,
      "grad_norm": 1.2904792214170409,
      "learning_rate": 5e-06,
      "loss": 0.3961,
      "step": 1120
    },
    {
      "epoch": 2.2886075949367086,
      "grad_norm": 1.5782227405689053,
      "learning_rate": 5e-06,
      "loss": 0.3977,
      "step": 1130
    },
    {
      "epoch": 2.3088607594936708,
      "grad_norm": 1.4737079360584568,
      "learning_rate": 5e-06,
      "loss": 0.3933,
      "step": 1140
    },
    {
      "epoch": 2.329113924050633,
      "grad_norm": 1.7303325343844742,
      "learning_rate": 5e-06,
      "loss": 0.3993,
      "step": 1150
    },
    {
      "epoch": 2.349367088607595,
      "grad_norm": 1.4518621606722086,
      "learning_rate": 5e-06,
      "loss": 0.3991,
      "step": 1160
    },
    {
      "epoch": 2.369620253164557,
      "grad_norm": 2.017892997447158,
      "learning_rate": 5e-06,
      "loss": 0.3994,
      "step": 1170
    },
    {
      "epoch": 2.389873417721519,
      "grad_norm": 1.5068146289530309,
      "learning_rate": 5e-06,
      "loss": 0.3997,
      "step": 1180
    },
    {
      "epoch": 2.410126582278481,
      "grad_norm": 1.353124441993217,
      "learning_rate": 5e-06,
      "loss": 0.3992,
      "step": 1190
    },
    {
      "epoch": 2.430379746835443,
      "grad_norm": 1.4083869167690402,
      "learning_rate": 5e-06,
      "loss": 0.3978,
      "step": 1200
    },
    {
      "epoch": 2.4506329113924052,
      "grad_norm": 1.2368765464544949,
      "learning_rate": 5e-06,
      "loss": 0.3973,
      "step": 1210
    },
    {
      "epoch": 2.470886075949367,
      "grad_norm": 1.2620787369427364,
      "learning_rate": 5e-06,
      "loss": 0.4011,
      "step": 1220
    },
    {
      "epoch": 2.491139240506329,
      "grad_norm": 1.319762068057727,
      "learning_rate": 5e-06,
      "loss": 0.4009,
      "step": 1230
    },
    {
      "epoch": 2.511392405063291,
      "grad_norm": 1.508263073515773,
      "learning_rate": 5e-06,
      "loss": 0.4048,
      "step": 1240
    },
    {
      "epoch": 2.5316455696202533,
      "grad_norm": 1.393725315494752,
      "learning_rate": 5e-06,
      "loss": 0.3985,
      "step": 1250
    },
    {
      "epoch": 2.5518987341772155,
      "grad_norm": 1.7018393169883637,
      "learning_rate": 5e-06,
      "loss": 0.3942,
      "step": 1260
    },
    {
      "epoch": 2.572151898734177,
      "grad_norm": 1.6451189563967772,
      "learning_rate": 5e-06,
      "loss": 0.4009,
      "step": 1270
    },
    {
      "epoch": 2.5924050632911393,
      "grad_norm": 1.3903269923273418,
      "learning_rate": 5e-06,
      "loss": 0.4023,
      "step": 1280
    },
    {
      "epoch": 2.6126582278481014,
      "grad_norm": 1.3438952595757327,
      "learning_rate": 5e-06,
      "loss": 0.4075,
      "step": 1290
    },
    {
      "epoch": 2.632911392405063,
      "grad_norm": 1.6369476312471234,
      "learning_rate": 5e-06,
      "loss": 0.4019,
      "step": 1300
    },
    {
      "epoch": 2.6531645569620252,
      "grad_norm": 1.4002674708903102,
      "learning_rate": 5e-06,
      "loss": 0.4019,
      "step": 1310
    },
    {
      "epoch": 2.6734177215189874,
      "grad_norm": 1.5225886664411385,
      "learning_rate": 5e-06,
      "loss": 0.404,
      "step": 1320
    },
    {
      "epoch": 2.6936708860759495,
      "grad_norm": 1.378966215529765,
      "learning_rate": 5e-06,
      "loss": 0.4067,
      "step": 1330
    },
    {
      "epoch": 2.7139240506329116,
      "grad_norm": 1.4243714009335353,
      "learning_rate": 5e-06,
      "loss": 0.4051,
      "step": 1340
    },
    {
      "epoch": 2.7341772151898733,
      "grad_norm": 1.2628130063930583,
      "learning_rate": 5e-06,
      "loss": 0.4047,
      "step": 1350
    },
    {
      "epoch": 2.7544303797468355,
      "grad_norm": 1.4068460224070376,
      "learning_rate": 5e-06,
      "loss": 0.4003,
      "step": 1360
    },
    {
      "epoch": 2.7746835443037976,
      "grad_norm": 1.5186731540820997,
      "learning_rate": 5e-06,
      "loss": 0.4064,
      "step": 1370
    },
    {
      "epoch": 2.7949367088607593,
      "grad_norm": 1.415787690993235,
      "learning_rate": 5e-06,
      "loss": 0.404,
      "step": 1380
    },
    {
      "epoch": 2.8151898734177214,
      "grad_norm": 1.4829742066812814,
      "learning_rate": 5e-06,
      "loss": 0.4067,
      "step": 1390
    },
    {
      "epoch": 2.8354430379746836,
      "grad_norm": 1.3153556553473909,
      "learning_rate": 5e-06,
      "loss": 0.4002,
      "step": 1400
    },
    {
      "epoch": 2.8556962025316457,
      "grad_norm": 1.258632308468112,
      "learning_rate": 5e-06,
      "loss": 0.4088,
      "step": 1410
    },
    {
      "epoch": 2.875949367088608,
      "grad_norm": 1.3069199594530845,
      "learning_rate": 5e-06,
      "loss": 0.4049,
      "step": 1420
    },
    {
      "epoch": 2.8962025316455695,
      "grad_norm": 1.348138135055361,
      "learning_rate": 5e-06,
      "loss": 0.4055,
      "step": 1430
    },
    {
      "epoch": 2.9164556962025316,
      "grad_norm": 1.3516104016534718,
      "learning_rate": 5e-06,
      "loss": 0.4076,
      "step": 1440
    },
    {
      "epoch": 2.9367088607594938,
      "grad_norm": 1.3482820531763435,
      "learning_rate": 5e-06,
      "loss": 0.4162,
      "step": 1450
    },
    {
      "epoch": 2.9569620253164555,
      "grad_norm": 1.4854569589520752,
      "learning_rate": 5e-06,
      "loss": 0.4042,
      "step": 1460
    },
    {
      "epoch": 2.9772151898734176,
      "grad_norm": 1.2602327451924493,
      "learning_rate": 5e-06,
      "loss": 0.4082,
      "step": 1470
    },
    {
      "epoch": 2.9954430379746837,
      "eval_loss": 0.07233163714408875,
      "eval_runtime": 515.8273,
      "eval_samples_per_second": 25.792,
      "eval_steps_per_second": 0.403,
      "step": 1479
    },
    {
      "epoch": 2.9954430379746837,
      "step": 1479,
      "total_flos": 2477170706350080.0,
      "train_loss": 0.4835145230064366,
      "train_runtime": 85503.9751,
      "train_samples_per_second": 8.868,
      "train_steps_per_second": 0.017
    }
  ],
  "logging_steps": 10,
  "max_steps": 1479,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2477170706350080.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}