kiddothe2b/danish-legal-lm-base / trainer_state.json (500k training steps with 128 tokens)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 53.57908272610373,
"global_step": 500000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11,
"learning_rate": 4.000000000000001e-06,
"loss": 8.8501,
"step": 1000
},
{
"epoch": 0.21,
"learning_rate": 8.000000000000001e-06,
"loss": 7.0526,
"step": 2000
},
{
"epoch": 0.32,
"learning_rate": 1.2e-05,
"loss": 6.6396,
"step": 3000
},
{
"epoch": 0.43,
"learning_rate": 1.6000000000000003e-05,
"loss": 6.457,
"step": 4000
},
{
"epoch": 0.54,
"learning_rate": 2e-05,
"loss": 6.3073,
"step": 5000
},
{
"epoch": 0.64,
"learning_rate": 2.4e-05,
"loss": 6.1934,
"step": 6000
},
{
"epoch": 0.75,
"learning_rate": 2.8000000000000003e-05,
"loss": 6.0929,
"step": 7000
},
{
"epoch": 0.86,
"learning_rate": 3.2000000000000005e-05,
"loss": 6.0097,
"step": 8000
},
{
"epoch": 0.96,
"learning_rate": 3.6e-05,
"loss": 5.9222,
"step": 9000
},
{
"epoch": 1.07,
"learning_rate": 4e-05,
"loss": 5.8487,
"step": 10000
},
{
"epoch": 1.18,
"learning_rate": 4.4000000000000006e-05,
"loss": 5.7752,
"step": 11000
},
{
"epoch": 1.29,
"learning_rate": 4.8e-05,
"loss": 5.4387,
"step": 12000
},
{
"epoch": 1.39,
"learning_rate": 5.2000000000000004e-05,
"loss": 4.7713,
"step": 13000
},
{
"epoch": 1.5,
"learning_rate": 5.6000000000000006e-05,
"loss": 3.9954,
"step": 14000
},
{
"epoch": 1.61,
"learning_rate": 6e-05,
"loss": 3.4334,
"step": 15000
},
{
"epoch": 1.71,
"learning_rate": 6.400000000000001e-05,
"loss": 3.0948,
"step": 16000
},
{
"epoch": 1.82,
"learning_rate": 6.800000000000001e-05,
"loss": 2.835,
"step": 17000
},
{
"epoch": 1.93,
"learning_rate": 7.2e-05,
"loss": 2.6402,
"step": 18000
},
{
"epoch": 2.04,
"learning_rate": 7.6e-05,
"loss": 2.4955,
"step": 19000
},
{
"epoch": 2.14,
"learning_rate": 8e-05,
"loss": 2.3712,
"step": 20000
},
{
"epoch": 2.25,
"learning_rate": 8.4e-05,
"loss": 2.2723,
"step": 21000
},
{
"epoch": 2.36,
"learning_rate": 8.800000000000001e-05,
"loss": 2.1948,
"step": 22000
},
{
"epoch": 2.46,
"learning_rate": 9.200000000000001e-05,
"loss": 2.1228,
"step": 23000
},
{
"epoch": 2.57,
"learning_rate": 9.6e-05,
"loss": 2.0669,
"step": 24000
},
{
"epoch": 2.68,
"learning_rate": 0.0001,
"loss": 2.0114,
"step": 25000
},
{
"epoch": 2.79,
"learning_rate": 9.999890641901125e-05,
"loss": 1.9638,
"step": 26000
},
{
"epoch": 2.89,
"learning_rate": 9.99956257238817e-05,
"loss": 1.9188,
"step": 27000
},
{
"epoch": 3.0,
"learning_rate": 9.999015805811965e-05,
"loss": 1.8771,
"step": 28000
},
{
"epoch": 3.11,
"learning_rate": 9.998250366089848e-05,
"loss": 1.8311,
"step": 29000
},
{
"epoch": 3.21,
"learning_rate": 9.997266286704631e-05,
"loss": 1.8029,
"step": 30000
},
{
"epoch": 3.32,
"learning_rate": 9.996063610703137e-05,
"loss": 1.7731,
"step": 31000
},
{
"epoch": 3.43,
"learning_rate": 9.994642390694308e-05,
"loss": 1.7475,
"step": 32000
},
{
"epoch": 3.54,
"learning_rate": 9.993002688846913e-05,
"loss": 1.7192,
"step": 33000
},
{
"epoch": 3.64,
"learning_rate": 9.991144576886823e-05,
"loss": 1.6999,
"step": 34000
},
{
"epoch": 3.75,
"learning_rate": 9.989068136093873e-05,
"loss": 1.6824,
"step": 35000
},
{
"epoch": 3.86,
"learning_rate": 9.986773457298311e-05,
"loss": 1.6575,
"step": 36000
},
{
"epoch": 3.96,
"learning_rate": 9.984260640876821e-05,
"loss": 1.6386,
"step": 37000
},
{
"epoch": 4.07,
"learning_rate": 9.981529796748134e-05,
"loss": 1.6187,
"step": 38000
},
{
"epoch": 4.18,
"learning_rate": 9.97858104436822e-05,
"loss": 1.6023,
"step": 39000
},
{
"epoch": 4.29,
"learning_rate": 9.975414512725057e-05,
"loss": 1.5847,
"step": 40000
},
{
"epoch": 4.39,
"learning_rate": 9.972030340333001e-05,
"loss": 1.57,
"step": 41000
},
{
"epoch": 4.5,
"learning_rate": 9.968428675226714e-05,
"loss": 1.5602,
"step": 42000
},
{
"epoch": 4.61,
"learning_rate": 9.964609674954696e-05,
"loss": 1.5471,
"step": 43000
},
{
"epoch": 4.71,
"learning_rate": 9.96057350657239e-05,
"loss": 1.536,
"step": 44000
},
{
"epoch": 4.82,
"learning_rate": 9.956320346634876e-05,
"loss": 1.5244,
"step": 45000
},
{
"epoch": 4.93,
"learning_rate": 9.95185038118915e-05,
"loss": 1.5115,
"step": 46000
},
{
"epoch": 5.04,
"learning_rate": 9.94716380576598e-05,
"loss": 1.4992,
"step": 47000
},
{
"epoch": 5.14,
"learning_rate": 9.942260825371358e-05,
"loss": 1.4833,
"step": 48000
},
{
"epoch": 5.25,
"learning_rate": 9.937141654477528e-05,
"loss": 1.4766,
"step": 49000
},
{
"epoch": 5.36,
"learning_rate": 9.931806517013612e-05,
"loss": 1.4648,
"step": 50000
},
{
"epoch": 5.36,
"eval_loss": 1.2920496463775635,
"eval_runtime": 9.2098,
"eval_samples_per_second": 1085.8,
"eval_steps_per_second": 8.578,
"step": 50000
},
{
"epoch": 5.47,
"learning_rate": 9.926255646355804e-05,
"loss": 1.458,
"step": 51000
},
{
"epoch": 5.57,
"learning_rate": 9.92048928531717e-05,
"loss": 1.4494,
"step": 52000
},
{
"epoch": 5.68,
"learning_rate": 9.914507686137019e-05,
"loss": 1.446,
"step": 53000
},
{
"epoch": 5.79,
"learning_rate": 9.90831111046988e-05,
"loss": 1.4337,
"step": 54000
},
{
"epoch": 5.89,
"learning_rate": 9.901899829374047e-05,
"loss": 1.4313,
"step": 55000
},
{
"epoch": 6.0,
"learning_rate": 9.895274123299723e-05,
"loss": 1.419,
"step": 56000
},
{
"epoch": 6.11,
"learning_rate": 9.888434282076758e-05,
"loss": 1.4027,
"step": 57000
},
{
"epoch": 6.22,
"learning_rate": 9.881380604901964e-05,
"loss": 1.3996,
"step": 58000
},
{
"epoch": 6.32,
"learning_rate": 9.87411340032603e-05,
"loss": 1.395,
"step": 59000
},
{
"epoch": 6.43,
"learning_rate": 9.86663298624003e-05,
"loss": 1.3883,
"step": 60000
},
{
"epoch": 6.54,
"learning_rate": 9.858939689861506e-05,
"loss": 1.3806,
"step": 61000
},
{
"epoch": 6.64,
"learning_rate": 9.851033847720166e-05,
"loss": 1.3773,
"step": 62000
},
{
"epoch": 6.75,
"learning_rate": 9.842915805643155e-05,
"loss": 1.3697,
"step": 63000
},
{
"epoch": 6.86,
"learning_rate": 9.834585918739936e-05,
"loss": 1.367,
"step": 64000
},
{
"epoch": 6.97,
"learning_rate": 9.826044551386744e-05,
"loss": 1.3617,
"step": 65000
},
{
"epoch": 7.07,
"learning_rate": 9.817292077210659e-05,
"loss": 1.3513,
"step": 66000
},
{
"epoch": 7.18,
"learning_rate": 9.808328879073251e-05,
"loss": 1.3407,
"step": 67000
},
{
"epoch": 7.29,
"learning_rate": 9.799155349053851e-05,
"loss": 1.3358,
"step": 68000
},
{
"epoch": 7.39,
"learning_rate": 9.789771888432375e-05,
"loss": 1.3302,
"step": 69000
},
{
"epoch": 7.5,
"learning_rate": 9.780178907671789e-05,
"loss": 1.3295,
"step": 70000
},
{
"epoch": 7.61,
"learning_rate": 9.77037682640015e-05,
"loss": 1.3249,
"step": 71000
},
{
"epoch": 7.72,
"learning_rate": 9.760366073392246e-05,
"loss": 1.3204,
"step": 72000
},
{
"epoch": 7.82,
"learning_rate": 9.750147086550844e-05,
"loss": 1.3185,
"step": 73000
},
{
"epoch": 7.93,
"learning_rate": 9.739720312887535e-05,
"loss": 1.3126,
"step": 74000
},
{
"epoch": 8.04,
"learning_rate": 9.729086208503174e-05,
"loss": 1.3051,
"step": 75000
},
{
"epoch": 8.14,
"learning_rate": 9.718245238567939e-05,
"loss": 1.2962,
"step": 76000
},
{
"epoch": 8.25,
"learning_rate": 9.707197877300974e-05,
"loss": 1.2906,
"step": 77000
},
{
"epoch": 8.36,
"learning_rate": 9.695944607949649e-05,
"loss": 1.2888,
"step": 78000
},
{
"epoch": 8.47,
"learning_rate": 9.684485922768422e-05,
"loss": 1.2903,
"step": 79000
},
{
"epoch": 8.57,
"learning_rate": 9.672822322997305e-05,
"loss": 1.2837,
"step": 80000
},
{
"epoch": 8.68,
"learning_rate": 9.660954318839933e-05,
"loss": 1.2823,
"step": 81000
},
{
"epoch": 8.79,
"learning_rate": 9.648882429441257e-05,
"loss": 1.2746,
"step": 82000
},
{
"epoch": 8.89,
"learning_rate": 9.636607182864827e-05,
"loss": 1.2721,
"step": 83000
},
{
"epoch": 9.0,
"learning_rate": 9.624129116069694e-05,
"loss": 1.2696,
"step": 84000
},
{
"epoch": 9.11,
"learning_rate": 9.611448774886924e-05,
"loss": 1.2589,
"step": 85000
},
{
"epoch": 9.22,
"learning_rate": 9.598566713995718e-05,
"loss": 1.255,
"step": 86000
},
{
"epoch": 9.32,
"learning_rate": 9.58548349689915e-05,
"loss": 1.2547,
"step": 87000
},
{
"epoch": 9.43,
"learning_rate": 9.572199695899522e-05,
"loss": 1.2502,
"step": 88000
},
{
"epoch": 9.54,
"learning_rate": 9.558715892073323e-05,
"loss": 1.2513,
"step": 89000
},
{
"epoch": 9.64,
"learning_rate": 9.545032675245813e-05,
"loss": 1.2441,
"step": 90000
},
{
"epoch": 9.75,
"learning_rate": 9.531150643965223e-05,
"loss": 1.2447,
"step": 91000
},
{
"epoch": 9.86,
"learning_rate": 9.517070405476575e-05,
"loss": 1.2386,
"step": 92000
},
{
"epoch": 9.97,
"learning_rate": 9.502792575695112e-05,
"loss": 1.2364,
"step": 93000
},
{
"epoch": 10.07,
"learning_rate": 9.488317779179361e-05,
"loss": 1.227,
"step": 94000
},
{
"epoch": 10.18,
"learning_rate": 9.473646649103818e-05,
"loss": 1.2196,
"step": 95000
},
{
"epoch": 10.29,
"learning_rate": 9.458779827231237e-05,
"loss": 1.2226,
"step": 96000
},
{
"epoch": 10.39,
"learning_rate": 9.443717963884569e-05,
"loss": 1.2182,
"step": 97000
},
{
"epoch": 10.5,
"learning_rate": 9.428461717918511e-05,
"loss": 1.2235,
"step": 98000
},
{
"epoch": 10.61,
"learning_rate": 9.413011756690685e-05,
"loss": 1.2172,
"step": 99000
},
{
"epoch": 10.72,
"learning_rate": 9.397368756032445e-05,
"loss": 1.2165,
"step": 100000
},
{
"epoch": 10.72,
"eval_loss": 1.0625041723251343,
"eval_runtime": 3.5569,
"eval_samples_per_second": 2811.465,
"eval_steps_per_second": 22.211,
"step": 100000
},
{
"epoch": 10.82,
"learning_rate": 9.381533400219318e-05,
"loss": 1.2112,
"step": 101000
},
{
"epoch": 10.93,
"learning_rate": 9.365506381941066e-05,
"loss": 1.2081,
"step": 102000
},
{
"epoch": 11.04,
"learning_rate": 9.349288402271388e-05,
"loss": 1.2042,
"step": 103000
},
{
"epoch": 11.14,
"learning_rate": 9.332880170637252e-05,
"loss": 1.1959,
"step": 104000
},
{
"epoch": 11.25,
"learning_rate": 9.316282404787871e-05,
"loss": 1.197,
"step": 105000
},
{
"epoch": 11.36,
"learning_rate": 9.299495830763286e-05,
"loss": 1.1942,
"step": 106000
},
{
"epoch": 11.47,
"learning_rate": 9.282521182862629e-05,
"loss": 1.1893,
"step": 107000
},
{
"epoch": 11.57,
"learning_rate": 9.265359203611987e-05,
"loss": 1.1894,
"step": 108000
},
{
"epoch": 11.68,
"learning_rate": 9.248010643731935e-05,
"loss": 1.1883,
"step": 109000
},
{
"epoch": 11.79,
"learning_rate": 9.230476262104677e-05,
"loss": 1.1897,
"step": 110000
},
{
"epoch": 11.89,
"learning_rate": 9.212756825740873e-05,
"loss": 1.1865,
"step": 111000
},
{
"epoch": 12.0,
"learning_rate": 9.194853109746074e-05,
"loss": 1.1825,
"step": 112000
},
{
"epoch": 12.11,
"learning_rate": 9.176765897286813e-05,
"loss": 1.1734,
"step": 113000
},
{
"epoch": 12.22,
"learning_rate": 9.158495979556358e-05,
"loss": 1.1681,
"step": 114000
},
{
"epoch": 12.32,
"learning_rate": 9.140044155740101e-05,
"loss": 1.1722,
"step": 115000
},
{
"epoch": 12.43,
"learning_rate": 9.121411232980588e-05,
"loss": 1.1689,
"step": 116000
},
{
"epoch": 12.54,
"learning_rate": 9.102598026342222e-05,
"loss": 1.1681,
"step": 117000
},
{
"epoch": 12.64,
"learning_rate": 9.083605358775612e-05,
"loss": 1.1621,
"step": 118000
},
{
"epoch": 12.75,
"learning_rate": 9.064434061081562e-05,
"loss": 1.1648,
"step": 119000
},
{
"epoch": 12.86,
"learning_rate": 9.045084971874738e-05,
"loss": 1.1645,
"step": 120000
},
{
"epoch": 12.97,
"learning_rate": 9.025558937546988e-05,
"loss": 1.1624,
"step": 121000
},
{
"epoch": 13.07,
"learning_rate": 9.005856812230304e-05,
"loss": 1.1526,
"step": 122000
},
{
"epoch": 13.18,
"learning_rate": 8.98597945775948e-05,
"loss": 1.15,
"step": 123000
},
{
"epoch": 13.29,
"learning_rate": 8.965927743634391e-05,
"loss": 1.147,
"step": 124000
},
{
"epoch": 13.39,
"learning_rate": 8.945702546981969e-05,
"loss": 1.1507,
"step": 125000
},
{
"epoch": 13.5,
"learning_rate": 8.92530475251784e-05,
"loss": 1.1458,
"step": 126000
},
{
"epoch": 13.61,
"learning_rate": 8.90473525250761e-05,
"loss": 1.148,
"step": 127000
},
{
"epoch": 13.72,
"learning_rate": 8.883994946727849e-05,
"loss": 1.1441,
"step": 128000
},
{
"epoch": 13.82,
"learning_rate": 8.863084742426719e-05,
"loss": 1.1442,
"step": 129000
},
{
"epoch": 13.93,
"learning_rate": 8.842005554284296e-05,
"loss": 1.1413,
"step": 130000
},
{
"epoch": 14.04,
"learning_rate": 8.820758304372557e-05,
"loss": 1.1346,
"step": 131000
},
{
"epoch": 14.14,
"learning_rate": 8.799343922115044e-05,
"loss": 1.129,
"step": 132000
},
{
"epoch": 14.25,
"learning_rate": 8.77776334424621e-05,
"loss": 1.1293,
"step": 133000
},
{
"epoch": 14.36,
"learning_rate": 8.756017514770443e-05,
"loss": 1.1257,
"step": 134000
},
{
"epoch": 14.47,
"learning_rate": 8.73410738492077e-05,
"loss": 1.1296,
"step": 135000
},
{
"epoch": 14.57,
"learning_rate": 8.71203391311725e-05,
"loss": 1.1265,
"step": 136000
},
{
"epoch": 14.68,
"learning_rate": 8.689798064925049e-05,
"loss": 1.1283,
"step": 137000
},
{
"epoch": 14.79,
"learning_rate": 8.6674008130122e-05,
"loss": 1.1211,
"step": 138000
},
{
"epoch": 14.89,
"learning_rate": 8.644843137107059e-05,
"loss": 1.1206,
"step": 139000
},
{
"epoch": 15.0,
"learning_rate": 8.622126023955446e-05,
"loss": 1.1223,
"step": 140000
},
{
"epoch": 15.11,
"learning_rate": 8.599250467277483e-05,
"loss": 1.1146,
"step": 141000
},
{
"epoch": 15.22,
"learning_rate": 8.576217467724128e-05,
"loss": 1.1136,
"step": 142000
},
{
"epoch": 15.32,
"learning_rate": 8.553028032833397e-05,
"loss": 1.1111,
"step": 143000
},
{
"epoch": 15.43,
"learning_rate": 8.529683176986295e-05,
"loss": 1.1128,
"step": 144000
},
{
"epoch": 15.54,
"learning_rate": 8.506183921362443e-05,
"loss": 1.1126,
"step": 145000
},
{
"epoch": 15.65,
"learning_rate": 8.482531293895412e-05,
"loss": 1.1063,
"step": 146000
},
{
"epoch": 15.75,
"learning_rate": 8.458726329227747e-05,
"loss": 1.1081,
"step": 147000
},
{
"epoch": 15.86,
"learning_rate": 8.434770068665723e-05,
"loss": 1.1057,
"step": 148000
},
{
"epoch": 15.97,
"learning_rate": 8.410663560133784e-05,
"loss": 1.1041,
"step": 149000
},
{
"epoch": 16.07,
"learning_rate": 8.386407858128706e-05,
"loss": 1.0952,
"step": 150000
},
{
"epoch": 16.07,
"eval_loss": 0.9611279368400574,
"eval_runtime": 3.5721,
"eval_samples_per_second": 2799.479,
"eval_steps_per_second": 22.116,
"step": 150000
},
{
"epoch": 16.18,
"learning_rate": 8.362004023673474e-05,
"loss": 1.0963,
"step": 151000
},
{
"epoch": 16.29,
"learning_rate": 8.337453124270863e-05,
"loss": 1.0939,
"step": 152000
},
{
"epoch": 16.4,
"learning_rate": 8.31275623385675e-05,
"loss": 1.0942,
"step": 153000
},
{
"epoch": 16.5,
"learning_rate": 8.287914432753123e-05,
"loss": 1.0929,
"step": 154000
},
{
"epoch": 16.61,
"learning_rate": 8.262928807620843e-05,
"loss": 1.0931,
"step": 155000
},
{
"epoch": 16.72,
"learning_rate": 8.237800451412095e-05,
"loss": 1.0893,
"step": 156000
},
{
"epoch": 16.82,
"learning_rate": 8.212530463322583e-05,
"loss": 1.0904,
"step": 157000
},
{
"epoch": 16.93,
"learning_rate": 8.18711994874345e-05,
"loss": 1.0879,
"step": 158000
},
{
"epoch": 17.04,
"learning_rate": 8.161570019212921e-05,
"loss": 1.084,
"step": 159000
},
{
"epoch": 17.15,
"learning_rate": 8.135881792367686e-05,
"loss": 1.081,
"step": 160000
},
{
"epoch": 17.25,
"learning_rate": 8.110056391894005e-05,
"loss": 1.0787,
"step": 161000
},
{
"epoch": 17.36,
"learning_rate": 8.084094947478556e-05,
"loss": 1.0776,
"step": 162000
},
{
"epoch": 17.47,
"learning_rate": 8.057998594759022e-05,
"loss": 1.0782,
"step": 163000
},
{
"epoch": 17.57,
"learning_rate": 8.031768475274413e-05,
"loss": 1.076,
"step": 164000
},
{
"epoch": 17.68,
"learning_rate": 8.005405736415126e-05,
"loss": 1.0744,
"step": 165000
},
{
"epoch": 17.79,
"learning_rate": 7.978911531372765e-05,
"loss": 1.0757,
"step": 166000
},
{
"epoch": 17.9,
"learning_rate": 7.952287019089685e-05,
"loss": 1.0768,
"step": 167000
},
{
"epoch": 18.0,
"learning_rate": 7.925533364208309e-05,
"loss": 1.0705,
"step": 168000
},
{
"epoch": 18.11,
"learning_rate": 7.898651737020166e-05,
"loss": 1.0634,
"step": 169000
},
{
"epoch": 18.22,
"learning_rate": 7.871643313414718e-05,
"loss": 1.0654,
"step": 170000
},
{
"epoch": 18.32,
"learning_rate": 7.844509274827907e-05,
"loss": 1.0634,
"step": 171000
},
{
"epoch": 18.43,
"learning_rate": 7.817250808190483e-05,
"loss": 1.0643,
"step": 172000
},
{
"epoch": 18.54,
"learning_rate": 7.789869105876083e-05,
"loss": 1.0625,
"step": 173000
},
{
"epoch": 18.65,
"learning_rate": 7.762365365649067e-05,
"loss": 1.0637,
"step": 174000
},
{
"epoch": 18.75,
"learning_rate": 7.734740790612136e-05,
"loss": 1.0619,
"step": 175000
},
{
"epoch": 18.86,
"learning_rate": 7.70699658915369e-05,
"loss": 1.0625,
"step": 176000
},
{
"epoch": 18.97,
"learning_rate": 7.679133974894983e-05,
"loss": 1.0582,
"step": 177000
},
{
"epoch": 19.07,
"learning_rate": 7.651154166637025e-05,
"loss": 1.0488,
"step": 178000
},
{
"epoch": 19.18,
"learning_rate": 7.623058388307269e-05,
"loss": 1.0516,
"step": 179000
},
{
"epoch": 19.29,
"learning_rate": 7.594847868906076e-05,
"loss": 1.0497,
"step": 180000
},
{
"epoch": 19.4,
"learning_rate": 7.566523842452958e-05,
"loss": 1.0517,
"step": 181000
},
{
"epoch": 19.5,
"learning_rate": 7.538087547932585e-05,
"loss": 1.0523,
"step": 182000
},
{
"epoch": 19.61,
"learning_rate": 7.509540229240601e-05,
"loss": 1.0444,
"step": 183000
},
{
"epoch": 19.72,
"learning_rate": 7.480883135129211e-05,
"loss": 1.0482,
"step": 184000
},
{
"epoch": 19.82,
"learning_rate": 7.452117519152542e-05,
"loss": 1.0453,
"step": 185000
},
{
"epoch": 19.93,
"learning_rate": 7.423244639611826e-05,
"loss": 1.0433,
"step": 186000
},
{
"epoch": 20.04,
"learning_rate": 7.394265759500348e-05,
"loss": 1.0397,
"step": 187000
},
{
"epoch": 20.15,
"learning_rate": 7.365182146448205e-05,
"loss": 1.0356,
"step": 188000
},
{
"epoch": 20.25,
"learning_rate": 7.335995072666848e-05,
"loss": 1.0386,
"step": 189000
},
{
"epoch": 20.36,
"learning_rate": 7.30670581489344e-05,
"loss": 1.0348,
"step": 190000
},
{
"epoch": 20.47,
"learning_rate": 7.277315654334997e-05,
"loss": 1.0372,
"step": 191000
},
{
"epoch": 20.57,
"learning_rate": 7.247825876612353e-05,
"loss": 1.0352,
"step": 192000
},
{
"epoch": 20.68,
"learning_rate": 7.218237771703921e-05,
"loss": 1.0351,
"step": 193000
},
{
"epoch": 20.79,
"learning_rate": 7.188552633889259e-05,
"loss": 1.0346,
"step": 194000
},
{
"epoch": 20.9,
"learning_rate": 7.158771761692464e-05,
"loss": 1.031,
"step": 195000
},
{
"epoch": 21.0,
"learning_rate": 7.128896457825364e-05,
"loss": 1.0327,
"step": 196000
},
{
"epoch": 21.11,
"learning_rate": 7.09892802913053e-05,
"loss": 1.0235,
"step": 197000
},
{
"epoch": 21.22,
"learning_rate": 7.068867786524116e-05,
"loss": 1.0244,
"step": 198000
},
{
"epoch": 21.32,
"learning_rate": 7.038717044938519e-05,
"loss": 1.024,
"step": 199000
},
{
"epoch": 21.43,
"learning_rate": 7.008477123264848e-05,
"loss": 1.0233,
"step": 200000
},
{
"epoch": 21.43,
"eval_loss": 0.8930683732032776,
"eval_runtime": 3.6639,
"eval_samples_per_second": 2729.338,
"eval_steps_per_second": 21.562,
"step": 200000
},
{
"epoch": 21.54,
"learning_rate": 6.978149344295242e-05,
"loss": 1.0246,
"step": 201000
},
{
"epoch": 21.65,
"learning_rate": 6.947735034665002e-05,
"loss": 1.024,
"step": 202000
},
{
"epoch": 21.75,
"learning_rate": 6.917235524794558e-05,
"loss": 1.0221,
"step": 203000
},
{
"epoch": 21.86,
"learning_rate": 6.886652148831279e-05,
"loss": 1.0196,
"step": 204000
},
{
"epoch": 21.97,
"learning_rate": 6.855986244591104e-05,
"loss": 1.0175,
"step": 205000
},
{
"epoch": 22.07,
"learning_rate": 6.825239153500029e-05,
"loss": 1.0148,
"step": 206000
},
{
"epoch": 22.18,
"learning_rate": 6.794412220535426e-05,
"loss": 1.0104,
"step": 207000
},
{
"epoch": 22.29,
"learning_rate": 6.763506794167208e-05,
"loss": 1.0129,
"step": 208000
},
{
"epoch": 22.4,
"learning_rate": 6.732524226298841e-05,
"loss": 1.0107,
"step": 209000
},
{
"epoch": 22.5,
"learning_rate": 6.701465872208216e-05,
"loss": 1.0067,
"step": 210000
},
{
"epoch": 22.61,
"learning_rate": 6.670333090488356e-05,
"loss": 1.0115,
"step": 211000
},
{
"epoch": 22.72,
"learning_rate": 6.639127242987988e-05,
"loss": 1.0115,
"step": 212000
},
{
"epoch": 22.82,
"learning_rate": 6.607849694751977e-05,
"loss": 1.0124,
"step": 213000
},
{
"epoch": 22.93,
"learning_rate": 6.576501813961609e-05,
"loss": 1.0047,
"step": 214000
},
{
"epoch": 23.04,
"learning_rate": 6.545084971874738e-05,
"loss": 1.0056,
"step": 215000
},
{
"epoch": 23.15,
"learning_rate": 6.513600542765817e-05,
"loss": 1.0023,
"step": 216000
},
{
"epoch": 23.25,
"learning_rate": 6.48204990386577e-05,
"loss": 1.0016,
"step": 217000
},
{
"epoch": 23.36,
"learning_rate": 6.450434435301751e-05,
"loss": 0.9974,
"step": 218000
},
{
"epoch": 23.47,
"learning_rate": 6.418755520036775e-05,
"loss": 0.9979,
"step": 219000
},
{
"epoch": 23.57,
"learning_rate": 6.387014543809223e-05,
"loss": 0.9999,
"step": 220000
},
{
"epoch": 23.68,
"learning_rate": 6.355212895072223e-05,
"loss": 0.9965,
"step": 221000
},
{
"epoch": 23.79,
"learning_rate": 6.323351964932908e-05,
"loss": 0.9979,
"step": 222000
},
{
"epoch": 23.9,
"learning_rate": 6.291433147091583e-05,
"loss": 0.9948,
"step": 223000
},
{
"epoch": 24.0,
"learning_rate": 6.259457837780742e-05,
"loss": 0.9938,
"step": 224000
},
{
"epoch": 24.11,
"learning_rate": 6.227427435703997e-05,
"loss": 0.9888,
"step": 225000
},
{
"epoch": 24.22,
"learning_rate": 6.195343341974899e-05,
"loss": 0.9886,
"step": 226000
},
{
"epoch": 24.32,
"learning_rate": 6.163206960055651e-05,
"loss": 0.9878,
"step": 227000
},
{
"epoch": 24.43,
"learning_rate": 6.131019695695702e-05,
"loss": 0.985,
"step": 228000
},
{
"epoch": 24.54,
"learning_rate": 6.0987829568702656e-05,
"loss": 0.9861,
"step": 229000
},
{
"epoch": 24.65,
"learning_rate": 6.066498153718735e-05,
"loss": 0.9883,
"step": 230000
},
{
"epoch": 24.75,
"learning_rate": 6.034166698482984e-05,
"loss": 0.9854,
"step": 231000
},
{
"epoch": 24.86,
"learning_rate": 6.001790005445607e-05,
"loss": 0.9864,
"step": 232000
},
{
"epoch": 24.97,
"learning_rate": 5.969369490868042e-05,
"loss": 0.9869,
"step": 233000
},
{
"epoch": 25.08,
"learning_rate": 5.9369065729286245e-05,
"loss": 0.9796,
"step": 234000
},
{
"epoch": 25.18,
"learning_rate": 5.90440267166055e-05,
"loss": 0.978,
"step": 235000
},
{
"epoch": 25.29,
"learning_rate": 5.871859208889759e-05,
"loss": 0.9777,
"step": 236000
},
{
"epoch": 25.4,
"learning_rate": 5.8392776081727385e-05,
"loss": 0.9759,
"step": 237000
},
{
"epoch": 25.5,
"learning_rate": 5.8066592947342555e-05,
"loss": 0.9801,
"step": 238000
},
{
"epoch": 25.61,
"learning_rate": 5.7740056954050084e-05,
"loss": 0.9785,
"step": 239000
},
{
"epoch": 25.72,
"learning_rate": 5.74131823855921e-05,
"loss": 0.9735,
"step": 240000
},
{
"epoch": 25.83,
"learning_rate": 5.7085983540521216e-05,
"loss": 0.9753,
"step": 241000
},
{
"epoch": 25.93,
"learning_rate": 5.675847473157485e-05,
"loss": 0.9762,
"step": 242000
},
{
"epoch": 26.04,
"learning_rate": 5.6430670285049314e-05,
"loss": 0.969,
"step": 243000
},
{
"epoch": 26.15,
"learning_rate": 5.6102584540173006e-05,
"loss": 0.9661,
"step": 244000
},
{
"epoch": 26.25,
"learning_rate": 5.577423184847932e-05,
"loss": 0.965,
"step": 245000
},
{
"epoch": 26.36,
"learning_rate": 5.544562657317863e-05,
"loss": 0.965,
"step": 246000
},
{
"epoch": 26.47,
"learning_rate": 5.511678308853026e-05,
"loss": 0.9665,
"step": 247000
},
{
"epoch": 26.58,
"learning_rate": 5.478771577921351e-05,
"loss": 0.9618,
"step": 248000
},
{
"epoch": 26.68,
"learning_rate": 5.445843903969854e-05,
"loss": 0.968,
"step": 249000
},
{
"epoch": 26.79,
"learning_rate": 5.4128967273616625e-05,
"loss": 0.963,
"step": 250000
},
{
"epoch": 26.79,
"eval_loss": 0.8477376103401184,
"eval_runtime": 3.6783,
"eval_samples_per_second": 2718.681,
"eval_steps_per_second": 21.478,
"step": 250000
},
{
"epoch": 26.9,
"learning_rate": 5.379931489313016e-05,
"loss": 0.9618,
"step": 251000
},
{
"epoch": 27.0,
"learning_rate": 5.3469496318302204e-05,
"loss": 0.9646,
"step": 252000
},
{
"epoch": 27.11,
"learning_rate": 5.313952597646568e-05,
"loss": 0.955,
"step": 253000
},
{
"epoch": 27.22,
"learning_rate": 5.280941830159227e-05,
"loss": 0.953,
"step": 254000
},
{
"epoch": 27.33,
"learning_rate": 5.247918773366112e-05,
"loss": 0.9562,
"step": 255000
},
{
"epoch": 27.43,
"learning_rate": 5.214884871802703e-05,
"loss": 0.9564,
"step": 256000
},
{
"epoch": 27.54,
"learning_rate": 5.1818415704788725e-05,
"loss": 0.9559,
"step": 257000
},
{
"epoch": 27.65,
"learning_rate": 5.148790314815663e-05,
"loss": 0.957,
"step": 258000
},
{
"epoch": 27.75,
"learning_rate": 5.1157325505820694e-05,
"loss": 0.9576,
"step": 259000
},
{
"epoch": 27.86,
"learning_rate": 5.0826697238317935e-05,
"loss": 0.9539,
"step": 260000
},
{
"epoch": 27.97,
"learning_rate": 5.0496032808399815e-05,
"loss": 0.9522,
"step": 261000
},
{
"epoch": 28.08,
"learning_rate": 5.016534668039976e-05,
"loss": 0.9492,
"step": 262000
},
{
"epoch": 28.18,
"learning_rate": 4.9834653319600246e-05,
"loss": 0.9456,
"step": 263000
},
{
"epoch": 28.29,
"learning_rate": 4.950396719160018e-05,
"loss": 0.9452,
"step": 264000
},
{
"epoch": 28.4,
"learning_rate": 4.917330276168208e-05,
"loss": 0.9462,
"step": 265000
},
{
"epoch": 28.5,
"learning_rate": 4.884267449417931e-05,
"loss": 0.9448,
"step": 266000
},
{
"epoch": 28.61,
"learning_rate": 4.851209685184338e-05,
"loss": 0.9447,
"step": 267000
},
{
"epoch": 28.72,
"learning_rate": 4.818158429521129e-05,
"loss": 0.9445,
"step": 268000
},
{
"epoch": 28.83,
"learning_rate": 4.785115128197298e-05,
"loss": 0.9431,
"step": 269000
},
{
"epoch": 28.93,
"learning_rate": 4.7520812266338885e-05,
"loss": 0.9447,
"step": 270000
},
{
"epoch": 29.04,
"learning_rate": 4.7190581698407725e-05,
"loss": 0.9404,
"step": 271000
},
{
"epoch": 29.15,
"learning_rate": 4.6860474023534335e-05,
"loss": 0.937,
"step": 272000
},
{
"epoch": 29.25,
"learning_rate": 4.65305036816978e-05,
"loss": 0.9381,
"step": 273000
},
{
"epoch": 29.36,
"learning_rate": 4.620068510686985e-05,
"loss": 0.9389,
"step": 274000
},
{
"epoch": 29.47,
"learning_rate": 4.5871032726383386e-05,
"loss": 0.9342,
"step": 275000
},
{
"epoch": 29.58,
"learning_rate": 4.554156096030149e-05,
"loss": 0.9332,
"step": 276000
},
{
"epoch": 29.68,
"learning_rate": 4.5212284220786494e-05,
"loss": 0.9361,
"step": 277000
},
{
"epoch": 29.79,
"learning_rate": 4.488321691146975e-05,
"loss": 0.9342,
"step": 278000
},
{
"epoch": 29.9,
"learning_rate": 4.4554373426821374e-05,
"loss": 0.9341,
"step": 279000
},
{
"epoch": 30.0,
"learning_rate": 4.4225768151520694e-05,
"loss": 0.9331,
"step": 280000
},
{
"epoch": 30.11,
"learning_rate": 4.3897415459827e-05,
"loss": 0.9289,
"step": 281000
},
{
"epoch": 30.22,
"learning_rate": 4.3569329714950704e-05,
"loss": 0.9279,
"step": 282000
},
{
"epoch": 30.33,
"learning_rate": 4.324152526842517e-05,
"loss": 0.9297,
"step": 283000
},
{
"epoch": 30.43,
"learning_rate": 4.291401645947879e-05,
"loss": 0.9254,
"step": 284000
},
{
"epoch": 30.54,
"learning_rate": 4.2586817614407895e-05,
"loss": 0.925,
"step": 285000
},
{
"epoch": 30.65,
"learning_rate": 4.2259943045949934e-05,
"loss": 0.9247,
"step": 286000
},
{
"epoch": 30.75,
"learning_rate": 4.1933407052657456e-05,
"loss": 0.9272,
"step": 287000
},
{
"epoch": 30.86,
"learning_rate": 4.160722391827262e-05,
"loss": 0.9247,
"step": 288000
},
{
"epoch": 30.97,
"learning_rate": 4.1281407911102425e-05,
"loss": 0.9237,
"step": 289000
},
{
"epoch": 31.08,
"learning_rate": 4.095597328339452e-05,
"loss": 0.9201,
"step": 290000
},
{
"epoch": 31.18,
"learning_rate": 4.063093427071376e-05,
"loss": 0.9181,
"step": 291000
},
{
"epoch": 31.29,
"learning_rate": 4.0306305091319595e-05,
"loss": 0.917,
"step": 292000
},
{
"epoch": 31.4,
"learning_rate": 3.9982099945543945e-05,
"loss": 0.9171,
"step": 293000
},
{
"epoch": 31.5,
"learning_rate": 3.965833301517017e-05,
"loss": 0.9179,
"step": 294000
},
{
"epoch": 31.61,
"learning_rate": 3.933501846281267e-05,
"loss": 0.9143,
"step": 295000
},
{
"epoch": 31.72,
"learning_rate": 3.901217043129735e-05,
"loss": 0.9151,
"step": 296000
},
{
"epoch": 31.83,
"learning_rate": 3.8689803043043e-05,
"loss": 0.9197,
"step": 297000
},
{
"epoch": 31.93,
"learning_rate": 3.836793039944349e-05,
"loss": 0.9147,
"step": 298000
},
{
"epoch": 32.04,
"learning_rate": 3.8046566580251e-05,
"loss": 0.9117,
"step": 299000
},
{
"epoch": 32.15,
"learning_rate": 3.772572564296005e-05,
"loss": 0.9122,
"step": 300000
},
{
"epoch": 32.15,
"eval_loss": 0.8167890906333923,
"eval_runtime": 3.704,
"eval_samples_per_second": 2699.813,
"eval_steps_per_second": 21.329,
"step": 300000
},
{
"epoch": 32.25,
"learning_rate": 3.74054216221926e-05,
"loss": 0.9088,
"step": 301000
},
{
"epoch": 32.36,
"learning_rate": 3.7085668529084184e-05,
"loss": 0.9104,
"step": 302000
},
{
"epoch": 32.47,
"learning_rate": 3.676648035067093e-05,
"loss": 0.9114,
"step": 303000
},
{
"epoch": 32.58,
"learning_rate": 3.6447871049277796e-05,
"loss": 0.9061,
"step": 304000
},
{
"epoch": 32.68,
"learning_rate": 3.612985456190778e-05,
"loss": 0.9081,
"step": 305000
},
{
"epoch": 32.79,
"learning_rate": 3.581244479963225e-05,
"loss": 0.9049,
"step": 306000
},
{
"epoch": 32.9,
"learning_rate": 3.5495655646982505e-05,
"loss": 0.9086,
"step": 307000
},
{
"epoch": 33.0,
"learning_rate": 3.517950096134232e-05,
"loss": 0.9074,
"step": 308000
},
{
"epoch": 33.11,
"learning_rate": 3.4863994572341843e-05,
"loss": 0.9008,
"step": 309000
},
{
"epoch": 33.22,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.9009,
"step": 310000
},
{
"epoch": 33.33,
"learning_rate": 3.423498186038393e-05,
"loss": 0.9014,
"step": 311000
},
{
"epoch": 33.43,
"learning_rate": 3.392150305248024e-05,
"loss": 0.9039,
"step": 312000
},
{
"epoch": 33.54,
"learning_rate": 3.360872757012011e-05,
"loss": 0.8986,
"step": 313000
},
{
"epoch": 33.65,
"learning_rate": 3.329666909511645e-05,
"loss": 0.8996,
"step": 314000
},
{
"epoch": 33.75,
"learning_rate": 3.298534127791785e-05,
"loss": 0.8973,
"step": 315000
},
{
"epoch": 33.86,
"learning_rate": 3.267475773701161e-05,
"loss": 0.8984,
"step": 316000
},
{
"epoch": 33.97,
"learning_rate": 3.236493205832795e-05,
"loss": 0.8967,
"step": 317000
},
{
"epoch": 34.08,
"learning_rate": 3.205587779464576e-05,
"loss": 0.8951,
"step": 318000
},
{
"epoch": 34.18,
"learning_rate": 3.1747608464999725e-05,
"loss": 0.8916,
"step": 319000
},
{
"epoch": 34.29,
"learning_rate": 3.144013755408895e-05,
"loss": 0.8941,
"step": 320000
},
{
"epoch": 34.4,
"learning_rate": 3.113347851168721e-05,
"loss": 0.8918,
"step": 321000
},
{
"epoch": 34.5,
"learning_rate": 3.082764475205442e-05,
"loss": 0.8925,
"step": 322000
},
{
"epoch": 34.61,
"learning_rate": 3.052264965335e-05,
"loss": 0.889,
"step": 323000
},
{
"epoch": 34.72,
"learning_rate": 3.0218506557047598e-05,
"loss": 0.8916,
"step": 324000
},
{
"epoch": 34.83,
"learning_rate": 2.991522876735154e-05,
"loss": 0.8928,
"step": 325000
},
{
"epoch": 34.93,
"learning_rate": 2.9612829550614836e-05,
"loss": 0.8918,
"step": 326000
},
{
"epoch": 35.04,
"learning_rate": 2.931132213475884e-05,
"loss": 0.8902,
"step": 327000
},
{
"epoch": 35.15,
"learning_rate": 2.9010719708694722e-05,
"loss": 0.8852,
"step": 328000
},
{
"epoch": 35.26,
"learning_rate": 2.8711035421746367e-05,
"loss": 0.8843,
"step": 329000
},
{
"epoch": 35.36,
"learning_rate": 2.8412282383075363e-05,
"loss": 0.886,
"step": 330000
},
{
"epoch": 35.47,
"learning_rate": 2.811447366110741e-05,
"loss": 0.8837,
"step": 331000
},
{
"epoch": 35.58,
"learning_rate": 2.7817622282960815e-05,
"loss": 0.885,
"step": 332000
},
{
"epoch": 35.68,
"learning_rate": 2.7521741233876496e-05,
"loss": 0.8844,
"step": 333000
},
{
"epoch": 35.79,
"learning_rate": 2.7226843456650037e-05,
"loss": 0.8826,
"step": 334000
},
{
"epoch": 35.9,
"learning_rate": 2.693294185106562e-05,
"loss": 0.8828,
"step": 335000
},
{
"epoch": 36.01,
"learning_rate": 2.6640049273331515e-05,
"loss": 0.8807,
"step": 336000
},
{
"epoch": 36.11,
"learning_rate": 2.6348178535517966e-05,
"loss": 0.8773,
"step": 337000
},
{
"epoch": 36.22,
"learning_rate": 2.6057342404996522e-05,
"loss": 0.8771,
"step": 338000
},
{
"epoch": 36.33,
"learning_rate": 2.5767553603881767e-05,
"loss": 0.8762,
"step": 339000
},
{
"epoch": 36.43,
"learning_rate": 2.547882480847461e-05,
"loss": 0.8746,
"step": 340000
},
{
"epoch": 36.54,
"learning_rate": 2.5191168648707887e-05,
"loss": 0.8759,
"step": 341000
},
{
"epoch": 36.65,
"learning_rate": 2.490459770759398e-05,
"loss": 0.8769,
"step": 342000
},
{
"epoch": 36.76,
"learning_rate": 2.4619124520674146e-05,
"loss": 0.8786,
"step": 343000
},
{
"epoch": 36.86,
"learning_rate": 2.433476157547044e-05,
"loss": 0.8784,
"step": 344000
},
{
"epoch": 36.97,
"learning_rate": 2.405152131093926e-05,
"loss": 0.877,
"step": 345000
},
{
"epoch": 37.08,
"learning_rate": 2.3769416116927335e-05,
"loss": 0.8728,
"step": 346000
},
{
"epoch": 37.18,
"learning_rate": 2.3488458333629777e-05,
"loss": 0.8707,
"step": 347000
},
{
"epoch": 37.29,
"learning_rate": 2.3208660251050158e-05,
"loss": 0.8703,
"step": 348000
},
{
"epoch": 37.4,
"learning_rate": 2.29300341084631e-05,
"loss": 0.873,
"step": 349000
},
{
"epoch": 37.51,
"learning_rate": 2.2652592093878666e-05,
"loss": 0.8697,
"step": 350000
},
{
"epoch": 37.51,
"eval_loss": 0.7835835218429565,
"eval_runtime": 3.6871,
"eval_samples_per_second": 2712.188,
"eval_steps_per_second": 21.426,
"step": 350000
},
{
"epoch": 37.61,
"learning_rate": 2.237634634350934e-05,
"loss": 0.8737,
"step": 351000
},
{
"epoch": 37.72,
"learning_rate": 2.2101308941239203e-05,
"loss": 0.8695,
"step": 352000
},
{
"epoch": 37.83,
"learning_rate": 2.182749191809518e-05,
"loss": 0.8693,
"step": 353000
},
{
"epoch": 37.93,
"learning_rate": 2.1554907251720945e-05,
"loss": 0.8692,
"step": 354000
},
{
"epoch": 38.04,
"learning_rate": 2.128356686585282e-05,
"loss": 0.868,
"step": 355000
},
{
"epoch": 38.15,
"learning_rate": 2.1013482629798333e-05,
"loss": 0.8634,
"step": 356000
},
{
"epoch": 38.26,
"learning_rate": 2.0744666357916925e-05,
"loss": 0.8643,
"step": 357000
},
{
"epoch": 38.36,
"learning_rate": 2.0477129809103147e-05,
"loss": 0.8639,
"step": 358000
},
{
"epoch": 38.47,
"learning_rate": 2.0210884686272368e-05,
"loss": 0.8648,
"step": 359000
},
{
"epoch": 38.58,
"learning_rate": 1.9945942635848748e-05,
"loss": 0.8632,
"step": 360000
},
{
"epoch": 38.68,
"learning_rate": 1.9682315247255894e-05,
"loss": 0.8643,
"step": 361000
},
{
"epoch": 38.79,
"learning_rate": 1.942001405240979e-05,
"loss": 0.8632,
"step": 362000
},
{
"epoch": 38.9,
"learning_rate": 1.9159050525214452e-05,
"loss": 0.862,
"step": 363000
},
{
"epoch": 39.01,
"learning_rate": 1.8899436081059975e-05,
"loss": 0.8619,
"step": 364000
},
{
"epoch": 39.11,
"learning_rate": 1.8641182076323148e-05,
"loss": 0.8599,
"step": 365000
},
{
"epoch": 39.22,
"learning_rate": 1.838429980787081e-05,
"loss": 0.8579,
"step": 366000
},
{
"epoch": 39.33,
"learning_rate": 1.8128800512565513e-05,
"loss": 0.8569,
"step": 367000
},
{
"epoch": 39.43,
"learning_rate": 1.787469536677419e-05,
"loss": 0.8547,
"step": 368000
},
{
"epoch": 39.54,
"learning_rate": 1.7621995485879062e-05,
"loss": 0.8572,
"step": 369000
},
{
"epoch": 39.65,
"learning_rate": 1.7370711923791567e-05,
"loss": 0.8573,
"step": 370000
},
{
"epoch": 39.76,
"learning_rate": 1.712085567246878e-05,
"loss": 0.8573,
"step": 371000
},
{
"epoch": 39.86,
"learning_rate": 1.6872437661432517e-05,
"loss": 0.8587,
"step": 372000
},
{
"epoch": 39.97,
"learning_rate": 1.662546875729138e-05,
"loss": 0.8558,
"step": 373000
},
{
"epoch": 40.08,
"learning_rate": 1.637995976326527e-05,
"loss": 0.8561,
"step": 374000
},
{
"epoch": 40.18,
"learning_rate": 1.6135921418712956e-05,
"loss": 0.8545,
"step": 375000
},
{
"epoch": 40.29,
"learning_rate": 1.5893364398662176e-05,
"loss": 0.8508,
"step": 376000
},
{
"epoch": 40.4,
"learning_rate": 1.5652299313342773e-05,
"loss": 0.8536,
"step": 377000
},
{
"epoch": 40.51,
"learning_rate": 1.5412736707722537e-05,
"loss": 0.8533,
"step": 378000
},
{
"epoch": 40.61,
"learning_rate": 1.517468706104589e-05,
"loss": 0.851,
"step": 379000
},
{
"epoch": 40.72,
"learning_rate": 1.4938160786375572e-05,
"loss": 0.8529,
"step": 380000
},
{
"epoch": 40.83,
"learning_rate": 1.470316823013707e-05,
"loss": 0.8526,
"step": 381000
},
{
"epoch": 40.93,
"learning_rate": 1.4469719671666043e-05,
"loss": 0.8523,
"step": 382000
},
{
"epoch": 41.04,
"learning_rate": 1.4237825322758736e-05,
"loss": 0.8494,
"step": 383000
},
{
"epoch": 41.15,
"learning_rate": 1.4007495327225162e-05,
"loss": 0.8488,
"step": 384000
},
{
"epoch": 41.26,
"learning_rate": 1.3778739760445552e-05,
"loss": 0.8486,
"step": 385000
},
{
"epoch": 41.36,
"learning_rate": 1.3551568628929434e-05,
"loss": 0.8481,
"step": 386000
},
{
"epoch": 41.47,
"learning_rate": 1.3325991869878013e-05,
"loss": 0.8509,
"step": 387000
},
{
"epoch": 41.58,
"learning_rate": 1.3102019350749528e-05,
"loss": 0.8467,
"step": 388000
},
{
"epoch": 41.68,
"learning_rate": 1.2879660868827508e-05,
"loss": 0.8477,
"step": 389000
},
{
"epoch": 41.79,
"learning_rate": 1.2658926150792322e-05,
"loss": 0.8461,
"step": 390000
},
{
"epoch": 41.9,
"learning_rate": 1.243982485229559e-05,
"loss": 0.8477,
"step": 391000
},
{
"epoch": 42.01,
"learning_rate": 1.2222366557537911e-05,
"loss": 0.8464,
"step": 392000
},
{
"epoch": 42.11,
"learning_rate": 1.2006560778849578e-05,
"loss": 0.8419,
"step": 393000
},
{
"epoch": 42.22,
"learning_rate": 1.1792416956274444e-05,
"loss": 0.8407,
"step": 394000
},
{
"epoch": 42.33,
"learning_rate": 1.157994445715706e-05,
"loss": 0.8424,
"step": 395000
},
{
"epoch": 42.43,
"learning_rate": 1.1369152575732822e-05,
"loss": 0.843,
"step": 396000
},
{
"epoch": 42.54,
"learning_rate": 1.1160050532721528e-05,
"loss": 0.842,
"step": 397000
},
{
"epoch": 42.65,
"learning_rate": 1.095264747492391e-05,
"loss": 0.8443,
"step": 398000
},
{
"epoch": 42.76,
"learning_rate": 1.0746952474821614e-05,
"loss": 0.8418,
"step": 399000
},
{
"epoch": 42.86,
"learning_rate": 1.0542974530180327e-05,
"loss": 0.8397,
"step": 400000
},
{
"epoch": 42.86,
"eval_loss": 0.7559537291526794,
"eval_runtime": 3.6973,
"eval_samples_per_second": 2704.676,
"eval_steps_per_second": 21.367,
"step": 400000
},
{
"epoch": 42.97,
"learning_rate": 1.0340722563656107e-05,
"loss": 0.8447,
"step": 401000
},
{
"epoch": 43.08,
"learning_rate": 1.0140205422405214e-05,
"loss": 0.8393,
"step": 402000
},
{
"epoch": 43.18,
"learning_rate": 9.941431877696955e-06,
"loss": 0.8377,
"step": 403000
},
{
"epoch": 43.29,
"learning_rate": 9.744410624530148e-06,
"loss": 0.8376,
"step": 404000
},
{
"epoch": 43.4,
"learning_rate": 9.549150281252633e-06,
"loss": 0.8408,
"step": 405000
},
{
"epoch": 43.51,
"learning_rate": 9.355659389184396e-06,
"loss": 0.8359,
"step": 406000
},
{
"epoch": 43.61,
"learning_rate": 9.163946412243896e-06,
"loss": 0.839,
"step": 407000
},
{
"epoch": 43.72,
"learning_rate": 8.974019736577777e-06,
"loss": 0.8371,
"step": 408000
},
{
"epoch": 43.83,
"learning_rate": 8.785887670194138e-06,
"loss": 0.8377,
"step": 409000
},
{
"epoch": 43.93,
"learning_rate": 8.599558442598998e-06,
"loss": 0.8372,
"step": 410000
},
{
"epoch": 44.04,
"learning_rate": 8.415040204436426e-06,
"loss": 0.8369,
"step": 411000
},
{
"epoch": 44.15,
"learning_rate": 8.232341027131885e-06,
"loss": 0.8363,
"step": 412000
},
{
"epoch": 44.26,
"learning_rate": 8.051468902539272e-06,
"loss": 0.8337,
"step": 413000
},
{
"epoch": 44.36,
"learning_rate": 7.872431742591268e-06,
"loss": 0.8324,
"step": 414000
},
{
"epoch": 44.47,
"learning_rate": 7.695237378953223e-06,
"loss": 0.8361,
"step": 415000
},
{
"epoch": 44.58,
"learning_rate": 7.519893562680663e-06,
"loss": 0.834,
"step": 416000
},
{
"epoch": 44.68,
"learning_rate": 7.3464079638801365e-06,
"loss": 0.8323,
"step": 417000
},
{
"epoch": 44.79,
"learning_rate": 7.174788171373731e-06,
"loss": 0.8362,
"step": 418000
},
{
"epoch": 44.9,
"learning_rate": 7.005041692367154e-06,
"loss": 0.8325,
"step": 419000
},
{
"epoch": 45.01,
"learning_rate": 6.837175952121306e-06,
"loss": 0.8342,
"step": 420000
},
{
"epoch": 45.11,
"learning_rate": 6.671198293627479e-06,
"loss": 0.8323,
"step": 421000
},
{
"epoch": 45.22,
"learning_rate": 6.5071159772861436e-06,
"loss": 0.8331,
"step": 422000
},
{
"epoch": 45.33,
"learning_rate": 6.344936180589351e-06,
"loss": 0.8313,
"step": 423000
},
{
"epoch": 45.44,
"learning_rate": 6.184665997806832e-06,
"loss": 0.8296,
"step": 424000
},
{
"epoch": 45.54,
"learning_rate": 6.026312439675552e-06,
"loss": 0.8324,
"step": 425000
},
{
"epoch": 45.65,
"learning_rate": 5.869882433093155e-06,
"loss": 0.8315,
"step": 426000
},
{
"epoch": 45.76,
"learning_rate": 5.715382820814885e-06,
"loss": 0.8338,
"step": 427000
},
{
"epoch": 45.86,
"learning_rate": 5.562820361154314e-06,
"loss": 0.8319,
"step": 428000
},
{
"epoch": 45.97,
"learning_rate": 5.412201727687644e-06,
"loss": 0.8283,
"step": 429000
},
{
"epoch": 46.08,
"learning_rate": 5.263533508961827e-06,
"loss": 0.8304,
"step": 430000
},
{
"epoch": 46.19,
"learning_rate": 5.116822208206396e-06,
"loss": 0.8271,
"step": 431000
},
{
"epoch": 46.29,
"learning_rate": 4.972074243048897e-06,
"loss": 0.8288,
"step": 432000
},
{
"epoch": 46.4,
"learning_rate": 4.829295945234258e-06,
"loss": 0.8289,
"step": 433000
},
{
"epoch": 46.51,
"learning_rate": 4.688493560347773e-06,
"loss": 0.8284,
"step": 434000
},
{
"epoch": 46.61,
"learning_rate": 4.549673247541875e-06,
"loss": 0.8272,
"step": 435000
},
{
"epoch": 46.72,
"learning_rate": 4.412841079266777e-06,
"loss": 0.8288,
"step": 436000
},
{
"epoch": 46.83,
"learning_rate": 4.27800304100478e-06,
"loss": 0.8275,
"step": 437000
},
{
"epoch": 46.94,
"learning_rate": 4.145165031008508e-06,
"loss": 0.8273,
"step": 438000
},
{
"epoch": 47.04,
"learning_rate": 4.01433286004283e-06,
"loss": 0.8292,
"step": 439000
},
{
"epoch": 47.15,
"learning_rate": 3.885512251130763e-06,
"loss": 0.8222,
"step": 440000
},
{
"epoch": 47.26,
"learning_rate": 3.75870883930306e-06,
"loss": 0.829,
"step": 441000
},
{
"epoch": 47.36,
"learning_rate": 3.6339281713517303e-06,
"loss": 0.8273,
"step": 442000
},
{
"epoch": 47.47,
"learning_rate": 3.511175705587433e-06,
"loss": 0.8261,
"step": 443000
},
{
"epoch": 47.58,
"learning_rate": 3.390456811600673e-06,
"loss": 0.8272,
"step": 444000
},
{
"epoch": 47.69,
"learning_rate": 3.271776770026963e-06,
"loss": 0.8258,
"step": 445000
},
{
"epoch": 47.79,
"learning_rate": 3.155140772315773e-06,
"loss": 0.8261,
"step": 446000
},
{
"epoch": 47.9,
"learning_rate": 3.040553920503503e-06,
"loss": 0.8265,
"step": 447000
},
{
"epoch": 48.01,
"learning_rate": 2.928021226990263e-06,
"loss": 0.8239,
"step": 448000
},
{
"epoch": 48.11,
"learning_rate": 2.817547614320615e-06,
"loss": 0.8233,
"step": 449000
},
{
"epoch": 48.22,
"learning_rate": 2.7091379149682685e-06,
"loss": 0.8231,
"step": 450000
},
{
"epoch": 48.22,
"eval_loss": 0.7475652098655701,
"eval_runtime": 3.7,
"eval_samples_per_second": 2702.714,
"eval_steps_per_second": 21.351,
"step": 450000
},
{
"epoch": 48.33,
"learning_rate": 2.602796871124663e-06,
"loss": 0.824,
"step": 451000
},
{
"epoch": 48.44,
"learning_rate": 2.4985291344915674e-06,
"loss": 0.8261,
"step": 452000
},
{
"epoch": 48.54,
"learning_rate": 2.3963392660775575e-06,
"loss": 0.8208,
"step": 453000
},
{
"epoch": 48.65,
"learning_rate": 2.296231735998511e-06,
"loss": 0.8228,
"step": 454000
},
{
"epoch": 48.76,
"learning_rate": 2.1982109232821178e-06,
"loss": 0.8241,
"step": 455000
},
{
"epoch": 48.86,
"learning_rate": 2.102281115676258e-06,
"loss": 0.8267,
"step": 456000
},
{
"epoch": 48.97,
"learning_rate": 2.008446509461498e-06,
"loss": 0.8266,
"step": 457000
},
{
"epoch": 49.08,
"learning_rate": 1.91671120926748e-06,
"loss": 0.8227,
"step": 458000
},
{
"epoch": 49.19,
"learning_rate": 1.8270792278934302e-06,
"loss": 0.8195,
"step": 459000
},
{
"epoch": 49.29,
"learning_rate": 1.7395544861325718e-06,
"loss": 0.8222,
"step": 460000
},
{
"epoch": 49.4,
"learning_rate": 1.6541408126006463e-06,
"loss": 0.8213,
"step": 461000
},
{
"epoch": 49.51,
"learning_rate": 1.5708419435684462e-06,
"loss": 0.8233,
"step": 462000
},
{
"epoch": 49.61,
"learning_rate": 1.4896615227983468e-06,
"loss": 0.8216,
"step": 463000
},
{
"epoch": 49.72,
"learning_rate": 1.4106031013849496e-06,
"loss": 0.8221,
"step": 464000
},
{
"epoch": 49.83,
"learning_rate": 1.333670137599713e-06,
"loss": 0.8221,
"step": 465000
},
{
"epoch": 49.94,
"learning_rate": 1.2588659967397e-06,
"loss": 0.8223,
"step": 466000
},
{
"epoch": 50.04,
"learning_rate": 1.1861939509803687e-06,
"loss": 0.8229,
"step": 467000
},
{
"epoch": 50.15,
"learning_rate": 1.1156571792324211e-06,
"loss": 0.823,
"step": 468000
},
{
"epoch": 50.26,
"learning_rate": 1.0472587670027678e-06,
"loss": 0.8219,
"step": 469000
},
{
"epoch": 50.36,
"learning_rate": 9.810017062595322e-07,
"loss": 0.8225,
"step": 470000
},
{
"epoch": 50.47,
"learning_rate": 9.168888953011989e-07,
"loss": 0.8205,
"step": 471000
},
{
"epoch": 50.58,
"learning_rate": 8.549231386298151e-07,
"loss": 0.8215,
"step": 472000
},
{
"epoch": 50.69,
"learning_rate": 7.951071468283167e-07,
"loss": 0.8206,
"step": 473000
},
{
"epoch": 50.79,
"learning_rate": 7.374435364419674e-07,
"loss": 0.8203,
"step": 474000
},
{
"epoch": 50.9,
"learning_rate": 6.819348298638839e-07,
"loss": 0.8207,
"step": 475000
},
{
"epoch": 51.01,
"learning_rate": 6.285834552247128e-07,
"loss": 0.8208,
"step": 476000
},
{
"epoch": 51.11,
"learning_rate": 5.773917462864264e-07,
"loss": 0.8206,
"step": 477000
},
{
"epoch": 51.22,
"learning_rate": 5.283619423401998e-07,
"loss": 0.8198,
"step": 478000
},
{
"epoch": 51.33,
"learning_rate": 4.814961881085045e-07,
"loss": 0.8221,
"step": 479000
},
{
"epoch": 51.44,
"learning_rate": 4.367965336512403e-07,
"loss": 0.8208,
"step": 480000
},
{
"epoch": 51.54,
"learning_rate": 3.9426493427611177e-07,
"loss": 0.8195,
"step": 481000
},
{
"epoch": 51.65,
"learning_rate": 3.5390325045304706e-07,
"loss": 0.8217,
"step": 482000
},
{
"epoch": 51.76,
"learning_rate": 3.157132477328628e-07,
"loss": 0.8234,
"step": 483000
},
{
"epoch": 51.86,
"learning_rate": 2.796965966699927e-07,
"loss": 0.8223,
"step": 484000
},
{
"epoch": 51.97,
"learning_rate": 2.458548727494292e-07,
"loss": 0.8225,
"step": 485000
},
{
"epoch": 52.08,
"learning_rate": 2.1418955631781202e-07,
"loss": 0.8207,
"step": 486000
},
{
"epoch": 52.19,
"learning_rate": 1.847020325186577e-07,
"loss": 0.8193,
"step": 487000
},
{
"epoch": 52.29,
"learning_rate": 1.5739359123178587e-07,
"loss": 0.8181,
"step": 488000
},
{
"epoch": 52.4,
"learning_rate": 1.3226542701689215e-07,
"loss": 0.8194,
"step": 489000
},
{
"epoch": 52.51,
"learning_rate": 1.0931863906127327e-07,
"loss": 0.8213,
"step": 490000
},
{
"epoch": 52.61,
"learning_rate": 8.855423113177664e-08,
"loss": 0.8214,
"step": 491000
},
{
"epoch": 52.72,
"learning_rate": 6.997311153086883e-08,
"loss": 0.8228,
"step": 492000
},
{
"epoch": 52.83,
"learning_rate": 5.3576093056922906e-08,
"loss": 0.8235,
"step": 493000
},
{
"epoch": 52.94,
"learning_rate": 3.936389296864129e-08,
"loss": 0.8207,
"step": 494000
},
{
"epoch": 53.04,
"learning_rate": 2.7337132953697554e-08,
"loss": 0.8212,
"step": 495000
},
{
"epoch": 53.15,
"learning_rate": 1.749633910153592e-08,
"loss": 0.8231,
"step": 496000
},
{
"epoch": 53.26,
"learning_rate": 9.841941880361916e-09,
"loss": 0.8191,
"step": 497000
},
{
"epoch": 53.36,
"learning_rate": 4.3742761183018784e-09,
"loss": 0.8217,
"step": 498000
},
{
"epoch": 53.47,
"learning_rate": 1.0935809887702154e-09,
"loss": 0.8196,
"step": 499000
},
{
"epoch": 53.58,
"learning_rate": 0.0,
"loss": 0.8207,
"step": 500000
},
{
"epoch": 53.58,
"eval_loss": 0.7243289351463318,
"eval_runtime": 3.5788,
"eval_samples_per_second": 2794.204,
"eval_steps_per_second": 22.074,
"step": 500000
},
{
"epoch": 53.58,
"step": 500000,
"total_flos": 1.052837412864e+18,
"train_loss": 1.193239736328125,
"train_runtime": 102111.9968,
"train_samples_per_second": 1253.526,
"train_steps_per_second": 4.897
}
],
"max_steps": 500000,
"num_train_epochs": 54,
"total_flos": 1.052837412864e+18,
"trial_name": null,
"trial_params": null
}