{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 500,
"global_step": 371900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"grad_norm": 1.1065584421157837,
"learning_rate": 3.125e-06,
"loss": 7.5986,
"step": 1000
},
{
"epoch": 0.11,
"grad_norm": 0.6919564604759216,
"learning_rate": 6.25e-06,
"loss": 5.8259,
"step": 2000
},
{
"epoch": 0.16,
"grad_norm": 0.7977421283721924,
"learning_rate": 9.375000000000001e-06,
"loss": 5.3849,
"step": 3000
},
{
"epoch": 0.22,
"grad_norm": 1.386985421180725,
"learning_rate": 1.25e-05,
"loss": 5.1627,
"step": 4000
},
{
"epoch": 0.27,
"grad_norm": 0.9054310917854309,
"learning_rate": 1.5625e-05,
"loss": 5.005,
"step": 5000
},
{
"epoch": 0.32,
"grad_norm": 1.0550600290298462,
"learning_rate": 1.8750000000000002e-05,
"loss": 4.8627,
"step": 6000
},
{
"epoch": 0.38,
"grad_norm": 1.0669718980789185,
"learning_rate": 2.1875e-05,
"loss": 4.7454,
"step": 7000
},
{
"epoch": 0.43,
"grad_norm": 1.0990982055664062,
"learning_rate": 2.5e-05,
"loss": 4.6486,
"step": 8000
},
{
"epoch": 0.48,
"grad_norm": 1.0567301511764526,
"learning_rate": 2.8125000000000003e-05,
"loss": 4.5585,
"step": 9000
},
{
"epoch": 0.54,
"grad_norm": 1.1454998254776,
"learning_rate": 3.125e-05,
"loss": 4.4828,
"step": 10000
},
{
"epoch": 0.59,
"grad_norm": 1.0922003984451294,
"learning_rate": 3.4375e-05,
"loss": 4.4099,
"step": 11000
},
{
"epoch": 0.65,
"grad_norm": 1.0760829448699951,
"learning_rate": 3.7500000000000003e-05,
"loss": 4.3435,
"step": 12000
},
{
"epoch": 0.7,
"grad_norm": 1.0779520273208618,
"learning_rate": 4.0621875e-05,
"loss": 4.2871,
"step": 13000
},
{
"epoch": 0.75,
"grad_norm": 1.0053024291992188,
"learning_rate": 4.374375e-05,
"loss": 4.2362,
"step": 14000
},
{
"epoch": 0.81,
"grad_norm": 1.0250402688980103,
"learning_rate": 4.686875e-05,
"loss": 4.1841,
"step": 15000
},
{
"epoch": 0.86,
"grad_norm": 1.025078296661377,
"learning_rate": 4.999375e-05,
"loss": 4.1362,
"step": 16000
},
{
"epoch": 0.91,
"grad_norm": 1.020418643951416,
"learning_rate": 5.311875000000001e-05,
"loss": 4.091,
"step": 17000
},
{
"epoch": 0.97,
"grad_norm": 1.023269772529602,
"learning_rate": 5.6240625e-05,
"loss": 4.0524,
"step": 18000
},
{
"epoch": 1.0,
"eval_accuracy": 0.30844237989493534,
"eval_loss": 4.28109073638916,
"eval_runtime": 151.382,
"eval_samples_per_second": 382.595,
"eval_steps_per_second": 5.978,
"step": 18595
},
{
"epoch": 1.02,
"grad_norm": 1.0309756994247437,
"learning_rate": 5.9365625e-05,
"loss": 4.015,
"step": 19000
},
{
"epoch": 1.08,
"grad_norm": 0.9628090262413025,
"learning_rate": 6.24875e-05,
"loss": 3.969,
"step": 20000
},
{
"epoch": 1.13,
"grad_norm": 1.0119906663894653,
"learning_rate": 6.560937500000001e-05,
"loss": 3.933,
"step": 21000
},
{
"epoch": 1.18,
"grad_norm": 0.9792044758796692,
"learning_rate": 6.8734375e-05,
"loss": 3.8956,
"step": 22000
},
{
"epoch": 1.24,
"grad_norm": 1.0101176500320435,
"learning_rate": 7.185312499999999e-05,
"loss": 3.8586,
"step": 23000
},
{
"epoch": 1.29,
"grad_norm": 0.9678108096122742,
"learning_rate": 7.4978125e-05,
"loss": 3.8302,
"step": 24000
},
{
"epoch": 1.34,
"grad_norm": 0.9615768194198608,
"learning_rate": 7.8103125e-05,
"loss": 3.801,
"step": 25000
},
{
"epoch": 1.4,
"grad_norm": 0.9395524859428406,
"learning_rate": 8.1221875e-05,
"loss": 3.7804,
"step": 26000
},
{
"epoch": 1.45,
"grad_norm": 0.8997763395309448,
"learning_rate": 8.434687500000001e-05,
"loss": 3.7585,
"step": 27000
},
{
"epoch": 1.51,
"grad_norm": 0.9433351159095764,
"learning_rate": 8.746875e-05,
"loss": 3.732,
"step": 28000
},
{
"epoch": 1.56,
"grad_norm": 0.8686715960502625,
"learning_rate": 9.059375e-05,
"loss": 3.7131,
"step": 29000
},
{
"epoch": 1.61,
"grad_norm": 0.9142166376113892,
"learning_rate": 9.3715625e-05,
"loss": 3.6941,
"step": 30000
},
{
"epoch": 1.67,
"grad_norm": 0.9287761449813843,
"learning_rate": 9.684062500000001e-05,
"loss": 3.6698,
"step": 31000
},
{
"epoch": 1.72,
"grad_norm": 0.8525364995002747,
"learning_rate": 9.99625e-05,
"loss": 3.6574,
"step": 32000
},
{
"epoch": 1.77,
"grad_norm": 0.8378422260284424,
"learning_rate": 9.970932627243307e-05,
"loss": 3.6384,
"step": 33000
},
{
"epoch": 1.83,
"grad_norm": 0.8161050081253052,
"learning_rate": 9.941512209473375e-05,
"loss": 3.6184,
"step": 34000
},
{
"epoch": 1.88,
"grad_norm": 0.8268817067146301,
"learning_rate": 9.912121212121213e-05,
"loss": 3.5973,
"step": 35000
},
{
"epoch": 1.94,
"grad_norm": 0.7921673059463501,
"learning_rate": 9.882700794351281e-05,
"loss": 3.5878,
"step": 36000
},
{
"epoch": 1.99,
"grad_norm": 0.7795915007591248,
"learning_rate": 9.853309796999118e-05,
"loss": 3.5716,
"step": 37000
},
{
"epoch": 2.0,
"eval_accuracy": 0.36151083317444777,
"eval_loss": 3.7749860286712646,
"eval_runtime": 153.0787,
"eval_samples_per_second": 378.354,
"eval_steps_per_second": 5.912,
"step": 37190
},
{
"epoch": 2.04,
"grad_norm": 0.816111147403717,
"learning_rate": 9.823889379229185e-05,
"loss": 3.5338,
"step": 38000
},
{
"epoch": 2.1,
"grad_norm": 0.8356756567955017,
"learning_rate": 9.794498381877023e-05,
"loss": 3.5187,
"step": 39000
},
{
"epoch": 2.15,
"grad_norm": 0.8443105220794678,
"learning_rate": 9.765077964107092e-05,
"loss": 3.5061,
"step": 40000
},
{
"epoch": 2.2,
"grad_norm": 0.7713498473167419,
"learning_rate": 9.735686966754929e-05,
"loss": 3.4944,
"step": 41000
},
{
"epoch": 2.26,
"grad_norm": 0.795734167098999,
"learning_rate": 9.706266548984995e-05,
"loss": 3.496,
"step": 42000
},
{
"epoch": 2.31,
"grad_norm": 0.8273535370826721,
"learning_rate": 9.676875551632834e-05,
"loss": 3.4833,
"step": 43000
},
{
"epoch": 2.37,
"grad_norm": 0.7914260625839233,
"learning_rate": 9.647455133862902e-05,
"loss": 3.4718,
"step": 44000
},
{
"epoch": 2.42,
"grad_norm": 0.782301664352417,
"learning_rate": 9.618064136510739e-05,
"loss": 3.46,
"step": 45000
},
{
"epoch": 2.47,
"grad_norm": 0.7781464457511902,
"learning_rate": 9.588643718740806e-05,
"loss": 3.4537,
"step": 46000
},
{
"epoch": 2.53,
"grad_norm": 0.774398148059845,
"learning_rate": 9.559223300970874e-05,
"loss": 3.4478,
"step": 47000
},
{
"epoch": 2.58,
"grad_norm": 0.7705032229423523,
"learning_rate": 9.529832303618712e-05,
"loss": 3.4398,
"step": 48000
},
{
"epoch": 2.64,
"grad_norm": 0.773110568523407,
"learning_rate": 9.50041188584878e-05,
"loss": 3.4366,
"step": 49000
},
{
"epoch": 2.69,
"grad_norm": 0.7716487646102905,
"learning_rate": 9.471050308914386e-05,
"loss": 3.4256,
"step": 50000
},
{
"epoch": 2.74,
"grad_norm": 0.7782188653945923,
"learning_rate": 9.441629891144455e-05,
"loss": 3.417,
"step": 51000
},
{
"epoch": 2.8,
"grad_norm": 0.7639435529708862,
"learning_rate": 9.412209473374523e-05,
"loss": 3.4123,
"step": 52000
},
{
"epoch": 2.85,
"grad_norm": 0.7292437553405762,
"learning_rate": 9.382789055604591e-05,
"loss": 3.405,
"step": 53000
},
{
"epoch": 2.9,
"grad_norm": 0.7682722210884094,
"learning_rate": 9.353368637834658e-05,
"loss": 3.399,
"step": 54000
},
{
"epoch": 2.96,
"grad_norm": 0.7343226671218872,
"learning_rate": 9.323977640482495e-05,
"loss": 3.3913,
"step": 55000
},
{
"epoch": 3.0,
"eval_accuracy": 0.3779394670823355,
"eval_loss": 3.600841760635376,
"eval_runtime": 153.5925,
"eval_samples_per_second": 377.089,
"eval_steps_per_second": 5.892,
"step": 55785
},
{
"epoch": 3.01,
"grad_norm": 0.7667288184165955,
"learning_rate": 9.294557222712563e-05,
"loss": 3.3741,
"step": 56000
},
{
"epoch": 3.07,
"grad_norm": 0.791042685508728,
"learning_rate": 9.265166225360401e-05,
"loss": 3.3318,
"step": 57000
},
{
"epoch": 3.12,
"grad_norm": 0.7432328462600708,
"learning_rate": 9.235745807590468e-05,
"loss": 3.3377,
"step": 58000
},
{
"epoch": 3.17,
"grad_norm": 0.7701253294944763,
"learning_rate": 9.206325389820536e-05,
"loss": 3.3324,
"step": 59000
},
{
"epoch": 3.23,
"grad_norm": 0.7645742297172546,
"learning_rate": 9.176963812886143e-05,
"loss": 3.327,
"step": 60000
},
{
"epoch": 3.28,
"grad_norm": 0.7636393904685974,
"learning_rate": 9.147543395116211e-05,
"loss": 3.3306,
"step": 61000
},
{
"epoch": 3.33,
"grad_norm": 0.7321722507476807,
"learning_rate": 9.118122977346278e-05,
"loss": 3.3303,
"step": 62000
},
{
"epoch": 3.39,
"grad_norm": 0.790723443031311,
"learning_rate": 9.088702559576346e-05,
"loss": 3.3243,
"step": 63000
},
{
"epoch": 3.44,
"grad_norm": 0.7484601140022278,
"learning_rate": 9.059311562224185e-05,
"loss": 3.3244,
"step": 64000
},
{
"epoch": 3.5,
"grad_norm": 0.746504008769989,
"learning_rate": 9.029891144454253e-05,
"loss": 3.3176,
"step": 65000
},
{
"epoch": 3.55,
"grad_norm": 0.7723560929298401,
"learning_rate": 9.000500147102089e-05,
"loss": 3.3123,
"step": 66000
},
{
"epoch": 3.6,
"grad_norm": 0.7322754859924316,
"learning_rate": 8.971079729332157e-05,
"loss": 3.3116,
"step": 67000
},
{
"epoch": 3.66,
"grad_norm": 0.7716720104217529,
"learning_rate": 8.941659311562225e-05,
"loss": 3.3073,
"step": 68000
},
{
"epoch": 3.71,
"grad_norm": 0.7449876666069031,
"learning_rate": 8.912268314210063e-05,
"loss": 3.3009,
"step": 69000
},
{
"epoch": 3.76,
"grad_norm": 0.7449329495429993,
"learning_rate": 8.88284789644013e-05,
"loss": 3.306,
"step": 70000
},
{
"epoch": 3.82,
"grad_norm": 0.701579213142395,
"learning_rate": 8.853427478670198e-05,
"loss": 3.2996,
"step": 71000
},
{
"epoch": 3.87,
"grad_norm": 0.7269728183746338,
"learning_rate": 8.824036481318035e-05,
"loss": 3.294,
"step": 72000
},
{
"epoch": 3.93,
"grad_norm": 0.7257309556007385,
"learning_rate": 8.794616063548102e-05,
"loss": 3.2955,
"step": 73000
},
{
"epoch": 3.98,
"grad_norm": 0.6977525353431702,
"learning_rate": 8.76522506619594e-05,
"loss": 3.2921,
"step": 74000
},
{
"epoch": 4.0,
"eval_accuracy": 0.3883133849640905,
"eval_loss": 3.5102479457855225,
"eval_runtime": 153.2986,
"eval_samples_per_second": 377.812,
"eval_steps_per_second": 5.904,
"step": 74380
},
{
"epoch": 4.03,
"grad_norm": 0.7187395691871643,
"learning_rate": 8.735804648426008e-05,
"loss": 3.2537,
"step": 75000
},
{
"epoch": 4.09,
"grad_norm": 0.7566428184509277,
"learning_rate": 8.706413651073846e-05,
"loss": 3.2417,
"step": 76000
},
{
"epoch": 4.14,
"grad_norm": 0.7315700650215149,
"learning_rate": 8.676993233303912e-05,
"loss": 3.2394,
"step": 77000
},
{
"epoch": 4.19,
"grad_norm": 0.7529075741767883,
"learning_rate": 8.64757281553398e-05,
"loss": 3.2415,
"step": 78000
},
{
"epoch": 4.25,
"grad_norm": 0.7442134022712708,
"learning_rate": 8.618152397764049e-05,
"loss": 3.2399,
"step": 79000
},
{
"epoch": 4.3,
"grad_norm": 0.7772579193115234,
"learning_rate": 8.588790820829656e-05,
"loss": 3.2352,
"step": 80000
},
{
"epoch": 4.36,
"grad_norm": 0.7373689413070679,
"learning_rate": 8.559370403059724e-05,
"loss": 3.2348,
"step": 81000
},
{
"epoch": 4.41,
"grad_norm": 0.7681490778923035,
"learning_rate": 8.529949985289791e-05,
"loss": 3.2282,
"step": 82000
},
{
"epoch": 4.46,
"grad_norm": 0.7327556014060974,
"learning_rate": 8.500529567519859e-05,
"loss": 3.2349,
"step": 83000
},
{
"epoch": 4.52,
"grad_norm": 0.7320055961608887,
"learning_rate": 8.471109149749927e-05,
"loss": 3.2342,
"step": 84000
},
{
"epoch": 4.57,
"grad_norm": 0.7526534199714661,
"learning_rate": 8.441718152397764e-05,
"loss": 3.2329,
"step": 85000
},
{
"epoch": 4.62,
"grad_norm": 0.7839016914367676,
"learning_rate": 8.412297734627832e-05,
"loss": 3.2371,
"step": 86000
},
{
"epoch": 4.68,
"grad_norm": 0.7079972624778748,
"learning_rate": 8.382906737275669e-05,
"loss": 3.2293,
"step": 87000
},
{
"epoch": 4.73,
"grad_norm": 0.7379385232925415,
"learning_rate": 8.353486319505737e-05,
"loss": 3.2315,
"step": 88000
},
{
"epoch": 4.79,
"grad_norm": 0.7435966730117798,
"learning_rate": 8.324065901735804e-05,
"loss": 3.2285,
"step": 89000
},
{
"epoch": 4.84,
"grad_norm": 0.7578746676445007,
"learning_rate": 8.294645483965872e-05,
"loss": 3.2233,
"step": 90000
},
{
"epoch": 4.89,
"grad_norm": 0.7228344678878784,
"learning_rate": 8.265254486613711e-05,
"loss": 3.2261,
"step": 91000
},
{
"epoch": 4.95,
"grad_norm": 0.7377974987030029,
"learning_rate": 8.235863489261548e-05,
"loss": 3.2283,
"step": 92000
},
{
"epoch": 5.0,
"eval_accuracy": 0.3929738045053096,
"eval_loss": 3.478940010070801,
"eval_runtime": 154.0106,
"eval_samples_per_second": 376.065,
"eval_steps_per_second": 5.876,
"step": 92975
},
{
"epoch": 5.0,
"grad_norm": 0.7944506406784058,
"learning_rate": 8.206443071491615e-05,
"loss": 3.2153,
"step": 93000
},
{
"epoch": 5.06,
"grad_norm": 0.752309262752533,
"learning_rate": 8.177022653721683e-05,
"loss": 3.16,
"step": 94000
},
{
"epoch": 5.11,
"grad_norm": 0.7849130630493164,
"learning_rate": 8.147631656369521e-05,
"loss": 3.172,
"step": 95000
},
{
"epoch": 5.16,
"grad_norm": 0.7549823522567749,
"learning_rate": 8.118211238599589e-05,
"loss": 3.1717,
"step": 96000
},
{
"epoch": 5.22,
"grad_norm": 0.761482834815979,
"learning_rate": 8.088790820829656e-05,
"loss": 3.1693,
"step": 97000
},
{
"epoch": 5.27,
"grad_norm": 0.7370353937149048,
"learning_rate": 8.059399823477494e-05,
"loss": 3.1749,
"step": 98000
},
{
"epoch": 5.32,
"grad_norm": 0.7203201651573181,
"learning_rate": 8.029979405707561e-05,
"loss": 3.1721,
"step": 99000
},
{
"epoch": 5.38,
"grad_norm": 0.7289775609970093,
"learning_rate": 8.0005884083554e-05,
"loss": 3.1809,
"step": 100000
},
{
"epoch": 5.43,
"grad_norm": 0.7284764051437378,
"learning_rate": 7.971167990585466e-05,
"loss": 3.1722,
"step": 101000
},
{
"epoch": 5.49,
"grad_norm": 0.7419421672821045,
"learning_rate": 7.941747572815534e-05,
"loss": 3.178,
"step": 102000
},
{
"epoch": 5.54,
"grad_norm": 0.7295576930046082,
"learning_rate": 7.912356575463373e-05,
"loss": 3.1777,
"step": 103000
},
{
"epoch": 5.59,
"grad_norm": 0.7482835054397583,
"learning_rate": 7.88293615769344e-05,
"loss": 3.178,
"step": 104000
},
{
"epoch": 5.65,
"grad_norm": 0.778439998626709,
"learning_rate": 7.853515739923508e-05,
"loss": 3.1735,
"step": 105000
},
{
"epoch": 5.7,
"grad_norm": 0.7204304337501526,
"learning_rate": 7.824124742571345e-05,
"loss": 3.1719,
"step": 106000
},
{
"epoch": 5.75,
"grad_norm": 0.7353498339653015,
"learning_rate": 7.794704324801413e-05,
"loss": 3.1761,
"step": 107000
},
{
"epoch": 5.81,
"grad_norm": 0.7248005867004395,
"learning_rate": 7.76528390703148e-05,
"loss": 3.1752,
"step": 108000
},
{
"epoch": 5.86,
"grad_norm": 0.7356109619140625,
"learning_rate": 7.735863489261548e-05,
"loss": 3.1754,
"step": 109000
},
{
"epoch": 5.92,
"grad_norm": 0.708113968372345,
"learning_rate": 7.706472491909386e-05,
"loss": 3.1744,
"step": 110000
},
{
"epoch": 5.97,
"grad_norm": 0.7147455215454102,
"learning_rate": 7.677052074139453e-05,
"loss": 3.1673,
"step": 111000
},
{
"epoch": 6.0,
"eval_accuracy": 0.3961948483989776,
"eval_loss": 3.4379465579986572,
"eval_runtime": 153.8126,
"eval_samples_per_second": 376.549,
"eval_steps_per_second": 5.884,
"step": 111570
},
{
"epoch": 6.02,
"grad_norm": 0.7331594824790955,
"learning_rate": 7.64766107678729e-05,
"loss": 3.1401,
"step": 112000
},
{
"epoch": 6.08,
"grad_norm": 0.7715177536010742,
"learning_rate": 7.618240659017358e-05,
"loss": 3.1162,
"step": 113000
},
{
"epoch": 6.13,
"grad_norm": 0.7466005682945251,
"learning_rate": 7.588820241247426e-05,
"loss": 3.1136,
"step": 114000
},
{
"epoch": 6.18,
"grad_norm": 0.734748899936676,
"learning_rate": 7.559399823477493e-05,
"loss": 3.1227,
"step": 115000
},
{
"epoch": 6.24,
"grad_norm": 0.7688254714012146,
"learning_rate": 7.530008826125331e-05,
"loss": 3.1252,
"step": 116000
},
{
"epoch": 6.29,
"grad_norm": 0.7620559930801392,
"learning_rate": 7.5005884083554e-05,
"loss": 3.1243,
"step": 117000
},
{
"epoch": 6.35,
"grad_norm": 0.7437037825584412,
"learning_rate": 7.471167990585468e-05,
"loss": 3.1286,
"step": 118000
},
{
"epoch": 6.4,
"grad_norm": 0.7445530295372009,
"learning_rate": 7.441747572815534e-05,
"loss": 3.1279,
"step": 119000
},
{
"epoch": 6.45,
"grad_norm": 0.7443315386772156,
"learning_rate": 7.412356575463371e-05,
"loss": 3.1314,
"step": 120000
},
{
"epoch": 6.51,
"grad_norm": 0.7500354647636414,
"learning_rate": 7.38293615769344e-05,
"loss": 3.1288,
"step": 121000
},
{
"epoch": 6.56,
"grad_norm": 0.7664729952812195,
"learning_rate": 7.353545160341278e-05,
"loss": 3.1307,
"step": 122000
},
{
"epoch": 6.61,
"grad_norm": 0.7711361646652222,
"learning_rate": 7.324154162989115e-05,
"loss": 3.132,
"step": 123000
},
{
"epoch": 6.67,
"grad_norm": 0.7488456964492798,
"learning_rate": 7.294733745219182e-05,
"loss": 3.1303,
"step": 124000
},
{
"epoch": 6.72,
"grad_norm": 0.7189005017280579,
"learning_rate": 7.26531332744925e-05,
"loss": 3.1285,
"step": 125000
},
{
"epoch": 6.78,
"grad_norm": 0.7578772902488708,
"learning_rate": 7.235922330097088e-05,
"loss": 3.1281,
"step": 126000
},
{
"epoch": 6.83,
"grad_norm": 0.7351284027099609,
"learning_rate": 7.206501912327155e-05,
"loss": 3.1305,
"step": 127000
},
{
"epoch": 6.88,
"grad_norm": 0.7461217641830444,
"learning_rate": 7.177081494557223e-05,
"loss": 3.1327,
"step": 128000
},
{
"epoch": 6.94,
"grad_norm": 0.7394551038742065,
"learning_rate": 7.14769049720506e-05,
"loss": 3.133,
"step": 129000
},
{
"epoch": 6.99,
"grad_norm": 0.7317260503768921,
"learning_rate": 7.118270079435128e-05,
"loss": 3.127,
"step": 130000
},
{
"epoch": 7.0,
"eval_accuracy": 0.39883100527338866,
"eval_loss": 3.41751766204834,
"eval_runtime": 153.2748,
"eval_samples_per_second": 377.87,
"eval_steps_per_second": 5.904,
"step": 130165
},
{
"epoch": 7.04,
"grad_norm": 0.7801479697227478,
"learning_rate": 7.088879082082967e-05,
"loss": 3.0813,
"step": 131000
},
{
"epoch": 7.1,
"grad_norm": 0.7538677453994751,
"learning_rate": 7.059458664313034e-05,
"loss": 3.077,
"step": 132000
},
{
"epoch": 7.15,
"grad_norm": 0.7728538513183594,
"learning_rate": 7.030038246543102e-05,
"loss": 3.0807,
"step": 133000
},
{
"epoch": 7.21,
"grad_norm": 0.7852578163146973,
"learning_rate": 7.00061782877317e-05,
"loss": 3.0811,
"step": 134000
},
{
"epoch": 7.26,
"grad_norm": 0.7735338807106018,
"learning_rate": 6.971226831421006e-05,
"loss": 3.0861,
"step": 135000
},
{
"epoch": 7.31,
"grad_norm": 0.7543830871582031,
"learning_rate": 6.941835834068844e-05,
"loss": 3.0846,
"step": 136000
},
{
"epoch": 7.37,
"grad_norm": 0.7399442195892334,
"learning_rate": 6.912415416298912e-05,
"loss": 3.0866,
"step": 137000
},
{
"epoch": 7.42,
"grad_norm": 0.7640860676765442,
"learning_rate": 6.88299499852898e-05,
"loss": 3.0893,
"step": 138000
},
{
"epoch": 7.48,
"grad_norm": 0.7997984886169434,
"learning_rate": 6.853574580759047e-05,
"loss": 3.0909,
"step": 139000
},
{
"epoch": 7.53,
"grad_norm": 0.7792380452156067,
"learning_rate": 6.824183583406884e-05,
"loss": 3.0939,
"step": 140000
},
{
"epoch": 7.58,
"grad_norm": 0.7403313517570496,
"learning_rate": 6.794763165636952e-05,
"loss": 3.0906,
"step": 141000
},
{
"epoch": 7.64,
"grad_norm": 0.7356191873550415,
"learning_rate": 6.765342747867019e-05,
"loss": 3.0909,
"step": 142000
},
{
"epoch": 7.69,
"grad_norm": 0.7950366735458374,
"learning_rate": 6.735951750514857e-05,
"loss": 3.0965,
"step": 143000
},
{
"epoch": 7.74,
"grad_norm": 0.7580872774124146,
"learning_rate": 6.706531332744925e-05,
"loss": 3.0922,
"step": 144000
},
{
"epoch": 7.8,
"grad_norm": 0.7285676002502441,
"learning_rate": 6.677140335392763e-05,
"loss": 3.0952,
"step": 145000
},
{
"epoch": 7.85,
"grad_norm": 0.730719268321991,
"learning_rate": 6.64771991762283e-05,
"loss": 3.0943,
"step": 146000
},
{
"epoch": 7.91,
"grad_norm": 0.7722078561782837,
"learning_rate": 6.618299499852897e-05,
"loss": 3.0957,
"step": 147000
},
{
"epoch": 7.96,
"grad_norm": 0.7444345951080322,
"learning_rate": 6.588879082082966e-05,
"loss": 3.0935,
"step": 148000
},
{
"epoch": 8.0,
"eval_accuracy": 0.39977229312392953,
"eval_loss": 3.4271562099456787,
"eval_runtime": 154.0268,
"eval_samples_per_second": 376.025,
"eval_steps_per_second": 5.876,
"step": 148760
},
{
"epoch": 8.01,
"grad_norm": 0.7648381590843201,
"learning_rate": 6.559488084730804e-05,
"loss": 3.0798,
"step": 149000
},
{
"epoch": 8.07,
"grad_norm": 0.746784508228302,
"learning_rate": 6.530067666960871e-05,
"loss": 3.0327,
"step": 150000
},
{
"epoch": 8.12,
"grad_norm": 0.7783890962600708,
"learning_rate": 6.500647249190939e-05,
"loss": 3.0398,
"step": 151000
},
{
"epoch": 8.17,
"grad_norm": 0.7425867915153503,
"learning_rate": 6.471226831421007e-05,
"loss": 3.0455,
"step": 152000
},
{
"epoch": 8.23,
"grad_norm": 0.7634230852127075,
"learning_rate": 6.441835834068844e-05,
"loss": 3.0529,
"step": 153000
},
{
"epoch": 8.28,
"grad_norm": 0.747742235660553,
"learning_rate": 6.412415416298911e-05,
"loss": 3.0487,
"step": 154000
},
{
"epoch": 8.34,
"grad_norm": 0.7804195284843445,
"learning_rate": 6.383024418946749e-05,
"loss": 3.0545,
"step": 155000
},
{
"epoch": 8.39,
"grad_norm": 0.7713324427604675,
"learning_rate": 6.353604001176817e-05,
"loss": 3.06,
"step": 156000
},
{
"epoch": 8.44,
"grad_norm": 0.7601629495620728,
"learning_rate": 6.324183583406885e-05,
"loss": 3.0537,
"step": 157000
},
{
"epoch": 8.5,
"grad_norm": 0.7745895385742188,
"learning_rate": 6.294792586054723e-05,
"loss": 3.0605,
"step": 158000
},
{
"epoch": 8.55,
"grad_norm": 0.7613347172737122,
"learning_rate": 6.265372168284789e-05,
"loss": 3.0573,
"step": 159000
},
{
"epoch": 8.6,
"grad_norm": 0.7753309607505798,
"learning_rate": 6.235981170932628e-05,
"loss": 3.0627,
"step": 160000
},
{
"epoch": 8.66,
"grad_norm": 0.7721776962280273,
"learning_rate": 6.206560753162696e-05,
"loss": 3.0612,
"step": 161000
},
{
"epoch": 8.71,
"grad_norm": 0.8062002062797546,
"learning_rate": 6.177140335392763e-05,
"loss": 3.0622,
"step": 162000
},
{
"epoch": 8.77,
"grad_norm": 0.7599053978919983,
"learning_rate": 6.147719917622831e-05,
"loss": 3.061,
"step": 163000
},
{
"epoch": 8.82,
"grad_norm": 0.7855945229530334,
"learning_rate": 6.118328920270668e-05,
"loss": 3.0638,
"step": 164000
},
{
"epoch": 8.87,
"grad_norm": 0.7659214735031128,
"learning_rate": 6.088908502500735e-05,
"loss": 3.0637,
"step": 165000
},
{
"epoch": 8.93,
"grad_norm": 0.7432619333267212,
"learning_rate": 6.059517505148573e-05,
"loss": 3.0661,
"step": 166000
},
{
"epoch": 8.98,
"grad_norm": 0.7428516745567322,
"learning_rate": 6.030097087378641e-05,
"loss": 3.0679,
"step": 167000
},
{
"epoch": 9.0,
"eval_accuracy": 0.4010507677364335,
"eval_loss": 3.4149951934814453,
"eval_runtime": 153.7947,
"eval_samples_per_second": 376.593,
"eval_steps_per_second": 5.884,
"step": 167355
},
{
"epoch": 9.03,
"grad_norm": 0.7959059476852417,
"learning_rate": 6.000676669608709e-05,
"loss": 3.0238,
"step": 168000
},
{
"epoch": 9.09,
"grad_norm": 0.7902527451515198,
"learning_rate": 5.971256251838776e-05,
"loss": 3.007,
"step": 169000
},
{
"epoch": 9.14,
"grad_norm": 0.7812909483909607,
"learning_rate": 5.941865254486614e-05,
"loss": 3.0162,
"step": 170000
},
{
"epoch": 9.2,
"grad_norm": 0.7669194936752319,
"learning_rate": 5.9124742571344514e-05,
"loss": 3.0162,
"step": 171000
},
{
"epoch": 9.25,
"grad_norm": 0.7774065136909485,
"learning_rate": 5.8830538393645195e-05,
"loss": 3.0219,
"step": 172000
},
{
"epoch": 9.3,
"grad_norm": 0.7773292064666748,
"learning_rate": 5.853662842012357e-05,
"loss": 3.0238,
"step": 173000
},
{
"epoch": 9.36,
"grad_norm": 0.7975197434425354,
"learning_rate": 5.824242424242424e-05,
"loss": 3.0246,
"step": 174000
},
{
"epoch": 9.41,
"grad_norm": 0.7579612731933594,
"learning_rate": 5.794822006472492e-05,
"loss": 3.0259,
"step": 175000
},
{
"epoch": 9.46,
"grad_norm": 0.7753346562385559,
"learning_rate": 5.76540158870256e-05,
"loss": 3.0287,
"step": 176000
},
{
"epoch": 9.52,
"grad_norm": 0.7583608031272888,
"learning_rate": 5.7360105913503966e-05,
"loss": 3.0281,
"step": 177000
},
{
"epoch": 9.57,
"grad_norm": 0.7782021760940552,
"learning_rate": 5.706590173580465e-05,
"loss": 3.0298,
"step": 178000
},
{
"epoch": 9.63,
"grad_norm": 0.7898275852203369,
"learning_rate": 5.677169755810533e-05,
"loss": 3.0384,
"step": 179000
},
{
"epoch": 9.68,
"grad_norm": 0.7844170928001404,
"learning_rate": 5.6477787584583706e-05,
"loss": 3.0341,
"step": 180000
},
{
"epoch": 9.73,
"grad_norm": 0.7562811374664307,
"learning_rate": 5.618387761106208e-05,
"loss": 3.0346,
"step": 181000
},
{
"epoch": 9.79,
"grad_norm": 0.7964505553245544,
"learning_rate": 5.588967343336276e-05,
"loss": 3.0364,
"step": 182000
},
{
"epoch": 9.84,
"grad_norm": 0.7780364751815796,
"learning_rate": 5.5595763459841135e-05,
"loss": 3.0376,
"step": 183000
},
{
"epoch": 9.9,
"grad_norm": 0.7689926028251648,
"learning_rate": 5.530155928214181e-05,
"loss": 3.0343,
"step": 184000
},
{
"epoch": 9.95,
"grad_norm": 0.7878050804138184,
"learning_rate": 5.5007355104442484e-05,
"loss": 3.0402,
"step": 185000
},
{
"epoch": 10.0,
"eval_accuracy": 0.4023324670878444,
"eval_loss": 3.414785146713257,
"eval_runtime": 153.4202,
"eval_samples_per_second": 377.512,
"eval_steps_per_second": 5.899,
"step": 185950
},
{
"epoch": 10.0,
"grad_norm": 0.7869421243667603,
"learning_rate": 5.471344513092086e-05,
"loss": 3.0355,
"step": 186000
},
{
"epoch": 10.06,
"grad_norm": 0.7897782325744629,
"learning_rate": 5.441924095322154e-05,
"loss": 2.9791,
"step": 187000
},
{
"epoch": 10.11,
"grad_norm": 0.810604989528656,
"learning_rate": 5.412503677552222e-05,
"loss": 2.9856,
"step": 188000
},
{
"epoch": 10.16,
"grad_norm": 0.802144467830658,
"learning_rate": 5.383112680200059e-05,
"loss": 2.9921,
"step": 189000
},
{
"epoch": 10.22,
"grad_norm": 0.8142128586769104,
"learning_rate": 5.353692262430127e-05,
"loss": 2.9952,
"step": 190000
},
{
"epoch": 10.27,
"grad_norm": 0.792819082736969,
"learning_rate": 5.3243012650779646e-05,
"loss": 2.9997,
"step": 191000
},
{
"epoch": 10.33,
"grad_norm": 0.7813856601715088,
"learning_rate": 5.294880847308033e-05,
"loss": 3.0017,
"step": 192000
},
{
"epoch": 10.38,
"grad_norm": 0.8088542222976685,
"learning_rate": 5.2654604295380995e-05,
"loss": 3.0005,
"step": 193000
},
{
"epoch": 10.43,
"grad_norm": 0.7641571164131165,
"learning_rate": 5.2360400117681677e-05,
"loss": 3.0007,
"step": 194000
},
{
"epoch": 10.49,
"grad_norm": 0.7936010956764221,
"learning_rate": 5.2066490144160054e-05,
"loss": 3.0038,
"step": 195000
},
{
"epoch": 10.54,
"grad_norm": 0.8503419756889343,
"learning_rate": 5.177258017063843e-05,
"loss": 3.0049,
"step": 196000
},
{
"epoch": 10.59,
"grad_norm": 0.7761277556419373,
"learning_rate": 5.14783759929391e-05,
"loss": 3.0079,
"step": 197000
},
{
"epoch": 10.65,
"grad_norm": 0.7853693962097168,
"learning_rate": 5.118417181523978e-05,
"loss": 3.0091,
"step": 198000
},
{
"epoch": 10.7,
"grad_norm": 0.7898029685020447,
"learning_rate": 5.089026184171816e-05,
"loss": 3.0047,
"step": 199000
},
{
"epoch": 10.76,
"grad_norm": 0.7760916948318481,
"learning_rate": 5.0596351868196535e-05,
"loss": 3.01,
"step": 200000
},
{
"epoch": 10.81,
"grad_norm": 0.8151899576187134,
"learning_rate": 5.03021476904972e-05,
"loss": 3.0079,
"step": 201000
},
{
"epoch": 10.86,
"grad_norm": 0.837149977684021,
"learning_rate": 5.0007943512797884e-05,
"loss": 3.0115,
"step": 202000
},
{
"epoch": 10.92,
"grad_norm": 0.7857736945152283,
"learning_rate": 4.971373933509856e-05,
"loss": 3.0123,
"step": 203000
},
{
"epoch": 10.97,
"grad_norm": 0.7611953020095825,
"learning_rate": 4.9419829361576935e-05,
"loss": 3.0082,
"step": 204000
},
{
"epoch": 11.0,
"eval_accuracy": 0.4029490640396869,
"eval_loss": 3.4169130325317383,
"eval_runtime": 153.2931,
"eval_samples_per_second": 377.825,
"eval_steps_per_second": 5.904,
"step": 204545
},
{
"epoch": 11.02,
"grad_norm": 0.816586971282959,
"learning_rate": 4.9125625183877617e-05,
"loss": 2.9829,
"step": 205000
},
{
"epoch": 11.08,
"grad_norm": 0.7916204929351807,
"learning_rate": 4.883171521035599e-05,
"loss": 2.9574,
"step": 206000
},
{
"epoch": 11.13,
"grad_norm": 0.8013209104537964,
"learning_rate": 4.853751103265667e-05,
"loss": 2.9652,
"step": 207000
},
{
"epoch": 11.19,
"grad_norm": 0.7968335747718811,
"learning_rate": 4.824330685495734e-05,
"loss": 2.9686,
"step": 208000
},
{
"epoch": 11.24,
"grad_norm": 0.8586769104003906,
"learning_rate": 4.794910267725802e-05,
"loss": 2.9722,
"step": 209000
},
{
"epoch": 11.29,
"grad_norm": 0.8471382260322571,
"learning_rate": 4.7655192703736395e-05,
"loss": 2.9747,
"step": 210000
},
{
"epoch": 11.35,
"grad_norm": 0.8427879810333252,
"learning_rate": 4.736098852603707e-05,
"loss": 2.9741,
"step": 211000
},
{
"epoch": 11.4,
"grad_norm": 0.8404529094696045,
"learning_rate": 4.706678434833775e-05,
"loss": 2.9773,
"step": 212000
},
{
"epoch": 11.45,
"grad_norm": 0.8058509826660156,
"learning_rate": 4.677287437481612e-05,
"loss": 2.9749,
"step": 213000
},
{
"epoch": 11.51,
"grad_norm": 0.8321877717971802,
"learning_rate": 4.64786701971168e-05,
"loss": 2.9794,
"step": 214000
},
{
"epoch": 11.56,
"grad_norm": 0.8273854851722717,
"learning_rate": 4.618476022359517e-05,
"loss": 2.9855,
"step": 215000
},
{
"epoch": 11.62,
"grad_norm": 0.8307694792747498,
"learning_rate": 4.589085025007355e-05,
"loss": 2.9862,
"step": 216000
},
{
"epoch": 11.67,
"grad_norm": 0.8184703588485718,
"learning_rate": 4.559694027655193e-05,
"loss": 2.9836,
"step": 217000
},
{
"epoch": 11.72,
"grad_norm": 0.7970172762870789,
"learning_rate": 4.530273609885261e-05,
"loss": 2.9843,
"step": 218000
},
{
"epoch": 11.78,
"grad_norm": 0.8030002117156982,
"learning_rate": 4.500853192115328e-05,
"loss": 2.9891,
"step": 219000
},
{
"epoch": 11.83,
"grad_norm": 0.795408308506012,
"learning_rate": 4.471432774345396e-05,
"loss": 2.987,
"step": 220000
},
{
"epoch": 11.88,
"grad_norm": 0.8084612488746643,
"learning_rate": 4.4420417769932335e-05,
"loss": 2.9898,
"step": 221000
},
{
"epoch": 11.94,
"grad_norm": 0.8061679005622864,
"learning_rate": 4.412650779641071e-05,
"loss": 2.9941,
"step": 222000
},
{
"epoch": 11.99,
"grad_norm": 0.8087071776390076,
"learning_rate": 4.383230361871139e-05,
"loss": 2.988,
"step": 223000
},
{
"epoch": 12.0,
"eval_accuracy": 0.4035294498608861,
"eval_loss": 3.412954568862915,
"eval_runtime": 153.0197,
"eval_samples_per_second": 378.5,
"eval_steps_per_second": 5.914,
"step": 223140
},
{
"epoch": 12.05,
"grad_norm": 0.850972056388855,
"learning_rate": 4.353809944101207e-05,
"loss": 2.9439,
"step": 224000
},
{
"epoch": 12.1,
"grad_norm": 0.8421295285224915,
"learning_rate": 4.324389526331274e-05,
"loss": 2.9402,
"step": 225000
},
{
"epoch": 12.15,
"grad_norm": 0.8112582564353943,
"learning_rate": 4.294998528979112e-05,
"loss": 2.9473,
"step": 226000
},
{
"epoch": 12.21,
"grad_norm": 0.8302417397499084,
"learning_rate": 4.2655781112091794e-05,
"loss": 2.9491,
"step": 227000
},
{
"epoch": 12.26,
"grad_norm": 0.8474385738372803,
"learning_rate": 4.2361576934392475e-05,
"loss": 2.9484,
"step": 228000
},
{
"epoch": 12.32,
"grad_norm": 0.8351003527641296,
"learning_rate": 4.206737275669315e-05,
"loss": 2.9529,
"step": 229000
},
{
"epoch": 12.37,
"grad_norm": 0.85118168592453,
"learning_rate": 4.177346278317153e-05,
"loss": 2.9562,
"step": 230000
},
{
"epoch": 12.42,
"grad_norm": 0.8224354982376099,
"learning_rate": 4.14792586054722e-05,
"loss": 2.9539,
"step": 231000
},
{
"epoch": 12.48,
"grad_norm": 0.8389358520507812,
"learning_rate": 4.1185054427772876e-05,
"loss": 2.9581,
"step": 232000
},
{
"epoch": 12.53,
"grad_norm": 0.8302854895591736,
"learning_rate": 4.089114445425125e-05,
"loss": 2.9604,
"step": 233000
},
{
"epoch": 12.58,
"grad_norm": 0.8315780758857727,
"learning_rate": 4.0596940276551934e-05,
"loss": 2.9655,
"step": 234000
},
{
"epoch": 12.64,
"grad_norm": 0.8219565749168396,
"learning_rate": 4.030273609885261e-05,
"loss": 2.9651,
"step": 235000
},
{
"epoch": 12.69,
"grad_norm": 0.8112459778785706,
"learning_rate": 4.0008826125330986e-05,
"loss": 2.9647,
"step": 236000
},
{
"epoch": 12.75,
"grad_norm": 0.8309066295623779,
"learning_rate": 3.9714916151809357e-05,
"loss": 2.9637,
"step": 237000
},
{
"epoch": 12.8,
"grad_norm": 0.818537175655365,
"learning_rate": 3.942071197411004e-05,
"loss": 2.9648,
"step": 238000
},
{
"epoch": 12.85,
"grad_norm": 0.829467236995697,
"learning_rate": 3.912650779641071e-05,
"loss": 2.9676,
"step": 239000
},
{
"epoch": 12.91,
"grad_norm": 0.8041892647743225,
"learning_rate": 3.883230361871139e-05,
"loss": 2.9631,
"step": 240000
},
{
"epoch": 12.96,
"grad_norm": 0.8085007667541504,
"learning_rate": 3.8538393645189764e-05,
"loss": 2.967,
"step": 241000
},
{
"epoch": 13.0,
"eval_accuracy": 0.40449210160668586,
"eval_loss": 3.3969216346740723,
"eval_runtime": 154.1242,
"eval_samples_per_second": 375.788,
"eval_steps_per_second": 5.872,
"step": 241735
},
{
"epoch": 13.01,
"grad_norm": 0.8514643311500549,
"learning_rate": 3.824418946749044e-05,
"loss": 2.9558,
"step": 242000
},
{
"epoch": 13.07,
"grad_norm": 0.8870530724525452,
"learning_rate": 3.794998528979112e-05,
"loss": 2.9199,
"step": 243000
},
{
"epoch": 13.12,
"grad_norm": 0.8721070885658264,
"learning_rate": 3.765607531626949e-05,
"loss": 2.9276,
"step": 244000
},
{
"epoch": 13.18,
"grad_norm": 0.8669698238372803,
"learning_rate": 3.736187113857017e-05,
"loss": 2.9317,
"step": 245000
},
{
"epoch": 13.23,
"grad_norm": 0.8661383390426636,
"learning_rate": 3.7067666960870846e-05,
"loss": 2.9308,
"step": 246000
},
{
"epoch": 13.28,
"grad_norm": 0.860162079334259,
"learning_rate": 3.677346278317153e-05,
"loss": 2.9322,
"step": 247000
},
{
"epoch": 13.34,
"grad_norm": 0.8323468565940857,
"learning_rate": 3.64795528096499e-05,
"loss": 2.9336,
"step": 248000
},
{
"epoch": 13.39,
"grad_norm": 0.8697391748428345,
"learning_rate": 3.6185642836128275e-05,
"loss": 2.9342,
"step": 249000
},
{
"epoch": 13.44,
"grad_norm": 0.8538952469825745,
"learning_rate": 3.589143865842895e-05,
"loss": 2.9373,
"step": 250000
},
{
"epoch": 13.5,
"grad_norm": 0.848420262336731,
"learning_rate": 3.559723448072963e-05,
"loss": 2.9357,
"step": 251000
},
{
"epoch": 13.55,
"grad_norm": 0.8527274131774902,
"learning_rate": 3.5303324507208e-05,
"loss": 2.9394,
"step": 252000
},
{
"epoch": 13.61,
"grad_norm": 0.8854946494102478,
"learning_rate": 3.500912032950868e-05,
"loss": 2.9423,
"step": 253000
},
{
"epoch": 13.66,
"grad_norm": 0.8356419801712036,
"learning_rate": 3.471491615180936e-05,
"loss": 2.9412,
"step": 254000
},
{
"epoch": 13.71,
"grad_norm": 0.8529571890830994,
"learning_rate": 3.442071197411003e-05,
"loss": 2.9442,
"step": 255000
},
{
"epoch": 13.77,
"grad_norm": 0.8669166564941406,
"learning_rate": 3.412650779641071e-05,
"loss": 2.945,
"step": 256000
},
{
"epoch": 13.82,
"grad_norm": 0.8501659035682678,
"learning_rate": 3.3832892027066786e-05,
"loss": 2.9491,
"step": 257000
},
{
"epoch": 13.87,
"grad_norm": 0.8519834876060486,
"learning_rate": 3.353868784936746e-05,
"loss": 2.946,
"step": 258000
},
{
"epoch": 13.93,
"grad_norm": 0.8449692130088806,
"learning_rate": 3.324477787584584e-05,
"loss": 2.9439,
"step": 259000
},
{
"epoch": 13.98,
"grad_norm": 0.8286080360412598,
"learning_rate": 3.2950867902324215e-05,
"loss": 2.9457,
"step": 260000
},
{
"epoch": 14.0,
"eval_accuracy": 0.4048622747603851,
"eval_loss": 3.4079537391662598,
"eval_runtime": 153.3879,
"eval_samples_per_second": 377.592,
"eval_steps_per_second": 5.9,
"step": 260330
},
{
"epoch": 14.04,
"grad_norm": 0.8589282631874084,
"learning_rate": 3.265666372462489e-05,
"loss": 2.918,
"step": 261000
},
{
"epoch": 14.09,
"grad_norm": 0.8760070204734802,
"learning_rate": 3.2362459546925564e-05,
"loss": 2.9044,
"step": 262000
},
{
"epoch": 14.14,
"grad_norm": 0.883084237575531,
"learning_rate": 3.2068255369226245e-05,
"loss": 2.9054,
"step": 263000
},
{
"epoch": 14.2,
"grad_norm": 0.8560861349105835,
"learning_rate": 3.177405119152692e-05,
"loss": 2.9076,
"step": 264000
},
{
"epoch": 14.25,
"grad_norm": 0.9030760526657104,
"learning_rate": 3.1479847013827594e-05,
"loss": 2.9111,
"step": 265000
},
{
"epoch": 14.3,
"grad_norm": 0.8720868229866028,
"learning_rate": 3.118593704030597e-05,
"loss": 2.9138,
"step": 266000
},
{
"epoch": 14.36,
"grad_norm": 0.8849419951438904,
"learning_rate": 3.089202706678435e-05,
"loss": 2.9211,
"step": 267000
},
{
"epoch": 14.41,
"grad_norm": 0.8608791828155518,
"learning_rate": 3.059782288908502e-05,
"loss": 2.9176,
"step": 268000
},
{
"epoch": 14.47,
"grad_norm": 0.868854284286499,
"learning_rate": 3.03036187113857e-05,
"loss": 2.9216,
"step": 269000
},
{
"epoch": 14.52,
"grad_norm": 0.8694607615470886,
"learning_rate": 3.0009414533686382e-05,
"loss": 2.9244,
"step": 270000
},
{
"epoch": 14.57,
"grad_norm": 0.8707175254821777,
"learning_rate": 2.9715504560164753e-05,
"loss": 2.9216,
"step": 271000
},
{
"epoch": 14.63,
"grad_norm": 0.907792329788208,
"learning_rate": 2.9421300382465434e-05,
"loss": 2.927,
"step": 272000
},
{
"epoch": 14.68,
"grad_norm": 0.8800376057624817,
"learning_rate": 2.912739040894381e-05,
"loss": 2.9259,
"step": 273000
},
{
"epoch": 14.74,
"grad_norm": 0.8789490461349487,
"learning_rate": 2.8833186231244486e-05,
"loss": 2.9261,
"step": 274000
},
{
"epoch": 14.79,
"grad_norm": 0.8529285192489624,
"learning_rate": 2.8539276257722863e-05,
"loss": 2.9234,
"step": 275000
},
{
"epoch": 14.84,
"grad_norm": 0.8620697855949402,
"learning_rate": 2.8245072080023538e-05,
"loss": 2.9255,
"step": 276000
},
{
"epoch": 14.9,
"grad_norm": 0.8713541626930237,
"learning_rate": 2.7950867902324212e-05,
"loss": 2.9287,
"step": 277000
},
{
"epoch": 14.95,
"grad_norm": 0.8321398496627808,
"learning_rate": 2.765695792880259e-05,
"loss": 2.9268,
"step": 278000
},
{
"epoch": 15.0,
"eval_accuracy": 0.40448188993348033,
"eval_loss": 3.4128763675689697,
"eval_runtime": 153.2847,
"eval_samples_per_second": 377.846,
"eval_steps_per_second": 5.904,
"step": 278925
},
{
"epoch": 15.0,
"grad_norm": 0.8870017528533936,
"learning_rate": 2.7363047955280967e-05,
"loss": 2.9236,
"step": 279000
},
{
"epoch": 15.06,
"grad_norm": 0.902341902256012,
"learning_rate": 2.706884377758164e-05,
"loss": 2.8917,
"step": 280000
},
{
"epoch": 15.11,
"grad_norm": 0.8734650015830994,
"learning_rate": 2.677463959988232e-05,
"loss": 2.8882,
"step": 281000
},
{
"epoch": 15.17,
"grad_norm": 0.8736613392829895,
"learning_rate": 2.6480435422182997e-05,
"loss": 2.895,
"step": 282000
},
{
"epoch": 15.22,
"grad_norm": 0.8876488208770752,
"learning_rate": 2.618652544866137e-05,
"loss": 2.8917,
"step": 283000
},
{
"epoch": 15.27,
"grad_norm": 0.9243654012680054,
"learning_rate": 2.589232127096205e-05,
"loss": 2.8952,
"step": 284000
},
{
"epoch": 15.33,
"grad_norm": 0.8820899724960327,
"learning_rate": 2.5598117093262723e-05,
"loss": 2.8973,
"step": 285000
},
{
"epoch": 15.38,
"grad_norm": 0.9063917398452759,
"learning_rate": 2.5304207119741104e-05,
"loss": 2.8972,
"step": 286000
},
{
"epoch": 15.43,
"grad_norm": 0.9035106301307678,
"learning_rate": 2.501000294204178e-05,
"loss": 2.9064,
"step": 287000
},
{
"epoch": 15.49,
"grad_norm": 0.8639585971832275,
"learning_rate": 2.4716092968520156e-05,
"loss": 2.9013,
"step": 288000
},
{
"epoch": 15.54,
"grad_norm": 0.8969417810440063,
"learning_rate": 2.4421888790820833e-05,
"loss": 2.9056,
"step": 289000
},
{
"epoch": 15.6,
"grad_norm": 0.8919821977615356,
"learning_rate": 2.4127684613121508e-05,
"loss": 2.9073,
"step": 290000
},
{
"epoch": 15.65,
"grad_norm": 0.8989285230636597,
"learning_rate": 2.3833774639599885e-05,
"loss": 2.9062,
"step": 291000
},
{
"epoch": 15.7,
"grad_norm": 0.8888868689537048,
"learning_rate": 2.353986466607826e-05,
"loss": 2.9088,
"step": 292000
},
{
"epoch": 15.76,
"grad_norm": 0.9275554418563843,
"learning_rate": 2.3245660488378937e-05,
"loss": 2.9069,
"step": 293000
},
{
"epoch": 15.81,
"grad_norm": 0.8558235168457031,
"learning_rate": 2.295145631067961e-05,
"loss": 2.9037,
"step": 294000
},
{
"epoch": 15.86,
"grad_norm": 0.8763130307197571,
"learning_rate": 2.265725213298029e-05,
"loss": 2.9064,
"step": 295000
},
{
"epoch": 15.92,
"grad_norm": 0.8805882334709167,
"learning_rate": 2.2363342159458667e-05,
"loss": 2.9079,
"step": 296000
},
{
"epoch": 15.97,
"grad_norm": 0.9118674993515015,
"learning_rate": 2.206943218593704e-05,
"loss": 2.911,
"step": 297000
},
{
"epoch": 16.0,
"eval_accuracy": 0.4047090324802421,
"eval_loss": 3.41589617729187,
"eval_runtime": 153.0438,
"eval_samples_per_second": 378.441,
"eval_steps_per_second": 5.913,
"step": 297520
},
{
"epoch": 16.03,
"grad_norm": 0.9044952392578125,
"learning_rate": 2.177522800823772e-05,
"loss": 2.8973,
"step": 298000
},
{
"epoch": 16.08,
"grad_norm": 0.8891879320144653,
"learning_rate": 2.1481023830538393e-05,
"loss": 2.8726,
"step": 299000
},
{
"epoch": 16.13,
"grad_norm": 0.9434779286384583,
"learning_rate": 2.118681965283907e-05,
"loss": 2.8738,
"step": 300000
},
{
"epoch": 16.19,
"grad_norm": 0.9484353065490723,
"learning_rate": 2.0892909679317445e-05,
"loss": 2.8755,
"step": 301000
},
{
"epoch": 16.24,
"grad_norm": 0.9076915979385376,
"learning_rate": 2.0598705501618122e-05,
"loss": 2.8804,
"step": 302000
},
{
"epoch": 16.29,
"grad_norm": 0.9162052869796753,
"learning_rate": 2.03047955280965e-05,
"loss": 2.8824,
"step": 303000
},
{
"epoch": 16.35,
"grad_norm": 0.9373551607131958,
"learning_rate": 2.0010591350397174e-05,
"loss": 2.8834,
"step": 304000
},
{
"epoch": 16.4,
"grad_norm": 0.9191654324531555,
"learning_rate": 1.9716387172697852e-05,
"loss": 2.8875,
"step": 305000
},
{
"epoch": 16.46,
"grad_norm": 0.9333674907684326,
"learning_rate": 1.942218299499853e-05,
"loss": 2.8867,
"step": 306000
},
{
"epoch": 16.51,
"grad_norm": 0.9224890470504761,
"learning_rate": 1.9128273021476907e-05,
"loss": 2.886,
"step": 307000
},
{
"epoch": 16.56,
"grad_norm": 0.9154072403907776,
"learning_rate": 1.8834068843777582e-05,
"loss": 2.8859,
"step": 308000
},
{
"epoch": 16.62,
"grad_norm": 0.8822410702705383,
"learning_rate": 1.854015887025596e-05,
"loss": 2.8887,
"step": 309000
},
{
"epoch": 16.67,
"grad_norm": 0.9022771716117859,
"learning_rate": 1.8245954692556637e-05,
"loss": 2.8884,
"step": 310000
},
{
"epoch": 16.72,
"grad_norm": 0.8923472762107849,
"learning_rate": 1.795175051485731e-05,
"loss": 2.893,
"step": 311000
},
{
"epoch": 16.78,
"grad_norm": 0.9239156246185303,
"learning_rate": 1.765784054133569e-05,
"loss": 2.8919,
"step": 312000
},
{
"epoch": 16.83,
"grad_norm": 0.9343294501304626,
"learning_rate": 1.7363636363636366e-05,
"loss": 2.8876,
"step": 313000
},
{
"epoch": 16.89,
"grad_norm": 0.9064814448356628,
"learning_rate": 1.706972639011474e-05,
"loss": 2.8913,
"step": 314000
},
{
"epoch": 16.94,
"grad_norm": 0.9277246594429016,
"learning_rate": 1.6775522212415418e-05,
"loss": 2.8909,
"step": 315000
},
{
"epoch": 16.99,
"grad_norm": 0.9258543848991394,
"learning_rate": 1.6481612238893792e-05,
"loss": 2.8964,
"step": 316000
},
{
"epoch": 17.0,
"eval_accuracy": 0.40512663616869843,
"eval_loss": 3.422145366668701,
"eval_runtime": 153.2527,
"eval_samples_per_second": 377.925,
"eval_steps_per_second": 5.905,
"step": 316115
},
{
"epoch": 17.05,
"grad_norm": 0.9131640195846558,
"learning_rate": 1.6187702265372166e-05,
"loss": 2.864,
"step": 317000
},
{
"epoch": 17.1,
"grad_norm": 1.1945741176605225,
"learning_rate": 1.5893498087672844e-05,
"loss": 2.8617,
"step": 318000
},
{
"epoch": 17.16,
"grad_norm": 0.9226692914962769,
"learning_rate": 1.5599588114151225e-05,
"loss": 2.8644,
"step": 319000
},
{
"epoch": 17.21,
"grad_norm": 0.9040902256965637,
"learning_rate": 1.53053839364519e-05,
"loss": 2.8643,
"step": 320000
},
{
"epoch": 17.26,
"grad_norm": 0.9265015721321106,
"learning_rate": 1.5011179758752575e-05,
"loss": 2.8684,
"step": 321000
},
{
"epoch": 17.32,
"grad_norm": 0.940202534198761,
"learning_rate": 1.4716975581053253e-05,
"loss": 2.8653,
"step": 322000
},
{
"epoch": 17.37,
"grad_norm": 0.9231876730918884,
"learning_rate": 1.4423065607531627e-05,
"loss": 2.8729,
"step": 323000
},
{
"epoch": 17.42,
"grad_norm": 0.9177024960517883,
"learning_rate": 1.4128861429832305e-05,
"loss": 2.8688,
"step": 324000
},
{
"epoch": 17.48,
"grad_norm": 0.9014251828193665,
"learning_rate": 1.3834657252132983e-05,
"loss": 2.8711,
"step": 325000
},
{
"epoch": 17.53,
"grad_norm": 0.9649085402488708,
"learning_rate": 1.3540453074433657e-05,
"loss": 2.8721,
"step": 326000
},
{
"epoch": 17.59,
"grad_norm": 0.9398193955421448,
"learning_rate": 1.3246543100912034e-05,
"loss": 2.8725,
"step": 327000
},
{
"epoch": 17.64,
"grad_norm": 0.9121034145355225,
"learning_rate": 1.2952338923212709e-05,
"loss": 2.8771,
"step": 328000
},
{
"epoch": 17.69,
"grad_norm": 0.8918709754943848,
"learning_rate": 1.2658134745513387e-05,
"loss": 2.8721,
"step": 329000
},
{
"epoch": 17.75,
"grad_norm": 0.8925199508666992,
"learning_rate": 1.2363930567814063e-05,
"loss": 2.8722,
"step": 330000
},
{
"epoch": 17.8,
"grad_norm": 0.9330604076385498,
"learning_rate": 1.207031479847014e-05,
"loss": 2.8763,
"step": 331000
},
{
"epoch": 17.85,
"grad_norm": 0.9173546433448792,
"learning_rate": 1.1776110620770816e-05,
"loss": 2.8743,
"step": 332000
},
{
"epoch": 17.91,
"grad_norm": 0.9605908393859863,
"learning_rate": 1.1481906443071492e-05,
"loss": 2.8716,
"step": 333000
},
{
"epoch": 17.96,
"grad_norm": 0.9416488409042358,
"learning_rate": 1.1187702265372168e-05,
"loss": 2.8758,
"step": 334000
},
{
"epoch": 18.0,
"eval_accuracy": 0.4054102116463327,
"eval_loss": 3.428605079650879,
"eval_runtime": 153.3009,
"eval_samples_per_second": 377.806,
"eval_steps_per_second": 5.903,
"step": 334710
},
{
"epoch": 18.02,
"grad_norm": 0.9360882639884949,
"learning_rate": 1.0893498087672846e-05,
"loss": 2.8649,
"step": 335000
},
{
"epoch": 18.07,
"grad_norm": 0.9502159357070923,
"learning_rate": 1.0599588114151222e-05,
"loss": 2.8497,
"step": 336000
},
{
"epoch": 18.12,
"grad_norm": 0.9194748997688293,
"learning_rate": 1.0305383936451898e-05,
"loss": 2.8492,
"step": 337000
},
{
"epoch": 18.18,
"grad_norm": 0.9241055250167847,
"learning_rate": 1.0011473962930273e-05,
"loss": 2.8539,
"step": 338000
},
{
"epoch": 18.23,
"grad_norm": 0.9047831296920776,
"learning_rate": 9.717563989408649e-06,
"loss": 2.8555,
"step": 339000
},
{
"epoch": 18.28,
"grad_norm": 0.9505220651626587,
"learning_rate": 9.423359811709327e-06,
"loss": 2.8542,
"step": 340000
},
{
"epoch": 18.34,
"grad_norm": 0.9681676030158997,
"learning_rate": 9.129155634010003e-06,
"loss": 2.8526,
"step": 341000
},
{
"epoch": 18.39,
"grad_norm": 0.9679902791976929,
"learning_rate": 8.834951456310681e-06,
"loss": 2.8543,
"step": 342000
},
{
"epoch": 18.45,
"grad_norm": 0.9757332801818848,
"learning_rate": 8.540747278611357e-06,
"loss": 2.8569,
"step": 343000
},
{
"epoch": 18.5,
"grad_norm": 0.9353688359260559,
"learning_rate": 8.246837305089733e-06,
"loss": 2.8563,
"step": 344000
},
{
"epoch": 18.55,
"grad_norm": 0.9887840747833252,
"learning_rate": 7.952633127390409e-06,
"loss": 2.8577,
"step": 345000
},
{
"epoch": 18.61,
"grad_norm": 0.9348433613777161,
"learning_rate": 7.658723153868786e-06,
"loss": 2.8606,
"step": 346000
},
{
"epoch": 18.66,
"grad_norm": 0.9570315480232239,
"learning_rate": 7.364518976169461e-06,
"loss": 2.8603,
"step": 347000
},
{
"epoch": 18.71,
"grad_norm": 0.9226345419883728,
"learning_rate": 7.0703147984701384e-06,
"loss": 2.8586,
"step": 348000
},
{
"epoch": 18.77,
"grad_norm": 0.9210349321365356,
"learning_rate": 6.776110620770815e-06,
"loss": 2.8566,
"step": 349000
},
{
"epoch": 18.82,
"grad_norm": 0.9562284350395203,
"learning_rate": 6.482200647249191e-06,
"loss": 2.8609,
"step": 350000
},
{
"epoch": 18.88,
"grad_norm": 0.9210054874420166,
"learning_rate": 6.187996469549868e-06,
"loss": 2.8607,
"step": 351000
},
{
"epoch": 18.93,
"grad_norm": 0.9457345008850098,
"learning_rate": 5.893792291850544e-06,
"loss": 2.8598,
"step": 352000
},
{
"epoch": 18.98,
"grad_norm": 0.9460088610649109,
"learning_rate": 5.59988231832892e-06,
"loss": 2.858,
"step": 353000
},
{
"epoch": 19.0,
"eval_accuracy": 0.4053840106427133,
"eval_loss": 3.426475763320923,
"eval_runtime": 153.612,
"eval_samples_per_second": 377.041,
"eval_steps_per_second": 5.891,
"step": 353305
},
{
"epoch": 19.04,
"grad_norm": 0.9623994827270508,
"learning_rate": 5.305678140629597e-06,
"loss": 2.8452,
"step": 354000
},
{
"epoch": 19.09,
"grad_norm": 0.9756667613983154,
"learning_rate": 5.011473962930274e-06,
"loss": 2.842,
"step": 355000
},
{
"epoch": 19.14,
"grad_norm": 0.9565538763999939,
"learning_rate": 4.71726978523095e-06,
"loss": 2.8394,
"step": 356000
},
{
"epoch": 19.2,
"grad_norm": 0.9308847784996033,
"learning_rate": 4.4233598117093264e-06,
"loss": 2.8404,
"step": 357000
},
{
"epoch": 19.25,
"grad_norm": 0.9186875224113464,
"learning_rate": 4.1291556340100035e-06,
"loss": 2.8446,
"step": 358000
},
{
"epoch": 19.31,
"grad_norm": 0.9542786478996277,
"learning_rate": 3.83495145631068e-06,
"loss": 2.8453,
"step": 359000
},
{
"epoch": 19.36,
"grad_norm": 0.9696632027626038,
"learning_rate": 3.5410414827890556e-06,
"loss": 2.8454,
"step": 360000
},
{
"epoch": 19.41,
"grad_norm": 0.9518758058547974,
"learning_rate": 3.2471315092674317e-06,
"loss": 2.8447,
"step": 361000
},
{
"epoch": 19.47,
"grad_norm": 0.9522430896759033,
"learning_rate": 2.9529273315681087e-06,
"loss": 2.8461,
"step": 362000
},
{
"epoch": 19.52,
"grad_norm": 0.9725024700164795,
"learning_rate": 2.6587231538687853e-06,
"loss": 2.8434,
"step": 363000
},
{
"epoch": 19.58,
"grad_norm": 0.9441395401954651,
"learning_rate": 2.364518976169462e-06,
"loss": 2.8441,
"step": 364000
},
{
"epoch": 19.63,
"grad_norm": 0.9425933957099915,
"learning_rate": 2.0706090026478374e-06,
"loss": 2.8438,
"step": 365000
},
{
"epoch": 19.68,
"grad_norm": 0.9448084831237793,
"learning_rate": 1.7764048249485142e-06,
"loss": 2.8458,
"step": 366000
},
{
"epoch": 19.74,
"grad_norm": 0.9476189613342285,
"learning_rate": 1.4824948514268903e-06,
"loss": 2.8462,
"step": 367000
},
{
"epoch": 19.79,
"grad_norm": 0.9310097694396973,
"learning_rate": 1.188290673727567e-06,
"loss": 2.8437,
"step": 368000
},
{
"epoch": 19.84,
"grad_norm": 0.9563276171684265,
"learning_rate": 8.940864960282438e-07,
"loss": 2.8462,
"step": 369000
},
{
"epoch": 19.9,
"grad_norm": 0.9808979630470276,
"learning_rate": 5.998823183289204e-07,
"loss": 2.8412,
"step": 370000
},
{
"epoch": 19.95,
"grad_norm": 0.9566290378570557,
"learning_rate": 3.0597234480729626e-07,
"loss": 2.8493,
"step": 371000
},
{
"epoch": 20.0,
"eval_accuracy": 0.40547080986496004,
"eval_loss": 3.432330846786499,
"eval_runtime": 154.1,
"eval_samples_per_second": 375.847,
"eval_steps_per_second": 5.873,
"step": 371900
},
{
"epoch": 20.0,
"step": 371900,
"total_flos": 1.56693365388288e+18,
"train_loss": 3.1561156885198893,
"train_runtime": 81199.8655,
"train_samples_per_second": 146.561,
"train_steps_per_second": 4.58
}
],
"logging_steps": 1000,
"max_steps": 371900,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 5000,
"total_flos": 1.56693365388288e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}