{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.58311345646438,
"eval_steps": 500,
"global_step": 2100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.003769317753486619,
"grad_norm": 2.665037155151367,
"learning_rate": 4.999956146783009e-05,
"loss": 1.564,
"num_input_tokens_seen": 23856,
"step": 5
},
{
"epoch": 0.007538635506973238,
"grad_norm": 1.4514005184173584,
"learning_rate": 4.9998245886705174e-05,
"loss": 0.968,
"num_input_tokens_seen": 47584,
"step": 10
},
{
"epoch": 0.011307953260459858,
"grad_norm": 2.0009350776672363,
"learning_rate": 4.999605330277923e-05,
"loss": 0.9879,
"num_input_tokens_seen": 70864,
"step": 15
},
{
"epoch": 0.015077271013946476,
"grad_norm": 1.2164318561553955,
"learning_rate": 4.999298379297376e-05,
"loss": 1.073,
"num_input_tokens_seen": 94192,
"step": 20
},
{
"epoch": 0.018846588767433094,
"grad_norm": 2.2099952697753906,
"learning_rate": 4.998903746497505e-05,
"loss": 0.8549,
"num_input_tokens_seen": 117472,
"step": 25
},
{
"epoch": 0.022615906520919715,
"grad_norm": 1.6259618997573853,
"learning_rate": 4.998421445723046e-05,
"loss": 0.8704,
"num_input_tokens_seen": 140704,
"step": 30
},
{
"epoch": 0.026385224274406333,
"grad_norm": 1.7849559783935547,
"learning_rate": 4.997851493894349e-05,
"loss": 0.9786,
"num_input_tokens_seen": 163680,
"step": 35
},
{
"epoch": 0.03015454202789295,
"grad_norm": 1.8650152683258057,
"learning_rate": 4.997193911006793e-05,
"loss": 0.8356,
"num_input_tokens_seen": 187072,
"step": 40
},
{
"epoch": 0.03392385978137957,
"grad_norm": 1.681622862815857,
"learning_rate": 4.996448720130077e-05,
"loss": 0.778,
"num_input_tokens_seen": 211040,
"step": 45
},
{
"epoch": 0.03769317753486619,
"grad_norm": 1.6398649215698242,
"learning_rate": 4.995615947407415e-05,
"loss": 0.939,
"num_input_tokens_seen": 234560,
"step": 50
},
{
"epoch": 0.04146249528835281,
"grad_norm": 2.327622890472412,
"learning_rate": 4.994695622054618e-05,
"loss": 1.0274,
"num_input_tokens_seen": 257632,
"step": 55
},
{
"epoch": 0.04523181304183943,
"grad_norm": 2.6391327381134033,
"learning_rate": 4.9936877763590664e-05,
"loss": 0.8822,
"num_input_tokens_seen": 281024,
"step": 60
},
{
"epoch": 0.049001130795326045,
"grad_norm": 1.8776222467422485,
"learning_rate": 4.992592445678582e-05,
"loss": 0.8146,
"num_input_tokens_seen": 304688,
"step": 65
},
{
"epoch": 0.052770448548812667,
"grad_norm": 1.6592698097229004,
"learning_rate": 4.991409668440185e-05,
"loss": 0.9989,
"num_input_tokens_seen": 327424,
"step": 70
},
{
"epoch": 0.05653976630229928,
"grad_norm": 1.709376335144043,
"learning_rate": 4.990139486138743e-05,
"loss": 0.9344,
"num_input_tokens_seen": 350528,
"step": 75
},
{
"epoch": 0.0603090840557859,
"grad_norm": 1.3547413349151611,
"learning_rate": 4.988781943335521e-05,
"loss": 0.7932,
"num_input_tokens_seen": 373280,
"step": 80
},
{
"epoch": 0.06407840180927252,
"grad_norm": 1.3162572383880615,
"learning_rate": 4.987337087656614e-05,
"loss": 0.9445,
"num_input_tokens_seen": 395856,
"step": 85
},
{
"epoch": 0.06784771956275915,
"grad_norm": 1.8211767673492432,
"learning_rate": 4.985804969791279e-05,
"loss": 0.7369,
"num_input_tokens_seen": 418704,
"step": 90
},
{
"epoch": 0.07161703731624576,
"grad_norm": 1.552996039390564,
"learning_rate": 4.984185643490151e-05,
"loss": 1.0226,
"num_input_tokens_seen": 442432,
"step": 95
},
{
"epoch": 0.07538635506973237,
"grad_norm": 1.816767930984497,
"learning_rate": 4.9824791655633676e-05,
"loss": 0.7753,
"num_input_tokens_seen": 466128,
"step": 100
},
{
"epoch": 0.079155672823219,
"grad_norm": 1.6724802255630493,
"learning_rate": 4.9806855958785625e-05,
"loss": 0.8278,
"num_input_tokens_seen": 489536,
"step": 105
},
{
"epoch": 0.08292499057670562,
"grad_norm": 2.187622308731079,
"learning_rate": 4.978804997358779e-05,
"loss": 0.8432,
"num_input_tokens_seen": 513200,
"step": 110
},
{
"epoch": 0.08669430833019223,
"grad_norm": 1.500815987586975,
"learning_rate": 4.9768374359802525e-05,
"loss": 0.9649,
"num_input_tokens_seen": 536432,
"step": 115
},
{
"epoch": 0.09046362608367886,
"grad_norm": 1.6637320518493652,
"learning_rate": 4.9747829807701e-05,
"loss": 0.8249,
"num_input_tokens_seen": 559776,
"step": 120
},
{
"epoch": 0.09423294383716548,
"grad_norm": 1.5978686809539795,
"learning_rate": 4.972641703803896e-05,
"loss": 0.9157,
"num_input_tokens_seen": 583248,
"step": 125
},
{
"epoch": 0.09800226159065209,
"grad_norm": 1.4269095659255981,
"learning_rate": 4.9704136802031485e-05,
"loss": 0.885,
"num_input_tokens_seen": 606768,
"step": 130
},
{
"epoch": 0.1017715793441387,
"grad_norm": 1.6915644407272339,
"learning_rate": 4.96809898813266e-05,
"loss": 0.8736,
"num_input_tokens_seen": 630896,
"step": 135
},
{
"epoch": 0.10554089709762533,
"grad_norm": 1.6915837526321411,
"learning_rate": 4.965697708797784e-05,
"loss": 0.7312,
"num_input_tokens_seen": 654320,
"step": 140
},
{
"epoch": 0.10931021485111195,
"grad_norm": 1.921988606452942,
"learning_rate": 4.963209926441581e-05,
"loss": 0.9478,
"num_input_tokens_seen": 677248,
"step": 145
},
{
"epoch": 0.11307953260459856,
"grad_norm": 2.241665840148926,
"learning_rate": 4.9606357283418575e-05,
"loss": 0.9174,
"num_input_tokens_seen": 700672,
"step": 150
},
{
"epoch": 0.11684885035808519,
"grad_norm": 2.076327323913574,
"learning_rate": 4.957975204808108e-05,
"loss": 0.8453,
"num_input_tokens_seen": 724480,
"step": 155
},
{
"epoch": 0.1206181681115718,
"grad_norm": 1.5655834674835205,
"learning_rate": 4.955228449178345e-05,
"loss": 0.701,
"num_input_tokens_seen": 748144,
"step": 160
},
{
"epoch": 0.12438748586505842,
"grad_norm": 1.8062623739242554,
"learning_rate": 4.952395557815826e-05,
"loss": 0.7981,
"num_input_tokens_seen": 771584,
"step": 165
},
{
"epoch": 0.12815680361854503,
"grad_norm": 2.2940807342529297,
"learning_rate": 4.949476630105669e-05,
"loss": 0.8824,
"num_input_tokens_seen": 795248,
"step": 170
},
{
"epoch": 0.13192612137203166,
"grad_norm": 2.108461856842041,
"learning_rate": 4.9464717684513726e-05,
"loss": 0.8368,
"num_input_tokens_seen": 818272,
"step": 175
},
{
"epoch": 0.1356954391255183,
"grad_norm": 1.5639266967773438,
"learning_rate": 4.943381078271214e-05,
"loss": 0.951,
"num_input_tokens_seen": 841440,
"step": 180
},
{
"epoch": 0.1394647568790049,
"grad_norm": 1.4479436874389648,
"learning_rate": 4.9402046679945613e-05,
"loss": 0.8697,
"num_input_tokens_seen": 864640,
"step": 185
},
{
"epoch": 0.14323407463249152,
"grad_norm": 1.8331745862960815,
"learning_rate": 4.936942649058061e-05,
"loss": 0.7765,
"num_input_tokens_seen": 888032,
"step": 190
},
{
"epoch": 0.14700339238597815,
"grad_norm": 1.6916519403457642,
"learning_rate": 4.933595135901732e-05,
"loss": 0.778,
"num_input_tokens_seen": 911008,
"step": 195
},
{
"epoch": 0.15077271013946475,
"grad_norm": 2.0202736854553223,
"learning_rate": 4.930162245964952e-05,
"loss": 0.8926,
"num_input_tokens_seen": 934432,
"step": 200
},
{
"epoch": 0.15454202789295138,
"grad_norm": 1.8288179636001587,
"learning_rate": 4.926644099682334e-05,
"loss": 0.805,
"num_input_tokens_seen": 958064,
"step": 205
},
{
"epoch": 0.158311345646438,
"grad_norm": 1.9957374334335327,
"learning_rate": 4.9230408204795034e-05,
"loss": 0.8433,
"num_input_tokens_seen": 980992,
"step": 210
},
{
"epoch": 0.1620806633999246,
"grad_norm": 1.5921601057052612,
"learning_rate": 4.9193525347687696e-05,
"loss": 0.8483,
"num_input_tokens_seen": 1004736,
"step": 215
},
{
"epoch": 0.16584998115341124,
"grad_norm": 2.1509461402893066,
"learning_rate": 4.9155793719446863e-05,
"loss": 0.9689,
"num_input_tokens_seen": 1028320,
"step": 220
},
{
"epoch": 0.16961929890689786,
"grad_norm": 1.9183369874954224,
"learning_rate": 4.911721464379516e-05,
"loss": 0.9025,
"num_input_tokens_seen": 1051696,
"step": 225
},
{
"epoch": 0.17338861666038446,
"grad_norm": 1.5253652334213257,
"learning_rate": 4.907778947418585e-05,
"loss": 0.8423,
"num_input_tokens_seen": 1074640,
"step": 230
},
{
"epoch": 0.1771579344138711,
"grad_norm": 1.917833924293518,
"learning_rate": 4.9037519593755356e-05,
"loss": 0.968,
"num_input_tokens_seen": 1098096,
"step": 235
},
{
"epoch": 0.18092725216735772,
"grad_norm": 1.8350000381469727,
"learning_rate": 4.89964064152747e-05,
"loss": 0.8118,
"num_input_tokens_seen": 1121344,
"step": 240
},
{
"epoch": 0.18469656992084432,
"grad_norm": 1.4250982999801636,
"learning_rate": 4.895445138110001e-05,
"loss": 0.8695,
"num_input_tokens_seen": 1144608,
"step": 245
},
{
"epoch": 0.18846588767433095,
"grad_norm": 3.1919894218444824,
"learning_rate": 4.891165596312186e-05,
"loss": 0.9135,
"num_input_tokens_seen": 1168112,
"step": 250
},
{
"epoch": 0.19223520542781755,
"grad_norm": 2.4486422538757324,
"learning_rate": 4.886802166271364e-05,
"loss": 0.798,
"num_input_tokens_seen": 1190992,
"step": 255
},
{
"epoch": 0.19600452318130418,
"grad_norm": 1.6712846755981445,
"learning_rate": 4.882355001067892e-05,
"loss": 0.8068,
"num_input_tokens_seen": 1214832,
"step": 260
},
{
"epoch": 0.1997738409347908,
"grad_norm": 2.27498197555542,
"learning_rate": 4.8778242567197685e-05,
"loss": 0.7621,
"num_input_tokens_seen": 1238080,
"step": 265
},
{
"epoch": 0.2035431586882774,
"grad_norm": 1.8954802751541138,
"learning_rate": 4.873210092177167e-05,
"loss": 0.7604,
"num_input_tokens_seen": 1261056,
"step": 270
},
{
"epoch": 0.20731247644176404,
"grad_norm": 2.805199146270752,
"learning_rate": 4.868512669316855e-05,
"loss": 0.9166,
"num_input_tokens_seen": 1283968,
"step": 275
},
{
"epoch": 0.21108179419525067,
"grad_norm": 1.6355040073394775,
"learning_rate": 4.863732152936514e-05,
"loss": 0.8487,
"num_input_tokens_seen": 1308192,
"step": 280
},
{
"epoch": 0.21485111194873727,
"grad_norm": 1.5382211208343506,
"learning_rate": 4.858868710748963e-05,
"loss": 0.9388,
"num_input_tokens_seen": 1331648,
"step": 285
},
{
"epoch": 0.2186204297022239,
"grad_norm": 4.8234052658081055,
"learning_rate": 4.85392251337627e-05,
"loss": 0.8762,
"num_input_tokens_seen": 1354880,
"step": 290
},
{
"epoch": 0.22238974745571052,
"grad_norm": 1.7781318426132202,
"learning_rate": 4.848893734343769e-05,
"loss": 0.8672,
"num_input_tokens_seen": 1378064,
"step": 295
},
{
"epoch": 0.22615906520919712,
"grad_norm": 1.4167485237121582,
"learning_rate": 4.8437825500739696e-05,
"loss": 0.9335,
"num_input_tokens_seen": 1401568,
"step": 300
},
{
"epoch": 0.22992838296268375,
"grad_norm": 1.112923502922058,
"learning_rate": 4.838589139880371e-05,
"loss": 0.7408,
"num_input_tokens_seen": 1424960,
"step": 305
},
{
"epoch": 0.23369770071617038,
"grad_norm": 1.789054274559021,
"learning_rate": 4.833313685961167e-05,
"loss": 0.7255,
"num_input_tokens_seen": 1448560,
"step": 310
},
{
"epoch": 0.23746701846965698,
"grad_norm": 2.056309223175049,
"learning_rate": 4.82795637339286e-05,
"loss": 0.8214,
"num_input_tokens_seen": 1472224,
"step": 315
},
{
"epoch": 0.2412363362231436,
"grad_norm": 2.14561128616333,
"learning_rate": 4.822517390123761e-05,
"loss": 0.7577,
"num_input_tokens_seen": 1495232,
"step": 320
},
{
"epoch": 0.24500565397663024,
"grad_norm": 2.092505931854248,
"learning_rate": 4.8169969269674016e-05,
"loss": 0.7087,
"num_input_tokens_seen": 1518608,
"step": 325
},
{
"epoch": 0.24877497173011684,
"grad_norm": 3.2833504676818848,
"learning_rate": 4.811395177595836e-05,
"loss": 0.8798,
"num_input_tokens_seen": 1541552,
"step": 330
},
{
"epoch": 0.25254428948360347,
"grad_norm": 3.2298083305358887,
"learning_rate": 4.8057123385328495e-05,
"loss": 0.8511,
"num_input_tokens_seen": 1565136,
"step": 335
},
{
"epoch": 0.25631360723709007,
"grad_norm": 3.187859296798706,
"learning_rate": 4.799948609147061e-05,
"loss": 0.8655,
"num_input_tokens_seen": 1588624,
"step": 340
},
{
"epoch": 0.2600829249905767,
"grad_norm": 1.7054897546768188,
"learning_rate": 4.7941041916449316e-05,
"loss": 0.7643,
"num_input_tokens_seen": 1612016,
"step": 345
},
{
"epoch": 0.2638522427440633,
"grad_norm": 2.040390729904175,
"learning_rate": 4.788179291063667e-05,
"loss": 0.8325,
"num_input_tokens_seen": 1635856,
"step": 350
},
{
"epoch": 0.2676215604975499,
"grad_norm": 1.6121920347213745,
"learning_rate": 4.78217411526403e-05,
"loss": 0.6917,
"num_input_tokens_seen": 1659168,
"step": 355
},
{
"epoch": 0.2713908782510366,
"grad_norm": 1.7065237760543823,
"learning_rate": 4.7760888749230416e-05,
"loss": 0.9086,
"num_input_tokens_seen": 1682480,
"step": 360
},
{
"epoch": 0.2751601960045232,
"grad_norm": 2.437305212020874,
"learning_rate": 4.769923783526593e-05,
"loss": 0.9648,
"num_input_tokens_seen": 1705952,
"step": 365
},
{
"epoch": 0.2789295137580098,
"grad_norm": 1.7989013195037842,
"learning_rate": 4.7636790573619586e-05,
"loss": 0.8712,
"num_input_tokens_seen": 1729504,
"step": 370
},
{
"epoch": 0.28269883151149644,
"grad_norm": 1.2867515087127686,
"learning_rate": 4.7573549155102014e-05,
"loss": 0.7044,
"num_input_tokens_seen": 1752784,
"step": 375
},
{
"epoch": 0.28646814926498304,
"grad_norm": 1.7485004663467407,
"learning_rate": 4.7509515798384956e-05,
"loss": 0.92,
"num_input_tokens_seen": 1775840,
"step": 380
},
{
"epoch": 0.29023746701846964,
"grad_norm": 1.8417341709136963,
"learning_rate": 4.7444692749923345e-05,
"loss": 0.7396,
"num_input_tokens_seen": 1798944,
"step": 385
},
{
"epoch": 0.2940067847719563,
"grad_norm": 1.755854606628418,
"learning_rate": 4.7379082283876566e-05,
"loss": 0.8619,
"num_input_tokens_seen": 1822016,
"step": 390
},
{
"epoch": 0.2977761025254429,
"grad_norm": 1.924985408782959,
"learning_rate": 4.73126867020286e-05,
"loss": 0.7399,
"num_input_tokens_seen": 1845584,
"step": 395
},
{
"epoch": 0.3015454202789295,
"grad_norm": 2.078490972518921,
"learning_rate": 4.724550833370735e-05,
"loss": 0.7882,
"num_input_tokens_seen": 1868976,
"step": 400
},
{
"epoch": 0.30531473803241616,
"grad_norm": 1.7066706418991089,
"learning_rate": 4.717754953570286e-05,
"loss": 0.7579,
"num_input_tokens_seen": 1892096,
"step": 405
},
{
"epoch": 0.30908405578590276,
"grad_norm": 1.5587912797927856,
"learning_rate": 4.710881269218467e-05,
"loss": 0.8531,
"num_input_tokens_seen": 1915136,
"step": 410
},
{
"epoch": 0.31285337353938936,
"grad_norm": 2.13066029548645,
"learning_rate": 4.7039300214618134e-05,
"loss": 0.8279,
"num_input_tokens_seen": 1938464,
"step": 415
},
{
"epoch": 0.316622691292876,
"grad_norm": 1.6272748708724976,
"learning_rate": 4.696901454167988e-05,
"loss": 0.9496,
"num_input_tokens_seen": 1961696,
"step": 420
},
{
"epoch": 0.3203920090463626,
"grad_norm": 1.9481947422027588,
"learning_rate": 4.68979581391722e-05,
"loss": 0.7804,
"num_input_tokens_seen": 1985072,
"step": 425
},
{
"epoch": 0.3241613267998492,
"grad_norm": 2.0169599056243896,
"learning_rate": 4.682613349993655e-05,
"loss": 0.8848,
"num_input_tokens_seen": 2007936,
"step": 430
},
{
"epoch": 0.32793064455333587,
"grad_norm": 2.285831928253174,
"learning_rate": 4.675354314376614e-05,
"loss": 0.7155,
"num_input_tokens_seen": 2031280,
"step": 435
},
{
"epoch": 0.33169996230682247,
"grad_norm": 1.7333121299743652,
"learning_rate": 4.6680189617317474e-05,
"loss": 0.7408,
"num_input_tokens_seen": 2054176,
"step": 440
},
{
"epoch": 0.33546928006030907,
"grad_norm": 1.5312005281448364,
"learning_rate": 4.660607549402103e-05,
"loss": 0.6917,
"num_input_tokens_seen": 2077392,
"step": 445
},
{
"epoch": 0.33923859781379573,
"grad_norm": 1.2847707271575928,
"learning_rate": 4.6531203373991014e-05,
"loss": 0.8376,
"num_input_tokens_seen": 2100464,
"step": 450
},
{
"epoch": 0.34300791556728233,
"grad_norm": 2.3927323818206787,
"learning_rate": 4.645557588393407e-05,
"loss": 0.7672,
"num_input_tokens_seen": 2124240,
"step": 455
},
{
"epoch": 0.34677723332076893,
"grad_norm": 1.467666506767273,
"learning_rate": 4.63791956770572e-05,
"loss": 0.7285,
"num_input_tokens_seen": 2147584,
"step": 460
},
{
"epoch": 0.3505465510742556,
"grad_norm": 2.08063006401062,
"learning_rate": 4.6302065432974616e-05,
"loss": 0.8577,
"num_input_tokens_seen": 2170976,
"step": 465
},
{
"epoch": 0.3543158688277422,
"grad_norm": 2.0516254901885986,
"learning_rate": 4.6224187857613786e-05,
"loss": 0.8195,
"num_input_tokens_seen": 2194128,
"step": 470
},
{
"epoch": 0.3580851865812288,
"grad_norm": 1.4173938035964966,
"learning_rate": 4.6145565683120496e-05,
"loss": 0.878,
"num_input_tokens_seen": 2217056,
"step": 475
},
{
"epoch": 0.36185450433471544,
"grad_norm": 2.968780517578125,
"learning_rate": 4.606620166776294e-05,
"loss": 0.8187,
"num_input_tokens_seen": 2240800,
"step": 480
},
{
"epoch": 0.36562382208820204,
"grad_norm": 2.426954507827759,
"learning_rate": 4.598609859583506e-05,
"loss": 0.7713,
"num_input_tokens_seen": 2263984,
"step": 485
},
{
"epoch": 0.36939313984168864,
"grad_norm": 1.7469669580459595,
"learning_rate": 4.590525927755874e-05,
"loss": 0.8583,
"num_input_tokens_seen": 2287776,
"step": 490
},
{
"epoch": 0.3731624575951753,
"grad_norm": 4.131247043609619,
"learning_rate": 4.582368654898533e-05,
"loss": 0.8106,
"num_input_tokens_seen": 2311072,
"step": 495
},
{
"epoch": 0.3769317753486619,
"grad_norm": 2.013920545578003,
"learning_rate": 4.5741383271896094e-05,
"loss": 0.8578,
"num_input_tokens_seen": 2334768,
"step": 500
},
{
"epoch": 0.3807010931021485,
"grad_norm": 2.0032143592834473,
"learning_rate": 4.565835233370178e-05,
"loss": 0.7192,
"num_input_tokens_seen": 2358272,
"step": 505
},
{
"epoch": 0.3844704108556351,
"grad_norm": 1.9632434844970703,
"learning_rate": 4.557459664734141e-05,
"loss": 0.8681,
"num_input_tokens_seen": 2381440,
"step": 510
},
{
"epoch": 0.38823972860912176,
"grad_norm": 3.1098926067352295,
"learning_rate": 4.549011915118001e-05,
"loss": 0.6713,
"num_input_tokens_seen": 2403984,
"step": 515
},
{
"epoch": 0.39200904636260836,
"grad_norm": 2.1910107135772705,
"learning_rate": 4.540492280890555e-05,
"loss": 0.7997,
"num_input_tokens_seen": 2427392,
"step": 520
},
{
"epoch": 0.39577836411609496,
"grad_norm": 2.5994174480438232,
"learning_rate": 4.531901060942497e-05,
"loss": 0.8715,
"num_input_tokens_seen": 2450384,
"step": 525
},
{
"epoch": 0.3995476818695816,
"grad_norm": 2.206372022628784,
"learning_rate": 4.523238556675935e-05,
"loss": 0.6608,
"num_input_tokens_seen": 2473424,
"step": 530
},
{
"epoch": 0.4033169996230682,
"grad_norm": 2.1349329948425293,
"learning_rate": 4.514505071993812e-05,
"loss": 0.7568,
"num_input_tokens_seen": 2497232,
"step": 535
},
{
"epoch": 0.4070863173765548,
"grad_norm": 2.0707926750183105,
"learning_rate": 4.505700913289246e-05,
"loss": 0.8325,
"num_input_tokens_seen": 2520432,
"step": 540
},
{
"epoch": 0.4108556351300415,
"grad_norm": 1.6440961360931396,
"learning_rate": 4.496826389434784e-05,
"loss": 0.8083,
"num_input_tokens_seen": 2543616,
"step": 545
},
{
"epoch": 0.4146249528835281,
"grad_norm": 2.5610768795013428,
"learning_rate": 4.48788181177156e-05,
"loss": 0.7702,
"num_input_tokens_seen": 2566992,
"step": 550
},
{
"epoch": 0.4183942706370147,
"grad_norm": 2.882587432861328,
"learning_rate": 4.478867494098381e-05,
"loss": 0.6993,
"num_input_tokens_seen": 2590144,
"step": 555
},
{
"epoch": 0.42216358839050133,
"grad_norm": 2.3161754608154297,
"learning_rate": 4.469783752660709e-05,
"loss": 0.8109,
"num_input_tokens_seen": 2614240,
"step": 560
},
{
"epoch": 0.42593290614398793,
"grad_norm": 2.578278064727783,
"learning_rate": 4.460630906139571e-05,
"loss": 0.7901,
"num_input_tokens_seen": 2637696,
"step": 565
},
{
"epoch": 0.42970222389747453,
"grad_norm": 2.362873077392578,
"learning_rate": 4.451409275640379e-05,
"loss": 0.9066,
"num_input_tokens_seen": 2660768,
"step": 570
},
{
"epoch": 0.4334715416509612,
"grad_norm": 1.8829095363616943,
"learning_rate": 4.442119184681664e-05,
"loss": 0.703,
"num_input_tokens_seen": 2683792,
"step": 575
},
{
"epoch": 0.4372408594044478,
"grad_norm": 1.8470242023468018,
"learning_rate": 4.432760959183725e-05,
"loss": 0.9203,
"num_input_tokens_seen": 2707136,
"step": 580
},
{
"epoch": 0.4410101771579344,
"grad_norm": 2.575732707977295,
"learning_rate": 4.423334927457198e-05,
"loss": 0.9314,
"num_input_tokens_seen": 2729808,
"step": 585
},
{
"epoch": 0.44477949491142105,
"grad_norm": 1.765871524810791,
"learning_rate": 4.413841420191532e-05,
"loss": 0.7105,
"num_input_tokens_seen": 2752992,
"step": 590
},
{
"epoch": 0.44854881266490765,
"grad_norm": 1.8366035223007202,
"learning_rate": 4.404280770443398e-05,
"loss": 0.894,
"num_input_tokens_seen": 2776480,
"step": 595
},
{
"epoch": 0.45231813041839425,
"grad_norm": 2.326671838760376,
"learning_rate": 4.3946533136249926e-05,
"loss": 0.8868,
"num_input_tokens_seen": 2799728,
"step": 600
},
{
"epoch": 0.4560874481718809,
"grad_norm": 1.8004709482192993,
"learning_rate": 4.384959387492277e-05,
"loss": 0.7679,
"num_input_tokens_seen": 2823008,
"step": 605
},
{
"epoch": 0.4598567659253675,
"grad_norm": 1.893351435661316,
"learning_rate": 4.37519933213313e-05,
"loss": 0.7895,
"num_input_tokens_seen": 2845984,
"step": 610
},
{
"epoch": 0.4636260836788541,
"grad_norm": 2.1309449672698975,
"learning_rate": 4.365373489955411e-05,
"loss": 0.8527,
"num_input_tokens_seen": 2869024,
"step": 615
},
{
"epoch": 0.46739540143234076,
"grad_norm": 2.4292616844177246,
"learning_rate": 4.355482205674951e-05,
"loss": 0.7533,
"num_input_tokens_seen": 2892240,
"step": 620
},
{
"epoch": 0.47116471918582736,
"grad_norm": 4.796603679656982,
"learning_rate": 4.3455258263034605e-05,
"loss": 0.8048,
"num_input_tokens_seen": 2915792,
"step": 625
},
{
"epoch": 0.47493403693931396,
"grad_norm": 2.2890474796295166,
"learning_rate": 4.33550470113635e-05,
"loss": 0.7072,
"num_input_tokens_seen": 2939488,
"step": 630
},
{
"epoch": 0.4787033546928006,
"grad_norm": 1.8012391328811646,
"learning_rate": 4.3254191817404804e-05,
"loss": 0.7911,
"num_input_tokens_seen": 2962992,
"step": 635
},
{
"epoch": 0.4824726724462872,
"grad_norm": 1.8014194965362549,
"learning_rate": 4.3152696219418295e-05,
"loss": 0.8293,
"num_input_tokens_seen": 2986544,
"step": 640
},
{
"epoch": 0.4862419901997738,
"grad_norm": 2.239366292953491,
"learning_rate": 4.305056377813075e-05,
"loss": 0.8835,
"num_input_tokens_seen": 3009984,
"step": 645
},
{
"epoch": 0.4900113079532605,
"grad_norm": 1.5583738088607788,
"learning_rate": 4.294779807661105e-05,
"loss": 0.7262,
"num_input_tokens_seen": 3033520,
"step": 650
},
{
"epoch": 0.4937806257067471,
"grad_norm": 1.7024521827697754,
"learning_rate": 4.2844402720144496e-05,
"loss": 0.8231,
"num_input_tokens_seen": 3057056,
"step": 655
},
{
"epoch": 0.4975499434602337,
"grad_norm": 3.108896493911743,
"learning_rate": 4.274038133610628e-05,
"loss": 0.8302,
"num_input_tokens_seen": 3080656,
"step": 660
},
{
"epoch": 0.5013192612137203,
"grad_norm": 3.166442394256592,
"learning_rate": 4.263573757383427e-05,
"loss": 0.8095,
"num_input_tokens_seen": 3103792,
"step": 665
},
{
"epoch": 0.5050885789672069,
"grad_norm": 2.218747854232788,
"learning_rate": 4.2530475104500956e-05,
"loss": 0.8756,
"num_input_tokens_seen": 3126976,
"step": 670
},
{
"epoch": 0.5088578967206936,
"grad_norm": 2.5289299488067627,
"learning_rate": 4.242459762098466e-05,
"loss": 0.7733,
"num_input_tokens_seen": 3150224,
"step": 675
},
{
"epoch": 0.5126272144741801,
"grad_norm": 1.674834132194519,
"learning_rate": 4.231810883773999e-05,
"loss": 0.7715,
"num_input_tokens_seen": 3173296,
"step": 680
},
{
"epoch": 0.5163965322276668,
"grad_norm": 1.7702105045318604,
"learning_rate": 4.2211012490667524e-05,
"loss": 0.6996,
"num_input_tokens_seen": 3196560,
"step": 685
},
{
"epoch": 0.5201658499811534,
"grad_norm": 2.7741544246673584,
"learning_rate": 4.2103312336982734e-05,
"loss": 0.8889,
"num_input_tokens_seen": 3220432,
"step": 690
},
{
"epoch": 0.52393516773464,
"grad_norm": 2.749328136444092,
"learning_rate": 4.19950121550842e-05,
"loss": 0.8717,
"num_input_tokens_seen": 3243712,
"step": 695
},
{
"epoch": 0.5277044854881267,
"grad_norm": 2.4472267627716064,
"learning_rate": 4.188611574442101e-05,
"loss": 0.6314,
"num_input_tokens_seen": 3266976,
"step": 700
},
{
"epoch": 0.5314738032416133,
"grad_norm": 2.1869001388549805,
"learning_rate": 4.177662692535952e-05,
"loss": 0.7517,
"num_input_tokens_seen": 3290656,
"step": 705
},
{
"epoch": 0.5352431209950999,
"grad_norm": 1.7907201051712036,
"learning_rate": 4.166654953904926e-05,
"loss": 0.8373,
"num_input_tokens_seen": 3313712,
"step": 710
},
{
"epoch": 0.5390124387485865,
"grad_norm": 1.8779516220092773,
"learning_rate": 4.155588744728826e-05,
"loss": 0.7025,
"num_input_tokens_seen": 3336880,
"step": 715
},
{
"epoch": 0.5427817565020732,
"grad_norm": 2.0460379123687744,
"learning_rate": 4.144464453238748e-05,
"loss": 0.6709,
"num_input_tokens_seen": 3360416,
"step": 720
},
{
"epoch": 0.5465510742555597,
"grad_norm": 2.096501350402832,
"learning_rate": 4.133282469703469e-05,
"loss": 0.6451,
"num_input_tokens_seen": 3383408,
"step": 725
},
{
"epoch": 0.5503203920090464,
"grad_norm": 2.441951036453247,
"learning_rate": 4.122043186415746e-05,
"loss": 0.754,
"num_input_tokens_seen": 3406752,
"step": 730
},
{
"epoch": 0.554089709762533,
"grad_norm": 2.059370517730713,
"learning_rate": 4.110746997678565e-05,
"loss": 0.8784,
"num_input_tokens_seen": 3429968,
"step": 735
},
{
"epoch": 0.5578590275160196,
"grad_norm": 1.969912052154541,
"learning_rate": 4.0993942997912984e-05,
"loss": 0.9384,
"num_input_tokens_seen": 3453136,
"step": 740
},
{
"epoch": 0.5616283452695062,
"grad_norm": 2.3595261573791504,
"learning_rate": 4.087985491035804e-05,
"loss": 0.7915,
"num_input_tokens_seen": 3476384,
"step": 745
},
{
"epoch": 0.5653976630229929,
"grad_norm": 2.187009572982788,
"learning_rate": 4.076520971662455e-05,
"loss": 0.8374,
"num_input_tokens_seen": 3500176,
"step": 750
},
{
"epoch": 0.5691669807764794,
"grad_norm": 1.7618275880813599,
"learning_rate": 4.065001143876097e-05,
"loss": 0.7302,
"num_input_tokens_seen": 3524048,
"step": 755
},
{
"epoch": 0.5729362985299661,
"grad_norm": 2.1269426345825195,
"learning_rate": 4.053426411821934e-05,
"loss": 0.754,
"num_input_tokens_seen": 3547056,
"step": 760
},
{
"epoch": 0.5767056162834527,
"grad_norm": 2.3369758129119873,
"learning_rate": 4.0417971815713584e-05,
"loss": 0.8208,
"num_input_tokens_seen": 3570608,
"step": 765
},
{
"epoch": 0.5804749340369393,
"grad_norm": 2.156280279159546,
"learning_rate": 4.030113861107693e-05,
"loss": 0.9635,
"num_input_tokens_seen": 3594192,
"step": 770
},
{
"epoch": 0.5842442517904259,
"grad_norm": 1.7685117721557617,
"learning_rate": 4.0183768603118886e-05,
"loss": 0.7606,
"num_input_tokens_seen": 3617600,
"step": 775
},
{
"epoch": 0.5880135695439126,
"grad_norm": 2.0940916538238525,
"learning_rate": 4.0065865909481417e-05,
"loss": 0.7283,
"num_input_tokens_seen": 3640416,
"step": 780
},
{
"epoch": 0.5917828872973991,
"grad_norm": 2.5771267414093018,
"learning_rate": 3.994743466649442e-05,
"loss": 0.6878,
"num_input_tokens_seen": 3663696,
"step": 785
},
{
"epoch": 0.5955522050508858,
"grad_norm": 2.336862087249756,
"learning_rate": 3.982847902903071e-05,
"loss": 0.8009,
"num_input_tokens_seen": 3686880,
"step": 790
},
{
"epoch": 0.5993215228043725,
"grad_norm": 2.91589093208313,
"learning_rate": 3.9709003170360176e-05,
"loss": 0.794,
"num_input_tokens_seen": 3710224,
"step": 795
},
{
"epoch": 0.603090840557859,
"grad_norm": 3.051378011703491,
"learning_rate": 3.958901128200344e-05,
"loss": 0.7532,
"num_input_tokens_seen": 3733536,
"step": 800
},
{
"epoch": 0.6068601583113457,
"grad_norm": 3.457322835922241,
"learning_rate": 3.946850757358475e-05,
"loss": 0.8581,
"num_input_tokens_seen": 3756688,
"step": 805
},
{
"epoch": 0.6106294760648323,
"grad_norm": 1.4198774099349976,
"learning_rate": 3.9347496272684325e-05,
"loss": 0.7889,
"num_input_tokens_seen": 3779504,
"step": 810
},
{
"epoch": 0.6143987938183189,
"grad_norm": 2.5762572288513184,
"learning_rate": 3.922598162469003e-05,
"loss": 0.7401,
"num_input_tokens_seen": 3802544,
"step": 815
},
{
"epoch": 0.6181681115718055,
"grad_norm": 2.5552995204925537,
"learning_rate": 3.910396789264845e-05,
"loss": 0.799,
"num_input_tokens_seen": 3825680,
"step": 820
},
{
"epoch": 0.6219374293252922,
"grad_norm": 1.8057173490524292,
"learning_rate": 3.8981459357115325e-05,
"loss": 0.8015,
"num_input_tokens_seen": 3849040,
"step": 825
},
{
"epoch": 0.6257067470787787,
"grad_norm": 1.8398315906524658,
"learning_rate": 3.885846031600536e-05,
"loss": 0.6905,
"num_input_tokens_seen": 3872320,
"step": 830
},
{
"epoch": 0.6294760648322654,
"grad_norm": 2.7629125118255615,
"learning_rate": 3.8734975084441466e-05,
"loss": 0.845,
"num_input_tokens_seen": 3895920,
"step": 835
},
{
"epoch": 0.633245382585752,
"grad_norm": 1.9969942569732666,
"learning_rate": 3.8611007994603365e-05,
"loss": 0.7317,
"num_input_tokens_seen": 3919360,
"step": 840
},
{
"epoch": 0.6370147003392386,
"grad_norm": 3.591285467147827,
"learning_rate": 3.8486563395575625e-05,
"loss": 0.724,
"num_input_tokens_seen": 3942736,
"step": 845
},
{
"epoch": 0.6407840180927252,
"grad_norm": 2.4714577198028564,
"learning_rate": 3.8361645653195026e-05,
"loss": 0.664,
"num_input_tokens_seen": 3966144,
"step": 850
},
{
"epoch": 0.6445533358462119,
"grad_norm": 3.081908702850342,
"learning_rate": 3.823625914989748e-05,
"loss": 0.5922,
"num_input_tokens_seen": 3989472,
"step": 855
},
{
"epoch": 0.6483226535996984,
"grad_norm": 3.5514636039733887,
"learning_rate": 3.811040828456421e-05,
"loss": 0.9465,
"num_input_tokens_seen": 4012656,
"step": 860
},
{
"epoch": 0.6520919713531851,
"grad_norm": 3.4660677909851074,
"learning_rate": 3.798409747236745e-05,
"loss": 0.7781,
"num_input_tokens_seen": 4035888,
"step": 865
},
{
"epoch": 0.6558612891066717,
"grad_norm": 3.08896541595459,
"learning_rate": 3.7857331144615574e-05,
"loss": 0.8309,
"num_input_tokens_seen": 4059504,
"step": 870
},
{
"epoch": 0.6596306068601583,
"grad_norm": 2.4300408363342285,
"learning_rate": 3.773011374859761e-05,
"loss": 0.7812,
"num_input_tokens_seen": 4083440,
"step": 875
},
{
"epoch": 0.6633999246136449,
"grad_norm": 3.5635619163513184,
"learning_rate": 3.7602449747427204e-05,
"loss": 0.7888,
"num_input_tokens_seen": 4106928,
"step": 880
},
{
"epoch": 0.6671692423671316,
"grad_norm": 1.9514297246932983,
"learning_rate": 3.747434361988608e-05,
"loss": 0.7584,
"num_input_tokens_seen": 4130336,
"step": 885
},
{
"epoch": 0.6709385601206181,
"grad_norm": 1.6948657035827637,
"learning_rate": 3.734579986026688e-05,
"loss": 0.6848,
"num_input_tokens_seen": 4153872,
"step": 890
},
{
"epoch": 0.6747078778741048,
"grad_norm": 1.9811207056045532,
"learning_rate": 3.7216822978215514e-05,
"loss": 0.8068,
"num_input_tokens_seen": 4177072,
"step": 895
},
{
"epoch": 0.6784771956275915,
"grad_norm": 3.7668049335479736,
"learning_rate": 3.7087417498572944e-05,
"loss": 0.8122,
"num_input_tokens_seen": 4200256,
"step": 900
},
{
"epoch": 0.682246513381078,
"grad_norm": 2.3684747219085693,
"learning_rate": 3.695758796121642e-05,
"loss": 0.5764,
"num_input_tokens_seen": 4223264,
"step": 905
},
{
"epoch": 0.6860158311345647,
"grad_norm": 2.597867250442505,
"learning_rate": 3.6827338920900254e-05,
"loss": 0.8898,
"num_input_tokens_seen": 4247024,
"step": 910
},
{
"epoch": 0.6897851488880513,
"grad_norm": 2.0299720764160156,
"learning_rate": 3.6696674947095984e-05,
"loss": 0.7741,
"num_input_tokens_seen": 4270800,
"step": 915
},
{
"epoch": 0.6935544666415379,
"grad_norm": 2.4363317489624023,
"learning_rate": 3.656560062383208e-05,
"loss": 0.8219,
"num_input_tokens_seen": 4294352,
"step": 920
},
{
"epoch": 0.6973237843950245,
"grad_norm": 1.752125859260559,
"learning_rate": 3.6434120549533135e-05,
"loss": 0.6813,
"num_input_tokens_seen": 4318208,
"step": 925
},
{
"epoch": 0.7010931021485112,
"grad_norm": 3.325110912322998,
"learning_rate": 3.6302239336858545e-05,
"loss": 0.6866,
"num_input_tokens_seen": 4341728,
"step": 930
},
{
"epoch": 0.7048624199019977,
"grad_norm": 1.9037253856658936,
"learning_rate": 3.6169961612540645e-05,
"loss": 0.8628,
"num_input_tokens_seen": 4365392,
"step": 935
},
{
"epoch": 0.7086317376554844,
"grad_norm": 1.9913270473480225,
"learning_rate": 3.603729201722244e-05,
"loss": 0.8978,
"num_input_tokens_seen": 4389184,
"step": 940
},
{
"epoch": 0.712401055408971,
"grad_norm": 2.668295383453369,
"learning_rate": 3.5904235205294776e-05,
"loss": 0.7572,
"num_input_tokens_seen": 4412272,
"step": 945
},
{
"epoch": 0.7161703731624576,
"grad_norm": 2.363468885421753,
"learning_rate": 3.5770795844733035e-05,
"loss": 0.7205,
"num_input_tokens_seen": 4435072,
"step": 950
},
{
"epoch": 0.7199396909159442,
"grad_norm": 2.3162343502044678,
"learning_rate": 3.5636978616933416e-05,
"loss": 0.7521,
"num_input_tokens_seen": 4458272,
"step": 955
},
{
"epoch": 0.7237090086694309,
"grad_norm": 1.9907945394515991,
"learning_rate": 3.550278821654866e-05,
"loss": 0.7205,
"num_input_tokens_seen": 4481056,
"step": 960
},
{
"epoch": 0.7274783264229174,
"grad_norm": 2.0076990127563477,
"learning_rate": 3.536822935132336e-05,
"loss": 0.6597,
"num_input_tokens_seen": 4504288,
"step": 965
},
{
"epoch": 0.7312476441764041,
"grad_norm": 1.6346359252929688,
"learning_rate": 3.5233306741928806e-05,
"loss": 0.7293,
"num_input_tokens_seen": 4527552,
"step": 970
},
{
"epoch": 0.7350169619298907,
"grad_norm": 2.227130651473999,
"learning_rate": 3.509802512179737e-05,
"loss": 0.7055,
"num_input_tokens_seen": 4551024,
"step": 975
},
{
"epoch": 0.7387862796833773,
"grad_norm": 3.003014087677002,
"learning_rate": 3.496238923695646e-05,
"loss": 0.763,
"num_input_tokens_seen": 4574464,
"step": 980
},
{
"epoch": 0.742555597436864,
"grad_norm": 2.196281671524048,
"learning_rate": 3.4826403845861986e-05,
"loss": 0.6831,
"num_input_tokens_seen": 4597600,
"step": 985
},
{
"epoch": 0.7463249151903506,
"grad_norm": 2.487607717514038,
"learning_rate": 3.4690073719231425e-05,
"loss": 0.6586,
"num_input_tokens_seen": 4621424,
"step": 990
},
{
"epoch": 0.7500942329438371,
"grad_norm": 2.686861753463745,
"learning_rate": 3.455340363987648e-05,
"loss": 0.7481,
"num_input_tokens_seen": 4644400,
"step": 995
},
{
"epoch": 0.7538635506973238,
"grad_norm": 2.511491060256958,
"learning_rate": 3.4416398402535284e-05,
"loss": 0.8084,
"num_input_tokens_seen": 4667568,
"step": 1000
},
{
"epoch": 0.7576328684508103,
"grad_norm": 3.698335886001587,
"learning_rate": 3.427906281370414e-05,
"loss": 0.8876,
"num_input_tokens_seen": 4690624,
"step": 1005
},
{
"epoch": 0.761402186204297,
"grad_norm": 4.032379150390625,
"learning_rate": 3.414140169146896e-05,
"loss": 0.8391,
"num_input_tokens_seen": 4714032,
"step": 1010
},
{
"epoch": 0.7651715039577837,
"grad_norm": 1.8396016359329224,
"learning_rate": 3.400341986533618e-05,
"loss": 0.6847,
"num_input_tokens_seen": 4737280,
"step": 1015
},
{
"epoch": 0.7689408217112702,
"grad_norm": 2.474912166595459,
"learning_rate": 3.386512217606339e-05,
"loss": 0.6742,
"num_input_tokens_seen": 4760848,
"step": 1020
},
{
"epoch": 0.7727101394647569,
"grad_norm": 3.3939056396484375,
"learning_rate": 3.3726513475489445e-05,
"loss": 0.8607,
"num_input_tokens_seen": 4783888,
"step": 1025
},
{
"epoch": 0.7764794572182435,
"grad_norm": 2.451918363571167,
"learning_rate": 3.3587598626364294e-05,
"loss": 0.6614,
"num_input_tokens_seen": 4807312,
"step": 1030
},
{
"epoch": 0.7802487749717301,
"grad_norm": 2.324479341506958,
"learning_rate": 3.344838250217833e-05,
"loss": 0.7078,
"num_input_tokens_seen": 4831088,
"step": 1035
},
{
"epoch": 0.7840180927252167,
"grad_norm": 2.647658586502075,
"learning_rate": 3.330886998699149e-05,
"loss": 0.7944,
"num_input_tokens_seen": 4854608,
"step": 1040
},
{
"epoch": 0.7877874104787034,
"grad_norm": 2.21478533744812,
"learning_rate": 3.316906597526186e-05,
"loss": 0.7436,
"num_input_tokens_seen": 4877648,
"step": 1045
},
{
"epoch": 0.7915567282321899,
"grad_norm": 2.0611255168914795,
"learning_rate": 3.302897537167397e-05,
"loss": 0.8234,
"num_input_tokens_seen": 4900672,
"step": 1050
},
{
"epoch": 0.7953260459856766,
"grad_norm": 2.0772039890289307,
"learning_rate": 3.288860309096671e-05,
"loss": 0.6463,
"num_input_tokens_seen": 4924192,
"step": 1055
},
{
"epoch": 0.7990953637391632,
"grad_norm": 3.051643133163452,
"learning_rate": 3.2747954057760965e-05,
"loss": 0.6872,
"num_input_tokens_seen": 4947600,
"step": 1060
},
{
"epoch": 0.8028646814926498,
"grad_norm": 2.400989532470703,
"learning_rate": 3.260703320638679e-05,
"loss": 0.6143,
"num_input_tokens_seen": 4971888,
"step": 1065
},
{
"epoch": 0.8066339992461364,
"grad_norm": 2.88338303565979,
"learning_rate": 3.246584548071034e-05,
"loss": 0.8022,
"num_input_tokens_seen": 4995056,
"step": 1070
},
{
"epoch": 0.8104033169996231,
"grad_norm": 3.0658576488494873,
"learning_rate": 3.232439583396036e-05,
"loss": 0.7144,
"num_input_tokens_seen": 5018208,
"step": 1075
},
{
"epoch": 0.8141726347531096,
"grad_norm": 2.397193193435669,
"learning_rate": 3.2182689228554517e-05,
"loss": 0.757,
"num_input_tokens_seen": 5041584,
"step": 1080
},
{
"epoch": 0.8179419525065963,
"grad_norm": 2.7260894775390625,
"learning_rate": 3.204073063592522e-05,
"loss": 0.729,
"num_input_tokens_seen": 5064352,
"step": 1085
},
{
"epoch": 0.821711270260083,
"grad_norm": 2.864222764968872,
"learning_rate": 3.189852503634523e-05,
"loss": 0.7441,
"num_input_tokens_seen": 5087712,
"step": 1090
},
{
"epoch": 0.8254805880135695,
"grad_norm": 2.9511120319366455,
"learning_rate": 3.1756077418752967e-05,
"loss": 0.8861,
"num_input_tokens_seen": 5111104,
"step": 1095
},
{
"epoch": 0.8292499057670562,
"grad_norm": 2.240609645843506,
"learning_rate": 3.1613392780577455e-05,
"loss": 0.7098,
"num_input_tokens_seen": 5134720,
"step": 1100
},
{
"epoch": 0.8330192235205428,
"grad_norm": 2.762382984161377,
"learning_rate": 3.147047612756302e-05,
"loss": 0.7099,
"num_input_tokens_seen": 5157744,
"step": 1105
},
{
"epoch": 0.8367885412740294,
"grad_norm": 2.2460415363311768,
"learning_rate": 3.132733247359366e-05,
"loss": 0.7648,
"num_input_tokens_seen": 5180976,
"step": 1110
},
{
"epoch": 0.840557859027516,
"grad_norm": 2.3306996822357178,
"learning_rate": 3.118396684051714e-05,
"loss": 0.7906,
"num_input_tokens_seen": 5204976,
"step": 1115
},
{
"epoch": 0.8443271767810027,
"grad_norm": 2.5235891342163086,
"learning_rate": 3.104038425796884e-05,
"loss": 0.6022,
"num_input_tokens_seen": 5228048,
"step": 1120
},
{
"epoch": 0.8480964945344892,
"grad_norm": 1.9855157136917114,
"learning_rate": 3.089658976319528e-05,
"loss": 0.8142,
"num_input_tokens_seen": 5251664,
"step": 1125
},
{
"epoch": 0.8518658122879759,
"grad_norm": 2.2459208965301514,
"learning_rate": 3.0752588400877405e-05,
"loss": 0.8263,
"num_input_tokens_seen": 5274976,
"step": 1130
},
{
"epoch": 0.8556351300414625,
"grad_norm": 2.2121737003326416,
"learning_rate": 3.060838522295361e-05,
"loss": 0.8581,
"num_input_tokens_seen": 5298352,
"step": 1135
},
{
"epoch": 0.8594044477949491,
"grad_norm": 2.092641592025757,
"learning_rate": 3.0463985288442475e-05,
"loss": 0.7224,
"num_input_tokens_seen": 5321440,
"step": 1140
},
{
"epoch": 0.8631737655484357,
"grad_norm": 2.473989963531494,
"learning_rate": 3.031939366326535e-05,
"loss": 0.7379,
"num_input_tokens_seen": 5344992,
"step": 1145
},
{
"epoch": 0.8669430833019224,
"grad_norm": 2.082732677459717,
"learning_rate": 3.0174615420068563e-05,
"loss": 0.6864,
"num_input_tokens_seen": 5368448,
"step": 1150
},
{
"epoch": 0.8707124010554089,
"grad_norm": 2.066802501678467,
"learning_rate": 3.0029655638045496e-05,
"loss": 0.6144,
"num_input_tokens_seen": 5391568,
"step": 1155
},
{
"epoch": 0.8744817188088956,
"grad_norm": 1.7655906677246094,
"learning_rate": 2.9884519402758342e-05,
"loss": 0.6818,
"num_input_tokens_seen": 5414992,
"step": 1160
},
{
"epoch": 0.8782510365623822,
"grad_norm": 2.6639342308044434,
"learning_rate": 2.9739211805959783e-05,
"loss": 0.8296,
"num_input_tokens_seen": 5438368,
"step": 1165
},
{
"epoch": 0.8820203543158688,
"grad_norm": 2.0490756034851074,
"learning_rate": 2.9593737945414264e-05,
"loss": 0.7764,
"num_input_tokens_seen": 5461504,
"step": 1170
},
{
"epoch": 0.8857896720693554,
"grad_norm": 2.066798448562622,
"learning_rate": 2.9448102924719207e-05,
"loss": 0.7245,
"num_input_tokens_seen": 5484992,
"step": 1175
},
{
"epoch": 0.8895589898228421,
"grad_norm": 2.2488503456115723,
"learning_rate": 2.9302311853125942e-05,
"loss": 0.8319,
"num_input_tokens_seen": 5508480,
"step": 1180
},
{
"epoch": 0.8933283075763286,
"grad_norm": 3.150104284286499,
"learning_rate": 2.9156369845360467e-05,
"loss": 0.6652,
"num_input_tokens_seen": 5531520,
"step": 1185
},
{
"epoch": 0.8970976253298153,
"grad_norm": 2.160212755203247,
"learning_rate": 2.9010282021444008e-05,
"loss": 0.5858,
"num_input_tokens_seen": 5554480,
"step": 1190
},
{
"epoch": 0.900866943083302,
"grad_norm": 2.795851707458496,
"learning_rate": 2.8864053506513405e-05,
"loss": 0.6248,
"num_input_tokens_seen": 5577888,
"step": 1195
},
{
"epoch": 0.9046362608367885,
"grad_norm": 2.5486669540405273,
"learning_rate": 2.8717689430641292e-05,
"loss": 0.8447,
"num_input_tokens_seen": 5601424,
"step": 1200
},
{
"epoch": 0.9084055785902752,
"grad_norm": 2.2841227054595947,
"learning_rate": 2.857119492865613e-05,
"loss": 0.6355,
"num_input_tokens_seen": 5624880,
"step": 1205
},
{
"epoch": 0.9121748963437618,
"grad_norm": 2.077192544937134,
"learning_rate": 2.842457513996207e-05,
"loss": 0.5075,
"num_input_tokens_seen": 5648160,
"step": 1210
},
{
"epoch": 0.9159442140972484,
"grad_norm": 2.1494250297546387,
"learning_rate": 2.8277835208358637e-05,
"loss": 0.5762,
"num_input_tokens_seen": 5671280,
"step": 1215
},
{
"epoch": 0.919713531850735,
"grad_norm": 2.5202853679656982,
"learning_rate": 2.813098028186028e-05,
"loss": 0.666,
"num_input_tokens_seen": 5694352,
"step": 1220
},
{
"epoch": 0.9234828496042217,
"grad_norm": 3.3707363605499268,
"learning_rate": 2.798401551251576e-05,
"loss": 0.6056,
"num_input_tokens_seen": 5717536,
"step": 1225
},
{
"epoch": 0.9272521673577082,
"grad_norm": 3.4344711303710938,
"learning_rate": 2.7836946056227426e-05,
"loss": 0.8095,
"num_input_tokens_seen": 5740416,
"step": 1230
},
{
"epoch": 0.9310214851111949,
"grad_norm": 2.2710204124450684,
"learning_rate": 2.7689777072570287e-05,
"loss": 0.7501,
"num_input_tokens_seen": 5763712,
"step": 1235
},
{
"epoch": 0.9347908028646815,
"grad_norm": 2.100459337234497,
"learning_rate": 2.7542513724611057e-05,
"loss": 0.5595,
"num_input_tokens_seen": 5787168,
"step": 1240
},
{
"epoch": 0.9385601206181681,
"grad_norm": 3.0176243782043457,
"learning_rate": 2.739516117872697e-05,
"loss": 0.694,
"num_input_tokens_seen": 5810704,
"step": 1245
},
{
"epoch": 0.9423294383716547,
"grad_norm": 2.814894676208496,
"learning_rate": 2.7247724604424557e-05,
"loss": 0.8521,
"num_input_tokens_seen": 5834192,
"step": 1250
},
{
"epoch": 0.9460987561251414,
"grad_norm": 1.729038953781128,
"learning_rate": 2.71002091741583e-05,
"loss": 0.7794,
"num_input_tokens_seen": 5857344,
"step": 1255
},
{
"epoch": 0.9498680738786279,
"grad_norm": 2.401945114135742,
"learning_rate": 2.695262006314912e-05,
"loss": 0.7721,
"num_input_tokens_seen": 5880448,
"step": 1260
},
{
"epoch": 0.9536373916321146,
"grad_norm": 2.005068302154541,
"learning_rate": 2.680496244920287e-05,
"loss": 0.6403,
"num_input_tokens_seen": 5903200,
"step": 1265
},
{
"epoch": 0.9574067093856012,
"grad_norm": 1.8433932065963745,
"learning_rate": 2.665724151252868e-05,
"loss": 0.738,
"num_input_tokens_seen": 5926272,
"step": 1270
},
{
"epoch": 0.9611760271390878,
"grad_norm": 2.32893705368042,
"learning_rate": 2.6509462435557152e-05,
"loss": 0.6469,
"num_input_tokens_seen": 5949680,
"step": 1275
},
{
"epoch": 0.9649453448925744,
"grad_norm": 2.5352189540863037,
"learning_rate": 2.6361630402758648e-05,
"loss": 0.6953,
"num_input_tokens_seen": 5973088,
"step": 1280
},
{
"epoch": 0.9687146626460611,
"grad_norm": 2.217909574508667,
"learning_rate": 2.6213750600461334e-05,
"loss": 0.6365,
"num_input_tokens_seen": 5996688,
"step": 1285
},
{
"epoch": 0.9724839803995476,
"grad_norm": 1.218327283859253,
"learning_rate": 2.6065828216669253e-05,
"loss": 0.6691,
"num_input_tokens_seen": 6019744,
"step": 1290
},
{
"epoch": 0.9762532981530343,
"grad_norm": 1.710153579711914,
"learning_rate": 2.5917868440880317e-05,
"loss": 0.64,
"num_input_tokens_seen": 6042640,
"step": 1295
},
{
"epoch": 0.980022615906521,
"grad_norm": 2.1945626735687256,
"learning_rate": 2.5769876463904265e-05,
"loss": 0.6674,
"num_input_tokens_seen": 6066112,
"step": 1300
},
{
"epoch": 0.9837919336600075,
"grad_norm": 2.905019760131836,
"learning_rate": 2.5621857477680506e-05,
"loss": 0.6288,
"num_input_tokens_seen": 6089296,
"step": 1305
},
{
"epoch": 0.9875612514134942,
"grad_norm": 2.2408697605133057,
"learning_rate": 2.5473816675096017e-05,
"loss": 0.8477,
"num_input_tokens_seen": 6112784,
"step": 1310
},
{
"epoch": 0.9913305691669808,
"grad_norm": 2.5332565307617188,
"learning_rate": 2.5325759249803154e-05,
"loss": 0.7663,
"num_input_tokens_seen": 6136048,
"step": 1315
},
{
"epoch": 0.9950998869204674,
"grad_norm": 2.0274627208709717,
"learning_rate": 2.517769039603744e-05,
"loss": 0.861,
"num_input_tokens_seen": 6159920,
"step": 1320
},
{
"epoch": 0.998869204673954,
"grad_norm": 2.1323111057281494,
"learning_rate": 2.5029615308435338e-05,
"loss": 0.8545,
"num_input_tokens_seen": 6183024,
"step": 1325
},
{
"epoch": 1.0026385224274406,
"grad_norm": 1.8671432733535767,
"learning_rate": 2.4881539181851986e-05,
"loss": 0.6469,
"num_input_tokens_seen": 6206224,
"step": 1330
},
{
"epoch": 1.0064078401809273,
"grad_norm": 1.7728124856948853,
"learning_rate": 2.4733467211179008e-05,
"loss": 0.7507,
"num_input_tokens_seen": 6229936,
"step": 1335
},
{
"epoch": 1.0101771579344139,
"grad_norm": 3.0307295322418213,
"learning_rate": 2.4585404591162218e-05,
"loss": 0.7181,
"num_input_tokens_seen": 6253152,
"step": 1340
},
{
"epoch": 1.0139464756879004,
"grad_norm": 1.6619493961334229,
"learning_rate": 2.4437356516219358e-05,
"loss": 0.627,
"num_input_tokens_seen": 6276208,
"step": 1345
},
{
"epoch": 1.0177157934413872,
"grad_norm": 2.804950714111328,
"learning_rate": 2.4289328180257926e-05,
"loss": 0.7266,
"num_input_tokens_seen": 6299408,
"step": 1350
},
{
"epoch": 1.0214851111948737,
"grad_norm": 1.9919047355651855,
"learning_rate": 2.4141324776492915e-05,
"loss": 0.5684,
"num_input_tokens_seen": 6323024,
"step": 1355
},
{
"epoch": 1.0252544289483603,
"grad_norm": 5.080316543579102,
"learning_rate": 2.399335149726463e-05,
"loss": 0.6196,
"num_input_tokens_seen": 6345952,
"step": 1360
},
{
"epoch": 1.029023746701847,
"grad_norm": 2.5290539264678955,
"learning_rate": 2.3845413533856517e-05,
"loss": 0.5296,
"num_input_tokens_seen": 6369856,
"step": 1365
},
{
"epoch": 1.0327930644553336,
"grad_norm": 2.438260555267334,
"learning_rate": 2.3697516076313066e-05,
"loss": 0.7537,
"num_input_tokens_seen": 6393136,
"step": 1370
},
{
"epoch": 1.0365623822088201,
"grad_norm": 3.0706968307495117,
"learning_rate": 2.354966431325773e-05,
"loss": 0.5909,
"num_input_tokens_seen": 6416736,
"step": 1375
},
{
"epoch": 1.040331699962307,
"grad_norm": 2.409477472305298,
"learning_rate": 2.3401863431710863e-05,
"loss": 0.7042,
"num_input_tokens_seen": 6440048,
"step": 1380
},
{
"epoch": 1.0441010177157934,
"grad_norm": 2.7514872550964355,
"learning_rate": 2.325411861690776e-05,
"loss": 0.7301,
"num_input_tokens_seen": 6463504,
"step": 1385
},
{
"epoch": 1.04787033546928,
"grad_norm": 3.314840793609619,
"learning_rate": 2.3106435052116764e-05,
"loss": 0.7472,
"num_input_tokens_seen": 6486608,
"step": 1390
},
{
"epoch": 1.0516396532227668,
"grad_norm": 3.0806994438171387,
"learning_rate": 2.2958817918457412e-05,
"loss": 0.5799,
"num_input_tokens_seen": 6509760,
"step": 1395
},
{
"epoch": 1.0554089709762533,
"grad_norm": 3.1639201641082764,
"learning_rate": 2.2811272394718647e-05,
"loss": 0.6512,
"num_input_tokens_seen": 6532992,
"step": 1400
},
{
"epoch": 1.0591782887297398,
"grad_norm": 2.3345203399658203,
"learning_rate": 2.2663803657177173e-05,
"loss": 0.748,
"num_input_tokens_seen": 6556384,
"step": 1405
},
{
"epoch": 1.0629476064832266,
"grad_norm": 6.463221549987793,
"learning_rate": 2.2516416879415824e-05,
"loss": 0.6635,
"num_input_tokens_seen": 6580336,
"step": 1410
},
{
"epoch": 1.0667169242367132,
"grad_norm": 2.5758097171783447,
"learning_rate": 2.2369117232142077e-05,
"loss": 0.5894,
"num_input_tokens_seen": 6603584,
"step": 1415
},
{
"epoch": 1.0704862419901997,
"grad_norm": 2.079308271408081,
"learning_rate": 2.2221909883006646e-05,
"loss": 0.5952,
"num_input_tokens_seen": 6626864,
"step": 1420
},
{
"epoch": 1.0742555597436865,
"grad_norm": 2.637446403503418,
"learning_rate": 2.20747999964222e-05,
"loss": 0.7849,
"num_input_tokens_seen": 6650480,
"step": 1425
},
{
"epoch": 1.078024877497173,
"grad_norm": 2.275803565979004,
"learning_rate": 2.192779273338215e-05,
"loss": 0.7059,
"num_input_tokens_seen": 6673808,
"step": 1430
},
{
"epoch": 1.0817941952506596,
"grad_norm": 3.0671417713165283,
"learning_rate": 2.1780893251279626e-05,
"loss": 0.7389,
"num_input_tokens_seen": 6697232,
"step": 1435
},
{
"epoch": 1.0855635130041463,
"grad_norm": 2.4702844619750977,
"learning_rate": 2.163410670372652e-05,
"loss": 0.5858,
"num_input_tokens_seen": 6721104,
"step": 1440
},
{
"epoch": 1.0893328307576329,
"grad_norm": 2.3466484546661377,
"learning_rate": 2.148743824037269e-05,
"loss": 0.6743,
"num_input_tokens_seen": 6744624,
"step": 1445
},
{
"epoch": 1.0931021485111194,
"grad_norm": 1.9515085220336914,
"learning_rate": 2.1340893006725288e-05,
"loss": 0.5893,
"num_input_tokens_seen": 6768000,
"step": 1450
},
{
"epoch": 1.0968714662646062,
"grad_norm": 2.264751434326172,
"learning_rate": 2.1194476143968258e-05,
"loss": 0.653,
"num_input_tokens_seen": 6791296,
"step": 1455
},
{
"epoch": 1.1006407840180927,
"grad_norm": 3.2526028156280518,
"learning_rate": 2.1048192788781977e-05,
"loss": 0.6829,
"num_input_tokens_seen": 6814800,
"step": 1460
},
{
"epoch": 1.1044101017715793,
"grad_norm": 3.7399232387542725,
"learning_rate": 2.090204807316301e-05,
"loss": 0.5642,
"num_input_tokens_seen": 6838128,
"step": 1465
},
{
"epoch": 1.108179419525066,
"grad_norm": 4.240833282470703,
"learning_rate": 2.0756047124244095e-05,
"loss": 0.6401,
"num_input_tokens_seen": 6861312,
"step": 1470
},
{
"epoch": 1.1119487372785526,
"grad_norm": 3.439541816711426,
"learning_rate": 2.0610195064114273e-05,
"loss": 0.6188,
"num_input_tokens_seen": 6884128,
"step": 1475
},
{
"epoch": 1.1157180550320391,
"grad_norm": 3.2410943508148193,
"learning_rate": 2.0464497009639176e-05,
"loss": 0.5929,
"num_input_tokens_seen": 6908336,
"step": 1480
},
{
"epoch": 1.119487372785526,
"grad_norm": 2.0210845470428467,
"learning_rate": 2.0318958072281517e-05,
"loss": 0.7596,
"num_input_tokens_seen": 6931392,
"step": 1485
},
{
"epoch": 1.1232566905390124,
"grad_norm": 3.183546781539917,
"learning_rate": 2.017358335792178e-05,
"loss": 0.6313,
"num_input_tokens_seen": 6954800,
"step": 1490
},
{
"epoch": 1.127026008292499,
"grad_norm": 3.0664725303649902,
"learning_rate": 2.0028377966679092e-05,
"loss": 0.6371,
"num_input_tokens_seen": 6978384,
"step": 1495
},
{
"epoch": 1.1307953260459858,
"grad_norm": 2.9559929370880127,
"learning_rate": 1.9883346992732256e-05,
"loss": 0.6982,
"num_input_tokens_seen": 7001632,
"step": 1500
},
{
"epoch": 1.1345646437994723,
"grad_norm": 3.3636457920074463,
"learning_rate": 1.9738495524141098e-05,
"loss": 0.7299,
"num_input_tokens_seen": 7024768,
"step": 1505
},
{
"epoch": 1.1383339615529589,
"grad_norm": 2.8605830669403076,
"learning_rate": 1.9593828642667928e-05,
"loss": 0.6548,
"num_input_tokens_seen": 7047936,
"step": 1510
},
{
"epoch": 1.1421032793064456,
"grad_norm": 4.070641040802002,
"learning_rate": 1.944935142359926e-05,
"loss": 0.8541,
"num_input_tokens_seen": 7070672,
"step": 1515
},
{
"epoch": 1.1458725970599322,
"grad_norm": 2.0054848194122314,
"learning_rate": 1.9305068935567767e-05,
"loss": 0.6746,
"num_input_tokens_seen": 7093616,
"step": 1520
},
{
"epoch": 1.1496419148134187,
"grad_norm": 4.216817855834961,
"learning_rate": 1.9160986240374445e-05,
"loss": 0.7696,
"num_input_tokens_seen": 7116560,
"step": 1525
},
{
"epoch": 1.1534112325669055,
"grad_norm": 3.155444860458374,
"learning_rate": 1.9017108392811065e-05,
"loss": 0.7202,
"num_input_tokens_seen": 7140224,
"step": 1530
},
{
"epoch": 1.157180550320392,
"grad_norm": 2.316871166229248,
"learning_rate": 1.887344044048278e-05,
"loss": 0.7255,
"num_input_tokens_seen": 7163696,
"step": 1535
},
{
"epoch": 1.1609498680738786,
"grad_norm": 3.767616033554077,
"learning_rate": 1.8729987423631128e-05,
"loss": 0.5598,
"num_input_tokens_seen": 7187376,
"step": 1540
},
{
"epoch": 1.1647191858273653,
"grad_norm": 3.7264201641082764,
"learning_rate": 1.8586754374957112e-05,
"loss": 0.6163,
"num_input_tokens_seen": 7210576,
"step": 1545
},
{
"epoch": 1.1684885035808519,
"grad_norm": 1.6776454448699951,
"learning_rate": 1.8443746319444717e-05,
"loss": 0.6024,
"num_input_tokens_seen": 7233824,
"step": 1550
},
{
"epoch": 1.1722578213343384,
"grad_norm": 2.1953823566436768,
"learning_rate": 1.830096827418456e-05,
"loss": 0.7974,
"num_input_tokens_seen": 7257248,
"step": 1555
},
{
"epoch": 1.1760271390878252,
"grad_norm": 2.4573707580566406,
"learning_rate": 1.815842524819793e-05,
"loss": 0.6738,
"num_input_tokens_seen": 7281040,
"step": 1560
},
{
"epoch": 1.1797964568413117,
"grad_norm": 2.82814884185791,
"learning_rate": 1.8016122242261024e-05,
"loss": 0.7753,
"num_input_tokens_seen": 7304592,
"step": 1565
},
{
"epoch": 1.1835657745947983,
"grad_norm": 3.2760045528411865,
"learning_rate": 1.787406424872952e-05,
"loss": 0.6555,
"num_input_tokens_seen": 7327840,
"step": 1570
},
{
"epoch": 1.187335092348285,
"grad_norm": 2.3789122104644775,
"learning_rate": 1.7732256251363433e-05,
"loss": 0.5336,
"num_input_tokens_seen": 7351024,
"step": 1575
},
{
"epoch": 1.1911044101017716,
"grad_norm": 2.9896657466888428,
"learning_rate": 1.7590703225152266e-05,
"loss": 0.5507,
"num_input_tokens_seen": 7373824,
"step": 1580
},
{
"epoch": 1.1948737278552581,
"grad_norm": 2.1554269790649414,
"learning_rate": 1.744941013614047e-05,
"loss": 0.5809,
"num_input_tokens_seen": 7397648,
"step": 1585
},
{
"epoch": 1.198643045608745,
"grad_norm": 2.7246153354644775,
"learning_rate": 1.7308381941253253e-05,
"loss": 0.7303,
"num_input_tokens_seen": 7421104,
"step": 1590
},
{
"epoch": 1.2024123633622315,
"grad_norm": 2.7653276920318604,
"learning_rate": 1.716762358812263e-05,
"loss": 0.6493,
"num_input_tokens_seen": 7444128,
"step": 1595
},
{
"epoch": 1.206181681115718,
"grad_norm": 2.1696999073028564,
"learning_rate": 1.7027140014913897e-05,
"loss": 0.5308,
"num_input_tokens_seen": 7467152,
"step": 1600
},
{
"epoch": 1.2099509988692048,
"grad_norm": 2.3543174266815186,
"learning_rate": 1.6886936150152325e-05,
"loss": 0.6573,
"num_input_tokens_seen": 7490320,
"step": 1605
},
{
"epoch": 1.2137203166226913,
"grad_norm": 3.704310178756714,
"learning_rate": 1.674701691255034e-05,
"loss": 0.6721,
"num_input_tokens_seen": 7513312,
"step": 1610
},
{
"epoch": 1.2174896343761779,
"grad_norm": 1.757002830505371,
"learning_rate": 1.6607387210834887e-05,
"loss": 0.7255,
"num_input_tokens_seen": 7536752,
"step": 1615
},
{
"epoch": 1.2212589521296646,
"grad_norm": 3.143267869949341,
"learning_rate": 1.646805194357524e-05,
"loss": 0.6997,
"num_input_tokens_seen": 7559872,
"step": 1620
},
{
"epoch": 1.2250282698831512,
"grad_norm": 2.8911032676696777,
"learning_rate": 1.6329015999011183e-05,
"loss": 0.7311,
"num_input_tokens_seen": 7583456,
"step": 1625
},
{
"epoch": 1.2287975876366377,
"grad_norm": 2.385547399520874,
"learning_rate": 1.6190284254881466e-05,
"loss": 0.6106,
"num_input_tokens_seen": 7606608,
"step": 1630
},
{
"epoch": 1.2325669053901245,
"grad_norm": 2.5071749687194824,
"learning_rate": 1.6051861578252718e-05,
"loss": 0.6203,
"num_input_tokens_seen": 7629744,
"step": 1635
},
{
"epoch": 1.236336223143611,
"grad_norm": 2.8049020767211914,
"learning_rate": 1.5913752825348675e-05,
"loss": 0.611,
"num_input_tokens_seen": 7653200,
"step": 1640
},
{
"epoch": 1.2401055408970976,
"grad_norm": 2.7031116485595703,
"learning_rate": 1.5775962841379818e-05,
"loss": 0.5812,
"num_input_tokens_seen": 7676352,
"step": 1645
},
{
"epoch": 1.2438748586505843,
"grad_norm": 3.705801486968994,
"learning_rate": 1.5638496460373413e-05,
"loss": 0.7053,
"num_input_tokens_seen": 7699488,
"step": 1650
},
{
"epoch": 1.2476441764040709,
"grad_norm": 4.845921516418457,
"learning_rate": 1.5501358505003874e-05,
"loss": 0.6876,
"num_input_tokens_seen": 7722720,
"step": 1655
},
{
"epoch": 1.2514134941575574,
"grad_norm": 3.630434513092041,
"learning_rate": 1.5364553786423623e-05,
"loss": 0.6504,
"num_input_tokens_seen": 7746448,
"step": 1660
},
{
"epoch": 1.2551828119110442,
"grad_norm": 2.0619561672210693,
"learning_rate": 1.5228087104094261e-05,
"loss": 0.5559,
"num_input_tokens_seen": 7769824,
"step": 1665
},
{
"epoch": 1.2589521296645307,
"grad_norm": 2.3254404067993164,
"learning_rate": 1.5091963245618224e-05,
"loss": 0.6128,
"num_input_tokens_seen": 7793072,
"step": 1670
},
{
"epoch": 1.2627214474180173,
"grad_norm": 2.9495444297790527,
"learning_rate": 1.495618698657078e-05,
"loss": 0.7294,
"num_input_tokens_seen": 7816448,
"step": 1675
},
{
"epoch": 1.266490765171504,
"grad_norm": 3.726112127304077,
"learning_rate": 1.482076309033254e-05,
"loss": 0.7042,
"num_input_tokens_seen": 7839712,
"step": 1680
},
{
"epoch": 1.2702600829249906,
"grad_norm": 2.721297264099121,
"learning_rate": 1.4685696307922312e-05,
"loss": 0.673,
"num_input_tokens_seen": 7863504,
"step": 1685
},
{
"epoch": 1.2740294006784771,
"grad_norm": 2.8665177822113037,
"learning_rate": 1.4550991377830426e-05,
"loss": 0.6719,
"num_input_tokens_seen": 7886720,
"step": 1690
},
{
"epoch": 1.277798718431964,
"grad_norm": 4.013219356536865,
"learning_rate": 1.4416653025852498e-05,
"loss": 0.5221,
"num_input_tokens_seen": 7910032,
"step": 1695
},
{
"epoch": 1.2815680361854505,
"grad_norm": 2.000631093978882,
"learning_rate": 1.4282685964923642e-05,
"loss": 0.576,
"num_input_tokens_seen": 7933408,
"step": 1700
},
{
"epoch": 1.285337353938937,
"grad_norm": 3.1721489429473877,
"learning_rate": 1.4149094894953157e-05,
"loss": 0.7068,
"num_input_tokens_seen": 7956400,
"step": 1705
},
{
"epoch": 1.2891066716924238,
"grad_norm": 2.824788808822632,
"learning_rate": 1.4015884502659573e-05,
"loss": 0.7079,
"num_input_tokens_seen": 7979536,
"step": 1710
},
{
"epoch": 1.2928759894459103,
"grad_norm": 3.540363311767578,
"learning_rate": 1.3883059461406294e-05,
"loss": 0.6605,
"num_input_tokens_seen": 8002992,
"step": 1715
},
{
"epoch": 1.2966453071993969,
"grad_norm": 2.856283187866211,
"learning_rate": 1.3750624431037581e-05,
"loss": 0.7572,
"num_input_tokens_seen": 8026448,
"step": 1720
},
{
"epoch": 1.3004146249528836,
"grad_norm": 3.1001534461975098,
"learning_rate": 1.3618584057715144e-05,
"loss": 0.7231,
"num_input_tokens_seen": 8049392,
"step": 1725
},
{
"epoch": 1.3041839427063702,
"grad_norm": 3.295738697052002,
"learning_rate": 1.3486942973755101e-05,
"loss": 0.5847,
"num_input_tokens_seen": 8072992,
"step": 1730
},
{
"epoch": 1.3079532604598567,
"grad_norm": 3.7648136615753174,
"learning_rate": 1.3355705797465462e-05,
"loss": 0.4926,
"num_input_tokens_seen": 8096464,
"step": 1735
},
{
"epoch": 1.3117225782133435,
"grad_norm": 2.8087363243103027,
"learning_rate": 1.3224877132984132e-05,
"loss": 0.7734,
"num_input_tokens_seen": 8120048,
"step": 1740
},
{
"epoch": 1.31549189596683,
"grad_norm": 2.7877838611602783,
"learning_rate": 1.3094461570117356e-05,
"loss": 0.52,
"num_input_tokens_seen": 8143104,
"step": 1745
},
{
"epoch": 1.3192612137203166,
"grad_norm": 3.363126039505005,
"learning_rate": 1.296446368417871e-05,
"loss": 0.5911,
"num_input_tokens_seen": 8166624,
"step": 1750
},
{
"epoch": 1.3230305314738033,
"grad_norm": 4.23477029800415,
"learning_rate": 1.2834888035828596e-05,
"loss": 0.682,
"num_input_tokens_seen": 8190800,
"step": 1755
},
{
"epoch": 1.3267998492272899,
"grad_norm": 2.0776336193084717,
"learning_rate": 1.2705739170914238e-05,
"loss": 0.4787,
"num_input_tokens_seen": 8213840,
"step": 1760
},
{
"epoch": 1.3305691669807764,
"grad_norm": 3.8550596237182617,
"learning_rate": 1.2577021620310192e-05,
"loss": 0.6678,
"num_input_tokens_seen": 8237840,
"step": 1765
},
{
"epoch": 1.3343384847342632,
"grad_norm": 3.1489477157592773,
"learning_rate": 1.2448739899759398e-05,
"loss": 0.7067,
"num_input_tokens_seen": 8261216,
"step": 1770
},
{
"epoch": 1.3381078024877497,
"grad_norm": 3.930402994155884,
"learning_rate": 1.232089850971477e-05,
"loss": 0.619,
"num_input_tokens_seen": 8284304,
"step": 1775
},
{
"epoch": 1.3418771202412363,
"grad_norm": 3.715315818786621,
"learning_rate": 1.2193501935181264e-05,
"loss": 0.571,
"num_input_tokens_seen": 8307360,
"step": 1780
},
{
"epoch": 1.345646437994723,
"grad_norm": 2.5447168350219727,
"learning_rate": 1.2066554645558578e-05,
"loss": 0.5191,
"num_input_tokens_seen": 8330480,
"step": 1785
},
{
"epoch": 1.3494157557482096,
"grad_norm": 2.3495020866394043,
"learning_rate": 1.1940061094484365e-05,
"loss": 0.545,
"num_input_tokens_seen": 8354208,
"step": 1790
},
{
"epoch": 1.3531850735016961,
"grad_norm": 3.82429838180542,
"learning_rate": 1.181402571967793e-05,
"loss": 0.5501,
"num_input_tokens_seen": 8377936,
"step": 1795
},
{
"epoch": 1.356954391255183,
"grad_norm": 2.5798404216766357,
"learning_rate": 1.1688452942784591e-05,
"loss": 0.5564,
"num_input_tokens_seen": 8401120,
"step": 1800
},
{
"epoch": 1.3607237090086695,
"grad_norm": 2.9661645889282227,
"learning_rate": 1.156334716922052e-05,
"loss": 0.6894,
"num_input_tokens_seen": 8424704,
"step": 1805
},
{
"epoch": 1.364493026762156,
"grad_norm": 3.9135797023773193,
"learning_rate": 1.1438712788018233e-05,
"loss": 0.6809,
"num_input_tokens_seen": 8448784,
"step": 1810
},
{
"epoch": 1.3682623445156428,
"grad_norm": 3.126460552215576,
"learning_rate": 1.1314554171672578e-05,
"loss": 0.7183,
"num_input_tokens_seen": 8471888,
"step": 1815
},
{
"epoch": 1.3720316622691293,
"grad_norm": 2.595989227294922,
"learning_rate": 1.1190875675987356e-05,
"loss": 0.6292,
"num_input_tokens_seen": 8495168,
"step": 1820
},
{
"epoch": 1.3758009800226159,
"grad_norm": 3.0311925411224365,
"learning_rate": 1.1067681639922486e-05,
"loss": 0.6579,
"num_input_tokens_seen": 8518864,
"step": 1825
},
{
"epoch": 1.3795702977761026,
"grad_norm": 2.8372302055358887,
"learning_rate": 1.0944976385441821e-05,
"loss": 0.6564,
"num_input_tokens_seen": 8541904,
"step": 1830
},
{
"epoch": 1.3833396155295892,
"grad_norm": 2.3767635822296143,
"learning_rate": 1.0822764217361462e-05,
"loss": 0.5748,
"num_input_tokens_seen": 8565152,
"step": 1835
},
{
"epoch": 1.3871089332830757,
"grad_norm": 3.5401554107666016,
"learning_rate": 1.0701049423198794e-05,
"loss": 0.7318,
"num_input_tokens_seen": 8588448,
"step": 1840
},
{
"epoch": 1.3908782510365625,
"grad_norm": 2.8969168663024902,
"learning_rate": 1.0579836273022045e-05,
"loss": 0.6908,
"num_input_tokens_seen": 8611520,
"step": 1845
},
{
"epoch": 1.394647568790049,
"grad_norm": 2.5562503337860107,
"learning_rate": 1.0459129019300476e-05,
"loss": 0.6769,
"num_input_tokens_seen": 8634672,
"step": 1850
},
{
"epoch": 1.3984168865435356,
"grad_norm": 2.2505788803100586,
"learning_rate": 1.03389318967552e-05,
"loss": 0.6782,
"num_input_tokens_seen": 8657792,
"step": 1855
},
{
"epoch": 1.4021862042970223,
"grad_norm": 3.3430016040802,
"learning_rate": 1.021924912221062e-05,
"loss": 0.6511,
"num_input_tokens_seen": 8681120,
"step": 1860
},
{
"epoch": 1.4059555220505089,
"grad_norm": 2.8084487915039062,
"learning_rate": 1.0100084894446455e-05,
"loss": 0.689,
"num_input_tokens_seen": 8704096,
"step": 1865
},
{
"epoch": 1.4097248398039954,
"grad_norm": 3.9685423374176025,
"learning_rate": 9.981443394050525e-06,
"loss": 0.4842,
"num_input_tokens_seen": 8727168,
"step": 1870
},
{
"epoch": 1.4134941575574822,
"grad_norm": 2.859672784805298,
"learning_rate": 9.863328783271989e-06,
"loss": 0.7458,
"num_input_tokens_seen": 8750704,
"step": 1875
},
{
"epoch": 1.4172634753109687,
"grad_norm": 2.2209770679473877,
"learning_rate": 9.745745205875373e-06,
"loss": 0.5579,
"num_input_tokens_seen": 8773680,
"step": 1880
},
{
"epoch": 1.4210327930644553,
"grad_norm": 3.285346031188965,
"learning_rate": 9.62869678699519e-06,
"loss": 0.5636,
"num_input_tokens_seen": 8797200,
"step": 1885
},
{
"epoch": 1.424802110817942,
"grad_norm": 2.7494428157806396,
"learning_rate": 9.512187632991192e-06,
"loss": 0.5483,
"num_input_tokens_seen": 8820832,
"step": 1890
},
{
"epoch": 1.4285714285714286,
"grad_norm": 2.5011394023895264,
"learning_rate": 9.396221831304364e-06,
"loss": 0.6082,
"num_input_tokens_seen": 8844096,
"step": 1895
},
{
"epoch": 1.4323407463249151,
"grad_norm": 3.395313262939453,
"learning_rate": 9.28080345031347e-06,
"loss": 0.6085,
"num_input_tokens_seen": 8867264,
"step": 1900
},
{
"epoch": 1.436110064078402,
"grad_norm": 2.5743775367736816,
"learning_rate": 9.165936539192358e-06,
"loss": 0.5687,
"num_input_tokens_seen": 8890896,
"step": 1905
},
{
"epoch": 1.4398793818318885,
"grad_norm": 2.9382436275482178,
"learning_rate": 9.05162512776789e-06,
"loss": 0.5493,
"num_input_tokens_seen": 8913712,
"step": 1910
},
{
"epoch": 1.443648699585375,
"grad_norm": 3.453364610671997,
"learning_rate": 8.937873226378582e-06,
"loss": 0.6865,
"num_input_tokens_seen": 8937536,
"step": 1915
},
{
"epoch": 1.4474180173388618,
"grad_norm": 3.3606762886047363,
"learning_rate": 8.824684825733865e-06,
"loss": 0.6156,
"num_input_tokens_seen": 8960752,
"step": 1920
},
{
"epoch": 1.4511873350923483,
"grad_norm": 3.234661817550659,
"learning_rate": 8.712063896774145e-06,
"loss": 0.7309,
"num_input_tokens_seen": 8983760,
"step": 1925
},
{
"epoch": 1.4549566528458349,
"grad_norm": 2.574348211288452,
"learning_rate": 8.60001439053145e-06,
"loss": 0.5862,
"num_input_tokens_seen": 9006688,
"step": 1930
},
{
"epoch": 1.4587259705993216,
"grad_norm": 3.313852548599243,
"learning_rate": 8.488540237990828e-06,
"loss": 0.6271,
"num_input_tokens_seen": 9030112,
"step": 1935
},
{
"epoch": 1.4624952883528082,
"grad_norm": 2.6033058166503906,
"learning_rate": 8.37764534995244e-06,
"loss": 0.6465,
"num_input_tokens_seen": 9054000,
"step": 1940
},
{
"epoch": 1.4662646061062947,
"grad_norm": 2.645503282546997,
"learning_rate": 8.267333616894363e-06,
"loss": 0.4938,
"num_input_tokens_seen": 9078032,
"step": 1945
},
{
"epoch": 1.4700339238597815,
"grad_norm": 2.527324676513672,
"learning_rate": 8.15760890883607e-06,
"loss": 0.3723,
"num_input_tokens_seen": 9101488,
"step": 1950
},
{
"epoch": 1.473803241613268,
"grad_norm": 4.690756320953369,
"learning_rate": 8.048475075202727e-06,
"loss": 0.574,
"num_input_tokens_seen": 9124464,
"step": 1955
},
{
"epoch": 1.4775725593667546,
"grad_norm": 2.1947431564331055,
"learning_rate": 7.939935944690077e-06,
"loss": 0.6388,
"num_input_tokens_seen": 9147616,
"step": 1960
},
{
"epoch": 1.4813418771202413,
"grad_norm": 3.5152084827423096,
"learning_rate": 7.831995325130153e-06,
"loss": 0.6665,
"num_input_tokens_seen": 9170624,
"step": 1965
},
{
"epoch": 1.485111194873728,
"grad_norm": 2.243281841278076,
"learning_rate": 7.724657003357696e-06,
"loss": 0.6963,
"num_input_tokens_seen": 9193808,
"step": 1970
},
{
"epoch": 1.4888805126272144,
"grad_norm": 4.0665812492370605,
"learning_rate": 7.617924745077259e-06,
"loss": 0.626,
"num_input_tokens_seen": 9216880,
"step": 1975
},
{
"epoch": 1.4926498303807012,
"grad_norm": 3.32173228263855,
"learning_rate": 7.51180229473116e-06,
"loss": 0.6302,
"num_input_tokens_seen": 9240080,
"step": 1980
},
{
"epoch": 1.4964191481341877,
"grad_norm": 5.613902568817139,
"learning_rate": 7.406293375368067e-06,
"loss": 0.6275,
"num_input_tokens_seen": 9263216,
"step": 1985
},
{
"epoch": 1.5001884658876743,
"grad_norm": 2.583724021911621,
"learning_rate": 7.301401688512416e-06,
"loss": 0.5228,
"num_input_tokens_seen": 9287088,
"step": 1990
},
{
"epoch": 1.503957783641161,
"grad_norm": 2.905665397644043,
"learning_rate": 7.197130914034522e-06,
"loss": 0.6225,
"num_input_tokens_seen": 9310480,
"step": 1995
},
{
"epoch": 1.5077271013946476,
"grad_norm": 2.8812687397003174,
"learning_rate": 7.0934847100215195e-06,
"loss": 0.5698,
"num_input_tokens_seen": 9333520,
"step": 2000
},
{
"epoch": 1.5114964191481342,
"grad_norm": 2.4012463092803955,
"learning_rate": 6.9904667126489685e-06,
"loss": 0.5032,
"num_input_tokens_seen": 9356928,
"step": 2005
},
{
"epoch": 1.515265736901621,
"grad_norm": 2.464493989944458,
"learning_rate": 6.888080536053351e-06,
"loss": 0.6599,
"num_input_tokens_seen": 9380672,
"step": 2010
},
{
"epoch": 1.5190350546551075,
"grad_norm": 2.6313998699188232,
"learning_rate": 6.786329772205247e-06,
"loss": 0.6288,
"num_input_tokens_seen": 9403888,
"step": 2015
},
{
"epoch": 1.522804372408594,
"grad_norm": 4.101465702056885,
"learning_rate": 6.68521799078331e-06,
"loss": 0.5918,
"num_input_tokens_seen": 9427312,
"step": 2020
},
{
"epoch": 1.5265736901620808,
"grad_norm": 1.9185794591903687,
"learning_rate": 6.58474873904906e-06,
"loss": 0.5564,
"num_input_tokens_seen": 9450416,
"step": 2025
},
{
"epoch": 1.5303430079155673,
"grad_norm": 7.757553577423096,
"learning_rate": 6.484925541722417e-06,
"loss": 0.6373,
"num_input_tokens_seen": 9474080,
"step": 2030
},
{
"epoch": 1.5341123256690539,
"grad_norm": 2.474942207336426,
"learning_rate": 6.385751900858031e-06,
"loss": 0.6829,
"num_input_tokens_seen": 9497584,
"step": 2035
},
{
"epoch": 1.5378816434225406,
"grad_norm": 2.8253934383392334,
"learning_rate": 6.28723129572247e-06,
"loss": 0.5933,
"num_input_tokens_seen": 9520832,
"step": 2040
},
{
"epoch": 1.5416509611760272,
"grad_norm": 4.235915660858154,
"learning_rate": 6.189367182672115e-06,
"loss": 0.6933,
"num_input_tokens_seen": 9544480,
"step": 2045
},
{
"epoch": 1.5454202789295137,
"grad_norm": 2.408738136291504,
"learning_rate": 6.092162995031911e-06,
"loss": 0.5513,
"num_input_tokens_seen": 9567888,
"step": 2050
},
{
"epoch": 1.5491895966830005,
"grad_norm": 2.3192906379699707,
"learning_rate": 5.99562214297493e-06,
"loss": 0.5939,
"num_input_tokens_seen": 9590688,
"step": 2055
},
{
"epoch": 1.552958914436487,
"grad_norm": 3.6835973262786865,
"learning_rate": 5.899748013402706e-06,
"loss": 0.6753,
"num_input_tokens_seen": 9613376,
"step": 2060
},
{
"epoch": 1.5567282321899736,
"grad_norm": 4.104626655578613,
"learning_rate": 5.804543969826453e-06,
"loss": 0.7191,
"num_input_tokens_seen": 9636608,
"step": 2065
},
{
"epoch": 1.5604975499434603,
"grad_norm": 4.243546485900879,
"learning_rate": 5.710013352249038e-06,
"loss": 0.6538,
"num_input_tokens_seen": 9659888,
"step": 2070
},
{
"epoch": 1.564266867696947,
"grad_norm": 2.856745958328247,
"learning_rate": 5.616159477047806e-06,
"loss": 0.5668,
"num_input_tokens_seen": 9683520,
"step": 2075
},
{
"epoch": 1.5680361854504334,
"grad_norm": 3.920729637145996,
"learning_rate": 5.522985636858239e-06,
"loss": 0.6383,
"num_input_tokens_seen": 9706384,
"step": 2080
},
{
"epoch": 1.5718055032039202,
"grad_norm": 2.312131404876709,
"learning_rate": 5.430495100458454e-06,
"loss": 0.7085,
"num_input_tokens_seen": 9729760,
"step": 2085
},
{
"epoch": 1.5755748209574068,
"grad_norm": 2.786285877227783,
"learning_rate": 5.338691112654484e-06,
"loss": 0.5722,
"num_input_tokens_seen": 9753200,
"step": 2090
},
{
"epoch": 1.5793441387108933,
"grad_norm": 2.269404649734497,
"learning_rate": 5.247576894166495e-06,
"loss": 0.549,
"num_input_tokens_seen": 9776464,
"step": 2095
},
{
"epoch": 1.58311345646438,
"grad_norm": 3.3955113887786865,
"learning_rate": 5.157155641515765e-06,
"loss": 0.6631,
"num_input_tokens_seen": 9800160,
"step": 2100
}
],
"logging_steps": 5,
"max_steps": 2652,
"num_input_tokens_seen": 9800160,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.1671074958491648e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}