{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.989308624376337,
"eval_steps": 500,
"global_step": 875,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005702066999287242,
"grad_norm": 884.3019409179688,
"learning_rate": 5.000000000000001e-07,
"loss": 12.0862,
"num_input_tokens_seen": 6368,
"step": 1
},
{
"epoch": 0.011404133998574484,
"grad_norm": 843.7129516601562,
"learning_rate": 1.0000000000000002e-06,
"loss": 12.1914,
"num_input_tokens_seen": 12384,
"step": 2
},
{
"epoch": 0.017106200997861726,
"grad_norm": 838.2268676757812,
"learning_rate": 1.5e-06,
"loss": 11.9748,
"num_input_tokens_seen": 18624,
"step": 3
},
{
"epoch": 0.022808267997148968,
"grad_norm": 757.4126586914062,
"learning_rate": 2.0000000000000003e-06,
"loss": 11.5009,
"num_input_tokens_seen": 25488,
"step": 4
},
{
"epoch": 0.02851033499643621,
"grad_norm": 627.78759765625,
"learning_rate": 2.5e-06,
"loss": 8.7827,
"num_input_tokens_seen": 32368,
"step": 5
},
{
"epoch": 0.03421240199572345,
"grad_norm": 409.04754638671875,
"learning_rate": 3e-06,
"loss": 6.5054,
"num_input_tokens_seen": 39008,
"step": 6
},
{
"epoch": 0.03991446899501069,
"grad_norm": 373.82159423828125,
"learning_rate": 3.5e-06,
"loss": 5.2127,
"num_input_tokens_seen": 45984,
"step": 7
},
{
"epoch": 0.045616535994297935,
"grad_norm": 351.687744140625,
"learning_rate": 4.000000000000001e-06,
"loss": 2.749,
"num_input_tokens_seen": 52656,
"step": 8
},
{
"epoch": 0.05131860299358518,
"grad_norm": 278.171875,
"learning_rate": 4.5e-06,
"loss": 1.0344,
"num_input_tokens_seen": 59408,
"step": 9
},
{
"epoch": 0.05702066999287242,
"grad_norm": 309.93524169921875,
"learning_rate": 5e-06,
"loss": 0.6484,
"num_input_tokens_seen": 66096,
"step": 10
},
{
"epoch": 0.06272273699215966,
"grad_norm": 360.1611633300781,
"learning_rate": 4.999983511654997e-06,
"loss": 1.8744,
"num_input_tokens_seen": 72592,
"step": 11
},
{
"epoch": 0.0684248039914469,
"grad_norm": 90.9323501586914,
"learning_rate": 4.999934046837479e-06,
"loss": 0.4507,
"num_input_tokens_seen": 79456,
"step": 12
},
{
"epoch": 0.07412687099073414,
"grad_norm": 303.21185302734375,
"learning_rate": 4.99985160619992e-06,
"loss": 1.6065,
"num_input_tokens_seen": 86016,
"step": 13
},
{
"epoch": 0.07982893799002139,
"grad_norm": 264.76409912109375,
"learning_rate": 4.99973619082977e-06,
"loss": 1.647,
"num_input_tokens_seen": 92336,
"step": 14
},
{
"epoch": 0.08553100498930863,
"grad_norm": 102.49153137207031,
"learning_rate": 4.999587802249433e-06,
"loss": 0.4799,
"num_input_tokens_seen": 99120,
"step": 15
},
{
"epoch": 0.09123307198859587,
"grad_norm": 128.62033081054688,
"learning_rate": 4.999406442416258e-06,
"loss": 0.5683,
"num_input_tokens_seen": 105712,
"step": 16
},
{
"epoch": 0.09693513898788311,
"grad_norm": 137.42129516601562,
"learning_rate": 4.9991921137225e-06,
"loss": 1.1372,
"num_input_tokens_seen": 112480,
"step": 17
},
{
"epoch": 0.10263720598717035,
"grad_norm": 11.351543426513672,
"learning_rate": 4.998944818995301e-06,
"loss": 0.3574,
"num_input_tokens_seen": 119136,
"step": 18
},
{
"epoch": 0.1083392729864576,
"grad_norm": 27.159238815307617,
"learning_rate": 4.9986645614966465e-06,
"loss": 0.3492,
"num_input_tokens_seen": 125632,
"step": 19
},
{
"epoch": 0.11404133998574484,
"grad_norm": 111.3335952758789,
"learning_rate": 4.998351344923323e-06,
"loss": 1.0766,
"num_input_tokens_seen": 132208,
"step": 20
},
{
"epoch": 0.11974340698503208,
"grad_norm": 41.73637771606445,
"learning_rate": 4.998005173406865e-06,
"loss": 0.3635,
"num_input_tokens_seen": 138624,
"step": 21
},
{
"epoch": 0.12544547398431932,
"grad_norm": 131.07913208007812,
"learning_rate": 4.997626051513512e-06,
"loss": 1.5452,
"num_input_tokens_seen": 145056,
"step": 22
},
{
"epoch": 0.13114754098360656,
"grad_norm": 85.91304779052734,
"learning_rate": 4.997213984244138e-06,
"loss": 0.9203,
"num_input_tokens_seen": 151696,
"step": 23
},
{
"epoch": 0.1368496079828938,
"grad_norm": 31.326322555541992,
"learning_rate": 4.9967689770341885e-06,
"loss": 0.3603,
"num_input_tokens_seen": 158080,
"step": 24
},
{
"epoch": 0.14255167498218105,
"grad_norm": 81.54974365234375,
"learning_rate": 4.996291035753608e-06,
"loss": 0.7598,
"num_input_tokens_seen": 164560,
"step": 25
},
{
"epoch": 0.1482537419814683,
"grad_norm": 65.69086456298828,
"learning_rate": 4.995780166706768e-06,
"loss": 0.5039,
"num_input_tokens_seen": 171296,
"step": 26
},
{
"epoch": 0.15395580898075553,
"grad_norm": 63.37894821166992,
"learning_rate": 4.995236376632373e-06,
"loss": 0.4744,
"num_input_tokens_seen": 177696,
"step": 27
},
{
"epoch": 0.15965787598004277,
"grad_norm": 32.942325592041016,
"learning_rate": 4.994659672703384e-06,
"loss": 0.3387,
"num_input_tokens_seen": 184384,
"step": 28
},
{
"epoch": 0.16535994297933002,
"grad_norm": 27.978893280029297,
"learning_rate": 4.994050062526915e-06,
"loss": 0.3363,
"num_input_tokens_seen": 190736,
"step": 29
},
{
"epoch": 0.17106200997861726,
"grad_norm": 25.147125244140625,
"learning_rate": 4.993407554144137e-06,
"loss": 0.2842,
"num_input_tokens_seen": 197328,
"step": 30
},
{
"epoch": 0.1767640769779045,
"grad_norm": 17.1385498046875,
"learning_rate": 4.992732156030169e-06,
"loss": 0.2779,
"num_input_tokens_seen": 204096,
"step": 31
},
{
"epoch": 0.18246614397719174,
"grad_norm": 20.462833404541016,
"learning_rate": 4.992023877093969e-06,
"loss": 0.242,
"num_input_tokens_seen": 210432,
"step": 32
},
{
"epoch": 0.18816821097647898,
"grad_norm": 18.327606201171875,
"learning_rate": 4.991282726678215e-06,
"loss": 0.2279,
"num_input_tokens_seen": 217072,
"step": 33
},
{
"epoch": 0.19387027797576623,
"grad_norm": 8.27484130859375,
"learning_rate": 4.990508714559182e-06,
"loss": 0.1862,
"num_input_tokens_seen": 223680,
"step": 34
},
{
"epoch": 0.19957234497505347,
"grad_norm": 26.527591705322266,
"learning_rate": 4.989701850946613e-06,
"loss": 0.2974,
"num_input_tokens_seen": 230272,
"step": 35
},
{
"epoch": 0.2052744119743407,
"grad_norm": 6.094027519226074,
"learning_rate": 4.988862146483585e-06,
"loss": 0.182,
"num_input_tokens_seen": 236912,
"step": 36
},
{
"epoch": 0.21097647897362795,
"grad_norm": 13.250824928283691,
"learning_rate": 4.9879896122463675e-06,
"loss": 0.1516,
"num_input_tokens_seen": 243424,
"step": 37
},
{
"epoch": 0.2166785459729152,
"grad_norm": 7.909171104431152,
"learning_rate": 4.987084259744276e-06,
"loss": 0.2325,
"num_input_tokens_seen": 250288,
"step": 38
},
{
"epoch": 0.22238061297220243,
"grad_norm": 5.98098611831665,
"learning_rate": 4.986146100919522e-06,
"loss": 0.1389,
"num_input_tokens_seen": 256640,
"step": 39
},
{
"epoch": 0.22808267997148968,
"grad_norm": 10.780566215515137,
"learning_rate": 4.985175148147057e-06,
"loss": 0.2076,
"num_input_tokens_seen": 263360,
"step": 40
},
{
"epoch": 0.23378474697077692,
"grad_norm": 11.498775482177734,
"learning_rate": 4.9841714142344015e-06,
"loss": 0.1476,
"num_input_tokens_seen": 270304,
"step": 41
},
{
"epoch": 0.23948681397006416,
"grad_norm": 9.130556106567383,
"learning_rate": 4.9831349124214855e-06,
"loss": 0.159,
"num_input_tokens_seen": 276752,
"step": 42
},
{
"epoch": 0.2451888809693514,
"grad_norm": 17.099760055541992,
"learning_rate": 4.982065656380468e-06,
"loss": 0.1761,
"num_input_tokens_seen": 283296,
"step": 43
},
{
"epoch": 0.25089094796863864,
"grad_norm": 7.222073554992676,
"learning_rate": 4.980963660215561e-06,
"loss": 0.1581,
"num_input_tokens_seen": 289888,
"step": 44
},
{
"epoch": 0.25659301496792586,
"grad_norm": 8.980274200439453,
"learning_rate": 4.979828938462836e-06,
"loss": 0.1454,
"num_input_tokens_seen": 296528,
"step": 45
},
{
"epoch": 0.26229508196721313,
"grad_norm": 23.71878433227539,
"learning_rate": 4.9786615060900415e-06,
"loss": 0.1823,
"num_input_tokens_seen": 303216,
"step": 46
},
{
"epoch": 0.26799714896650034,
"grad_norm": 26.473339080810547,
"learning_rate": 4.9774613784964e-06,
"loss": 0.2675,
"num_input_tokens_seen": 309968,
"step": 47
},
{
"epoch": 0.2736992159657876,
"grad_norm": 11.917190551757812,
"learning_rate": 4.976228571512405e-06,
"loss": 0.1482,
"num_input_tokens_seen": 316512,
"step": 48
},
{
"epoch": 0.2794012829650748,
"grad_norm": 9.98794937133789,
"learning_rate": 4.9749631013996146e-06,
"loss": 0.1258,
"num_input_tokens_seen": 323008,
"step": 49
},
{
"epoch": 0.2851033499643621,
"grad_norm": 17.47338104248047,
"learning_rate": 4.973664984850435e-06,
"loss": 0.1921,
"num_input_tokens_seen": 329888,
"step": 50
},
{
"epoch": 0.2908054169636493,
"grad_norm": 18.32182502746582,
"learning_rate": 4.9723342389879e-06,
"loss": 0.1491,
"num_input_tokens_seen": 336480,
"step": 51
},
{
"epoch": 0.2965074839629366,
"grad_norm": 10.995071411132812,
"learning_rate": 4.970970881365449e-06,
"loss": 0.1186,
"num_input_tokens_seen": 342992,
"step": 52
},
{
"epoch": 0.3022095509622238,
"grad_norm": 15.977797508239746,
"learning_rate": 4.96957492996669e-06,
"loss": 0.2011,
"num_input_tokens_seen": 349776,
"step": 53
},
{
"epoch": 0.30791161796151106,
"grad_norm": 5.898417949676514,
"learning_rate": 4.968146403205164e-06,
"loss": 0.1301,
"num_input_tokens_seen": 356432,
"step": 54
},
{
"epoch": 0.3136136849607983,
"grad_norm": 10.29259967803955,
"learning_rate": 4.966685319924105e-06,
"loss": 0.1563,
"num_input_tokens_seen": 362592,
"step": 55
},
{
"epoch": 0.31931575196008555,
"grad_norm": 12.916869163513184,
"learning_rate": 4.965191699396192e-06,
"loss": 0.1667,
"num_input_tokens_seen": 369040,
"step": 56
},
{
"epoch": 0.32501781895937276,
"grad_norm": 6.394758701324463,
"learning_rate": 4.9636655613232866e-06,
"loss": 0.134,
"num_input_tokens_seen": 375552,
"step": 57
},
{
"epoch": 0.33071988595866003,
"grad_norm": 4.55596399307251,
"learning_rate": 4.962106925836182e-06,
"loss": 0.1753,
"num_input_tokens_seen": 382304,
"step": 58
},
{
"epoch": 0.33642195295794725,
"grad_norm": 5.786614418029785,
"learning_rate": 4.960515813494335e-06,
"loss": 0.1381,
"num_input_tokens_seen": 388960,
"step": 59
},
{
"epoch": 0.3421240199572345,
"grad_norm": 3.520648956298828,
"learning_rate": 4.958892245285594e-06,
"loss": 0.0927,
"num_input_tokens_seen": 395888,
"step": 60
},
{
"epoch": 0.34782608695652173,
"grad_norm": 6.477549076080322,
"learning_rate": 4.957236242625918e-06,
"loss": 0.1608,
"num_input_tokens_seen": 402432,
"step": 61
},
{
"epoch": 0.353528153955809,
"grad_norm": 8.49998664855957,
"learning_rate": 4.955547827359103e-06,
"loss": 0.1219,
"num_input_tokens_seen": 408832,
"step": 62
},
{
"epoch": 0.3592302209550962,
"grad_norm": 7.314529895782471,
"learning_rate": 4.953827021756489e-06,
"loss": 0.1674,
"num_input_tokens_seen": 415232,
"step": 63
},
{
"epoch": 0.3649322879543835,
"grad_norm": 8.064135551452637,
"learning_rate": 4.952073848516663e-06,
"loss": 0.1413,
"num_input_tokens_seen": 421536,
"step": 64
},
{
"epoch": 0.3706343549536707,
"grad_norm": 6.086092472076416,
"learning_rate": 4.950288330765167e-06,
"loss": 0.1515,
"num_input_tokens_seen": 428176,
"step": 65
},
{
"epoch": 0.37633642195295797,
"grad_norm": 5.5795063972473145,
"learning_rate": 4.948470492054186e-06,
"loss": 0.1904,
"num_input_tokens_seen": 434592,
"step": 66
},
{
"epoch": 0.3820384889522452,
"grad_norm": 4.6880879402160645,
"learning_rate": 4.946620356362243e-06,
"loss": 0.1283,
"num_input_tokens_seen": 440912,
"step": 67
},
{
"epoch": 0.38774055595153245,
"grad_norm": 13.155076026916504,
"learning_rate": 4.944737948093876e-06,
"loss": 0.1686,
"num_input_tokens_seen": 447232,
"step": 68
},
{
"epoch": 0.39344262295081966,
"grad_norm": 3.164670467376709,
"learning_rate": 4.942823292079325e-06,
"loss": 0.1451,
"num_input_tokens_seen": 453920,
"step": 69
},
{
"epoch": 0.39914468995010693,
"grad_norm": 15.198010444641113,
"learning_rate": 4.9408764135741955e-06,
"loss": 0.1635,
"num_input_tokens_seen": 460320,
"step": 70
},
{
"epoch": 0.40484675694939415,
"grad_norm": 7.284903049468994,
"learning_rate": 4.9388973382591325e-06,
"loss": 0.1182,
"num_input_tokens_seen": 466816,
"step": 71
},
{
"epoch": 0.4105488239486814,
"grad_norm": 7.4862799644470215,
"learning_rate": 4.936886092239475e-06,
"loss": 0.1328,
"num_input_tokens_seen": 473488,
"step": 72
},
{
"epoch": 0.41625089094796863,
"grad_norm": 4.4039435386657715,
"learning_rate": 4.9348427020449206e-06,
"loss": 0.1312,
"num_input_tokens_seen": 480592,
"step": 73
},
{
"epoch": 0.4219529579472559,
"grad_norm": 7.053802967071533,
"learning_rate": 4.932767194629164e-06,
"loss": 0.1439,
"num_input_tokens_seen": 487584,
"step": 74
},
{
"epoch": 0.4276550249465431,
"grad_norm": 6.4606709480285645,
"learning_rate": 4.9306595973695545e-06,
"loss": 0.1534,
"num_input_tokens_seen": 494400,
"step": 75
},
{
"epoch": 0.4333570919458304,
"grad_norm": 8.80456256866455,
"learning_rate": 4.928519938066723e-06,
"loss": 0.1159,
"num_input_tokens_seen": 500624,
"step": 76
},
{
"epoch": 0.4390591589451176,
"grad_norm": 5.863775253295898,
"learning_rate": 4.926348244944221e-06,
"loss": 0.1291,
"num_input_tokens_seen": 507168,
"step": 77
},
{
"epoch": 0.44476122594440487,
"grad_norm": 6.417506217956543,
"learning_rate": 4.924144546648151e-06,
"loss": 0.145,
"num_input_tokens_seen": 513968,
"step": 78
},
{
"epoch": 0.4504632929436921,
"grad_norm": 4.409994602203369,
"learning_rate": 4.9219088722467825e-06,
"loss": 0.1155,
"num_input_tokens_seen": 520432,
"step": 79
},
{
"epoch": 0.45616535994297935,
"grad_norm": 4.534794330596924,
"learning_rate": 4.91964125123017e-06,
"loss": 0.1153,
"num_input_tokens_seen": 527392,
"step": 80
},
{
"epoch": 0.46186742694226657,
"grad_norm": 20.643110275268555,
"learning_rate": 4.917341713509772e-06,
"loss": 0.1673,
"num_input_tokens_seen": 533712,
"step": 81
},
{
"epoch": 0.46756949394155384,
"grad_norm": 27.360891342163086,
"learning_rate": 4.915010289418042e-06,
"loss": 0.2122,
"num_input_tokens_seen": 540544,
"step": 82
},
{
"epoch": 0.47327156094084105,
"grad_norm": 13.000382423400879,
"learning_rate": 4.912647009708041e-06,
"loss": 0.1092,
"num_input_tokens_seen": 546832,
"step": 83
},
{
"epoch": 0.4789736279401283,
"grad_norm": 5.801584720611572,
"learning_rate": 4.910251905553025e-06,
"loss": 0.148,
"num_input_tokens_seen": 553776,
"step": 84
},
{
"epoch": 0.48467569493941554,
"grad_norm": 9.308876037597656,
"learning_rate": 4.907825008546039e-06,
"loss": 0.1237,
"num_input_tokens_seen": 560208,
"step": 85
},
{
"epoch": 0.4903777619387028,
"grad_norm": 3.73714542388916,
"learning_rate": 4.905366350699493e-06,
"loss": 0.106,
"num_input_tokens_seen": 566688,
"step": 86
},
{
"epoch": 0.49607982893799,
"grad_norm": 4.344607830047607,
"learning_rate": 4.902875964444746e-06,
"loss": 0.1131,
"num_input_tokens_seen": 573184,
"step": 87
},
{
"epoch": 0.5017818959372773,
"grad_norm": 4.39575719833374,
"learning_rate": 4.900353882631679e-06,
"loss": 0.1343,
"num_input_tokens_seen": 579920,
"step": 88
},
{
"epoch": 0.5074839629365645,
"grad_norm": 2.9490160942077637,
"learning_rate": 4.897800138528254e-06,
"loss": 0.0873,
"num_input_tokens_seen": 586864,
"step": 89
},
{
"epoch": 0.5131860299358517,
"grad_norm": 7.676606178283691,
"learning_rate": 4.8952147658200815e-06,
"loss": 0.1412,
"num_input_tokens_seen": 593392,
"step": 90
},
{
"epoch": 0.518888096935139,
"grad_norm": 5.407953262329102,
"learning_rate": 4.892597798609976e-06,
"loss": 0.0983,
"num_input_tokens_seen": 599824,
"step": 91
},
{
"epoch": 0.5245901639344263,
"grad_norm": 4.343061923980713,
"learning_rate": 4.889949271417504e-06,
"loss": 0.1322,
"num_input_tokens_seen": 606384,
"step": 92
},
{
"epoch": 0.5302922309337135,
"grad_norm": 4.196661949157715,
"learning_rate": 4.88726921917853e-06,
"loss": 0.1345,
"num_input_tokens_seen": 613408,
"step": 93
},
{
"epoch": 0.5359942979330007,
"grad_norm": 6.040660381317139,
"learning_rate": 4.884557677244755e-06,
"loss": 0.1367,
"num_input_tokens_seen": 620096,
"step": 94
},
{
"epoch": 0.5416963649322879,
"grad_norm": 4.8859100341796875,
"learning_rate": 4.8818146813832475e-06,
"loss": 0.1228,
"num_input_tokens_seen": 626400,
"step": 95
},
{
"epoch": 0.5473984319315752,
"grad_norm": 13.37174129486084,
"learning_rate": 4.879040267775981e-06,
"loss": 0.2031,
"num_input_tokens_seen": 633088,
"step": 96
},
{
"epoch": 0.5531004989308624,
"grad_norm": 4.321401596069336,
"learning_rate": 4.8762344730193445e-06,
"loss": 0.1235,
"num_input_tokens_seen": 639584,
"step": 97
},
{
"epoch": 0.5588025659301497,
"grad_norm": 10.194490432739258,
"learning_rate": 4.873397334123667e-06,
"loss": 0.1136,
"num_input_tokens_seen": 646160,
"step": 98
},
{
"epoch": 0.5645046329294369,
"grad_norm": 4.425650119781494,
"learning_rate": 4.87052888851273e-06,
"loss": 0.1065,
"num_input_tokens_seen": 652992,
"step": 99
},
{
"epoch": 0.5702066999287242,
"grad_norm": 13.328121185302734,
"learning_rate": 4.867629174023269e-06,
"loss": 0.1489,
"num_input_tokens_seen": 659952,
"step": 100
},
{
"epoch": 0.5759087669280114,
"grad_norm": 3.278292417526245,
"learning_rate": 4.864698228904479e-06,
"loss": 0.1057,
"num_input_tokens_seen": 666400,
"step": 101
},
{
"epoch": 0.5816108339272986,
"grad_norm": 13.085524559020996,
"learning_rate": 4.861736091817506e-06,
"loss": 0.1382,
"num_input_tokens_seen": 673008,
"step": 102
},
{
"epoch": 0.5873129009265858,
"grad_norm": 10.3684663772583,
"learning_rate": 4.858742801834943e-06,
"loss": 0.119,
"num_input_tokens_seen": 679680,
"step": 103
},
{
"epoch": 0.5930149679258732,
"grad_norm": 3.594886064529419,
"learning_rate": 4.855718398440306e-06,
"loss": 0.1058,
"num_input_tokens_seen": 685968,
"step": 104
},
{
"epoch": 0.5987170349251604,
"grad_norm": 3.6079792976379395,
"learning_rate": 4.852662921527523e-06,
"loss": 0.0622,
"num_input_tokens_seen": 692496,
"step": 105
},
{
"epoch": 0.6044191019244476,
"grad_norm": 4.3337602615356445,
"learning_rate": 4.849576411400397e-06,
"loss": 0.08,
"num_input_tokens_seen": 699040,
"step": 106
},
{
"epoch": 0.6101211689237348,
"grad_norm": 7.190303325653076,
"learning_rate": 4.846458908772085e-06,
"loss": 0.1147,
"num_input_tokens_seen": 705792,
"step": 107
},
{
"epoch": 0.6158232359230221,
"grad_norm": 11.85684871673584,
"learning_rate": 4.843310454764553e-06,
"loss": 0.1084,
"num_input_tokens_seen": 712912,
"step": 108
},
{
"epoch": 0.6215253029223093,
"grad_norm": 5.83061408996582,
"learning_rate": 4.840131090908038e-06,
"loss": 0.1195,
"num_input_tokens_seen": 719488,
"step": 109
},
{
"epoch": 0.6272273699215966,
"grad_norm": 5.0566511154174805,
"learning_rate": 4.8369208591405e-06,
"loss": 0.0877,
"num_input_tokens_seen": 725872,
"step": 110
},
{
"epoch": 0.6329294369208838,
"grad_norm": 7.780054092407227,
"learning_rate": 4.833679801807064e-06,
"loss": 0.1405,
"num_input_tokens_seen": 732336,
"step": 111
},
{
"epoch": 0.6386315039201711,
"grad_norm": 7.04516077041626,
"learning_rate": 4.8304079616594684e-06,
"loss": 0.1014,
"num_input_tokens_seen": 739200,
"step": 112
},
{
"epoch": 0.6443335709194583,
"grad_norm": 7.1423516273498535,
"learning_rate": 4.827105381855496e-06,
"loss": 0.1094,
"num_input_tokens_seen": 745568,
"step": 113
},
{
"epoch": 0.6500356379187455,
"grad_norm": 3.5626931190490723,
"learning_rate": 4.823772105958408e-06,
"loss": 0.1301,
"num_input_tokens_seen": 751872,
"step": 114
},
{
"epoch": 0.6557377049180327,
"grad_norm": 4.6096296310424805,
"learning_rate": 4.820408177936365e-06,
"loss": 0.0939,
"num_input_tokens_seen": 758784,
"step": 115
},
{
"epoch": 0.6614397719173201,
"grad_norm": 7.992829322814941,
"learning_rate": 4.817013642161853e-06,
"loss": 0.122,
"num_input_tokens_seen": 765552,
"step": 116
},
{
"epoch": 0.6671418389166073,
"grad_norm": 4.512622356414795,
"learning_rate": 4.8135885434110935e-06,
"loss": 0.1296,
"num_input_tokens_seen": 772096,
"step": 117
},
{
"epoch": 0.6728439059158945,
"grad_norm": 4.544010639190674,
"learning_rate": 4.810132926863454e-06,
"loss": 0.108,
"num_input_tokens_seen": 778512,
"step": 118
},
{
"epoch": 0.6785459729151817,
"grad_norm": 2.2902719974517822,
"learning_rate": 4.8066468381008525e-06,
"loss": 0.0779,
"num_input_tokens_seen": 784992,
"step": 119
},
{
"epoch": 0.684248039914469,
"grad_norm": 5.215991497039795,
"learning_rate": 4.803130323107157e-06,
"loss": 0.1358,
"num_input_tokens_seen": 791392,
"step": 120
},
{
"epoch": 0.6899501069137562,
"grad_norm": 3.8735485076904297,
"learning_rate": 4.799583428267577e-06,
"loss": 0.1485,
"num_input_tokens_seen": 798320,
"step": 121
},
{
"epoch": 0.6956521739130435,
"grad_norm": 3.4041693210601807,
"learning_rate": 4.796006200368054e-06,
"loss": 0.09,
"num_input_tokens_seen": 804896,
"step": 122
},
{
"epoch": 0.7013542409123307,
"grad_norm": 5.4052839279174805,
"learning_rate": 4.792398686594641e-06,
"loss": 0.108,
"num_input_tokens_seen": 811504,
"step": 123
},
{
"epoch": 0.707056307911618,
"grad_norm": 9.151551246643066,
"learning_rate": 4.788760934532883e-06,
"loss": 0.1557,
"num_input_tokens_seen": 818176,
"step": 124
},
{
"epoch": 0.7127583749109052,
"grad_norm": 8.451269149780273,
"learning_rate": 4.785092992167192e-06,
"loss": 0.1206,
"num_input_tokens_seen": 824272,
"step": 125
},
{
"epoch": 0.7184604419101924,
"grad_norm": 5.943455219268799,
"learning_rate": 4.7813949078802035e-06,
"loss": 0.0977,
"num_input_tokens_seen": 830656,
"step": 126
},
{
"epoch": 0.7241625089094796,
"grad_norm": 5.474506855010986,
"learning_rate": 4.777666730452151e-06,
"loss": 0.1223,
"num_input_tokens_seen": 837680,
"step": 127
},
{
"epoch": 0.729864575908767,
"grad_norm": 8.626420974731445,
"learning_rate": 4.773908509060214e-06,
"loss": 0.1751,
"num_input_tokens_seen": 844624,
"step": 128
},
{
"epoch": 0.7355666429080542,
"grad_norm": 3.8866775035858154,
"learning_rate": 4.770120293277876e-06,
"loss": 0.1016,
"num_input_tokens_seen": 851312,
"step": 129
},
{
"epoch": 0.7412687099073414,
"grad_norm": 4.498022556304932,
"learning_rate": 4.766302133074261e-06,
"loss": 0.1351,
"num_input_tokens_seen": 858256,
"step": 130
},
{
"epoch": 0.7469707769066286,
"grad_norm": 3.3039486408233643,
"learning_rate": 4.762454078813483e-06,
"loss": 0.0891,
"num_input_tokens_seen": 864912,
"step": 131
},
{
"epoch": 0.7526728439059159,
"grad_norm": 2.1555142402648926,
"learning_rate": 4.758576181253981e-06,
"loss": 0.07,
"num_input_tokens_seen": 871552,
"step": 132
},
{
"epoch": 0.7583749109052031,
"grad_norm": 4.617002964019775,
"learning_rate": 4.7546684915478445e-06,
"loss": 0.1325,
"num_input_tokens_seen": 878256,
"step": 133
},
{
"epoch": 0.7640769779044904,
"grad_norm": 4.712822914123535,
"learning_rate": 4.750731061240143e-06,
"loss": 0.1074,
"num_input_tokens_seen": 885216,
"step": 134
},
{
"epoch": 0.7697790449037776,
"grad_norm": 3.698430061340332,
"learning_rate": 4.746763942268243e-06,
"loss": 0.0949,
"num_input_tokens_seen": 891552,
"step": 135
},
{
"epoch": 0.7754811119030649,
"grad_norm": 6.280089855194092,
"learning_rate": 4.742767186961126e-06,
"loss": 0.0813,
"num_input_tokens_seen": 898432,
"step": 136
},
{
"epoch": 0.7811831789023521,
"grad_norm": 3.984236240386963,
"learning_rate": 4.738740848038695e-06,
"loss": 0.092,
"num_input_tokens_seen": 905184,
"step": 137
},
{
"epoch": 0.7868852459016393,
"grad_norm": 4.700631141662598,
"learning_rate": 4.7346849786110835e-06,
"loss": 0.0679,
"num_input_tokens_seen": 911856,
"step": 138
},
{
"epoch": 0.7925873129009265,
"grad_norm": 4.287609100341797,
"learning_rate": 4.730599632177952e-06,
"loss": 0.1234,
"num_input_tokens_seen": 918320,
"step": 139
},
{
"epoch": 0.7982893799002139,
"grad_norm": 10.069003105163574,
"learning_rate": 4.726484862627779e-06,
"loss": 0.1281,
"num_input_tokens_seen": 925536,
"step": 140
},
{
"epoch": 0.8039914468995011,
"grad_norm": 3.5951642990112305,
"learning_rate": 4.7223407242371595e-06,
"loss": 0.0834,
"num_input_tokens_seen": 932064,
"step": 141
},
{
"epoch": 0.8096935138987883,
"grad_norm": 3.834705352783203,
"learning_rate": 4.718167271670078e-06,
"loss": 0.0863,
"num_input_tokens_seen": 938688,
"step": 142
},
{
"epoch": 0.8153955808980755,
"grad_norm": 5.181819438934326,
"learning_rate": 4.713964559977196e-06,
"loss": 0.0503,
"num_input_tokens_seen": 945456,
"step": 143
},
{
"epoch": 0.8210976478973628,
"grad_norm": 6.386320114135742,
"learning_rate": 4.709732644595122e-06,
"loss": 0.1619,
"num_input_tokens_seen": 951872,
"step": 144
},
{
"epoch": 0.82679971489665,
"grad_norm": 3.679490566253662,
"learning_rate": 4.7054715813456795e-06,
"loss": 0.1112,
"num_input_tokens_seen": 958320,
"step": 145
},
{
"epoch": 0.8325017818959373,
"grad_norm": 5.969015121459961,
"learning_rate": 4.701181426435174e-06,
"loss": 0.0939,
"num_input_tokens_seen": 965008,
"step": 146
},
{
"epoch": 0.8382038488952245,
"grad_norm": 2.577047824859619,
"learning_rate": 4.69686223645365e-06,
"loss": 0.0549,
"num_input_tokens_seen": 971520,
"step": 147
},
{
"epoch": 0.8439059158945118,
"grad_norm": 5.369522571563721,
"learning_rate": 4.692514068374142e-06,
"loss": 0.1128,
"num_input_tokens_seen": 978512,
"step": 148
},
{
"epoch": 0.849607982893799,
"grad_norm": 5.3961615562438965,
"learning_rate": 4.688136979551926e-06,
"loss": 0.0862,
"num_input_tokens_seen": 985264,
"step": 149
},
{
"epoch": 0.8553100498930862,
"grad_norm": 3.643087148666382,
"learning_rate": 4.683731027723764e-06,
"loss": 0.0769,
"num_input_tokens_seen": 991552,
"step": 150
},
{
"epoch": 0.8610121168923734,
"grad_norm": 4.090428352355957,
"learning_rate": 4.679296271007138e-06,
"loss": 0.1199,
"num_input_tokens_seen": 998064,
"step": 151
},
{
"epoch": 0.8667141838916608,
"grad_norm": 6.78312349319458,
"learning_rate": 4.674832767899486e-06,
"loss": 0.093,
"num_input_tokens_seen": 1004864,
"step": 152
},
{
"epoch": 0.872416250890948,
"grad_norm": 5.438461780548096,
"learning_rate": 4.670340577277433e-06,
"loss": 0.0903,
"num_input_tokens_seen": 1011568,
"step": 153
},
{
"epoch": 0.8781183178902352,
"grad_norm": 5.802255153656006,
"learning_rate": 4.665819758396009e-06,
"loss": 0.0877,
"num_input_tokens_seen": 1018320,
"step": 154
},
{
"epoch": 0.8838203848895224,
"grad_norm": 4.546239852905273,
"learning_rate": 4.661270370887872e-06,
"loss": 0.1049,
"num_input_tokens_seen": 1024640,
"step": 155
},
{
"epoch": 0.8895224518888097,
"grad_norm": 5.347127914428711,
"learning_rate": 4.656692474762518e-06,
"loss": 0.1052,
"num_input_tokens_seen": 1031312,
"step": 156
},
{
"epoch": 0.895224518888097,
"grad_norm": 6.227194786071777,
"learning_rate": 4.652086130405492e-06,
"loss": 0.1196,
"num_input_tokens_seen": 1037936,
"step": 157
},
{
"epoch": 0.9009265858873842,
"grad_norm": 3.103381872177124,
"learning_rate": 4.647451398577589e-06,
"loss": 0.0427,
"num_input_tokens_seen": 1044496,
"step": 158
},
{
"epoch": 0.9066286528866714,
"grad_norm": 2.1822800636291504,
"learning_rate": 4.642788340414056e-06,
"loss": 0.0764,
"num_input_tokens_seen": 1051456,
"step": 159
},
{
"epoch": 0.9123307198859587,
"grad_norm": 4.704215049743652,
"learning_rate": 4.638097017423783e-06,
"loss": 0.0812,
"num_input_tokens_seen": 1057952,
"step": 160
},
{
"epoch": 0.9180327868852459,
"grad_norm": 3.070636510848999,
"learning_rate": 4.63337749148849e-06,
"loss": 0.0807,
"num_input_tokens_seen": 1065024,
"step": 161
},
{
"epoch": 0.9237348538845331,
"grad_norm": 8.418425559997559,
"learning_rate": 4.628629824861915e-06,
"loss": 0.1475,
"num_input_tokens_seen": 1071968,
"step": 162
},
{
"epoch": 0.9294369208838203,
"grad_norm": 4.310948371887207,
"learning_rate": 4.62385408016899e-06,
"loss": 0.0507,
"num_input_tokens_seen": 1078256,
"step": 163
},
{
"epoch": 0.9351389878831077,
"grad_norm": 8.601131439208984,
"learning_rate": 4.619050320405017e-06,
"loss": 0.1015,
"num_input_tokens_seen": 1084752,
"step": 164
},
{
"epoch": 0.9408410548823949,
"grad_norm": 4.491447925567627,
"learning_rate": 4.614218608934834e-06,
"loss": 0.0957,
"num_input_tokens_seen": 1091376,
"step": 165
},
{
"epoch": 0.9465431218816821,
"grad_norm": 8.606071472167969,
"learning_rate": 4.609359009491981e-06,
"loss": 0.1327,
"num_input_tokens_seen": 1098176,
"step": 166
},
{
"epoch": 0.9522451888809693,
"grad_norm": 5.092894077301025,
"learning_rate": 4.60447158617786e-06,
"loss": 0.0782,
"num_input_tokens_seen": 1104848,
"step": 167
},
{
"epoch": 0.9579472558802566,
"grad_norm": 4.862101078033447,
"learning_rate": 4.599556403460889e-06,
"loss": 0.0768,
"num_input_tokens_seen": 1111120,
"step": 168
},
{
"epoch": 0.9636493228795439,
"grad_norm": 4.805572032928467,
"learning_rate": 4.59461352617565e-06,
"loss": 0.033,
"num_input_tokens_seen": 1117440,
"step": 169
},
{
"epoch": 0.9693513898788311,
"grad_norm": 4.407527446746826,
"learning_rate": 4.589643019522036e-06,
"loss": 0.1295,
"num_input_tokens_seen": 1123968,
"step": 170
},
{
"epoch": 0.9750534568781183,
"grad_norm": 12.811196327209473,
"learning_rate": 4.584644949064391e-06,
"loss": 0.1216,
"num_input_tokens_seen": 1130656,
"step": 171
},
{
"epoch": 0.9807555238774056,
"grad_norm": 5.432493686676025,
"learning_rate": 4.579619380730642e-06,
"loss": 0.0896,
"num_input_tokens_seen": 1137088,
"step": 172
},
{
"epoch": 0.9864575908766928,
"grad_norm": 4.3895745277404785,
"learning_rate": 4.574566380811432e-06,
"loss": 0.101,
"num_input_tokens_seen": 1144064,
"step": 173
},
{
"epoch": 0.99215965787598,
"grad_norm": 6.614623069763184,
"learning_rate": 4.569486015959247e-06,
"loss": 0.0787,
"num_input_tokens_seen": 1150560,
"step": 174
},
{
"epoch": 0.9978617248752673,
"grad_norm": 10.594895362854004,
"learning_rate": 4.564378353187533e-06,
"loss": 0.1118,
"num_input_tokens_seen": 1157184,
"step": 175
},
{
"epoch": 1.0035637918745546,
"grad_norm": 6.2672576904296875,
"learning_rate": 4.5592434598698144e-06,
"loss": 0.0671,
"num_input_tokens_seen": 1163712,
"step": 176
},
{
"epoch": 1.0092658588738417,
"grad_norm": 5.011196136474609,
"learning_rate": 4.5540814037388056e-06,
"loss": 0.0862,
"num_input_tokens_seen": 1170480,
"step": 177
},
{
"epoch": 1.014967925873129,
"grad_norm": 2.275132179260254,
"learning_rate": 4.548892252885518e-06,
"loss": 0.0385,
"num_input_tokens_seen": 1176768,
"step": 178
},
{
"epoch": 1.0206699928724163,
"grad_norm": 5.401920795440674,
"learning_rate": 4.543676075758356e-06,
"loss": 0.0557,
"num_input_tokens_seen": 1183520,
"step": 179
},
{
"epoch": 1.0263720598717034,
"grad_norm": 6.662536144256592,
"learning_rate": 4.538432941162227e-06,
"loss": 0.0692,
"num_input_tokens_seen": 1189968,
"step": 180
},
{
"epoch": 1.0320741268709908,
"grad_norm": 6.5178093910217285,
"learning_rate": 4.533162918257615e-06,
"loss": 0.1001,
"num_input_tokens_seen": 1196432,
"step": 181
},
{
"epoch": 1.037776193870278,
"grad_norm": 4.606156349182129,
"learning_rate": 4.5278660765596885e-06,
"loss": 0.0782,
"num_input_tokens_seen": 1203024,
"step": 182
},
{
"epoch": 1.0434782608695652,
"grad_norm": 3.283186674118042,
"learning_rate": 4.522542485937369e-06,
"loss": 0.0324,
"num_input_tokens_seen": 1209744,
"step": 183
},
{
"epoch": 1.0491803278688525,
"grad_norm": 6.526776313781738,
"learning_rate": 4.517192216612415e-06,
"loss": 0.0671,
"num_input_tokens_seen": 1216080,
"step": 184
},
{
"epoch": 1.0548823948681396,
"grad_norm": 3.7458949089050293,
"learning_rate": 4.511815339158497e-06,
"loss": 0.051,
"num_input_tokens_seen": 1223328,
"step": 185
},
{
"epoch": 1.060584461867427,
"grad_norm": 5.047933101654053,
"learning_rate": 4.506411924500263e-06,
"loss": 0.0565,
"num_input_tokens_seen": 1230192,
"step": 186
},
{
"epoch": 1.0662865288667143,
"grad_norm": 3.9890875816345215,
"learning_rate": 4.500982043912404e-06,
"loss": 0.0393,
"num_input_tokens_seen": 1236992,
"step": 187
},
{
"epoch": 1.0719885958660014,
"grad_norm": 5.807724475860596,
"learning_rate": 4.495525769018717e-06,
"loss": 0.0922,
"num_input_tokens_seen": 1243776,
"step": 188
},
{
"epoch": 1.0776906628652887,
"grad_norm": 6.556837558746338,
"learning_rate": 4.490043171791155e-06,
"loss": 0.0518,
"num_input_tokens_seen": 1250752,
"step": 189
},
{
"epoch": 1.0833927298645758,
"grad_norm": 4.41609525680542,
"learning_rate": 4.484534324548883e-06,
"loss": 0.0894,
"num_input_tokens_seen": 1257344,
"step": 190
},
{
"epoch": 1.0890947968638631,
"grad_norm": 5.981895446777344,
"learning_rate": 4.47899929995732e-06,
"loss": 0.0631,
"num_input_tokens_seen": 1264064,
"step": 191
},
{
"epoch": 1.0947968638631504,
"grad_norm": 4.5273966789245605,
"learning_rate": 4.47343817102718e-06,
"loss": 0.0464,
"num_input_tokens_seen": 1270608,
"step": 192
},
{
"epoch": 1.1004989308624376,
"grad_norm": 4.48996639251709,
"learning_rate": 4.4678510111135154e-06,
"loss": 0.0652,
"num_input_tokens_seen": 1277056,
"step": 193
},
{
"epoch": 1.1062009978617249,
"grad_norm": 3.518558979034424,
"learning_rate": 4.462237893914742e-06,
"loss": 0.0317,
"num_input_tokens_seen": 1283776,
"step": 194
},
{
"epoch": 1.1119030648610122,
"grad_norm": 8.252947807312012,
"learning_rate": 4.456598893471668e-06,
"loss": 0.0485,
"num_input_tokens_seen": 1290368,
"step": 195
},
{
"epoch": 1.1176051318602993,
"grad_norm": 2.3548762798309326,
"learning_rate": 4.450934084166524e-06,
"loss": 0.0325,
"num_input_tokens_seen": 1297040,
"step": 196
},
{
"epoch": 1.1233071988595866,
"grad_norm": 3.5539638996124268,
"learning_rate": 4.445243540721972e-06,
"loss": 0.0753,
"num_input_tokens_seen": 1303856,
"step": 197
},
{
"epoch": 1.129009265858874,
"grad_norm": 1.5808628797531128,
"learning_rate": 4.439527338200129e-06,
"loss": 0.0159,
"num_input_tokens_seen": 1310384,
"step": 198
},
{
"epoch": 1.134711332858161,
"grad_norm": 6.414031982421875,
"learning_rate": 4.433785552001569e-06,
"loss": 0.0718,
"num_input_tokens_seen": 1316976,
"step": 199
},
{
"epoch": 1.1404133998574484,
"grad_norm": 6.116299629211426,
"learning_rate": 4.428018257864333e-06,
"loss": 0.0491,
"num_input_tokens_seen": 1323520,
"step": 200
},
{
"epoch": 1.1461154668567355,
"grad_norm": 3.120823860168457,
"learning_rate": 4.422225531862929e-06,
"loss": 0.0608,
"num_input_tokens_seen": 1329984,
"step": 201
},
{
"epoch": 1.1518175338560228,
"grad_norm": 3.8637633323669434,
"learning_rate": 4.416407450407332e-06,
"loss": 0.0631,
"num_input_tokens_seen": 1336704,
"step": 202
},
{
"epoch": 1.1575196008553101,
"grad_norm": 2.4866721630096436,
"learning_rate": 4.4105640902419665e-06,
"loss": 0.024,
"num_input_tokens_seen": 1343344,
"step": 203
},
{
"epoch": 1.1632216678545972,
"grad_norm": 3.9751436710357666,
"learning_rate": 4.404695528444704e-06,
"loss": 0.0453,
"num_input_tokens_seen": 1350144,
"step": 204
},
{
"epoch": 1.1689237348538846,
"grad_norm": 5.458123683929443,
"learning_rate": 4.398801842425842e-06,
"loss": 0.0345,
"num_input_tokens_seen": 1356672,
"step": 205
},
{
"epoch": 1.1746258018531717,
"grad_norm": 2.4415106773376465,
"learning_rate": 4.392883109927083e-06,
"loss": 0.015,
"num_input_tokens_seen": 1363200,
"step": 206
},
{
"epoch": 1.180327868852459,
"grad_norm": 5.5863423347473145,
"learning_rate": 4.3869394090205105e-06,
"loss": 0.0731,
"num_input_tokens_seen": 1369408,
"step": 207
},
{
"epoch": 1.1860299358517463,
"grad_norm": 5.382776737213135,
"learning_rate": 4.380970818107556e-06,
"loss": 0.056,
"num_input_tokens_seen": 1376480,
"step": 208
},
{
"epoch": 1.1917320028510334,
"grad_norm": 8.42451286315918,
"learning_rate": 4.374977415917969e-06,
"loss": 0.1013,
"num_input_tokens_seen": 1382944,
"step": 209
},
{
"epoch": 1.1974340698503207,
"grad_norm": 4.907031536102295,
"learning_rate": 4.368959281508776e-06,
"loss": 0.0254,
"num_input_tokens_seen": 1389824,
"step": 210
},
{
"epoch": 1.203136136849608,
"grad_norm": 4.632014274597168,
"learning_rate": 4.3629164942632385e-06,
"loss": 0.0526,
"num_input_tokens_seen": 1396448,
"step": 211
},
{
"epoch": 1.2088382038488952,
"grad_norm": 5.454948902130127,
"learning_rate": 4.356849133889805e-06,
"loss": 0.037,
"num_input_tokens_seen": 1402976,
"step": 212
},
{
"epoch": 1.2145402708481825,
"grad_norm": 2.4538490772247314,
"learning_rate": 4.350757280421061e-06,
"loss": 0.0438,
"num_input_tokens_seen": 1409648,
"step": 213
},
{
"epoch": 1.2202423378474698,
"grad_norm": 5.704830169677734,
"learning_rate": 4.34464101421267e-06,
"loss": 0.0496,
"num_input_tokens_seen": 1416320,
"step": 214
},
{
"epoch": 1.225944404846757,
"grad_norm": 2.5545387268066406,
"learning_rate": 4.3385004159423195e-06,
"loss": 0.0453,
"num_input_tokens_seen": 1422768,
"step": 215
},
{
"epoch": 1.2316464718460443,
"grad_norm": 2.7966182231903076,
"learning_rate": 4.332335566608651e-06,
"loss": 0.0428,
"num_input_tokens_seen": 1429392,
"step": 216
},
{
"epoch": 1.2373485388453314,
"grad_norm": 6.403561592102051,
"learning_rate": 4.3261465475301956e-06,
"loss": 0.113,
"num_input_tokens_seen": 1435872,
"step": 217
},
{
"epoch": 1.2430506058446187,
"grad_norm": 3.515782594680786,
"learning_rate": 4.319933440344298e-06,
"loss": 0.0934,
"num_input_tokens_seen": 1443024,
"step": 218
},
{
"epoch": 1.248752672843906,
"grad_norm": 2.879307985305786,
"learning_rate": 4.313696327006042e-06,
"loss": 0.0495,
"num_input_tokens_seen": 1449488,
"step": 219
},
{
"epoch": 1.2544547398431931,
"grad_norm": 5.576188564300537,
"learning_rate": 4.307435289787169e-06,
"loss": 0.0962,
"num_input_tokens_seen": 1455808,
"step": 220
},
{
"epoch": 1.2601568068424804,
"grad_norm": 3.0014944076538086,
"learning_rate": 4.301150411274993e-06,
"loss": 0.0546,
"num_input_tokens_seen": 1462480,
"step": 221
},
{
"epoch": 1.2658588738417675,
"grad_norm": 2.2942137718200684,
"learning_rate": 4.294841774371308e-06,
"loss": 0.0321,
"num_input_tokens_seen": 1468832,
"step": 222
},
{
"epoch": 1.2715609408410549,
"grad_norm": 4.320146560668945,
"learning_rate": 4.288509462291302e-06,
"loss": 0.042,
"num_input_tokens_seen": 1475440,
"step": 223
},
{
"epoch": 1.2772630078403422,
"grad_norm": 2.6255133152008057,
"learning_rate": 4.282153558562451e-06,
"loss": 0.0554,
"num_input_tokens_seen": 1482336,
"step": 224
},
{
"epoch": 1.2829650748396293,
"grad_norm": 2.2276813983917236,
"learning_rate": 4.2757741470234214e-06,
"loss": 0.0263,
"num_input_tokens_seen": 1489472,
"step": 225
},
{
"epoch": 1.2886671418389166,
"grad_norm": 2.610847234725952,
"learning_rate": 4.269371311822965e-06,
"loss": 0.0433,
"num_input_tokens_seen": 1496304,
"step": 226
},
{
"epoch": 1.2943692088382037,
"grad_norm": 4.202786445617676,
"learning_rate": 4.262945137418806e-06,
"loss": 0.0569,
"num_input_tokens_seen": 1502896,
"step": 227
},
{
"epoch": 1.300071275837491,
"grad_norm": 4.87699031829834,
"learning_rate": 4.256495708576527e-06,
"loss": 0.0532,
"num_input_tokens_seen": 1510224,
"step": 228
},
{
"epoch": 1.3057733428367784,
"grad_norm": 2.4840846061706543,
"learning_rate": 4.250023110368458e-06,
"loss": 0.0155,
"num_input_tokens_seen": 1516624,
"step": 229
},
{
"epoch": 1.3114754098360657,
"grad_norm": 3.5216634273529053,
"learning_rate": 4.243527428172541e-06,
"loss": 0.0277,
"num_input_tokens_seen": 1523088,
"step": 230
},
{
"epoch": 1.3171774768353528,
"grad_norm": 5.229778289794922,
"learning_rate": 4.237008747671218e-06,
"loss": 0.074,
"num_input_tokens_seen": 1529936,
"step": 231
},
{
"epoch": 1.3228795438346401,
"grad_norm": 7.132479190826416,
"learning_rate": 4.230467154850289e-06,
"loss": 0.0861,
"num_input_tokens_seen": 1536320,
"step": 232
},
{
"epoch": 1.3285816108339272,
"grad_norm": 4.4164228439331055,
"learning_rate": 4.2239027359977885e-06,
"loss": 0.0669,
"num_input_tokens_seen": 1543056,
"step": 233
},
{
"epoch": 1.3342836778332146,
"grad_norm": 5.823759078979492,
"learning_rate": 4.217315577702836e-06,
"loss": 0.0703,
"num_input_tokens_seen": 1549664,
"step": 234
},
{
"epoch": 1.3399857448325019,
"grad_norm": 1.5898804664611816,
"learning_rate": 4.210705766854505e-06,
"loss": 0.0172,
"num_input_tokens_seen": 1556384,
"step": 235
},
{
"epoch": 1.345687811831789,
"grad_norm": 7.746312618255615,
"learning_rate": 4.2040733906406664e-06,
"loss": 0.1336,
"num_input_tokens_seen": 1563216,
"step": 236
},
{
"epoch": 1.3513898788310763,
"grad_norm": 2.8517649173736572,
"learning_rate": 4.197418536546846e-06,
"loss": 0.0207,
"num_input_tokens_seen": 1569664,
"step": 237
},
{
"epoch": 1.3570919458303634,
"grad_norm": 8.109379768371582,
"learning_rate": 4.190741292355071e-06,
"loss": 0.0815,
"num_input_tokens_seen": 1576464,
"step": 238
},
{
"epoch": 1.3627940128296507,
"grad_norm": 9.140835762023926,
"learning_rate": 4.184041746142702e-06,
"loss": 0.0453,
"num_input_tokens_seen": 1582816,
"step": 239
},
{
"epoch": 1.368496079828938,
"grad_norm": 4.517364025115967,
"learning_rate": 4.177319986281285e-06,
"loss": 0.0791,
"num_input_tokens_seen": 1589344,
"step": 240
},
{
"epoch": 1.3741981468282252,
"grad_norm": 3.070476770401001,
"learning_rate": 4.170576101435377e-06,
"loss": 0.0344,
"num_input_tokens_seen": 1595888,
"step": 241
},
{
"epoch": 1.3799002138275125,
"grad_norm": 4.99822998046875,
"learning_rate": 4.163810180561376e-06,
"loss": 0.0532,
"num_input_tokens_seen": 1602736,
"step": 242
},
{
"epoch": 1.3856022808267996,
"grad_norm": 4.312646389007568,
"learning_rate": 4.157022312906352e-06,
"loss": 0.0574,
"num_input_tokens_seen": 1609456,
"step": 243
},
{
"epoch": 1.391304347826087,
"grad_norm": 6.092965126037598,
"learning_rate": 4.150212588006871e-06,
"loss": 0.1002,
"num_input_tokens_seen": 1616032,
"step": 244
},
{
"epoch": 1.3970064148253742,
"grad_norm": 3.6262662410736084,
"learning_rate": 4.143381095687805e-06,
"loss": 0.0314,
"num_input_tokens_seen": 1622800,
"step": 245
},
{
"epoch": 1.4027084818246616,
"grad_norm": 7.102707386016846,
"learning_rate": 4.1365279260611575e-06,
"loss": 0.0689,
"num_input_tokens_seen": 1629248,
"step": 246
},
{
"epoch": 1.4084105488239487,
"grad_norm": 6.31537389755249,
"learning_rate": 4.129653169524867e-06,
"loss": 0.0587,
"num_input_tokens_seen": 1635568,
"step": 247
},
{
"epoch": 1.414112615823236,
"grad_norm": 3.1155130863189697,
"learning_rate": 4.12275691676162e-06,
"loss": 0.0433,
"num_input_tokens_seen": 1642208,
"step": 248
},
{
"epoch": 1.419814682822523,
"grad_norm": 3.728424310684204,
"learning_rate": 4.115839258737654e-06,
"loss": 0.0306,
"num_input_tokens_seen": 1648720,
"step": 249
},
{
"epoch": 1.4255167498218104,
"grad_norm": 3.9587793350219727,
"learning_rate": 4.108900286701553e-06,
"loss": 0.0454,
"num_input_tokens_seen": 1655296,
"step": 250
},
{
"epoch": 1.4312188168210978,
"grad_norm": 5.638779163360596,
"learning_rate": 4.101940092183048e-06,
"loss": 0.0464,
"num_input_tokens_seen": 1662272,
"step": 251
},
{
"epoch": 1.4369208838203849,
"grad_norm": 8.67476749420166,
"learning_rate": 4.094958766991812e-06,
"loss": 0.0578,
"num_input_tokens_seen": 1669168,
"step": 252
},
{
"epoch": 1.4426229508196722,
"grad_norm": 4.2457404136657715,
"learning_rate": 4.087956403216243e-06,
"loss": 0.0369,
"num_input_tokens_seen": 1675440,
"step": 253
},
{
"epoch": 1.4483250178189593,
"grad_norm": 6.338396072387695,
"learning_rate": 4.080933093222253e-06,
"loss": 0.0686,
"num_input_tokens_seen": 1682080,
"step": 254
},
{
"epoch": 1.4540270848182466,
"grad_norm": 3.969381809234619,
"learning_rate": 4.073888929652048e-06,
"loss": 0.0873,
"num_input_tokens_seen": 1688896,
"step": 255
},
{
"epoch": 1.459729151817534,
"grad_norm": 5.51995325088501,
"learning_rate": 4.066824005422907e-06,
"loss": 0.0466,
"num_input_tokens_seen": 1695760,
"step": 256
},
{
"epoch": 1.465431218816821,
"grad_norm": 7.369187355041504,
"learning_rate": 4.059738413725958e-06,
"loss": 0.0349,
"num_input_tokens_seen": 1702352,
"step": 257
},
{
"epoch": 1.4711332858161084,
"grad_norm": 8.143928527832031,
"learning_rate": 4.0526322480249435e-06,
"loss": 0.061,
"num_input_tokens_seen": 1709120,
"step": 258
},
{
"epoch": 1.4768353528153955,
"grad_norm": 4.497892379760742,
"learning_rate": 4.045505602054995e-06,
"loss": 0.0801,
"num_input_tokens_seen": 1716000,
"step": 259
},
{
"epoch": 1.4825374198146828,
"grad_norm": 4.872470378875732,
"learning_rate": 4.0383585698213874e-06,
"loss": 0.0406,
"num_input_tokens_seen": 1722080,
"step": 260
},
{
"epoch": 1.4882394868139701,
"grad_norm": 3.8097238540649414,
"learning_rate": 4.03119124559831e-06,
"loss": 0.0417,
"num_input_tokens_seen": 1728960,
"step": 261
},
{
"epoch": 1.4939415538132574,
"grad_norm": 3.029526948928833,
"learning_rate": 4.0240037239276146e-06,
"loss": 0.0178,
"num_input_tokens_seen": 1735408,
"step": 262
},
{
"epoch": 1.4996436208125445,
"grad_norm": 2.9828808307647705,
"learning_rate": 4.016796099617569e-06,
"loss": 0.0238,
"num_input_tokens_seen": 1742032,
"step": 263
},
{
"epoch": 1.5053456878118316,
"grad_norm": 10.337291717529297,
"learning_rate": 4.009568467741611e-06,
"loss": 0.0842,
"num_input_tokens_seen": 1748848,
"step": 264
},
{
"epoch": 1.511047754811119,
"grad_norm": 5.436164855957031,
"learning_rate": 4.002320923637091e-06,
"loss": 0.0354,
"num_input_tokens_seen": 1755584,
"step": 265
},
{
"epoch": 1.5167498218104063,
"grad_norm": 7.981906414031982,
"learning_rate": 3.995053562904015e-06,
"loss": 0.072,
"num_input_tokens_seen": 1762464,
"step": 266
},
{
"epoch": 1.5224518888096936,
"grad_norm": 2.6612589359283447,
"learning_rate": 3.987766481403785e-06,
"loss": 0.0297,
"num_input_tokens_seen": 1769008,
"step": 267
},
{
"epoch": 1.5281539558089807,
"grad_norm": 5.873383522033691,
"learning_rate": 3.98045977525793e-06,
"loss": 0.0355,
"num_input_tokens_seen": 1775808,
"step": 268
},
{
"epoch": 1.533856022808268,
"grad_norm": 4.245385646820068,
"learning_rate": 3.973133540846844e-06,
"loss": 0.0669,
"num_input_tokens_seen": 1782416,
"step": 269
},
{
"epoch": 1.5395580898075552,
"grad_norm": 5.252135276794434,
"learning_rate": 3.965787874808513e-06,
"loss": 0.0622,
"num_input_tokens_seen": 1788960,
"step": 270
},
{
"epoch": 1.5452601568068425,
"grad_norm": 7.067147731781006,
"learning_rate": 3.958422874037236e-06,
"loss": 0.0436,
"num_input_tokens_seen": 1795600,
"step": 271
},
{
"epoch": 1.5509622238061298,
"grad_norm": 3.885929822921753,
"learning_rate": 3.951038635682352e-06,
"loss": 0.0265,
"num_input_tokens_seen": 1802144,
"step": 272
},
{
"epoch": 1.5566642908054171,
"grad_norm": 3.3886828422546387,
"learning_rate": 3.943635257146959e-06,
"loss": 0.0165,
"num_input_tokens_seen": 1808656,
"step": 273
},
{
"epoch": 1.5623663578047042,
"grad_norm": 1.3812897205352783,
"learning_rate": 3.936212836086621e-06,
"loss": 0.0246,
"num_input_tokens_seen": 1815360,
"step": 274
},
{
"epoch": 1.5680684248039913,
"grad_norm": 5.722872257232666,
"learning_rate": 3.928771470408092e-06,
"loss": 0.0929,
"num_input_tokens_seen": 1822048,
"step": 275
},
{
"epoch": 1.5737704918032787,
"grad_norm": 5.5266313552856445,
"learning_rate": 3.921311258268013e-06,
"loss": 0.0591,
"num_input_tokens_seen": 1828416,
"step": 276
},
{
"epoch": 1.579472558802566,
"grad_norm": 4.463235855102539,
"learning_rate": 3.913832298071629e-06,
"loss": 0.0479,
"num_input_tokens_seen": 1835008,
"step": 277
},
{
"epoch": 1.5851746258018533,
"grad_norm": 5.2619757652282715,
"learning_rate": 3.906334688471479e-06,
"loss": 0.0414,
"num_input_tokens_seen": 1841536,
"step": 278
},
{
"epoch": 1.5908766928011404,
"grad_norm": 3.2384073734283447,
"learning_rate": 3.8988185283661005e-06,
"loss": 0.0591,
"num_input_tokens_seen": 1848064,
"step": 279
},
{
"epoch": 1.5965787598004275,
"grad_norm": 9.16089153289795,
"learning_rate": 3.891283916898729e-06,
"loss": 0.0797,
"num_input_tokens_seen": 1854640,
"step": 280
},
{
"epoch": 1.6022808267997148,
"grad_norm": 2.8571524620056152,
"learning_rate": 3.88373095345598e-06,
"loss": 0.0338,
"num_input_tokens_seen": 1861184,
"step": 281
},
{
"epoch": 1.6079828937990022,
"grad_norm": 5.12972354888916,
"learning_rate": 3.876159737666552e-06,
"loss": 0.0764,
"num_input_tokens_seen": 1867856,
"step": 282
},
{
"epoch": 1.6136849607982895,
"grad_norm": 4.020384311676025,
"learning_rate": 3.868570369399894e-06,
"loss": 0.0835,
"num_input_tokens_seen": 1874768,
"step": 283
},
{
"epoch": 1.6193870277975766,
"grad_norm": 3.4921162128448486,
"learning_rate": 3.860962948764906e-06,
"loss": 0.0644,
"num_input_tokens_seen": 1881488,
"step": 284
},
{
"epoch": 1.6250890947968637,
"grad_norm": 3.6133933067321777,
"learning_rate": 3.85333757610861e-06,
"loss": 0.0265,
"num_input_tokens_seen": 1888032,
"step": 285
},
{
"epoch": 1.630791161796151,
"grad_norm": 2.9440054893493652,
"learning_rate": 3.845694352014825e-06,
"loss": 0.0934,
"num_input_tokens_seen": 1895008,
"step": 286
},
{
"epoch": 1.6364932287954383,
"grad_norm": 4.363600730895996,
"learning_rate": 3.838033377302844e-06,
"loss": 0.0436,
"num_input_tokens_seen": 1901392,
"step": 287
},
{
"epoch": 1.6421952957947257,
"grad_norm": 2.8621530532836914,
"learning_rate": 3.8303547530261025e-06,
"loss": 0.0461,
"num_input_tokens_seen": 1907936,
"step": 288
},
{
"epoch": 1.6478973627940128,
"grad_norm": 4.192432403564453,
"learning_rate": 3.8226585804708435e-06,
"loss": 0.0521,
"num_input_tokens_seen": 1914608,
"step": 289
},
{
"epoch": 1.6535994297933,
"grad_norm": 2.4695684909820557,
"learning_rate": 3.814944961154788e-06,
"loss": 0.0282,
"num_input_tokens_seen": 1920848,
"step": 290
},
{
"epoch": 1.6593014967925872,
"grad_norm": 4.6032562255859375,
"learning_rate": 3.807213996825788e-06,
"loss": 0.0408,
"num_input_tokens_seen": 1927600,
"step": 291
},
{
"epoch": 1.6650035637918745,
"grad_norm": 3.675687789916992,
"learning_rate": 3.799465789460491e-06,
"loss": 0.0435,
"num_input_tokens_seen": 1934096,
"step": 292
},
{
"epoch": 1.6707056307911619,
"grad_norm": 2.347770929336548,
"learning_rate": 3.791700441262987e-06,
"loss": 0.0459,
"num_input_tokens_seen": 1940816,
"step": 293
},
{
"epoch": 1.6764076977904492,
"grad_norm": 4.666854381561279,
"learning_rate": 3.7839180546634703e-06,
"loss": 0.046,
"num_input_tokens_seen": 1947392,
"step": 294
},
{
"epoch": 1.6821097647897363,
"grad_norm": 5.212928295135498,
"learning_rate": 3.77611873231688e-06,
"loss": 0.0338,
"num_input_tokens_seen": 1954272,
"step": 295
},
{
"epoch": 1.6878118317890234,
"grad_norm": 0.8700922727584839,
"learning_rate": 3.7683025771015515e-06,
"loss": 0.0304,
"num_input_tokens_seen": 1960640,
"step": 296
},
{
"epoch": 1.6935138987883107,
"grad_norm": 6.290396690368652,
"learning_rate": 3.760469692117854e-06,
"loss": 0.0652,
"num_input_tokens_seen": 1967440,
"step": 297
},
{
"epoch": 1.699215965787598,
"grad_norm": 3.9211745262145996,
"learning_rate": 3.7526201806868372e-06,
"loss": 0.0233,
"num_input_tokens_seen": 1973968,
"step": 298
},
{
"epoch": 1.7049180327868854,
"grad_norm": 3.1297361850738525,
"learning_rate": 3.744754146348862e-06,
"loss": 0.0447,
"num_input_tokens_seen": 1980496,
"step": 299
},
{
"epoch": 1.7106200997861725,
"grad_norm": 6.064594745635986,
"learning_rate": 3.736871692862239e-06,
"loss": 0.0599,
"num_input_tokens_seen": 1987184,
"step": 300
},
{
"epoch": 1.7163221667854596,
"grad_norm": 6.277172565460205,
"learning_rate": 3.7289729242018584e-06,
"loss": 0.0607,
"num_input_tokens_seen": 1993968,
"step": 301
},
{
"epoch": 1.722024233784747,
"grad_norm": 4.980268955230713,
"learning_rate": 3.721057944557819e-06,
"loss": 0.0449,
"num_input_tokens_seen": 2000640,
"step": 302
},
{
"epoch": 1.7277263007840342,
"grad_norm": 4.455365180969238,
"learning_rate": 3.713126858334052e-06,
"loss": 0.0294,
"num_input_tokens_seen": 2006784,
"step": 303
},
{
"epoch": 1.7334283677833215,
"grad_norm": 8.115938186645508,
"learning_rate": 3.705179770146946e-06,
"loss": 0.058,
"num_input_tokens_seen": 2013280,
"step": 304
},
{
"epoch": 1.7391304347826086,
"grad_norm": 5.212691307067871,
"learning_rate": 3.6972167848239677e-06,
"loss": 0.0774,
"num_input_tokens_seen": 2019984,
"step": 305
},
{
"epoch": 1.744832501781896,
"grad_norm": 3.045757532119751,
"learning_rate": 3.689238007402275e-06,
"loss": 0.0718,
"num_input_tokens_seen": 2026624,
"step": 306
},
{
"epoch": 1.750534568781183,
"grad_norm": 4.732903003692627,
"learning_rate": 3.6812435431273375e-06,
"loss": 0.0642,
"num_input_tokens_seen": 2033136,
"step": 307
},
{
"epoch": 1.7562366357804704,
"grad_norm": 4.424813270568848,
"learning_rate": 3.673233497451541e-06,
"loss": 0.0992,
"num_input_tokens_seen": 2039680,
"step": 308
},
{
"epoch": 1.7619387027797577,
"grad_norm": 2.3566956520080566,
"learning_rate": 3.6652079760328045e-06,
"loss": 0.0325,
"num_input_tokens_seen": 2046096,
"step": 309
},
{
"epoch": 1.767640769779045,
"grad_norm": 3.0203731060028076,
"learning_rate": 3.6571670847331802e-06,
"loss": 0.0403,
"num_input_tokens_seen": 2052432,
"step": 310
},
{
"epoch": 1.7733428367783322,
"grad_norm": 4.3480753898620605,
"learning_rate": 3.6491109296174604e-06,
"loss": 0.0619,
"num_input_tokens_seen": 2058848,
"step": 311
},
{
"epoch": 1.7790449037776193,
"grad_norm": 4.105159282684326,
"learning_rate": 3.6410396169517763e-06,
"loss": 0.0609,
"num_input_tokens_seen": 2065344,
"step": 312
},
{
"epoch": 1.7847469707769066,
"grad_norm": 2.991960048675537,
"learning_rate": 3.632953253202199e-06,
"loss": 0.0523,
"num_input_tokens_seen": 2071696,
"step": 313
},
{
"epoch": 1.790449037776194,
"grad_norm": 3.855825662612915,
"learning_rate": 3.624851945033332e-06,
"loss": 0.0481,
"num_input_tokens_seen": 2078736,
"step": 314
},
{
"epoch": 1.7961511047754812,
"grad_norm": 3.1060192584991455,
"learning_rate": 3.6167357993069075e-06,
"loss": 0.0353,
"num_input_tokens_seen": 2085088,
"step": 315
},
{
"epoch": 1.8018531717747683,
"grad_norm": 2.189758777618408,
"learning_rate": 3.608604923080373e-06,
"loss": 0.0408,
"num_input_tokens_seen": 2091504,
"step": 316
},
{
"epoch": 1.8075552387740554,
"grad_norm": 2.719782590866089,
"learning_rate": 3.6004594236054837e-06,
"loss": 0.0368,
"num_input_tokens_seen": 2097984,
"step": 317
},
{
"epoch": 1.8132573057733428,
"grad_norm": 4.720352649688721,
"learning_rate": 3.592299408326883e-06,
"loss": 0.0614,
"num_input_tokens_seen": 2104560,
"step": 318
},
{
"epoch": 1.81895937277263,
"grad_norm": 4.813574314117432,
"learning_rate": 3.584124984880689e-06,
"loss": 0.0328,
"num_input_tokens_seen": 2110832,
"step": 319
},
{
"epoch": 1.8246614397719174,
"grad_norm": 6.9829816818237305,
"learning_rate": 3.5759362610930733e-06,
"loss": 0.0658,
"num_input_tokens_seen": 2117008,
"step": 320
},
{
"epoch": 1.8303635067712045,
"grad_norm": 3.228569269180298,
"learning_rate": 3.5677333449788376e-06,
"loss": 0.0491,
"num_input_tokens_seen": 2123312,
"step": 321
},
{
"epoch": 1.8360655737704918,
"grad_norm": 3.437143087387085,
"learning_rate": 3.5595163447399912e-06,
"loss": 0.0411,
"num_input_tokens_seen": 2129872,
"step": 322
},
{
"epoch": 1.841767640769779,
"grad_norm": 3.455941677093506,
"learning_rate": 3.551285368764321e-06,
"loss": 0.0281,
"num_input_tokens_seen": 2136688,
"step": 323
},
{
"epoch": 1.8474697077690663,
"grad_norm": 5.369020938873291,
"learning_rate": 3.5430405256239653e-06,
"loss": 0.0319,
"num_input_tokens_seen": 2143184,
"step": 324
},
{
"epoch": 1.8531717747683536,
"grad_norm": 4.390781402587891,
"learning_rate": 3.5347819240739783e-06,
"loss": 0.0919,
"num_input_tokens_seen": 2149712,
"step": 325
},
{
"epoch": 1.858873841767641,
"grad_norm": 5.870974540710449,
"learning_rate": 3.5265096730508972e-06,
"loss": 0.069,
"num_input_tokens_seen": 2156432,
"step": 326
},
{
"epoch": 1.864575908766928,
"grad_norm": 3.885589122772217,
"learning_rate": 3.5182238816713055e-06,
"loss": 0.0615,
"num_input_tokens_seen": 2163088,
"step": 327
},
{
"epoch": 1.8702779757662151,
"grad_norm": 4.062203407287598,
"learning_rate": 3.509924659230392e-06,
"loss": 0.046,
"num_input_tokens_seen": 2169664,
"step": 328
},
{
"epoch": 1.8759800427655025,
"grad_norm": 4.7195000648498535,
"learning_rate": 3.5016121152005123e-06,
"loss": 0.0317,
"num_input_tokens_seen": 2176272,
"step": 329
},
{
"epoch": 1.8816821097647898,
"grad_norm": 3.965040922164917,
"learning_rate": 3.4932863592297393e-06,
"loss": 0.0614,
"num_input_tokens_seen": 2182736,
"step": 330
},
{
"epoch": 1.887384176764077,
"grad_norm": 6.761993885040283,
"learning_rate": 3.4849475011404242e-06,
"loss": 0.0651,
"num_input_tokens_seen": 2189824,
"step": 331
},
{
"epoch": 1.8930862437633642,
"grad_norm": 3.391899585723877,
"learning_rate": 3.4765956509277416e-06,
"loss": 0.0283,
"num_input_tokens_seen": 2196400,
"step": 332
},
{
"epoch": 1.8987883107626513,
"grad_norm": 3.1956787109375,
"learning_rate": 3.4682309187582425e-06,
"loss": 0.0406,
"num_input_tokens_seen": 2202896,
"step": 333
},
{
"epoch": 1.9044903777619386,
"grad_norm": 3.3207476139068604,
"learning_rate": 3.459853414968398e-06,
"loss": 0.029,
"num_input_tokens_seen": 2209552,
"step": 334
},
{
"epoch": 1.910192444761226,
"grad_norm": 4.4404616355896,
"learning_rate": 3.451463250063146e-06,
"loss": 0.0202,
"num_input_tokens_seen": 2215920,
"step": 335
},
{
"epoch": 1.9158945117605133,
"grad_norm": 2.3631815910339355,
"learning_rate": 3.443060534714434e-06,
"loss": 0.0246,
"num_input_tokens_seen": 2222384,
"step": 336
},
{
"epoch": 1.9215965787598004,
"grad_norm": 5.453686714172363,
"learning_rate": 3.4346453797597577e-06,
"loss": 0.0755,
"num_input_tokens_seen": 2228864,
"step": 337
},
{
"epoch": 1.9272986457590877,
"grad_norm": 1.8340721130371094,
"learning_rate": 3.4262178962006994e-06,
"loss": 0.0162,
"num_input_tokens_seen": 2235328,
"step": 338
},
{
"epoch": 1.9330007127583748,
"grad_norm": 1.1222234964370728,
"learning_rate": 3.4177781952014646e-06,
"loss": 0.0127,
"num_input_tokens_seen": 2241760,
"step": 339
},
{
"epoch": 1.9387027797576621,
"grad_norm": 2.823270082473755,
"learning_rate": 3.409326388087414e-06,
"loss": 0.0225,
"num_input_tokens_seen": 2247968,
"step": 340
},
{
"epoch": 1.9444048467569495,
"grad_norm": 5.46552038192749,
"learning_rate": 3.400862586343597e-06,
"loss": 0.0556,
"num_input_tokens_seen": 2254528,
"step": 341
},
{
"epoch": 1.9501069137562368,
"grad_norm": 3.3685600757598877,
"learning_rate": 3.3923869016132816e-06,
"loss": 0.0282,
"num_input_tokens_seen": 2260928,
"step": 342
},
{
"epoch": 1.955808980755524,
"grad_norm": 1.8859189748764038,
"learning_rate": 3.3838994456964774e-06,
"loss": 0.0222,
"num_input_tokens_seen": 2268016,
"step": 343
},
{
"epoch": 1.961511047754811,
"grad_norm": 6.896831035614014,
"learning_rate": 3.375400330548466e-06,
"loss": 0.0387,
"num_input_tokens_seen": 2274592,
"step": 344
},
{
"epoch": 1.9672131147540983,
"grad_norm": 5.995792388916016,
"learning_rate": 3.3668896682783216e-06,
"loss": 0.0589,
"num_input_tokens_seen": 2281216,
"step": 345
},
{
"epoch": 1.9729151817533856,
"grad_norm": 7.283161640167236,
"learning_rate": 3.358367571147433e-06,
"loss": 0.1223,
"num_input_tokens_seen": 2287680,
"step": 346
},
{
"epoch": 1.978617248752673,
"grad_norm": 7.267601013183594,
"learning_rate": 3.3498341515680216e-06,
"loss": 0.0546,
"num_input_tokens_seen": 2294592,
"step": 347
},
{
"epoch": 1.98431931575196,
"grad_norm": 5.150763511657715,
"learning_rate": 3.3412895221016605e-06,
"loss": 0.0686,
"num_input_tokens_seen": 2301120,
"step": 348
},
{
"epoch": 1.9900213827512472,
"grad_norm": 4.490513801574707,
"learning_rate": 3.3327337954577893e-06,
"loss": 0.0316,
"num_input_tokens_seen": 2307552,
"step": 349
},
{
"epoch": 1.9957234497505345,
"grad_norm": 4.953909873962402,
"learning_rate": 3.324167084492226e-06,
"loss": 0.036,
"num_input_tokens_seen": 2314048,
"step": 350
},
{
"epoch": 2.001425516749822,
"grad_norm": 3.9158685207366943,
"learning_rate": 3.315589502205678e-06,
"loss": 0.0681,
"num_input_tokens_seen": 2320912,
"step": 351
},
{
"epoch": 2.007127583749109,
"grad_norm": 1.852107286453247,
"learning_rate": 3.3070011617422564e-06,
"loss": 0.0133,
"num_input_tokens_seen": 2327488,
"step": 352
},
{
"epoch": 2.0128296507483965,
"grad_norm": 2.2540292739868164,
"learning_rate": 3.2984021763879757e-06,
"loss": 0.0086,
"num_input_tokens_seen": 2334432,
"step": 353
},
{
"epoch": 2.0185317177476834,
"grad_norm": 1.3720698356628418,
"learning_rate": 3.2897926595692663e-06,
"loss": 0.0066,
"num_input_tokens_seen": 2341136,
"step": 354
},
{
"epoch": 2.0242337847469707,
"grad_norm": 1.604500651359558,
"learning_rate": 3.281172724851476e-06,
"loss": 0.0137,
"num_input_tokens_seen": 2347680,
"step": 355
},
{
"epoch": 2.029935851746258,
"grad_norm": 1.1898442506790161,
"learning_rate": 3.272542485937369e-06,
"loss": 0.0104,
"num_input_tokens_seen": 2354352,
"step": 356
},
{
"epoch": 2.0356379187455453,
"grad_norm": 2.400050640106201,
"learning_rate": 3.2639020566656316e-06,
"loss": 0.029,
"num_input_tokens_seen": 2360912,
"step": 357
},
{
"epoch": 2.0413399857448327,
"grad_norm": 6.066676139831543,
"learning_rate": 3.2552515510093674e-06,
"loss": 0.0628,
"num_input_tokens_seen": 2367552,
"step": 358
},
{
"epoch": 2.0470420527441195,
"grad_norm": 3.0650413036346436,
"learning_rate": 3.2465910830745926e-06,
"loss": 0.0492,
"num_input_tokens_seen": 2373888,
"step": 359
},
{
"epoch": 2.052744119743407,
"grad_norm": 3.076413154602051,
"learning_rate": 3.2379207670987352e-06,
"loss": 0.0217,
"num_input_tokens_seen": 2380528,
"step": 360
},
{
"epoch": 2.058446186742694,
"grad_norm": 5.445115089416504,
"learning_rate": 3.2292407174491224e-06,
"loss": 0.01,
"num_input_tokens_seen": 2387584,
"step": 361
},
{
"epoch": 2.0641482537419815,
"grad_norm": 2.9979920387268066,
"learning_rate": 3.2205510486214783e-06,
"loss": 0.0193,
"num_input_tokens_seen": 2394560,
"step": 362
},
{
"epoch": 2.069850320741269,
"grad_norm": 2.777930736541748,
"learning_rate": 3.211851875238408e-06,
"loss": 0.016,
"num_input_tokens_seen": 2401232,
"step": 363
},
{
"epoch": 2.075552387740556,
"grad_norm": 0.7351799607276917,
"learning_rate": 3.2031433120478894e-06,
"loss": 0.0025,
"num_input_tokens_seen": 2407888,
"step": 364
},
{
"epoch": 2.081254454739843,
"grad_norm": 0.19965003430843353,
"learning_rate": 3.1944254739217584e-06,
"loss": 0.002,
"num_input_tokens_seen": 2414288,
"step": 365
},
{
"epoch": 2.0869565217391304,
"grad_norm": 1.0815672874450684,
"learning_rate": 3.1856984758541924e-06,
"loss": 0.0113,
"num_input_tokens_seen": 2420768,
"step": 366
},
{
"epoch": 2.0926585887384177,
"grad_norm": 1.7040127515792847,
"learning_rate": 3.176962432960197e-06,
"loss": 0.0047,
"num_input_tokens_seen": 2427280,
"step": 367
},
{
"epoch": 2.098360655737705,
"grad_norm": 2.0529940128326416,
"learning_rate": 3.1682174604740808e-06,
"loss": 0.0186,
"num_input_tokens_seen": 2433824,
"step": 368
},
{
"epoch": 2.1040627227369924,
"grad_norm": 0.9493989944458008,
"learning_rate": 3.159463673747945e-06,
"loss": 0.0033,
"num_input_tokens_seen": 2440416,
"step": 369
},
{
"epoch": 2.1097647897362792,
"grad_norm": 3.0499930381774902,
"learning_rate": 3.150701188250153e-06,
"loss": 0.0344,
"num_input_tokens_seen": 2446928,
"step": 370
},
{
"epoch": 2.1154668567355666,
"grad_norm": 5.7982177734375,
"learning_rate": 3.141930119563812e-06,
"loss": 0.0362,
"num_input_tokens_seen": 2453664,
"step": 371
},
{
"epoch": 2.121168923734854,
"grad_norm": 0.2506842017173767,
"learning_rate": 3.133150583385247e-06,
"loss": 0.0009,
"num_input_tokens_seen": 2460256,
"step": 372
},
{
"epoch": 2.126870990734141,
"grad_norm": 1.3631197214126587,
"learning_rate": 3.1243626955224766e-06,
"loss": 0.0317,
"num_input_tokens_seen": 2466912,
"step": 373
},
{
"epoch": 2.1325730577334285,
"grad_norm": 2.615264892578125,
"learning_rate": 3.1155665718936806e-06,
"loss": 0.0158,
"num_input_tokens_seen": 2473936,
"step": 374
},
{
"epoch": 2.1382751247327154,
"grad_norm": 3.0895676612854004,
"learning_rate": 3.106762328525677e-06,
"loss": 0.0154,
"num_input_tokens_seen": 2480640,
"step": 375
},
{
"epoch": 2.1439771917320027,
"grad_norm": 2.583371639251709,
"learning_rate": 3.0979500815523865e-06,
"loss": 0.0178,
"num_input_tokens_seen": 2487152,
"step": 376
},
{
"epoch": 2.14967925873129,
"grad_norm": 8.645212173461914,
"learning_rate": 3.089129947213305e-06,
"loss": 0.0686,
"num_input_tokens_seen": 2493664,
"step": 377
},
{
"epoch": 2.1553813257305774,
"grad_norm": 5.1428046226501465,
"learning_rate": 3.0803020418519666e-06,
"loss": 0.0289,
"num_input_tokens_seen": 2500336,
"step": 378
},
{
"epoch": 2.1610833927298647,
"grad_norm": 6.207760810852051,
"learning_rate": 3.071466481914409e-06,
"loss": 0.0122,
"num_input_tokens_seen": 2507360,
"step": 379
},
{
"epoch": 2.1667854597291516,
"grad_norm": 2.8542068004608154,
"learning_rate": 3.0626233839476434e-06,
"loss": 0.0258,
"num_input_tokens_seen": 2513872,
"step": 380
},
{
"epoch": 2.172487526728439,
"grad_norm": 0.9682677388191223,
"learning_rate": 3.053772864598108e-06,
"loss": 0.0033,
"num_input_tokens_seen": 2520208,
"step": 381
},
{
"epoch": 2.1781895937277262,
"grad_norm": 1.7404452562332153,
"learning_rate": 3.0449150406101367e-06,
"loss": 0.01,
"num_input_tokens_seen": 2526752,
"step": 382
},
{
"epoch": 2.1838916607270136,
"grad_norm": 5.735099792480469,
"learning_rate": 3.0360500288244155e-06,
"loss": 0.0185,
"num_input_tokens_seen": 2532784,
"step": 383
},
{
"epoch": 2.189593727726301,
"grad_norm": 5.3574748039245605,
"learning_rate": 3.0271779461764427e-06,
"loss": 0.0171,
"num_input_tokens_seen": 2539456,
"step": 384
},
{
"epoch": 2.1952957947255882,
"grad_norm": 3.465667486190796,
"learning_rate": 3.018298909694986e-06,
"loss": 0.0049,
"num_input_tokens_seen": 2546144,
"step": 385
},
{
"epoch": 2.200997861724875,
"grad_norm": 1.0288059711456299,
"learning_rate": 3.00941303650054e-06,
"loss": 0.0044,
"num_input_tokens_seen": 2552768,
"step": 386
},
{
"epoch": 2.2066999287241624,
"grad_norm": 1.7451399564743042,
"learning_rate": 3.0005204438037767e-06,
"loss": 0.0218,
"num_input_tokens_seen": 2559248,
"step": 387
},
{
"epoch": 2.2124019957234498,
"grad_norm": 4.361851215362549,
"learning_rate": 2.991621248904007e-06,
"loss": 0.026,
"num_input_tokens_seen": 2566096,
"step": 388
},
{
"epoch": 2.218104062722737,
"grad_norm": 1.2106692790985107,
"learning_rate": 2.9827155691876266e-06,
"loss": 0.0037,
"num_input_tokens_seen": 2572944,
"step": 389
},
{
"epoch": 2.2238061297220244,
"grad_norm": 1.0988516807556152,
"learning_rate": 2.973803522126571e-06,
"loss": 0.0031,
"num_input_tokens_seen": 2579600,
"step": 390
},
{
"epoch": 2.2295081967213113,
"grad_norm": 1.429746150970459,
"learning_rate": 2.964885225276767e-06,
"loss": 0.0188,
"num_input_tokens_seen": 2586624,
"step": 391
},
{
"epoch": 2.2352102637205986,
"grad_norm": 2.6677236557006836,
"learning_rate": 2.9559607962765773e-06,
"loss": 0.0302,
"num_input_tokens_seen": 2593600,
"step": 392
},
{
"epoch": 2.240912330719886,
"grad_norm": 2.7065508365631104,
"learning_rate": 2.947030352845255e-06,
"loss": 0.0205,
"num_input_tokens_seen": 2600224,
"step": 393
},
{
"epoch": 2.2466143977191733,
"grad_norm": 5.244087219238281,
"learning_rate": 2.9380940127813834e-06,
"loss": 0.0347,
"num_input_tokens_seen": 2606704,
"step": 394
},
{
"epoch": 2.2523164647184606,
"grad_norm": 5.431631088256836,
"learning_rate": 2.9291518939613317e-06,
"loss": 0.065,
"num_input_tokens_seen": 2612816,
"step": 395
},
{
"epoch": 2.258018531717748,
"grad_norm": 7.171090126037598,
"learning_rate": 2.9202041143376895e-06,
"loss": 0.0662,
"num_input_tokens_seen": 2619840,
"step": 396
},
{
"epoch": 2.263720598717035,
"grad_norm": 2.2391579151153564,
"learning_rate": 2.9112507919377213e-06,
"loss": 0.0348,
"num_input_tokens_seen": 2626400,
"step": 397
},
{
"epoch": 2.269422665716322,
"grad_norm": 5.090347766876221,
"learning_rate": 2.9022920448618e-06,
"loss": 0.0105,
"num_input_tokens_seen": 2632928,
"step": 398
},
{
"epoch": 2.2751247327156094,
"grad_norm": 5.373437881469727,
"learning_rate": 2.893327991281857e-06,
"loss": 0.0478,
"num_input_tokens_seen": 2639488,
"step": 399
},
{
"epoch": 2.2808267997148968,
"grad_norm": 2.078587770462036,
"learning_rate": 2.8843587494398177e-06,
"loss": 0.0048,
"num_input_tokens_seen": 2645696,
"step": 400
},
{
"epoch": 2.2865288667141836,
"grad_norm": 4.209392070770264,
"learning_rate": 2.8753844376460462e-06,
"loss": 0.0416,
"num_input_tokens_seen": 2652768,
"step": 401
},
{
"epoch": 2.292230933713471,
"grad_norm": 1.7363594770431519,
"learning_rate": 2.8664051742777803e-06,
"loss": 0.009,
"num_input_tokens_seen": 2659600,
"step": 402
},
{
"epoch": 2.2979330007127583,
"grad_norm": 2.1213040351867676,
"learning_rate": 2.857421077777576e-06,
"loss": 0.0145,
"num_input_tokens_seen": 2666608,
"step": 403
},
{
"epoch": 2.3036350677120456,
"grad_norm": 1.049506664276123,
"learning_rate": 2.8484322666517373e-06,
"loss": 0.0239,
"num_input_tokens_seen": 2673408,
"step": 404
},
{
"epoch": 2.309337134711333,
"grad_norm": 1.5871895551681519,
"learning_rate": 2.83943885946876e-06,
"loss": 0.0151,
"num_input_tokens_seen": 2679824,
"step": 405
},
{
"epoch": 2.3150392017106203,
"grad_norm": 2.483149766921997,
"learning_rate": 2.8304409748577655e-06,
"loss": 0.0597,
"num_input_tokens_seen": 2686608,
"step": 406
},
{
"epoch": 2.320741268709907,
"grad_norm": 1.4659909009933472,
"learning_rate": 2.821438731506933e-06,
"loss": 0.0213,
"num_input_tokens_seen": 2693728,
"step": 407
},
{
"epoch": 2.3264433357091945,
"grad_norm": 2.8195438385009766,
"learning_rate": 2.8124322481619388e-06,
"loss": 0.0197,
"num_input_tokens_seen": 2700240,
"step": 408
},
{
"epoch": 2.332145402708482,
"grad_norm": 6.738165855407715,
"learning_rate": 2.803421643624386e-06,
"loss": 0.0435,
"num_input_tokens_seen": 2706592,
"step": 409
},
{
"epoch": 2.337847469707769,
"grad_norm": 1.9087073802947998,
"learning_rate": 2.7944070367502404e-06,
"loss": 0.0127,
"num_input_tokens_seen": 2712960,
"step": 410
},
{
"epoch": 2.3435495367070565,
"grad_norm": 0.8072167038917542,
"learning_rate": 2.7853885464482594e-06,
"loss": 0.0123,
"num_input_tokens_seen": 2719648,
"step": 411
},
{
"epoch": 2.3492516037063433,
"grad_norm": 3.834771156311035,
"learning_rate": 2.7763662916784285e-06,
"loss": 0.0331,
"num_input_tokens_seen": 2726672,
"step": 412
},
{
"epoch": 2.3549536707056307,
"grad_norm": 2.7633399963378906,
"learning_rate": 2.767340391450384e-06,
"loss": 0.0507,
"num_input_tokens_seen": 2733520,
"step": 413
},
{
"epoch": 2.360655737704918,
"grad_norm": 1.5225905179977417,
"learning_rate": 2.758310964821855e-06,
"loss": 0.0447,
"num_input_tokens_seen": 2740208,
"step": 414
},
{
"epoch": 2.3663578047042053,
"grad_norm": 1.4298495054244995,
"learning_rate": 2.7492781308970805e-06,
"loss": 0.0252,
"num_input_tokens_seen": 2746560,
"step": 415
},
{
"epoch": 2.3720598717034926,
"grad_norm": 1.6047407388687134,
"learning_rate": 2.7402420088252472e-06,
"loss": 0.0117,
"num_input_tokens_seen": 2752672,
"step": 416
},
{
"epoch": 2.37776193870278,
"grad_norm": 1.5428413152694702,
"learning_rate": 2.7312027177989132e-06,
"loss": 0.0149,
"num_input_tokens_seen": 2759168,
"step": 417
},
{
"epoch": 2.383464005702067,
"grad_norm": 0.30333733558654785,
"learning_rate": 2.7221603770524374e-06,
"loss": 0.0038,
"num_input_tokens_seen": 2765680,
"step": 418
},
{
"epoch": 2.389166072701354,
"grad_norm": 6.950188636779785,
"learning_rate": 2.713115105860407e-06,
"loss": 0.0374,
"num_input_tokens_seen": 2772480,
"step": 419
},
{
"epoch": 2.3948681397006415,
"grad_norm": 1.8761096000671387,
"learning_rate": 2.7040670235360643e-06,
"loss": 0.0246,
"num_input_tokens_seen": 2779072,
"step": 420
},
{
"epoch": 2.400570206699929,
"grad_norm": 3.416837453842163,
"learning_rate": 2.6950162494297316e-06,
"loss": 0.0363,
"num_input_tokens_seen": 2785536,
"step": 421
},
{
"epoch": 2.406272273699216,
"grad_norm": 0.9194361567497253,
"learning_rate": 2.6859629029272365e-06,
"loss": 0.0197,
"num_input_tokens_seen": 2792192,
"step": 422
},
{
"epoch": 2.411974340698503,
"grad_norm": 1.3932160139083862,
"learning_rate": 2.676907103448341e-06,
"loss": 0.0113,
"num_input_tokens_seen": 2798992,
"step": 423
},
{
"epoch": 2.4176764076977904,
"grad_norm": 5.138923168182373,
"learning_rate": 2.667848970445161e-06,
"loss": 0.0255,
"num_input_tokens_seen": 2805360,
"step": 424
},
{
"epoch": 2.4233784746970777,
"grad_norm": 2.4364206790924072,
"learning_rate": 2.658788623400595e-06,
"loss": 0.0184,
"num_input_tokens_seen": 2812000,
"step": 425
},
{
"epoch": 2.429080541696365,
"grad_norm": 3.580717086791992,
"learning_rate": 2.6497261818267438e-06,
"loss": 0.0397,
"num_input_tokens_seen": 2818752,
"step": 426
},
{
"epoch": 2.4347826086956523,
"grad_norm": 2.4379868507385254,
"learning_rate": 2.6406617652633405e-06,
"loss": 0.0242,
"num_input_tokens_seen": 2825616,
"step": 427
},
{
"epoch": 2.4404846756949397,
"grad_norm": 2.464996337890625,
"learning_rate": 2.6315954932761645e-06,
"loss": 0.0168,
"num_input_tokens_seen": 2832384,
"step": 428
},
{
"epoch": 2.4461867426942265,
"grad_norm": 1.3222076892852783,
"learning_rate": 2.6225274854554733e-06,
"loss": 0.0034,
"num_input_tokens_seen": 2839152,
"step": 429
},
{
"epoch": 2.451888809693514,
"grad_norm": 1.6748195886611938,
"learning_rate": 2.61345786141442e-06,
"loss": 0.0063,
"num_input_tokens_seen": 2845856,
"step": 430
},
{
"epoch": 2.457590876692801,
"grad_norm": 2.3060872554779053,
"learning_rate": 2.6043867407874774e-06,
"loss": 0.0229,
"num_input_tokens_seen": 2852736,
"step": 431
},
{
"epoch": 2.4632929436920885,
"grad_norm": 0.5100630521774292,
"learning_rate": 2.5953142432288573e-06,
"loss": 0.0047,
"num_input_tokens_seen": 2859456,
"step": 432
},
{
"epoch": 2.4689950106913754,
"grad_norm": 1.8006185293197632,
"learning_rate": 2.5862404884109366e-06,
"loss": 0.0271,
"num_input_tokens_seen": 2866016,
"step": 433
},
{
"epoch": 2.4746970776906627,
"grad_norm": 1.213702917098999,
"learning_rate": 2.5771655960226753e-06,
"loss": 0.0041,
"num_input_tokens_seen": 2872448,
"step": 434
},
{
"epoch": 2.48039914468995,
"grad_norm": 1.6707085371017456,
"learning_rate": 2.568089685768038e-06,
"loss": 0.015,
"num_input_tokens_seen": 2879088,
"step": 435
},
{
"epoch": 2.4861012116892374,
"grad_norm": 5.4278059005737305,
"learning_rate": 2.559012877364417e-06,
"loss": 0.042,
"num_input_tokens_seen": 2885408,
"step": 436
},
{
"epoch": 2.4918032786885247,
"grad_norm": 1.1846269369125366,
"learning_rate": 2.54993529054105e-06,
"loss": 0.0099,
"num_input_tokens_seen": 2891936,
"step": 437
},
{
"epoch": 2.497505345687812,
"grad_norm": 3.3710663318634033,
"learning_rate": 2.5408570450374452e-06,
"loss": 0.0215,
"num_input_tokens_seen": 2898304,
"step": 438
},
{
"epoch": 2.5032074126870993,
"grad_norm": 0.21116487681865692,
"learning_rate": 2.531778260601796e-06,
"loss": 0.0013,
"num_input_tokens_seen": 2905200,
"step": 439
},
{
"epoch": 2.5089094796863862,
"grad_norm": 0.9096330404281616,
"learning_rate": 2.522699056989408e-06,
"loss": 0.0248,
"num_input_tokens_seen": 2911648,
"step": 440
},
{
"epoch": 2.5146115466856735,
"grad_norm": 4.185869216918945,
"learning_rate": 2.5136195539611135e-06,
"loss": 0.034,
"num_input_tokens_seen": 2918864,
"step": 441
},
{
"epoch": 2.520313613684961,
"grad_norm": 5.981312274932861,
"learning_rate": 2.5045398712816954e-06,
"loss": 0.0484,
"num_input_tokens_seen": 2925648,
"step": 442
},
{
"epoch": 2.526015680684248,
"grad_norm": 4.155544757843018,
"learning_rate": 2.4954601287183054e-06,
"loss": 0.0519,
"num_input_tokens_seen": 2932368,
"step": 443
},
{
"epoch": 2.531717747683535,
"grad_norm": 1.5854977369308472,
"learning_rate": 2.4863804460388877e-06,
"loss": 0.0088,
"num_input_tokens_seen": 2938752,
"step": 444
},
{
"epoch": 2.5374198146828224,
"grad_norm": 1.6894891262054443,
"learning_rate": 2.4773009430105923e-06,
"loss": 0.0249,
"num_input_tokens_seen": 2945280,
"step": 445
},
{
"epoch": 2.5431218816821097,
"grad_norm": 0.4828067719936371,
"learning_rate": 2.468221739398205e-06,
"loss": 0.0041,
"num_input_tokens_seen": 2952048,
"step": 446
},
{
"epoch": 2.548823948681397,
"grad_norm": 1.1254082918167114,
"learning_rate": 2.459142954962555e-06,
"loss": 0.0058,
"num_input_tokens_seen": 2958848,
"step": 447
},
{
"epoch": 2.5545260156806844,
"grad_norm": 1.2685133218765259,
"learning_rate": 2.4500647094589507e-06,
"loss": 0.0053,
"num_input_tokens_seen": 2965264,
"step": 448
},
{
"epoch": 2.5602280826799717,
"grad_norm": 2.40993595123291,
"learning_rate": 2.4409871226355835e-06,
"loss": 0.0212,
"num_input_tokens_seen": 2971920,
"step": 449
},
{
"epoch": 2.5659301496792586,
"grad_norm": 1.316887617111206,
"learning_rate": 2.4319103142319624e-06,
"loss": 0.0074,
"num_input_tokens_seen": 2978368,
"step": 450
},
{
"epoch": 2.571632216678546,
"grad_norm": 0.8872145414352417,
"learning_rate": 2.422834403977325e-06,
"loss": 0.0075,
"num_input_tokens_seen": 2985168,
"step": 451
},
{
"epoch": 2.5773342836778332,
"grad_norm": 1.6685765981674194,
"learning_rate": 2.413759511589064e-06,
"loss": 0.0119,
"num_input_tokens_seen": 2991744,
"step": 452
},
{
"epoch": 2.5830363506771206,
"grad_norm": 2.919286012649536,
"learning_rate": 2.404685756771143e-06,
"loss": 0.0231,
"num_input_tokens_seen": 2998320,
"step": 453
},
{
"epoch": 2.5887384176764074,
"grad_norm": 1.551300287246704,
"learning_rate": 2.3956132592125234e-06,
"loss": 0.0046,
"num_input_tokens_seen": 3005056,
"step": 454
},
{
"epoch": 2.5944404846756948,
"grad_norm": 5.427515983581543,
"learning_rate": 2.3865421385855807e-06,
"loss": 0.0071,
"num_input_tokens_seen": 3011696,
"step": 455
},
{
"epoch": 2.600142551674982,
"grad_norm": 2.1635303497314453,
"learning_rate": 2.3774725145445276e-06,
"loss": 0.0148,
"num_input_tokens_seen": 3018416,
"step": 456
},
{
"epoch": 2.6058446186742694,
"grad_norm": 0.261139839887619,
"learning_rate": 2.3684045067238363e-06,
"loss": 0.0011,
"num_input_tokens_seen": 3025040,
"step": 457
},
{
"epoch": 2.6115466856735567,
"grad_norm": 0.7715526819229126,
"learning_rate": 2.359338234736661e-06,
"loss": 0.005,
"num_input_tokens_seen": 3031472,
"step": 458
},
{
"epoch": 2.617248752672844,
"grad_norm": 1.2660444974899292,
"learning_rate": 2.3502738181732566e-06,
"loss": 0.0348,
"num_input_tokens_seen": 3038176,
"step": 459
},
{
"epoch": 2.6229508196721314,
"grad_norm": 0.49736925959587097,
"learning_rate": 2.341211376599406e-06,
"loss": 0.0024,
"num_input_tokens_seen": 3044592,
"step": 460
},
{
"epoch": 2.6286528866714183,
"grad_norm": 0.3166240155696869,
"learning_rate": 2.3321510295548396e-06,
"loss": 0.0013,
"num_input_tokens_seen": 3050976,
"step": 461
},
{
"epoch": 2.6343549536707056,
"grad_norm": 2.3578577041625977,
"learning_rate": 2.32309289655166e-06,
"loss": 0.0408,
"num_input_tokens_seen": 3057568,
"step": 462
},
{
"epoch": 2.640057020669993,
"grad_norm": 2.359955310821533,
"learning_rate": 2.3140370970727644e-06,
"loss": 0.0069,
"num_input_tokens_seen": 3064288,
"step": 463
},
{
"epoch": 2.6457590876692803,
"grad_norm": 1.4914777278900146,
"learning_rate": 2.30498375057027e-06,
"loss": 0.0382,
"num_input_tokens_seen": 3070832,
"step": 464
},
{
"epoch": 2.651461154668567,
"grad_norm": 2.532496213912964,
"learning_rate": 2.2959329764639366e-06,
"loss": 0.0179,
"num_input_tokens_seen": 3076992,
"step": 465
},
{
"epoch": 2.6571632216678545,
"grad_norm": 3.0252954959869385,
"learning_rate": 2.286884894139594e-06,
"loss": 0.0073,
"num_input_tokens_seen": 3083632,
"step": 466
},
{
"epoch": 2.662865288667142,
"grad_norm": 2.534813165664673,
"learning_rate": 2.2778396229475634e-06,
"loss": 0.06,
"num_input_tokens_seen": 3090128,
"step": 467
},
{
"epoch": 2.668567355666429,
"grad_norm": 1.6760576963424683,
"learning_rate": 2.2687972822010885e-06,
"loss": 0.0443,
"num_input_tokens_seen": 3096768,
"step": 468
},
{
"epoch": 2.6742694226657164,
"grad_norm": 2.89322829246521,
"learning_rate": 2.259757991174753e-06,
"loss": 0.0082,
"num_input_tokens_seen": 3103104,
"step": 469
},
{
"epoch": 2.6799714896650038,
"grad_norm": 2.089461088180542,
"learning_rate": 2.2507218691029204e-06,
"loss": 0.0036,
"num_input_tokens_seen": 3109616,
"step": 470
},
{
"epoch": 2.6856735566642906,
"grad_norm": 2.481194019317627,
"learning_rate": 2.2416890351781452e-06,
"loss": 0.0129,
"num_input_tokens_seen": 3116432,
"step": 471
},
{
"epoch": 2.691375623663578,
"grad_norm": 2.657470703125,
"learning_rate": 2.2326596085496166e-06,
"loss": 0.0122,
"num_input_tokens_seen": 3123232,
"step": 472
},
{
"epoch": 2.6970776906628653,
"grad_norm": 2.6968326568603516,
"learning_rate": 2.2236337083215727e-06,
"loss": 0.0285,
"num_input_tokens_seen": 3129664,
"step": 473
},
{
"epoch": 2.7027797576621526,
"grad_norm": 2.0984086990356445,
"learning_rate": 2.2146114535517415e-06,
"loss": 0.0132,
"num_input_tokens_seen": 3136128,
"step": 474
},
{
"epoch": 2.7084818246614395,
"grad_norm": 3.109532117843628,
"learning_rate": 2.20559296324976e-06,
"loss": 0.0359,
"num_input_tokens_seen": 3142960,
"step": 475
},
{
"epoch": 2.714183891660727,
"grad_norm": 0.43073195219039917,
"learning_rate": 2.1965783563756148e-06,
"loss": 0.0039,
"num_input_tokens_seen": 3149216,
"step": 476
},
{
"epoch": 2.719885958660014,
"grad_norm": 1.3117117881774902,
"learning_rate": 2.1875677518380616e-06,
"loss": 0.0151,
"num_input_tokens_seen": 3155840,
"step": 477
},
{
"epoch": 2.7255880256593015,
"grad_norm": 3.4339799880981445,
"learning_rate": 2.178561268493068e-06,
"loss": 0.0406,
"num_input_tokens_seen": 3162176,
"step": 478
},
{
"epoch": 2.731290092658589,
"grad_norm": 0.5435781478881836,
"learning_rate": 2.1695590251422353e-06,
"loss": 0.0055,
"num_input_tokens_seen": 3168528,
"step": 479
},
{
"epoch": 2.736992159657876,
"grad_norm": 3.829059600830078,
"learning_rate": 2.1605611405312406e-06,
"loss": 0.0414,
"num_input_tokens_seen": 3175296,
"step": 480
},
{
"epoch": 2.7426942266571634,
"grad_norm": 2.039680242538452,
"learning_rate": 2.1515677333482635e-06,
"loss": 0.0199,
"num_input_tokens_seen": 3182112,
"step": 481
},
{
"epoch": 2.7483962936564503,
"grad_norm": 0.9767200946807861,
"learning_rate": 2.1425789222224254e-06,
"loss": 0.0141,
"num_input_tokens_seen": 3189008,
"step": 482
},
{
"epoch": 2.7540983606557377,
"grad_norm": 2.2033214569091797,
"learning_rate": 2.13359482572222e-06,
"loss": 0.0281,
"num_input_tokens_seen": 3195280,
"step": 483
},
{
"epoch": 2.759800427655025,
"grad_norm": 2.3245043754577637,
"learning_rate": 2.124615562353955e-06,
"loss": 0.0273,
"num_input_tokens_seen": 3201840,
"step": 484
},
{
"epoch": 2.7655024946543123,
"grad_norm": 0.9290107488632202,
"learning_rate": 2.115641250560183e-06,
"loss": 0.0048,
"num_input_tokens_seen": 3208048,
"step": 485
},
{
"epoch": 2.771204561653599,
"grad_norm": 3.848163604736328,
"learning_rate": 2.106672008718144e-06,
"loss": 0.0312,
"num_input_tokens_seen": 3214368,
"step": 486
},
{
"epoch": 2.7769066286528865,
"grad_norm": 2.497357130050659,
"learning_rate": 2.0977079551382006e-06,
"loss": 0.011,
"num_input_tokens_seen": 3221008,
"step": 487
},
{
"epoch": 2.782608695652174,
"grad_norm": 2.660881519317627,
"learning_rate": 2.08874920806228e-06,
"loss": 0.0109,
"num_input_tokens_seen": 3227584,
"step": 488
},
{
"epoch": 2.788310762651461,
"grad_norm": 1.4301801919937134,
"learning_rate": 2.079795885662311e-06,
"loss": 0.0051,
"num_input_tokens_seen": 3234224,
"step": 489
},
{
"epoch": 2.7940128296507485,
"grad_norm": 1.022800087928772,
"learning_rate": 2.07084810603867e-06,
"loss": 0.0033,
"num_input_tokens_seen": 3240544,
"step": 490
},
{
"epoch": 2.799714896650036,
"grad_norm": 2.826751708984375,
"learning_rate": 2.0619059872186174e-06,
"loss": 0.0228,
"num_input_tokens_seen": 3247120,
"step": 491
},
{
"epoch": 2.805416963649323,
"grad_norm": 6.670321941375732,
"learning_rate": 2.0529696471547455e-06,
"loss": 0.0192,
"num_input_tokens_seen": 3253744,
"step": 492
},
{
"epoch": 2.81111903064861,
"grad_norm": 3.9984092712402344,
"learning_rate": 2.044039203723423e-06,
"loss": 0.0067,
"num_input_tokens_seen": 3260512,
"step": 493
},
{
"epoch": 2.8168210976478973,
"grad_norm": 4.25165319442749,
"learning_rate": 2.035114774723233e-06,
"loss": 0.0558,
"num_input_tokens_seen": 3267008,
"step": 494
},
{
"epoch": 2.8225231646471847,
"grad_norm": 1.6763029098510742,
"learning_rate": 2.02619647787343e-06,
"loss": 0.0247,
"num_input_tokens_seen": 3273696,
"step": 495
},
{
"epoch": 2.828225231646472,
"grad_norm": 4.306890964508057,
"learning_rate": 2.017284430812374e-06,
"loss": 0.0501,
"num_input_tokens_seen": 3280368,
"step": 496
},
{
"epoch": 2.833927298645759,
"grad_norm": 4.766158103942871,
"learning_rate": 2.008378751095994e-06,
"loss": 0.0268,
"num_input_tokens_seen": 3287200,
"step": 497
},
{
"epoch": 2.839629365645046,
"grad_norm": 2.9017302989959717,
"learning_rate": 1.9994795561962237e-06,
"loss": 0.0323,
"num_input_tokens_seen": 3293648,
"step": 498
},
{
"epoch": 2.8453314326443335,
"grad_norm": 1.3675230741500854,
"learning_rate": 1.990586963499461e-06,
"loss": 0.0469,
"num_input_tokens_seen": 3300496,
"step": 499
},
{
"epoch": 2.851033499643621,
"grad_norm": 6.752871513366699,
"learning_rate": 1.981701090305014e-06,
"loss": 0.06,
"num_input_tokens_seen": 3306624,
"step": 500
},
{
"epoch": 2.856735566642908,
"grad_norm": 3.9464950561523438,
"learning_rate": 1.9728220538235577e-06,
"loss": 0.0153,
"num_input_tokens_seen": 3313312,
"step": 501
},
{
"epoch": 2.8624376336421955,
"grad_norm": 0.293773353099823,
"learning_rate": 1.963949971175585e-06,
"loss": 0.0019,
"num_input_tokens_seen": 3319872,
"step": 502
},
{
"epoch": 2.8681397006414824,
"grad_norm": 0.5789422392845154,
"learning_rate": 1.955084959389864e-06,
"loss": 0.0037,
"num_input_tokens_seen": 3326288,
"step": 503
},
{
"epoch": 2.8738417676407697,
"grad_norm": 1.1285568475723267,
"learning_rate": 1.9462271354018925e-06,
"loss": 0.0091,
"num_input_tokens_seen": 3332816,
"step": 504
},
{
"epoch": 2.879543834640057,
"grad_norm": 0.7351142764091492,
"learning_rate": 1.937376616052357e-06,
"loss": 0.0038,
"num_input_tokens_seen": 3339248,
"step": 505
},
{
"epoch": 2.8852459016393444,
"grad_norm": 0.5724226236343384,
"learning_rate": 1.9285335180855906e-06,
"loss": 0.0026,
"num_input_tokens_seen": 3345440,
"step": 506
},
{
"epoch": 2.8909479686386312,
"grad_norm": 4.474715709686279,
"learning_rate": 1.9196979581480347e-06,
"loss": 0.0198,
"num_input_tokens_seen": 3352176,
"step": 507
},
{
"epoch": 2.8966500356379186,
"grad_norm": 4.141156196594238,
"learning_rate": 1.9108700527866954e-06,
"loss": 0.0372,
"num_input_tokens_seen": 3358720,
"step": 508
},
{
"epoch": 2.902352102637206,
"grad_norm": 0.5210357904434204,
"learning_rate": 1.9020499184476137e-06,
"loss": 0.0063,
"num_input_tokens_seen": 3365472,
"step": 509
},
{
"epoch": 2.908054169636493,
"grad_norm": 0.7672094106674194,
"learning_rate": 1.8932376714743237e-06,
"loss": 0.0034,
"num_input_tokens_seen": 3371728,
"step": 510
},
{
"epoch": 2.9137562366357805,
"grad_norm": 1.55528724193573,
"learning_rate": 1.88443342810632e-06,
"loss": 0.0049,
"num_input_tokens_seen": 3378608,
"step": 511
},
{
"epoch": 2.919458303635068,
"grad_norm": 2.993779420852661,
"learning_rate": 1.8756373044775238e-06,
"loss": 0.0236,
"num_input_tokens_seen": 3385280,
"step": 512
},
{
"epoch": 2.925160370634355,
"grad_norm": 1.1400753259658813,
"learning_rate": 1.8668494166147532e-06,
"loss": 0.0326,
"num_input_tokens_seen": 3391872,
"step": 513
},
{
"epoch": 2.930862437633642,
"grad_norm": 1.8988653421401978,
"learning_rate": 1.8580698804361886e-06,
"loss": 0.0349,
"num_input_tokens_seen": 3398512,
"step": 514
},
{
"epoch": 2.9365645046329294,
"grad_norm": 0.7114527225494385,
"learning_rate": 1.8492988117498478e-06,
"loss": 0.0034,
"num_input_tokens_seen": 3405376,
"step": 515
},
{
"epoch": 2.9422665716322167,
"grad_norm": 3.2026875019073486,
"learning_rate": 1.840536326252055e-06,
"loss": 0.0309,
"num_input_tokens_seen": 3412192,
"step": 516
},
{
"epoch": 2.947968638631504,
"grad_norm": 3.8607699871063232,
"learning_rate": 1.8317825395259199e-06,
"loss": 0.0185,
"num_input_tokens_seen": 3418896,
"step": 517
},
{
"epoch": 2.953670705630791,
"grad_norm": 0.7184828519821167,
"learning_rate": 1.8230375670398037e-06,
"loss": 0.0054,
"num_input_tokens_seen": 3425456,
"step": 518
},
{
"epoch": 2.9593727726300783,
"grad_norm": 8.588293075561523,
"learning_rate": 1.8143015241458082e-06,
"loss": 0.0076,
"num_input_tokens_seen": 3432384,
"step": 519
},
{
"epoch": 2.9650748396293656,
"grad_norm": 1.3687328100204468,
"learning_rate": 1.8055745260782415e-06,
"loss": 0.0191,
"num_input_tokens_seen": 3438928,
"step": 520
},
{
"epoch": 2.970776906628653,
"grad_norm": 4.8277812004089355,
"learning_rate": 1.7968566879521112e-06,
"loss": 0.0348,
"num_input_tokens_seen": 3445424,
"step": 521
},
{
"epoch": 2.9764789736279402,
"grad_norm": 1.5100287199020386,
"learning_rate": 1.7881481247615923e-06,
"loss": 0.0166,
"num_input_tokens_seen": 3452176,
"step": 522
},
{
"epoch": 2.9821810406272276,
"grad_norm": 4.24040412902832,
"learning_rate": 1.779448951378523e-06,
"loss": 0.0322,
"num_input_tokens_seen": 3458912,
"step": 523
},
{
"epoch": 2.987883107626515,
"grad_norm": 1.88912832736969,
"learning_rate": 1.7707592825508776e-06,
"loss": 0.0063,
"num_input_tokens_seen": 3465136,
"step": 524
},
{
"epoch": 2.9935851746258018,
"grad_norm": 0.41476720571517944,
"learning_rate": 1.7620792329012656e-06,
"loss": 0.0021,
"num_input_tokens_seen": 3471920,
"step": 525
},
{
"epoch": 2.999287241625089,
"grad_norm": 1.5852344036102295,
"learning_rate": 1.7534089169254076e-06,
"loss": 0.0116,
"num_input_tokens_seen": 3478608,
"step": 526
},
{
"epoch": 3.0049893086243764,
"grad_norm": 0.4748835861682892,
"learning_rate": 1.7447484489906332e-06,
"loss": 0.0027,
"num_input_tokens_seen": 3485232,
"step": 527
},
{
"epoch": 3.0106913756236637,
"grad_norm": 1.6355117559432983,
"learning_rate": 1.7360979433343686e-06,
"loss": 0.0076,
"num_input_tokens_seen": 3491840,
"step": 528
},
{
"epoch": 3.0163934426229506,
"grad_norm": 1.1035512685775757,
"learning_rate": 1.7274575140626318e-06,
"loss": 0.006,
"num_input_tokens_seen": 3498896,
"step": 529
},
{
"epoch": 3.022095509622238,
"grad_norm": 0.10061535239219666,
"learning_rate": 1.7188272751485246e-06,
"loss": 0.0008,
"num_input_tokens_seen": 3505488,
"step": 530
},
{
"epoch": 3.0277975766215253,
"grad_norm": 1.6864421367645264,
"learning_rate": 1.710207340430734e-06,
"loss": 0.0139,
"num_input_tokens_seen": 3512144,
"step": 531
},
{
"epoch": 3.0334996436208126,
"grad_norm": 1.3023935556411743,
"learning_rate": 1.7015978236120245e-06,
"loss": 0.0123,
"num_input_tokens_seen": 3519008,
"step": 532
},
{
"epoch": 3.0392017106201,
"grad_norm": 0.0782478079199791,
"learning_rate": 1.6929988382577442e-06,
"loss": 0.0004,
"num_input_tokens_seen": 3525408,
"step": 533
},
{
"epoch": 3.0449037776193872,
"grad_norm": 0.06191781163215637,
"learning_rate": 1.6844104977943219e-06,
"loss": 0.0005,
"num_input_tokens_seen": 3531744,
"step": 534
},
{
"epoch": 3.050605844618674,
"grad_norm": 0.7478132247924805,
"learning_rate": 1.6758329155077746e-06,
"loss": 0.0025,
"num_input_tokens_seen": 3538240,
"step": 535
},
{
"epoch": 3.0563079116179614,
"grad_norm": 0.1811976581811905,
"learning_rate": 1.667266204542211e-06,
"loss": 0.0008,
"num_input_tokens_seen": 3544944,
"step": 536
},
{
"epoch": 3.0620099786172488,
"grad_norm": 0.06903179734945297,
"learning_rate": 1.6587104778983397e-06,
"loss": 0.0004,
"num_input_tokens_seen": 3551664,
"step": 537
},
{
"epoch": 3.067712045616536,
"grad_norm": 0.029220720753073692,
"learning_rate": 1.650165848431979e-06,
"loss": 0.0003,
"num_input_tokens_seen": 3558368,
"step": 538
},
{
"epoch": 3.0734141126158234,
"grad_norm": 3.4635422229766846,
"learning_rate": 1.641632428852568e-06,
"loss": 0.0092,
"num_input_tokens_seen": 3564912,
"step": 539
},
{
"epoch": 3.0791161796151103,
"grad_norm": 2.6266937255859375,
"learning_rate": 1.633110331721679e-06,
"loss": 0.009,
"num_input_tokens_seen": 3571760,
"step": 540
},
{
"epoch": 3.0848182466143976,
"grad_norm": 4.367438316345215,
"learning_rate": 1.624599669451535e-06,
"loss": 0.0356,
"num_input_tokens_seen": 3578608,
"step": 541
},
{
"epoch": 3.090520313613685,
"grad_norm": 0.09718136489391327,
"learning_rate": 1.6161005543035234e-06,
"loss": 0.0009,
"num_input_tokens_seen": 3585568,
"step": 542
},
{
"epoch": 3.0962223806129723,
"grad_norm": 0.16700978577136993,
"learning_rate": 1.6076130983867194e-06,
"loss": 0.0006,
"num_input_tokens_seen": 3592448,
"step": 543
},
{
"epoch": 3.1019244476122596,
"grad_norm": 0.21411505341529846,
"learning_rate": 1.5991374136564033e-06,
"loss": 0.0007,
"num_input_tokens_seen": 3598896,
"step": 544
},
{
"epoch": 3.1076265146115465,
"grad_norm": 0.13308338820934296,
"learning_rate": 1.5906736119125871e-06,
"loss": 0.0006,
"num_input_tokens_seen": 3605664,
"step": 545
},
{
"epoch": 3.113328581610834,
"grad_norm": 0.09795942157506943,
"learning_rate": 1.582221804798536e-06,
"loss": 0.0005,
"num_input_tokens_seen": 3612272,
"step": 546
},
{
"epoch": 3.119030648610121,
"grad_norm": 0.03890947997570038,
"learning_rate": 1.5737821037993016e-06,
"loss": 0.0002,
"num_input_tokens_seen": 3618992,
"step": 547
},
{
"epoch": 3.1247327156094085,
"grad_norm": 0.07518395781517029,
"learning_rate": 1.565354620240243e-06,
"loss": 0.0004,
"num_input_tokens_seen": 3625584,
"step": 548
},
{
"epoch": 3.130434782608696,
"grad_norm": 0.03526531159877777,
"learning_rate": 1.5569394652855674e-06,
"loss": 0.0002,
"num_input_tokens_seen": 3631936,
"step": 549
},
{
"epoch": 3.1361368496079827,
"grad_norm": 1.2385015487670898,
"learning_rate": 1.5485367499368547e-06,
"loss": 0.0085,
"num_input_tokens_seen": 3638640,
"step": 550
},
{
"epoch": 3.14183891660727,
"grad_norm": 0.7123975157737732,
"learning_rate": 1.5401465850316038e-06,
"loss": 0.0007,
"num_input_tokens_seen": 3645248,
"step": 551
},
{
"epoch": 3.1475409836065573,
"grad_norm": 1.5488688945770264,
"learning_rate": 1.5317690812417583e-06,
"loss": 0.002,
"num_input_tokens_seen": 3651696,
"step": 552
},
{
"epoch": 3.1532430506058446,
"grad_norm": 0.1425732523202896,
"learning_rate": 1.5234043490722588e-06,
"loss": 0.0005,
"num_input_tokens_seen": 3658368,
"step": 553
},
{
"epoch": 3.158945117605132,
"grad_norm": 0.09034785628318787,
"learning_rate": 1.5150524988595764e-06,
"loss": 0.0002,
"num_input_tokens_seen": 3664944,
"step": 554
},
{
"epoch": 3.1646471846044193,
"grad_norm": 1.720540165901184,
"learning_rate": 1.5067136407702615e-06,
"loss": 0.002,
"num_input_tokens_seen": 3671632,
"step": 555
},
{
"epoch": 3.170349251603706,
"grad_norm": 0.052824657410383224,
"learning_rate": 1.4983878847994888e-06,
"loss": 0.0001,
"num_input_tokens_seen": 3678288,
"step": 556
},
{
"epoch": 3.1760513186029935,
"grad_norm": 0.19170065224170685,
"learning_rate": 1.4900753407696087e-06,
"loss": 0.0002,
"num_input_tokens_seen": 3684976,
"step": 557
},
{
"epoch": 3.181753385602281,
"grad_norm": 0.0425780825316906,
"learning_rate": 1.4817761183286949e-06,
"loss": 0.0003,
"num_input_tokens_seen": 3692000,
"step": 558
},
{
"epoch": 3.187455452601568,
"grad_norm": 2.74068284034729,
"learning_rate": 1.4734903269491036e-06,
"loss": 0.0312,
"num_input_tokens_seen": 3698608,
"step": 559
},
{
"epoch": 3.1931575196008555,
"grad_norm": 0.05014929175376892,
"learning_rate": 1.4652180759260224e-06,
"loss": 0.0003,
"num_input_tokens_seen": 3705328,
"step": 560
},
{
"epoch": 3.1988595866001424,
"grad_norm": 0.03240165859460831,
"learning_rate": 1.4569594743760362e-06,
"loss": 0.0001,
"num_input_tokens_seen": 3711952,
"step": 561
},
{
"epoch": 3.2045616535994297,
"grad_norm": 2.2007806301116943,
"learning_rate": 1.4487146312356797e-06,
"loss": 0.0475,
"num_input_tokens_seen": 3718432,
"step": 562
},
{
"epoch": 3.210263720598717,
"grad_norm": 1.0569113492965698,
"learning_rate": 1.4404836552600102e-06,
"loss": 0.0016,
"num_input_tokens_seen": 3724880,
"step": 563
},
{
"epoch": 3.2159657875980043,
"grad_norm": 0.05176587775349617,
"learning_rate": 1.4322666550211628e-06,
"loss": 0.0002,
"num_input_tokens_seen": 3731504,
"step": 564
},
{
"epoch": 3.2216678545972917,
"grad_norm": 0.06603796780109406,
"learning_rate": 1.4240637389069284e-06,
"loss": 0.0002,
"num_input_tokens_seen": 3737728,
"step": 565
},
{
"epoch": 3.227369921596579,
"grad_norm": 1.1058824062347412,
"learning_rate": 1.4158750151193118e-06,
"loss": 0.014,
"num_input_tokens_seen": 3744256,
"step": 566
},
{
"epoch": 3.233071988595866,
"grad_norm": 1.7707825899124146,
"learning_rate": 1.4077005916731178e-06,
"loss": 0.043,
"num_input_tokens_seen": 3751008,
"step": 567
},
{
"epoch": 3.238774055595153,
"grad_norm": 1.7744121551513672,
"learning_rate": 1.399540576394517e-06,
"loss": 0.0283,
"num_input_tokens_seen": 3757888,
"step": 568
},
{
"epoch": 3.2444761225944405,
"grad_norm": 1.144587755203247,
"learning_rate": 1.3913950769196275e-06,
"loss": 0.0137,
"num_input_tokens_seen": 3764400,
"step": 569
},
{
"epoch": 3.250178189593728,
"grad_norm": 1.3597573041915894,
"learning_rate": 1.3832642006930935e-06,
"loss": 0.0249,
"num_input_tokens_seen": 3770816,
"step": 570
},
{
"epoch": 3.2558802565930147,
"grad_norm": 0.1739058941602707,
"learning_rate": 1.375148054966669e-06,
"loss": 0.0014,
"num_input_tokens_seen": 3777360,
"step": 571
},
{
"epoch": 3.261582323592302,
"grad_norm": 0.8273450136184692,
"learning_rate": 1.3670467467978016e-06,
"loss": 0.0034,
"num_input_tokens_seen": 3783968,
"step": 572
},
{
"epoch": 3.2672843905915894,
"grad_norm": 1.998255968093872,
"learning_rate": 1.3589603830482246e-06,
"loss": 0.0073,
"num_input_tokens_seen": 3790448,
"step": 573
},
{
"epoch": 3.2729864575908767,
"grad_norm": 2.1162617206573486,
"learning_rate": 1.3508890703825406e-06,
"loss": 0.0147,
"num_input_tokens_seen": 3796992,
"step": 574
},
{
"epoch": 3.278688524590164,
"grad_norm": 0.47663643956184387,
"learning_rate": 1.342832915266821e-06,
"loss": 0.0019,
"num_input_tokens_seen": 3803488,
"step": 575
},
{
"epoch": 3.2843905915894513,
"grad_norm": 1.3029382228851318,
"learning_rate": 1.334792023967196e-06,
"loss": 0.0025,
"num_input_tokens_seen": 3810304,
"step": 576
},
{
"epoch": 3.2900926585887382,
"grad_norm": 0.48294466733932495,
"learning_rate": 1.3267665025484597e-06,
"loss": 0.0018,
"num_input_tokens_seen": 3817072,
"step": 577
},
{
"epoch": 3.2957947255880256,
"grad_norm": 0.5423978567123413,
"learning_rate": 1.3187564568726642e-06,
"loss": 0.0023,
"num_input_tokens_seen": 3824064,
"step": 578
},
{
"epoch": 3.301496792587313,
"grad_norm": 1.4614927768707275,
"learning_rate": 1.3107619925977262e-06,
"loss": 0.0077,
"num_input_tokens_seen": 3830848,
"step": 579
},
{
"epoch": 3.3071988595866,
"grad_norm": 0.24164794385433197,
"learning_rate": 1.3027832151760328e-06,
"loss": 0.0014,
"num_input_tokens_seen": 3837696,
"step": 580
},
{
"epoch": 3.3129009265858875,
"grad_norm": 0.607139527797699,
"learning_rate": 1.2948202298530544e-06,
"loss": 0.0037,
"num_input_tokens_seen": 3844096,
"step": 581
},
{
"epoch": 3.3186029935851744,
"grad_norm": 1.8388640880584717,
"learning_rate": 1.2868731416659492e-06,
"loss": 0.016,
"num_input_tokens_seen": 3850432,
"step": 582
},
{
"epoch": 3.3243050605844617,
"grad_norm": 0.24856287240982056,
"learning_rate": 1.2789420554421823e-06,
"loss": 0.0014,
"num_input_tokens_seen": 3856992,
"step": 583
},
{
"epoch": 3.330007127583749,
"grad_norm": 3.3995144367218018,
"learning_rate": 1.2710270757981418e-06,
"loss": 0.0307,
"num_input_tokens_seen": 3863664,
"step": 584
},
{
"epoch": 3.3357091945830364,
"grad_norm": 0.22360347211360931,
"learning_rate": 1.263128307137762e-06,
"loss": 0.0011,
"num_input_tokens_seen": 3870512,
"step": 585
},
{
"epoch": 3.3414112615823237,
"grad_norm": 4.8242950439453125,
"learning_rate": 1.255245853651139e-06,
"loss": 0.0187,
"num_input_tokens_seen": 3877264,
"step": 586
},
{
"epoch": 3.347113328581611,
"grad_norm": 1.2942771911621094,
"learning_rate": 1.2473798193131634e-06,
"loss": 0.0092,
"num_input_tokens_seen": 3883936,
"step": 587
},
{
"epoch": 3.352815395580898,
"grad_norm": 0.22458510100841522,
"learning_rate": 1.2395303078821466e-06,
"loss": 0.0016,
"num_input_tokens_seen": 3890464,
"step": 588
},
{
"epoch": 3.3585174625801852,
"grad_norm": 2.22206711769104,
"learning_rate": 1.2316974228984489e-06,
"loss": 0.0118,
"num_input_tokens_seen": 3896720,
"step": 589
},
{
"epoch": 3.3642195295794726,
"grad_norm": 1.945656180381775,
"learning_rate": 1.22388126768312e-06,
"loss": 0.0227,
"num_input_tokens_seen": 3903440,
"step": 590
},
{
"epoch": 3.36992159657876,
"grad_norm": 1.0150567293167114,
"learning_rate": 1.2160819453365301e-06,
"loss": 0.0084,
"num_input_tokens_seen": 3910112,
"step": 591
},
{
"epoch": 3.375623663578047,
"grad_norm": 1.5612642765045166,
"learning_rate": 1.2082995587370136e-06,
"loss": 0.019,
"num_input_tokens_seen": 3916704,
"step": 592
},
{
"epoch": 3.381325730577334,
"grad_norm": 1.6889121532440186,
"learning_rate": 1.2005342105395094e-06,
"loss": 0.0073,
"num_input_tokens_seen": 3923136,
"step": 593
},
{
"epoch": 3.3870277975766214,
"grad_norm": 0.3054364025592804,
"learning_rate": 1.1927860031742116e-06,
"loss": 0.0009,
"num_input_tokens_seen": 3929696,
"step": 594
},
{
"epoch": 3.3927298645759087,
"grad_norm": 1.479589581489563,
"learning_rate": 1.1850550388452125e-06,
"loss": 0.0117,
"num_input_tokens_seen": 3936016,
"step": 595
},
{
"epoch": 3.398431931575196,
"grad_norm": 2.0437428951263428,
"learning_rate": 1.177341419529157e-06,
"loss": 0.017,
"num_input_tokens_seen": 3942480,
"step": 596
},
{
"epoch": 3.4041339985744834,
"grad_norm": 1.273624062538147,
"learning_rate": 1.1696452469738984e-06,
"loss": 0.0054,
"num_input_tokens_seen": 3948928,
"step": 597
},
{
"epoch": 3.4098360655737707,
"grad_norm": 0.9993485808372498,
"learning_rate": 1.1619666226971565e-06,
"loss": 0.0041,
"num_input_tokens_seen": 3955248,
"step": 598
},
{
"epoch": 3.4155381325730576,
"grad_norm": 0.3728763163089752,
"learning_rate": 1.1543056479851755e-06,
"loss": 0.002,
"num_input_tokens_seen": 3961760,
"step": 599
},
{
"epoch": 3.421240199572345,
"grad_norm": 0.3200441002845764,
"learning_rate": 1.1466624238913907e-06,
"loss": 0.0022,
"num_input_tokens_seen": 3968112,
"step": 600
},
{
"epoch": 3.4269422665716323,
"grad_norm": 0.08588572591543198,
"learning_rate": 1.1390370512350936e-06,
"loss": 0.0004,
"num_input_tokens_seen": 3974864,
"step": 601
},
{
"epoch": 3.4326443335709196,
"grad_norm": 6.460862636566162,
"learning_rate": 1.1314296306001065e-06,
"loss": 0.0148,
"num_input_tokens_seen": 3981296,
"step": 602
},
{
"epoch": 3.4383464005702065,
"grad_norm": 1.9962701797485352,
"learning_rate": 1.1238402623334494e-06,
"loss": 0.049,
"num_input_tokens_seen": 3987648,
"step": 603
},
{
"epoch": 3.444048467569494,
"grad_norm": 0.4922191798686981,
"learning_rate": 1.1162690465440198e-06,
"loss": 0.0027,
"num_input_tokens_seen": 3994208,
"step": 604
},
{
"epoch": 3.449750534568781,
"grad_norm": 0.15748071670532227,
"learning_rate": 1.1087160831012713e-06,
"loss": 0.0007,
"num_input_tokens_seen": 4001280,
"step": 605
},
{
"epoch": 3.4554526015680684,
"grad_norm": 0.9574026465415955,
"learning_rate": 1.1011814716338995e-06,
"loss": 0.006,
"num_input_tokens_seen": 4008128,
"step": 606
},
{
"epoch": 3.4611546685673558,
"grad_norm": 0.867205798625946,
"learning_rate": 1.093665311528521e-06,
"loss": 0.0066,
"num_input_tokens_seen": 4014848,
"step": 607
},
{
"epoch": 3.466856735566643,
"grad_norm": 2.434412717819214,
"learning_rate": 1.0861677019283717e-06,
"loss": 0.0255,
"num_input_tokens_seen": 4021296,
"step": 608
},
{
"epoch": 3.47255880256593,
"grad_norm": 1.8824217319488525,
"learning_rate": 1.0786887417319867e-06,
"loss": 0.0075,
"num_input_tokens_seen": 4027872,
"step": 609
},
{
"epoch": 3.4782608695652173,
"grad_norm": 3.1213793754577637,
"learning_rate": 1.0712285295919092e-06,
"loss": 0.0068,
"num_input_tokens_seen": 4034768,
"step": 610
},
{
"epoch": 3.4839629365645046,
"grad_norm": 0.11539681255817413,
"learning_rate": 1.0637871639133793e-06,
"loss": 0.0008,
"num_input_tokens_seen": 4041408,
"step": 611
},
{
"epoch": 3.489665003563792,
"grad_norm": 1.6134257316589355,
"learning_rate": 1.056364742853043e-06,
"loss": 0.0132,
"num_input_tokens_seen": 4048256,
"step": 612
},
{
"epoch": 3.4953670705630793,
"grad_norm": 0.3129463493824005,
"learning_rate": 1.048961364317648e-06,
"loss": 0.0009,
"num_input_tokens_seen": 4055184,
"step": 613
},
{
"epoch": 3.501069137562366,
"grad_norm": 0.39868372678756714,
"learning_rate": 1.0415771259627646e-06,
"loss": 0.0009,
"num_input_tokens_seen": 4061904,
"step": 614
},
{
"epoch": 3.5067712045616535,
"grad_norm": 0.08058975636959076,
"learning_rate": 1.034212125191487e-06,
"loss": 0.0006,
"num_input_tokens_seen": 4068560,
"step": 615
},
{
"epoch": 3.512473271560941,
"grad_norm": 0.15611045062541962,
"learning_rate": 1.0268664591531557e-06,
"loss": 0.001,
"num_input_tokens_seen": 4075072,
"step": 616
},
{
"epoch": 3.518175338560228,
"grad_norm": 1.9207406044006348,
"learning_rate": 1.0195402247420705e-06,
"loss": 0.0219,
"num_input_tokens_seen": 4081840,
"step": 617
},
{
"epoch": 3.5238774055595155,
"grad_norm": 0.09913857281208038,
"learning_rate": 1.012233518596216e-06,
"loss": 0.0006,
"num_input_tokens_seen": 4088512,
"step": 618
},
{
"epoch": 3.5295794725588028,
"grad_norm": 10.642138481140137,
"learning_rate": 1.0049464370959846e-06,
"loss": 0.036,
"num_input_tokens_seen": 4095168,
"step": 619
},
{
"epoch": 3.5352815395580897,
"grad_norm": 5.640021324157715,
"learning_rate": 9.97679076362909e-07,
"loss": 0.0034,
"num_input_tokens_seen": 4102112,
"step": 620
},
{
"epoch": 3.540983606557377,
"grad_norm": 0.6071515679359436,
"learning_rate": 9.904315322583894e-07,
"loss": 0.0039,
"num_input_tokens_seen": 4108432,
"step": 621
},
{
"epoch": 3.5466856735566643,
"grad_norm": 0.25435176491737366,
"learning_rate": 9.832039003824317e-07,
"loss": 0.0011,
"num_input_tokens_seen": 4115152,
"step": 622
},
{
"epoch": 3.5523877405559516,
"grad_norm": 0.1996757537126541,
"learning_rate": 9.759962760723856e-07,
"loss": 0.0017,
"num_input_tokens_seen": 4121696,
"step": 623
},
{
"epoch": 3.5580898075552385,
"grad_norm": 1.3054097890853882,
"learning_rate": 9.6880875440169e-07,
"loss": 0.0057,
"num_input_tokens_seen": 4128160,
"step": 624
},
{
"epoch": 3.563791874554526,
"grad_norm": 1.3929048776626587,
"learning_rate": 9.616414301786128e-07,
"loss": 0.0199,
"num_input_tokens_seen": 4134912,
"step": 625
},
{
"epoch": 3.569493941553813,
"grad_norm": 0.8480359315872192,
"learning_rate": 9.544943979450067e-07,
"loss": 0.0294,
"num_input_tokens_seen": 4141504,
"step": 626
},
{
"epoch": 3.5751960085531005,
"grad_norm": 1.4291871786117554,
"learning_rate": 9.473677519750568e-07,
"loss": 0.0124,
"num_input_tokens_seen": 4148144,
"step": 627
},
{
"epoch": 3.580898075552388,
"grad_norm": 0.862804651260376,
"learning_rate": 9.40261586274043e-07,
"loss": 0.0058,
"num_input_tokens_seen": 4154880,
"step": 628
},
{
"epoch": 3.586600142551675,
"grad_norm": 0.9178187847137451,
"learning_rate": 9.331759945770935e-07,
"loss": 0.0113,
"num_input_tokens_seen": 4162240,
"step": 629
},
{
"epoch": 3.5923022095509625,
"grad_norm": 2.0041568279266357,
"learning_rate": 9.261110703479531e-07,
"loss": 0.0058,
"num_input_tokens_seen": 4169008,
"step": 630
},
{
"epoch": 3.5980042765502493,
"grad_norm": 0.3672866225242615,
"learning_rate": 9.190669067777475e-07,
"loss": 0.0019,
"num_input_tokens_seen": 4175488,
"step": 631
},
{
"epoch": 3.6037063435495367,
"grad_norm": 1.909638524055481,
"learning_rate": 9.120435967837571e-07,
"loss": 0.0443,
"num_input_tokens_seen": 4182208,
"step": 632
},
{
"epoch": 3.609408410548824,
"grad_norm": 3.312385320663452,
"learning_rate": 9.050412330081885e-07,
"loss": 0.0246,
"num_input_tokens_seen": 4188512,
"step": 633
},
{
"epoch": 3.6151104775481113,
"grad_norm": 0.535926342010498,
"learning_rate": 8.980599078169528e-07,
"loss": 0.0039,
"num_input_tokens_seen": 4194992,
"step": 634
},
{
"epoch": 3.620812544547398,
"grad_norm": 2.76286244392395,
"learning_rate": 8.910997132984481e-07,
"loss": 0.0302,
"num_input_tokens_seen": 4201632,
"step": 635
},
{
"epoch": 3.6265146115466855,
"grad_norm": 0.6582239866256714,
"learning_rate": 8.841607412623471e-07,
"loss": 0.0039,
"num_input_tokens_seen": 4208096,
"step": 636
},
{
"epoch": 3.632216678545973,
"grad_norm": 1.006136417388916,
"learning_rate": 8.772430832383797e-07,
"loss": 0.0096,
"num_input_tokens_seen": 4214720,
"step": 637
},
{
"epoch": 3.63791874554526,
"grad_norm": 4.465051651000977,
"learning_rate": 8.703468304751342e-07,
"loss": 0.0314,
"num_input_tokens_seen": 4221280,
"step": 638
},
{
"epoch": 3.6436208125445475,
"grad_norm": 2.6803739070892334,
"learning_rate": 8.634720739388433e-07,
"loss": 0.0301,
"num_input_tokens_seen": 4227744,
"step": 639
},
{
"epoch": 3.649322879543835,
"grad_norm": 0.1741700917482376,
"learning_rate": 8.566189043121953e-07,
"loss": 0.0012,
"num_input_tokens_seen": 4234240,
"step": 640
},
{
"epoch": 3.655024946543122,
"grad_norm": 0.14672909677028656,
"learning_rate": 8.49787411993129e-07,
"loss": 0.001,
"num_input_tokens_seen": 4240640,
"step": 641
},
{
"epoch": 3.660727013542409,
"grad_norm": 0.39767521619796753,
"learning_rate": 8.429776870936485e-07,
"loss": 0.0053,
"num_input_tokens_seen": 4247232,
"step": 642
},
{
"epoch": 3.6664290805416964,
"grad_norm": 1.980200171470642,
"learning_rate": 8.361898194386251e-07,
"loss": 0.0107,
"num_input_tokens_seen": 4254368,
"step": 643
},
{
"epoch": 3.6721311475409837,
"grad_norm": 0.1616886407136917,
"learning_rate": 8.294238985646244e-07,
"loss": 0.0016,
"num_input_tokens_seen": 4260992,
"step": 644
},
{
"epoch": 3.677833214540271,
"grad_norm": 0.871669352054596,
"learning_rate": 8.22680013718715e-07,
"loss": 0.0109,
"num_input_tokens_seen": 4267632,
"step": 645
},
{
"epoch": 3.683535281539558,
"grad_norm": 1.4398808479309082,
"learning_rate": 8.159582538572985e-07,
"loss": 0.0107,
"num_input_tokens_seen": 4274032,
"step": 646
},
{
"epoch": 3.689237348538845,
"grad_norm": 0.1330898255109787,
"learning_rate": 8.092587076449304e-07,
"loss": 0.0013,
"num_input_tokens_seen": 4280608,
"step": 647
},
{
"epoch": 3.6949394155381325,
"grad_norm": 0.6880252957344055,
"learning_rate": 8.025814634531545e-07,
"loss": 0.0097,
"num_input_tokens_seen": 4287632,
"step": 648
},
{
"epoch": 3.70064148253742,
"grad_norm": 0.8941037058830261,
"learning_rate": 7.959266093593343e-07,
"loss": 0.0082,
"num_input_tokens_seen": 4294560,
"step": 649
},
{
"epoch": 3.706343549536707,
"grad_norm": 2.7201638221740723,
"learning_rate": 7.892942331454959e-07,
"loss": 0.0209,
"num_input_tokens_seen": 4300880,
"step": 650
},
{
"epoch": 3.7120456165359945,
"grad_norm": 0.16370153427124023,
"learning_rate": 7.826844222971644e-07,
"loss": 0.0014,
"num_input_tokens_seen": 4307776,
"step": 651
},
{
"epoch": 3.7177476835352814,
"grad_norm": 1.3239191770553589,
"learning_rate": 7.760972640022127e-07,
"loss": 0.019,
"num_input_tokens_seen": 4314304,
"step": 652
},
{
"epoch": 3.7234497505345687,
"grad_norm": 2.6364285945892334,
"learning_rate": 7.695328451497111e-07,
"loss": 0.0187,
"num_input_tokens_seen": 4320656,
"step": 653
},
{
"epoch": 3.729151817533856,
"grad_norm": 1.4038536548614502,
"learning_rate": 7.629912523287833e-07,
"loss": 0.0118,
"num_input_tokens_seen": 4327056,
"step": 654
},
{
"epoch": 3.7348538845331434,
"grad_norm": 0.34292855858802795,
"learning_rate": 7.564725718274601e-07,
"loss": 0.0027,
"num_input_tokens_seen": 4333712,
"step": 655
},
{
"epoch": 3.7405559515324303,
"grad_norm": 1.6340289115905762,
"learning_rate": 7.49976889631544e-07,
"loss": 0.0088,
"num_input_tokens_seen": 4340336,
"step": 656
},
{
"epoch": 3.7462580185317176,
"grad_norm": 1.5728880167007446,
"learning_rate": 7.435042914234733e-07,
"loss": 0.0252,
"num_input_tokens_seen": 4346688,
"step": 657
},
{
"epoch": 3.751960085531005,
"grad_norm": 0.4033033549785614,
"learning_rate": 7.370548625811954e-07,
"loss": 0.003,
"num_input_tokens_seen": 4353440,
"step": 658
},
{
"epoch": 3.7576621525302922,
"grad_norm": 3.087113618850708,
"learning_rate": 7.306286881770361e-07,
"loss": 0.0149,
"num_input_tokens_seen": 4359952,
"step": 659
},
{
"epoch": 3.7633642195295796,
"grad_norm": 0.11130305379629135,
"learning_rate": 7.242258529765794e-07,
"loss": 0.0009,
"num_input_tokens_seen": 4366672,
"step": 660
},
{
"epoch": 3.769066286528867,
"grad_norm": 0.28085440397262573,
"learning_rate": 7.178464414375496e-07,
"loss": 0.0023,
"num_input_tokens_seen": 4373136,
"step": 661
},
{
"epoch": 3.774768353528154,
"grad_norm": 0.9258376359939575,
"learning_rate": 7.114905377086989e-07,
"loss": 0.0062,
"num_input_tokens_seen": 4379392,
"step": 662
},
{
"epoch": 3.780470420527441,
"grad_norm": 0.7785882949829102,
"learning_rate": 7.051582256286929e-07,
"loss": 0.0077,
"num_input_tokens_seen": 4386320,
"step": 663
},
{
"epoch": 3.7861724875267284,
"grad_norm": 0.8045741319656372,
"learning_rate": 6.98849588725009e-07,
"loss": 0.0091,
"num_input_tokens_seen": 4393008,
"step": 664
},
{
"epoch": 3.7918745545260157,
"grad_norm": 0.30582883954048157,
"learning_rate": 6.925647102128319e-07,
"loss": 0.0027,
"num_input_tokens_seen": 4399680,
"step": 665
},
{
"epoch": 3.797576621525303,
"grad_norm": 1.122971773147583,
"learning_rate": 6.863036729939593e-07,
"loss": 0.0048,
"num_input_tokens_seen": 4406624,
"step": 666
},
{
"epoch": 3.80327868852459,
"grad_norm": 1.0433896780014038,
"learning_rate": 6.800665596557027e-07,
"loss": 0.0084,
"num_input_tokens_seen": 4413200,
"step": 667
},
{
"epoch": 3.8089807555238773,
"grad_norm": 0.5954147577285767,
"learning_rate": 6.738534524698057e-07,
"loss": 0.0037,
"num_input_tokens_seen": 4419728,
"step": 668
},
{
"epoch": 3.8146828225231646,
"grad_norm": 0.2919423282146454,
"learning_rate": 6.676644333913496e-07,
"loss": 0.0013,
"num_input_tokens_seen": 4426320,
"step": 669
},
{
"epoch": 3.820384889522452,
"grad_norm": 1.0446901321411133,
"learning_rate": 6.614995840576816e-07,
"loss": 0.0099,
"num_input_tokens_seen": 4432480,
"step": 670
},
{
"epoch": 3.8260869565217392,
"grad_norm": 0.051127150654792786,
"learning_rate": 6.553589857873304e-07,
"loss": 0.0004,
"num_input_tokens_seen": 4438784,
"step": 671
},
{
"epoch": 3.8317890235210266,
"grad_norm": 1.680815577507019,
"learning_rate": 6.4924271957894e-07,
"loss": 0.0196,
"num_input_tokens_seen": 4445200,
"step": 672
},
{
"epoch": 3.8374910905203135,
"grad_norm": 0.45610514283180237,
"learning_rate": 6.431508661101954e-07,
"loss": 0.0013,
"num_input_tokens_seen": 4451616,
"step": 673
},
{
"epoch": 3.8431931575196008,
"grad_norm": 0.179911807179451,
"learning_rate": 6.370835057367622e-07,
"loss": 0.0008,
"num_input_tokens_seen": 4457808,
"step": 674
},
{
"epoch": 3.848895224518888,
"grad_norm": 1.4267868995666504,
"learning_rate": 6.31040718491224e-07,
"loss": 0.0177,
"num_input_tokens_seen": 4464448,
"step": 675
},
{
"epoch": 3.8545972915181754,
"grad_norm": 1.7962915897369385,
"learning_rate": 6.250225840820315e-07,
"loss": 0.0148,
"num_input_tokens_seen": 4471168,
"step": 676
},
{
"epoch": 3.8602993585174623,
"grad_norm": 0.5019521713256836,
"learning_rate": 6.190291818924449e-07,
"loss": 0.0012,
"num_input_tokens_seen": 4477424,
"step": 677
},
{
"epoch": 3.8660014255167496,
"grad_norm": 0.6949150562286377,
"learning_rate": 6.130605909794907e-07,
"loss": 0.0031,
"num_input_tokens_seen": 4484080,
"step": 678
},
{
"epoch": 3.871703492516037,
"grad_norm": 1.9541274309158325,
"learning_rate": 6.071168900729171e-07,
"loss": 0.0024,
"num_input_tokens_seen": 4490832,
"step": 679
},
{
"epoch": 3.8774055595153243,
"grad_norm": 0.13762035965919495,
"learning_rate": 6.011981575741583e-07,
"loss": 0.0008,
"num_input_tokens_seen": 4497472,
"step": 680
},
{
"epoch": 3.8831076265146116,
"grad_norm": 0.760161280632019,
"learning_rate": 5.953044715552967e-07,
"loss": 0.0042,
"num_input_tokens_seen": 4504064,
"step": 681
},
{
"epoch": 3.888809693513899,
"grad_norm": 2.114305257797241,
"learning_rate": 5.894359097580343e-07,
"loss": 0.0235,
"num_input_tokens_seen": 4511136,
"step": 682
},
{
"epoch": 3.8945117605131863,
"grad_norm": 0.7031646370887756,
"learning_rate": 5.835925495926684e-07,
"loss": 0.0029,
"num_input_tokens_seen": 4518176,
"step": 683
},
{
"epoch": 3.900213827512473,
"grad_norm": 1.012243628501892,
"learning_rate": 5.777744681370709e-07,
"loss": 0.0037,
"num_input_tokens_seen": 4524592,
"step": 684
},
{
"epoch": 3.9059158945117605,
"grad_norm": 1.2794705629348755,
"learning_rate": 5.719817421356686e-07,
"loss": 0.0145,
"num_input_tokens_seen": 4531216,
"step": 685
},
{
"epoch": 3.911617961511048,
"grad_norm": 0.8958796262741089,
"learning_rate": 5.662144479984322e-07,
"loss": 0.0093,
"num_input_tokens_seen": 4537776,
"step": 686
},
{
"epoch": 3.917320028510335,
"grad_norm": 0.7218899130821228,
"learning_rate": 5.604726617998721e-07,
"loss": 0.0066,
"num_input_tokens_seen": 4544272,
"step": 687
},
{
"epoch": 3.923022095509622,
"grad_norm": 0.16522099077701569,
"learning_rate": 5.54756459278028e-07,
"loss": 0.001,
"num_input_tokens_seen": 4550736,
"step": 688
},
{
"epoch": 3.9287241625089093,
"grad_norm": 0.7687943577766418,
"learning_rate": 5.490659158334774e-07,
"loss": 0.0029,
"num_input_tokens_seen": 4557024,
"step": 689
},
{
"epoch": 3.9344262295081966,
"grad_norm": 2.5592455863952637,
"learning_rate": 5.434011065283326e-07,
"loss": 0.0232,
"num_input_tokens_seen": 4563712,
"step": 690
},
{
"epoch": 3.940128296507484,
"grad_norm": 0.07579555362462997,
"learning_rate": 5.377621060852595e-07,
"loss": 0.0004,
"num_input_tokens_seen": 4570336,
"step": 691
},
{
"epoch": 3.9458303635067713,
"grad_norm": 0.05152352526783943,
"learning_rate": 5.321489888864848e-07,
"loss": 0.0003,
"num_input_tokens_seen": 4577008,
"step": 692
},
{
"epoch": 3.9515324305060586,
"grad_norm": 0.2156032770872116,
"learning_rate": 5.2656182897282e-07,
"loss": 0.0011,
"num_input_tokens_seen": 4583744,
"step": 693
},
{
"epoch": 3.957234497505346,
"grad_norm": 0.3031976819038391,
"learning_rate": 5.210007000426812e-07,
"loss": 0.0009,
"num_input_tokens_seen": 4590432,
"step": 694
},
{
"epoch": 3.962936564504633,
"grad_norm": 0.21700264513492584,
"learning_rate": 5.154656754511175e-07,
"loss": 0.0009,
"num_input_tokens_seen": 4596816,
"step": 695
},
{
"epoch": 3.96863863150392,
"grad_norm": 0.5650395154953003,
"learning_rate": 5.099568282088446e-07,
"loss": 0.0039,
"num_input_tokens_seen": 4603536,
"step": 696
},
{
"epoch": 3.9743406985032075,
"grad_norm": 1.3010212182998657,
"learning_rate": 5.044742309812833e-07,
"loss": 0.0109,
"num_input_tokens_seen": 4610128,
"step": 697
},
{
"epoch": 3.980042765502495,
"grad_norm": 2.5407936573028564,
"learning_rate": 4.990179560875963e-07,
"loss": 0.0455,
"num_input_tokens_seen": 4616400,
"step": 698
},
{
"epoch": 3.9857448325017817,
"grad_norm": 0.05278922989964485,
"learning_rate": 4.935880754997377e-07,
"loss": 0.0003,
"num_input_tokens_seen": 4622544,
"step": 699
},
{
"epoch": 3.991446899501069,
"grad_norm": 0.14981479942798615,
"learning_rate": 4.881846608415028e-07,
"loss": 0.0004,
"num_input_tokens_seen": 4628928,
"step": 700
},
{
"epoch": 3.9971489665003563,
"grad_norm": 0.054193057119846344,
"learning_rate": 4.828077833875849e-07,
"loss": 0.0003,
"num_input_tokens_seen": 4635312,
"step": 701
},
{
"epoch": 4.002851033499644,
"grad_norm": 0.1286012977361679,
"learning_rate": 4.774575140626317e-07,
"loss": 0.0006,
"num_input_tokens_seen": 4641696,
"step": 702
},
{
"epoch": 4.008553100498931,
"grad_norm": 0.817335307598114,
"learning_rate": 4.7213392344031215e-07,
"loss": 0.0041,
"num_input_tokens_seen": 4648800,
"step": 703
},
{
"epoch": 4.014255167498218,
"grad_norm": 0.2602868676185608,
"learning_rate": 4.6683708174238506e-07,
"loss": 0.0017,
"num_input_tokens_seen": 4655392,
"step": 704
},
{
"epoch": 4.019957234497506,
"grad_norm": 0.05918978899717331,
"learning_rate": 4.6156705883777434e-07,
"loss": 0.0003,
"num_input_tokens_seen": 4661808,
"step": 705
},
{
"epoch": 4.025659301496793,
"grad_norm": 3.1517393589019775,
"learning_rate": 4.5632392424164367e-07,
"loss": 0.0101,
"num_input_tokens_seen": 4668576,
"step": 706
},
{
"epoch": 4.031361368496079,
"grad_norm": 1.1861796379089355,
"learning_rate": 4.511077471144831e-07,
"loss": 0.0206,
"num_input_tokens_seen": 4674944,
"step": 707
},
{
"epoch": 4.037063435495367,
"grad_norm": 0.8727283477783203,
"learning_rate": 4.4591859626119447e-07,
"loss": 0.0058,
"num_input_tokens_seen": 4681840,
"step": 708
},
{
"epoch": 4.042765502494654,
"grad_norm": 0.18443292379379272,
"learning_rate": 4.4075654013018635e-07,
"loss": 0.0007,
"num_input_tokens_seen": 4688528,
"step": 709
},
{
"epoch": 4.048467569493941,
"grad_norm": 0.6552385687828064,
"learning_rate": 4.3562164681246825e-07,
"loss": 0.0035,
"num_input_tokens_seen": 4694992,
"step": 710
},
{
"epoch": 4.054169636493229,
"grad_norm": 0.8831287026405334,
"learning_rate": 4.3051398404075436e-07,
"loss": 0.0074,
"num_input_tokens_seen": 4701328,
"step": 711
},
{
"epoch": 4.059871703492516,
"grad_norm": 0.026319758966565132,
"learning_rate": 4.254336191885683e-07,
"loss": 0.0002,
"num_input_tokens_seen": 4707808,
"step": 712
},
{
"epoch": 4.065573770491803,
"grad_norm": 0.17212285101413727,
"learning_rate": 4.203806192693588e-07,
"loss": 0.001,
"num_input_tokens_seen": 4714592,
"step": 713
},
{
"epoch": 4.071275837491091,
"grad_norm": 0.0908040776848793,
"learning_rate": 4.1535505093560885e-07,
"loss": 0.0004,
"num_input_tokens_seen": 4721408,
"step": 714
},
{
"epoch": 4.076977904490378,
"grad_norm": 0.4635961055755615,
"learning_rate": 4.103569804779642e-07,
"loss": 0.0044,
"num_input_tokens_seen": 4728224,
"step": 715
},
{
"epoch": 4.082679971489665,
"grad_norm": 0.8470099568367004,
"learning_rate": 4.0538647382435017e-07,
"loss": 0.0065,
"num_input_tokens_seen": 4735040,
"step": 716
},
{
"epoch": 4.088382038488953,
"grad_norm": 0.09076406061649323,
"learning_rate": 4.0044359653911183e-07,
"loss": 0.0004,
"num_input_tokens_seen": 4741584,
"step": 717
},
{
"epoch": 4.094084105488239,
"grad_norm": 0.707586407661438,
"learning_rate": 3.955284138221402e-07,
"loss": 0.0044,
"num_input_tokens_seen": 4748160,
"step": 718
},
{
"epoch": 4.099786172487526,
"grad_norm": 2.276195526123047,
"learning_rate": 3.9064099050802024e-07,
"loss": 0.0111,
"num_input_tokens_seen": 4754752,
"step": 719
},
{
"epoch": 4.105488239486814,
"grad_norm": 1.1030926704406738,
"learning_rate": 3.85781391065167e-07,
"loss": 0.0123,
"num_input_tokens_seen": 4761168,
"step": 720
},
{
"epoch": 4.111190306486101,
"grad_norm": 0.12873871624469757,
"learning_rate": 3.809496795949838e-07,
"loss": 0.0011,
"num_input_tokens_seen": 4767376,
"step": 721
},
{
"epoch": 4.116892373485388,
"grad_norm": 0.030298732221126556,
"learning_rate": 3.7614591983100995e-07,
"loss": 0.0002,
"num_input_tokens_seen": 4774224,
"step": 722
},
{
"epoch": 4.122594440484676,
"grad_norm": 0.4066639840602875,
"learning_rate": 3.713701751380855e-07,
"loss": 0.002,
"num_input_tokens_seen": 4780656,
"step": 723
},
{
"epoch": 4.128296507483963,
"grad_norm": 0.6175351738929749,
"learning_rate": 3.666225085115108e-07,
"loss": 0.004,
"num_input_tokens_seen": 4786944,
"step": 724
},
{
"epoch": 4.13399857448325,
"grad_norm": 0.16811884939670563,
"learning_rate": 3.619029825762177e-07,
"loss": 0.001,
"num_input_tokens_seen": 4793488,
"step": 725
},
{
"epoch": 4.139700641482538,
"grad_norm": 0.3199079930782318,
"learning_rate": 3.5721165958594345e-07,
"loss": 0.0013,
"num_input_tokens_seen": 4799872,
"step": 726
},
{
"epoch": 4.145402708481825,
"grad_norm": 0.02896907553076744,
"learning_rate": 3.525486014224108e-07,
"loss": 0.0002,
"num_input_tokens_seen": 4806656,
"step": 727
},
{
"epoch": 4.151104775481112,
"grad_norm": 0.12801134586334229,
"learning_rate": 3.479138695945086e-07,
"loss": 0.0006,
"num_input_tokens_seen": 4813408,
"step": 728
},
{
"epoch": 4.156806842480399,
"grad_norm": 0.22655466198921204,
"learning_rate": 3.433075252374826e-07,
"loss": 0.0011,
"num_input_tokens_seen": 4819904,
"step": 729
},
{
"epoch": 4.162508909479686,
"grad_norm": 0.7603595852851868,
"learning_rate": 3.3872962911212836e-07,
"loss": 0.0046,
"num_input_tokens_seen": 4826848,
"step": 730
},
{
"epoch": 4.168210976478973,
"grad_norm": 0.557383120059967,
"learning_rate": 3.341802416039913e-07,
"loss": 0.0026,
"num_input_tokens_seen": 4833648,
"step": 731
},
{
"epoch": 4.173913043478261,
"grad_norm": 0.26344913244247437,
"learning_rate": 3.296594227225677e-07,
"loss": 0.0045,
"num_input_tokens_seen": 4840000,
"step": 732
},
{
"epoch": 4.179615110477548,
"grad_norm": 3.3977065086364746,
"learning_rate": 3.2516723210051476e-07,
"loss": 0.0154,
"num_input_tokens_seen": 4846544,
"step": 733
},
{
"epoch": 4.185317177476835,
"grad_norm": 0.20362620055675507,
"learning_rate": 3.20703728992863e-07,
"loss": 0.0009,
"num_input_tokens_seen": 4853344,
"step": 734
},
{
"epoch": 4.191019244476123,
"grad_norm": 0.03981434926390648,
"learning_rate": 3.162689722762366e-07,
"loss": 0.0002,
"num_input_tokens_seen": 4860016,
"step": 735
},
{
"epoch": 4.19672131147541,
"grad_norm": 0.08564602583646774,
"learning_rate": 3.118630204480741e-07,
"loss": 0.0005,
"num_input_tokens_seen": 4866816,
"step": 736
},
{
"epoch": 4.202423378474697,
"grad_norm": 0.583420991897583,
"learning_rate": 3.0748593162585853e-07,
"loss": 0.0021,
"num_input_tokens_seen": 4873168,
"step": 737
},
{
"epoch": 4.208125445473985,
"grad_norm": 0.10381372272968292,
"learning_rate": 3.0313776354635005e-07,
"loss": 0.0006,
"num_input_tokens_seen": 4879776,
"step": 738
},
{
"epoch": 4.213827512473271,
"grad_norm": 1.8905394077301025,
"learning_rate": 2.988185735648258e-07,
"loss": 0.0359,
"num_input_tokens_seen": 4886720,
"step": 739
},
{
"epoch": 4.2195295794725585,
"grad_norm": 0.05700286850333214,
"learning_rate": 2.9452841865432126e-07,
"loss": 0.0002,
"num_input_tokens_seen": 4893376,
"step": 740
},
{
"epoch": 4.225231646471846,
"grad_norm": 1.4328497648239136,
"learning_rate": 2.902673554048793e-07,
"loss": 0.0093,
"num_input_tokens_seen": 4899984,
"step": 741
},
{
"epoch": 4.230933713471133,
"grad_norm": 0.29192915558815,
"learning_rate": 2.8603544002280463e-07,
"loss": 0.0011,
"num_input_tokens_seen": 4906800,
"step": 742
},
{
"epoch": 4.23663578047042,
"grad_norm": 0.314852237701416,
"learning_rate": 2.818327283299227e-07,
"loss": 0.0014,
"num_input_tokens_seen": 4913296,
"step": 743
},
{
"epoch": 4.242337847469708,
"grad_norm": 0.5625317692756653,
"learning_rate": 2.7765927576284076e-07,
"loss": 0.0041,
"num_input_tokens_seen": 4919808,
"step": 744
},
{
"epoch": 4.248039914468995,
"grad_norm": 0.6194710731506348,
"learning_rate": 2.735151373722214e-07,
"loss": 0.0077,
"num_input_tokens_seen": 4926176,
"step": 745
},
{
"epoch": 4.253741981468282,
"grad_norm": 0.1176173985004425,
"learning_rate": 2.694003678220489e-07,
"loss": 0.0005,
"num_input_tokens_seen": 4932480,
"step": 746
},
{
"epoch": 4.25944404846757,
"grad_norm": 0.28074905276298523,
"learning_rate": 2.6531502138891665e-07,
"loss": 0.0015,
"num_input_tokens_seen": 4939104,
"step": 747
},
{
"epoch": 4.265146115466857,
"grad_norm": 1.147383451461792,
"learning_rate": 2.612591519613053e-07,
"loss": 0.0072,
"num_input_tokens_seen": 4945728,
"step": 748
},
{
"epoch": 4.270848182466144,
"grad_norm": 0.22705648839473724,
"learning_rate": 2.5723281303887545e-07,
"loss": 0.001,
"num_input_tokens_seen": 4952240,
"step": 749
},
{
"epoch": 4.276550249465431,
"grad_norm": 0.6860646605491638,
"learning_rate": 2.5323605773175775e-07,
"loss": 0.0043,
"num_input_tokens_seen": 4958752,
"step": 750
},
{
"epoch": 4.282252316464718,
"grad_norm": 0.6111099123954773,
"learning_rate": 2.4926893875985766e-07,
"loss": 0.003,
"num_input_tokens_seen": 4965376,
"step": 751
},
{
"epoch": 4.2879543834640055,
"grad_norm": 1.1241867542266846,
"learning_rate": 2.4533150845215514e-07,
"loss": 0.0047,
"num_input_tokens_seen": 4971680,
"step": 752
},
{
"epoch": 4.293656450463293,
"grad_norm": 0.09292508661746979,
"learning_rate": 2.414238187460191e-07,
"loss": 0.0004,
"num_input_tokens_seen": 4978368,
"step": 753
},
{
"epoch": 4.29935851746258,
"grad_norm": 0.08100103586912155,
"learning_rate": 2.3754592118651748e-07,
"loss": 0.0004,
"num_input_tokens_seen": 4984816,
"step": 754
},
{
"epoch": 4.3050605844618675,
"grad_norm": 0.8663838505744934,
"learning_rate": 2.336978669257406e-07,
"loss": 0.0036,
"num_input_tokens_seen": 4991648,
"step": 755
},
{
"epoch": 4.310762651461155,
"grad_norm": 0.07981377094984055,
"learning_rate": 2.2987970672212506e-07,
"loss": 0.0004,
"num_input_tokens_seen": 4997904,
"step": 756
},
{
"epoch": 4.316464718460442,
"grad_norm": 0.05137365683913231,
"learning_rate": 2.26091490939786e-07,
"loss": 0.0003,
"num_input_tokens_seen": 5004592,
"step": 757
},
{
"epoch": 4.322166785459729,
"grad_norm": 1.287177562713623,
"learning_rate": 2.2233326954785e-07,
"loss": 0.0089,
"num_input_tokens_seen": 5011152,
"step": 758
},
{
"epoch": 4.327868852459017,
"grad_norm": 0.024704981595277786,
"learning_rate": 2.186050921197974e-07,
"loss": 0.0002,
"num_input_tokens_seen": 5017888,
"step": 759
},
{
"epoch": 4.333570919458303,
"grad_norm": 0.03882192447781563,
"learning_rate": 2.149070078328089e-07,
"loss": 0.0002,
"num_input_tokens_seen": 5024656,
"step": 760
},
{
"epoch": 4.3392729864575905,
"grad_norm": 0.6126242876052856,
"learning_rate": 2.1123906546711675e-07,
"loss": 0.0033,
"num_input_tokens_seen": 5030944,
"step": 761
},
{
"epoch": 4.344975053456878,
"grad_norm": 0.9626344442367554,
"learning_rate": 2.0760131340536006e-07,
"loss": 0.0109,
"num_input_tokens_seen": 5037584,
"step": 762
},
{
"epoch": 4.350677120456165,
"grad_norm": 0.6008232235908508,
"learning_rate": 2.0399379963194715e-07,
"loss": 0.0031,
"num_input_tokens_seen": 5043904,
"step": 763
},
{
"epoch": 4.3563791874554525,
"grad_norm": 0.28160446882247925,
"learning_rate": 2.004165717324233e-07,
"loss": 0.0005,
"num_input_tokens_seen": 5050256,
"step": 764
},
{
"epoch": 4.36208125445474,
"grad_norm": 0.22114770114421844,
"learning_rate": 1.9686967689284354e-07,
"loss": 0.0006,
"num_input_tokens_seen": 5057056,
"step": 765
},
{
"epoch": 4.367783321454027,
"grad_norm": 0.5921658277511597,
"learning_rate": 1.9335316189914826e-07,
"loss": 0.0035,
"num_input_tokens_seen": 5063472,
"step": 766
},
{
"epoch": 4.3734853884533145,
"grad_norm": 1.4691166877746582,
"learning_rate": 1.8986707313654707e-07,
"loss": 0.0143,
"num_input_tokens_seen": 5070160,
"step": 767
},
{
"epoch": 4.379187455452602,
"grad_norm": 0.6398491859436035,
"learning_rate": 1.864114565889069e-07,
"loss": 0.0037,
"num_input_tokens_seen": 5076784,
"step": 768
},
{
"epoch": 4.384889522451889,
"grad_norm": 1.2515215873718262,
"learning_rate": 1.8298635783814743e-07,
"loss": 0.0082,
"num_input_tokens_seen": 5083280,
"step": 769
},
{
"epoch": 4.3905915894511764,
"grad_norm": 0.048939384520053864,
"learning_rate": 1.7959182206363502e-07,
"loss": 0.0003,
"num_input_tokens_seen": 5089824,
"step": 770
},
{
"epoch": 4.396293656450463,
"grad_norm": 1.496209740638733,
"learning_rate": 1.7622789404159318e-07,
"loss": 0.0156,
"num_input_tokens_seen": 5096176,
"step": 771
},
{
"epoch": 4.40199572344975,
"grad_norm": 0.07701216638088226,
"learning_rate": 1.728946181445043e-07,
"loss": 0.0003,
"num_input_tokens_seen": 5102784,
"step": 772
},
{
"epoch": 4.4076977904490375,
"grad_norm": 0.8384713530540466,
"learning_rate": 1.6959203834053224e-07,
"loss": 0.0028,
"num_input_tokens_seen": 5109504,
"step": 773
},
{
"epoch": 4.413399857448325,
"grad_norm": 0.03958812728524208,
"learning_rate": 1.6632019819293632e-07,
"loss": 0.0002,
"num_input_tokens_seen": 5116240,
"step": 774
},
{
"epoch": 4.419101924447612,
"grad_norm": 1.7000349760055542,
"learning_rate": 1.6307914085950117e-07,
"loss": 0.0109,
"num_input_tokens_seen": 5122576,
"step": 775
},
{
"epoch": 4.4248039914468995,
"grad_norm": 1.9781132936477661,
"learning_rate": 1.5986890909196224e-07,
"loss": 0.0214,
"num_input_tokens_seen": 5128976,
"step": 776
},
{
"epoch": 4.430506058446187,
"grad_norm": 0.0699346512556076,
"learning_rate": 1.5668954523544776e-07,
"loss": 0.0004,
"num_input_tokens_seen": 5135456,
"step": 777
},
{
"epoch": 4.436208125445474,
"grad_norm": 0.7114868760108948,
"learning_rate": 1.535410912279159e-07,
"loss": 0.0034,
"num_input_tokens_seen": 5142000,
"step": 778
},
{
"epoch": 4.4419101924447615,
"grad_norm": 0.28993305563926697,
"learning_rate": 1.504235885996036e-07,
"loss": 0.0014,
"num_input_tokens_seen": 5148704,
"step": 779
},
{
"epoch": 4.447612259444049,
"grad_norm": 0.2542056143283844,
"learning_rate": 1.4733707847247814e-07,
"loss": 0.0012,
"num_input_tokens_seen": 5155472,
"step": 780
},
{
"epoch": 4.453314326443335,
"grad_norm": 0.2563529312610626,
"learning_rate": 1.4428160155969402e-07,
"loss": 0.0009,
"num_input_tokens_seen": 5162096,
"step": 781
},
{
"epoch": 4.459016393442623,
"grad_norm": 0.6606563925743103,
"learning_rate": 1.4125719816505761e-07,
"loss": 0.0038,
"num_input_tokens_seen": 5168944,
"step": 782
},
{
"epoch": 4.46471846044191,
"grad_norm": 0.05460350960493088,
"learning_rate": 1.3826390818249434e-07,
"loss": 0.0003,
"num_input_tokens_seen": 5175328,
"step": 783
},
{
"epoch": 4.470420527441197,
"grad_norm": 0.2057548314332962,
"learning_rate": 1.3530177109552233e-07,
"loss": 0.0009,
"num_input_tokens_seen": 5182032,
"step": 784
},
{
"epoch": 4.4761225944404845,
"grad_norm": 1.137696385383606,
"learning_rate": 1.3237082597673173e-07,
"loss": 0.012,
"num_input_tokens_seen": 5189200,
"step": 785
},
{
"epoch": 4.481824661439772,
"grad_norm": 0.0833611786365509,
"learning_rate": 1.294711114872707e-07,
"loss": 0.0003,
"num_input_tokens_seen": 5195968,
"step": 786
},
{
"epoch": 4.487526728439059,
"grad_norm": 0.025540605187416077,
"learning_rate": 1.2660266587633334e-07,
"loss": 0.0001,
"num_input_tokens_seen": 5202528,
"step": 787
},
{
"epoch": 4.4932287954383465,
"grad_norm": 0.7630849480628967,
"learning_rate": 1.2376552698065647e-07,
"loss": 0.0066,
"num_input_tokens_seen": 5208944,
"step": 788
},
{
"epoch": 4.498930862437634,
"grad_norm": 0.015869060531258583,
"learning_rate": 1.2095973222401914e-07,
"loss": 0.0001,
"num_input_tokens_seen": 5215888,
"step": 789
},
{
"epoch": 4.504632929436921,
"grad_norm": 0.1820346564054489,
"learning_rate": 1.1818531861675226e-07,
"loss": 0.0008,
"num_input_tokens_seen": 5222608,
"step": 790
},
{
"epoch": 4.5103349964362085,
"grad_norm": 0.028767000883817673,
"learning_rate": 1.1544232275524592e-07,
"loss": 0.0001,
"num_input_tokens_seen": 5229600,
"step": 791
},
{
"epoch": 4.516037063435496,
"grad_norm": 0.009326450526714325,
"learning_rate": 1.1273078082147059e-07,
"loss": 0.0001,
"num_input_tokens_seen": 5236368,
"step": 792
},
{
"epoch": 4.521739130434782,
"grad_norm": 0.16667701303958893,
"learning_rate": 1.1005072858249616e-07,
"loss": 0.0007,
"num_input_tokens_seen": 5243296,
"step": 793
},
{
"epoch": 4.52744119743407,
"grad_norm": 0.06731978803873062,
"learning_rate": 1.0740220139002445e-07,
"loss": 0.0003,
"num_input_tokens_seen": 5249952,
"step": 794
},
{
"epoch": 4.533143264433357,
"grad_norm": 0.6377565264701843,
"learning_rate": 1.0478523417991882e-07,
"loss": 0.0034,
"num_input_tokens_seen": 5257328,
"step": 795
},
{
"epoch": 4.538845331432644,
"grad_norm": 0.09127297252416611,
"learning_rate": 1.0219986147174705e-07,
"loss": 0.0004,
"num_input_tokens_seen": 5264608,
"step": 796
},
{
"epoch": 4.544547398431932,
"grad_norm": 0.08946079760789871,
"learning_rate": 9.964611736832109e-08,
"loss": 0.0003,
"num_input_tokens_seen": 5270928,
"step": 797
},
{
"epoch": 4.550249465431219,
"grad_norm": 0.043789055198431015,
"learning_rate": 9.712403555525357e-08,
"loss": 0.0002,
"num_input_tokens_seen": 5277408,
"step": 798
},
{
"epoch": 4.555951532430506,
"grad_norm": 1.0420541763305664,
"learning_rate": 9.463364930050762e-08,
"loss": 0.0097,
"num_input_tokens_seen": 5284304,
"step": 799
},
{
"epoch": 4.5616535994297935,
"grad_norm": 0.037041183561086655,
"learning_rate": 9.217499145396187e-08,
"loss": 0.0002,
"num_input_tokens_seen": 5290816,
"step": 800
},
{
"epoch": 4.567355666429081,
"grad_norm": 0.5905625820159912,
"learning_rate": 8.97480944469753e-08,
"loss": 0.0029,
"num_input_tokens_seen": 5297680,
"step": 801
},
{
"epoch": 4.573057733428367,
"grad_norm": 1.1155067682266235,
"learning_rate": 8.73529902919601e-08,
"loss": 0.0059,
"num_input_tokens_seen": 5304464,
"step": 802
},
{
"epoch": 4.578759800427655,
"grad_norm": 0.018409363925457,
"learning_rate": 8.498971058195887e-08,
"loss": 0.0001,
"num_input_tokens_seen": 5311072,
"step": 803
},
{
"epoch": 4.584461867426942,
"grad_norm": 0.09573342651128769,
"learning_rate": 8.265828649022867e-08,
"loss": 0.0004,
"num_input_tokens_seen": 5317776,
"step": 804
},
{
"epoch": 4.590163934426229,
"grad_norm": 0.029178114607930183,
"learning_rate": 8.035874876982957e-08,
"loss": 0.0002,
"num_input_tokens_seen": 5324160,
"step": 805
},
{
"epoch": 4.595866001425517,
"grad_norm": 0.789979875087738,
"learning_rate": 7.809112775321869e-08,
"loss": 0.004,
"num_input_tokens_seen": 5330768,
"step": 806
},
{
"epoch": 4.601568068424804,
"grad_norm": 0.28636226058006287,
"learning_rate": 7.585545335184935e-08,
"loss": 0.0013,
"num_input_tokens_seen": 5337376,
"step": 807
},
{
"epoch": 4.607270135424091,
"grad_norm": 0.11878740787506104,
"learning_rate": 7.365175505577915e-08,
"loss": 0.0011,
"num_input_tokens_seen": 5344080,
"step": 808
},
{
"epoch": 4.612972202423379,
"grad_norm": 0.3786606788635254,
"learning_rate": 7.148006193327783e-08,
"loss": 0.0017,
"num_input_tokens_seen": 5350448,
"step": 809
},
{
"epoch": 4.618674269422666,
"grad_norm": 1.0686554908752441,
"learning_rate": 6.93404026304459e-08,
"loss": 0.0049,
"num_input_tokens_seen": 5356800,
"step": 810
},
{
"epoch": 4.624376336421953,
"grad_norm": 0.708344578742981,
"learning_rate": 6.723280537083571e-08,
"loss": 0.0047,
"num_input_tokens_seen": 5363056,
"step": 811
},
{
"epoch": 4.6300784034212406,
"grad_norm": 0.029089799150824547,
"learning_rate": 6.515729795508019e-08,
"loss": 0.0001,
"num_input_tokens_seen": 5369280,
"step": 812
},
{
"epoch": 4.635780470420528,
"grad_norm": 0.06976296752691269,
"learning_rate": 6.311390776052529e-08,
"loss": 0.0002,
"num_input_tokens_seen": 5376016,
"step": 813
},
{
"epoch": 4.641482537419814,
"grad_norm": 0.05825020745396614,
"learning_rate": 6.110266174086859e-08,
"loss": 0.0003,
"num_input_tokens_seen": 5382768,
"step": 814
},
{
"epoch": 4.647184604419102,
"grad_norm": 1.069995641708374,
"learning_rate": 5.912358642580496e-08,
"loss": 0.0098,
"num_input_tokens_seen": 5389200,
"step": 815
},
{
"epoch": 4.652886671418389,
"grad_norm": 0.28126129508018494,
"learning_rate": 5.717670792067587e-08,
"loss": 0.0014,
"num_input_tokens_seen": 5395776,
"step": 816
},
{
"epoch": 4.658588738417676,
"grad_norm": 0.14872530102729797,
"learning_rate": 5.526205190612449e-08,
"loss": 0.0006,
"num_input_tokens_seen": 5402688,
"step": 817
},
{
"epoch": 4.664290805416964,
"grad_norm": 1.2570905685424805,
"learning_rate": 5.337964363775816e-08,
"loss": 0.0099,
"num_input_tokens_seen": 5409152,
"step": 818
},
{
"epoch": 4.669992872416251,
"grad_norm": 0.3209165036678314,
"learning_rate": 5.1529507945814185e-08,
"loss": 0.0019,
"num_input_tokens_seen": 5415744,
"step": 819
},
{
"epoch": 4.675694939415538,
"grad_norm": 0.1433120220899582,
"learning_rate": 4.971166923483372e-08,
"loss": 0.0009,
"num_input_tokens_seen": 5422048,
"step": 820
},
{
"epoch": 4.681397006414826,
"grad_norm": 1.7018483877182007,
"learning_rate": 4.792615148333735e-08,
"loss": 0.0189,
"num_input_tokens_seen": 5428368,
"step": 821
},
{
"epoch": 4.687099073414113,
"grad_norm": 0.011967192403972149,
"learning_rate": 4.617297824351219e-08,
"loss": 0.0001,
"num_input_tokens_seen": 5434960,
"step": 822
},
{
"epoch": 4.6928011404134,
"grad_norm": 1.1888624429702759,
"learning_rate": 4.4452172640897515e-08,
"loss": 0.0075,
"num_input_tokens_seen": 5441776,
"step": 823
},
{
"epoch": 4.698503207412687,
"grad_norm": 0.0656069964170456,
"learning_rate": 4.2763757374083006e-08,
"loss": 0.0004,
"num_input_tokens_seen": 5448768,
"step": 824
},
{
"epoch": 4.704205274411974,
"grad_norm": 0.02361738122999668,
"learning_rate": 4.110775471440703e-08,
"loss": 0.0002,
"num_input_tokens_seen": 5455472,
"step": 825
},
{
"epoch": 4.709907341411261,
"grad_norm": 0.10109017789363861,
"learning_rate": 3.948418650566499e-08,
"loss": 0.0004,
"num_input_tokens_seen": 5461856,
"step": 826
},
{
"epoch": 4.715609408410549,
"grad_norm": 0.31169602274894714,
"learning_rate": 3.7893074163817824e-08,
"loss": 0.0015,
"num_input_tokens_seen": 5468608,
"step": 827
},
{
"epoch": 4.721311475409836,
"grad_norm": 0.04045119881629944,
"learning_rate": 3.633443867671393e-08,
"loss": 0.0002,
"num_input_tokens_seen": 5475216,
"step": 828
},
{
"epoch": 4.727013542409123,
"grad_norm": 0.6061815023422241,
"learning_rate": 3.4808300603808574e-08,
"loss": 0.0137,
"num_input_tokens_seen": 5481952,
"step": 829
},
{
"epoch": 4.732715609408411,
"grad_norm": 0.09090006351470947,
"learning_rate": 3.331468007589489e-08,
"loss": 0.0004,
"num_input_tokens_seen": 5488720,
"step": 830
},
{
"epoch": 4.738417676407698,
"grad_norm": 0.1739615947008133,
"learning_rate": 3.18535967948369e-08,
"loss": 0.0006,
"num_input_tokens_seen": 5495440,
"step": 831
},
{
"epoch": 4.744119743406985,
"grad_norm": 0.04561722278594971,
"learning_rate": 3.042507003331141e-08,
"loss": 0.0003,
"num_input_tokens_seen": 5501840,
"step": 832
},
{
"epoch": 4.749821810406273,
"grad_norm": 0.09544459730386734,
"learning_rate": 2.902911863455121e-08,
"loss": 0.0006,
"num_input_tokens_seen": 5508192,
"step": 833
},
{
"epoch": 4.75552387740556,
"grad_norm": 0.9669187664985657,
"learning_rate": 2.7665761012099777e-08,
"loss": 0.0064,
"num_input_tokens_seen": 5514832,
"step": 834
},
{
"epoch": 4.761225944404847,
"grad_norm": 7.617751121520996,
"learning_rate": 2.6335015149565324e-08,
"loss": 0.01,
"num_input_tokens_seen": 5520832,
"step": 835
},
{
"epoch": 4.766928011404134,
"grad_norm": 0.7793776988983154,
"learning_rate": 2.5036898600385716e-08,
"loss": 0.0047,
"num_input_tokens_seen": 5527200,
"step": 836
},
{
"epoch": 4.772630078403421,
"grad_norm": 0.07317044585943222,
"learning_rate": 2.377142848759506e-08,
"loss": 0.0004,
"num_input_tokens_seen": 5533664,
"step": 837
},
{
"epoch": 4.778332145402708,
"grad_norm": 0.10957920551300049,
"learning_rate": 2.2538621503600255e-08,
"loss": 0.0004,
"num_input_tokens_seen": 5540400,
"step": 838
},
{
"epoch": 4.784034212401996,
"grad_norm": 0.3689444363117218,
"learning_rate": 2.1338493909958956e-08,
"loss": 0.0018,
"num_input_tokens_seen": 5547152,
"step": 839
},
{
"epoch": 4.789736279401283,
"grad_norm": 0.09837047755718231,
"learning_rate": 2.017106153716475e-08,
"loss": 0.0003,
"num_input_tokens_seen": 5553792,
"step": 840
},
{
"epoch": 4.79543834640057,
"grad_norm": 1.0303527116775513,
"learning_rate": 1.9036339784440082e-08,
"loss": 0.0093,
"num_input_tokens_seen": 5560144,
"step": 841
},
{
"epoch": 4.801140413399858,
"grad_norm": 0.24677984416484833,
"learning_rate": 1.793434361953228e-08,
"loss": 0.0007,
"num_input_tokens_seen": 5566640,
"step": 842
},
{
"epoch": 4.806842480399145,
"grad_norm": 0.6943243741989136,
"learning_rate": 1.6865087578515073e-08,
"loss": 0.0043,
"num_input_tokens_seen": 5573424,
"step": 843
},
{
"epoch": 4.812544547398432,
"grad_norm": 0.018405841663479805,
"learning_rate": 1.5828585765599315e-08,
"loss": 0.0001,
"num_input_tokens_seen": 5580208,
"step": 844
},
{
"epoch": 4.818246614397719,
"grad_norm": 0.025661015883088112,
"learning_rate": 1.482485185294369e-08,
"loss": 0.0001,
"num_input_tokens_seen": 5586720,
"step": 845
},
{
"epoch": 4.823948681397006,
"grad_norm": 0.021062418818473816,
"learning_rate": 1.3853899080477628e-08,
"loss": 0.0001,
"num_input_tokens_seen": 5593120,
"step": 846
},
{
"epoch": 4.829650748396293,
"grad_norm": 0.7667257785797119,
"learning_rate": 1.291574025572423e-08,
"loss": 0.0046,
"num_input_tokens_seen": 5600016,
"step": 847
},
{
"epoch": 4.835352815395581,
"grad_norm": 0.018968243151903152,
"learning_rate": 1.2010387753632891e-08,
"loss": 0.0001,
"num_input_tokens_seen": 5606800,
"step": 848
},
{
"epoch": 4.841054882394868,
"grad_norm": 0.4931604266166687,
"learning_rate": 1.1137853516414999e-08,
"loss": 0.0025,
"num_input_tokens_seen": 5613200,
"step": 849
},
{
"epoch": 4.846756949394155,
"grad_norm": 0.0513661690056324,
"learning_rate": 1.0298149053387107e-08,
"loss": 0.0002,
"num_input_tokens_seen": 5619792,
"step": 850
},
{
"epoch": 4.852459016393443,
"grad_norm": 0.04170714318752289,
"learning_rate": 9.491285440818288e-09,
"loss": 0.0002,
"num_input_tokens_seen": 5626720,
"step": 851
},
{
"epoch": 4.85816108339273,
"grad_norm": 0.031740520149469376,
"learning_rate": 8.717273321785513e-09,
"loss": 0.0002,
"num_input_tokens_seen": 5633328,
"step": 852
},
{
"epoch": 4.863863150392017,
"grad_norm": 0.15657615661621094,
"learning_rate": 7.976122906031559e-09,
"loss": 0.0005,
"num_input_tokens_seen": 5640240,
"step": 853
},
{
"epoch": 4.869565217391305,
"grad_norm": 0.2195972204208374,
"learning_rate": 7.267843969831767e-09,
"loss": 0.002,
"num_input_tokens_seen": 5646608,
"step": 854
},
{
"epoch": 4.875267284390592,
"grad_norm": 0.8132025003433228,
"learning_rate": 6.592445855863883e-09,
"loss": 0.0052,
"num_input_tokens_seen": 5653632,
"step": 855
},
{
"epoch": 4.880969351389879,
"grad_norm": 0.01100305374711752,
"learning_rate": 5.949937473085643e-09,
"loss": 0.0001,
"num_input_tokens_seen": 5660576,
"step": 856
},
{
"epoch": 4.886671418389166,
"grad_norm": 0.6500959396362305,
"learning_rate": 5.340327296616821e-09,
"loss": 0.0032,
"num_input_tokens_seen": 5667312,
"step": 857
},
{
"epoch": 4.892373485388453,
"grad_norm": 0.05256979167461395,
"learning_rate": 4.763623367627645e-09,
"loss": 0.0003,
"num_input_tokens_seen": 5673984,
"step": 858
},
{
"epoch": 4.89807555238774,
"grad_norm": 0.0746457576751709,
"learning_rate": 4.219833293233333e-09,
"loss": 0.0003,
"num_input_tokens_seen": 5680448,
"step": 859
},
{
"epoch": 4.903777619387028,
"grad_norm": 0.021520154550671577,
"learning_rate": 3.7089642463922217e-09,
"loss": 0.0001,
"num_input_tokens_seen": 5687248,
"step": 860
},
{
"epoch": 4.909479686386315,
"grad_norm": 0.13376076519489288,
"learning_rate": 3.231022965812236e-09,
"loss": 0.0006,
"num_input_tokens_seen": 5693792,
"step": 861
},
{
"epoch": 4.915181753385602,
"grad_norm": 0.6564909815788269,
"learning_rate": 2.7860157558620703e-09,
"loss": 0.0044,
"num_input_tokens_seen": 5700368,
"step": 862
},
{
"epoch": 4.92088382038489,
"grad_norm": 0.331143319606781,
"learning_rate": 2.3739484864879203e-09,
"loss": 0.0007,
"num_input_tokens_seen": 5706672,
"step": 863
},
{
"epoch": 4.926585887384177,
"grad_norm": 0.0895066112279892,
"learning_rate": 1.9948265931352128e-09,
"loss": 0.0004,
"num_input_tokens_seen": 5713840,
"step": 864
},
{
"epoch": 4.932287954383464,
"grad_norm": 1.506907343864441,
"learning_rate": 1.6486550766783849e-09,
"loss": 0.0067,
"num_input_tokens_seen": 5720272,
"step": 865
},
{
"epoch": 4.937990021382751,
"grad_norm": 0.10082338750362396,
"learning_rate": 1.3354385033534368e-09,
"loss": 0.0005,
"num_input_tokens_seen": 5727040,
"step": 866
},
{
"epoch": 4.943692088382038,
"grad_norm": 0.3792347311973572,
"learning_rate": 1.055181004698813e-09,
"loss": 0.0017,
"num_input_tokens_seen": 5733696,
"step": 867
},
{
"epoch": 4.949394155381325,
"grad_norm": 0.820798397064209,
"learning_rate": 8.07886277500447e-10,
"loss": 0.0065,
"num_input_tokens_seen": 5740064,
"step": 868
},
{
"epoch": 4.955096222380613,
"grad_norm": 0.11408527195453644,
"learning_rate": 5.935575837429097e-10,
"loss": 0.0009,
"num_input_tokens_seen": 5746624,
"step": 869
},
{
"epoch": 4.9607982893799,
"grad_norm": 0.014658835716545582,
"learning_rate": 4.1219775056666746e-10,
"loss": 0.0001,
"num_input_tokens_seen": 5753456,
"step": 870
},
{
"epoch": 4.966500356379187,
"grad_norm": 1.0549769401550293,
"learning_rate": 2.638091702303336e-10,
"loss": 0.0096,
"num_input_tokens_seen": 5759808,
"step": 871
},
{
"epoch": 4.972202423378475,
"grad_norm": 2.1060280799865723,
"learning_rate": 1.4839380007986014e-10,
"loss": 0.0099,
"num_input_tokens_seen": 5766288,
"step": 872
},
{
"epoch": 4.977904490377762,
"grad_norm": 0.06906285136938095,
"learning_rate": 6.595316252161476e-11,
"loss": 0.0004,
"num_input_tokens_seen": 5772848,
"step": 873
},
{
"epoch": 4.983606557377049,
"grad_norm": 0.5131366848945618,
"learning_rate": 1.6488345003506933e-11,
"loss": 0.0026,
"num_input_tokens_seen": 5779552,
"step": 874
},
{
"epoch": 4.989308624376337,
"grad_norm": 0.508285641670227,
"learning_rate": 0.0,
"loss": 0.0029,
"num_input_tokens_seen": 5786192,
"step": 875
},
{
"epoch": 4.989308624376337,
"num_input_tokens_seen": 5786192,
"step": 875,
"total_flos": 2.6054961380143923e+17,
"train_loss": 0.1388659444341361,
"train_runtime": 11572.721,
"train_samples_per_second": 9.693,
"train_steps_per_second": 0.076
}
],
"logging_steps": 1,
"max_steps": 875,
"num_input_tokens_seen": 5786192,
"num_train_epochs": 5,
"save_steps": 5000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.6054961380143923e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}