xyz/trainer_state.json
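What follows is the raw trainer_state.json that the Hugging Face Trainer writes alongside its checkpoints: the best checkpoint and metric, the eval schedule, and a "log_history" of per-step training records (loss, gradient norm, learning rate) interleaved with periodic evaluation records. A minimal sketch of how one might load and summarize the file (an assumption: the JSON below is saved locally as "trainer_state.json"; the key names used are taken directly from the file itself):

import json

# Minimal sketch (assumes the JSON below is saved locally as "trainer_state.json").
with open("trainer_state.json") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])
print("best metric:", state["best_metric"])

# log_history mixes per-step training records (which carry a "loss" key) and
# periodic evaluation records (which carry "eval_loss"); split them apart.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"{len(train_log)} training entries, {len(eval_log)} eval entries")
for e in eval_log:
    print(f"step {e['step']:>5}: eval_loss={e['eval_loss']:.4f}, "
          f"eval_accuracy={e['eval_accuracy']:.4f}")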
{
"best_metric": 0.662264883518219,
"best_model_checkpoint": "./xyz/checkpoint-3000",
"epoch": 5.0,
"eval_steps": 1000,
"global_step": 5400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 3.8143322467803955,
"learning_rate": 0.00019962962962962963,
"loss": 1.3521,
"step": 10
},
{
"epoch": 0.02,
"grad_norm": 6.054905891418457,
"learning_rate": 0.00019925925925925927,
"loss": 0.9066,
"step": 20
},
{
"epoch": 0.03,
"grad_norm": 4.3551836013793945,
"learning_rate": 0.0001988888888888889,
"loss": 1.2308,
"step": 30
},
{
"epoch": 0.04,
"grad_norm": 4.610753059387207,
"learning_rate": 0.00019851851851851853,
"loss": 1.4251,
"step": 40
},
{
"epoch": 0.05,
"grad_norm": 3.917494297027588,
"learning_rate": 0.00019814814814814814,
"loss": 1.3972,
"step": 50
},
{
"epoch": 0.06,
"grad_norm": 3.3760318756103516,
"learning_rate": 0.00019777777777777778,
"loss": 1.2909,
"step": 60
},
{
"epoch": 0.06,
"grad_norm": 4.219705104827881,
"learning_rate": 0.00019740740740740743,
"loss": 0.9873,
"step": 70
},
{
"epoch": 0.07,
"grad_norm": 8.88244915008545,
"learning_rate": 0.00019703703703703704,
"loss": 1.1551,
"step": 80
},
{
"epoch": 0.08,
"grad_norm": 4.982913970947266,
"learning_rate": 0.00019666666666666666,
"loss": 0.9101,
"step": 90
},
{
"epoch": 0.09,
"grad_norm": 2.62565016746521,
"learning_rate": 0.0001962962962962963,
"loss": 1.1123,
"step": 100
},
{
"epoch": 0.1,
"grad_norm": 5.44295597076416,
"learning_rate": 0.00019592592592592594,
"loss": 1.3237,
"step": 110
},
{
"epoch": 0.11,
"grad_norm": 4.725115776062012,
"learning_rate": 0.00019555555555555556,
"loss": 1.6125,
"step": 120
},
{
"epoch": 0.12,
"grad_norm": 3.289890766143799,
"learning_rate": 0.0001951851851851852,
"loss": 1.1549,
"step": 130
},
{
"epoch": 0.13,
"grad_norm": 2.697791576385498,
"learning_rate": 0.00019481481481481482,
"loss": 1.0856,
"step": 140
},
{
"epoch": 0.14,
"grad_norm": 1.6912928819656372,
"learning_rate": 0.00019444444444444446,
"loss": 0.7321,
"step": 150
},
{
"epoch": 0.15,
"grad_norm": 3.574882745742798,
"learning_rate": 0.0001941111111111111,
"loss": 1.0417,
"step": 160
},
{
"epoch": 0.16,
"grad_norm": 0.8383091688156128,
"learning_rate": 0.0001937777777777778,
"loss": 1.1763,
"step": 170
},
{
"epoch": 0.17,
"grad_norm": 5.596456050872803,
"learning_rate": 0.0001934074074074074,
"loss": 0.9972,
"step": 180
},
{
"epoch": 0.18,
"grad_norm": 6.187414169311523,
"learning_rate": 0.00019303703703703703,
"loss": 0.9869,
"step": 190
},
{
"epoch": 0.19,
"grad_norm": 6.857061862945557,
"learning_rate": 0.0001926666666666667,
"loss": 1.2555,
"step": 200
},
{
"epoch": 0.19,
"grad_norm": 6.55685567855835,
"learning_rate": 0.0001922962962962963,
"loss": 1.6114,
"step": 210
},
{
"epoch": 0.2,
"grad_norm": 2.499633550643921,
"learning_rate": 0.00019192592592592593,
"loss": 1.0535,
"step": 220
},
{
"epoch": 0.21,
"grad_norm": 5.06282377243042,
"learning_rate": 0.00019155555555555554,
"loss": 1.0282,
"step": 230
},
{
"epoch": 0.22,
"grad_norm": 3.4892516136169434,
"learning_rate": 0.0001911851851851852,
"loss": 0.9049,
"step": 240
},
{
"epoch": 0.23,
"grad_norm": 3.472264051437378,
"learning_rate": 0.00019081481481481483,
"loss": 1.0431,
"step": 250
},
{
"epoch": 0.24,
"grad_norm": 4.258254528045654,
"learning_rate": 0.00019044444444444444,
"loss": 1.1972,
"step": 260
},
{
"epoch": 0.25,
"grad_norm": 2.4951963424682617,
"learning_rate": 0.0001900740740740741,
"loss": 1.0739,
"step": 270
},
{
"epoch": 0.26,
"grad_norm": 3.604152202606201,
"learning_rate": 0.00018970370370370373,
"loss": 1.2274,
"step": 280
},
{
"epoch": 0.27,
"grad_norm": 1.1457213163375854,
"learning_rate": 0.00018933333333333335,
"loss": 0.9481,
"step": 290
},
{
"epoch": 0.28,
"grad_norm": 3.8631083965301514,
"learning_rate": 0.00018896296296296296,
"loss": 0.8334,
"step": 300
},
{
"epoch": 0.29,
"grad_norm": 7.121092796325684,
"learning_rate": 0.0001885925925925926,
"loss": 0.8217,
"step": 310
},
{
"epoch": 0.3,
"grad_norm": 4.082801342010498,
"learning_rate": 0.00018822222222222222,
"loss": 0.8905,
"step": 320
},
{
"epoch": 0.31,
"grad_norm": 14.353635787963867,
"learning_rate": 0.00018785185185185186,
"loss": 1.1616,
"step": 330
},
{
"epoch": 0.31,
"grad_norm": 2.392637014389038,
"learning_rate": 0.00018748148148148148,
"loss": 0.9321,
"step": 340
},
{
"epoch": 0.32,
"grad_norm": 1.7092102766036987,
"learning_rate": 0.00018711111111111112,
"loss": 0.9617,
"step": 350
},
{
"epoch": 0.33,
"grad_norm": 16.04946517944336,
"learning_rate": 0.00018674074074074074,
"loss": 0.9957,
"step": 360
},
{
"epoch": 0.34,
"grad_norm": 7.711897373199463,
"learning_rate": 0.00018637037037037038,
"loss": 1.2192,
"step": 370
},
{
"epoch": 0.35,
"grad_norm": 3.7547879219055176,
"learning_rate": 0.00018600000000000002,
"loss": 0.8561,
"step": 380
},
{
"epoch": 0.36,
"grad_norm": 5.042356967926025,
"learning_rate": 0.00018562962962962964,
"loss": 0.833,
"step": 390
},
{
"epoch": 0.37,
"grad_norm": 7.934051036834717,
"learning_rate": 0.00018525925925925925,
"loss": 0.6549,
"step": 400
},
{
"epoch": 0.38,
"grad_norm": 8.811772346496582,
"learning_rate": 0.0001848888888888889,
"loss": 0.9769,
"step": 410
},
{
"epoch": 0.39,
"grad_norm": 3.5302858352661133,
"learning_rate": 0.00018451851851851854,
"loss": 1.0116,
"step": 420
},
{
"epoch": 0.4,
"grad_norm": 5.3733930587768555,
"learning_rate": 0.00018414814814814815,
"loss": 0.7996,
"step": 430
},
{
"epoch": 0.41,
"grad_norm": 8.282529830932617,
"learning_rate": 0.00018377777777777777,
"loss": 0.9161,
"step": 440
},
{
"epoch": 0.42,
"grad_norm": 3.126352310180664,
"learning_rate": 0.0001834074074074074,
"loss": 0.6905,
"step": 450
},
{
"epoch": 0.43,
"grad_norm": 6.846827030181885,
"learning_rate": 0.00018303703703703705,
"loss": 0.6023,
"step": 460
},
{
"epoch": 0.44,
"grad_norm": 2.8621811866760254,
"learning_rate": 0.00018266666666666667,
"loss": 0.9146,
"step": 470
},
{
"epoch": 0.44,
"grad_norm": 8.606246948242188,
"learning_rate": 0.00018229629629629629,
"loss": 0.7004,
"step": 480
},
{
"epoch": 0.45,
"grad_norm": 2.7710301876068115,
"learning_rate": 0.00018192592592592596,
"loss": 0.6772,
"step": 490
},
{
"epoch": 0.46,
"grad_norm": 12.793920516967773,
"learning_rate": 0.00018155555555555557,
"loss": 1.1335,
"step": 500
},
{
"epoch": 0.47,
"grad_norm": 6.23002290725708,
"learning_rate": 0.0001811851851851852,
"loss": 0.8264,
"step": 510
},
{
"epoch": 0.48,
"grad_norm": 19.351716995239258,
"learning_rate": 0.0001808148148148148,
"loss": 1.0775,
"step": 520
},
{
"epoch": 0.49,
"grad_norm": 6.582885265350342,
"learning_rate": 0.00018044444444444447,
"loss": 0.9896,
"step": 530
},
{
"epoch": 0.5,
"grad_norm": 1.0233162641525269,
"learning_rate": 0.0001800740740740741,
"loss": 0.6742,
"step": 540
},
{
"epoch": 0.51,
"grad_norm": 3.269961357116699,
"learning_rate": 0.0001797037037037037,
"loss": 0.8393,
"step": 550
},
{
"epoch": 0.52,
"grad_norm": 2.69207763671875,
"learning_rate": 0.00017933333333333332,
"loss": 1.1656,
"step": 560
},
{
"epoch": 0.53,
"grad_norm": 2.027094602584839,
"learning_rate": 0.000178962962962963,
"loss": 1.0053,
"step": 570
},
{
"epoch": 0.54,
"grad_norm": 11.380446434020996,
"learning_rate": 0.0001785925925925926,
"loss": 0.8784,
"step": 580
},
{
"epoch": 0.55,
"grad_norm": 2.832817554473877,
"learning_rate": 0.00017822222222222222,
"loss": 0.7873,
"step": 590
},
{
"epoch": 0.56,
"grad_norm": 6.541043758392334,
"learning_rate": 0.00017785185185185186,
"loss": 1.2377,
"step": 600
},
{
"epoch": 0.56,
"grad_norm": 4.639912128448486,
"learning_rate": 0.0001774814814814815,
"loss": 1.0365,
"step": 610
},
{
"epoch": 0.57,
"grad_norm": 8.525505065917969,
"learning_rate": 0.00017711111111111112,
"loss": 1.3203,
"step": 620
},
{
"epoch": 0.58,
"grad_norm": 1.2499964237213135,
"learning_rate": 0.00017674074074074074,
"loss": 1.0517,
"step": 630
},
{
"epoch": 0.59,
"grad_norm": 5.76359224319458,
"learning_rate": 0.00017637037037037038,
"loss": 0.808,
"step": 640
},
{
"epoch": 0.6,
"grad_norm": 3.2585175037384033,
"learning_rate": 0.00017600000000000002,
"loss": 1.1463,
"step": 650
},
{
"epoch": 0.61,
"grad_norm": 5.738606929779053,
"learning_rate": 0.00017562962962962964,
"loss": 0.994,
"step": 660
},
{
"epoch": 0.62,
"grad_norm": 1.9442558288574219,
"learning_rate": 0.00017525925925925928,
"loss": 1.0929,
"step": 670
},
{
"epoch": 0.63,
"grad_norm": 3.3668947219848633,
"learning_rate": 0.0001748888888888889,
"loss": 1.2401,
"step": 680
},
{
"epoch": 0.64,
"grad_norm": 6.455596923828125,
"learning_rate": 0.00017451851851851854,
"loss": 1.4159,
"step": 690
},
{
"epoch": 0.65,
"grad_norm": 10.378853797912598,
"learning_rate": 0.00017414814814814815,
"loss": 0.8873,
"step": 700
},
{
"epoch": 0.66,
"grad_norm": 10.790160179138184,
"learning_rate": 0.0001737777777777778,
"loss": 0.9734,
"step": 710
},
{
"epoch": 0.67,
"grad_norm": 3.259772300720215,
"learning_rate": 0.0001734074074074074,
"loss": 0.7115,
"step": 720
},
{
"epoch": 0.68,
"grad_norm": 5.672187328338623,
"learning_rate": 0.00017303703703703703,
"loss": 0.8625,
"step": 730
},
{
"epoch": 0.69,
"grad_norm": 2.928330421447754,
"learning_rate": 0.00017266666666666667,
"loss": 0.9917,
"step": 740
},
{
"epoch": 0.69,
"grad_norm": 6.04885196685791,
"learning_rate": 0.00017229629629629631,
"loss": 1.45,
"step": 750
},
{
"epoch": 0.7,
"grad_norm": 3.1760973930358887,
"learning_rate": 0.00017192592592592593,
"loss": 1.2334,
"step": 760
},
{
"epoch": 0.71,
"grad_norm": 9.087358474731445,
"learning_rate": 0.00017155555555555555,
"loss": 1.6159,
"step": 770
},
{
"epoch": 0.72,
"grad_norm": 3.5645482540130615,
"learning_rate": 0.00017118518518518522,
"loss": 0.918,
"step": 780
},
{
"epoch": 0.73,
"grad_norm": 2.18007493019104,
"learning_rate": 0.00017081481481481483,
"loss": 1.0937,
"step": 790
},
{
"epoch": 0.74,
"grad_norm": 0.8623066544532776,
"learning_rate": 0.00017044444444444445,
"loss": 0.877,
"step": 800
},
{
"epoch": 0.75,
"grad_norm": 2.687042236328125,
"learning_rate": 0.00017007407407407406,
"loss": 0.9692,
"step": 810
},
{
"epoch": 0.76,
"grad_norm": 4.6899309158325195,
"learning_rate": 0.00016970370370370373,
"loss": 0.866,
"step": 820
},
{
"epoch": 0.77,
"grad_norm": 1.2095915079116821,
"learning_rate": 0.00016933333333333335,
"loss": 0.7583,
"step": 830
},
{
"epoch": 0.78,
"grad_norm": 5.8733439445495605,
"learning_rate": 0.00016896296296296296,
"loss": 0.7542,
"step": 840
},
{
"epoch": 0.79,
"grad_norm": 4.754787921905518,
"learning_rate": 0.00016859259259259258,
"loss": 1.1111,
"step": 850
},
{
"epoch": 0.8,
"grad_norm": 3.035780191421509,
"learning_rate": 0.00016822222222222225,
"loss": 0.7904,
"step": 860
},
{
"epoch": 0.81,
"grad_norm": 1.6468725204467773,
"learning_rate": 0.00016785185185185186,
"loss": 0.5631,
"step": 870
},
{
"epoch": 0.81,
"grad_norm": 1.4381688833236694,
"learning_rate": 0.00016748148148148148,
"loss": 0.8486,
"step": 880
},
{
"epoch": 0.82,
"grad_norm": 5.331398963928223,
"learning_rate": 0.00016711111111111112,
"loss": 0.7871,
"step": 890
},
{
"epoch": 0.83,
"grad_norm": 0.6239520311355591,
"learning_rate": 0.00016674074074074077,
"loss": 0.7374,
"step": 900
},
{
"epoch": 0.84,
"grad_norm": 7.6069841384887695,
"learning_rate": 0.00016637037037037038,
"loss": 0.9092,
"step": 910
},
{
"epoch": 0.85,
"grad_norm": 9.099188804626465,
"learning_rate": 0.000166,
"loss": 1.2067,
"step": 920
},
{
"epoch": 0.86,
"grad_norm": 6.082764625549316,
"learning_rate": 0.00016562962962962964,
"loss": 0.9759,
"step": 930
},
{
"epoch": 0.87,
"grad_norm": 2.981585741043091,
"learning_rate": 0.00016525925925925928,
"loss": 0.7387,
"step": 940
},
{
"epoch": 0.88,
"grad_norm": 7.254508972167969,
"learning_rate": 0.0001648888888888889,
"loss": 0.6081,
"step": 950
},
{
"epoch": 0.89,
"grad_norm": 4.67478084564209,
"learning_rate": 0.0001645185185185185,
"loss": 0.8602,
"step": 960
},
{
"epoch": 0.9,
"grad_norm": 1.7290263175964355,
"learning_rate": 0.00016414814814814816,
"loss": 0.8461,
"step": 970
},
{
"epoch": 0.91,
"grad_norm": 5.2177205085754395,
"learning_rate": 0.0001637777777777778,
"loss": 0.718,
"step": 980
},
{
"epoch": 0.92,
"grad_norm": 3.7638397216796875,
"learning_rate": 0.00016340740740740741,
"loss": 1.1177,
"step": 990
},
{
"epoch": 0.93,
"grad_norm": 5.743690490722656,
"learning_rate": 0.00016303703703703706,
"loss": 0.9351,
"step": 1000
},
{
"epoch": 0.93,
"eval_accuracy": 0.7194444444444444,
"eval_loss": 0.8621441721916199,
"eval_runtime": 12.4012,
"eval_samples_per_second": 87.088,
"eval_steps_per_second": 10.886,
"step": 1000
},
{
"epoch": 0.94,
"grad_norm": 0.8297109603881836,
"learning_rate": 0.00016266666666666667,
"loss": 0.9375,
"step": 1010
},
{
"epoch": 0.94,
"grad_norm": 17.907121658325195,
"learning_rate": 0.00016229629629629632,
"loss": 1.0369,
"step": 1020
},
{
"epoch": 0.95,
"grad_norm": 8.758153915405273,
"learning_rate": 0.00016192592592592593,
"loss": 1.2504,
"step": 1030
},
{
"epoch": 0.96,
"grad_norm": 8.147494316101074,
"learning_rate": 0.00016155555555555557,
"loss": 0.8821,
"step": 1040
},
{
"epoch": 0.97,
"grad_norm": 8.431010246276855,
"learning_rate": 0.0001611851851851852,
"loss": 0.7338,
"step": 1050
},
{
"epoch": 0.98,
"grad_norm": 9.130791664123535,
"learning_rate": 0.00016081481481481483,
"loss": 0.6653,
"step": 1060
},
{
"epoch": 0.99,
"grad_norm": 5.671422481536865,
"learning_rate": 0.00016044444444444445,
"loss": 1.1306,
"step": 1070
},
{
"epoch": 1.0,
"grad_norm": 2.940847635269165,
"learning_rate": 0.0001600740740740741,
"loss": 0.9153,
"step": 1080
},
{
"epoch": 1.01,
"grad_norm": 0.4813268780708313,
"learning_rate": 0.0001597037037037037,
"loss": 0.8784,
"step": 1090
},
{
"epoch": 1.02,
"grad_norm": 4.398209571838379,
"learning_rate": 0.00015933333333333332,
"loss": 0.9996,
"step": 1100
},
{
"epoch": 1.03,
"grad_norm": 4.6109161376953125,
"learning_rate": 0.000158962962962963,
"loss": 0.7194,
"step": 1110
},
{
"epoch": 1.04,
"grad_norm": 7.428894996643066,
"learning_rate": 0.0001585925925925926,
"loss": 0.5809,
"step": 1120
},
{
"epoch": 1.05,
"grad_norm": 1.783790946006775,
"learning_rate": 0.00015822222222222222,
"loss": 0.3673,
"step": 1130
},
{
"epoch": 1.06,
"grad_norm": 8.162879943847656,
"learning_rate": 0.00015785185185185184,
"loss": 0.4772,
"step": 1140
},
{
"epoch": 1.06,
"grad_norm": 3.2053816318511963,
"learning_rate": 0.0001574814814814815,
"loss": 0.9253,
"step": 1150
},
{
"epoch": 1.07,
"grad_norm": 0.6329092383384705,
"learning_rate": 0.00015711111111111112,
"loss": 0.6919,
"step": 1160
},
{
"epoch": 1.08,
"grad_norm": 1.652776837348938,
"learning_rate": 0.00015674074074074074,
"loss": 0.7594,
"step": 1170
},
{
"epoch": 1.09,
"grad_norm": 4.956950664520264,
"learning_rate": 0.00015637037037037035,
"loss": 0.7221,
"step": 1180
},
{
"epoch": 1.1,
"grad_norm": 5.909961223602295,
"learning_rate": 0.00015600000000000002,
"loss": 0.6839,
"step": 1190
},
{
"epoch": 1.11,
"grad_norm": 0.30241209268569946,
"learning_rate": 0.00015562962962962964,
"loss": 0.8099,
"step": 1200
},
{
"epoch": 1.12,
"grad_norm": 4.023684978485107,
"learning_rate": 0.00015525925925925926,
"loss": 0.7276,
"step": 1210
},
{
"epoch": 1.13,
"grad_norm": 1.580052375793457,
"learning_rate": 0.0001548888888888889,
"loss": 0.8793,
"step": 1220
},
{
"epoch": 1.14,
"grad_norm": 9.444668769836426,
"learning_rate": 0.00015451851851851854,
"loss": 0.4673,
"step": 1230
},
{
"epoch": 1.15,
"grad_norm": 8.650052070617676,
"learning_rate": 0.00015414814814814816,
"loss": 0.8987,
"step": 1240
},
{
"epoch": 1.16,
"grad_norm": 5.651284217834473,
"learning_rate": 0.00015377777777777777,
"loss": 0.6377,
"step": 1250
},
{
"epoch": 1.17,
"grad_norm": 6.849576950073242,
"learning_rate": 0.00015340740740740741,
"loss": 0.431,
"step": 1260
},
{
"epoch": 1.18,
"grad_norm": 8.004125595092773,
"learning_rate": 0.00015303703703703706,
"loss": 1.0208,
"step": 1270
},
{
"epoch": 1.19,
"grad_norm": 9.301837921142578,
"learning_rate": 0.00015266666666666667,
"loss": 0.8689,
"step": 1280
},
{
"epoch": 1.19,
"grad_norm": 1.5967570543289185,
"learning_rate": 0.00015229629629629632,
"loss": 0.6375,
"step": 1290
},
{
"epoch": 1.2,
"grad_norm": 0.2297324687242508,
"learning_rate": 0.00015192592592592593,
"loss": 0.3302,
"step": 1300
},
{
"epoch": 1.21,
"grad_norm": 2.4135913848876953,
"learning_rate": 0.00015155555555555557,
"loss": 0.4617,
"step": 1310
},
{
"epoch": 1.22,
"grad_norm": 1.871252417564392,
"learning_rate": 0.0001511851851851852,
"loss": 0.7083,
"step": 1320
},
{
"epoch": 1.23,
"grad_norm": 0.3391021192073822,
"learning_rate": 0.00015081481481481483,
"loss": 0.4535,
"step": 1330
},
{
"epoch": 1.24,
"grad_norm": 0.1725960224866867,
"learning_rate": 0.00015044444444444445,
"loss": 0.7101,
"step": 1340
},
{
"epoch": 1.25,
"grad_norm": 4.227051258087158,
"learning_rate": 0.0001500740740740741,
"loss": 0.9594,
"step": 1350
},
{
"epoch": 1.26,
"grad_norm": 4.840494155883789,
"learning_rate": 0.0001497037037037037,
"loss": 0.4371,
"step": 1360
},
{
"epoch": 1.27,
"grad_norm": 1.1564832925796509,
"learning_rate": 0.00014933333333333335,
"loss": 0.6952,
"step": 1370
},
{
"epoch": 1.28,
"grad_norm": 6.8748884201049805,
"learning_rate": 0.00014896296296296296,
"loss": 0.3793,
"step": 1380
},
{
"epoch": 1.29,
"grad_norm": 0.35871052742004395,
"learning_rate": 0.0001485925925925926,
"loss": 0.6367,
"step": 1390
},
{
"epoch": 1.3,
"grad_norm": 10.733613014221191,
"learning_rate": 0.00014822222222222225,
"loss": 0.8669,
"step": 1400
},
{
"epoch": 1.31,
"grad_norm": 6.138615131378174,
"learning_rate": 0.00014785185185185187,
"loss": 0.4982,
"step": 1410
},
{
"epoch": 1.31,
"grad_norm": 18.2926082611084,
"learning_rate": 0.00014748148148148148,
"loss": 0.9284,
"step": 1420
},
{
"epoch": 1.32,
"grad_norm": 5.831931114196777,
"learning_rate": 0.00014711111111111112,
"loss": 0.814,
"step": 1430
},
{
"epoch": 1.33,
"grad_norm": 6.251952648162842,
"learning_rate": 0.00014674074074074077,
"loss": 0.9152,
"step": 1440
},
{
"epoch": 1.34,
"grad_norm": 0.22696472704410553,
"learning_rate": 0.00014637037037037038,
"loss": 0.9036,
"step": 1450
},
{
"epoch": 1.35,
"grad_norm": 5.911442279815674,
"learning_rate": 0.000146,
"loss": 0.2541,
"step": 1460
},
{
"epoch": 1.36,
"grad_norm": 7.1152567863464355,
"learning_rate": 0.00014562962962962961,
"loss": 0.4708,
"step": 1470
},
{
"epoch": 1.37,
"grad_norm": 4.495903491973877,
"learning_rate": 0.00014525925925925928,
"loss": 0.592,
"step": 1480
},
{
"epoch": 1.38,
"grad_norm": 5.417073726654053,
"learning_rate": 0.0001448888888888889,
"loss": 0.6176,
"step": 1490
},
{
"epoch": 1.39,
"grad_norm": 3.883765697479248,
"learning_rate": 0.00014451851851851851,
"loss": 0.9332,
"step": 1500
},
{
"epoch": 1.4,
"grad_norm": 6.6734113693237305,
"learning_rate": 0.0001441851851851852,
"loss": 0.7576,
"step": 1510
},
{
"epoch": 1.41,
"grad_norm": 12.220704078674316,
"learning_rate": 0.00014381481481481483,
"loss": 1.0047,
"step": 1520
},
{
"epoch": 1.42,
"grad_norm": 9.806129455566406,
"learning_rate": 0.00014344444444444444,
"loss": 0.7319,
"step": 1530
},
{
"epoch": 1.43,
"grad_norm": 0.5132556557655334,
"learning_rate": 0.00014307407407407408,
"loss": 0.3966,
"step": 1540
},
{
"epoch": 1.44,
"grad_norm": 3.475074052810669,
"learning_rate": 0.00014270370370370373,
"loss": 0.6231,
"step": 1550
},
{
"epoch": 1.44,
"grad_norm": 0.3323878347873688,
"learning_rate": 0.00014233333333333334,
"loss": 1.0922,
"step": 1560
},
{
"epoch": 1.45,
"grad_norm": 0.349679172039032,
"learning_rate": 0.00014196296296296296,
"loss": 0.7094,
"step": 1570
},
{
"epoch": 1.46,
"grad_norm": 3.0207903385162354,
"learning_rate": 0.0001415925925925926,
"loss": 0.7197,
"step": 1580
},
{
"epoch": 1.47,
"grad_norm": 4.442481517791748,
"learning_rate": 0.00014122222222222224,
"loss": 0.6785,
"step": 1590
},
{
"epoch": 1.48,
"grad_norm": 7.016599178314209,
"learning_rate": 0.00014085185185185186,
"loss": 0.9507,
"step": 1600
},
{
"epoch": 1.49,
"grad_norm": 4.900627613067627,
"learning_rate": 0.00014048148148148147,
"loss": 0.875,
"step": 1610
},
{
"epoch": 1.5,
"grad_norm": 0.9394499063491821,
"learning_rate": 0.00014011111111111112,
"loss": 0.7031,
"step": 1620
},
{
"epoch": 1.51,
"grad_norm": 3.2277708053588867,
"learning_rate": 0.00013974074074074076,
"loss": 0.8119,
"step": 1630
},
{
"epoch": 1.52,
"grad_norm": 0.9272705316543579,
"learning_rate": 0.00013937037037037037,
"loss": 0.4543,
"step": 1640
},
{
"epoch": 1.53,
"grad_norm": 4.230544567108154,
"learning_rate": 0.000139,
"loss": 0.6693,
"step": 1650
},
{
"epoch": 1.54,
"grad_norm": 6.435239315032959,
"learning_rate": 0.00013862962962962963,
"loss": 0.7911,
"step": 1660
},
{
"epoch": 1.55,
"grad_norm": 4.573579788208008,
"learning_rate": 0.00013825925925925928,
"loss": 0.7161,
"step": 1670
},
{
"epoch": 1.56,
"grad_norm": 5.4016804695129395,
"learning_rate": 0.0001378888888888889,
"loss": 0.6663,
"step": 1680
},
{
"epoch": 1.56,
"grad_norm": 17.52692985534668,
"learning_rate": 0.00013751851851851853,
"loss": 0.5503,
"step": 1690
},
{
"epoch": 1.57,
"grad_norm": 8.25500202178955,
"learning_rate": 0.00013714814814814815,
"loss": 0.6628,
"step": 1700
},
{
"epoch": 1.58,
"grad_norm": 6.842739582061768,
"learning_rate": 0.0001367777777777778,
"loss": 0.6827,
"step": 1710
},
{
"epoch": 1.59,
"grad_norm": 9.770726203918457,
"learning_rate": 0.0001364074074074074,
"loss": 0.4264,
"step": 1720
},
{
"epoch": 1.6,
"grad_norm": 4.578246593475342,
"learning_rate": 0.00013603703703703705,
"loss": 0.6506,
"step": 1730
},
{
"epoch": 1.61,
"grad_norm": 0.670189380645752,
"learning_rate": 0.00013566666666666667,
"loss": 0.225,
"step": 1740
},
{
"epoch": 1.62,
"grad_norm": 2.7100305557250977,
"learning_rate": 0.0001352962962962963,
"loss": 0.3382,
"step": 1750
},
{
"epoch": 1.63,
"grad_norm": 8.259403228759766,
"learning_rate": 0.00013492592592592592,
"loss": 0.6352,
"step": 1760
},
{
"epoch": 1.64,
"grad_norm": 8.891138076782227,
"learning_rate": 0.00013455555555555557,
"loss": 0.2684,
"step": 1770
},
{
"epoch": 1.65,
"grad_norm": 0.33002957701683044,
"learning_rate": 0.00013418518518518518,
"loss": 0.4759,
"step": 1780
},
{
"epoch": 1.66,
"grad_norm": 10.079939842224121,
"learning_rate": 0.00013381481481481483,
"loss": 0.5655,
"step": 1790
},
{
"epoch": 1.67,
"grad_norm": 6.502269268035889,
"learning_rate": 0.00013344444444444447,
"loss": 0.2877,
"step": 1800
},
{
"epoch": 1.68,
"grad_norm": 6.24245023727417,
"learning_rate": 0.00013307407407407408,
"loss": 0.6231,
"step": 1810
},
{
"epoch": 1.69,
"grad_norm": 0.28803545236587524,
"learning_rate": 0.0001327037037037037,
"loss": 0.6815,
"step": 1820
},
{
"epoch": 1.69,
"grad_norm": 0.7538921236991882,
"learning_rate": 0.00013233333333333334,
"loss": 0.6853,
"step": 1830
},
{
"epoch": 1.7,
"grad_norm": 1.1127091646194458,
"learning_rate": 0.00013196296296296299,
"loss": 0.656,
"step": 1840
},
{
"epoch": 1.71,
"grad_norm": 11.09835147857666,
"learning_rate": 0.0001315925925925926,
"loss": 0.4621,
"step": 1850
},
{
"epoch": 1.72,
"grad_norm": 0.5205969214439392,
"learning_rate": 0.00013122222222222222,
"loss": 0.3503,
"step": 1860
},
{
"epoch": 1.73,
"grad_norm": 5.593262195587158,
"learning_rate": 0.00013085185185185186,
"loss": 0.6921,
"step": 1870
},
{
"epoch": 1.74,
"grad_norm": 6.780529499053955,
"learning_rate": 0.0001304814814814815,
"loss": 0.3961,
"step": 1880
},
{
"epoch": 1.75,
"grad_norm": 3.5800728797912598,
"learning_rate": 0.00013011111111111112,
"loss": 0.6663,
"step": 1890
},
{
"epoch": 1.76,
"grad_norm": 0.45919543504714966,
"learning_rate": 0.00012974074074074073,
"loss": 0.7023,
"step": 1900
},
{
"epoch": 1.77,
"grad_norm": 0.7412591576576233,
"learning_rate": 0.00012937037037037038,
"loss": 0.379,
"step": 1910
},
{
"epoch": 1.78,
"grad_norm": 10.062888145446777,
"learning_rate": 0.00012900000000000002,
"loss": 0.6595,
"step": 1920
},
{
"epoch": 1.79,
"grad_norm": 0.1417950540781021,
"learning_rate": 0.00012862962962962963,
"loss": 0.7735,
"step": 1930
},
{
"epoch": 1.8,
"grad_norm": 5.4311041831970215,
"learning_rate": 0.00012825925925925925,
"loss": 0.542,
"step": 1940
},
{
"epoch": 1.81,
"grad_norm": 15.764381408691406,
"learning_rate": 0.0001278888888888889,
"loss": 0.5839,
"step": 1950
},
{
"epoch": 1.81,
"grad_norm": 12.458526611328125,
"learning_rate": 0.00012751851851851854,
"loss": 0.7544,
"step": 1960
},
{
"epoch": 1.82,
"grad_norm": 7.4526519775390625,
"learning_rate": 0.00012714814814814815,
"loss": 0.5739,
"step": 1970
},
{
"epoch": 1.83,
"grad_norm": 0.40775883197784424,
"learning_rate": 0.00012677777777777777,
"loss": 0.6257,
"step": 1980
},
{
"epoch": 1.84,
"grad_norm": 0.317117303609848,
"learning_rate": 0.0001264074074074074,
"loss": 0.4599,
"step": 1990
},
{
"epoch": 1.85,
"grad_norm": 8.654322624206543,
"learning_rate": 0.00012603703703703705,
"loss": 0.6665,
"step": 2000
},
{
"epoch": 1.85,
"eval_accuracy": 0.787962962962963,
"eval_loss": 0.774786114692688,
"eval_runtime": 12.3656,
"eval_samples_per_second": 87.339,
"eval_steps_per_second": 10.917,
"step": 2000
},
{
"epoch": 1.86,
"grad_norm": 5.775906085968018,
"learning_rate": 0.00012566666666666667,
"loss": 0.586,
"step": 2010
},
{
"epoch": 1.87,
"grad_norm": 3.1729109287261963,
"learning_rate": 0.0001252962962962963,
"loss": 0.5933,
"step": 2020
},
{
"epoch": 1.88,
"grad_norm": 0.4010927677154541,
"learning_rate": 0.00012492592592592593,
"loss": 0.3058,
"step": 2030
},
{
"epoch": 1.89,
"grad_norm": 6.702044486999512,
"learning_rate": 0.00012455555555555557,
"loss": 0.7031,
"step": 2040
},
{
"epoch": 1.9,
"grad_norm": 12.502705574035645,
"learning_rate": 0.00012418518518518518,
"loss": 0.3493,
"step": 2050
},
{
"epoch": 1.91,
"grad_norm": 4.377738952636719,
"learning_rate": 0.00012381481481481483,
"loss": 0.3487,
"step": 2060
},
{
"epoch": 1.92,
"grad_norm": 0.11477731913328171,
"learning_rate": 0.00012344444444444444,
"loss": 0.4287,
"step": 2070
},
{
"epoch": 1.93,
"grad_norm": 0.11948081105947495,
"learning_rate": 0.00012307407407407409,
"loss": 0.2712,
"step": 2080
},
{
"epoch": 1.94,
"grad_norm": 0.1439143568277359,
"learning_rate": 0.0001227037037037037,
"loss": 0.4779,
"step": 2090
},
{
"epoch": 1.94,
"grad_norm": 4.081445693969727,
"learning_rate": 0.00012233333333333334,
"loss": 0.4548,
"step": 2100
},
{
"epoch": 1.95,
"grad_norm": 4.655524253845215,
"learning_rate": 0.00012196296296296297,
"loss": 0.2683,
"step": 2110
},
{
"epoch": 1.96,
"grad_norm": 6.8740010261535645,
"learning_rate": 0.00012159259259259259,
"loss": 0.8056,
"step": 2120
},
{
"epoch": 1.97,
"grad_norm": 0.4146583080291748,
"learning_rate": 0.00012122222222222223,
"loss": 0.715,
"step": 2130
},
{
"epoch": 1.98,
"grad_norm": 3.2763988971710205,
"learning_rate": 0.00012085185185185186,
"loss": 0.7085,
"step": 2140
},
{
"epoch": 1.99,
"grad_norm": 7.559572219848633,
"learning_rate": 0.00012048148148148149,
"loss": 0.8247,
"step": 2150
},
{
"epoch": 2.0,
"grad_norm": 1.2885911464691162,
"learning_rate": 0.0001201111111111111,
"loss": 0.3602,
"step": 2160
},
{
"epoch": 2.01,
"grad_norm": 11.56352424621582,
"learning_rate": 0.00011974074074074075,
"loss": 0.3884,
"step": 2170
},
{
"epoch": 2.02,
"grad_norm": 0.19463351368904114,
"learning_rate": 0.00011937037037037038,
"loss": 0.2351,
"step": 2180
},
{
"epoch": 2.03,
"grad_norm": 0.1924474686384201,
"learning_rate": 0.000119,
"loss": 0.7068,
"step": 2190
},
{
"epoch": 2.04,
"grad_norm": 0.45641255378723145,
"learning_rate": 0.00011862962962962965,
"loss": 0.3242,
"step": 2200
},
{
"epoch": 2.05,
"grad_norm": 0.9206830859184265,
"learning_rate": 0.00011825925925925926,
"loss": 0.1586,
"step": 2210
},
{
"epoch": 2.06,
"grad_norm": 2.6659185886383057,
"learning_rate": 0.0001178888888888889,
"loss": 0.2596,
"step": 2220
},
{
"epoch": 2.06,
"grad_norm": 27.92502212524414,
"learning_rate": 0.00011751851851851852,
"loss": 0.2333,
"step": 2230
},
{
"epoch": 2.07,
"grad_norm": 16.344820022583008,
"learning_rate": 0.00011714814814814817,
"loss": 0.5658,
"step": 2240
},
{
"epoch": 2.08,
"grad_norm": 18.95813751220703,
"learning_rate": 0.00011677777777777778,
"loss": 0.4515,
"step": 2250
},
{
"epoch": 2.09,
"grad_norm": 8.611042976379395,
"learning_rate": 0.00011640740740740741,
"loss": 0.2345,
"step": 2260
},
{
"epoch": 2.1,
"grad_norm": 0.0882779210805893,
"learning_rate": 0.00011603703703703704,
"loss": 0.1917,
"step": 2270
},
{
"epoch": 2.11,
"grad_norm": 0.47597736120224,
"learning_rate": 0.00011566666666666668,
"loss": 0.3882,
"step": 2280
},
{
"epoch": 2.12,
"grad_norm": 0.07633833587169647,
"learning_rate": 0.0001152962962962963,
"loss": 0.2515,
"step": 2290
},
{
"epoch": 2.13,
"grad_norm": 1.3004765510559082,
"learning_rate": 0.00011492592592592593,
"loss": 0.3138,
"step": 2300
},
{
"epoch": 2.14,
"grad_norm": 0.17043162882328033,
"learning_rate": 0.00011455555555555557,
"loss": 0.4406,
"step": 2310
},
{
"epoch": 2.15,
"grad_norm": 9.61899471282959,
"learning_rate": 0.0001141851851851852,
"loss": 0.4756,
"step": 2320
},
{
"epoch": 2.16,
"grad_norm": 14.96255874633789,
"learning_rate": 0.00011381481481481481,
"loss": 0.2049,
"step": 2330
},
{
"epoch": 2.17,
"grad_norm": 4.7120137214660645,
"learning_rate": 0.00011344444444444444,
"loss": 0.2767,
"step": 2340
},
{
"epoch": 2.18,
"grad_norm": 0.06632759422063828,
"learning_rate": 0.00011307407407407409,
"loss": 0.2453,
"step": 2350
},
{
"epoch": 2.19,
"grad_norm": 0.05220068618655205,
"learning_rate": 0.00011270370370370372,
"loss": 0.2655,
"step": 2360
},
{
"epoch": 2.19,
"grad_norm": 18.39328384399414,
"learning_rate": 0.00011233333333333333,
"loss": 0.6426,
"step": 2370
},
{
"epoch": 2.2,
"grad_norm": 6.286042213439941,
"learning_rate": 0.00011196296296296296,
"loss": 0.4363,
"step": 2380
},
{
"epoch": 2.21,
"grad_norm": 0.5384187698364258,
"learning_rate": 0.0001115925925925926,
"loss": 0.4998,
"step": 2390
},
{
"epoch": 2.22,
"grad_norm": 0.052328918129205704,
"learning_rate": 0.00011122222222222223,
"loss": 0.2833,
"step": 2400
},
{
"epoch": 2.23,
"grad_norm": 16.98727798461914,
"learning_rate": 0.00011085185185185185,
"loss": 0.7151,
"step": 2410
},
{
"epoch": 2.24,
"grad_norm": 4.248289108276367,
"learning_rate": 0.0001104814814814815,
"loss": 0.2527,
"step": 2420
},
{
"epoch": 2.25,
"grad_norm": 0.2333425134420395,
"learning_rate": 0.00011011111111111112,
"loss": 0.4196,
"step": 2430
},
{
"epoch": 2.26,
"grad_norm": 0.10220032185316086,
"learning_rate": 0.00010974074074074075,
"loss": 0.2859,
"step": 2440
},
{
"epoch": 2.27,
"grad_norm": 2.586373805999756,
"learning_rate": 0.00010937037037037036,
"loss": 0.0733,
"step": 2450
},
{
"epoch": 2.28,
"grad_norm": 0.18430662155151367,
"learning_rate": 0.000109,
"loss": 0.489,
"step": 2460
},
{
"epoch": 2.29,
"grad_norm": 0.06896654516458511,
"learning_rate": 0.00010862962962962964,
"loss": 0.2785,
"step": 2470
},
{
"epoch": 2.3,
"grad_norm": 2.3596293926239014,
"learning_rate": 0.00010825925925925927,
"loss": 0.1379,
"step": 2480
},
{
"epoch": 2.31,
"grad_norm": 0.13864585757255554,
"learning_rate": 0.00010788888888888888,
"loss": 0.3242,
"step": 2490
},
{
"epoch": 2.31,
"grad_norm": 0.3596765995025635,
"learning_rate": 0.00010751851851851852,
"loss": 0.3691,
"step": 2500
},
{
"epoch": 2.32,
"grad_norm": 1.0416500568389893,
"learning_rate": 0.00010714814814814815,
"loss": 0.4075,
"step": 2510
},
{
"epoch": 2.33,
"grad_norm": 1.7506141662597656,
"learning_rate": 0.00010677777777777778,
"loss": 0.5207,
"step": 2520
},
{
"epoch": 2.34,
"grad_norm": 4.200623989105225,
"learning_rate": 0.00010640740740740742,
"loss": 0.3859,
"step": 2530
},
{
"epoch": 2.35,
"grad_norm": 0.3842768669128418,
"learning_rate": 0.00010603703703703704,
"loss": 0.2929,
"step": 2540
},
{
"epoch": 2.36,
"grad_norm": 9.816132545471191,
"learning_rate": 0.00010566666666666667,
"loss": 0.3187,
"step": 2550
},
{
"epoch": 2.37,
"grad_norm": 0.5276467800140381,
"learning_rate": 0.0001052962962962963,
"loss": 0.5211,
"step": 2560
},
{
"epoch": 2.38,
"grad_norm": 2.977581262588501,
"learning_rate": 0.00010492592592592594,
"loss": 0.3046,
"step": 2570
},
{
"epoch": 2.39,
"grad_norm": 0.12905888259410858,
"learning_rate": 0.00010455555555555556,
"loss": 0.202,
"step": 2580
},
{
"epoch": 2.4,
"grad_norm": 15.567680358886719,
"learning_rate": 0.00010418518518518519,
"loss": 0.9993,
"step": 2590
},
{
"epoch": 2.41,
"grad_norm": 0.4216688573360443,
"learning_rate": 0.00010381481481481481,
"loss": 0.239,
"step": 2600
},
{
"epoch": 2.42,
"grad_norm": 0.46425601840019226,
"learning_rate": 0.00010344444444444446,
"loss": 0.3637,
"step": 2610
},
{
"epoch": 2.43,
"grad_norm": 11.700157165527344,
"learning_rate": 0.00010307407407407407,
"loss": 0.2115,
"step": 2620
},
{
"epoch": 2.44,
"grad_norm": 0.050183895975351334,
"learning_rate": 0.0001027037037037037,
"loss": 0.2299,
"step": 2630
},
{
"epoch": 2.44,
"grad_norm": 0.05202599614858627,
"learning_rate": 0.00010233333333333335,
"loss": 0.3084,
"step": 2640
},
{
"epoch": 2.45,
"grad_norm": 1.0749764442443848,
"learning_rate": 0.00010196296296296297,
"loss": 0.6547,
"step": 2650
},
{
"epoch": 2.46,
"grad_norm": 0.04489516839385033,
"learning_rate": 0.00010159259259259259,
"loss": 0.3082,
"step": 2660
},
{
"epoch": 2.47,
"grad_norm": 2.1335978507995605,
"learning_rate": 0.00010122222222222222,
"loss": 0.1922,
"step": 2670
},
{
"epoch": 2.48,
"grad_norm": 11.453554153442383,
"learning_rate": 0.00010085185185185186,
"loss": 0.1796,
"step": 2680
},
{
"epoch": 2.49,
"grad_norm": 4.8192243576049805,
"learning_rate": 0.00010048148148148149,
"loss": 0.2874,
"step": 2690
},
{
"epoch": 2.5,
"grad_norm": 16.574432373046875,
"learning_rate": 0.0001001111111111111,
"loss": 0.2686,
"step": 2700
},
{
"epoch": 2.51,
"grad_norm": 0.09453123062849045,
"learning_rate": 9.974074074074075e-05,
"loss": 0.4037,
"step": 2710
},
{
"epoch": 2.52,
"grad_norm": 0.24873816967010498,
"learning_rate": 9.937037037037038e-05,
"loss": 0.364,
"step": 2720
},
{
"epoch": 2.53,
"grad_norm": 26.02721405029297,
"learning_rate": 9.900000000000001e-05,
"loss": 0.465,
"step": 2730
},
{
"epoch": 2.54,
"grad_norm": 0.033142298460006714,
"learning_rate": 9.862962962962964e-05,
"loss": 0.271,
"step": 2740
},
{
"epoch": 2.55,
"grad_norm": 0.21852198243141174,
"learning_rate": 9.825925925925927e-05,
"loss": 0.3313,
"step": 2750
},
{
"epoch": 2.56,
"grad_norm": 0.09281805157661438,
"learning_rate": 9.78888888888889e-05,
"loss": 0.1279,
"step": 2760
},
{
"epoch": 2.56,
"grad_norm": 0.08570228517055511,
"learning_rate": 9.751851851851852e-05,
"loss": 0.3836,
"step": 2770
},
{
"epoch": 2.57,
"grad_norm": 7.437565326690674,
"learning_rate": 9.714814814814815e-05,
"loss": 0.1801,
"step": 2780
},
{
"epoch": 2.58,
"grad_norm": 0.05683588609099388,
"learning_rate": 9.677777777777778e-05,
"loss": 0.2252,
"step": 2790
},
{
"epoch": 2.59,
"grad_norm": 0.10955408215522766,
"learning_rate": 9.640740740740741e-05,
"loss": 0.2959,
"step": 2800
},
{
"epoch": 2.6,
"grad_norm": 0.06861086934804916,
"learning_rate": 9.603703703703704e-05,
"loss": 0.3204,
"step": 2810
},
{
"epoch": 2.61,
"grad_norm": 0.055710818618535995,
"learning_rate": 9.566666666666667e-05,
"loss": 0.0199,
"step": 2820
},
{
"epoch": 2.62,
"grad_norm": 0.07799094170331955,
"learning_rate": 9.52962962962963e-05,
"loss": 0.3671,
"step": 2830
},
{
"epoch": 2.63,
"grad_norm": 0.0612206906080246,
"learning_rate": 9.492592592592593e-05,
"loss": 0.104,
"step": 2840
},
{
"epoch": 2.64,
"grad_norm": 0.03648478537797928,
"learning_rate": 9.455555555555556e-05,
"loss": 0.1999,
"step": 2850
},
{
"epoch": 2.65,
"grad_norm": 0.14658460021018982,
"learning_rate": 9.418518518518519e-05,
"loss": 0.335,
"step": 2860
},
{
"epoch": 2.66,
"grad_norm": 0.10499215126037598,
"learning_rate": 9.381481481481482e-05,
"loss": 0.0321,
"step": 2870
},
{
"epoch": 2.67,
"grad_norm": 3.2149064540863037,
"learning_rate": 9.344444444444444e-05,
"loss": 0.5255,
"step": 2880
},
{
"epoch": 2.68,
"grad_norm": 0.059900399297475815,
"learning_rate": 9.307407407407407e-05,
"loss": 0.3072,
"step": 2890
},
{
"epoch": 2.69,
"grad_norm": 0.08851416409015656,
"learning_rate": 9.27037037037037e-05,
"loss": 0.0381,
"step": 2900
},
{
"epoch": 2.69,
"grad_norm": 0.10776031017303467,
"learning_rate": 9.233333333333333e-05,
"loss": 0.0099,
"step": 2910
},
{
"epoch": 2.7,
"grad_norm": 32.60137939453125,
"learning_rate": 9.196296296296296e-05,
"loss": 0.2336,
"step": 2920
},
{
"epoch": 2.71,
"grad_norm": 3.082794427871704,
"learning_rate": 9.159259259259259e-05,
"loss": 0.496,
"step": 2930
},
{
"epoch": 2.72,
"grad_norm": 4.368014335632324,
"learning_rate": 9.122222222222223e-05,
"loss": 0.2649,
"step": 2940
},
{
"epoch": 2.73,
"grad_norm": 18.354900360107422,
"learning_rate": 9.085185185185185e-05,
"loss": 0.2795,
"step": 2950
},
{
"epoch": 2.74,
"grad_norm": 19.77593231201172,
"learning_rate": 9.048148148148149e-05,
"loss": 0.205,
"step": 2960
},
{
"epoch": 2.75,
"grad_norm": 14.325284004211426,
"learning_rate": 9.011111111111111e-05,
"loss": 0.1761,
"step": 2970
},
{
"epoch": 2.76,
"grad_norm": 0.04269588738679886,
"learning_rate": 8.974074074074075e-05,
"loss": 0.2958,
"step": 2980
},
{
"epoch": 2.77,
"grad_norm": 10.502446174621582,
"learning_rate": 8.937037037037037e-05,
"loss": 0.3844,
"step": 2990
},
{
"epoch": 2.78,
"grad_norm": 0.06850555539131165,
"learning_rate": 8.900000000000001e-05,
"loss": 0.0161,
"step": 3000
},
{
"epoch": 2.78,
"eval_accuracy": 0.8481481481481481,
"eval_loss": 0.662264883518219,
"eval_runtime": 12.4432,
"eval_samples_per_second": 86.795,
"eval_steps_per_second": 10.849,
"step": 3000
},
{
"epoch": 2.79,
"grad_norm": 12.860281944274902,
"learning_rate": 8.862962962962962e-05,
"loss": 0.5996,
"step": 3010
},
{
"epoch": 2.8,
"grad_norm": 12.576947212219238,
"learning_rate": 8.825925925925927e-05,
"loss": 0.5588,
"step": 3020
},
{
"epoch": 2.81,
"grad_norm": 0.154628187417984,
"learning_rate": 8.78888888888889e-05,
"loss": 0.2931,
"step": 3030
},
{
"epoch": 2.81,
"grad_norm": 21.793437957763672,
"learning_rate": 8.751851851851853e-05,
"loss": 0.108,
"step": 3040
},
{
"epoch": 2.82,
"grad_norm": 0.14815708994865417,
"learning_rate": 8.714814814814815e-05,
"loss": 0.3091,
"step": 3050
},
{
"epoch": 2.83,
"grad_norm": 0.038052137941122055,
"learning_rate": 8.677777777777778e-05,
"loss": 0.1655,
"step": 3060
},
{
"epoch": 2.84,
"grad_norm": 0.049850016832351685,
"learning_rate": 8.640740740740741e-05,
"loss": 0.4173,
"step": 3070
},
{
"epoch": 2.85,
"grad_norm": 3.6258163452148438,
"learning_rate": 8.603703703703704e-05,
"loss": 0.3083,
"step": 3080
},
{
"epoch": 2.86,
"grad_norm": 0.14558164775371552,
"learning_rate": 8.566666666666667e-05,
"loss": 0.0754,
"step": 3090
},
{
"epoch": 2.87,
"grad_norm": 0.04760469123721123,
"learning_rate": 8.52962962962963e-05,
"loss": 0.2265,
"step": 3100
},
{
"epoch": 2.88,
"grad_norm": 0.03264734148979187,
"learning_rate": 8.492592592592593e-05,
"loss": 0.2033,
"step": 3110
},
{
"epoch": 2.89,
"grad_norm": 0.06131249666213989,
"learning_rate": 8.455555555555556e-05,
"loss": 0.0956,
"step": 3120
},
{
"epoch": 2.9,
"grad_norm": 1.4385136365890503,
"learning_rate": 8.418518518518519e-05,
"loss": 0.2025,
"step": 3130
},
{
"epoch": 2.91,
"grad_norm": 5.099593639373779,
"learning_rate": 8.381481481481483e-05,
"loss": 0.2737,
"step": 3140
},
{
"epoch": 2.92,
"grad_norm": 41.463233947753906,
"learning_rate": 8.344444444444445e-05,
"loss": 0.3401,
"step": 3150
},
{
"epoch": 2.93,
"grad_norm": 0.7718304991722107,
"learning_rate": 8.307407407407409e-05,
"loss": 0.1666,
"step": 3160
},
{
"epoch": 2.94,
"grad_norm": 0.29509657621383667,
"learning_rate": 8.27037037037037e-05,
"loss": 0.1498,
"step": 3170
},
{
"epoch": 2.94,
"grad_norm": 0.03002973273396492,
"learning_rate": 8.233333333333333e-05,
"loss": 0.1463,
"step": 3180
},
{
"epoch": 2.95,
"grad_norm": 0.06377755105495453,
"learning_rate": 8.196296296296296e-05,
"loss": 0.5395,
"step": 3190
},
{
"epoch": 2.96,
"grad_norm": 18.44507598876953,
"learning_rate": 8.159259259259259e-05,
"loss": 0.5493,
"step": 3200
},
{
"epoch": 2.97,
"grad_norm": 5.454731464385986,
"learning_rate": 8.122222222222222e-05,
"loss": 0.3929,
"step": 3210
},
{
"epoch": 2.98,
"grad_norm": 0.1575469821691513,
"learning_rate": 8.085185185185185e-05,
"loss": 0.3273,
"step": 3220
},
{
"epoch": 2.99,
"grad_norm": 0.09014039486646652,
"learning_rate": 8.048148148148148e-05,
"loss": 0.1845,
"step": 3230
},
{
"epoch": 3.0,
"grad_norm": 13.291876792907715,
"learning_rate": 8.011111111111111e-05,
"loss": 0.4388,
"step": 3240
},
{
"epoch": 3.01,
"grad_norm": 0.04203129932284355,
"learning_rate": 7.974074074074075e-05,
"loss": 0.1902,
"step": 3250
},
{
"epoch": 3.02,
"grad_norm": 0.04195088893175125,
"learning_rate": 7.937037037037037e-05,
"loss": 0.2616,
"step": 3260
},
{
"epoch": 3.03,
"grad_norm": 0.6606804132461548,
"learning_rate": 7.900000000000001e-05,
"loss": 0.1704,
"step": 3270
},
{
"epoch": 3.04,
"grad_norm": 0.10248692333698273,
"learning_rate": 7.862962962962962e-05,
"loss": 0.1673,
"step": 3280
},
{
"epoch": 3.05,
"grad_norm": 0.042431097477674484,
"learning_rate": 7.825925925925927e-05,
"loss": 0.1673,
"step": 3290
},
{
"epoch": 3.06,
"grad_norm": 1.6168256998062134,
"learning_rate": 7.788888888888888e-05,
"loss": 0.0368,
"step": 3300
},
{
"epoch": 3.06,
"grad_norm": 0.03584853559732437,
"learning_rate": 7.751851851851853e-05,
"loss": 0.0465,
"step": 3310
},
{
"epoch": 3.07,
"grad_norm": 0.37480175495147705,
"learning_rate": 7.714814814814814e-05,
"loss": 0.1106,
"step": 3320
},
{
"epoch": 3.08,
"grad_norm": 0.044489577412605286,
"learning_rate": 7.677777777777778e-05,
"loss": 0.103,
"step": 3330
},
{
"epoch": 3.09,
"grad_norm": 19.742586135864258,
"learning_rate": 7.640740740740741e-05,
"loss": 0.3641,
"step": 3340
},
{
"epoch": 3.1,
"grad_norm": 0.0765174850821495,
"learning_rate": 7.603703703703704e-05,
"loss": 0.0124,
"step": 3350
},
{
"epoch": 3.11,
"grad_norm": 0.025397051125764847,
"learning_rate": 7.566666666666667e-05,
"loss": 0.2347,
"step": 3360
},
{
"epoch": 3.12,
"grad_norm": 0.03231077268719673,
"learning_rate": 7.52962962962963e-05,
"loss": 0.0082,
"step": 3370
},
{
"epoch": 3.13,
"grad_norm": 0.03690877929329872,
"learning_rate": 7.492592592592593e-05,
"loss": 0.0561,
"step": 3380
},
{
"epoch": 3.14,
"grad_norm": 3.9507839679718018,
"learning_rate": 7.455555555555556e-05,
"loss": 0.3722,
"step": 3390
},
{
"epoch": 3.15,
"grad_norm": 0.06623607873916626,
"learning_rate": 7.418518518518519e-05,
"loss": 0.2332,
"step": 3400
},
{
"epoch": 3.16,
"grad_norm": 5.913718223571777,
"learning_rate": 7.381481481481482e-05,
"loss": 0.1747,
"step": 3410
},
{
"epoch": 3.17,
"grad_norm": 0.61083984375,
"learning_rate": 7.344444444444445e-05,
"loss": 0.0924,
"step": 3420
},
{
"epoch": 3.18,
"grad_norm": 0.03572462126612663,
"learning_rate": 7.307407407407408e-05,
"loss": 0.2629,
"step": 3430
},
{
"epoch": 3.19,
"grad_norm": 0.7170370817184448,
"learning_rate": 7.27037037037037e-05,
"loss": 0.116,
"step": 3440
},
{
"epoch": 3.19,
"grad_norm": 0.02990008518099785,
"learning_rate": 7.233333333333335e-05,
"loss": 0.011,
"step": 3450
},
{
"epoch": 3.2,
"grad_norm": 0.01911913976073265,
"learning_rate": 7.196296296296296e-05,
"loss": 0.0563,
"step": 3460
},
{
"epoch": 3.21,
"grad_norm": 0.030100274831056595,
"learning_rate": 7.15925925925926e-05,
"loss": 0.0066,
"step": 3470
},
{
"epoch": 3.22,
"grad_norm": 0.12400100380182266,
"learning_rate": 7.122222222222222e-05,
"loss": 0.1642,
"step": 3480
},
{
"epoch": 3.23,
"grad_norm": 0.024309534579515457,
"learning_rate": 7.085185185185186e-05,
"loss": 0.3418,
"step": 3490
},
{
"epoch": 3.24,
"grad_norm": 0.013081849552690983,
"learning_rate": 7.048148148148148e-05,
"loss": 0.0802,
"step": 3500
},
{
"epoch": 3.25,
"grad_norm": 0.023276880383491516,
"learning_rate": 7.011111111111112e-05,
"loss": 0.0087,
"step": 3510
},
{
"epoch": 3.26,
"grad_norm": 0.28136885166168213,
"learning_rate": 6.974074074074074e-05,
"loss": 0.0759,
"step": 3520
},
{
"epoch": 3.27,
"grad_norm": 0.022475214675068855,
"learning_rate": 6.937037037037038e-05,
"loss": 0.1695,
"step": 3530
},
{
"epoch": 3.28,
"grad_norm": 13.397526741027832,
"learning_rate": 6.9e-05,
"loss": 0.2144,
"step": 3540
},
{
"epoch": 3.29,
"grad_norm": 1.7123217582702637,
"learning_rate": 6.862962962962964e-05,
"loss": 0.1239,
"step": 3550
},
{
"epoch": 3.3,
"grad_norm": 0.046623773872852325,
"learning_rate": 6.825925925925927e-05,
"loss": 0.1555,
"step": 3560
},
{
"epoch": 3.31,
"grad_norm": 3.4634435176849365,
"learning_rate": 6.788888888888888e-05,
"loss": 0.1094,
"step": 3570
},
{
"epoch": 3.31,
"grad_norm": 0.02147137187421322,
"learning_rate": 6.751851851851853e-05,
"loss": 0.2513,
"step": 3580
},
{
"epoch": 3.32,
"grad_norm": 0.03033963218331337,
"learning_rate": 6.714814814814814e-05,
"loss": 0.0916,
"step": 3590
},
{
"epoch": 3.33,
"grad_norm": 0.037080105394124985,
"learning_rate": 6.677777777777779e-05,
"loss": 0.3951,
"step": 3600
},
{
"epoch": 3.34,
"grad_norm": 0.019203362986445427,
"learning_rate": 6.64074074074074e-05,
"loss": 0.005,
"step": 3610
},
{
"epoch": 3.35,
"grad_norm": 0.02684640698134899,
"learning_rate": 6.603703703703704e-05,
"loss": 0.0067,
"step": 3620
},
{
"epoch": 3.36,
"grad_norm": 31.32074737548828,
"learning_rate": 6.566666666666666e-05,
"loss": 0.062,
"step": 3630
},
{
"epoch": 3.37,
"grad_norm": 0.023628873750567436,
"learning_rate": 6.52962962962963e-05,
"loss": 0.167,
"step": 3640
},
{
"epoch": 3.38,
"grad_norm": 0.02283358946442604,
"learning_rate": 6.492592592592593e-05,
"loss": 0.0166,
"step": 3650
},
{
"epoch": 3.39,
"grad_norm": 0.02163301594555378,
"learning_rate": 6.455555555555556e-05,
"loss": 0.0134,
"step": 3660
},
{
"epoch": 3.4,
"grad_norm": 24.740894317626953,
"learning_rate": 6.418518518518519e-05,
"loss": 0.0466,
"step": 3670
},
{
"epoch": 3.41,
"grad_norm": 0.06638414412736893,
"learning_rate": 6.381481481481482e-05,
"loss": 0.0059,
"step": 3680
},
{
"epoch": 3.42,
"grad_norm": 0.02026531845331192,
"learning_rate": 6.344444444444445e-05,
"loss": 0.1199,
"step": 3690
},
{
"epoch": 3.43,
"grad_norm": 0.01748637668788433,
"learning_rate": 6.307407407407408e-05,
"loss": 0.012,
"step": 3700
},
{
"epoch": 3.44,
"grad_norm": 0.023274874314665794,
"learning_rate": 6.27037037037037e-05,
"loss": 0.2315,
"step": 3710
},
{
"epoch": 3.44,
"grad_norm": 0.02449820004403591,
"learning_rate": 6.233333333333334e-05,
"loss": 0.0658,
"step": 3720
},
{
"epoch": 3.45,
"grad_norm": 0.022532064467668533,
"learning_rate": 6.196296296296296e-05,
"loss": 0.1057,
"step": 3730
},
{
"epoch": 3.46,
"grad_norm": 0.10987967252731323,
"learning_rate": 6.15925925925926e-05,
"loss": 0.1278,
"step": 3740
},
{
"epoch": 3.47,
"grad_norm": 9.465861320495605,
"learning_rate": 6.122222222222222e-05,
"loss": 0.1739,
"step": 3750
},
{
"epoch": 3.48,
"grad_norm": 0.0271518062800169,
"learning_rate": 6.085185185185186e-05,
"loss": 0.2296,
"step": 3760
},
{
"epoch": 3.49,
"grad_norm": 13.017244338989258,
"learning_rate": 6.048148148148148e-05,
"loss": 0.0416,
"step": 3770
},
{
"epoch": 3.5,
"grad_norm": 0.015440110117197037,
"learning_rate": 6.011111111111112e-05,
"loss": 0.0419,
"step": 3780
},
{
"epoch": 3.51,
"grad_norm": 0.06865392625331879,
"learning_rate": 5.974074074074074e-05,
"loss": 0.198,
"step": 3790
},
{
"epoch": 3.52,
"grad_norm": 0.030324600636959076,
"learning_rate": 5.9370370370370375e-05,
"loss": 0.0366,
"step": 3800
},
{
"epoch": 3.53,
"grad_norm": 2.1887781620025635,
"learning_rate": 5.9e-05,
"loss": 0.289,
"step": 3810
},
{
"epoch": 3.54,
"grad_norm": 0.01846599206328392,
"learning_rate": 5.8629629629629633e-05,
"loss": 0.005,
"step": 3820
},
{
"epoch": 3.55,
"grad_norm": 0.5803767442703247,
"learning_rate": 5.8259259259259256e-05,
"loss": 0.0062,
"step": 3830
},
{
"epoch": 3.56,
"grad_norm": 31.179725646972656,
"learning_rate": 5.788888888888889e-05,
"loss": 0.0432,
"step": 3840
},
{
"epoch": 3.56,
"grad_norm": 0.019040146842598915,
"learning_rate": 5.7518518518518514e-05,
"loss": 0.0236,
"step": 3850
},
{
"epoch": 3.57,
"grad_norm": 38.02296447753906,
"learning_rate": 5.714814814814815e-05,
"loss": 0.2169,
"step": 3860
},
{
"epoch": 3.58,
"grad_norm": 0.014890924096107483,
"learning_rate": 5.6777777777777786e-05,
"loss": 0.0126,
"step": 3870
},
{
"epoch": 3.59,
"grad_norm": 7.889443397521973,
"learning_rate": 5.640740740740741e-05,
"loss": 0.0889,
"step": 3880
},
{
"epoch": 3.6,
"grad_norm": 0.03283142298460007,
"learning_rate": 5.6037037037037044e-05,
"loss": 0.0036,
"step": 3890
},
{
"epoch": 3.61,
"grad_norm": 0.035356394946575165,
"learning_rate": 5.566666666666667e-05,
"loss": 0.0111,
"step": 3900
},
{
"epoch": 3.62,
"grad_norm": 1.5505471229553223,
"learning_rate": 5.52962962962963e-05,
"loss": 0.0056,
"step": 3910
},
{
"epoch": 3.63,
"grad_norm": 1.363088607788086,
"learning_rate": 5.4925925925925925e-05,
"loss": 0.0044,
"step": 3920
},
{
"epoch": 3.64,
"grad_norm": 3.6997830867767334,
"learning_rate": 5.455555555555556e-05,
"loss": 0.391,
"step": 3930
},
{
"epoch": 3.65,
"grad_norm": 0.014362264424562454,
"learning_rate": 5.4185185185185183e-05,
"loss": 0.0977,
"step": 3940
},
{
"epoch": 3.66,
"grad_norm": 21.31338119506836,
"learning_rate": 5.381481481481482e-05,
"loss": 0.5591,
"step": 3950
},
{
"epoch": 3.67,
"grad_norm": 4.652730941772461,
"learning_rate": 5.3444444444444455e-05,
"loss": 0.1585,
"step": 3960
},
{
"epoch": 3.68,
"grad_norm": 0.03227533772587776,
"learning_rate": 5.307407407407408e-05,
"loss": 0.1463,
"step": 3970
},
{
"epoch": 3.69,
"grad_norm": 0.1887240707874298,
"learning_rate": 5.2703703703703714e-05,
"loss": 0.0893,
"step": 3980
},
{
"epoch": 3.69,
"grad_norm": 0.011262119747698307,
"learning_rate": 5.237037037037037e-05,
"loss": 0.0547,
"step": 3990
},
{
"epoch": 3.7,
"grad_norm": 0.03117945045232773,
"learning_rate": 5.2000000000000004e-05,
"loss": 0.0069,
"step": 4000
},
{
"epoch": 3.7,
"eval_accuracy": 0.8583333333333333,
"eval_loss": 0.6438960433006287,
"eval_runtime": 12.5109,
"eval_samples_per_second": 86.325,
"eval_steps_per_second": 10.791,
"step": 4000
},
{
"epoch": 3.71,
"grad_norm": 0.012212943285703659,
"learning_rate": 5.1629629629629626e-05,
"loss": 0.0214,
"step": 4010
},
{
"epoch": 3.72,
"grad_norm": 0.08047550171613693,
"learning_rate": 5.125925925925926e-05,
"loss": 0.0622,
"step": 4020
},
{
"epoch": 3.73,
"grad_norm": 0.024356622248888016,
"learning_rate": 5.0888888888888884e-05,
"loss": 0.3073,
"step": 4030
},
{
"epoch": 3.74,
"grad_norm": 0.018961375579237938,
"learning_rate": 5.051851851851852e-05,
"loss": 0.0051,
"step": 4040
},
{
"epoch": 3.75,
"grad_norm": 0.014591037295758724,
"learning_rate": 5.0148148148148156e-05,
"loss": 0.1283,
"step": 4050
},
{
"epoch": 3.76,
"grad_norm": 0.02201053500175476,
"learning_rate": 4.977777777777778e-05,
"loss": 0.1009,
"step": 4060
},
{
"epoch": 3.77,
"grad_norm": 2.3703818321228027,
"learning_rate": 4.940740740740741e-05,
"loss": 0.0084,
"step": 4070
},
{
"epoch": 3.78,
"grad_norm": 0.04111458361148834,
"learning_rate": 4.903703703703704e-05,
"loss": 0.006,
"step": 4080
},
{
"epoch": 3.79,
"grad_norm": 7.977302551269531,
"learning_rate": 4.866666666666667e-05,
"loss": 0.1365,
"step": 4090
},
{
"epoch": 3.8,
"grad_norm": 20.146554946899414,
"learning_rate": 4.82962962962963e-05,
"loss": 0.1093,
"step": 4100
},
{
"epoch": 3.81,
"grad_norm": 0.01854802295565605,
"learning_rate": 4.792592592592593e-05,
"loss": 0.1342,
"step": 4110
},
{
"epoch": 3.81,
"grad_norm": 0.3029710352420807,
"learning_rate": 4.755555555555556e-05,
"loss": 0.008,
"step": 4120
},
{
"epoch": 3.82,
"grad_norm": 0.012369997799396515,
"learning_rate": 4.718518518518519e-05,
"loss": 0.0048,
"step": 4130
},
{
"epoch": 3.83,
"grad_norm": 0.01679087057709694,
"learning_rate": 4.681481481481482e-05,
"loss": 0.0663,
"step": 4140
},
{
"epoch": 3.84,
"grad_norm": 3.0343573093414307,
"learning_rate": 4.644444444444445e-05,
"loss": 0.0128,
"step": 4150
},
{
"epoch": 3.85,
"grad_norm": 3.9064276218414307,
"learning_rate": 4.607407407407408e-05,
"loss": 0.2076,
"step": 4160
},
{
"epoch": 3.86,
"grad_norm": 0.010118911974132061,
"learning_rate": 4.5703703703703706e-05,
"loss": 0.0033,
"step": 4170
},
{
"epoch": 3.87,
"grad_norm": 0.020308103412389755,
"learning_rate": 4.5333333333333335e-05,
"loss": 0.1709,
"step": 4180
},
{
"epoch": 3.88,
"grad_norm": 0.027303216978907585,
"learning_rate": 4.496296296296297e-05,
"loss": 0.1362,
"step": 4190
},
{
"epoch": 3.89,
"grad_norm": 0.013376427814364433,
"learning_rate": 4.4592592592592594e-05,
"loss": 0.127,
"step": 4200
},
{
"epoch": 3.9,
"grad_norm": 0.015483486466109753,
"learning_rate": 4.422222222222222e-05,
"loss": 0.2179,
"step": 4210
},
{
"epoch": 3.91,
"grad_norm": 0.030794810503721237,
"learning_rate": 4.385185185185185e-05,
"loss": 0.2987,
"step": 4220
},
{
"epoch": 3.92,
"grad_norm": 54.30390167236328,
"learning_rate": 4.348148148148148e-05,
"loss": 0.1011,
"step": 4230
},
{
"epoch": 3.93,
"grad_norm": 0.01496837381273508,
"learning_rate": 4.311111111111111e-05,
"loss": 0.1438,
"step": 4240
},
{
"epoch": 3.94,
"grad_norm": 0.01796271651983261,
"learning_rate": 4.274074074074074e-05,
"loss": 0.031,
"step": 4250
},
{
"epoch": 3.94,
"grad_norm": 0.016814138740301132,
"learning_rate": 4.237037037037037e-05,
"loss": 0.1035,
"step": 4260
},
{
"epoch": 3.95,
"grad_norm": 0.015431849285960197,
"learning_rate": 4.2e-05,
"loss": 0.0032,
"step": 4270
},
{
"epoch": 3.96,
"grad_norm": 0.8801605701446533,
"learning_rate": 4.162962962962963e-05,
"loss": 0.0656,
"step": 4280
},
{
"epoch": 3.97,
"grad_norm": 0.06061088666319847,
"learning_rate": 4.1259259259259256e-05,
"loss": 0.2061,
"step": 4290
},
{
"epoch": 3.98,
"grad_norm": 0.014112889766693115,
"learning_rate": 4.088888888888889e-05,
"loss": 0.5104,
"step": 4300
},
{
"epoch": 3.99,
"grad_norm": 0.03528869152069092,
"learning_rate": 4.051851851851852e-05,
"loss": 0.0036,
"step": 4310
},
{
"epoch": 4.0,
"grad_norm": 0.016192223876714706,
"learning_rate": 4.014814814814815e-05,
"loss": 0.0061,
"step": 4320
},
{
"epoch": 4.01,
"grad_norm": 0.01523031946271658,
"learning_rate": 3.977777777777778e-05,
"loss": 0.0086,
"step": 4330
},
{
"epoch": 4.02,
"grad_norm": 0.016884813085198402,
"learning_rate": 3.940740740740741e-05,
"loss": 0.0054,
"step": 4340
},
{
"epoch": 4.03,
"grad_norm": 0.018689552322030067,
"learning_rate": 3.903703703703704e-05,
"loss": 0.0134,
"step": 4350
},
{
"epoch": 4.04,
"grad_norm": 0.013242964632809162,
"learning_rate": 3.866666666666667e-05,
"loss": 0.0033,
"step": 4360
},
{
"epoch": 4.05,
"grad_norm": 0.013729427009820938,
"learning_rate": 3.8296296296296296e-05,
"loss": 0.1552,
"step": 4370
},
{
"epoch": 4.06,
"grad_norm": 0.018114076927304268,
"learning_rate": 3.7925925925925925e-05,
"loss": 0.0308,
"step": 4380
},
{
"epoch": 4.06,
"grad_norm": 0.010893206112086773,
"learning_rate": 3.7555555555555554e-05,
"loss": 0.0166,
"step": 4390
},
{
"epoch": 4.07,
"grad_norm": 0.03100050427019596,
"learning_rate": 3.718518518518519e-05,
"loss": 0.1192,
"step": 4400
},
{
"epoch": 4.08,
"grad_norm": 0.016582641750574112,
"learning_rate": 3.681481481481482e-05,
"loss": 0.1121,
"step": 4410
},
{
"epoch": 4.09,
"grad_norm": 0.031242402270436287,
"learning_rate": 3.644444444444445e-05,
"loss": 0.0056,
"step": 4420
},
{
"epoch": 4.1,
"grad_norm": 0.015217756852507591,
"learning_rate": 3.607407407407408e-05,
"loss": 0.1173,
"step": 4430
},
{
"epoch": 4.11,
"grad_norm": 0.013042649254202843,
"learning_rate": 3.570370370370371e-05,
"loss": 0.1124,
"step": 4440
},
{
"epoch": 4.12,
"grad_norm": 0.02006973698735237,
"learning_rate": 3.5333333333333336e-05,
"loss": 0.0036,
"step": 4450
},
{
"epoch": 4.13,
"grad_norm": 34.0983772277832,
"learning_rate": 3.4962962962962965e-05,
"loss": 0.0182,
"step": 4460
},
{
"epoch": 4.14,
"grad_norm": 0.016817888244986534,
"learning_rate": 3.4592592592592594e-05,
"loss": 0.0034,
"step": 4470
},
{
"epoch": 4.15,
"grad_norm": 0.035831551998853683,
"learning_rate": 3.4222222222222224e-05,
"loss": 0.0134,
"step": 4480
},
{
"epoch": 4.16,
"grad_norm": 0.010470203123986721,
"learning_rate": 3.385185185185185e-05,
"loss": 0.0035,
"step": 4490
},
{
"epoch": 4.17,
"grad_norm": 0.007853644900023937,
"learning_rate": 3.348148148148148e-05,
"loss": 0.0061,
"step": 4500
},
{
"epoch": 4.18,
"grad_norm": 0.03988093510270119,
"learning_rate": 3.311111111111112e-05,
"loss": 0.0177,
"step": 4510
},
{
"epoch": 4.19,
"grad_norm": 0.010890784673392773,
"learning_rate": 3.274074074074075e-05,
"loss": 0.0051,
"step": 4520
},
{
"epoch": 4.19,
"grad_norm": 0.4988226294517517,
"learning_rate": 3.2370370370370376e-05,
"loss": 0.0046,
"step": 4530
},
{
"epoch": 4.2,
"grad_norm": 0.02439550869166851,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.0097,
"step": 4540
},
{
"epoch": 4.21,
"grad_norm": 0.1934681236743927,
"learning_rate": 3.1629629629629634e-05,
"loss": 0.004,
"step": 4550
},
{
"epoch": 4.22,
"grad_norm": 0.021830545738339424,
"learning_rate": 3.1259259259259264e-05,
"loss": 0.0054,
"step": 4560
},
{
"epoch": 4.23,
"grad_norm": 0.015792693942785263,
"learning_rate": 3.088888888888889e-05,
"loss": 0.0029,
"step": 4570
},
{
"epoch": 4.24,
"grad_norm": 0.08612110465765,
"learning_rate": 3.0518518518518515e-05,
"loss": 0.0466,
"step": 4580
},
{
"epoch": 4.25,
"grad_norm": 0.3596853017807007,
"learning_rate": 3.0148148148148148e-05,
"loss": 0.0042,
"step": 4590
},
{
"epoch": 4.26,
"grad_norm": 0.017882276326417923,
"learning_rate": 2.9777777777777777e-05,
"loss": 0.0038,
"step": 4600
},
{
"epoch": 4.27,
"grad_norm": 0.0303801279515028,
"learning_rate": 2.9407407407407413e-05,
"loss": 0.1058,
"step": 4610
},
{
"epoch": 4.28,
"grad_norm": 0.015595002099871635,
"learning_rate": 2.9037037037037042e-05,
"loss": 0.0153,
"step": 4620
},
{
"epoch": 4.29,
"grad_norm": 0.019567418843507767,
"learning_rate": 2.8666666666666668e-05,
"loss": 0.0027,
"step": 4630
},
{
"epoch": 4.3,
"grad_norm": 0.012446213513612747,
"learning_rate": 2.8296296296296297e-05,
"loss": 0.0028,
"step": 4640
},
{
"epoch": 4.31,
"grad_norm": 0.008696034550666809,
"learning_rate": 2.7925925925925926e-05,
"loss": 0.005,
"step": 4650
},
{
"epoch": 4.31,
"grad_norm": 0.01078906748443842,
"learning_rate": 2.7555555555555555e-05,
"loss": 0.0028,
"step": 4660
},
{
"epoch": 4.32,
"grad_norm": 47.83900833129883,
"learning_rate": 2.7185185185185184e-05,
"loss": 0.0598,
"step": 4670
},
{
"epoch": 4.33,
"grad_norm": 0.012699645012617111,
"learning_rate": 2.6814814814814814e-05,
"loss": 0.0031,
"step": 4680
},
{
"epoch": 4.34,
"grad_norm": 0.12236062437295914,
"learning_rate": 2.6444444444444443e-05,
"loss": 0.0035,
"step": 4690
},
{
"epoch": 4.35,
"grad_norm": 0.0082387775182724,
"learning_rate": 2.6074074074074072e-05,
"loss": 0.0038,
"step": 4700
},
{
"epoch": 4.36,
"grad_norm": 0.015677435323596,
"learning_rate": 2.5703703703703708e-05,
"loss": 0.003,
"step": 4710
},
{
"epoch": 4.37,
"grad_norm": 0.08713872730731964,
"learning_rate": 2.5333333333333337e-05,
"loss": 0.0027,
"step": 4720
},
{
"epoch": 4.38,
"grad_norm": 0.009761589579284191,
"learning_rate": 2.4962962962962963e-05,
"loss": 0.0174,
"step": 4730
},
{
"epoch": 4.39,
"grad_norm": 0.009880335070192814,
"learning_rate": 2.4592592592592595e-05,
"loss": 0.1331,
"step": 4740
},
{
"epoch": 4.4,
"grad_norm": 16.022714614868164,
"learning_rate": 2.4222222222222224e-05,
"loss": 0.2331,
"step": 4750
},
{
"epoch": 4.41,
"grad_norm": 0.02941802889108658,
"learning_rate": 2.3851851851851854e-05,
"loss": 0.0026,
"step": 4760
},
{
"epoch": 4.42,
"grad_norm": 0.013288211077451706,
"learning_rate": 2.3481481481481483e-05,
"loss": 0.0031,
"step": 4770
},
{
"epoch": 4.43,
"grad_norm": 54.48020935058594,
"learning_rate": 2.3111111111111112e-05,
"loss": 0.0491,
"step": 4780
},
{
"epoch": 4.44,
"grad_norm": 0.026326889172196388,
"learning_rate": 2.2740740740740744e-05,
"loss": 0.1355,
"step": 4790
},
{
"epoch": 4.44,
"grad_norm": 0.010915144346654415,
"learning_rate": 2.2370370370370374e-05,
"loss": 0.0704,
"step": 4800
},
{
"epoch": 4.45,
"grad_norm": 0.013166334480047226,
"learning_rate": 2.2000000000000003e-05,
"loss": 0.0234,
"step": 4810
},
{
"epoch": 4.46,
"grad_norm": 26.486459732055664,
"learning_rate": 2.162962962962963e-05,
"loss": 0.1362,
"step": 4820
},
{
"epoch": 4.47,
"grad_norm": 0.008863969705998898,
"learning_rate": 2.1259259259259258e-05,
"loss": 0.0024,
"step": 4830
},
{
"epoch": 4.48,
"grad_norm": 0.07663685083389282,
"learning_rate": 2.088888888888889e-05,
"loss": 0.0029,
"step": 4840
},
{
"epoch": 4.49,
"grad_norm": 0.06865613162517548,
"learning_rate": 2.051851851851852e-05,
"loss": 0.1504,
"step": 4850
},
{
"epoch": 4.5,
"grad_norm": 0.012820614501833916,
"learning_rate": 2.014814814814815e-05,
"loss": 0.0027,
"step": 4860
},
{
"epoch": 4.51,
"grad_norm": 0.008527039550244808,
"learning_rate": 1.9777777777777778e-05,
"loss": 0.0128,
"step": 4870
},
{
"epoch": 4.52,
"grad_norm": 0.018568038940429688,
"learning_rate": 1.9407407407407407e-05,
"loss": 0.0641,
"step": 4880
},
{
"epoch": 4.53,
"grad_norm": 0.011374670080840588,
"learning_rate": 1.903703703703704e-05,
"loss": 0.1139,
"step": 4890
},
{
"epoch": 4.54,
"grad_norm": 0.01039755716919899,
"learning_rate": 1.866666666666667e-05,
"loss": 0.1868,
"step": 4900
},
{
"epoch": 4.55,
"grad_norm": 0.01569090038537979,
"learning_rate": 1.8296296296296298e-05,
"loss": 0.1442,
"step": 4910
},
{
"epoch": 4.56,
"grad_norm": 0.012444186955690384,
"learning_rate": 1.7925925925925927e-05,
"loss": 0.0059,
"step": 4920
},
{
"epoch": 4.56,
"grad_norm": 0.008781096898019314,
"learning_rate": 1.7555555555555556e-05,
"loss": 0.0022,
"step": 4930
},
{
"epoch": 4.57,
"grad_norm": 8.792170524597168,
"learning_rate": 1.7185185185185185e-05,
"loss": 0.0866,
"step": 4940
},
{
"epoch": 4.58,
"grad_norm": 0.2720411717891693,
"learning_rate": 1.6814814814814818e-05,
"loss": 0.0031,
"step": 4950
},
{
"epoch": 4.59,
"grad_norm": 0.010680705308914185,
"learning_rate": 1.6444444444444447e-05,
"loss": 0.1525,
"step": 4960
},
{
"epoch": 4.6,
"grad_norm": 0.010680481791496277,
"learning_rate": 1.6074074074074076e-05,
"loss": 0.032,
"step": 4970
},
{
"epoch": 4.61,
"grad_norm": 0.033251844346523285,
"learning_rate": 1.5703703703703705e-05,
"loss": 0.0023,
"step": 4980
},
{
"epoch": 4.62,
"grad_norm": 0.009812863543629646,
"learning_rate": 1.5333333333333334e-05,
"loss": 0.0906,
"step": 4990
},
{
"epoch": 4.63,
"grad_norm": 1.3783074617385864,
"learning_rate": 1.4962962962962965e-05,
"loss": 0.0071,
"step": 5000
},
{
"epoch": 4.63,
"eval_accuracy": 0.8814814814814815,
"eval_loss": 0.5871204733848572,
"eval_runtime": 12.3387,
"eval_samples_per_second": 87.53,
"eval_steps_per_second": 10.941,
"step": 5000
},
{
"epoch": 4.64,
"grad_norm": 0.014345189556479454,
"learning_rate": 1.4592592592592594e-05,
"loss": 0.0025,
"step": 5010
},
{
"epoch": 4.65,
"grad_norm": 0.011392067186534405,
"learning_rate": 1.4222222222222224e-05,
"loss": 0.0028,
"step": 5020
},
{
"epoch": 4.66,
"grad_norm": 0.010690497234463692,
"learning_rate": 1.3851851851851853e-05,
"loss": 0.0036,
"step": 5030
},
{
"epoch": 4.67,
"grad_norm": 0.029059024527668953,
"learning_rate": 1.348148148148148e-05,
"loss": 0.0026,
"step": 5040
},
{
"epoch": 4.68,
"grad_norm": 0.012555091641843319,
"learning_rate": 1.3111111111111113e-05,
"loss": 0.0022,
"step": 5050
},
{
"epoch": 4.69,
"grad_norm": 3.5421626567840576,
"learning_rate": 1.2740740740740742e-05,
"loss": 0.1878,
"step": 5060
},
{
"epoch": 4.69,
"grad_norm": 0.01144416257739067,
"learning_rate": 1.2370370370370371e-05,
"loss": 0.0052,
"step": 5070
},
{
"epoch": 4.7,
"grad_norm": 0.025753198191523552,
"learning_rate": 1.2e-05,
"loss": 0.0027,
"step": 5080
},
{
"epoch": 4.71,
"grad_norm": 0.009280257858335972,
"learning_rate": 1.1629629629629631e-05,
"loss": 0.0022,
"step": 5090
},
{
"epoch": 4.72,
"grad_norm": 0.022236941382288933,
"learning_rate": 1.125925925925926e-05,
"loss": 0.0024,
"step": 5100
},
{
"epoch": 4.73,
"grad_norm": 0.0111949872225523,
"learning_rate": 1.088888888888889e-05,
"loss": 0.0055,
"step": 5110
},
{
"epoch": 4.74,
"grad_norm": 0.011432080529630184,
"learning_rate": 1.0518518518518519e-05,
"loss": 0.003,
"step": 5120
},
{
"epoch": 4.75,
"grad_norm": 0.01230093464255333,
"learning_rate": 1.0148148148148148e-05,
"loss": 0.0027,
"step": 5130
},
{
"epoch": 4.76,
"grad_norm": 0.013996107503771782,
"learning_rate": 9.777777777777779e-06,
"loss": 0.0028,
"step": 5140
},
{
"epoch": 4.77,
"grad_norm": 0.00684409961104393,
"learning_rate": 9.407407407407408e-06,
"loss": 0.0023,
"step": 5150
},
{
"epoch": 4.78,
"grad_norm": 0.010829826816916466,
"learning_rate": 9.037037037037037e-06,
"loss": 0.0023,
"step": 5160
},
{
"epoch": 4.79,
"grad_norm": 0.01750708743929863,
"learning_rate": 8.666666666666668e-06,
"loss": 0.0029,
"step": 5170
},
{
"epoch": 4.8,
"grad_norm": 8.395956039428711,
"learning_rate": 8.296296296296297e-06,
"loss": 0.0894,
"step": 5180
},
{
"epoch": 4.81,
"grad_norm": 0.012726990506052971,
"learning_rate": 7.925925925925926e-06,
"loss": 0.0027,
"step": 5190
},
{
"epoch": 4.81,
"grad_norm": 0.08154325932264328,
"learning_rate": 7.555555555555556e-06,
"loss": 0.0025,
"step": 5200
},
{
"epoch": 4.82,
"grad_norm": 0.01044376753270626,
"learning_rate": 7.185185185185185e-06,
"loss": 0.0021,
"step": 5210
},
{
"epoch": 4.83,
"grad_norm": 56.14961624145508,
"learning_rate": 6.814814814814815e-06,
"loss": 0.1173,
"step": 5220
},
{
"epoch": 4.84,
"grad_norm": 0.017923351377248764,
"learning_rate": 6.4444444444444445e-06,
"loss": 0.0037,
"step": 5230
},
{
"epoch": 4.85,
"grad_norm": 0.011229267343878746,
"learning_rate": 6.0740740740740745e-06,
"loss": 0.0024,
"step": 5240
},
{
"epoch": 4.86,
"grad_norm": 0.009692909196019173,
"learning_rate": 5.7037037037037045e-06,
"loss": 0.0027,
"step": 5250
},
{
"epoch": 4.87,
"grad_norm": 0.007118642330169678,
"learning_rate": 5.333333333333334e-06,
"loss": 0.0028,
"step": 5260
},
{
"epoch": 4.88,
"grad_norm": 0.019179262220859528,
"learning_rate": 4.962962962962963e-06,
"loss": 0.0026,
"step": 5270
},
{
"epoch": 4.89,
"grad_norm": 0.06468287855386734,
"learning_rate": 4.592592592592593e-06,
"loss": 0.1189,
"step": 5280
},
{
"epoch": 4.9,
"grad_norm": 11.788923263549805,
"learning_rate": 4.222222222222223e-06,
"loss": 0.182,
"step": 5290
},
{
"epoch": 4.91,
"grad_norm": 0.018768593668937683,
"learning_rate": 3.851851851851852e-06,
"loss": 0.0036,
"step": 5300
},
{
"epoch": 4.92,
"grad_norm": 0.010481791570782661,
"learning_rate": 3.4814814814814816e-06,
"loss": 0.0022,
"step": 5310
},
{
"epoch": 4.93,
"grad_norm": 5.62942361831665,
"learning_rate": 3.111111111111111e-06,
"loss": 0.2351,
"step": 5320
},
{
"epoch": 4.94,
"grad_norm": 0.009041093289852142,
"learning_rate": 2.7407407407407407e-06,
"loss": 0.0031,
"step": 5330
},
{
"epoch": 4.94,
"grad_norm": 0.008371179923415184,
"learning_rate": 2.3703703703703703e-06,
"loss": 0.0021,
"step": 5340
},
{
"epoch": 4.95,
"grad_norm": 0.012409684248268604,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.003,
"step": 5350
},
{
"epoch": 4.96,
"grad_norm": 0.011011340655386448,
"learning_rate": 1.6296296296296295e-06,
"loss": 0.0516,
"step": 5360
},
{
"epoch": 4.97,
"grad_norm": 0.009352467954158783,
"learning_rate": 1.2592592592592593e-06,
"loss": 0.0025,
"step": 5370
},
{
"epoch": 4.98,
"grad_norm": 7.559159278869629,
"learning_rate": 8.88888888888889e-07,
"loss": 0.0127,
"step": 5380
},
{
"epoch": 4.99,
"grad_norm": 0.017836574465036392,
"learning_rate": 5.185185185185186e-07,
"loss": 0.0029,
"step": 5390
},
{
"epoch": 5.0,
"grad_norm": 0.04105929285287857,
"learning_rate": 1.4814814814814815e-07,
"loss": 0.0028,
"step": 5400
},
{
"epoch": 5.0,
"step": 5400,
"total_flos": 1.6739319895474176e+18,
"train_loss": 0.41893966080965817,
"train_runtime": 684.6117,
"train_samples_per_second": 31.551,
"train_steps_per_second": 7.888
}
],
"logging_steps": 10,
"max_steps": 5400,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 3000,
"total_flos": 1.6739319895474176e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}