vit-base-hate-meme / trainer_state.json
{
"best_metric": 0.6965588331222534,
"best_model_checkpoint": "./vit-base-hate-meme/checkpoint-532",
"epoch": 8.0,
"eval_steps": 500,
"global_step": 4256,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 6.126795768737793,
"learning_rate": 4.000000000000001e-06,
"loss": 0.7608,
"step": 10
},
{
"epoch": 0.04,
"grad_norm": 5.251371383666992,
"learning_rate": 8.000000000000001e-06,
"loss": 0.7016,
"step": 20
},
{
"epoch": 0.06,
"grad_norm": 5.532265663146973,
"learning_rate": 1.2e-05,
"loss": 0.6429,
"step": 30
},
{
"epoch": 0.08,
"grad_norm": 5.4727888107299805,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.7306,
"step": 40
},
{
"epoch": 0.09,
"grad_norm": 5.2745771408081055,
"learning_rate": 2e-05,
"loss": 0.7079,
"step": 50
},
{
"epoch": 0.11,
"grad_norm": 4.616114616394043,
"learning_rate": 2.36e-05,
"loss": 0.671,
"step": 60
},
{
"epoch": 0.13,
"grad_norm": 4.487009048461914,
"learning_rate": 2.7600000000000003e-05,
"loss": 0.6954,
"step": 70
},
{
"epoch": 0.15,
"grad_norm": 6.081264495849609,
"learning_rate": 3.16e-05,
"loss": 0.6968,
"step": 80
},
{
"epoch": 0.17,
"grad_norm": 4.151589393615723,
"learning_rate": 3.56e-05,
"loss": 0.6523,
"step": 90
},
{
"epoch": 0.19,
"grad_norm": 4.241616249084473,
"learning_rate": 3.960000000000001e-05,
"loss": 0.6785,
"step": 100
},
{
"epoch": 0.21,
"grad_norm": 4.035406112670898,
"learning_rate": 4.36e-05,
"loss": 0.6779,
"step": 110
},
{
"epoch": 0.23,
"grad_norm": 4.655460357666016,
"learning_rate": 4.76e-05,
"loss": 0.6418,
"step": 120
},
{
"epoch": 0.24,
"grad_norm": 3.683321952819824,
"learning_rate": 5.16e-05,
"loss": 0.6415,
"step": 130
},
{
"epoch": 0.26,
"grad_norm": 5.034379959106445,
"learning_rate": 5.560000000000001e-05,
"loss": 0.7168,
"step": 140
},
{
"epoch": 0.28,
"grad_norm": 7.606465816497803,
"learning_rate": 5.96e-05,
"loss": 0.7146,
"step": 150
},
{
"epoch": 0.3,
"grad_norm": 3.5599539279937744,
"learning_rate": 6.36e-05,
"loss": 0.6495,
"step": 160
},
{
"epoch": 0.32,
"grad_norm": 8.060068130493164,
"learning_rate": 6.76e-05,
"loss": 0.6523,
"step": 170
},
{
"epoch": 0.34,
"grad_norm": 2.229842185974121,
"learning_rate": 7.16e-05,
"loss": 0.6252,
"step": 180
},
{
"epoch": 0.36,
"grad_norm": 2.0872910022735596,
"learning_rate": 7.560000000000001e-05,
"loss": 0.6729,
"step": 190
},
{
"epoch": 0.38,
"grad_norm": 5.176510334014893,
"learning_rate": 7.960000000000001e-05,
"loss": 0.6706,
"step": 200
},
{
"epoch": 0.39,
"grad_norm": 3.0015692710876465,
"learning_rate": 8.36e-05,
"loss": 0.6279,
"step": 210
},
{
"epoch": 0.41,
"grad_norm": 3.563908576965332,
"learning_rate": 8.76e-05,
"loss": 0.5934,
"step": 220
},
{
"epoch": 0.43,
"grad_norm": 4.134763240814209,
"learning_rate": 9.16e-05,
"loss": 0.6436,
"step": 230
},
{
"epoch": 0.45,
"grad_norm": 6.163305759429932,
"learning_rate": 9.56e-05,
"loss": 0.6519,
"step": 240
},
{
"epoch": 0.47,
"grad_norm": 4.374207019805908,
"learning_rate": 9.960000000000001e-05,
"loss": 0.6637,
"step": 250
},
{
"epoch": 0.49,
"grad_norm": 4.880618572235107,
"learning_rate": 0.00010360000000000001,
"loss": 0.6486,
"step": 260
},
{
"epoch": 0.51,
"grad_norm": 1.537030577659607,
"learning_rate": 0.00010760000000000001,
"loss": 0.622,
"step": 270
},
{
"epoch": 0.53,
"grad_norm": 3.5295188426971436,
"learning_rate": 0.00011160000000000002,
"loss": 0.6234,
"step": 280
},
{
"epoch": 0.55,
"grad_norm": 1.5816266536712646,
"learning_rate": 0.00011559999999999999,
"loss": 0.6321,
"step": 290
},
{
"epoch": 0.56,
"grad_norm": 1.2675076723098755,
"learning_rate": 0.0001192,
"loss": 0.7066,
"step": 300
},
{
"epoch": 0.58,
"grad_norm": 2.379913568496704,
"learning_rate": 0.0001232,
"loss": 0.6625,
"step": 310
},
{
"epoch": 0.6,
"grad_norm": 2.8097121715545654,
"learning_rate": 0.0001272,
"loss": 0.6018,
"step": 320
},
{
"epoch": 0.62,
"grad_norm": 6.209559917449951,
"learning_rate": 0.00013120000000000002,
"loss": 0.6946,
"step": 330
},
{
"epoch": 0.64,
"grad_norm": 4.973850727081299,
"learning_rate": 0.0001352,
"loss": 0.6724,
"step": 340
},
{
"epoch": 0.66,
"grad_norm": 1.5640188455581665,
"learning_rate": 0.0001392,
"loss": 0.6227,
"step": 350
},
{
"epoch": 0.68,
"grad_norm": 3.425607442855835,
"learning_rate": 0.0001432,
"loss": 0.6515,
"step": 360
},
{
"epoch": 0.7,
"grad_norm": 2.7312209606170654,
"learning_rate": 0.0001472,
"loss": 0.6477,
"step": 370
},
{
"epoch": 0.71,
"grad_norm": 2.8867738246917725,
"learning_rate": 0.00015120000000000002,
"loss": 0.6713,
"step": 380
},
{
"epoch": 0.73,
"grad_norm": 0.647611141204834,
"learning_rate": 0.0001552,
"loss": 0.6554,
"step": 390
},
{
"epoch": 0.75,
"grad_norm": 1.5883524417877197,
"learning_rate": 0.00015920000000000002,
"loss": 0.6394,
"step": 400
},
{
"epoch": 0.77,
"grad_norm": 3.988485097885132,
"learning_rate": 0.0001632,
"loss": 0.6706,
"step": 410
},
{
"epoch": 0.79,
"grad_norm": 7.253389358520508,
"learning_rate": 0.0001672,
"loss": 0.6658,
"step": 420
},
{
"epoch": 0.81,
"grad_norm": 0.7725067138671875,
"learning_rate": 0.00017120000000000001,
"loss": 0.7349,
"step": 430
},
{
"epoch": 0.83,
"grad_norm": 3.858313798904419,
"learning_rate": 0.0001752,
"loss": 0.5985,
"step": 440
},
{
"epoch": 0.85,
"grad_norm": 3.0623714923858643,
"learning_rate": 0.00017920000000000002,
"loss": 0.5857,
"step": 450
},
{
"epoch": 0.86,
"grad_norm": 2.89859938621521,
"learning_rate": 0.0001832,
"loss": 0.7285,
"step": 460
},
{
"epoch": 0.88,
"grad_norm": 5.508333683013916,
"learning_rate": 0.00018720000000000002,
"loss": 0.643,
"step": 470
},
{
"epoch": 0.9,
"grad_norm": 1.188262939453125,
"learning_rate": 0.0001912,
"loss": 0.664,
"step": 480
},
{
"epoch": 0.92,
"grad_norm": 2.729386329650879,
"learning_rate": 0.0001952,
"loss": 0.622,
"step": 490
},
{
"epoch": 0.94,
"grad_norm": 1.0896118879318237,
"learning_rate": 0.00019920000000000002,
"loss": 0.5781,
"step": 500
},
{
"epoch": 0.96,
"grad_norm": 1.856227159500122,
"learning_rate": 0.0001995740149094782,
"loss": 0.6579,
"step": 510
},
{
"epoch": 0.98,
"grad_norm": 0.9613101482391357,
"learning_rate": 0.0001990415335463259,
"loss": 0.6003,
"step": 520
},
{
"epoch": 1.0,
"grad_norm": 2.3655054569244385,
"learning_rate": 0.0001985090521831736,
"loss": 0.6407,
"step": 530
},
{
"epoch": 1.0,
"eval_loss": 0.6965588331222534,
"eval_runtime": 5.5301,
"eval_samples_per_second": 90.415,
"eval_steps_per_second": 2.893,
"step": 532
},
{
"epoch": 1.02,
"grad_norm": 4.0246710777282715,
"learning_rate": 0.0001979765708200213,
"loss": 0.6144,
"step": 540
},
{
"epoch": 1.03,
"grad_norm": 0.397265762090683,
"learning_rate": 0.000197444089456869,
"loss": 0.7252,
"step": 550
},
{
"epoch": 1.05,
"grad_norm": 6.538491249084473,
"learning_rate": 0.00019691160809371674,
"loss": 0.6467,
"step": 560
},
{
"epoch": 1.07,
"grad_norm": 5.099808692932129,
"learning_rate": 0.00019637912673056444,
"loss": 0.6668,
"step": 570
},
{
"epoch": 1.09,
"grad_norm": 2.9582018852233887,
"learning_rate": 0.00019584664536741216,
"loss": 0.6395,
"step": 580
},
{
"epoch": 1.11,
"grad_norm": 6.390412330627441,
"learning_rate": 0.00019531416400425986,
"loss": 0.6392,
"step": 590
},
{
"epoch": 1.13,
"grad_norm": 0.7637470960617065,
"learning_rate": 0.00019478168264110756,
"loss": 0.6227,
"step": 600
},
{
"epoch": 1.15,
"grad_norm": 3.5703885555267334,
"learning_rate": 0.00019424920127795528,
"loss": 0.6981,
"step": 610
},
{
"epoch": 1.17,
"grad_norm": 0.8668873310089111,
"learning_rate": 0.000193716719914803,
"loss": 0.6175,
"step": 620
},
{
"epoch": 1.18,
"grad_norm": 2.3757476806640625,
"learning_rate": 0.0001931842385516507,
"loss": 0.6463,
"step": 630
},
{
"epoch": 1.2,
"grad_norm": 0.4950419068336487,
"learning_rate": 0.0001926517571884984,
"loss": 0.6656,
"step": 640
},
{
"epoch": 1.22,
"grad_norm": 0.4995131492614746,
"learning_rate": 0.00019211927582534613,
"loss": 0.6087,
"step": 650
},
{
"epoch": 1.24,
"grad_norm": 4.226231098175049,
"learning_rate": 0.00019158679446219383,
"loss": 0.6768,
"step": 660
},
{
"epoch": 1.26,
"grad_norm": 4.077276706695557,
"learning_rate": 0.00019105431309904153,
"loss": 0.6551,
"step": 670
},
{
"epoch": 1.28,
"grad_norm": 0.6081713438034058,
"learning_rate": 0.00019052183173588926,
"loss": 0.6334,
"step": 680
},
{
"epoch": 1.3,
"grad_norm": 1.841720461845398,
"learning_rate": 0.00018998935037273698,
"loss": 0.6541,
"step": 690
},
{
"epoch": 1.32,
"grad_norm": 0.833160400390625,
"learning_rate": 0.00018945686900958468,
"loss": 0.5732,
"step": 700
},
{
"epoch": 1.33,
"grad_norm": 1.2434287071228027,
"learning_rate": 0.00018892438764643238,
"loss": 0.7005,
"step": 710
},
{
"epoch": 1.35,
"grad_norm": 1.5411890745162964,
"learning_rate": 0.0001883919062832801,
"loss": 0.604,
"step": 720
},
{
"epoch": 1.37,
"grad_norm": 0.5615927577018738,
"learning_rate": 0.0001878594249201278,
"loss": 0.6982,
"step": 730
},
{
"epoch": 1.39,
"grad_norm": 1.1369315385818481,
"learning_rate": 0.0001873269435569755,
"loss": 0.6451,
"step": 740
},
{
"epoch": 1.41,
"grad_norm": 0.7013240456581116,
"learning_rate": 0.00018679446219382323,
"loss": 0.6697,
"step": 750
},
{
"epoch": 1.43,
"grad_norm": 0.7162796854972839,
"learning_rate": 0.00018626198083067095,
"loss": 0.6793,
"step": 760
},
{
"epoch": 1.45,
"grad_norm": 0.7037357091903687,
"learning_rate": 0.00018572949946751865,
"loss": 0.6546,
"step": 770
},
{
"epoch": 1.47,
"grad_norm": 3.4446752071380615,
"learning_rate": 0.00018519701810436635,
"loss": 0.5589,
"step": 780
},
{
"epoch": 1.48,
"grad_norm": 2.896850109100342,
"learning_rate": 0.00018466453674121408,
"loss": 0.6368,
"step": 790
},
{
"epoch": 1.5,
"grad_norm": 0.8148677945137024,
"learning_rate": 0.00018413205537806177,
"loss": 0.6817,
"step": 800
},
{
"epoch": 1.52,
"grad_norm": 1.7753351926803589,
"learning_rate": 0.00018359957401490947,
"loss": 0.6491,
"step": 810
},
{
"epoch": 1.54,
"grad_norm": 1.8540890216827393,
"learning_rate": 0.0001830670926517572,
"loss": 0.6578,
"step": 820
},
{
"epoch": 1.56,
"grad_norm": 1.7155886888504028,
"learning_rate": 0.00018253461128860492,
"loss": 0.6473,
"step": 830
},
{
"epoch": 1.58,
"grad_norm": 0.892581582069397,
"learning_rate": 0.00018200212992545262,
"loss": 0.6021,
"step": 840
},
{
"epoch": 1.6,
"grad_norm": 0.7859013080596924,
"learning_rate": 0.00018146964856230032,
"loss": 0.6064,
"step": 850
},
{
"epoch": 1.62,
"grad_norm": 6.423700332641602,
"learning_rate": 0.00018093716719914805,
"loss": 0.612,
"step": 860
},
{
"epoch": 1.64,
"grad_norm": 1.622660517692566,
"learning_rate": 0.00018040468583599575,
"loss": 0.6688,
"step": 870
},
{
"epoch": 1.65,
"grad_norm": 1.050557255744934,
"learning_rate": 0.00017987220447284344,
"loss": 0.6549,
"step": 880
},
{
"epoch": 1.67,
"grad_norm": 3.1418724060058594,
"learning_rate": 0.00017933972310969117,
"loss": 0.6104,
"step": 890
},
{
"epoch": 1.69,
"grad_norm": 3.0087502002716064,
"learning_rate": 0.0001788072417465389,
"loss": 0.6451,
"step": 900
},
{
"epoch": 1.71,
"grad_norm": 3.000000476837158,
"learning_rate": 0.0001782747603833866,
"loss": 0.6247,
"step": 910
},
{
"epoch": 1.73,
"grad_norm": 1.215047001838684,
"learning_rate": 0.0001777422790202343,
"loss": 0.6418,
"step": 920
},
{
"epoch": 1.75,
"grad_norm": 2.694948434829712,
"learning_rate": 0.00017720979765708202,
"loss": 0.657,
"step": 930
},
{
"epoch": 1.77,
"grad_norm": 3.017652988433838,
"learning_rate": 0.00017667731629392972,
"loss": 0.6534,
"step": 940
},
{
"epoch": 1.79,
"grad_norm": 3.342782497406006,
"learning_rate": 0.00017614483493077742,
"loss": 0.66,
"step": 950
},
{
"epoch": 1.8,
"grad_norm": 3.4681971073150635,
"learning_rate": 0.00017561235356762514,
"loss": 0.6681,
"step": 960
},
{
"epoch": 1.82,
"grad_norm": 0.638871431350708,
"learning_rate": 0.00017507987220447287,
"loss": 0.6062,
"step": 970
},
{
"epoch": 1.84,
"grad_norm": 0.615951657295227,
"learning_rate": 0.00017454739084132057,
"loss": 0.5745,
"step": 980
},
{
"epoch": 1.86,
"grad_norm": 2.9999630451202393,
"learning_rate": 0.00017401490947816826,
"loss": 0.6137,
"step": 990
},
{
"epoch": 1.88,
"grad_norm": 3.212564468383789,
"learning_rate": 0.000173482428115016,
"loss": 0.5914,
"step": 1000
},
{
"epoch": 1.9,
"grad_norm": 2.063244104385376,
"learning_rate": 0.0001729499467518637,
"loss": 0.5904,
"step": 1010
},
{
"epoch": 1.92,
"grad_norm": 4.356505393981934,
"learning_rate": 0.00017241746538871139,
"loss": 0.6904,
"step": 1020
},
{
"epoch": 1.94,
"grad_norm": 2.85433292388916,
"learning_rate": 0.0001718849840255591,
"loss": 0.6621,
"step": 1030
},
{
"epoch": 1.95,
"grad_norm": 0.8630966544151306,
"learning_rate": 0.00017135250266240684,
"loss": 0.6591,
"step": 1040
},
{
"epoch": 1.97,
"grad_norm": 7.148132801055908,
"learning_rate": 0.00017082002129925454,
"loss": 0.6562,
"step": 1050
},
{
"epoch": 1.99,
"grad_norm": 0.8249239921569824,
"learning_rate": 0.00017028753993610223,
"loss": 0.5847,
"step": 1060
},
{
"epoch": 2.0,
"eval_loss": 0.7979968190193176,
"eval_runtime": 4.8835,
"eval_samples_per_second": 102.387,
"eval_steps_per_second": 3.276,
"step": 1064
},
{
"epoch": 2.01,
"grad_norm": 0.5970304012298584,
"learning_rate": 0.00016975505857294996,
"loss": 0.6398,
"step": 1070
},
{
"epoch": 2.03,
"grad_norm": 2.641237258911133,
"learning_rate": 0.00016922257720979766,
"loss": 0.631,
"step": 1080
},
{
"epoch": 2.05,
"grad_norm": 0.5415599942207336,
"learning_rate": 0.00016869009584664536,
"loss": 0.6337,
"step": 1090
},
{
"epoch": 2.07,
"grad_norm": 5.68765115737915,
"learning_rate": 0.00016815761448349308,
"loss": 0.6154,
"step": 1100
},
{
"epoch": 2.09,
"grad_norm": 3.6129837036132812,
"learning_rate": 0.0001676251331203408,
"loss": 0.68,
"step": 1110
},
{
"epoch": 2.11,
"grad_norm": 0.3377436399459839,
"learning_rate": 0.0001670926517571885,
"loss": 0.6461,
"step": 1120
},
{
"epoch": 2.12,
"grad_norm": 5.473901271820068,
"learning_rate": 0.0001665601703940362,
"loss": 0.6727,
"step": 1130
},
{
"epoch": 2.14,
"grad_norm": 0.664064884185791,
"learning_rate": 0.00016602768903088393,
"loss": 0.5921,
"step": 1140
},
{
"epoch": 2.16,
"grad_norm": 1.5953819751739502,
"learning_rate": 0.00016549520766773163,
"loss": 0.6052,
"step": 1150
},
{
"epoch": 2.18,
"grad_norm": 2.9201242923736572,
"learning_rate": 0.00016496272630457933,
"loss": 0.6278,
"step": 1160
},
{
"epoch": 2.2,
"grad_norm": 2.1254889965057373,
"learning_rate": 0.00016443024494142705,
"loss": 0.5732,
"step": 1170
},
{
"epoch": 2.22,
"grad_norm": 4.827600002288818,
"learning_rate": 0.00016389776357827478,
"loss": 0.5687,
"step": 1180
},
{
"epoch": 2.24,
"grad_norm": 4.18930721282959,
"learning_rate": 0.00016336528221512248,
"loss": 0.5923,
"step": 1190
},
{
"epoch": 2.26,
"grad_norm": 3.059593439102173,
"learning_rate": 0.00016283280085197018,
"loss": 0.6697,
"step": 1200
},
{
"epoch": 2.27,
"grad_norm": 0.7229830622673035,
"learning_rate": 0.0001623003194888179,
"loss": 0.5956,
"step": 1210
},
{
"epoch": 2.29,
"grad_norm": 0.41750237345695496,
"learning_rate": 0.0001617678381256656,
"loss": 0.6543,
"step": 1220
},
{
"epoch": 2.31,
"grad_norm": 1.0170073509216309,
"learning_rate": 0.0001612353567625133,
"loss": 0.6222,
"step": 1230
},
{
"epoch": 2.33,
"grad_norm": 1.94419527053833,
"learning_rate": 0.00016070287539936103,
"loss": 0.6379,
"step": 1240
},
{
"epoch": 2.35,
"grad_norm": 2.2258355617523193,
"learning_rate": 0.00016017039403620875,
"loss": 0.5887,
"step": 1250
},
{
"epoch": 2.37,
"grad_norm": 1.861475944519043,
"learning_rate": 0.00015963791267305645,
"loss": 0.5848,
"step": 1260
},
{
"epoch": 2.39,
"grad_norm": 1.9490504264831543,
"learning_rate": 0.00015910543130990418,
"loss": 0.6283,
"step": 1270
},
{
"epoch": 2.41,
"grad_norm": 0.9739388823509216,
"learning_rate": 0.00015857294994675187,
"loss": 0.6027,
"step": 1280
},
{
"epoch": 2.42,
"grad_norm": 1.012650966644287,
"learning_rate": 0.00015804046858359957,
"loss": 0.5685,
"step": 1290
},
{
"epoch": 2.44,
"grad_norm": 0.5264813303947449,
"learning_rate": 0.00015750798722044727,
"loss": 0.619,
"step": 1300
},
{
"epoch": 2.46,
"grad_norm": 1.5455741882324219,
"learning_rate": 0.00015697550585729502,
"loss": 0.6441,
"step": 1310
},
{
"epoch": 2.48,
"grad_norm": 1.331289529800415,
"learning_rate": 0.00015644302449414272,
"loss": 0.6583,
"step": 1320
},
{
"epoch": 2.5,
"grad_norm": 1.2682970762252808,
"learning_rate": 0.00015591054313099042,
"loss": 0.6573,
"step": 1330
},
{
"epoch": 2.52,
"grad_norm": 2.107792377471924,
"learning_rate": 0.00015537806176783815,
"loss": 0.6044,
"step": 1340
},
{
"epoch": 2.54,
"grad_norm": 0.5763486623764038,
"learning_rate": 0.00015484558040468585,
"loss": 0.5277,
"step": 1350
},
{
"epoch": 2.56,
"grad_norm": 5.478113651275635,
"learning_rate": 0.00015431309904153354,
"loss": 0.6821,
"step": 1360
},
{
"epoch": 2.58,
"grad_norm": 3.25028395652771,
"learning_rate": 0.00015378061767838124,
"loss": 0.6411,
"step": 1370
},
{
"epoch": 2.59,
"grad_norm": 1.4305381774902344,
"learning_rate": 0.000153248136315229,
"loss": 0.6273,
"step": 1380
},
{
"epoch": 2.61,
"grad_norm": 1.6666404008865356,
"learning_rate": 0.0001527156549520767,
"loss": 0.6636,
"step": 1390
},
{
"epoch": 2.63,
"grad_norm": 4.375661373138428,
"learning_rate": 0.0001521831735889244,
"loss": 0.5989,
"step": 1400
},
{
"epoch": 2.65,
"grad_norm": 3.827533483505249,
"learning_rate": 0.00015165069222577212,
"loss": 0.5814,
"step": 1410
},
{
"epoch": 2.67,
"grad_norm": 2.938608169555664,
"learning_rate": 0.00015111821086261982,
"loss": 0.5575,
"step": 1420
},
{
"epoch": 2.69,
"grad_norm": 2.966648817062378,
"learning_rate": 0.00015058572949946752,
"loss": 0.623,
"step": 1430
},
{
"epoch": 2.71,
"grad_norm": 3.7637674808502197,
"learning_rate": 0.00015005324813631521,
"loss": 0.5792,
"step": 1440
},
{
"epoch": 2.73,
"grad_norm": 2.175443172454834,
"learning_rate": 0.00014952076677316297,
"loss": 0.6161,
"step": 1450
},
{
"epoch": 2.74,
"grad_norm": 0.6871302723884583,
"learning_rate": 0.00014898828541001067,
"loss": 0.6145,
"step": 1460
},
{
"epoch": 2.76,
"grad_norm": 2.2254390716552734,
"learning_rate": 0.00014845580404685836,
"loss": 0.5978,
"step": 1470
},
{
"epoch": 2.78,
"grad_norm": 2.5566136837005615,
"learning_rate": 0.0001479233226837061,
"loss": 0.6676,
"step": 1480
},
{
"epoch": 2.8,
"grad_norm": 1.8439075946807861,
"learning_rate": 0.0001473908413205538,
"loss": 0.6386,
"step": 1490
},
{
"epoch": 2.82,
"grad_norm": 2.6792383193969727,
"learning_rate": 0.0001468583599574015,
"loss": 0.6562,
"step": 1500
},
{
"epoch": 2.84,
"grad_norm": 0.6229275465011597,
"learning_rate": 0.0001463258785942492,
"loss": 0.5914,
"step": 1510
},
{
"epoch": 2.86,
"grad_norm": 5.081364631652832,
"learning_rate": 0.00014579339723109694,
"loss": 0.5784,
"step": 1520
},
{
"epoch": 2.88,
"grad_norm": 4.535241603851318,
"learning_rate": 0.00014526091586794464,
"loss": 0.5823,
"step": 1530
},
{
"epoch": 2.89,
"grad_norm": 0.9447025060653687,
"learning_rate": 0.00014472843450479234,
"loss": 0.6573,
"step": 1540
},
{
"epoch": 2.91,
"grad_norm": 1.4323358535766602,
"learning_rate": 0.00014419595314164006,
"loss": 0.5481,
"step": 1550
},
{
"epoch": 2.93,
"grad_norm": 2.6219279766082764,
"learning_rate": 0.00014366347177848776,
"loss": 0.6331,
"step": 1560
},
{
"epoch": 2.95,
"grad_norm": 0.7964478135108948,
"learning_rate": 0.00014313099041533546,
"loss": 0.6071,
"step": 1570
},
{
"epoch": 2.97,
"grad_norm": 1.5989696979522705,
"learning_rate": 0.00014259850905218318,
"loss": 0.621,
"step": 1580
},
{
"epoch": 2.99,
"grad_norm": 2.2981741428375244,
"learning_rate": 0.0001420660276890309,
"loss": 0.6144,
"step": 1590
},
{
"epoch": 3.0,
"eval_loss": 0.7764111161231995,
"eval_runtime": 4.9574,
"eval_samples_per_second": 100.859,
"eval_steps_per_second": 3.227,
"step": 1596
},
{
"epoch": 3.01,
"grad_norm": 1.12291419506073,
"learning_rate": 0.0001415335463258786,
"loss": 0.6111,
"step": 1600
},
{
"epoch": 3.03,
"grad_norm": 3.5445947647094727,
"learning_rate": 0.0001410010649627263,
"loss": 0.5686,
"step": 1610
},
{
"epoch": 3.05,
"grad_norm": 5.601056098937988,
"learning_rate": 0.00014046858359957403,
"loss": 0.5344,
"step": 1620
},
{
"epoch": 3.06,
"grad_norm": 2.273033380508423,
"learning_rate": 0.00013993610223642173,
"loss": 0.5662,
"step": 1630
},
{
"epoch": 3.08,
"grad_norm": 0.932608962059021,
"learning_rate": 0.00013940362087326943,
"loss": 0.5788,
"step": 1640
},
{
"epoch": 3.1,
"grad_norm": 1.4313303232192993,
"learning_rate": 0.00013887113951011716,
"loss": 0.6199,
"step": 1650
},
{
"epoch": 3.12,
"grad_norm": 1.161855936050415,
"learning_rate": 0.00013833865814696488,
"loss": 0.5824,
"step": 1660
},
{
"epoch": 3.14,
"grad_norm": 3.128478527069092,
"learning_rate": 0.00013780617678381258,
"loss": 0.5774,
"step": 1670
},
{
"epoch": 3.16,
"grad_norm": 2.3241376876831055,
"learning_rate": 0.00013727369542066028,
"loss": 0.6157,
"step": 1680
},
{
"epoch": 3.18,
"grad_norm": 3.1155593395233154,
"learning_rate": 0.000136741214057508,
"loss": 0.6216,
"step": 1690
},
{
"epoch": 3.2,
"grad_norm": 1.408307433128357,
"learning_rate": 0.0001362087326943557,
"loss": 0.6149,
"step": 1700
},
{
"epoch": 3.21,
"grad_norm": 1.0534541606903076,
"learning_rate": 0.0001356762513312034,
"loss": 0.579,
"step": 1710
},
{
"epoch": 3.23,
"grad_norm": 1.5528515577316284,
"learning_rate": 0.00013514376996805113,
"loss": 0.5386,
"step": 1720
},
{
"epoch": 3.25,
"grad_norm": 2.1062605381011963,
"learning_rate": 0.00013461128860489885,
"loss": 0.6308,
"step": 1730
},
{
"epoch": 3.27,
"grad_norm": 2.3048410415649414,
"learning_rate": 0.00013407880724174655,
"loss": 0.6255,
"step": 1740
},
{
"epoch": 3.29,
"grad_norm": 3.4321556091308594,
"learning_rate": 0.00013354632587859425,
"loss": 0.6106,
"step": 1750
},
{
"epoch": 3.31,
"grad_norm": 1.6664164066314697,
"learning_rate": 0.00013301384451544198,
"loss": 0.5444,
"step": 1760
},
{
"epoch": 3.33,
"grad_norm": 2.9253435134887695,
"learning_rate": 0.00013248136315228967,
"loss": 0.5808,
"step": 1770
},
{
"epoch": 3.35,
"grad_norm": 2.220623731613159,
"learning_rate": 0.00013194888178913737,
"loss": 0.5715,
"step": 1780
},
{
"epoch": 3.36,
"grad_norm": 1.8884316682815552,
"learning_rate": 0.0001314164004259851,
"loss": 0.5269,
"step": 1790
},
{
"epoch": 3.38,
"grad_norm": 1.592698335647583,
"learning_rate": 0.00013088391906283282,
"loss": 0.5806,
"step": 1800
},
{
"epoch": 3.4,
"grad_norm": 1.9080153703689575,
"learning_rate": 0.00013035143769968052,
"loss": 0.6371,
"step": 1810
},
{
"epoch": 3.42,
"grad_norm": 2.2922542095184326,
"learning_rate": 0.00012981895633652822,
"loss": 0.6221,
"step": 1820
},
{
"epoch": 3.44,
"grad_norm": 1.9848778247833252,
"learning_rate": 0.00012928647497337595,
"loss": 0.5616,
"step": 1830
},
{
"epoch": 3.46,
"grad_norm": 3.399679660797119,
"learning_rate": 0.00012875399361022365,
"loss": 0.6284,
"step": 1840
},
{
"epoch": 3.48,
"grad_norm": 1.6504648923873901,
"learning_rate": 0.00012822151224707134,
"loss": 0.5542,
"step": 1850
},
{
"epoch": 3.5,
"grad_norm": 3.0508906841278076,
"learning_rate": 0.00012768903088391907,
"loss": 0.5928,
"step": 1860
},
{
"epoch": 3.52,
"grad_norm": 3.353853464126587,
"learning_rate": 0.0001271565495207668,
"loss": 0.5858,
"step": 1870
},
{
"epoch": 3.53,
"grad_norm": 3.4700353145599365,
"learning_rate": 0.0001266240681576145,
"loss": 0.6719,
"step": 1880
},
{
"epoch": 3.55,
"grad_norm": 0.9324420094490051,
"learning_rate": 0.0001260915867944622,
"loss": 0.5396,
"step": 1890
},
{
"epoch": 3.57,
"grad_norm": 1.1515653133392334,
"learning_rate": 0.00012555910543130992,
"loss": 0.6344,
"step": 1900
},
{
"epoch": 3.59,
"grad_norm": 1.1183910369873047,
"learning_rate": 0.00012502662406815762,
"loss": 0.5346,
"step": 1910
},
{
"epoch": 3.61,
"grad_norm": 0.988416850566864,
"learning_rate": 0.00012449414270500531,
"loss": 0.611,
"step": 1920
},
{
"epoch": 3.63,
"grad_norm": 2.163034439086914,
"learning_rate": 0.00012396166134185304,
"loss": 0.5806,
"step": 1930
},
{
"epoch": 3.65,
"grad_norm": 0.785860538482666,
"learning_rate": 0.00012342917997870077,
"loss": 0.5344,
"step": 1940
},
{
"epoch": 3.67,
"grad_norm": 1.1014069318771362,
"learning_rate": 0.00012289669861554846,
"loss": 0.5961,
"step": 1950
},
{
"epoch": 3.68,
"grad_norm": 1.5329209566116333,
"learning_rate": 0.00012236421725239616,
"loss": 0.5685,
"step": 1960
},
{
"epoch": 3.7,
"grad_norm": 2.057729721069336,
"learning_rate": 0.00012183173588924388,
"loss": 0.6254,
"step": 1970
},
{
"epoch": 3.72,
"grad_norm": 1.8107123374938965,
"learning_rate": 0.00012129925452609159,
"loss": 0.5715,
"step": 1980
},
{
"epoch": 3.74,
"grad_norm": 2.0086817741394043,
"learning_rate": 0.0001207667731629393,
"loss": 0.5606,
"step": 1990
},
{
"epoch": 3.76,
"grad_norm": 3.7626357078552246,
"learning_rate": 0.000120234291799787,
"loss": 0.5334,
"step": 2000
},
{
"epoch": 3.78,
"grad_norm": 3.1028711795806885,
"learning_rate": 0.00011970181043663474,
"loss": 0.6453,
"step": 2010
},
{
"epoch": 3.8,
"grad_norm": 1.0698882341384888,
"learning_rate": 0.00011916932907348244,
"loss": 0.5851,
"step": 2020
},
{
"epoch": 3.82,
"grad_norm": 2.4677796363830566,
"learning_rate": 0.00011863684771033015,
"loss": 0.6008,
"step": 2030
},
{
"epoch": 3.83,
"grad_norm": 1.6755846738815308,
"learning_rate": 0.00011810436634717785,
"loss": 0.5903,
"step": 2040
},
{
"epoch": 3.85,
"grad_norm": 1.8782827854156494,
"learning_rate": 0.00011757188498402556,
"loss": 0.6018,
"step": 2050
},
{
"epoch": 3.87,
"grad_norm": 1.1513298749923706,
"learning_rate": 0.00011703940362087327,
"loss": 0.5819,
"step": 2060
},
{
"epoch": 3.89,
"grad_norm": 2.3282463550567627,
"learning_rate": 0.000116506922257721,
"loss": 0.5476,
"step": 2070
},
{
"epoch": 3.91,
"grad_norm": 2.159614324569702,
"learning_rate": 0.00011597444089456871,
"loss": 0.5934,
"step": 2080
},
{
"epoch": 3.93,
"grad_norm": 1.6344852447509766,
"learning_rate": 0.00011544195953141641,
"loss": 0.4688,
"step": 2090
},
{
"epoch": 3.95,
"grad_norm": 1.4373761415481567,
"learning_rate": 0.00011490947816826412,
"loss": 0.5604,
"step": 2100
},
{
"epoch": 3.97,
"grad_norm": 2.425197124481201,
"learning_rate": 0.00011437699680511182,
"loss": 0.4934,
"step": 2110
},
{
"epoch": 3.98,
"grad_norm": 3.7428033351898193,
"learning_rate": 0.00011384451544195953,
"loss": 0.5307,
"step": 2120
},
{
"epoch": 4.0,
"eval_loss": 0.9912688136100769,
"eval_runtime": 5.4585,
"eval_samples_per_second": 91.6,
"eval_steps_per_second": 2.931,
"step": 2128
},
{
"epoch": 4.0,
"grad_norm": 1.4787501096725464,
"learning_rate": 0.00011331203407880724,
"loss": 0.5672,
"step": 2130
},
{
"epoch": 4.02,
"grad_norm": 3.0657687187194824,
"learning_rate": 0.00011277955271565497,
"loss": 0.4967,
"step": 2140
},
{
"epoch": 4.04,
"grad_norm": 1.8531616926193237,
"learning_rate": 0.00011224707135250268,
"loss": 0.4264,
"step": 2150
},
{
"epoch": 4.06,
"grad_norm": 2.5244009494781494,
"learning_rate": 0.00011171458998935038,
"loss": 0.5213,
"step": 2160
},
{
"epoch": 4.08,
"grad_norm": 1.9924266338348389,
"learning_rate": 0.00011118210862619809,
"loss": 0.5376,
"step": 2170
},
{
"epoch": 4.1,
"grad_norm": 1.3312138319015503,
"learning_rate": 0.0001106496272630458,
"loss": 0.5506,
"step": 2180
},
{
"epoch": 4.12,
"grad_norm": 0.9579734206199646,
"learning_rate": 0.0001101171458998935,
"loss": 0.4378,
"step": 2190
},
{
"epoch": 4.14,
"grad_norm": 1.8804982900619507,
"learning_rate": 0.00010958466453674121,
"loss": 0.4032,
"step": 2200
},
{
"epoch": 4.15,
"grad_norm": 1.5957534313201904,
"learning_rate": 0.00010905218317358894,
"loss": 0.5157,
"step": 2210
},
{
"epoch": 4.17,
"grad_norm": 3.0028767585754395,
"learning_rate": 0.00010851970181043665,
"loss": 0.5172,
"step": 2220
},
{
"epoch": 4.19,
"grad_norm": 2.0464072227478027,
"learning_rate": 0.00010798722044728435,
"loss": 0.5099,
"step": 2230
},
{
"epoch": 4.21,
"grad_norm": 2.517566204071045,
"learning_rate": 0.00010745473908413206,
"loss": 0.5021,
"step": 2240
},
{
"epoch": 4.23,
"grad_norm": 1.1620187759399414,
"learning_rate": 0.00010692225772097977,
"loss": 0.4711,
"step": 2250
},
{
"epoch": 4.25,
"grad_norm": 1.9912981986999512,
"learning_rate": 0.00010638977635782747,
"loss": 0.5725,
"step": 2260
},
{
"epoch": 4.27,
"grad_norm": 0.9746074080467224,
"learning_rate": 0.00010585729499467518,
"loss": 0.5196,
"step": 2270
},
{
"epoch": 4.29,
"grad_norm": 3.125993490219116,
"learning_rate": 0.00010532481363152291,
"loss": 0.55,
"step": 2280
},
{
"epoch": 4.3,
"grad_norm": 1.6319783926010132,
"learning_rate": 0.00010479233226837062,
"loss": 0.5553,
"step": 2290
},
{
"epoch": 4.32,
"grad_norm": 2.2027761936187744,
"learning_rate": 0.00010425985090521832,
"loss": 0.5072,
"step": 2300
},
{
"epoch": 4.34,
"grad_norm": 3.756495475769043,
"learning_rate": 0.00010372736954206603,
"loss": 0.5979,
"step": 2310
},
{
"epoch": 4.36,
"grad_norm": 0.9569351673126221,
"learning_rate": 0.00010319488817891375,
"loss": 0.5269,
"step": 2320
},
{
"epoch": 4.38,
"grad_norm": 1.8240845203399658,
"learning_rate": 0.00010266240681576144,
"loss": 0.5554,
"step": 2330
},
{
"epoch": 4.4,
"grad_norm": 1.4558720588684082,
"learning_rate": 0.00010212992545260916,
"loss": 0.4523,
"step": 2340
},
{
"epoch": 4.42,
"grad_norm": 1.966886281967163,
"learning_rate": 0.00010159744408945688,
"loss": 0.5397,
"step": 2350
},
{
"epoch": 4.44,
"grad_norm": 2.034513473510742,
"learning_rate": 0.0001010649627263046,
"loss": 0.4826,
"step": 2360
},
{
"epoch": 4.45,
"grad_norm": 2.524667263031006,
"learning_rate": 0.00010053248136315229,
"loss": 0.4602,
"step": 2370
},
{
"epoch": 4.47,
"grad_norm": 1.973412036895752,
"learning_rate": 0.00010005324813631522,
"loss": 0.5769,
"step": 2380
},
{
"epoch": 4.49,
"grad_norm": 2.0014846324920654,
"learning_rate": 9.952076677316294e-05,
"loss": 0.464,
"step": 2390
},
{
"epoch": 4.51,
"grad_norm": 1.190698266029358,
"learning_rate": 9.898828541001066e-05,
"loss": 0.4745,
"step": 2400
},
{
"epoch": 4.53,
"grad_norm": 1.8334747552871704,
"learning_rate": 9.845580404685837e-05,
"loss": 0.4763,
"step": 2410
},
{
"epoch": 4.55,
"grad_norm": 1.5026592016220093,
"learning_rate": 9.792332268370608e-05,
"loss": 0.4714,
"step": 2420
},
{
"epoch": 4.57,
"grad_norm": 3.0638644695281982,
"learning_rate": 9.739084132055378e-05,
"loss": 0.3997,
"step": 2430
},
{
"epoch": 4.59,
"grad_norm": 3.050144672393799,
"learning_rate": 9.68583599574015e-05,
"loss": 0.4471,
"step": 2440
},
{
"epoch": 4.61,
"grad_norm": 2.4229140281677246,
"learning_rate": 9.63258785942492e-05,
"loss": 0.4742,
"step": 2450
},
{
"epoch": 4.62,
"grad_norm": 2.2217772006988525,
"learning_rate": 9.579339723109692e-05,
"loss": 0.5514,
"step": 2460
},
{
"epoch": 4.64,
"grad_norm": 2.5604331493377686,
"learning_rate": 9.526091586794463e-05,
"loss": 0.538,
"step": 2470
},
{
"epoch": 4.66,
"grad_norm": 1.479228138923645,
"learning_rate": 9.472843450479234e-05,
"loss": 0.5559,
"step": 2480
},
{
"epoch": 4.68,
"grad_norm": 1.5853182077407837,
"learning_rate": 9.419595314164005e-05,
"loss": 0.4786,
"step": 2490
},
{
"epoch": 4.7,
"grad_norm": 4.546787261962891,
"learning_rate": 9.366347177848775e-05,
"loss": 0.5031,
"step": 2500
},
{
"epoch": 4.72,
"grad_norm": 2.7307677268981934,
"learning_rate": 9.313099041533548e-05,
"loss": 0.4968,
"step": 2510
},
{
"epoch": 4.74,
"grad_norm": 2.9583988189697266,
"learning_rate": 9.259850905218317e-05,
"loss": 0.5033,
"step": 2520
},
{
"epoch": 4.76,
"grad_norm": 2.163015604019165,
"learning_rate": 9.206602768903089e-05,
"loss": 0.533,
"step": 2530
},
{
"epoch": 4.77,
"grad_norm": 2.1034317016601562,
"learning_rate": 9.15335463258786e-05,
"loss": 0.4756,
"step": 2540
},
{
"epoch": 4.79,
"grad_norm": 3.1241562366485596,
"learning_rate": 9.100106496272631e-05,
"loss": 0.5204,
"step": 2550
},
{
"epoch": 4.81,
"grad_norm": 1.9084091186523438,
"learning_rate": 9.046858359957402e-05,
"loss": 0.4767,
"step": 2560
},
{
"epoch": 4.83,
"grad_norm": 2.3724541664123535,
"learning_rate": 8.993610223642172e-05,
"loss": 0.4941,
"step": 2570
},
{
"epoch": 4.85,
"grad_norm": 1.9276816844940186,
"learning_rate": 8.940362087326945e-05,
"loss": 0.5011,
"step": 2580
},
{
"epoch": 4.87,
"grad_norm": 3.0274977684020996,
"learning_rate": 8.887113951011715e-05,
"loss": 0.4222,
"step": 2590
},
{
"epoch": 4.89,
"grad_norm": 3.4712843894958496,
"learning_rate": 8.833865814696486e-05,
"loss": 0.4536,
"step": 2600
},
{
"epoch": 4.91,
"grad_norm": 1.3534293174743652,
"learning_rate": 8.780617678381257e-05,
"loss": 0.4915,
"step": 2610
},
{
"epoch": 4.92,
"grad_norm": 2.5444459915161133,
"learning_rate": 8.727369542066028e-05,
"loss": 0.5122,
"step": 2620
},
{
"epoch": 4.94,
"grad_norm": 2.352358102798462,
"learning_rate": 8.6741214057508e-05,
"loss": 0.5088,
"step": 2630
},
{
"epoch": 4.96,
"grad_norm": 2.807650327682495,
"learning_rate": 8.620873269435569e-05,
"loss": 0.5527,
"step": 2640
},
{
"epoch": 4.98,
"grad_norm": 1.5027140378952026,
"learning_rate": 8.567625133120342e-05,
"loss": 0.5642,
"step": 2650
},
{
"epoch": 5.0,
"grad_norm": 9.080309867858887,
"learning_rate": 8.514376996805112e-05,
"loss": 0.5545,
"step": 2660
},
{
"epoch": 5.0,
"eval_loss": 0.9512230157852173,
"eval_runtime": 5.1199,
"eval_samples_per_second": 97.657,
"eval_steps_per_second": 3.125,
"step": 2660
},
{
"epoch": 5.02,
"grad_norm": 1.3162051439285278,
"learning_rate": 8.461128860489883e-05,
"loss": 0.405,
"step": 2670
},
{
"epoch": 5.04,
"grad_norm": 2.260577440261841,
"learning_rate": 8.407880724174654e-05,
"loss": 0.3254,
"step": 2680
},
{
"epoch": 5.06,
"grad_norm": 2.4929864406585693,
"learning_rate": 8.354632587859425e-05,
"loss": 0.4028,
"step": 2690
},
{
"epoch": 5.08,
"grad_norm": 5.688662052154541,
"learning_rate": 8.301384451544197e-05,
"loss": 0.2906,
"step": 2700
},
{
"epoch": 5.09,
"grad_norm": 1.3222452402114868,
"learning_rate": 8.248136315228966e-05,
"loss": 0.3261,
"step": 2710
},
{
"epoch": 5.11,
"grad_norm": 1.3454262018203735,
"learning_rate": 8.194888178913739e-05,
"loss": 0.3013,
"step": 2720
},
{
"epoch": 5.13,
"grad_norm": 3.683896064758301,
"learning_rate": 8.141640042598509e-05,
"loss": 0.3953,
"step": 2730
},
{
"epoch": 5.15,
"grad_norm": 5.352532863616943,
"learning_rate": 8.08839190628328e-05,
"loss": 0.4251,
"step": 2740
},
{
"epoch": 5.17,
"grad_norm": 2.004856586456299,
"learning_rate": 8.035143769968051e-05,
"loss": 0.3757,
"step": 2750
},
{
"epoch": 5.19,
"grad_norm": 3.9098947048187256,
"learning_rate": 7.981895633652823e-05,
"loss": 0.3575,
"step": 2760
},
{
"epoch": 5.21,
"grad_norm": 3.314652919769287,
"learning_rate": 7.928647497337594e-05,
"loss": 0.3759,
"step": 2770
},
{
"epoch": 5.23,
"grad_norm": 5.735342979431152,
"learning_rate": 7.875399361022364e-05,
"loss": 0.346,
"step": 2780
},
{
"epoch": 5.24,
"grad_norm": 1.5605205297470093,
"learning_rate": 7.822151224707136e-05,
"loss": 0.2185,
"step": 2790
},
{
"epoch": 5.26,
"grad_norm": 4.125953197479248,
"learning_rate": 7.768903088391907e-05,
"loss": 0.4666,
"step": 2800
},
{
"epoch": 5.28,
"grad_norm": 3.755160331726074,
"learning_rate": 7.715654952076677e-05,
"loss": 0.4115,
"step": 2810
},
{
"epoch": 5.3,
"grad_norm": 3.822338104248047,
"learning_rate": 7.66240681576145e-05,
"loss": 0.3338,
"step": 2820
},
{
"epoch": 5.32,
"grad_norm": 7.3369269371032715,
"learning_rate": 7.60915867944622e-05,
"loss": 0.4178,
"step": 2830
},
{
"epoch": 5.34,
"grad_norm": 3.8311543464660645,
"learning_rate": 7.555910543130991e-05,
"loss": 0.3084,
"step": 2840
},
{
"epoch": 5.36,
"grad_norm": 4.502466678619385,
"learning_rate": 7.502662406815761e-05,
"loss": 0.2962,
"step": 2850
},
{
"epoch": 5.38,
"grad_norm": 3.489075183868408,
"learning_rate": 7.449414270500533e-05,
"loss": 0.3073,
"step": 2860
},
{
"epoch": 5.39,
"grad_norm": 2.344810962677002,
"learning_rate": 7.396166134185304e-05,
"loss": 0.3059,
"step": 2870
},
{
"epoch": 5.41,
"grad_norm": 4.37916374206543,
"learning_rate": 7.342917997870074e-05,
"loss": 0.3749,
"step": 2880
},
{
"epoch": 5.43,
"grad_norm": 2.2266342639923096,
"learning_rate": 7.289669861554847e-05,
"loss": 0.2954,
"step": 2890
},
{
"epoch": 5.45,
"grad_norm": 2.1551311016082764,
"learning_rate": 7.236421725239617e-05,
"loss": 0.3377,
"step": 2900
},
{
"epoch": 5.47,
"grad_norm": 5.243711948394775,
"learning_rate": 7.183173588924388e-05,
"loss": 0.2986,
"step": 2910
},
{
"epoch": 5.49,
"grad_norm": 3.0937271118164062,
"learning_rate": 7.129925452609159e-05,
"loss": 0.2355,
"step": 2920
},
{
"epoch": 5.51,
"grad_norm": 1.6880041360855103,
"learning_rate": 7.07667731629393e-05,
"loss": 0.2808,
"step": 2930
},
{
"epoch": 5.53,
"grad_norm": 2.438985586166382,
"learning_rate": 7.023429179978702e-05,
"loss": 0.4147,
"step": 2940
},
{
"epoch": 5.55,
"grad_norm": 2.1790943145751953,
"learning_rate": 6.970181043663471e-05,
"loss": 0.3619,
"step": 2950
},
{
"epoch": 5.56,
"grad_norm": 4.307677745819092,
"learning_rate": 6.916932907348244e-05,
"loss": 0.3657,
"step": 2960
},
{
"epoch": 5.58,
"grad_norm": 4.3886566162109375,
"learning_rate": 6.863684771033014e-05,
"loss": 0.3333,
"step": 2970
},
{
"epoch": 5.6,
"grad_norm": 2.579944610595703,
"learning_rate": 6.810436634717785e-05,
"loss": 0.4132,
"step": 2980
},
{
"epoch": 5.62,
"grad_norm": 2.7568283081054688,
"learning_rate": 6.757188498402556e-05,
"loss": 0.2668,
"step": 2990
},
{
"epoch": 5.64,
"grad_norm": 1.8786532878875732,
"learning_rate": 6.703940362087328e-05,
"loss": 0.3285,
"step": 3000
},
{
"epoch": 5.66,
"grad_norm": 1.1324492692947388,
"learning_rate": 6.650692225772099e-05,
"loss": 0.3481,
"step": 3010
},
{
"epoch": 5.68,
"grad_norm": 1.0097510814666748,
"learning_rate": 6.597444089456869e-05,
"loss": 0.2596,
"step": 3020
},
{
"epoch": 5.7,
"grad_norm": 5.024123191833496,
"learning_rate": 6.544195953141641e-05,
"loss": 0.2748,
"step": 3030
},
{
"epoch": 5.71,
"grad_norm": 4.416189193725586,
"learning_rate": 6.490947816826411e-05,
"loss": 0.3929,
"step": 3040
},
{
"epoch": 5.73,
"grad_norm": 2.678957462310791,
"learning_rate": 6.437699680511182e-05,
"loss": 0.2433,
"step": 3050
},
{
"epoch": 5.75,
"grad_norm": 2.553661346435547,
"learning_rate": 6.384451544195953e-05,
"loss": 0.2908,
"step": 3060
},
{
"epoch": 5.77,
"grad_norm": 2.256429433822632,
"learning_rate": 6.331203407880725e-05,
"loss": 0.2666,
"step": 3070
},
{
"epoch": 5.79,
"grad_norm": 5.489305019378662,
"learning_rate": 6.277955271565496e-05,
"loss": 0.3884,
"step": 3080
},
{
"epoch": 5.81,
"grad_norm": 4.188615322113037,
"learning_rate": 6.224707135250266e-05,
"loss": 0.3637,
"step": 3090
},
{
"epoch": 5.83,
"grad_norm": 3.3800997734069824,
"learning_rate": 6.171458998935038e-05,
"loss": 0.2935,
"step": 3100
},
{
"epoch": 5.85,
"grad_norm": 1.9177793264389038,
"learning_rate": 6.118210862619808e-05,
"loss": 0.3361,
"step": 3110
},
{
"epoch": 5.86,
"grad_norm": 1.3074955940246582,
"learning_rate": 6.0649627263045794e-05,
"loss": 0.2567,
"step": 3120
},
{
"epoch": 5.88,
"grad_norm": 4.866466045379639,
"learning_rate": 6.01171458998935e-05,
"loss": 0.3597,
"step": 3130
},
{
"epoch": 5.9,
"grad_norm": 2.4640395641326904,
"learning_rate": 5.958466453674122e-05,
"loss": 0.2919,
"step": 3140
},
{
"epoch": 5.92,
"grad_norm": 4.659738540649414,
"learning_rate": 5.9052183173588923e-05,
"loss": 0.3253,
"step": 3150
},
{
"epoch": 5.94,
"grad_norm": 3.3256399631500244,
"learning_rate": 5.8519701810436636e-05,
"loss": 0.3053,
"step": 3160
},
{
"epoch": 5.96,
"grad_norm": 1.7987494468688965,
"learning_rate": 5.7987220447284354e-05,
"loss": 0.3421,
"step": 3170
},
{
"epoch": 5.98,
"grad_norm": 3.481025218963623,
"learning_rate": 5.745473908413206e-05,
"loss": 0.2801,
"step": 3180
},
{
"epoch": 6.0,
"grad_norm": 3.000406265258789,
"learning_rate": 5.6922257720979765e-05,
"loss": 0.3243,
"step": 3190
},
{
"epoch": 6.0,
"eval_loss": 1.507144808769226,
"eval_runtime": 4.9299,
"eval_samples_per_second": 101.421,
"eval_steps_per_second": 3.245,
"step": 3192
},
{
"epoch": 6.02,
"grad_norm": 1.909064531326294,
"learning_rate": 5.6389776357827484e-05,
"loss": 0.195,
"step": 3200
},
{
"epoch": 6.03,
"grad_norm": 1.058613896369934,
"learning_rate": 5.585729499467519e-05,
"loss": 0.1155,
"step": 3210
},
{
"epoch": 6.05,
"grad_norm": 3.8664138317108154,
"learning_rate": 5.53248136315229e-05,
"loss": 0.2217,
"step": 3220
},
{
"epoch": 6.07,
"grad_norm": 0.6019624471664429,
"learning_rate": 5.479233226837061e-05,
"loss": 0.1283,
"step": 3230
},
{
"epoch": 6.09,
"grad_norm": 4.904506683349609,
"learning_rate": 5.4259850905218326e-05,
"loss": 0.1153,
"step": 3240
},
{
"epoch": 6.11,
"grad_norm": 2.7440154552459717,
"learning_rate": 5.372736954206603e-05,
"loss": 0.0852,
"step": 3250
},
{
"epoch": 6.13,
"grad_norm": 1.7608120441436768,
"learning_rate": 5.3194888178913736e-05,
"loss": 0.1631,
"step": 3260
},
{
"epoch": 6.15,
"grad_norm": 0.817927360534668,
"learning_rate": 5.2662406815761455e-05,
"loss": 0.0899,
"step": 3270
},
{
"epoch": 6.17,
"grad_norm": 0.4968046545982361,
"learning_rate": 5.212992545260916e-05,
"loss": 0.1774,
"step": 3280
},
{
"epoch": 6.18,
"grad_norm": 3.789479970932007,
"learning_rate": 5.159744408945687e-05,
"loss": 0.1012,
"step": 3290
},
{
"epoch": 6.2,
"grad_norm": 2.593512535095215,
"learning_rate": 5.106496272630458e-05,
"loss": 0.0886,
"step": 3300
},
{
"epoch": 6.22,
"grad_norm": 1.9879395961761475,
"learning_rate": 5.05324813631523e-05,
"loss": 0.0865,
"step": 3310
},
{
"epoch": 6.24,
"grad_norm": 0.262031614780426,
"learning_rate": 5e-05,
"loss": 0.1385,
"step": 3320
},
{
"epoch": 6.26,
"grad_norm": 0.5335056781768799,
"learning_rate": 4.9467518636847715e-05,
"loss": 0.12,
"step": 3330
},
{
"epoch": 6.28,
"grad_norm": 3.7105541229248047,
"learning_rate": 4.893503727369542e-05,
"loss": 0.2251,
"step": 3340
},
{
"epoch": 6.3,
"grad_norm": 5.374883651733398,
"learning_rate": 4.840255591054313e-05,
"loss": 0.154,
"step": 3350
},
{
"epoch": 6.32,
"grad_norm": 11.730541229248047,
"learning_rate": 4.7870074547390844e-05,
"loss": 0.2578,
"step": 3360
},
{
"epoch": 6.33,
"grad_norm": 2.778977632522583,
"learning_rate": 4.7337593184238556e-05,
"loss": 0.1043,
"step": 3370
},
{
"epoch": 6.35,
"grad_norm": 1.2093865871429443,
"learning_rate": 4.680511182108626e-05,
"loss": 0.1851,
"step": 3380
},
{
"epoch": 6.37,
"grad_norm": 0.44236135482788086,
"learning_rate": 4.6272630457933974e-05,
"loss": 0.193,
"step": 3390
},
{
"epoch": 6.39,
"grad_norm": 5.98400354385376,
"learning_rate": 4.5740149094781686e-05,
"loss": 0.1475,
"step": 3400
},
{
"epoch": 6.41,
"grad_norm": 9.247739791870117,
"learning_rate": 4.520766773162939e-05,
"loss": 0.1972,
"step": 3410
},
{
"epoch": 6.43,
"grad_norm": 0.7436373829841614,
"learning_rate": 4.46751863684771e-05,
"loss": 0.141,
"step": 3420
},
{
"epoch": 6.45,
"grad_norm": 2.57186222076416,
"learning_rate": 4.4142705005324815e-05,
"loss": 0.1439,
"step": 3430
},
{
"epoch": 6.47,
"grad_norm": 6.6432600021362305,
"learning_rate": 4.361022364217253e-05,
"loss": 0.1118,
"step": 3440
},
{
"epoch": 6.48,
"grad_norm": 5.326605796813965,
"learning_rate": 4.307774227902024e-05,
"loss": 0.1989,
"step": 3450
},
{
"epoch": 6.5,
"grad_norm": 4.456277370452881,
"learning_rate": 4.2545260915867945e-05,
"loss": 0.1541,
"step": 3460
},
{
"epoch": 6.52,
"grad_norm": 0.17808414995670319,
"learning_rate": 4.201277955271566e-05,
"loss": 0.0874,
"step": 3470
},
{
"epoch": 6.54,
"grad_norm": 14.033349990844727,
"learning_rate": 4.148029818956336e-05,
"loss": 0.1112,
"step": 3480
},
{
"epoch": 6.56,
"grad_norm": 7.038208484649658,
"learning_rate": 4.094781682641108e-05,
"loss": 0.2453,
"step": 3490
},
{
"epoch": 6.58,
"grad_norm": 1.385833501815796,
"learning_rate": 4.041533546325879e-05,
"loss": 0.134,
"step": 3500
},
{
"epoch": 6.6,
"grad_norm": 3.095097303390503,
"learning_rate": 3.98828541001065e-05,
"loss": 0.1328,
"step": 3510
},
{
"epoch": 6.62,
"grad_norm": 0.5865158438682556,
"learning_rate": 3.935037273695421e-05,
"loss": 0.1189,
"step": 3520
},
{
"epoch": 6.64,
"grad_norm": 6.275442600250244,
"learning_rate": 3.8817891373801916e-05,
"loss": 0.2252,
"step": 3530
},
{
"epoch": 6.65,
"grad_norm": 4.78378438949585,
"learning_rate": 3.828541001064963e-05,
"loss": 0.2046,
"step": 3540
},
{
"epoch": 6.67,
"grad_norm": 0.8807236552238464,
"learning_rate": 3.775292864749734e-05,
"loss": 0.0828,
"step": 3550
},
{
"epoch": 6.69,
"grad_norm": 0.18408401310443878,
"learning_rate": 3.722044728434505e-05,
"loss": 0.0516,
"step": 3560
},
{
"epoch": 6.71,
"grad_norm": 8.150348663330078,
"learning_rate": 3.668796592119276e-05,
"loss": 0.2113,
"step": 3570
},
{
"epoch": 6.73,
"grad_norm": 4.728875637054443,
"learning_rate": 3.615548455804047e-05,
"loss": 0.1763,
"step": 3580
},
{
"epoch": 6.75,
"grad_norm": 8.548792839050293,
"learning_rate": 3.562300319488818e-05,
"loss": 0.1872,
"step": 3590
},
{
"epoch": 6.77,
"grad_norm": 0.4689349830150604,
"learning_rate": 3.509052183173589e-05,
"loss": 0.0679,
"step": 3600
},
{
"epoch": 6.79,
"grad_norm": 1.130393624305725,
"learning_rate": 3.45580404685836e-05,
"loss": 0.15,
"step": 3610
},
{
"epoch": 6.8,
"grad_norm": 1.4335063695907593,
"learning_rate": 3.402555910543131e-05,
"loss": 0.1507,
"step": 3620
},
{
"epoch": 6.82,
"grad_norm": 1.1329718828201294,
"learning_rate": 3.3493077742279024e-05,
"loss": 0.1569,
"step": 3630
},
{
"epoch": 6.84,
"grad_norm": 0.7500413656234741,
"learning_rate": 3.2960596379126736e-05,
"loss": 0.1514,
"step": 3640
},
{
"epoch": 6.86,
"grad_norm": 0.21835221350193024,
"learning_rate": 3.242811501597444e-05,
"loss": 0.1324,
"step": 3650
},
{
"epoch": 6.88,
"grad_norm": 1.765376329421997,
"learning_rate": 3.1895633652822154e-05,
"loss": 0.0542,
"step": 3660
},
{
"epoch": 6.9,
"grad_norm": 1.007416844367981,
"learning_rate": 3.136315228966986e-05,
"loss": 0.1132,
"step": 3670
},
{
"epoch": 6.92,
"grad_norm": 0.4407300353050232,
"learning_rate": 3.083067092651757e-05,
"loss": 0.1133,
"step": 3680
},
{
"epoch": 6.94,
"grad_norm": 3.9314990043640137,
"learning_rate": 3.029818956336528e-05,
"loss": 0.153,
"step": 3690
},
{
"epoch": 6.95,
"grad_norm": 9.002704620361328,
"learning_rate": 2.9765708200212995e-05,
"loss": 0.1732,
"step": 3700
},
{
"epoch": 6.97,
"grad_norm": 8.162276268005371,
"learning_rate": 2.9233226837060707e-05,
"loss": 0.1315,
"step": 3710
},
{
"epoch": 6.99,
"grad_norm": 0.3330124318599701,
"learning_rate": 2.8700745473908413e-05,
"loss": 0.1371,
"step": 3720
},
{
"epoch": 7.0,
"eval_loss": 2.0203089714050293,
"eval_runtime": 4.9645,
"eval_samples_per_second": 100.715,
"eval_steps_per_second": 3.223,
"step": 3724
},
{
"epoch": 7.01,
"grad_norm": 1.0700572729110718,
"learning_rate": 2.8168264110756125e-05,
"loss": 0.0909,
"step": 3730
},
{
"epoch": 7.03,
"grad_norm": 0.4397372007369995,
"learning_rate": 2.7635782747603834e-05,
"loss": 0.0137,
"step": 3740
},
{
"epoch": 7.05,
"grad_norm": 6.55562686920166,
"learning_rate": 2.7103301384451546e-05,
"loss": 0.0651,
"step": 3750
},
{
"epoch": 7.07,
"grad_norm": 0.3805689215660095,
"learning_rate": 2.6570820021299255e-05,
"loss": 0.0863,
"step": 3760
},
{
"epoch": 7.09,
"grad_norm": 3.0128049850463867,
"learning_rate": 2.6038338658146967e-05,
"loss": 0.0868,
"step": 3770
},
{
"epoch": 7.11,
"grad_norm": 2.7534263134002686,
"learning_rate": 2.550585729499468e-05,
"loss": 0.0183,
"step": 3780
},
{
"epoch": 7.12,
"grad_norm": 0.03968283161520958,
"learning_rate": 2.4973375931842384e-05,
"loss": 0.0045,
"step": 3790
},
{
"epoch": 7.14,
"grad_norm": 11.932687759399414,
"learning_rate": 2.44408945686901e-05,
"loss": 0.0418,
"step": 3800
},
{
"epoch": 7.16,
"grad_norm": 0.20918692648410797,
"learning_rate": 2.390841320553781e-05,
"loss": 0.0048,
"step": 3810
},
{
"epoch": 7.18,
"grad_norm": 0.48621994256973267,
"learning_rate": 2.3375931842385517e-05,
"loss": 0.0285,
"step": 3820
},
{
"epoch": 7.2,
"grad_norm": 0.008626734837889671,
"learning_rate": 2.284345047923323e-05,
"loss": 0.0382,
"step": 3830
},
{
"epoch": 7.22,
"grad_norm": 12.129301071166992,
"learning_rate": 2.2310969116080938e-05,
"loss": 0.1381,
"step": 3840
},
{
"epoch": 7.24,
"grad_norm": 3.3187191486358643,
"learning_rate": 2.1778487752928647e-05,
"loss": 0.034,
"step": 3850
},
{
"epoch": 7.26,
"grad_norm": 19.58869171142578,
"learning_rate": 2.124600638977636e-05,
"loss": 0.078,
"step": 3860
},
{
"epoch": 7.27,
"grad_norm": 0.011677253991365433,
"learning_rate": 2.071352502662407e-05,
"loss": 0.0278,
"step": 3870
},
{
"epoch": 7.29,
"grad_norm": 0.0037781130522489548,
"learning_rate": 2.018104366347178e-05,
"loss": 0.019,
"step": 3880
},
{
"epoch": 7.31,
"grad_norm": 0.012461444362998009,
"learning_rate": 1.964856230031949e-05,
"loss": 0.022,
"step": 3890
},
{
"epoch": 7.33,
"grad_norm": 0.13421136140823364,
"learning_rate": 1.91160809371672e-05,
"loss": 0.0591,
"step": 3900
},
{
"epoch": 7.35,
"grad_norm": 0.015879683196544647,
"learning_rate": 1.858359957401491e-05,
"loss": 0.0587,
"step": 3910
},
{
"epoch": 7.37,
"grad_norm": 0.006961719132959843,
"learning_rate": 1.805111821086262e-05,
"loss": 0.0549,
"step": 3920
},
{
"epoch": 7.39,
"grad_norm": 0.0046995761804282665,
"learning_rate": 1.7518636847710333e-05,
"loss": 0.0096,
"step": 3930
},
{
"epoch": 7.41,
"grad_norm": 3.304826021194458,
"learning_rate": 1.6986155484558042e-05,
"loss": 0.083,
"step": 3940
},
{
"epoch": 7.42,
"grad_norm": 0.3136584758758545,
"learning_rate": 1.645367412140575e-05,
"loss": 0.0191,
"step": 3950
},
{
"epoch": 7.44,
"grad_norm": 2.098071575164795,
"learning_rate": 1.5921192758253463e-05,
"loss": 0.1322,
"step": 3960
},
{
"epoch": 7.46,
"grad_norm": 0.0669000893831253,
"learning_rate": 1.5388711395101172e-05,
"loss": 0.0662,
"step": 3970
},
{
"epoch": 7.48,
"grad_norm": 0.024517908692359924,
"learning_rate": 1.485623003194888e-05,
"loss": 0.0431,
"step": 3980
},
{
"epoch": 7.5,
"grad_norm": 0.01538030430674553,
"learning_rate": 1.4323748668796594e-05,
"loss": 0.0201,
"step": 3990
},
{
"epoch": 7.52,
"grad_norm": 0.019790129736065865,
"learning_rate": 1.3791267305644303e-05,
"loss": 0.0768,
"step": 4000
},
{
"epoch": 7.54,
"grad_norm": 0.6820014119148254,
"learning_rate": 1.3258785942492014e-05,
"loss": 0.0222,
"step": 4010
},
{
"epoch": 7.56,
"grad_norm": 0.027402225881814957,
"learning_rate": 1.2726304579339724e-05,
"loss": 0.0191,
"step": 4020
},
{
"epoch": 7.58,
"grad_norm": 0.012008791789412498,
"learning_rate": 1.2193823216187434e-05,
"loss": 0.0137,
"step": 4030
},
{
"epoch": 7.59,
"grad_norm": 10.289400100708008,
"learning_rate": 1.1661341853035145e-05,
"loss": 0.0738,
"step": 4040
},
{
"epoch": 7.61,
"grad_norm": 0.019787069410085678,
"learning_rate": 1.1128860489882854e-05,
"loss": 0.0201,
"step": 4050
},
{
"epoch": 7.63,
"grad_norm": 3.301687717437744,
"learning_rate": 1.0596379126730564e-05,
"loss": 0.0393,
"step": 4060
},
{
"epoch": 7.65,
"grad_norm": 1.3089189529418945,
"learning_rate": 1.0063897763578276e-05,
"loss": 0.031,
"step": 4070
},
{
"epoch": 7.67,
"grad_norm": 0.03497765213251114,
"learning_rate": 9.531416400425985e-06,
"loss": 0.159,
"step": 4080
},
{
"epoch": 7.69,
"grad_norm": 0.23914293944835663,
"learning_rate": 8.998935037273695e-06,
"loss": 0.0757,
"step": 4090
},
{
"epoch": 7.71,
"grad_norm": 0.011268123984336853,
"learning_rate": 8.466453674121406e-06,
"loss": 0.0051,
"step": 4100
},
{
"epoch": 7.73,
"grad_norm": 0.021614540368318558,
"learning_rate": 7.933972310969116e-06,
"loss": 0.0606,
"step": 4110
},
{
"epoch": 7.74,
"grad_norm": 2.5865726470947266,
"learning_rate": 7.4014909478168266e-06,
"loss": 0.0776,
"step": 4120
},
{
"epoch": 7.76,
"grad_norm": 0.5956721305847168,
"learning_rate": 6.869009584664538e-06,
"loss": 0.0222,
"step": 4130
},
{
"epoch": 7.78,
"grad_norm": 5.844602108001709,
"learning_rate": 6.336528221512247e-06,
"loss": 0.0293,
"step": 4140
},
{
"epoch": 7.8,
"grad_norm": 0.03582174703478813,
"learning_rate": 5.804046858359958e-06,
"loss": 0.032,
"step": 4150
},
{
"epoch": 7.82,
"grad_norm": 4.13277006149292,
"learning_rate": 5.2715654952076674e-06,
"loss": 0.0635,
"step": 4160
},
{
"epoch": 7.84,
"grad_norm": 0.02260272204875946,
"learning_rate": 4.739084132055379e-06,
"loss": 0.0024,
"step": 4170
},
{
"epoch": 7.86,
"grad_norm": 0.026146868243813515,
"learning_rate": 4.206602768903089e-06,
"loss": 0.0522,
"step": 4180
},
{
"epoch": 7.88,
"grad_norm": 0.3275425434112549,
"learning_rate": 3.6741214057507987e-06,
"loss": 0.0492,
"step": 4190
},
{
"epoch": 7.89,
"grad_norm": 0.07726357877254486,
"learning_rate": 3.141640042598509e-06,
"loss": 0.0205,
"step": 4200
},
{
"epoch": 7.91,
"grad_norm": 0.6571511626243591,
"learning_rate": 2.6091586794462196e-06,
"loss": 0.0593,
"step": 4210
},
{
"epoch": 7.93,
"grad_norm": 0.18334336578845978,
"learning_rate": 2.0766773162939296e-06,
"loss": 0.0052,
"step": 4220
},
{
"epoch": 7.95,
"grad_norm": 0.056655995547771454,
"learning_rate": 1.54419595314164e-06,
"loss": 0.0362,
"step": 4230
},
{
"epoch": 7.97,
"grad_norm": 0.3170097768306732,
"learning_rate": 1.0117145899893504e-06,
"loss": 0.0056,
"step": 4240
},
{
"epoch": 7.99,
"grad_norm": 6.882564544677734,
"learning_rate": 4.792332268370607e-07,
"loss": 0.0571,
"step": 4250
},
{
"epoch": 8.0,
"eval_loss": 2.9498751163482666,
"eval_runtime": 5.0819,
"eval_samples_per_second": 98.389,
"eval_steps_per_second": 3.148,
"step": 4256
},
{
"epoch": 8.0,
"step": 4256,
"total_flos": 5.269455293792256e+18,
"train_loss": 0.4405817036390921,
"train_runtime": 1400.9279,
"train_samples_per_second": 48.539,
"train_steps_per_second": 3.038
}
],
"logging_steps": 10,
"max_steps": 4256,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 500,
"total_flos": 5.269455293792256e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}