{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 1480,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006756756756756757,
"grad_norm": 474.0,
"learning_rate": 1.3513513513513515e-06,
"loss": 38.1848,
"step": 1
},
{
"epoch": 0.033783783783783786,
"grad_norm": 456.0,
"learning_rate": 6.7567567567567575e-06,
"loss": 40.5702,
"step": 5
},
{
"epoch": 0.06756756756756757,
"grad_norm": 388.0,
"learning_rate": 1.3513513513513515e-05,
"loss": 37.3485,
"step": 10
},
{
"epoch": 0.10135135135135136,
"grad_norm": 256.0,
"learning_rate": 2.0270270270270273e-05,
"loss": 27.8365,
"step": 15
},
{
"epoch": 0.13513513513513514,
"grad_norm": 41.75,
"learning_rate": 2.702702702702703e-05,
"loss": 19.8335,
"step": 20
},
{
"epoch": 0.16891891891891891,
"grad_norm": 32.25,
"learning_rate": 3.3783783783783784e-05,
"loss": 17.7565,
"step": 25
},
{
"epoch": 0.20270270270270271,
"grad_norm": 18.125,
"learning_rate": 4.0540540540540545e-05,
"loss": 16.1239,
"step": 30
},
{
"epoch": 0.23648648648648649,
"grad_norm": 5.78125,
"learning_rate": 4.72972972972973e-05,
"loss": 14.7405,
"step": 35
},
{
"epoch": 0.2702702702702703,
"grad_norm": 5.375,
"learning_rate": 5.405405405405406e-05,
"loss": 13.8327,
"step": 40
},
{
"epoch": 0.30405405405405406,
"grad_norm": 8.375,
"learning_rate": 6.0810810810810814e-05,
"loss": 13.4116,
"step": 45
},
{
"epoch": 0.33783783783783783,
"grad_norm": 14.625,
"learning_rate": 6.756756756756757e-05,
"loss": 12.5308,
"step": 50
},
{
"epoch": 0.3716216216216216,
"grad_norm": 31.5,
"learning_rate": 7.432432432432433e-05,
"loss": 10.6509,
"step": 55
},
{
"epoch": 0.40540540540540543,
"grad_norm": 35.0,
"learning_rate": 8.108108108108109e-05,
"loss": 6.0675,
"step": 60
},
{
"epoch": 0.4391891891891892,
"grad_norm": 8.75,
"learning_rate": 8.783783783783784e-05,
"loss": 2.1561,
"step": 65
},
{
"epoch": 0.47297297297297297,
"grad_norm": 3.515625,
"learning_rate": 9.45945945945946e-05,
"loss": 1.7484,
"step": 70
},
{
"epoch": 0.5067567567567568,
"grad_norm": 2.203125,
"learning_rate": 0.00010135135135135136,
"loss": 1.565,
"step": 75
},
{
"epoch": 0.5405405405405406,
"grad_norm": 3.265625,
"learning_rate": 0.00010810810810810812,
"loss": 1.4507,
"step": 80
},
{
"epoch": 0.5743243243243243,
"grad_norm": 1.484375,
"learning_rate": 0.00011486486486486487,
"loss": 1.3348,
"step": 85
},
{
"epoch": 0.6081081081081081,
"grad_norm": 2.484375,
"learning_rate": 0.00012162162162162163,
"loss": 1.2468,
"step": 90
},
{
"epoch": 0.6418918918918919,
"grad_norm": 4.6875,
"learning_rate": 0.0001283783783783784,
"loss": 1.2245,
"step": 95
},
{
"epoch": 0.6756756756756757,
"grad_norm": 3.28125,
"learning_rate": 0.00013513513513513514,
"loss": 1.1992,
"step": 100
},
{
"epoch": 0.7094594594594594,
"grad_norm": 4.40625,
"learning_rate": 0.00014189189189189188,
"loss": 1.1334,
"step": 105
},
{
"epoch": 0.7432432432432432,
"grad_norm": 2.53125,
"learning_rate": 0.00014864864864864866,
"loss": 1.1272,
"step": 110
},
{
"epoch": 0.777027027027027,
"grad_norm": 2.015625,
"learning_rate": 0.0001554054054054054,
"loss": 1.0732,
"step": 115
},
{
"epoch": 0.8108108108108109,
"grad_norm": 2.125,
"learning_rate": 0.00016216216216216218,
"loss": 1.0813,
"step": 120
},
{
"epoch": 0.8445945945945946,
"grad_norm": 3.734375,
"learning_rate": 0.00016891891891891893,
"loss": 1.0315,
"step": 125
},
{
"epoch": 0.8783783783783784,
"grad_norm": 4.09375,
"learning_rate": 0.00017567567567567568,
"loss": 1.0636,
"step": 130
},
{
"epoch": 0.9121621621621622,
"grad_norm": 1.390625,
"learning_rate": 0.00018243243243243245,
"loss": 0.9858,
"step": 135
},
{
"epoch": 0.9459459459459459,
"grad_norm": 11.0,
"learning_rate": 0.0001891891891891892,
"loss": 0.9889,
"step": 140
},
{
"epoch": 0.9797297297297297,
"grad_norm": 30.25,
"learning_rate": 0.00019594594594594594,
"loss": 1.0072,
"step": 145
},
{
"epoch": 1.0,
"eval_loss": 2.267159938812256,
"eval_runtime": 1.0098,
"eval_samples_per_second": 4.951,
"eval_steps_per_second": 1.981,
"step": 148
},
{
"epoch": 1.0135135135135136,
"grad_norm": 3.546875,
"learning_rate": 0.00019999888744757143,
"loss": 1.0602,
"step": 150
},
{
"epoch": 1.0472972972972974,
"grad_norm": 3.234375,
"learning_rate": 0.000199986371517049,
"loss": 0.9825,
"step": 155
},
{
"epoch": 1.0810810810810811,
"grad_norm": 0.9609375,
"learning_rate": 0.0001999599507118322,
"loss": 0.9573,
"step": 160
},
{
"epoch": 1.114864864864865,
"grad_norm": 3.125,
"learning_rate": 0.00019991962870620153,
"loss": 0.9675,
"step": 165
},
{
"epoch": 1.1486486486486487,
"grad_norm": 3.34375,
"learning_rate": 0.00019986541110764565,
"loss": 0.9773,
"step": 170
},
{
"epoch": 1.1824324324324325,
"grad_norm": 2.125,
"learning_rate": 0.00019979730545608126,
"loss": 0.9352,
"step": 175
},
{
"epoch": 1.2162162162162162,
"grad_norm": 0.96484375,
"learning_rate": 0.00019971532122280464,
"loss": 0.9498,
"step": 180
},
{
"epoch": 1.25,
"grad_norm": 1.25,
"learning_rate": 0.00019961946980917456,
"loss": 0.9255,
"step": 185
},
{
"epoch": 1.2837837837837838,
"grad_norm": 1.1640625,
"learning_rate": 0.0001995097645450266,
"loss": 0.9378,
"step": 190
},
{
"epoch": 1.3175675675675675,
"grad_norm": 0.7421875,
"learning_rate": 0.00019938622068681953,
"loss": 0.9164,
"step": 195
},
{
"epoch": 1.3513513513513513,
"grad_norm": 1.703125,
"learning_rate": 0.0001992488554155135,
"loss": 0.8986,
"step": 200
},
{
"epoch": 1.385135135135135,
"grad_norm": 6.59375,
"learning_rate": 0.00019909768783418086,
"loss": 0.9389,
"step": 205
},
{
"epoch": 1.4189189189189189,
"grad_norm": 1.6171875,
"learning_rate": 0.00019893273896534936,
"loss": 0.9261,
"step": 210
},
{
"epoch": 1.4527027027027026,
"grad_norm": 0.80859375,
"learning_rate": 0.00019875403174807882,
"loss": 0.8863,
"step": 215
},
{
"epoch": 1.4864864864864864,
"grad_norm": 0.7890625,
"learning_rate": 0.00019856159103477086,
"loss": 0.8941,
"step": 220
},
{
"epoch": 1.5202702702702702,
"grad_norm": 0.88671875,
"learning_rate": 0.0001983554435877128,
"loss": 0.8607,
"step": 225
},
{
"epoch": 1.554054054054054,
"grad_norm": 0.78125,
"learning_rate": 0.00019813561807535598,
"loss": 0.9002,
"step": 230
},
{
"epoch": 1.5878378378378377,
"grad_norm": 1.5,
"learning_rate": 0.00019790214506832868,
"loss": 0.894,
"step": 235
},
{
"epoch": 1.6216216216216215,
"grad_norm": 1.890625,
"learning_rate": 0.00019765505703518496,
"loss": 0.8619,
"step": 240
},
{
"epoch": 1.6554054054054053,
"grad_norm": 0.7109375,
"learning_rate": 0.0001973943883378892,
"loss": 0.8988,
"step": 245
},
{
"epoch": 1.689189189189189,
"grad_norm": 1.2109375,
"learning_rate": 0.00019712017522703764,
"loss": 0.8851,
"step": 250
},
{
"epoch": 1.722972972972973,
"grad_norm": 1.5,
"learning_rate": 0.00019683245583681675,
"loss": 0.8613,
"step": 255
},
{
"epoch": 1.7567567567567568,
"grad_norm": 0.66796875,
"learning_rate": 0.00019653127017970034,
"loss": 0.8816,
"step": 260
},
{
"epoch": 1.7905405405405406,
"grad_norm": 0.7421875,
"learning_rate": 0.00019621666014088494,
"loss": 0.9012,
"step": 265
},
{
"epoch": 1.8243243243243243,
"grad_norm": 0.85546875,
"learning_rate": 0.00019588866947246498,
"loss": 0.8495,
"step": 270
},
{
"epoch": 1.8581081081081081,
"grad_norm": 0.98046875,
"learning_rate": 0.00019554734378734824,
"loss": 0.8473,
"step": 275
},
{
"epoch": 1.8918918918918919,
"grad_norm": 1.296875,
"learning_rate": 0.00019519273055291266,
"loss": 0.9128,
"step": 280
},
{
"epoch": 1.9256756756756757,
"grad_norm": 0.7265625,
"learning_rate": 0.000194824879084405,
"loss": 0.8479,
"step": 285
},
{
"epoch": 1.9594594594594594,
"grad_norm": 2.03125,
"learning_rate": 0.00019444384053808288,
"loss": 0.8502,
"step": 290
},
{
"epoch": 1.9932432432432432,
"grad_norm": 6.03125,
"learning_rate": 0.00019404966790410047,
"loss": 0.8705,
"step": 295
},
{
"epoch": 2.0,
"eval_loss": 2.1745002269744873,
"eval_runtime": 1.0121,
"eval_samples_per_second": 4.94,
"eval_steps_per_second": 1.976,
"step": 296
},
{
"epoch": 2.027027027027027,
"grad_norm": 1.6875,
"learning_rate": 0.00019364241599913924,
"loss": 0.7692,
"step": 300
},
{
"epoch": 2.060810810810811,
"grad_norm": 0.75390625,
"learning_rate": 0.00019322214145878487,
"loss": 0.7726,
"step": 305
},
{
"epoch": 2.0945945945945947,
"grad_norm": 1.0859375,
"learning_rate": 0.00019278890272965096,
"loss": 0.7765,
"step": 310
},
{
"epoch": 2.1283783783783785,
"grad_norm": 4.625,
"learning_rate": 0.000192342760061251,
"loss": 0.7588,
"step": 315
},
{
"epoch": 2.1621621621621623,
"grad_norm": 0.92578125,
"learning_rate": 0.00019188377549761963,
"loss": 0.7696,
"step": 320
},
{
"epoch": 2.195945945945946,
"grad_norm": 0.71875,
"learning_rate": 0.00019141201286868435,
"loss": 0.7883,
"step": 325
},
{
"epoch": 2.22972972972973,
"grad_norm": 0.6796875,
"learning_rate": 0.00019092753778138886,
"loss": 0.7835,
"step": 330
},
{
"epoch": 2.2635135135135136,
"grad_norm": 0.8671875,
"learning_rate": 0.00019043041761056907,
"loss": 0.7848,
"step": 335
},
{
"epoch": 2.2972972972972974,
"grad_norm": 0.765625,
"learning_rate": 0.00018992072148958368,
"loss": 0.7891,
"step": 340
},
{
"epoch": 2.331081081081081,
"grad_norm": 0.96875,
"learning_rate": 0.00018939852030069981,
"loss": 0.7922,
"step": 345
},
{
"epoch": 2.364864864864865,
"grad_norm": 1.328125,
"learning_rate": 0.0001888638866652356,
"loss": 0.7857,
"step": 350
},
{
"epoch": 2.3986486486486487,
"grad_norm": 2.40625,
"learning_rate": 0.00018831689493346095,
"loss": 0.7839,
"step": 355
},
{
"epoch": 2.4324324324324325,
"grad_norm": 2.0625,
"learning_rate": 0.00018775762117425777,
"loss": 0.8031,
"step": 360
},
{
"epoch": 2.4662162162162162,
"grad_norm": 0.83203125,
"learning_rate": 0.00018718614316454133,
"loss": 0.7835,
"step": 365
},
{
"epoch": 2.5,
"grad_norm": 2.5,
"learning_rate": 0.00018660254037844388,
"loss": 0.7749,
"step": 370
},
{
"epoch": 2.5337837837837838,
"grad_norm": 0.9140625,
"learning_rate": 0.00018600689397626246,
"loss": 0.7973,
"step": 375
},
{
"epoch": 2.5675675675675675,
"grad_norm": 1.1953125,
"learning_rate": 0.0001853992867931721,
"loss": 0.8086,
"step": 380
},
{
"epoch": 2.6013513513513513,
"grad_norm": 0.84375,
"learning_rate": 0.00018477980332770607,
"loss": 0.7994,
"step": 385
},
{
"epoch": 2.635135135135135,
"grad_norm": 0.921875,
"learning_rate": 0.00018414852973000503,
"loss": 0.7872,
"step": 390
},
{
"epoch": 2.668918918918919,
"grad_norm": 0.78125,
"learning_rate": 0.00018350555378983608,
"loss": 0.7871,
"step": 395
},
{
"epoch": 2.7027027027027026,
"grad_norm": 0.640625,
"learning_rate": 0.00018285096492438424,
"loss": 0.7913,
"step": 400
},
{
"epoch": 2.7364864864864864,
"grad_norm": 1.109375,
"learning_rate": 0.00018218485416581726,
"loss": 0.7861,
"step": 405
},
{
"epoch": 2.77027027027027,
"grad_norm": 0.921875,
"learning_rate": 0.00018150731414862622,
"loss": 0.7757,
"step": 410
},
{
"epoch": 2.804054054054054,
"grad_norm": 1.15625,
"learning_rate": 0.00018081843909674276,
"loss": 0.7893,
"step": 415
},
{
"epoch": 2.8378378378378377,
"grad_norm": 1.0390625,
"learning_rate": 0.00018011832481043576,
"loss": 0.795,
"step": 420
},
{
"epoch": 2.8716216216216215,
"grad_norm": 0.65234375,
"learning_rate": 0.0001794070686529886,
"loss": 0.7933,
"step": 425
},
{
"epoch": 2.9054054054054053,
"grad_norm": 0.625,
"learning_rate": 0.000178684769537159,
"loss": 0.7832,
"step": 430
},
{
"epoch": 2.939189189189189,
"grad_norm": 0.69921875,
"learning_rate": 0.0001779515279114236,
"loss": 0.7833,
"step": 435
},
{
"epoch": 2.972972972972973,
"grad_norm": 0.78125,
"learning_rate": 0.00017720744574600863,
"loss": 0.7957,
"step": 440
},
{
"epoch": 3.0,
"eval_loss": 2.191443920135498,
"eval_runtime": 1.0106,
"eval_samples_per_second": 4.947,
"eval_steps_per_second": 1.979,
"step": 444
},
{
"epoch": 3.0067567567567566,
"grad_norm": 0.71484375,
"learning_rate": 0.00017645262651870926,
"loss": 0.7603,
"step": 445
},
{
"epoch": 3.0405405405405403,
"grad_norm": 0.64453125,
"learning_rate": 0.0001756871752004992,
"loss": 0.6923,
"step": 450
},
{
"epoch": 3.074324324324324,
"grad_norm": 0.7109375,
"learning_rate": 0.0001749111982409325,
"loss": 0.6958,
"step": 455
},
{
"epoch": 3.108108108108108,
"grad_norm": 0.69921875,
"learning_rate": 0.00017412480355334005,
"loss": 0.6803,
"step": 460
},
{
"epoch": 3.141891891891892,
"grad_norm": 0.84765625,
"learning_rate": 0.00017332810049982208,
"loss": 0.7161,
"step": 465
},
{
"epoch": 3.175675675675676,
"grad_norm": 1.0234375,
"learning_rate": 0.00017252119987603973,
"loss": 0.68,
"step": 470
},
{
"epoch": 3.2094594594594597,
"grad_norm": 0.70703125,
"learning_rate": 0.00017170421389580667,
"loss": 0.6976,
"step": 475
},
{
"epoch": 3.2432432432432434,
"grad_norm": 0.765625,
"learning_rate": 0.00017087725617548385,
"loss": 0.6836,
"step": 480
},
{
"epoch": 3.277027027027027,
"grad_norm": 0.69921875,
"learning_rate": 0.00017004044171817925,
"loss": 0.7073,
"step": 485
},
{
"epoch": 3.310810810810811,
"grad_norm": 0.72265625,
"learning_rate": 0.00016919388689775464,
"loss": 0.7111,
"step": 490
},
{
"epoch": 3.3445945945945947,
"grad_norm": 0.6953125,
"learning_rate": 0.00016833770944264153,
"loss": 0.716,
"step": 495
},
{
"epoch": 3.3783783783783785,
"grad_norm": 0.8046875,
"learning_rate": 0.00016747202841946928,
"loss": 0.7096,
"step": 500
},
{
"epoch": 3.4121621621621623,
"grad_norm": 0.859375,
"learning_rate": 0.00016659696421650645,
"loss": 0.6776,
"step": 505
},
{
"epoch": 3.445945945945946,
"grad_norm": 1.40625,
"learning_rate": 0.00016571263852691888,
"loss": 0.7246,
"step": 510
},
{
"epoch": 3.47972972972973,
"grad_norm": 1.09375,
"learning_rate": 0.00016481917433184607,
"loss": 0.706,
"step": 515
},
{
"epoch": 3.5135135135135136,
"grad_norm": 0.8828125,
"learning_rate": 0.0001639166958832985,
"loss": 0.694,
"step": 520
},
{
"epoch": 3.5472972972972974,
"grad_norm": 0.921875,
"learning_rate": 0.00016300532868687806,
"loss": 0.7063,
"step": 525
},
{
"epoch": 3.581081081081081,
"grad_norm": 0.84375,
"learning_rate": 0.0001620851994843244,
"loss": 0.7071,
"step": 530
},
{
"epoch": 3.614864864864865,
"grad_norm": 0.8671875,
"learning_rate": 0.00016115643623588915,
"loss": 0.7343,
"step": 535
},
{
"epoch": 3.6486486486486487,
"grad_norm": 1.109375,
"learning_rate": 0.00016021916810254097,
"loss": 0.6788,
"step": 540
},
{
"epoch": 3.6824324324324325,
"grad_norm": 0.75390625,
"learning_rate": 0.00015927352542800317,
"loss": 0.7099,
"step": 545
},
{
"epoch": 3.7162162162162162,
"grad_norm": 0.7265625,
"learning_rate": 0.00015831963972062733,
"loss": 0.7176,
"step": 550
},
{
"epoch": 3.75,
"grad_norm": 0.7109375,
"learning_rate": 0.0001573576436351046,
"loss": 0.7099,
"step": 555
},
{
"epoch": 3.7837837837837838,
"grad_norm": 0.703125,
"learning_rate": 0.0001563876709540178,
"loss": 0.7117,
"step": 560
},
{
"epoch": 3.8175675675675675,
"grad_norm": 0.8203125,
"learning_rate": 0.00015540985656923645,
"loss": 0.7227,
"step": 565
},
{
"epoch": 3.8513513513513513,
"grad_norm": 0.703125,
"learning_rate": 0.0001544243364631579,
"loss": 0.7077,
"step": 570
},
{
"epoch": 3.885135135135135,
"grad_norm": 1.15625,
"learning_rate": 0.00015343124768979637,
"loss": 0.7162,
"step": 575
},
{
"epoch": 3.918918918918919,
"grad_norm": 0.921875,
"learning_rate": 0.00015243072835572318,
"loss": 0.7164,
"step": 580
},
{
"epoch": 3.9527027027027026,
"grad_norm": 2.703125,
"learning_rate": 0.0001514229176008607,
"loss": 0.7312,
"step": 585
},
{
"epoch": 3.9864864864864864,
"grad_norm": 0.78125,
"learning_rate": 0.00015040795557913245,
"loss": 0.731,
"step": 590
},
{
"epoch": 4.0,
"eval_loss": 2.2511258125305176,
"eval_runtime": 1.0139,
"eval_samples_per_second": 4.932,
"eval_steps_per_second": 1.973,
"step": 592
},
{
"epoch": 4.02027027027027,
"grad_norm": 0.88671875,
"learning_rate": 0.00014938598343897214,
"loss": 0.653,
"step": 595
},
{
"epoch": 4.054054054054054,
"grad_norm": 0.8828125,
"learning_rate": 0.00014835714330369446,
"loss": 0.6058,
"step": 600
},
{
"epoch": 4.087837837837838,
"grad_norm": 0.9609375,
"learning_rate": 0.00014732157825173044,
"loss": 0.6245,
"step": 605
},
{
"epoch": 4.121621621621622,
"grad_norm": 0.94921875,
"learning_rate": 0.0001462794322967299,
"loss": 0.6074,
"step": 610
},
{
"epoch": 4.155405405405405,
"grad_norm": 0.8515625,
"learning_rate": 0.00014523085036753354,
"loss": 0.6067,
"step": 615
},
{
"epoch": 4.1891891891891895,
"grad_norm": 0.90234375,
"learning_rate": 0.00014417597828801832,
"loss": 0.608,
"step": 620
},
{
"epoch": 4.222972972972973,
"grad_norm": 0.81640625,
"learning_rate": 0.00014311496275681783,
"loss": 0.6079,
"step": 625
},
{
"epoch": 4.256756756756757,
"grad_norm": 0.62890625,
"learning_rate": 0.00014204795132692144,
"loss": 0.6234,
"step": 630
},
{
"epoch": 4.29054054054054,
"grad_norm": 0.67578125,
"learning_rate": 0.00014097509238515432,
"loss": 0.6139,
"step": 635
},
{
"epoch": 4.324324324324325,
"grad_norm": 0.62109375,
"learning_rate": 0.00013989653513154165,
"loss": 0.6203,
"step": 640
},
{
"epoch": 4.358108108108108,
"grad_norm": 0.66796875,
"learning_rate": 0.00013881242955855974,
"loss": 0.6053,
"step": 645
},
{
"epoch": 4.391891891891892,
"grad_norm": 0.72265625,
"learning_rate": 0.000137722926430277,
"loss": 0.6032,
"step": 650
},
{
"epoch": 4.425675675675675,
"grad_norm": 0.93359375,
"learning_rate": 0.00013662817726138728,
"loss": 0.6301,
"step": 655
},
{
"epoch": 4.45945945945946,
"grad_norm": 0.7265625,
"learning_rate": 0.00013552833429613938,
"loss": 0.6211,
"step": 660
},
{
"epoch": 4.493243243243243,
"grad_norm": 0.65625,
"learning_rate": 0.0001344235504871645,
"loss": 0.625,
"step": 665
},
{
"epoch": 4.527027027027027,
"grad_norm": 0.828125,
"learning_rate": 0.00013331397947420576,
"loss": 0.6075,
"step": 670
},
{
"epoch": 4.5608108108108105,
"grad_norm": 0.6875,
"learning_rate": 0.00013219977556275163,
"loss": 0.6016,
"step": 675
},
{
"epoch": 4.594594594594595,
"grad_norm": 0.70703125,
"learning_rate": 0.00013108109370257712,
"loss": 0.6126,
"step": 680
},
{
"epoch": 4.628378378378378,
"grad_norm": 0.62109375,
"learning_rate": 0.0001299580894661953,
"loss": 0.6229,
"step": 685
},
{
"epoch": 4.662162162162162,
"grad_norm": 0.66015625,
"learning_rate": 0.0001288309190272222,
"loss": 0.6368,
"step": 690
},
{
"epoch": 4.695945945945946,
"grad_norm": 0.640625,
"learning_rate": 0.00012769973913865794,
"loss": 0.6246,
"step": 695
},
{
"epoch": 4.72972972972973,
"grad_norm": 0.87890625,
"learning_rate": 0.00012656470711108764,
"loss": 0.6279,
"step": 700
},
{
"epoch": 4.763513513513513,
"grad_norm": 0.70703125,
"learning_rate": 0.00012542598079080456,
"loss": 0.6183,
"step": 705
},
{
"epoch": 4.797297297297297,
"grad_norm": 0.76953125,
"learning_rate": 0.0001242837185378587,
"loss": 0.6299,
"step": 710
},
{
"epoch": 4.831081081081081,
"grad_norm": 0.96484375,
"learning_rate": 0.00012313807920403419,
"loss": 0.6327,
"step": 715
},
{
"epoch": 4.864864864864865,
"grad_norm": 0.796875,
"learning_rate": 0.00012198922211075778,
"loss": 0.6425,
"step": 720
},
{
"epoch": 4.898648648648649,
"grad_norm": 0.71875,
"learning_rate": 0.00012083730702694291,
"loss": 0.6226,
"step": 725
},
{
"epoch": 4.9324324324324325,
"grad_norm": 0.7109375,
"learning_rate": 0.00011968249414677055,
"loss": 0.6108,
"step": 730
},
{
"epoch": 4.966216216216216,
"grad_norm": 0.71484375,
"learning_rate": 0.00011852494406741165,
"loss": 0.6359,
"step": 735
},
{
"epoch": 5.0,
"grad_norm": 0.703125,
"learning_rate": 0.00011736481776669306,
"loss": 0.634,
"step": 740
},
{
"epoch": 5.0,
"eval_loss": 2.340895175933838,
"eval_runtime": 1.0117,
"eval_samples_per_second": 4.942,
"eval_steps_per_second": 1.977,
"step": 740
},
{
"epoch": 5.033783783783784,
"grad_norm": 0.9453125,
"learning_rate": 0.00011620227658071087,
"loss": 0.53,
"step": 745
},
{
"epoch": 5.0675675675675675,
"grad_norm": 0.6796875,
"learning_rate": 0.00011503748218139369,
"loss": 0.5224,
"step": 750
},
{
"epoch": 5.101351351351352,
"grad_norm": 0.75390625,
"learning_rate": 0.00011387059655401932,
"loss": 0.5205,
"step": 755
},
{
"epoch": 5.135135135135135,
"grad_norm": 0.80859375,
"learning_rate": 0.00011270178197468789,
"loss": 0.5171,
"step": 760
},
{
"epoch": 5.168918918918919,
"grad_norm": 0.79296875,
"learning_rate": 0.00011153120098775434,
"loss": 0.5174,
"step": 765
},
{
"epoch": 5.202702702702703,
"grad_norm": 0.765625,
"learning_rate": 0.00011035901638322392,
"loss": 0.5288,
"step": 770
},
{
"epoch": 5.236486486486487,
"grad_norm": 0.921875,
"learning_rate": 0.00010918539117411333,
"loss": 0.5389,
"step": 775
},
{
"epoch": 5.27027027027027,
"grad_norm": 0.796875,
"learning_rate": 0.00010801048857378071,
"loss": 0.5254,
"step": 780
},
{
"epoch": 5.304054054054054,
"grad_norm": 0.734375,
"learning_rate": 0.00010683447197322817,
"loss": 0.535,
"step": 785
},
{
"epoch": 5.337837837837838,
"grad_norm": 0.76171875,
"learning_rate": 0.00010565750491837925,
"loss": 0.5152,
"step": 790
},
{
"epoch": 5.371621621621622,
"grad_norm": 0.7734375,
"learning_rate": 0.00010447975108733492,
"loss": 0.5387,
"step": 795
},
{
"epoch": 5.405405405405405,
"grad_norm": 0.8515625,
"learning_rate": 0.00010330137426761135,
"loss": 0.5245,
"step": 800
},
{
"epoch": 5.4391891891891895,
"grad_norm": 0.69140625,
"learning_rate": 0.00010212253833336237,
"loss": 0.5284,
"step": 805
},
{
"epoch": 5.472972972972973,
"grad_norm": 0.7890625,
"learning_rate": 0.00010094340722258969,
"loss": 0.5501,
"step": 810
},
{
"epoch": 5.506756756756757,
"grad_norm": 0.78515625,
"learning_rate": 9.976414491434463e-05,
"loss": 0.5526,
"step": 815
},
{
"epoch": 5.54054054054054,
"grad_norm": 0.76953125,
"learning_rate": 9.858491540592382e-05,
"loss": 0.5313,
"step": 820
},
{
"epoch": 5.574324324324325,
"grad_norm": 0.828125,
"learning_rate": 9.740588269006246e-05,
"loss": 0.5398,
"step": 825
},
{
"epoch": 5.608108108108108,
"grad_norm": 0.7578125,
"learning_rate": 9.622721073212832e-05,
"loss": 0.5228,
"step": 830
},
{
"epoch": 5.641891891891892,
"grad_norm": 0.796875,
"learning_rate": 9.504906344731932e-05,
"loss": 0.5509,
"step": 835
},
{
"epoch": 5.675675675675675,
"grad_norm": 0.8125,
"learning_rate": 9.38716046778684e-05,
"loss": 0.547,
"step": 840
},
{
"epoch": 5.70945945945946,
"grad_norm": 0.7109375,
"learning_rate": 9.269499817025814e-05,
"loss": 0.5351,
"step": 845
},
{
"epoch": 5.743243243243243,
"grad_norm": 0.75390625,
"learning_rate": 9.151940755244912e-05,
"loss": 0.5352,
"step": 850
},
{
"epoch": 5.777027027027027,
"grad_norm": 0.69921875,
"learning_rate": 9.034499631112437e-05,
"loss": 0.5371,
"step": 855
},
{
"epoch": 5.8108108108108105,
"grad_norm": 0.6796875,
"learning_rate": 8.917192776895382e-05,
"loss": 0.5459,
"step": 860
},
{
"epoch": 5.844594594594595,
"grad_norm": 0.80859375,
"learning_rate": 8.800036506188129e-05,
"loss": 0.5324,
"step": 865
},
{
"epoch": 5.878378378378378,
"grad_norm": 0.7578125,
"learning_rate": 8.683047111643763e-05,
"loss": 0.5478,
"step": 870
},
{
"epoch": 5.912162162162162,
"grad_norm": 0.82421875,
"learning_rate": 8.566240862708274e-05,
"loss": 0.5465,
"step": 875
},
{
"epoch": 5.945945945945946,
"grad_norm": 0.90234375,
"learning_rate": 8.449634003358022e-05,
"loss": 0.5341,
"step": 880
},
{
"epoch": 5.97972972972973,
"grad_norm": 0.79296875,
"learning_rate": 8.33324274984071e-05,
"loss": 0.5418,
"step": 885
},
{
"epoch": 6.0,
"eval_loss": 2.4840505123138428,
"eval_runtime": 1.0123,
"eval_samples_per_second": 4.939,
"eval_steps_per_second": 1.976,
"step": 888
},
{
"epoch": 6.013513513513513,
"grad_norm": 0.8515625,
"learning_rate": 8.217083288420241e-05,
"loss": 0.5205,
"step": 890
},
{
"epoch": 6.047297297297297,
"grad_norm": 0.8828125,
"learning_rate": 8.101171773125716e-05,
"loss": 0.4562,
"step": 895
},
{
"epoch": 6.081081081081081,
"grad_norm": 0.83984375,
"learning_rate": 7.985524323504948e-05,
"loss": 0.4565,
"step": 900
},
{
"epoch": 6.114864864864865,
"grad_norm": 0.7578125,
"learning_rate": 7.870157022382735e-05,
"loss": 0.4372,
"step": 905
},
{
"epoch": 6.148648648648648,
"grad_norm": 0.76171875,
"learning_rate": 7.755085913624274e-05,
"loss": 0.4503,
"step": 910
},
{
"epoch": 6.1824324324324325,
"grad_norm": 0.7578125,
"learning_rate": 7.640326999903967e-05,
"loss": 0.449,
"step": 915
},
{
"epoch": 6.216216216216216,
"grad_norm": 0.734375,
"learning_rate": 7.525896240479976e-05,
"loss": 0.4404,
"step": 920
},
{
"epoch": 6.25,
"grad_norm": 0.8359375,
"learning_rate": 7.411809548974792e-05,
"loss": 0.4534,
"step": 925
},
{
"epoch": 6.283783783783784,
"grad_norm": 0.76953125,
"learning_rate": 7.29808279116218e-05,
"loss": 0.4564,
"step": 930
},
{
"epoch": 6.3175675675675675,
"grad_norm": 0.7890625,
"learning_rate": 7.184731782760746e-05,
"loss": 0.4515,
"step": 935
},
{
"epoch": 6.351351351351352,
"grad_norm": 0.91796875,
"learning_rate": 7.071772287234497e-05,
"loss": 0.4481,
"step": 940
},
{
"epoch": 6.385135135135135,
"grad_norm": 0.8046875,
"learning_rate": 6.959220013600641e-05,
"loss": 0.45,
"step": 945
},
{
"epoch": 6.418918918918919,
"grad_norm": 0.8125,
"learning_rate": 6.847090614244977e-05,
"loss": 0.4494,
"step": 950
},
{
"epoch": 6.452702702702703,
"grad_norm": 0.9140625,
"learning_rate": 6.735399682745145e-05,
"loss": 0.4462,
"step": 955
},
{
"epoch": 6.486486486486487,
"grad_norm": 0.83203125,
"learning_rate": 6.624162751702076e-05,
"loss": 0.4597,
"step": 960
},
{
"epoch": 6.52027027027027,
"grad_norm": 0.83203125,
"learning_rate": 6.513395290579901e-05,
"loss": 0.4488,
"step": 965
},
{
"epoch": 6.554054054054054,
"grad_norm": 0.78125,
"learning_rate": 6.403112703554643e-05,
"loss": 0.4635,
"step": 970
},
{
"epoch": 6.587837837837838,
"grad_norm": 0.84375,
"learning_rate": 6.293330327372005e-05,
"loss": 0.4549,
"step": 975
},
{
"epoch": 6.621621621621622,
"grad_norm": 0.83984375,
"learning_rate": 6.184063429214515e-05,
"loss": 0.4614,
"step": 980
},
{
"epoch": 6.655405405405405,
"grad_norm": 0.76171875,
"learning_rate": 6.0753272045783625e-05,
"loss": 0.449,
"step": 985
},
{
"epoch": 6.6891891891891895,
"grad_norm": 0.75390625,
"learning_rate": 5.967136775160187e-05,
"loss": 0.4631,
"step": 990
},
{
"epoch": 6.722972972972973,
"grad_norm": 0.7890625,
"learning_rate": 5.859507186754146e-05,
"loss": 0.4672,
"step": 995
},
{
"epoch": 6.756756756756757,
"grad_norm": 0.82421875,
"learning_rate": 5.752453407159522e-05,
"loss": 0.4552,
"step": 1000
},
{
"epoch": 6.79054054054054,
"grad_norm": 0.84765625,
"learning_rate": 5.645990324099197e-05,
"loss": 0.4807,
"step": 1005
},
{
"epoch": 6.824324324324325,
"grad_norm": 0.73828125,
"learning_rate": 5.540132743149242e-05,
"loss": 0.4661,
"step": 1010
},
{
"epoch": 6.858108108108108,
"grad_norm": 0.75,
"learning_rate": 5.434895385679937e-05,
"loss": 0.4595,
"step": 1015
},
{
"epoch": 6.891891891891892,
"grad_norm": 0.75,
"learning_rate": 5.33029288680852e-05,
"loss": 0.4457,
"step": 1020
},
{
"epoch": 6.925675675675675,
"grad_norm": 0.8046875,
"learning_rate": 5.226339793363898e-05,
"loss": 0.4594,
"step": 1025
},
{
"epoch": 6.95945945945946,
"grad_norm": 0.87890625,
"learning_rate": 5.123050561863657e-05,
"loss": 0.479,
"step": 1030
},
{
"epoch": 6.993243243243243,
"grad_norm": 0.8984375,
"learning_rate": 5.020439556503629e-05,
"loss": 0.4578,
"step": 1035
},
{
"epoch": 7.0,
"eval_loss": 2.6821556091308594,
"eval_runtime": 1.013,
"eval_samples_per_second": 4.936,
"eval_steps_per_second": 1.974,
"step": 1036
},
{
"epoch": 7.027027027027027,
"grad_norm": 0.8125,
"learning_rate": 4.918521047160308e-05,
"loss": 0.3997,
"step": 1040
},
{
"epoch": 7.0608108108108105,
"grad_norm": 0.8671875,
"learning_rate": 4.817309207406346e-05,
"loss": 0.392,
"step": 1045
},
{
"epoch": 7.094594594594595,
"grad_norm": 0.8359375,
"learning_rate": 4.716818112539485e-05,
"loss": 0.3883,
"step": 1050
},
{
"epoch": 7.128378378378378,
"grad_norm": 0.7421875,
"learning_rate": 4.617061737625139e-05,
"loss": 0.3929,
"step": 1055
},
{
"epoch": 7.162162162162162,
"grad_norm": 0.8125,
"learning_rate": 4.518053955552903e-05,
"loss": 0.3921,
"step": 1060
},
{
"epoch": 7.195945945945946,
"grad_norm": 0.80078125,
"learning_rate": 4.419808535107287e-05,
"loss": 0.3865,
"step": 1065
},
{
"epoch": 7.22972972972973,
"grad_norm": 0.79296875,
"learning_rate": 4.322339139052921e-05,
"loss": 0.3812,
"step": 1070
},
{
"epoch": 7.263513513513513,
"grad_norm": 0.8046875,
"learning_rate": 4.2256593222345185e-05,
"loss": 0.3915,
"step": 1075
},
{
"epoch": 7.297297297297297,
"grad_norm": 0.76953125,
"learning_rate": 4.129782529691815e-05,
"loss": 0.396,
"step": 1080
},
{
"epoch": 7.331081081081081,
"grad_norm": 0.79296875,
"learning_rate": 4.034722094789809e-05,
"loss": 0.408,
"step": 1085
},
{
"epoch": 7.364864864864865,
"grad_norm": 0.75390625,
"learning_rate": 3.9404912373645185e-05,
"loss": 0.3819,
"step": 1090
},
{
"epoch": 7.398648648648648,
"grad_norm": 0.80078125,
"learning_rate": 3.8471030618845375e-05,
"loss": 0.3925,
"step": 1095
},
{
"epoch": 7.4324324324324325,
"grad_norm": 0.80859375,
"learning_rate": 3.7545705556286126e-05,
"loss": 0.3932,
"step": 1100
},
{
"epoch": 7.466216216216216,
"grad_norm": 0.79296875,
"learning_rate": 3.662906586879542e-05,
"loss": 0.3964,
"step": 1105
},
{
"epoch": 7.5,
"grad_norm": 0.81640625,
"learning_rate": 3.5721239031346066e-05,
"loss": 0.4121,
"step": 1110
},
{
"epoch": 7.533783783783784,
"grad_norm": 0.76953125,
"learning_rate": 3.48223512933282e-05,
"loss": 0.3911,
"step": 1115
},
{
"epoch": 7.5675675675675675,
"grad_norm": 0.76171875,
"learning_rate": 3.393252766099187e-05,
"loss": 0.3941,
"step": 1120
},
{
"epoch": 7.601351351351351,
"grad_norm": 0.80078125,
"learning_rate": 3.305189188006281e-05,
"loss": 0.4026,
"step": 1125
},
{
"epoch": 7.635135135135135,
"grad_norm": 0.80078125,
"learning_rate": 3.218056641853337e-05,
"loss": 0.3947,
"step": 1130
},
{
"epoch": 7.668918918918919,
"grad_norm": 0.7890625,
"learning_rate": 3.1318672449631284e-05,
"loss": 0.3939,
"step": 1135
},
{
"epoch": 7.702702702702703,
"grad_norm": 0.75390625,
"learning_rate": 3.0466329834968233e-05,
"loss": 0.3939,
"step": 1140
},
{
"epoch": 7.736486486486487,
"grad_norm": 0.80859375,
"learning_rate": 2.9623657107870996e-05,
"loss": 0.3925,
"step": 1145
},
{
"epoch": 7.77027027027027,
"grad_norm": 0.7890625,
"learning_rate": 2.879077145689746e-05,
"loss": 0.3896,
"step": 1150
},
{
"epoch": 7.804054054054054,
"grad_norm": 0.7890625,
"learning_rate": 2.7967788709539233e-05,
"loss": 0.3823,
"step": 1155
},
{
"epoch": 7.837837837837838,
"grad_norm": 0.79296875,
"learning_rate": 2.7154823316113932e-05,
"loss": 0.4006,
"step": 1160
},
{
"epoch": 7.871621621621622,
"grad_norm": 0.7578125,
"learning_rate": 2.6351988333848788e-05,
"loss": 0.3879,
"step": 1165
},
{
"epoch": 7.905405405405405,
"grad_norm": 0.86328125,
"learning_rate": 2.5559395411158115e-05,
"loss": 0.3861,
"step": 1170
},
{
"epoch": 7.9391891891891895,
"grad_norm": 0.76171875,
"learning_rate": 2.4777154772116496e-05,
"loss": 0.4031,
"step": 1175
},
{
"epoch": 7.972972972972973,
"grad_norm": 0.80859375,
"learning_rate": 2.4005375201130274e-05,
"loss": 0.3886,
"step": 1180
},
{
"epoch": 8.0,
"eval_loss": 2.8496615886688232,
"eval_runtime": 1.0122,
"eval_samples_per_second": 4.94,
"eval_steps_per_second": 1.976,
"step": 1184
},
{
"epoch": 8.006756756756756,
"grad_norm": 0.7109375,
"learning_rate": 2.324416402780907e-05,
"loss": 0.3975,
"step": 1185
},
{
"epoch": 8.04054054054054,
"grad_norm": 0.7109375,
"learning_rate": 2.249362711203985e-05,
"loss": 0.3604,
"step": 1190
},
{
"epoch": 8.074324324324325,
"grad_norm": 0.7734375,
"learning_rate": 2.1753868829265046e-05,
"loss": 0.361,
"step": 1195
},
{
"epoch": 8.108108108108109,
"grad_norm": 0.85546875,
"learning_rate": 2.102499205596743e-05,
"loss": 0.3573,
"step": 1200
},
{
"epoch": 8.141891891891891,
"grad_norm": 0.75390625,
"learning_rate": 2.0307098155363236e-05,
"loss": 0.3567,
"step": 1205
},
{
"epoch": 8.175675675675675,
"grad_norm": 0.734375,
"learning_rate": 1.9600286963305957e-05,
"loss": 0.3561,
"step": 1210
},
{
"epoch": 8.20945945945946,
"grad_norm": 0.75,
"learning_rate": 1.8904656774402208e-05,
"loss": 0.3661,
"step": 1215
},
{
"epoch": 8.243243243243244,
"grad_norm": 0.76953125,
"learning_rate": 1.8220304328342252e-05,
"loss": 0.3729,
"step": 1220
},
{
"epoch": 8.277027027027026,
"grad_norm": 0.7578125,
"learning_rate": 1.754732479644655e-05,
"loss": 0.3614,
"step": 1225
},
{
"epoch": 8.31081081081081,
"grad_norm": 0.734375,
"learning_rate": 1.688581176843066e-05,
"loss": 0.3608,
"step": 1230
},
{
"epoch": 8.344594594594595,
"grad_norm": 0.77734375,
"learning_rate": 1.6235857239389696e-05,
"loss": 0.368,
"step": 1235
},
{
"epoch": 8.378378378378379,
"grad_norm": 0.7734375,
"learning_rate": 1.5597551597004966e-05,
"loss": 0.3453,
"step": 1240
},
{
"epoch": 8.412162162162161,
"grad_norm": 0.71484375,
"learning_rate": 1.4970983608973942e-05,
"loss": 0.3594,
"step": 1245
},
{
"epoch": 8.445945945945946,
"grad_norm": 0.74609375,
"learning_rate": 1.4356240410665433e-05,
"loss": 0.3673,
"step": 1250
},
{
"epoch": 8.47972972972973,
"grad_norm": 0.73828125,
"learning_rate": 1.3753407493001968e-05,
"loss": 0.3618,
"step": 1255
},
{
"epoch": 8.513513513513514,
"grad_norm": 0.74609375,
"learning_rate": 1.3162568690570743e-05,
"loss": 0.3462,
"step": 1260
},
{
"epoch": 8.547297297297296,
"grad_norm": 0.79296875,
"learning_rate": 1.2583806169964961e-05,
"loss": 0.3717,
"step": 1265
},
{
"epoch": 8.58108108108108,
"grad_norm": 0.73046875,
"learning_rate": 1.2017200418357078e-05,
"loss": 0.3506,
"step": 1270
},
{
"epoch": 8.614864864864865,
"grad_norm": 0.7578125,
"learning_rate": 1.14628302323056e-05,
"loss": 0.3653,
"step": 1275
},
{
"epoch": 8.64864864864865,
"grad_norm": 0.80078125,
"learning_rate": 1.0920772706797167e-05,
"loss": 0.3672,
"step": 1280
},
{
"epoch": 8.682432432432432,
"grad_norm": 0.7421875,
"learning_rate": 1.0391103224524956e-05,
"loss": 0.3627,
"step": 1285
},
{
"epoch": 8.716216216216216,
"grad_norm": 0.73046875,
"learning_rate": 9.873895445405523e-06,
"loss": 0.3608,
"step": 1290
},
{
"epoch": 8.75,
"grad_norm": 0.796875,
"learning_rate": 9.369221296335006e-06,
"loss": 0.3657,
"step": 1295
},
{
"epoch": 8.783783783783784,
"grad_norm": 0.72265625,
"learning_rate": 8.87715096118642e-06,
"loss": 0.3412,
"step": 1300
},
{
"epoch": 8.817567567567568,
"grad_norm": 0.7421875,
"learning_rate": 8.397752871049436e-06,
"loss": 0.3612,
"step": 1305
},
{
"epoch": 8.85135135135135,
"grad_norm": 0.76171875,
"learning_rate": 7.931093694713687e-06,
"loss": 0.3552,
"step": 1310
},
{
"epoch": 8.885135135135135,
"grad_norm": 0.7734375,
"learning_rate": 7.477238329397418e-06,
"loss": 0.3623,
"step": 1315
},
{
"epoch": 8.91891891891892,
"grad_norm": 0.75390625,
"learning_rate": 7.03624989172228e-06,
"loss": 0.3563,
"step": 1320
},
{
"epoch": 8.952702702702704,
"grad_norm": 0.82421875,
"learning_rate": 6.608189708935964e-06,
"loss": 0.3574,
"step": 1325
},
{
"epoch": 8.986486486486486,
"grad_norm": 0.75390625,
"learning_rate": 6.1931173103834115e-06,
"loss": 0.3611,
"step": 1330
},
{
"epoch": 9.0,
"eval_loss": 2.9868175983428955,
"eval_runtime": 1.0121,
"eval_samples_per_second": 4.94,
"eval_steps_per_second": 1.976,
"step": 1332
},
{
"epoch": 9.02027027027027,
"grad_norm": 0.75390625,
"learning_rate": 5.791090419228351e-06,
"loss": 0.3557,
"step": 1335
},
{
"epoch": 9.054054054054054,
"grad_norm": 0.71484375,
"learning_rate": 5.402164944425758e-06,
"loss": 0.3503,
"step": 1340
},
{
"epoch": 9.087837837837839,
"grad_norm": 0.7265625,
"learning_rate": 5.026394972946813e-06,
"loss": 0.3513,
"step": 1345
},
{
"epoch": 9.121621621621621,
"grad_norm": 0.703125,
"learning_rate": 4.66383276225707e-06,
"loss": 0.354,
"step": 1350
},
{
"epoch": 9.155405405405405,
"grad_norm": 0.796875,
"learning_rate": 4.314528733049206e-06,
"loss": 0.3452,
"step": 1355
},
{
"epoch": 9.18918918918919,
"grad_norm": 0.73046875,
"learning_rate": 3.9785314622310495e-06,
"loss": 0.3555,
"step": 1360
},
{
"epoch": 9.222972972972974,
"grad_norm": 0.73828125,
"learning_rate": 3.655887676170222e-06,
"loss": 0.3437,
"step": 1365
},
{
"epoch": 9.256756756756756,
"grad_norm": 0.71484375,
"learning_rate": 3.3466422441958634e-06,
"loss": 0.3448,
"step": 1370
},
{
"epoch": 9.29054054054054,
"grad_norm": 0.765625,
"learning_rate": 3.050838172358883e-06,
"loss": 0.3558,
"step": 1375
},
{
"epoch": 9.324324324324325,
"grad_norm": 0.76171875,
"learning_rate": 2.7685165974510986e-06,
"loss": 0.3604,
"step": 1380
},
{
"epoch": 9.358108108108109,
"grad_norm": 0.75,
"learning_rate": 2.499716781284556e-06,
"loss": 0.3458,
"step": 1385
},
{
"epoch": 9.391891891891891,
"grad_norm": 0.765625,
"learning_rate": 2.2444761052313856e-06,
"loss": 0.3534,
"step": 1390
},
{
"epoch": 9.425675675675675,
"grad_norm": 0.75390625,
"learning_rate": 2.002830065025263e-06,
"loss": 0.3618,
"step": 1395
},
{
"epoch": 9.45945945945946,
"grad_norm": 0.734375,
"learning_rate": 1.7748122658251876e-06,
"loss": 0.3562,
"step": 1400
},
{
"epoch": 9.493243243243244,
"grad_norm": 0.73828125,
"learning_rate": 1.56045441754199e-06,
"loss": 0.341,
"step": 1405
},
{
"epoch": 9.527027027027026,
"grad_norm": 0.8125,
"learning_rate": 1.3597863304285475e-06,
"loss": 0.3735,
"step": 1410
},
{
"epoch": 9.56081081081081,
"grad_norm": 0.70703125,
"learning_rate": 1.1728359109341446e-06,
"loss": 0.3583,
"step": 1415
},
{
"epoch": 9.594594594594595,
"grad_norm": 0.76171875,
"learning_rate": 9.996291578236228e-07,
"loss": 0.3456,
"step": 1420
},
{
"epoch": 9.628378378378379,
"grad_norm": 0.7734375,
"learning_rate": 8.401901585616823e-07,
"loss": 0.3568,
"step": 1425
},
{
"epoch": 9.662162162162161,
"grad_norm": 0.75,
"learning_rate": 6.945410859632295e-07,
"loss": 0.3518,
"step": 1430
},
{
"epoch": 9.695945945945946,
"grad_norm": 0.7421875,
"learning_rate": 5.627021951097545e-07,
"loss": 0.3585,
"step": 1435
},
{
"epoch": 9.72972972972973,
"grad_norm": 0.7578125,
"learning_rate": 4.44691820532539e-07,
"loss": 0.3471,
"step": 1440
},
{
"epoch": 9.763513513513514,
"grad_norm": 0.7421875,
"learning_rate": 3.405263736629416e-07,
"loss": 0.3496,
"step": 1445
},
{
"epoch": 9.797297297297296,
"grad_norm": 0.7421875,
"learning_rate": 2.5022034055003364e-07,
"loss": 0.3512,
"step": 1450
},
{
"epoch": 9.83108108108108,
"grad_norm": 0.7421875,
"learning_rate": 1.7378627984612207e-07,
"loss": 0.3498,
"step": 1455
},
{
"epoch": 9.864864864864865,
"grad_norm": 0.7421875,
"learning_rate": 1.1123482106021322e-07,
"loss": 0.3514,
"step": 1460
},
{
"epoch": 9.89864864864865,
"grad_norm": 0.75,
"learning_rate": 6.25746630798063e-08,
"loss": 0.3613,
"step": 1465
},
{
"epoch": 9.932432432432432,
"grad_norm": 0.75390625,
"learning_rate": 2.7812572961127824e-08,
"loss": 0.3546,
"step": 1470
},
{
"epoch": 9.966216216216216,
"grad_norm": 0.75390625,
"learning_rate": 6.953384988095391e-09,
"loss": 0.3575,
"step": 1475
},
{
"epoch": 10.0,
"grad_norm": 0.71484375,
"learning_rate": 0.0,
"loss": 0.3501,
"step": 1480
},
{
"epoch": 10.0,
"eval_loss": 3.0043678283691406,
"eval_runtime": 1.0152,
"eval_samples_per_second": 4.925,
"eval_steps_per_second": 1.97,
"step": 1480
},
{
"epoch": 10.0,
"step": 1480,
"total_flos": 2.2627322959180595e+18,
"train_loss": 1.3636517344294368,
"train_runtime": 11668.1199,
"train_samples_per_second": 2.027,
"train_steps_per_second": 0.127
}
],
"logging_steps": 5,
"max_steps": 1480,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.2627322959180595e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}