{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 42.780748663101605,
"eval_steps": 400,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0213903743315508,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 1.8558,
"step": 1
},
{
"epoch": 0.21390374331550802,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 2.0957,
"step": 10
},
{
"epoch": 0.42780748663101603,
"grad_norm": 63.96761703491211,
"learning_rate": 8e-08,
"loss": 2.1274,
"step": 20
},
{
"epoch": 0.6417112299465241,
"grad_norm": 56.0494270324707,
"learning_rate": 4.4e-07,
"loss": 2.0513,
"step": 30
},
{
"epoch": 0.8556149732620321,
"grad_norm": 31.82117462158203,
"learning_rate": 8.400000000000001e-07,
"loss": 1.6275,
"step": 40
},
{
"epoch": 1.0695187165775402,
"grad_norm": 27.546764373779297,
"learning_rate": 1.2400000000000002e-06,
"loss": 1.3922,
"step": 50
},
{
"epoch": 1.2834224598930482,
"grad_norm": 27.145185470581055,
"learning_rate": 1.6400000000000002e-06,
"loss": 1.0424,
"step": 60
},
{
"epoch": 1.4973262032085561,
"grad_norm": 33.02227020263672,
"learning_rate": 2.04e-06,
"loss": 1.0434,
"step": 70
},
{
"epoch": 1.7112299465240641,
"grad_norm": 23.329740524291992,
"learning_rate": 2.4400000000000004e-06,
"loss": 1.0585,
"step": 80
},
{
"epoch": 1.9251336898395723,
"grad_norm": 26.408071517944336,
"learning_rate": 2.84e-06,
"loss": 0.8849,
"step": 90
},
{
"epoch": 2.1390374331550803,
"grad_norm": 17.790225982666016,
"learning_rate": 3.2400000000000003e-06,
"loss": 0.5427,
"step": 100
},
{
"epoch": 2.3529411764705883,
"grad_norm": 29.846942901611328,
"learning_rate": 3.6400000000000003e-06,
"loss": 0.2784,
"step": 110
},
{
"epoch": 2.5668449197860963,
"grad_norm": 20.77630043029785,
"learning_rate": 4.04e-06,
"loss": 0.2569,
"step": 120
},
{
"epoch": 2.7807486631016043,
"grad_norm": 33.25271987915039,
"learning_rate": 4.440000000000001e-06,
"loss": 0.1836,
"step": 130
},
{
"epoch": 2.9946524064171123,
"grad_norm": 66.23503875732422,
"learning_rate": 4.84e-06,
"loss": 0.3042,
"step": 140
},
{
"epoch": 3.2085561497326203,
"grad_norm": 4.618898868560791,
"learning_rate": 5.240000000000001e-06,
"loss": 0.0455,
"step": 150
},
{
"epoch": 3.4224598930481283,
"grad_norm": 5.856906890869141,
"learning_rate": 5.64e-06,
"loss": 0.0558,
"step": 160
},
{
"epoch": 3.6363636363636362,
"grad_norm": 25.529407501220703,
"learning_rate": 6.040000000000001e-06,
"loss": 0.0835,
"step": 170
},
{
"epoch": 3.8502673796791442,
"grad_norm": 16.971477508544922,
"learning_rate": 6.440000000000001e-06,
"loss": 0.1126,
"step": 180
},
{
"epoch": 4.064171122994653,
"grad_norm": 9.28547191619873,
"learning_rate": 6.8400000000000014e-06,
"loss": 0.0838,
"step": 190
},
{
"epoch": 4.278074866310161,
"grad_norm": 17.296798706054688,
"learning_rate": 7.24e-06,
"loss": 0.0518,
"step": 200
},
{
"epoch": 4.491978609625669,
"grad_norm": 12.538177490234375,
"learning_rate": 7.640000000000001e-06,
"loss": 0.0532,
"step": 210
},
{
"epoch": 4.705882352941177,
"grad_norm": 24.446659088134766,
"learning_rate": 8.040000000000001e-06,
"loss": 0.1583,
"step": 220
},
{
"epoch": 4.919786096256685,
"grad_norm": 25.139781951904297,
"learning_rate": 8.44e-06,
"loss": 0.0894,
"step": 230
},
{
"epoch": 5.133689839572193,
"grad_norm": 2.361607074737549,
"learning_rate": 8.84e-06,
"loss": 0.0531,
"step": 240
},
{
"epoch": 5.347593582887701,
"grad_norm": 4.768627643585205,
"learning_rate": 9.240000000000001e-06,
"loss": 0.0664,
"step": 250
},
{
"epoch": 5.561497326203209,
"grad_norm": 37.02785873413086,
"learning_rate": 9.640000000000001e-06,
"loss": 0.1502,
"step": 260
},
{
"epoch": 5.775401069518717,
"grad_norm": 48.693763732910156,
"learning_rate": 1e-05,
"loss": 0.1953,
"step": 270
},
{
"epoch": 5.989304812834225,
"grad_norm": 37.84030532836914,
"learning_rate": 1e-05,
"loss": 0.1806,
"step": 280
},
{
"epoch": 6.2032085561497325,
"grad_norm": 39.74201965332031,
"learning_rate": 1e-05,
"loss": 0.0866,
"step": 290
},
{
"epoch": 6.4171122994652405,
"grad_norm": 7.8511834144592285,
"learning_rate": 1e-05,
"loss": 0.0994,
"step": 300
},
{
"epoch": 6.6310160427807485,
"grad_norm": 54.63271713256836,
"learning_rate": 1e-05,
"loss": 0.1712,
"step": 310
},
{
"epoch": 6.8449197860962565,
"grad_norm": 44.596153259277344,
"learning_rate": 1e-05,
"loss": 0.1442,
"step": 320
},
{
"epoch": 7.0588235294117645,
"grad_norm": 5.094964504241943,
"learning_rate": 1e-05,
"loss": 0.0526,
"step": 330
},
{
"epoch": 7.2727272727272725,
"grad_norm": 12.109665870666504,
"learning_rate": 1e-05,
"loss": 0.03,
"step": 340
},
{
"epoch": 7.4866310160427805,
"grad_norm": 34.3682975769043,
"learning_rate": 1e-05,
"loss": 0.0759,
"step": 350
},
{
"epoch": 7.7005347593582885,
"grad_norm": 2.3397929668426514,
"learning_rate": 1e-05,
"loss": 0.0685,
"step": 360
},
{
"epoch": 7.9144385026737964,
"grad_norm": 21.006542205810547,
"learning_rate": 1e-05,
"loss": 0.0637,
"step": 370
},
{
"epoch": 8.128342245989305,
"grad_norm": 2.5418567657470703,
"learning_rate": 1e-05,
"loss": 0.057,
"step": 380
},
{
"epoch": 8.342245989304812,
"grad_norm": 18.59604263305664,
"learning_rate": 1e-05,
"loss": 0.1096,
"step": 390
},
{
"epoch": 8.556149732620321,
"grad_norm": 52.85743713378906,
"learning_rate": 1e-05,
"loss": 0.1422,
"step": 400
},
{
"epoch": 8.556149732620321,
"eval_accuracy": 0.4,
"eval_loss": 4.6875,
"eval_runtime": 0.8724,
"eval_samples_per_second": 11.462,
"eval_steps_per_second": 1.146,
"step": 400
},
{
"epoch": 8.770053475935828,
"grad_norm": 36.897674560546875,
"learning_rate": 1e-05,
"loss": 0.1633,
"step": 410
},
{
"epoch": 8.983957219251337,
"grad_norm": 16.528366088867188,
"learning_rate": 1e-05,
"loss": 0.0336,
"step": 420
},
{
"epoch": 9.197860962566844,
"grad_norm": 87.09658813476562,
"learning_rate": 1e-05,
"loss": 0.1071,
"step": 430
},
{
"epoch": 9.411764705882353,
"grad_norm": 4.741786479949951,
"learning_rate": 1e-05,
"loss": 0.0701,
"step": 440
},
{
"epoch": 9.62566844919786,
"grad_norm": 12.09588623046875,
"learning_rate": 1e-05,
"loss": 0.0746,
"step": 450
},
{
"epoch": 9.83957219251337,
"grad_norm": 32.604068756103516,
"learning_rate": 1e-05,
"loss": 0.0872,
"step": 460
},
{
"epoch": 10.053475935828876,
"grad_norm": 4.408775806427002,
"learning_rate": 1e-05,
"loss": 0.1084,
"step": 470
},
{
"epoch": 10.267379679144385,
"grad_norm": 22.274890899658203,
"learning_rate": 1e-05,
"loss": 0.0297,
"step": 480
},
{
"epoch": 10.481283422459892,
"grad_norm": 58.223323822021484,
"learning_rate": 1e-05,
"loss": 0.0637,
"step": 490
},
{
"epoch": 10.695187165775401,
"grad_norm": 2.7027640342712402,
"learning_rate": 1e-05,
"loss": 0.1206,
"step": 500
},
{
"epoch": 10.909090909090908,
"grad_norm": 43.47382736206055,
"learning_rate": 1e-05,
"loss": 0.045,
"step": 510
},
{
"epoch": 11.122994652406417,
"grad_norm": 1.6665784120559692,
"learning_rate": 1e-05,
"loss": 0.045,
"step": 520
},
{
"epoch": 11.336898395721924,
"grad_norm": 0.5738474130630493,
"learning_rate": 1e-05,
"loss": 0.008,
"step": 530
},
{
"epoch": 11.550802139037433,
"grad_norm": 16.410573959350586,
"learning_rate": 1e-05,
"loss": 0.0995,
"step": 540
},
{
"epoch": 11.764705882352942,
"grad_norm": 0.7015154361724854,
"learning_rate": 1e-05,
"loss": 0.0313,
"step": 550
},
{
"epoch": 11.97860962566845,
"grad_norm": 11.851508140563965,
"learning_rate": 1e-05,
"loss": 0.0724,
"step": 560
},
{
"epoch": 12.192513368983958,
"grad_norm": 16.754575729370117,
"learning_rate": 1e-05,
"loss": 0.0272,
"step": 570
},
{
"epoch": 12.406417112299465,
"grad_norm": 15.92441177368164,
"learning_rate": 1e-05,
"loss": 0.0241,
"step": 580
},
{
"epoch": 12.620320855614974,
"grad_norm": 0.05043609067797661,
"learning_rate": 1e-05,
"loss": 0.042,
"step": 590
},
{
"epoch": 12.834224598930481,
"grad_norm": 0.3494648337364197,
"learning_rate": 1e-05,
"loss": 0.0083,
"step": 600
},
{
"epoch": 13.04812834224599,
"grad_norm": 23.260435104370117,
"learning_rate": 1e-05,
"loss": 0.026,
"step": 610
},
{
"epoch": 13.262032085561497,
"grad_norm": 5.789357662200928,
"learning_rate": 1e-05,
"loss": 0.0224,
"step": 620
},
{
"epoch": 13.475935828877006,
"grad_norm": 0.056954510509967804,
"learning_rate": 1e-05,
"loss": 0.0642,
"step": 630
},
{
"epoch": 13.689839572192513,
"grad_norm": 72.28430938720703,
"learning_rate": 1e-05,
"loss": 0.0366,
"step": 640
},
{
"epoch": 13.903743315508022,
"grad_norm": 0.5189899802207947,
"learning_rate": 1e-05,
"loss": 0.033,
"step": 650
},
{
"epoch": 14.117647058823529,
"grad_norm": 0.07448354363441467,
"learning_rate": 1e-05,
"loss": 0.0088,
"step": 660
},
{
"epoch": 14.331550802139038,
"grad_norm": 0.05018957331776619,
"learning_rate": 1e-05,
"loss": 0.0082,
"step": 670
},
{
"epoch": 14.545454545454545,
"grad_norm": 33.61125183105469,
"learning_rate": 1e-05,
"loss": 0.0753,
"step": 680
},
{
"epoch": 14.759358288770054,
"grad_norm": 0.023922119289636612,
"learning_rate": 1e-05,
"loss": 0.0267,
"step": 690
},
{
"epoch": 14.973262032085561,
"grad_norm": 0.04713653773069382,
"learning_rate": 1e-05,
"loss": 0.096,
"step": 700
},
{
"epoch": 15.18716577540107,
"grad_norm": 0.0457034632563591,
"learning_rate": 1e-05,
"loss": 0.0083,
"step": 710
},
{
"epoch": 15.401069518716577,
"grad_norm": 0.011708029545843601,
"learning_rate": 1e-05,
"loss": 0.072,
"step": 720
},
{
"epoch": 15.614973262032086,
"grad_norm": 0.3412806987762451,
"learning_rate": 1e-05,
"loss": 0.0032,
"step": 730
},
{
"epoch": 15.828877005347593,
"grad_norm": 1.667181372642517,
"learning_rate": 1e-05,
"loss": 0.0512,
"step": 740
},
{
"epoch": 16.0427807486631,
"grad_norm": 0.004597651772201061,
"learning_rate": 1e-05,
"loss": 0.0555,
"step": 750
},
{
"epoch": 16.25668449197861,
"grad_norm": 0.01679002121090889,
"learning_rate": 1e-05,
"loss": 0.0294,
"step": 760
},
{
"epoch": 16.470588235294116,
"grad_norm": 24.51487922668457,
"learning_rate": 1e-05,
"loss": 0.0979,
"step": 770
},
{
"epoch": 16.684491978609625,
"grad_norm": 0.010406852699816227,
"learning_rate": 1e-05,
"loss": 0.0361,
"step": 780
},
{
"epoch": 16.898395721925134,
"grad_norm": 4.567187786102295,
"learning_rate": 1e-05,
"loss": 0.1229,
"step": 790
},
{
"epoch": 17.112299465240643,
"grad_norm": 1.7842605113983154,
"learning_rate": 1e-05,
"loss": 0.0121,
"step": 800
},
{
"epoch": 17.112299465240643,
"eval_accuracy": 0.5,
"eval_loss": 6.6640625,
"eval_runtime": 0.8689,
"eval_samples_per_second": 11.509,
"eval_steps_per_second": 1.151,
"step": 800
},
{
"epoch": 17.32620320855615,
"grad_norm": 0.061683282256126404,
"learning_rate": 1e-05,
"loss": 0.0079,
"step": 810
},
{
"epoch": 17.540106951871657,
"grad_norm": 0.11749367415904999,
"learning_rate": 1e-05,
"loss": 0.0857,
"step": 820
},
{
"epoch": 17.754010695187166,
"grad_norm": 0.02787230722606182,
"learning_rate": 1e-05,
"loss": 0.0051,
"step": 830
},
{
"epoch": 17.967914438502675,
"grad_norm": 0.015304004773497581,
"learning_rate": 1e-05,
"loss": 0.0882,
"step": 840
},
{
"epoch": 18.181818181818183,
"grad_norm": 0.41026026010513306,
"learning_rate": 1e-05,
"loss": 0.0002,
"step": 850
},
{
"epoch": 18.39572192513369,
"grad_norm": 0.012662127614021301,
"learning_rate": 1e-05,
"loss": 0.0013,
"step": 860
},
{
"epoch": 18.609625668449198,
"grad_norm": 0.10844732820987701,
"learning_rate": 1e-05,
"loss": 0.0062,
"step": 870
},
{
"epoch": 18.823529411764707,
"grad_norm": 2.6394922733306885,
"learning_rate": 1e-05,
"loss": 0.0335,
"step": 880
},
{
"epoch": 19.037433155080215,
"grad_norm": 0.28504815697669983,
"learning_rate": 1e-05,
"loss": 0.0092,
"step": 890
},
{
"epoch": 19.25133689839572,
"grad_norm": 0.03371066227555275,
"learning_rate": 1e-05,
"loss": 0.0001,
"step": 900
},
{
"epoch": 19.46524064171123,
"grad_norm": 0.01855628192424774,
"learning_rate": 1e-05,
"loss": 0.0046,
"step": 910
},
{
"epoch": 19.67914438502674,
"grad_norm": 0.008938683196902275,
"learning_rate": 1e-05,
"loss": 0.0014,
"step": 920
},
{
"epoch": 19.893048128342247,
"grad_norm": 0.0025397331919521093,
"learning_rate": 1e-05,
"loss": 0.0023,
"step": 930
},
{
"epoch": 20.106951871657753,
"grad_norm": 0.0024771266616880894,
"learning_rate": 1e-05,
"loss": 0.001,
"step": 940
},
{
"epoch": 20.32085561497326,
"grad_norm": 56.46254348754883,
"learning_rate": 1e-05,
"loss": 0.0533,
"step": 950
},
{
"epoch": 20.53475935828877,
"grad_norm": 0.046234406530857086,
"learning_rate": 1e-05,
"loss": 0.0113,
"step": 960
},
{
"epoch": 20.74866310160428,
"grad_norm": 0.03888735547661781,
"learning_rate": 1e-05,
"loss": 0.0001,
"step": 970
},
{
"epoch": 20.962566844919785,
"grad_norm": 4.142932415008545,
"learning_rate": 1e-05,
"loss": 0.0078,
"step": 980
},
{
"epoch": 21.176470588235293,
"grad_norm": 0.01926487125456333,
"learning_rate": 1e-05,
"loss": 0.0015,
"step": 990
},
{
"epoch": 21.390374331550802,
"grad_norm": 0.03864801302552223,
"learning_rate": 1e-05,
"loss": 0.0032,
"step": 1000
},
{
"epoch": 21.60427807486631,
"grad_norm": 0.40588998794555664,
"learning_rate": 1e-05,
"loss": 0.0003,
"step": 1010
},
{
"epoch": 21.818181818181817,
"grad_norm": 0.2934487760066986,
"learning_rate": 1e-05,
"loss": 0.0081,
"step": 1020
},
{
"epoch": 22.032085561497325,
"grad_norm": 0.004641254432499409,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 1030
},
{
"epoch": 22.245989304812834,
"grad_norm": 14.084956169128418,
"learning_rate": 1e-05,
"loss": 0.0018,
"step": 1040
},
{
"epoch": 22.459893048128343,
"grad_norm": 1.7572118043899536,
"learning_rate": 1e-05,
"loss": 0.001,
"step": 1050
},
{
"epoch": 22.67379679144385,
"grad_norm": 2.7290031909942627,
"learning_rate": 1e-05,
"loss": 0.0002,
"step": 1060
},
{
"epoch": 22.887700534759357,
"grad_norm": 0.005051525309681892,
"learning_rate": 1e-05,
"loss": 0.0007,
"step": 1070
},
{
"epoch": 23.101604278074866,
"grad_norm": 0.002047200920060277,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 1080
},
{
"epoch": 23.315508021390375,
"grad_norm": 0.0035107170697301626,
"learning_rate": 1e-05,
"loss": 0.0494,
"step": 1090
},
{
"epoch": 23.529411764705884,
"grad_norm": 0.004472868517041206,
"learning_rate": 1e-05,
"loss": 0.0034,
"step": 1100
},
{
"epoch": 23.74331550802139,
"grad_norm": 0.08594793826341629,
"learning_rate": 1e-05,
"loss": 0.0063,
"step": 1110
},
{
"epoch": 23.9572192513369,
"grad_norm": 0.752574622631073,
"learning_rate": 1e-05,
"loss": 0.0002,
"step": 1120
},
{
"epoch": 24.171122994652407,
"grad_norm": 0.021934740245342255,
"learning_rate": 1e-05,
"loss": 0.0019,
"step": 1130
},
{
"epoch": 24.385026737967916,
"grad_norm": 0.007998108863830566,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 1140
},
{
"epoch": 24.59893048128342,
"grad_norm": 0.010390130802989006,
"learning_rate": 1e-05,
"loss": 0.0072,
"step": 1150
},
{
"epoch": 24.81283422459893,
"grad_norm": 0.002071298658847809,
"learning_rate": 1e-05,
"loss": 0.0013,
"step": 1160
},
{
"epoch": 25.02673796791444,
"grad_norm": 0.0045625255443155766,
"learning_rate": 1e-05,
"loss": 0.002,
"step": 1170
},
{
"epoch": 25.240641711229948,
"grad_norm": 0.017212701961398125,
"learning_rate": 1e-05,
"loss": 0.0048,
"step": 1180
},
{
"epoch": 25.454545454545453,
"grad_norm": 12.734994888305664,
"learning_rate": 1e-05,
"loss": 0.0018,
"step": 1190
},
{
"epoch": 25.668449197860962,
"grad_norm": 0.0035849984269589186,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 1200
},
{
"epoch": 25.668449197860962,
"eval_accuracy": 0.8,
"eval_loss": 4.71484375,
"eval_runtime": 0.8712,
"eval_samples_per_second": 11.478,
"eval_steps_per_second": 1.148,
"step": 1200
},
{
"epoch": 25.88235294117647,
"grad_norm": 0.005425190087407827,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 1210
},
{
"epoch": 26.09625668449198,
"grad_norm": 0.007795536890625954,
"learning_rate": 1e-05,
"loss": 0.0034,
"step": 1220
},
{
"epoch": 26.310160427807485,
"grad_norm": 0.00858351681381464,
"learning_rate": 1e-05,
"loss": 0.0032,
"step": 1230
},
{
"epoch": 26.524064171122994,
"grad_norm": 0.0035222836304455996,
"learning_rate": 1e-05,
"loss": 0.0103,
"step": 1240
},
{
"epoch": 26.737967914438503,
"grad_norm": 0.004781092517077923,
"learning_rate": 1e-05,
"loss": 0.0002,
"step": 1250
},
{
"epoch": 26.951871657754012,
"grad_norm": 0.0031953530851751566,
"learning_rate": 1e-05,
"loss": 0.0013,
"step": 1260
},
{
"epoch": 27.165775401069517,
"grad_norm": 0.00516524026170373,
"learning_rate": 1e-05,
"loss": 0.0007,
"step": 1270
},
{
"epoch": 27.379679144385026,
"grad_norm": 0.004281027242541313,
"learning_rate": 1e-05,
"loss": 0.0106,
"step": 1280
},
{
"epoch": 27.593582887700535,
"grad_norm": 0.0015023744199424982,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 1290
},
{
"epoch": 27.807486631016044,
"grad_norm": 0.005484332330524921,
"learning_rate": 1e-05,
"loss": 0.0023,
"step": 1300
},
{
"epoch": 28.02139037433155,
"grad_norm": 0.0035302124451845884,
"learning_rate": 1e-05,
"loss": 0.0001,
"step": 1310
},
{
"epoch": 28.235294117647058,
"grad_norm": 0.0033441612031310797,
"learning_rate": 1e-05,
"loss": 0.0025,
"step": 1320
},
{
"epoch": 28.449197860962567,
"grad_norm": 0.0035446221008896828,
"learning_rate": 1e-05,
"loss": 0.0014,
"step": 1330
},
{
"epoch": 28.663101604278076,
"grad_norm": 0.010898416861891747,
"learning_rate": 1e-05,
"loss": 0.002,
"step": 1340
},
{
"epoch": 28.87700534759358,
"grad_norm": 0.008727981708943844,
"learning_rate": 1e-05,
"loss": 0.1828,
"step": 1350
},
{
"epoch": 29.09090909090909,
"grad_norm": 0.004256559070199728,
"learning_rate": 1e-05,
"loss": 0.0431,
"step": 1360
},
{
"epoch": 29.3048128342246,
"grad_norm": 0.21178306639194489,
"learning_rate": 1e-05,
"loss": 0.0099,
"step": 1370
},
{
"epoch": 29.518716577540108,
"grad_norm": 0.2352713793516159,
"learning_rate": 1e-05,
"loss": 0.0835,
"step": 1380
},
{
"epoch": 29.732620320855617,
"grad_norm": 0.03791838884353638,
"learning_rate": 1e-05,
"loss": 0.0522,
"step": 1390
},
{
"epoch": 29.946524064171122,
"grad_norm": 0.04343949630856514,
"learning_rate": 1e-05,
"loss": 0.0004,
"step": 1400
},
{
"epoch": 30.16042780748663,
"grad_norm": 2.7350242137908936,
"learning_rate": 1e-05,
"loss": 0.003,
"step": 1410
},
{
"epoch": 30.37433155080214,
"grad_norm": 0.030035167932510376,
"learning_rate": 1e-05,
"loss": 0.006,
"step": 1420
},
{
"epoch": 30.58823529411765,
"grad_norm": 3.1150763034820557,
"learning_rate": 1e-05,
"loss": 0.0016,
"step": 1430
},
{
"epoch": 30.802139037433154,
"grad_norm": 0.028651399537920952,
"learning_rate": 1e-05,
"loss": 0.0016,
"step": 1440
},
{
"epoch": 31.016042780748663,
"grad_norm": 37.155609130859375,
"learning_rate": 1e-05,
"loss": 0.0128,
"step": 1450
},
{
"epoch": 31.22994652406417,
"grad_norm": 0.002968219807371497,
"learning_rate": 1e-05,
"loss": 0.0141,
"step": 1460
},
{
"epoch": 31.44385026737968,
"grad_norm": 5.24705696105957,
"learning_rate": 1e-05,
"loss": 0.0121,
"step": 1470
},
{
"epoch": 31.657754010695186,
"grad_norm": 0.0024348751176148653,
"learning_rate": 1e-05,
"loss": 0.1083,
"step": 1480
},
{
"epoch": 31.871657754010695,
"grad_norm": 0.5254661440849304,
"learning_rate": 1e-05,
"loss": 0.1179,
"step": 1490
},
{
"epoch": 32.0855614973262,
"grad_norm": 72.65499114990234,
"learning_rate": 1e-05,
"loss": 0.1361,
"step": 1500
},
{
"epoch": 32.29946524064171,
"grad_norm": 14.370610237121582,
"learning_rate": 1e-05,
"loss": 0.0346,
"step": 1510
},
{
"epoch": 32.51336898395722,
"grad_norm": 20.988916397094727,
"learning_rate": 1e-05,
"loss": 0.0025,
"step": 1520
},
{
"epoch": 32.72727272727273,
"grad_norm": 0.002341994782909751,
"learning_rate": 1e-05,
"loss": 0.0004,
"step": 1530
},
{
"epoch": 32.94117647058823,
"grad_norm": 2.404449701309204,
"learning_rate": 1e-05,
"loss": 0.013,
"step": 1540
},
{
"epoch": 33.155080213903744,
"grad_norm": 0.005042242351919413,
"learning_rate": 1e-05,
"loss": 0.0011,
"step": 1550
},
{
"epoch": 33.36898395721925,
"grad_norm": 0.00044189911568537354,
"learning_rate": 1e-05,
"loss": 0.0139,
"step": 1560
},
{
"epoch": 33.58288770053476,
"grad_norm": 0.0014409746509045362,
"learning_rate": 1e-05,
"loss": 0.0284,
"step": 1570
},
{
"epoch": 33.79679144385027,
"grad_norm": 2.608125686645508,
"learning_rate": 1e-05,
"loss": 0.015,
"step": 1580
},
{
"epoch": 34.01069518716577,
"grad_norm": 0.0005788679700344801,
"learning_rate": 1e-05,
"loss": 0.0201,
"step": 1590
},
{
"epoch": 34.224598930481285,
"grad_norm": 75.40068817138672,
"learning_rate": 1e-05,
"loss": 0.028,
"step": 1600
},
{
"epoch": 34.224598930481285,
"eval_accuracy": 0.7,
"eval_loss": 6.69921875,
"eval_runtime": 0.8667,
"eval_samples_per_second": 11.537,
"eval_steps_per_second": 1.154,
"step": 1600
},
{
"epoch": 34.43850267379679,
"grad_norm": 0.005270595662295818,
"learning_rate": 1e-05,
"loss": 0.0087,
"step": 1610
},
{
"epoch": 34.6524064171123,
"grad_norm": 5.715099334716797,
"learning_rate": 1e-05,
"loss": 0.0131,
"step": 1620
},
{
"epoch": 34.86631016042781,
"grad_norm": 7.6973066329956055,
"learning_rate": 1e-05,
"loss": 0.0071,
"step": 1630
},
{
"epoch": 35.080213903743314,
"grad_norm": 0.04163156822323799,
"learning_rate": 1e-05,
"loss": 0.0432,
"step": 1640
},
{
"epoch": 35.294117647058826,
"grad_norm": 0.8603066205978394,
"learning_rate": 1e-05,
"loss": 0.081,
"step": 1650
},
{
"epoch": 35.50802139037433,
"grad_norm": 0.02197711355984211,
"learning_rate": 1e-05,
"loss": 0.0087,
"step": 1660
},
{
"epoch": 35.72192513368984,
"grad_norm": 0.2183353304862976,
"learning_rate": 1e-05,
"loss": 0.0463,
"step": 1670
},
{
"epoch": 35.93582887700535,
"grad_norm": 1.1545729637145996,
"learning_rate": 1e-05,
"loss": 0.0073,
"step": 1680
},
{
"epoch": 36.149732620320854,
"grad_norm": 0.5573898553848267,
"learning_rate": 1e-05,
"loss": 0.0011,
"step": 1690
},
{
"epoch": 36.36363636363637,
"grad_norm": 0.0014353781007230282,
"learning_rate": 1e-05,
"loss": 0.077,
"step": 1700
},
{
"epoch": 36.57754010695187,
"grad_norm": 0.0148012051358819,
"learning_rate": 1e-05,
"loss": 0.0052,
"step": 1710
},
{
"epoch": 36.79144385026738,
"grad_norm": 0.03192583844065666,
"learning_rate": 1e-05,
"loss": 0.086,
"step": 1720
},
{
"epoch": 37.00534759358289,
"grad_norm": 82.85762023925781,
"learning_rate": 1e-05,
"loss": 0.0983,
"step": 1730
},
{
"epoch": 37.219251336898395,
"grad_norm": 0.08266828954219818,
"learning_rate": 1e-05,
"loss": 0.0279,
"step": 1740
},
{
"epoch": 37.4331550802139,
"grad_norm": 0.008177646435797215,
"learning_rate": 1e-05,
"loss": 0.0143,
"step": 1750
},
{
"epoch": 37.64705882352941,
"grad_norm": 0.009183383546769619,
"learning_rate": 1e-05,
"loss": 0.0107,
"step": 1760
},
{
"epoch": 37.86096256684492,
"grad_norm": 1.8703384399414062,
"learning_rate": 1e-05,
"loss": 0.0479,
"step": 1770
},
{
"epoch": 38.07486631016043,
"grad_norm": 0.004155300557613373,
"learning_rate": 1e-05,
"loss": 0.0595,
"step": 1780
},
{
"epoch": 38.288770053475936,
"grad_norm": 2.2654850482940674,
"learning_rate": 1e-05,
"loss": 0.033,
"step": 1790
},
{
"epoch": 38.50267379679144,
"grad_norm": 0.20017670094966888,
"learning_rate": 1e-05,
"loss": 0.0078,
"step": 1800
},
{
"epoch": 38.716577540106954,
"grad_norm": 53.791812896728516,
"learning_rate": 1e-05,
"loss": 0.058,
"step": 1810
},
{
"epoch": 38.93048128342246,
"grad_norm": 47.128692626953125,
"learning_rate": 1e-05,
"loss": 0.045,
"step": 1820
},
{
"epoch": 39.144385026737964,
"grad_norm": 41.79620361328125,
"learning_rate": 1e-05,
"loss": 0.156,
"step": 1830
},
{
"epoch": 39.35828877005348,
"grad_norm": 4.738333702087402,
"learning_rate": 1e-05,
"loss": 0.0736,
"step": 1840
},
{
"epoch": 39.57219251336898,
"grad_norm": 0.09291240572929382,
"learning_rate": 1e-05,
"loss": 0.0921,
"step": 1850
},
{
"epoch": 39.786096256684495,
"grad_norm": 11.098824501037598,
"learning_rate": 1e-05,
"loss": 0.0778,
"step": 1860
},
{
"epoch": 40.0,
"grad_norm": 40.247833251953125,
"learning_rate": 1e-05,
"loss": 0.0217,
"step": 1870
},
{
"epoch": 40.213903743315505,
"grad_norm": 2.459455728530884,
"learning_rate": 1e-05,
"loss": 0.008,
"step": 1880
},
{
"epoch": 40.42780748663102,
"grad_norm": 0.03633593022823334,
"learning_rate": 1e-05,
"loss": 0.0053,
"step": 1890
},
{
"epoch": 40.64171122994652,
"grad_norm": 0.0483514666557312,
"learning_rate": 1e-05,
"loss": 0.0566,
"step": 1900
},
{
"epoch": 40.855614973262036,
"grad_norm": 53.63500213623047,
"learning_rate": 1e-05,
"loss": 0.0183,
"step": 1910
},
{
"epoch": 41.06951871657754,
"grad_norm": 0.13120533525943756,
"learning_rate": 1e-05,
"loss": 0.0099,
"step": 1920
},
{
"epoch": 41.283422459893046,
"grad_norm": 0.0008334398153237998,
"learning_rate": 1e-05,
"loss": 0.0007,
"step": 1930
},
{
"epoch": 41.49732620320856,
"grad_norm": 0.028349481523036957,
"learning_rate": 1e-05,
"loss": 0.0011,
"step": 1940
},
{
"epoch": 41.711229946524064,
"grad_norm": 72.34516906738281,
"learning_rate": 1e-05,
"loss": 0.0117,
"step": 1950
},
{
"epoch": 41.92513368983957,
"grad_norm": 0.0020225769840180874,
"learning_rate": 1e-05,
"loss": 0.0097,
"step": 1960
},
{
"epoch": 42.13903743315508,
"grad_norm": 43.7913703918457,
"learning_rate": 1e-05,
"loss": 0.0821,
"step": 1970
},
{
"epoch": 42.35294117647059,
"grad_norm": 18.823827743530273,
"learning_rate": 1e-05,
"loss": 0.01,
"step": 1980
},
{
"epoch": 42.5668449197861,
"grad_norm": 0.01237189769744873,
"learning_rate": 1e-05,
"loss": 0.013,
"step": 1990
},
{
"epoch": 42.780748663101605,
"grad_norm": 0.00256702140904963,
"learning_rate": 1e-05,
"loss": 0.0041,
"step": 2000
},
{
"epoch": 42.780748663101605,
"eval_accuracy": 0.4,
"eval_loss": 7.45703125,
"eval_runtime": 0.871,
"eval_samples_per_second": 11.481,
"eval_steps_per_second": 1.148,
"step": 2000
}
],
"logging_steps": 10,
"max_steps": 2500,
"num_input_tokens_seen": 0,
"num_train_epochs": 55,
"save_steps": 400,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.6510938875124777e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}