{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.5074438930449596,
"eval_steps": 11,
"global_step": 159,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009480779201540626,
"grad_norm": 1.421875,
"learning_rate": 2e-05,
"loss": 0.4503,
"step": 1
},
{
"epoch": 0.009480779201540626,
"eval_loss": 0.4263582229614258,
"eval_runtime": 33.6286,
"eval_samples_per_second": 21.143,
"eval_steps_per_second": 21.143,
"step": 1
},
{
"epoch": 0.018961558403081252,
"grad_norm": 1.359375,
"learning_rate": 4e-05,
"loss": 0.412,
"step": 2
},
{
"epoch": 0.02844233760462188,
"grad_norm": 1.1875,
"learning_rate": 6e-05,
"loss": 0.4214,
"step": 3
},
{
"epoch": 0.037923116806162505,
"grad_norm": 0.80859375,
"learning_rate": 8e-05,
"loss": 0.2924,
"step": 4
},
{
"epoch": 0.04740389600770313,
"grad_norm": 0.51953125,
"learning_rate": 0.0001,
"loss": 0.1896,
"step": 5
},
{
"epoch": 0.05688467520924376,
"grad_norm": 0.4453125,
"learning_rate": 0.00012,
"loss": 0.1531,
"step": 6
},
{
"epoch": 0.06636545441078438,
"grad_norm": 0.27734375,
"learning_rate": 0.00014,
"loss": 0.1181,
"step": 7
},
{
"epoch": 0.07584623361232501,
"grad_norm": 0.263671875,
"learning_rate": 0.00016,
"loss": 0.1143,
"step": 8
},
{
"epoch": 0.08532701281386564,
"grad_norm": 0.2197265625,
"learning_rate": 0.00018,
"loss": 0.0952,
"step": 9
},
{
"epoch": 0.09480779201540626,
"grad_norm": 0.1708984375,
"learning_rate": 0.0002,
"loss": 0.0767,
"step": 10
},
{
"epoch": 0.10428857121694689,
"grad_norm": 0.1484375,
"learning_rate": 0.00019999469523400122,
"loss": 0.0836,
"step": 11
},
{
"epoch": 0.10428857121694689,
"eval_loss": 0.07918477058410645,
"eval_runtime": 33.4231,
"eval_samples_per_second": 21.273,
"eval_steps_per_second": 21.273,
"step": 11
},
{
"epoch": 0.11376935041848751,
"grad_norm": 0.154296875,
"learning_rate": 0.00019997878149881574,
"loss": 0.0757,
"step": 12
},
{
"epoch": 0.12325012962002814,
"grad_norm": 0.1513671875,
"learning_rate": 0.0001999522604828164,
"loss": 0.0767,
"step": 13
},
{
"epoch": 0.13273090882156877,
"grad_norm": 0.1416015625,
"learning_rate": 0.00019991513499975882,
"loss": 0.0809,
"step": 14
},
{
"epoch": 0.1422116880231094,
"grad_norm": 0.09765625,
"learning_rate": 0.00019986740898848306,
"loss": 0.0634,
"step": 15
},
{
"epoch": 0.15169246722465002,
"grad_norm": 0.099609375,
"learning_rate": 0.00019980908751249555,
"loss": 0.0674,
"step": 16
},
{
"epoch": 0.16117324642619066,
"grad_norm": 0.119140625,
"learning_rate": 0.00019974017675943192,
"loss": 0.0667,
"step": 17
},
{
"epoch": 0.17065402562773127,
"grad_norm": 0.09619140625,
"learning_rate": 0.0001996606840404006,
"loss": 0.0632,
"step": 18
},
{
"epoch": 0.1801348048292719,
"grad_norm": 0.09130859375,
"learning_rate": 0.00019957061778920701,
"loss": 0.0488,
"step": 19
},
{
"epoch": 0.18961558403081252,
"grad_norm": 0.0947265625,
"learning_rate": 0.0001994699875614589,
"loss": 0.0627,
"step": 20
},
{
"epoch": 0.19909636323235316,
"grad_norm": 0.08203125,
"learning_rate": 0.00019935880403355253,
"loss": 0.0528,
"step": 21
},
{
"epoch": 0.20857714243389378,
"grad_norm": 0.1123046875,
"learning_rate": 0.00019923707900153982,
"loss": 0.0532,
"step": 22
},
{
"epoch": 0.20857714243389378,
"eval_loss": 0.056647635996341705,
"eval_runtime": 33.3595,
"eval_samples_per_second": 21.313,
"eval_steps_per_second": 21.313,
"step": 22
},
{
"epoch": 0.21805792163543442,
"grad_norm": 0.10693359375,
"learning_rate": 0.00019910482537987702,
"loss": 0.0583,
"step": 23
},
{
"epoch": 0.22753870083697503,
"grad_norm": 0.0791015625,
"learning_rate": 0.0001989620572000544,
"loss": 0.0554,
"step": 24
},
{
"epoch": 0.23701948003851567,
"grad_norm": 0.123046875,
"learning_rate": 0.00019880878960910772,
"loss": 0.0688,
"step": 25
},
{
"epoch": 0.24650025924005628,
"grad_norm": 0.10498046875,
"learning_rate": 0.00019864503886801106,
"loss": 0.0655,
"step": 26
},
{
"epoch": 0.2559810384415969,
"grad_norm": 0.07421875,
"learning_rate": 0.00019847082234995171,
"loss": 0.0471,
"step": 27
},
{
"epoch": 0.26546181764313753,
"grad_norm": 0.080078125,
"learning_rate": 0.00019828615853848688,
"loss": 0.0518,
"step": 28
},
{
"epoch": 0.27494259684467814,
"grad_norm": 0.07421875,
"learning_rate": 0.00019809106702558277,
"loss": 0.0481,
"step": 29
},
{
"epoch": 0.2844233760462188,
"grad_norm": 0.0703125,
"learning_rate": 0.0001978855685095358,
"loss": 0.0464,
"step": 30
},
{
"epoch": 0.2939041552477594,
"grad_norm": 0.0849609375,
"learning_rate": 0.00019766968479277683,
"loss": 0.0566,
"step": 31
},
{
"epoch": 0.30338493444930004,
"grad_norm": 0.0859375,
"learning_rate": 0.00019744343877955788,
"loss": 0.0517,
"step": 32
},
{
"epoch": 0.3128657136508407,
"grad_norm": 0.07177734375,
"learning_rate": 0.00019720685447352209,
"loss": 0.0511,
"step": 33
},
{
"epoch": 0.3128657136508407,
"eval_loss": 0.04964344948530197,
"eval_runtime": 33.3741,
"eval_samples_per_second": 21.304,
"eval_steps_per_second": 21.304,
"step": 33
},
{
"epoch": 0.3223464928523813,
"grad_norm": 0.076171875,
"learning_rate": 0.0001969599569751571,
"loss": 0.045,
"step": 34
},
{
"epoch": 0.33182727205392193,
"grad_norm": 0.07568359375,
"learning_rate": 0.00019670277247913205,
"loss": 0.0543,
"step": 35
},
{
"epoch": 0.34130805125546254,
"grad_norm": 0.0791015625,
"learning_rate": 0.0001964353282715183,
"loss": 0.0444,
"step": 36
},
{
"epoch": 0.3507888304570032,
"grad_norm": 0.0771484375,
"learning_rate": 0.00019615765272689461,
"loss": 0.0506,
"step": 37
},
{
"epoch": 0.3602696096585438,
"grad_norm": 0.080078125,
"learning_rate": 0.00019586977530533677,
"loss": 0.0558,
"step": 38
},
{
"epoch": 0.36975038886008443,
"grad_norm": 0.078125,
"learning_rate": 0.00019557172654929196,
"loss": 0.0507,
"step": 39
},
{
"epoch": 0.37923116806162505,
"grad_norm": 0.06396484375,
"learning_rate": 0.00019526353808033825,
"loss": 0.0452,
"step": 40
},
{
"epoch": 0.3887119472631657,
"grad_norm": 0.07275390625,
"learning_rate": 0.00019494524259582992,
"loss": 0.0481,
"step": 41
},
{
"epoch": 0.3981927264647063,
"grad_norm": 0.06591796875,
"learning_rate": 0.00019461687386542826,
"loss": 0.0464,
"step": 42
},
{
"epoch": 0.40767350566624694,
"grad_norm": 0.06591796875,
"learning_rate": 0.00019427846672751873,
"loss": 0.0431,
"step": 43
},
{
"epoch": 0.41715428486778755,
"grad_norm": 0.06982421875,
"learning_rate": 0.00019393005708551498,
"loss": 0.0511,
"step": 44
},
{
"epoch": 0.41715428486778755,
"eval_loss": 0.04574437811970711,
"eval_runtime": 33.3514,
"eval_samples_per_second": 21.318,
"eval_steps_per_second": 21.318,
"step": 44
},
{
"epoch": 0.4266350640693282,
"grad_norm": 0.06787109375,
"learning_rate": 0.00019357168190404936,
"loss": 0.0443,
"step": 45
},
{
"epoch": 0.43611584327086883,
"grad_norm": 0.0751953125,
"learning_rate": 0.00019320337920505153,
"loss": 0.0545,
"step": 46
},
{
"epoch": 0.44559662247240944,
"grad_norm": 0.07666015625,
"learning_rate": 0.00019282518806371414,
"loss": 0.0542,
"step": 47
},
{
"epoch": 0.45507740167395005,
"grad_norm": 0.07666015625,
"learning_rate": 0.0001924371486043473,
"loss": 0.0589,
"step": 48
},
{
"epoch": 0.4645581808754907,
"grad_norm": 0.059326171875,
"learning_rate": 0.0001920393019961217,
"loss": 0.0444,
"step": 49
},
{
"epoch": 0.47403896007703133,
"grad_norm": 0.0732421875,
"learning_rate": 0.0001916316904487005,
"loss": 0.0485,
"step": 50
},
{
"epoch": 0.48351973927857195,
"grad_norm": 0.060546875,
"learning_rate": 0.00019121435720776122,
"loss": 0.0408,
"step": 51
},
{
"epoch": 0.49300051848011256,
"grad_norm": 0.068359375,
"learning_rate": 0.0001907873465504076,
"loss": 0.0466,
"step": 52
},
{
"epoch": 0.5024812976816532,
"grad_norm": 0.07763671875,
"learning_rate": 0.00019035070378047204,
"loss": 0.0426,
"step": 53
},
{
"epoch": 0.5119620768831938,
"grad_norm": 0.0654296875,
"learning_rate": 0.00018990447522370884,
"loss": 0.0407,
"step": 54
},
{
"epoch": 0.5214428560847345,
"grad_norm": 0.080078125,
"learning_rate": 0.00018944870822287956,
"loss": 0.0475,
"step": 55
},
{
"epoch": 0.5214428560847345,
"eval_loss": 0.043636418879032135,
"eval_runtime": 33.266,
"eval_samples_per_second": 21.373,
"eval_steps_per_second": 21.373,
"step": 55
},
{
"epoch": 0.5309236352862751,
"grad_norm": 0.0673828125,
"learning_rate": 0.00018898345113272998,
"loss": 0.0404,
"step": 56
},
{
"epoch": 0.5404044144878157,
"grad_norm": 0.06201171875,
"learning_rate": 0.00018850875331485995,
"loss": 0.036,
"step": 57
},
{
"epoch": 0.5498851936893563,
"grad_norm": 0.0751953125,
"learning_rate": 0.00018802466513248632,
"loss": 0.0412,
"step": 58
},
{
"epoch": 0.559365972890897,
"grad_norm": 0.068359375,
"learning_rate": 0.00018753123794509974,
"loss": 0.044,
"step": 59
},
{
"epoch": 0.5688467520924376,
"grad_norm": 0.0673828125,
"learning_rate": 0.00018702852410301554,
"loss": 0.0458,
"step": 60
},
{
"epoch": 0.5783275312939782,
"grad_norm": 0.07080078125,
"learning_rate": 0.0001865165769418196,
"loss": 0.0464,
"step": 61
},
{
"epoch": 0.5878083104955188,
"grad_norm": 0.0732421875,
"learning_rate": 0.00018599545077670985,
"loss": 0.0427,
"step": 62
},
{
"epoch": 0.5972890896970595,
"grad_norm": 0.059814453125,
"learning_rate": 0.0001854652008967335,
"loss": 0.0403,
"step": 63
},
{
"epoch": 0.6067698688986001,
"grad_norm": 0.07080078125,
"learning_rate": 0.00018492588355892124,
"loss": 0.0475,
"step": 64
},
{
"epoch": 0.6162506481001407,
"grad_norm": 0.072265625,
"learning_rate": 0.00018437755598231856,
"loss": 0.0454,
"step": 65
},
{
"epoch": 0.6257314273016814,
"grad_norm": 0.06982421875,
"learning_rate": 0.00018382027634191524,
"loss": 0.0435,
"step": 66
},
{
"epoch": 0.6257314273016814,
"eval_loss": 0.042031481862068176,
"eval_runtime": 33.3825,
"eval_samples_per_second": 21.299,
"eval_steps_per_second": 21.299,
"step": 66
},
{
"epoch": 0.635212206503222,
"grad_norm": 0.0712890625,
"learning_rate": 0.00018325410376247294,
"loss": 0.0429,
"step": 67
},
{
"epoch": 0.6446929857047626,
"grad_norm": 0.0625,
"learning_rate": 0.0001826790983122527,
"loss": 0.0402,
"step": 68
},
{
"epoch": 0.6541737649063032,
"grad_norm": 0.0673828125,
"learning_rate": 0.00018209532099664174,
"loss": 0.0437,
"step": 69
},
{
"epoch": 0.6636545441078439,
"grad_norm": 0.07568359375,
"learning_rate": 0.00018150283375168114,
"loss": 0.0442,
"step": 70
},
{
"epoch": 0.6731353233093845,
"grad_norm": 0.06982421875,
"learning_rate": 0.00018090169943749476,
"loss": 0.0462,
"step": 71
},
{
"epoch": 0.6826161025109251,
"grad_norm": 0.07568359375,
"learning_rate": 0.00018029198183161998,
"loss": 0.0578,
"step": 72
},
{
"epoch": 0.6920968817124658,
"grad_norm": 0.06494140625,
"learning_rate": 0.00017967374562224132,
"loss": 0.0443,
"step": 73
},
{
"epoch": 0.7015776609140064,
"grad_norm": 0.0703125,
"learning_rate": 0.00017904705640132718,
"loss": 0.0462,
"step": 74
},
{
"epoch": 0.711058440115547,
"grad_norm": 0.06640625,
"learning_rate": 0.00017841198065767107,
"loss": 0.0362,
"step": 75
},
{
"epoch": 0.7205392193170876,
"grad_norm": 0.06396484375,
"learning_rate": 0.00017776858576983712,
"loss": 0.0431,
"step": 76
},
{
"epoch": 0.7300199985186282,
"grad_norm": 0.0615234375,
"learning_rate": 0.0001771169399990119,
"loss": 0.0361,
"step": 77
},
{
"epoch": 0.7300199985186282,
"eval_loss": 0.040743011981248856,
"eval_runtime": 33.5015,
"eval_samples_per_second": 21.223,
"eval_steps_per_second": 21.223,
"step": 77
},
{
"epoch": 0.7395007777201689,
"grad_norm": 0.0576171875,
"learning_rate": 0.00017645711248176195,
"loss": 0.0371,
"step": 78
},
{
"epoch": 0.7489815569217095,
"grad_norm": 0.060791015625,
"learning_rate": 0.00017578917322269886,
"loss": 0.0395,
"step": 79
},
{
"epoch": 0.7584623361232501,
"grad_norm": 0.06787109375,
"learning_rate": 0.00017511319308705198,
"loss": 0.0387,
"step": 80
},
{
"epoch": 0.7679431153247908,
"grad_norm": 0.0634765625,
"learning_rate": 0.0001744292437931502,
"loss": 0.0374,
"step": 81
},
{
"epoch": 0.7774238945263314,
"grad_norm": 0.0732421875,
"learning_rate": 0.00017373739790481262,
"loss": 0.042,
"step": 82
},
{
"epoch": 0.786904673727872,
"grad_norm": 0.052734375,
"learning_rate": 0.00017303772882365016,
"loss": 0.0314,
"step": 83
},
{
"epoch": 0.7963854529294127,
"grad_norm": 0.07177734375,
"learning_rate": 0.00017233031078127788,
"loss": 0.0404,
"step": 84
},
{
"epoch": 0.8058662321309532,
"grad_norm": 0.0791015625,
"learning_rate": 0.00017161521883143934,
"loss": 0.0472,
"step": 85
},
{
"epoch": 0.8153470113324939,
"grad_norm": 0.07568359375,
"learning_rate": 0.00017089252884204377,
"loss": 0.0434,
"step": 86
},
{
"epoch": 0.8248277905340345,
"grad_norm": 0.0732421875,
"learning_rate": 0.0001701623174871168,
"loss": 0.0419,
"step": 87
},
{
"epoch": 0.8343085697355751,
"grad_norm": 0.0732421875,
"learning_rate": 0.0001694246622386658,
"loss": 0.0406,
"step": 88
},
{
"epoch": 0.8343085697355751,
"eval_loss": 0.039096854627132416,
"eval_runtime": 33.3174,
"eval_samples_per_second": 21.34,
"eval_steps_per_second": 21.34,
"step": 88
},
{
"epoch": 0.8437893489371158,
"grad_norm": 0.056640625,
"learning_rate": 0.00016867964135846043,
"loss": 0.0331,
"step": 89
},
{
"epoch": 0.8532701281386564,
"grad_norm": 0.07177734375,
"learning_rate": 0.00016792733388972932,
"loss": 0.0439,
"step": 90
},
{
"epoch": 0.862750907340197,
"grad_norm": 0.059326171875,
"learning_rate": 0.0001671678196487741,
"loss": 0.0422,
"step": 91
},
{
"epoch": 0.8722316865417377,
"grad_norm": 0.07470703125,
"learning_rate": 0.00016640117921650117,
"loss": 0.0463,
"step": 92
},
{
"epoch": 0.8817124657432783,
"grad_norm": 0.057373046875,
"learning_rate": 0.00016562749392987254,
"loss": 0.037,
"step": 93
},
{
"epoch": 0.8911932449448189,
"grad_norm": 0.057373046875,
"learning_rate": 0.0001648468458732762,
"loss": 0.0361,
"step": 94
},
{
"epoch": 0.9006740241463596,
"grad_norm": 0.061279296875,
"learning_rate": 0.00016405931786981755,
"loss": 0.039,
"step": 95
},
{
"epoch": 0.9101548033479001,
"grad_norm": 0.0634765625,
"learning_rate": 0.00016326499347253207,
"loss": 0.0393,
"step": 96
},
{
"epoch": 0.9196355825494408,
"grad_norm": 0.05712890625,
"learning_rate": 0.00016246395695552085,
"loss": 0.0376,
"step": 97
},
{
"epoch": 0.9291163617509814,
"grad_norm": 0.054931640625,
"learning_rate": 0.00016165629330500952,
"loss": 0.035,
"step": 98
},
{
"epoch": 0.938597140952522,
"grad_norm": 0.060791015625,
"learning_rate": 0.0001608420882103315,
"loss": 0.0349,
"step": 99
},
{
"epoch": 0.938597140952522,
"eval_loss": 0.03842462599277496,
"eval_runtime": 33.3873,
"eval_samples_per_second": 21.296,
"eval_steps_per_second": 21.296,
"step": 99
},
{
"epoch": 0.9480779201540627,
"grad_norm": 0.0703125,
"learning_rate": 0.00016002142805483685,
"loss": 0.0379,
"step": 100
},
{
"epoch": 0.9575586993556033,
"grad_norm": 0.06884765625,
"learning_rate": 0.0001591943999067273,
"loss": 0.0382,
"step": 101
},
{
"epoch": 0.9670394785571439,
"grad_norm": 0.060791015625,
"learning_rate": 0.00015836109150981886,
"loss": 0.0345,
"step": 102
},
{
"epoch": 0.9765202577586846,
"grad_norm": 0.0703125,
"learning_rate": 0.00015752159127423263,
"loss": 0.0399,
"step": 103
},
{
"epoch": 0.9860010369602251,
"grad_norm": 0.06787109375,
"learning_rate": 0.0001566759882670146,
"loss": 0.0358,
"step": 104
},
{
"epoch": 0.9954818161617658,
"grad_norm": 0.06982421875,
"learning_rate": 0.00015582437220268647,
"loss": 0.0408,
"step": 105
},
{
"epoch": 1.0049625953633063,
"grad_norm": 0.05908203125,
"learning_rate": 0.0001549668334337271,
"loss": 0.0321,
"step": 106
},
{
"epoch": 1.0144433745648471,
"grad_norm": 0.06298828125,
"learning_rate": 0.0001541034629409865,
"loss": 0.0313,
"step": 107
},
{
"epoch": 1.0239241537663877,
"grad_norm": 0.061279296875,
"learning_rate": 0.00015323435232403337,
"loss": 0.0318,
"step": 108
},
{
"epoch": 1.0334049329679282,
"grad_norm": 0.049072265625,
"learning_rate": 0.00015235959379143678,
"loss": 0.0274,
"step": 109
},
{
"epoch": 1.042885712169469,
"grad_norm": 0.056396484375,
"learning_rate": 0.0001514792801509831,
"loss": 0.0304,
"step": 110
},
{
"epoch": 1.042885712169469,
"eval_loss": 0.03729868680238724,
"eval_runtime": 33.6565,
"eval_samples_per_second": 21.125,
"eval_steps_per_second": 21.125,
"step": 110
},
{
"epoch": 1.0523664913710096,
"grad_norm": 0.054931640625,
"learning_rate": 0.00015059350479982965,
"loss": 0.0271,
"step": 111
},
{
"epoch": 1.0618472705725501,
"grad_norm": 0.05712890625,
"learning_rate": 0.0001497023617145958,
"loss": 0.0285,
"step": 112
},
{
"epoch": 1.071328049774091,
"grad_norm": 0.054931640625,
"learning_rate": 0.0001488059454413923,
"loss": 0.0272,
"step": 113
},
{
"epoch": 1.0808088289756315,
"grad_norm": 0.06201171875,
"learning_rate": 0.00014790435108579048,
"loss": 0.0294,
"step": 114
},
{
"epoch": 1.090289608177172,
"grad_norm": 0.05908203125,
"learning_rate": 0.000146997674302732,
"loss": 0.0273,
"step": 115
},
{
"epoch": 1.0997703873787126,
"grad_norm": 0.059326171875,
"learning_rate": 0.00014608601128638027,
"loss": 0.0279,
"step": 116
},
{
"epoch": 1.1092511665802534,
"grad_norm": 0.06298828125,
"learning_rate": 0.00014516945875991472,
"loss": 0.0327,
"step": 117
},
{
"epoch": 1.118731945781794,
"grad_norm": 0.060302734375,
"learning_rate": 0.00014424811396526892,
"loss": 0.0256,
"step": 118
},
{
"epoch": 1.1282127249833347,
"grad_norm": 0.07177734375,
"learning_rate": 0.00014332207465281364,
"loss": 0.0312,
"step": 119
},
{
"epoch": 1.1376935041848752,
"grad_norm": 0.072265625,
"learning_rate": 0.0001423914390709861,
"loss": 0.0314,
"step": 120
},
{
"epoch": 1.1471742833864158,
"grad_norm": 0.06298828125,
"learning_rate": 0.00014145630595586607,
"loss": 0.0305,
"step": 121
},
{
"epoch": 1.1471742833864158,
"eval_loss": 0.03735668212175369,
"eval_runtime": 33.3809,
"eval_samples_per_second": 21.3,
"eval_steps_per_second": 21.3,
"step": 121
},
{
"epoch": 1.1566550625879564,
"grad_norm": 0.057373046875,
"learning_rate": 0.00014051677452070065,
"loss": 0.027,
"step": 122
},
{
"epoch": 1.1661358417894971,
"grad_norm": 0.058837890625,
"learning_rate": 0.00013957294444537808,
"loss": 0.027,
"step": 123
},
{
"epoch": 1.1756166209910377,
"grad_norm": 0.06494140625,
"learning_rate": 0.0001386249158658522,
"loss": 0.0362,
"step": 124
},
{
"epoch": 1.1850974001925783,
"grad_norm": 0.060791015625,
"learning_rate": 0.00013767278936351854,
"loss": 0.0301,
"step": 125
},
{
"epoch": 1.194578179394119,
"grad_norm": 0.06201171875,
"learning_rate": 0.00013671666595454295,
"loss": 0.0241,
"step": 126
},
{
"epoch": 1.2040589585956596,
"grad_norm": 0.05859375,
"learning_rate": 0.00013575664707914448,
"loss": 0.0259,
"step": 127
},
{
"epoch": 1.2135397377972001,
"grad_norm": 0.064453125,
"learning_rate": 0.0001347928345908329,
"loss": 0.0325,
"step": 128
},
{
"epoch": 1.223020516998741,
"grad_norm": 0.05908203125,
"learning_rate": 0.00013382533074560255,
"loss": 0.0284,
"step": 129
},
{
"epoch": 1.2325012962002815,
"grad_norm": 0.05908203125,
"learning_rate": 0.0001328542381910835,
"loss": 0.0267,
"step": 130
},
{
"epoch": 1.241982075401822,
"grad_norm": 0.0546875,
"learning_rate": 0.00013187965995565098,
"loss": 0.0274,
"step": 131
},
{
"epoch": 1.2514628546033628,
"grad_norm": 0.05078125,
"learning_rate": 0.00013090169943749476,
"loss": 0.0251,
"step": 132
},
{
"epoch": 1.2514628546033628,
"eval_loss": 0.036460939794778824,
"eval_runtime": 33.4627,
"eval_samples_per_second": 21.248,
"eval_steps_per_second": 21.248,
"step": 132
},
{
"epoch": 1.2609436338049034,
"grad_norm": 0.0546875,
"learning_rate": 0.00012992046039364893,
"loss": 0.0273,
"step": 133
},
{
"epoch": 1.270424413006444,
"grad_norm": 0.064453125,
"learning_rate": 0.0001289360469289838,
"loss": 0.0263,
"step": 134
},
{
"epoch": 1.2799051922079845,
"grad_norm": 0.06298828125,
"learning_rate": 0.00012794856348516095,
"loss": 0.0273,
"step": 135
},
{
"epoch": 1.2893859714095253,
"grad_norm": 0.06640625,
"learning_rate": 0.00012695811482955227,
"loss": 0.0277,
"step": 136
},
{
"epoch": 1.2988667506110658,
"grad_norm": 0.057861328125,
"learning_rate": 0.00012596480604412484,
"loss": 0.0297,
"step": 137
},
{
"epoch": 1.3083475298126066,
"grad_norm": 0.057861328125,
"learning_rate": 0.000124968742514292,
"loss": 0.023,
"step": 138
},
{
"epoch": 1.3178283090141472,
"grad_norm": 0.06005859375,
"learning_rate": 0.00012397002991773275,
"loss": 0.0274,
"step": 139
},
{
"epoch": 1.3273090882156877,
"grad_norm": 0.06591796875,
"learning_rate": 0.0001229687742131796,
"loss": 0.0262,
"step": 140
},
{
"epoch": 1.3367898674172283,
"grad_norm": 0.06396484375,
"learning_rate": 0.00012196508162917677,
"loss": 0.0312,
"step": 141
},
{
"epoch": 1.346270646618769,
"grad_norm": 0.0693359375,
"learning_rate": 0.00012095905865281025,
"loss": 0.0291,
"step": 142
},
{
"epoch": 1.3557514258203096,
"grad_norm": 0.0625,
"learning_rate": 0.00011995081201840956,
"loss": 0.0288,
"step": 143
},
{
"epoch": 1.3557514258203096,
"eval_loss": 0.036965224891901016,
"eval_runtime": 33.3733,
"eval_samples_per_second": 21.304,
"eval_steps_per_second": 21.304,
"step": 143
},
{
"epoch": 1.3652322050218502,
"grad_norm": 0.058837890625,
"learning_rate": 0.00011894044869622403,
"loss": 0.0279,
"step": 144
},
{
"epoch": 1.374712984223391,
"grad_norm": 0.056640625,
"learning_rate": 0.00011792807588107357,
"loss": 0.0247,
"step": 145
},
{
"epoch": 1.3841937634249315,
"grad_norm": 0.06689453125,
"learning_rate": 0.00011691380098097597,
"loss": 0.0315,
"step": 146
},
{
"epoch": 1.393674542626472,
"grad_norm": 0.0537109375,
"learning_rate": 0.0001158977316057513,
"loss": 0.0244,
"step": 147
},
{
"epoch": 1.4031553218280126,
"grad_norm": 0.054443359375,
"learning_rate": 0.00011487997555560503,
"loss": 0.0292,
"step": 148
},
{
"epoch": 1.4126361010295534,
"grad_norm": 0.064453125,
"learning_rate": 0.00011386064080969094,
"loss": 0.0293,
"step": 149
},
{
"epoch": 1.422116880231094,
"grad_norm": 0.059814453125,
"learning_rate": 0.00011283983551465511,
"loss": 0.0254,
"step": 150
},
{
"epoch": 1.4315976594326347,
"grad_norm": 0.06640625,
"learning_rate": 0.0001118176679731619,
"loss": 0.0284,
"step": 151
},
{
"epoch": 1.4410784386341753,
"grad_norm": 0.060791015625,
"learning_rate": 0.00011079424663240372,
"loss": 0.0306,
"step": 152
},
{
"epoch": 1.4505592178357158,
"grad_norm": 0.058349609375,
"learning_rate": 0.00010976968007259519,
"loss": 0.028,
"step": 153
},
{
"epoch": 1.4600399970372564,
"grad_norm": 0.0537109375,
"learning_rate": 0.00010874407699545328,
"loss": 0.0251,
"step": 154
},
{
"epoch": 1.4600399970372564,
"eval_loss": 0.03655948117375374,
"eval_runtime": 33.4084,
"eval_samples_per_second": 21.282,
"eval_steps_per_second": 21.282,
"step": 154
},
{
"epoch": 1.4695207762387972,
"grad_norm": 0.06298828125,
"learning_rate": 0.00010771754621266466,
"loss": 0.031,
"step": 155
},
{
"epoch": 1.4790015554403377,
"grad_norm": 0.05419921875,
"learning_rate": 0.00010669019663434117,
"loss": 0.0262,
"step": 156
},
{
"epoch": 1.4884823346418785,
"grad_norm": 0.06298828125,
"learning_rate": 0.00010566213725746506,
"loss": 0.0317,
"step": 157
},
{
"epoch": 1.497963113843419,
"grad_norm": 0.052734375,
"learning_rate": 0.00010463347715432488,
"loss": 0.0255,
"step": 158
},
{
"epoch": 1.5074438930449596,
"grad_norm": 0.058837890625,
"learning_rate": 0.00010360432546094341,
"loss": 0.028,
"step": 159
}
],
"logging_steps": 1,
"max_steps": 315,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 53,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.8648279883317248e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}