{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.512406488408266,
"eval_steps": 11,
"global_step": 265,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009480779201540626,
"grad_norm": 2.265625,
"learning_rate": 2e-05,
"loss": 0.5339,
"step": 1
},
{
"epoch": 0.009480779201540626,
"eval_loss": 0.5036121606826782,
"eval_runtime": 36.0149,
"eval_samples_per_second": 19.742,
"eval_steps_per_second": 19.742,
"step": 1
},
{
"epoch": 0.018961558403081252,
"grad_norm": 2.1875,
"learning_rate": 4e-05,
"loss": 0.502,
"step": 2
},
{
"epoch": 0.02844233760462188,
"grad_norm": 1.890625,
"learning_rate": 6e-05,
"loss": 0.4769,
"step": 3
},
{
"epoch": 0.037923116806162505,
"grad_norm": 1.0234375,
"learning_rate": 8e-05,
"loss": 0.2922,
"step": 4
},
{
"epoch": 0.04740389600770313,
"grad_norm": 1.5234375,
"learning_rate": 0.0001,
"loss": 0.2162,
"step": 5
},
{
"epoch": 0.05688467520924376,
"grad_norm": 0.59375,
"learning_rate": 0.00012,
"loss": 0.1685,
"step": 6
},
{
"epoch": 0.06636545441078438,
"grad_norm": 0.359375,
"learning_rate": 0.00014,
"loss": 0.1208,
"step": 7
},
{
"epoch": 0.07584623361232501,
"grad_norm": 0.32421875,
"learning_rate": 0.00016,
"loss": 0.1147,
"step": 8
},
{
"epoch": 0.08532701281386564,
"grad_norm": 0.302734375,
"learning_rate": 0.00018,
"loss": 0.0983,
"step": 9
},
{
"epoch": 0.09480779201540626,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.0827,
"step": 10
},
{
"epoch": 0.10428857121694689,
"grad_norm": 0.232421875,
"learning_rate": 0.00019999469523400122,
"loss": 0.0879,
"step": 11
},
{
"epoch": 0.10428857121694689,
"eval_loss": 0.08134982734918594,
"eval_runtime": 35.8683,
"eval_samples_per_second": 19.823,
"eval_steps_per_second": 19.823,
"step": 11
},
{
"epoch": 0.11376935041848751,
"grad_norm": 0.2265625,
"learning_rate": 0.00019997878149881574,
"loss": 0.0831,
"step": 12
},
{
"epoch": 0.12325012962002814,
"grad_norm": 0.21875,
"learning_rate": 0.0001999522604828164,
"loss": 0.0756,
"step": 13
},
{
"epoch": 0.13273090882156877,
"grad_norm": 0.2255859375,
"learning_rate": 0.00019991513499975882,
"loss": 0.0835,
"step": 14
},
{
"epoch": 0.1422116880231094,
"grad_norm": 0.234375,
"learning_rate": 0.00019986740898848306,
"loss": 0.0732,
"step": 15
},
{
"epoch": 0.15169246722465002,
"grad_norm": 0.1650390625,
"learning_rate": 0.00019980908751249555,
"loss": 0.0739,
"step": 16
},
{
"epoch": 0.16117324642619066,
"grad_norm": 0.14453125,
"learning_rate": 0.00019974017675943192,
"loss": 0.0698,
"step": 17
},
{
"epoch": 0.17065402562773127,
"grad_norm": 0.1767578125,
"learning_rate": 0.0001996606840404006,
"loss": 0.072,
"step": 18
},
{
"epoch": 0.1801348048292719,
"grad_norm": 0.1328125,
"learning_rate": 0.00019957061778920701,
"loss": 0.0556,
"step": 19
},
{
"epoch": 0.18961558403081252,
"grad_norm": 0.1455078125,
"learning_rate": 0.0001994699875614589,
"loss": 0.0685,
"step": 20
},
{
"epoch": 0.19909636323235316,
"grad_norm": 0.1455078125,
"learning_rate": 0.00019935880403355253,
"loss": 0.0598,
"step": 21
},
{
"epoch": 0.20857714243389378,
"grad_norm": 0.13671875,
"learning_rate": 0.00019923707900153982,
"loss": 0.0582,
"step": 22
},
{
"epoch": 0.20857714243389378,
"eval_loss": 0.06287878006696701,
"eval_runtime": 35.7847,
"eval_samples_per_second": 19.869,
"eval_steps_per_second": 19.869,
"step": 22
},
{
"epoch": 0.21805792163543442,
"grad_norm": 0.15234375,
"learning_rate": 0.00019910482537987702,
"loss": 0.0644,
"step": 23
},
{
"epoch": 0.22753870083697503,
"grad_norm": 0.130859375,
"learning_rate": 0.0001989620572000544,
"loss": 0.0615,
"step": 24
},
{
"epoch": 0.23701948003851567,
"grad_norm": 0.13671875,
"learning_rate": 0.00019880878960910772,
"loss": 0.0686,
"step": 25
},
{
"epoch": 0.24650025924005628,
"grad_norm": 0.14453125,
"learning_rate": 0.00019864503886801106,
"loss": 0.068,
"step": 26
},
{
"epoch": 0.2559810384415969,
"grad_norm": 0.12353515625,
"learning_rate": 0.00019847082234995171,
"loss": 0.0525,
"step": 27
},
{
"epoch": 0.26546181764313753,
"grad_norm": 0.10791015625,
"learning_rate": 0.00019828615853848688,
"loss": 0.0541,
"step": 28
},
{
"epoch": 0.27494259684467814,
"grad_norm": 0.12060546875,
"learning_rate": 0.00019809106702558277,
"loss": 0.0522,
"step": 29
},
{
"epoch": 0.2844233760462188,
"grad_norm": 0.11669921875,
"learning_rate": 0.0001978855685095358,
"loss": 0.0526,
"step": 30
},
{
"epoch": 0.2939041552477594,
"grad_norm": 0.140625,
"learning_rate": 0.00019766968479277683,
"loss": 0.0626,
"step": 31
},
{
"epoch": 0.30338493444930004,
"grad_norm": 0.134765625,
"learning_rate": 0.00019744343877955788,
"loss": 0.0598,
"step": 32
},
{
"epoch": 0.3128657136508407,
"grad_norm": 0.1220703125,
"learning_rate": 0.00019720685447352209,
"loss": 0.06,
"step": 33
},
{
"epoch": 0.3128657136508407,
"eval_loss": 0.05662544071674347,
"eval_runtime": 35.8251,
"eval_samples_per_second": 19.846,
"eval_steps_per_second": 19.846,
"step": 33
},
{
"epoch": 0.3223464928523813,
"grad_norm": 0.14453125,
"learning_rate": 0.0001969599569751571,
"loss": 0.0555,
"step": 34
},
{
"epoch": 0.33182727205392193,
"grad_norm": 0.12158203125,
"learning_rate": 0.00019670277247913205,
"loss": 0.0615,
"step": 35
},
{
"epoch": 0.34130805125546254,
"grad_norm": 0.11279296875,
"learning_rate": 0.0001964353282715183,
"loss": 0.0499,
"step": 36
},
{
"epoch": 0.3507888304570032,
"grad_norm": 0.12158203125,
"learning_rate": 0.00019615765272689461,
"loss": 0.0555,
"step": 37
},
{
"epoch": 0.3602696096585438,
"grad_norm": 0.11572265625,
"learning_rate": 0.00019586977530533677,
"loss": 0.0613,
"step": 38
},
{
"epoch": 0.36975038886008443,
"grad_norm": 0.1044921875,
"learning_rate": 0.00019557172654929196,
"loss": 0.0574,
"step": 39
},
{
"epoch": 0.37923116806162505,
"grad_norm": 0.1025390625,
"learning_rate": 0.00019526353808033825,
"loss": 0.0527,
"step": 40
},
{
"epoch": 0.3887119472631657,
"grad_norm": 0.1298828125,
"learning_rate": 0.00019494524259582992,
"loss": 0.0561,
"step": 41
},
{
"epoch": 0.3981927264647063,
"grad_norm": 0.10302734375,
"learning_rate": 0.00019461687386542826,
"loss": 0.0487,
"step": 42
},
{
"epoch": 0.40767350566624694,
"grad_norm": 0.11279296875,
"learning_rate": 0.00019427846672751873,
"loss": 0.0476,
"step": 43
},
{
"epoch": 0.41715428486778755,
"grad_norm": 0.1240234375,
"learning_rate": 0.00019393005708551498,
"loss": 0.0593,
"step": 44
},
{
"epoch": 0.41715428486778755,
"eval_loss": 0.051377832889556885,
"eval_runtime": 35.8632,
"eval_samples_per_second": 19.825,
"eval_steps_per_second": 19.825,
"step": 44
},
{
"epoch": 0.4266350640693282,
"grad_norm": 0.10888671875,
"learning_rate": 0.00019357168190404936,
"loss": 0.0498,
"step": 45
},
{
"epoch": 0.43611584327086883,
"grad_norm": 0.1171875,
"learning_rate": 0.00019320337920505153,
"loss": 0.0599,
"step": 46
},
{
"epoch": 0.44559662247240944,
"grad_norm": 0.11669921875,
"learning_rate": 0.00019282518806371414,
"loss": 0.0595,
"step": 47
},
{
"epoch": 0.45507740167395005,
"grad_norm": 0.123046875,
"learning_rate": 0.0001924371486043473,
"loss": 0.0648,
"step": 48
},
{
"epoch": 0.4645581808754907,
"grad_norm": 0.1025390625,
"learning_rate": 0.0001920393019961217,
"loss": 0.0506,
"step": 49
},
{
"epoch": 0.47403896007703133,
"grad_norm": 0.1171875,
"learning_rate": 0.0001916316904487005,
"loss": 0.0568,
"step": 50
},
{
"epoch": 0.48351973927857195,
"grad_norm": 0.09521484375,
"learning_rate": 0.00019121435720776122,
"loss": 0.0476,
"step": 51
},
{
"epoch": 0.49300051848011256,
"grad_norm": 0.11474609375,
"learning_rate": 0.0001907873465504076,
"loss": 0.0504,
"step": 52
},
{
"epoch": 0.5024812976816532,
"grad_norm": 0.11572265625,
"learning_rate": 0.00019035070378047204,
"loss": 0.0486,
"step": 53
},
{
"epoch": 0.5119620768831938,
"grad_norm": 0.10986328125,
"learning_rate": 0.00018990447522370884,
"loss": 0.0467,
"step": 54
},
{
"epoch": 0.5214428560847345,
"grad_norm": 0.12158203125,
"learning_rate": 0.00018944870822287956,
"loss": 0.054,
"step": 55
},
{
"epoch": 0.5214428560847345,
"eval_loss": 0.048270147293806076,
"eval_runtime": 35.8252,
"eval_samples_per_second": 19.846,
"eval_steps_per_second": 19.846,
"step": 55
},
{
"epoch": 0.5309236352862751,
"grad_norm": 0.09912109375,
"learning_rate": 0.00018898345113272998,
"loss": 0.0442,
"step": 56
},
{
"epoch": 0.5404044144878157,
"grad_norm": 0.1005859375,
"learning_rate": 0.00018850875331485995,
"loss": 0.0415,
"step": 57
},
{
"epoch": 0.5498851936893563,
"grad_norm": 0.1142578125,
"learning_rate": 0.00018802466513248632,
"loss": 0.0474,
"step": 58
},
{
"epoch": 0.559365972890897,
"grad_norm": 0.1103515625,
"learning_rate": 0.00018753123794509974,
"loss": 0.0501,
"step": 59
},
{
"epoch": 0.5688467520924376,
"grad_norm": 0.10986328125,
"learning_rate": 0.00018702852410301554,
"loss": 0.0509,
"step": 60
},
{
"epoch": 0.5783275312939782,
"grad_norm": 0.119140625,
"learning_rate": 0.0001865165769418196,
"loss": 0.054,
"step": 61
},
{
"epoch": 0.5878083104955188,
"grad_norm": 0.12353515625,
"learning_rate": 0.00018599545077670985,
"loss": 0.0496,
"step": 62
},
{
"epoch": 0.5972890896970595,
"grad_norm": 0.09619140625,
"learning_rate": 0.0001854652008967335,
"loss": 0.0479,
"step": 63
},
{
"epoch": 0.6067698688986001,
"grad_norm": 0.1123046875,
"learning_rate": 0.00018492588355892124,
"loss": 0.0508,
"step": 64
},
{
"epoch": 0.6162506481001407,
"grad_norm": 0.125,
"learning_rate": 0.00018437755598231856,
"loss": 0.0499,
"step": 65
},
{
"epoch": 0.6257314273016814,
"grad_norm": 0.0986328125,
"learning_rate": 0.00018382027634191524,
"loss": 0.0459,
"step": 66
},
{
"epoch": 0.6257314273016814,
"eval_loss": 0.04686620458960533,
"eval_runtime": 35.9226,
"eval_samples_per_second": 19.793,
"eval_steps_per_second": 19.793,
"step": 66
},
{
"epoch": 0.635212206503222,
"grad_norm": 0.12109375,
"learning_rate": 0.00018325410376247294,
"loss": 0.0473,
"step": 67
},
{
"epoch": 0.6446929857047626,
"grad_norm": 0.1103515625,
"learning_rate": 0.0001826790983122527,
"loss": 0.0443,
"step": 68
},
{
"epoch": 0.6541737649063032,
"grad_norm": 0.11279296875,
"learning_rate": 0.00018209532099664174,
"loss": 0.0508,
"step": 69
},
{
"epoch": 0.6636545441078439,
"grad_norm": 0.11083984375,
"learning_rate": 0.00018150283375168114,
"loss": 0.0485,
"step": 70
},
{
"epoch": 0.6731353233093845,
"grad_norm": 0.10693359375,
"learning_rate": 0.00018090169943749476,
"loss": 0.0535,
"step": 71
},
{
"epoch": 0.6826161025109251,
"grad_norm": 0.10888671875,
"learning_rate": 0.00018029198183161998,
"loss": 0.0617,
"step": 72
},
{
"epoch": 0.6920968817124658,
"grad_norm": 0.09521484375,
"learning_rate": 0.00017967374562224132,
"loss": 0.0477,
"step": 73
},
{
"epoch": 0.7015776609140064,
"grad_norm": 0.0966796875,
"learning_rate": 0.00017904705640132718,
"loss": 0.0545,
"step": 74
},
{
"epoch": 0.711058440115547,
"grad_norm": 0.09423828125,
"learning_rate": 0.00017841198065767107,
"loss": 0.0396,
"step": 75
},
{
"epoch": 0.7205392193170876,
"grad_norm": 0.0947265625,
"learning_rate": 0.00017776858576983712,
"loss": 0.0483,
"step": 76
},
{
"epoch": 0.7300199985186282,
"grad_norm": 0.09326171875,
"learning_rate": 0.0001771169399990119,
"loss": 0.0397,
"step": 77
},
{
"epoch": 0.7300199985186282,
"eval_loss": 0.046032145619392395,
"eval_runtime": 35.9702,
"eval_samples_per_second": 19.766,
"eval_steps_per_second": 19.766,
"step": 77
},
{
"epoch": 0.7395007777201689,
"grad_norm": 0.09423828125,
"learning_rate": 0.00017645711248176195,
"loss": 0.0414,
"step": 78
},
{
"epoch": 0.7489815569217095,
"grad_norm": 0.10107421875,
"learning_rate": 0.00017578917322269886,
"loss": 0.0447,
"step": 79
},
{
"epoch": 0.7584623361232501,
"grad_norm": 0.0966796875,
"learning_rate": 0.00017511319308705198,
"loss": 0.0404,
"step": 80
},
{
"epoch": 0.7679431153247908,
"grad_norm": 0.099609375,
"learning_rate": 0.0001744292437931502,
"loss": 0.0409,
"step": 81
},
{
"epoch": 0.7774238945263314,
"grad_norm": 0.12890625,
"learning_rate": 0.00017373739790481262,
"loss": 0.0482,
"step": 82
},
{
"epoch": 0.786904673727872,
"grad_norm": 0.09375,
"learning_rate": 0.00017303772882365016,
"loss": 0.0356,
"step": 83
},
{
"epoch": 0.7963854529294127,
"grad_norm": 0.1103515625,
"learning_rate": 0.00017233031078127788,
"loss": 0.0467,
"step": 84
},
{
"epoch": 0.8058662321309532,
"grad_norm": 0.11767578125,
"learning_rate": 0.00017161521883143934,
"loss": 0.0537,
"step": 85
},
{
"epoch": 0.8153470113324939,
"grad_norm": 0.095703125,
"learning_rate": 0.00017089252884204377,
"loss": 0.0477,
"step": 86
},
{
"epoch": 0.8248277905340345,
"grad_norm": 0.09521484375,
"learning_rate": 0.0001701623174871168,
"loss": 0.0466,
"step": 87
},
{
"epoch": 0.8343085697355751,
"grad_norm": 0.09326171875,
"learning_rate": 0.0001694246622386658,
"loss": 0.0453,
"step": 88
},
{
"epoch": 0.8343085697355751,
"eval_loss": 0.04489152505993843,
"eval_runtime": 36.0012,
"eval_samples_per_second": 19.749,
"eval_steps_per_second": 19.749,
"step": 88
},
{
"epoch": 0.8437893489371158,
"grad_norm": 0.08349609375,
"learning_rate": 0.00016867964135846043,
"loss": 0.039,
"step": 89
},
{
"epoch": 0.8532701281386564,
"grad_norm": 0.10400390625,
"learning_rate": 0.00016792733388972932,
"loss": 0.0521,
"step": 90
},
{
"epoch": 0.862750907340197,
"grad_norm": 0.0849609375,
"learning_rate": 0.0001671678196487741,
"loss": 0.0481,
"step": 91
},
{
"epoch": 0.8722316865417377,
"grad_norm": 0.09814453125,
"learning_rate": 0.00016640117921650117,
"loss": 0.0509,
"step": 92
},
{
"epoch": 0.8817124657432783,
"grad_norm": 0.0927734375,
"learning_rate": 0.00016562749392987254,
"loss": 0.0413,
"step": 93
},
{
"epoch": 0.8911932449448189,
"grad_norm": 0.08740234375,
"learning_rate": 0.0001648468458732762,
"loss": 0.0405,
"step": 94
},
{
"epoch": 0.9006740241463596,
"grad_norm": 0.09228515625,
"learning_rate": 0.00016405931786981755,
"loss": 0.0458,
"step": 95
},
{
"epoch": 0.9101548033479001,
"grad_norm": 0.1005859375,
"learning_rate": 0.00016326499347253207,
"loss": 0.0457,
"step": 96
},
{
"epoch": 0.9196355825494408,
"grad_norm": 0.09619140625,
"learning_rate": 0.00016246395695552085,
"loss": 0.0439,
"step": 97
},
{
"epoch": 0.9291163617509814,
"grad_norm": 0.08740234375,
"learning_rate": 0.00016165629330500952,
"loss": 0.0389,
"step": 98
},
{
"epoch": 0.938597140952522,
"grad_norm": 0.08935546875,
"learning_rate": 0.0001608420882103315,
"loss": 0.04,
"step": 99
},
{
"epoch": 0.938597140952522,
"eval_loss": 0.04293662682175636,
"eval_runtime": 36.2853,
"eval_samples_per_second": 19.595,
"eval_steps_per_second": 19.595,
"step": 99
},
{
"epoch": 0.9480779201540627,
"grad_norm": 0.09326171875,
"learning_rate": 0.00016002142805483685,
"loss": 0.0418,
"step": 100
},
{
"epoch": 0.9575586993556033,
"grad_norm": 0.0830078125,
"learning_rate": 0.0001591943999067273,
"loss": 0.0406,
"step": 101
},
{
"epoch": 0.9670394785571439,
"grad_norm": 0.09375,
"learning_rate": 0.00015836109150981886,
"loss": 0.0418,
"step": 102
},
{
"epoch": 0.9765202577586846,
"grad_norm": 0.09765625,
"learning_rate": 0.00015752159127423263,
"loss": 0.0395,
"step": 103
},
{
"epoch": 0.9860010369602251,
"grad_norm": 0.08154296875,
"learning_rate": 0.0001566759882670146,
"loss": 0.0375,
"step": 104
},
{
"epoch": 0.9954818161617658,
"grad_norm": 0.1005859375,
"learning_rate": 0.00015582437220268647,
"loss": 0.0463,
"step": 105
},
{
"epoch": 1.0049625953633063,
"grad_norm": 0.07666015625,
"learning_rate": 0.0001549668334337271,
"loss": 0.0346,
"step": 106
},
{
"epoch": 1.0144433745648471,
"grad_norm": 0.07763671875,
"learning_rate": 0.0001541034629409865,
"loss": 0.0322,
"step": 107
},
{
"epoch": 1.0239241537663877,
"grad_norm": 0.08154296875,
"learning_rate": 0.00015323435232403337,
"loss": 0.0328,
"step": 108
},
{
"epoch": 1.0334049329679282,
"grad_norm": 0.06591796875,
"learning_rate": 0.00015235959379143678,
"loss": 0.0299,
"step": 109
},
{
"epoch": 1.042885712169469,
"grad_norm": 0.08544921875,
"learning_rate": 0.0001514792801509831,
"loss": 0.0338,
"step": 110
},
{
"epoch": 1.042885712169469,
"eval_loss": 0.04179221764206886,
"eval_runtime": 35.8658,
"eval_samples_per_second": 19.824,
"eval_steps_per_second": 19.824,
"step": 110
},
{
"epoch": 1.0523664913710096,
"grad_norm": 0.0830078125,
"learning_rate": 0.00015059350479982965,
"loss": 0.0281,
"step": 111
},
{
"epoch": 1.0618472705725501,
"grad_norm": 0.083984375,
"learning_rate": 0.0001497023617145958,
"loss": 0.03,
"step": 112
},
{
"epoch": 1.071328049774091,
"grad_norm": 0.0771484375,
"learning_rate": 0.0001488059454413923,
"loss": 0.0286,
"step": 113
},
{
"epoch": 1.0808088289756315,
"grad_norm": 0.0966796875,
"learning_rate": 0.00014790435108579048,
"loss": 0.0311,
"step": 114
},
{
"epoch": 1.090289608177172,
"grad_norm": 0.0869140625,
"learning_rate": 0.000146997674302732,
"loss": 0.0291,
"step": 115
},
{
"epoch": 1.0997703873787126,
"grad_norm": 0.08984375,
"learning_rate": 0.00014608601128638027,
"loss": 0.0281,
"step": 116
},
{
"epoch": 1.1092511665802534,
"grad_norm": 0.08837890625,
"learning_rate": 0.00014516945875991472,
"loss": 0.0322,
"step": 117
},
{
"epoch": 1.118731945781794,
"grad_norm": 0.083984375,
"learning_rate": 0.00014424811396526892,
"loss": 0.026,
"step": 118
},
{
"epoch": 1.1282127249833347,
"grad_norm": 0.08837890625,
"learning_rate": 0.00014332207465281364,
"loss": 0.0317,
"step": 119
},
{
"epoch": 1.1376935041848752,
"grad_norm": 0.09912109375,
"learning_rate": 0.0001423914390709861,
"loss": 0.0341,
"step": 120
},
{
"epoch": 1.1471742833864158,
"grad_norm": 0.095703125,
"learning_rate": 0.00014145630595586607,
"loss": 0.0322,
"step": 121
},
{
"epoch": 1.1471742833864158,
"eval_loss": 0.04224802553653717,
"eval_runtime": 35.7541,
"eval_samples_per_second": 19.886,
"eval_steps_per_second": 19.886,
"step": 121
},
{
"epoch": 1.1566550625879564,
"grad_norm": 0.07763671875,
"learning_rate": 0.00014051677452070065,
"loss": 0.0279,
"step": 122
},
{
"epoch": 1.1661358417894971,
"grad_norm": 0.08837890625,
"learning_rate": 0.00013957294444537808,
"loss": 0.0276,
"step": 123
},
{
"epoch": 1.1756166209910377,
"grad_norm": 0.099609375,
"learning_rate": 0.0001386249158658522,
"loss": 0.0399,
"step": 124
},
{
"epoch": 1.1850974001925783,
"grad_norm": 0.0888671875,
"learning_rate": 0.00013767278936351854,
"loss": 0.0327,
"step": 125
},
{
"epoch": 1.194578179394119,
"grad_norm": 0.078125,
"learning_rate": 0.00013671666595454295,
"loss": 0.0257,
"step": 126
},
{
"epoch": 1.2040589585956596,
"grad_norm": 0.078125,
"learning_rate": 0.00013575664707914448,
"loss": 0.0268,
"step": 127
},
{
"epoch": 1.2135397377972001,
"grad_norm": 0.0966796875,
"learning_rate": 0.0001347928345908329,
"loss": 0.0368,
"step": 128
},
{
"epoch": 1.223020516998741,
"grad_norm": 0.076171875,
"learning_rate": 0.00013382533074560255,
"loss": 0.0286,
"step": 129
},
{
"epoch": 1.2325012962002815,
"grad_norm": 0.0888671875,
"learning_rate": 0.0001328542381910835,
"loss": 0.0304,
"step": 130
},
{
"epoch": 1.241982075401822,
"grad_norm": 0.087890625,
"learning_rate": 0.00013187965995565098,
"loss": 0.0304,
"step": 131
},
{
"epoch": 1.2514628546033628,
"grad_norm": 0.0732421875,
"learning_rate": 0.00013090169943749476,
"loss": 0.0275,
"step": 132
},
{
"epoch": 1.2514628546033628,
"eval_loss": 0.04160289093852043,
"eval_runtime": 35.8932,
"eval_samples_per_second": 19.809,
"eval_steps_per_second": 19.809,
"step": 132
},
{
"epoch": 1.2609436338049034,
"grad_norm": 0.08203125,
"learning_rate": 0.00012992046039364893,
"loss": 0.0293,
"step": 133
},
{
"epoch": 1.270424413006444,
"grad_norm": 0.087890625,
"learning_rate": 0.0001289360469289838,
"loss": 0.03,
"step": 134
},
{
"epoch": 1.2799051922079845,
"grad_norm": 0.08203125,
"learning_rate": 0.00012794856348516095,
"loss": 0.028,
"step": 135
},
{
"epoch": 1.2893859714095253,
"grad_norm": 0.0869140625,
"learning_rate": 0.00012695811482955227,
"loss": 0.0293,
"step": 136
},
{
"epoch": 1.2988667506110658,
"grad_norm": 0.09228515625,
"learning_rate": 0.00012596480604412484,
"loss": 0.0341,
"step": 137
},
{
"epoch": 1.3083475298126066,
"grad_norm": 0.07470703125,
"learning_rate": 0.000124968742514292,
"loss": 0.0259,
"step": 138
},
{
"epoch": 1.3178283090141472,
"grad_norm": 0.10009765625,
"learning_rate": 0.00012397002991773275,
"loss": 0.0298,
"step": 139
},
{
"epoch": 1.3273090882156877,
"grad_norm": 0.08837890625,
"learning_rate": 0.0001229687742131796,
"loss": 0.0285,
"step": 140
},
{
"epoch": 1.3367898674172283,
"grad_norm": 0.091796875,
"learning_rate": 0.00012196508162917677,
"loss": 0.034,
"step": 141
},
{
"epoch": 1.346270646618769,
"grad_norm": 0.0986328125,
"learning_rate": 0.00012095905865281025,
"loss": 0.0314,
"step": 142
},
{
"epoch": 1.3557514258203096,
"grad_norm": 0.0947265625,
"learning_rate": 0.00011995081201840956,
"loss": 0.0322,
"step": 143
},
{
"epoch": 1.3557514258203096,
"eval_loss": 0.041649721562862396,
"eval_runtime": 35.8396,
"eval_samples_per_second": 19.838,
"eval_steps_per_second": 19.838,
"step": 143
},
{
"epoch": 1.3652322050218502,
"grad_norm": 0.083984375,
"learning_rate": 0.00011894044869622403,
"loss": 0.0303,
"step": 144
},
{
"epoch": 1.374712984223391,
"grad_norm": 0.07373046875,
"learning_rate": 0.00011792807588107357,
"loss": 0.0259,
"step": 145
},
{
"epoch": 1.3841937634249315,
"grad_norm": 0.08740234375,
"learning_rate": 0.00011691380098097597,
"loss": 0.0334,
"step": 146
},
{
"epoch": 1.393674542626472,
"grad_norm": 0.068359375,
"learning_rate": 0.0001158977316057513,
"loss": 0.0245,
"step": 147
},
{
"epoch": 1.4031553218280126,
"grad_norm": 0.09423828125,
"learning_rate": 0.00011487997555560503,
"loss": 0.0322,
"step": 148
},
{
"epoch": 1.4126361010295534,
"grad_norm": 0.0947265625,
"learning_rate": 0.00011386064080969094,
"loss": 0.0338,
"step": 149
},
{
"epoch": 1.422116880231094,
"grad_norm": 0.09326171875,
"learning_rate": 0.00011283983551465511,
"loss": 0.0278,
"step": 150
},
{
"epoch": 1.4315976594326347,
"grad_norm": 0.087890625,
"learning_rate": 0.0001118176679731619,
"loss": 0.0305,
"step": 151
},
{
"epoch": 1.4410784386341753,
"grad_norm": 0.083984375,
"learning_rate": 0.00011079424663240372,
"loss": 0.0311,
"step": 152
},
{
"epoch": 1.4505592178357158,
"grad_norm": 0.08154296875,
"learning_rate": 0.00010976968007259519,
"loss": 0.0298,
"step": 153
},
{
"epoch": 1.4600399970372564,
"grad_norm": 0.072265625,
"learning_rate": 0.00010874407699545328,
"loss": 0.0266,
"step": 154
},
{
"epoch": 1.4600399970372564,
"eval_loss": 0.04042724519968033,
"eval_runtime": 36.0075,
"eval_samples_per_second": 19.746,
"eval_steps_per_second": 19.746,
"step": 154
},
{
"epoch": 1.4695207762387972,
"grad_norm": 0.083984375,
"learning_rate": 0.00010771754621266466,
"loss": 0.0335,
"step": 155
},
{
"epoch": 1.4790015554403377,
"grad_norm": 0.07470703125,
"learning_rate": 0.00010669019663434117,
"loss": 0.0268,
"step": 156
},
{
"epoch": 1.4884823346418785,
"grad_norm": 0.0849609375,
"learning_rate": 0.00010566213725746506,
"loss": 0.0334,
"step": 157
},
{
"epoch": 1.497963113843419,
"grad_norm": 0.0751953125,
"learning_rate": 0.00010463347715432488,
"loss": 0.0283,
"step": 158
},
{
"epoch": 1.5074438930449596,
"grad_norm": 0.07421875,
"learning_rate": 0.00010360432546094341,
"loss": 0.0284,
"step": 159
},
{
"epoch": 1.5169246722465002,
"grad_norm": 0.08154296875,
"learning_rate": 0.00010257479136549889,
"loss": 0.0306,
"step": 160
},
{
"epoch": 1.5264054514480407,
"grad_norm": 0.091796875,
"learning_rate": 0.00010154498409674051,
"loss": 0.0347,
"step": 161
},
{
"epoch": 1.5358862306495815,
"grad_norm": 0.087890625,
"learning_rate": 0.00010051501291240008,
"loss": 0.0343,
"step": 162
},
{
"epoch": 1.5453670098511223,
"grad_norm": 0.08544921875,
"learning_rate": 9.948498708759993e-05,
"loss": 0.0313,
"step": 163
},
{
"epoch": 1.5548477890526629,
"grad_norm": 0.0791015625,
"learning_rate": 9.845501590325948e-05,
"loss": 0.0284,
"step": 164
},
{
"epoch": 1.5643285682542034,
"grad_norm": 0.076171875,
"learning_rate": 9.742520863450115e-05,
"loss": 0.0249,
"step": 165
},
{
"epoch": 1.5643285682542034,
"eval_loss": 0.03973715007305145,
"eval_runtime": 35.8478,
"eval_samples_per_second": 19.834,
"eval_steps_per_second": 19.834,
"step": 165
},
{
"epoch": 1.573809347455744,
"grad_norm": 0.076171875,
"learning_rate": 9.639567453905661e-05,
"loss": 0.0294,
"step": 166
},
{
"epoch": 1.5832901266572845,
"grad_norm": 0.0703125,
"learning_rate": 9.536652284567513e-05,
"loss": 0.0248,
"step": 167
},
{
"epoch": 1.5927709058588253,
"grad_norm": 0.08251953125,
"learning_rate": 9.433786274253495e-05,
"loss": 0.03,
"step": 168
},
{
"epoch": 1.6022516850603659,
"grad_norm": 0.07373046875,
"learning_rate": 9.330980336565887e-05,
"loss": 0.0269,
"step": 169
},
{
"epoch": 1.6117324642619066,
"grad_norm": 0.10791015625,
"learning_rate": 9.228245378733537e-05,
"loss": 0.0323,
"step": 170
},
{
"epoch": 1.6212132434634472,
"grad_norm": 0.08837890625,
"learning_rate": 9.125592300454676e-05,
"loss": 0.0328,
"step": 171
},
{
"epoch": 1.6306940226649878,
"grad_norm": 0.076171875,
"learning_rate": 9.023031992740488e-05,
"loss": 0.0245,
"step": 172
},
{
"epoch": 1.6401748018665283,
"grad_norm": 0.0791015625,
"learning_rate": 8.920575336759629e-05,
"loss": 0.0291,
"step": 173
},
{
"epoch": 1.6496555810680689,
"grad_norm": 0.10107421875,
"learning_rate": 8.818233202683814e-05,
"loss": 0.0324,
"step": 174
},
{
"epoch": 1.6591363602696096,
"grad_norm": 0.0859375,
"learning_rate": 8.71601644853449e-05,
"loss": 0.0294,
"step": 175
},
{
"epoch": 1.6686171394711504,
"grad_norm": 0.0849609375,
"learning_rate": 8.613935919030907e-05,
"loss": 0.0292,
"step": 176
},
{
"epoch": 1.6686171394711504,
"eval_loss": 0.03933083638548851,
"eval_runtime": 36.0124,
"eval_samples_per_second": 19.743,
"eval_steps_per_second": 19.743,
"step": 176
},
{
"epoch": 1.678097918672691,
"grad_norm": 0.07666015625,
"learning_rate": 8.512002444439502e-05,
"loss": 0.028,
"step": 177
},
{
"epoch": 1.6875786978742315,
"grad_norm": 0.08984375,
"learning_rate": 8.410226839424871e-05,
"loss": 0.0312,
"step": 178
},
{
"epoch": 1.697059477075772,
"grad_norm": 0.0732421875,
"learning_rate": 8.308619901902406e-05,
"loss": 0.0253,
"step": 179
},
{
"epoch": 1.7065402562773127,
"grad_norm": 0.083984375,
"learning_rate": 8.207192411892646e-05,
"loss": 0.0278,
"step": 180
},
{
"epoch": 1.7160210354788534,
"grad_norm": 0.08642578125,
"learning_rate": 8.1059551303776e-05,
"loss": 0.0275,
"step": 181
},
{
"epoch": 1.7255018146803942,
"grad_norm": 0.07568359375,
"learning_rate": 8.004918798159045e-05,
"loss": 0.0265,
"step": 182
},
{
"epoch": 1.7349825938819348,
"grad_norm": 0.076171875,
"learning_rate": 7.904094134718976e-05,
"loss": 0.0285,
"step": 183
},
{
"epoch": 1.7444633730834753,
"grad_norm": 0.09228515625,
"learning_rate": 7.803491837082324e-05,
"loss": 0.0349,
"step": 184
},
{
"epoch": 1.7539441522850159,
"grad_norm": 0.09521484375,
"learning_rate": 7.703122578682046e-05,
"loss": 0.0325,
"step": 185
},
{
"epoch": 1.7634249314865564,
"grad_norm": 0.0751953125,
"learning_rate": 7.602997008226726e-05,
"loss": 0.0283,
"step": 186
},
{
"epoch": 1.7729057106880972,
"grad_norm": 0.0869140625,
"learning_rate": 7.5031257485708e-05,
"loss": 0.031,
"step": 187
},
{
"epoch": 1.7729057106880972,
"eval_loss": 0.03848842531442642,
"eval_runtime": 35.9251,
"eval_samples_per_second": 19.791,
"eval_steps_per_second": 19.791,
"step": 187
},
{
"epoch": 1.7823864898896378,
"grad_norm": 0.07861328125,
"learning_rate": 7.403519395587521e-05,
"loss": 0.03,
"step": 188
},
{
"epoch": 1.7918672690911785,
"grad_norm": 0.07177734375,
"learning_rate": 7.304188517044774e-05,
"loss": 0.0281,
"step": 189
},
{
"epoch": 1.801348048292719,
"grad_norm": 0.07568359375,
"learning_rate": 7.205143651483906e-05,
"loss": 0.0253,
"step": 190
},
{
"epoch": 1.8108288274942597,
"grad_norm": 0.06787109375,
"learning_rate": 7.106395307101621e-05,
"loss": 0.0264,
"step": 191
},
{
"epoch": 1.8203096066958002,
"grad_norm": 0.07177734375,
"learning_rate": 7.007953960635109e-05,
"loss": 0.0268,
"step": 192
},
{
"epoch": 1.8297903858973408,
"grad_norm": 0.08740234375,
"learning_rate": 6.909830056250527e-05,
"loss": 0.0308,
"step": 193
},
{
"epoch": 1.8392711650988816,
"grad_norm": 0.07275390625,
"learning_rate": 6.812034004434903e-05,
"loss": 0.0275,
"step": 194
},
{
"epoch": 1.8487519443004223,
"grad_norm": 0.07568359375,
"learning_rate": 6.714576180891654e-05,
"loss": 0.0253,
"step": 195
},
{
"epoch": 1.858232723501963,
"grad_norm": 0.07177734375,
"learning_rate": 6.617466925439746e-05,
"loss": 0.0262,
"step": 196
},
{
"epoch": 1.8677135027035034,
"grad_norm": 0.06787109375,
"learning_rate": 6.520716540916709e-05,
"loss": 0.0257,
"step": 197
},
{
"epoch": 1.877194281905044,
"grad_norm": 0.0751953125,
"learning_rate": 6.424335292085553e-05,
"loss": 0.0265,
"step": 198
},
{
"epoch": 1.877194281905044,
"eval_loss": 0.037521231919527054,
"eval_runtime": 36.0213,
"eval_samples_per_second": 19.738,
"eval_steps_per_second": 19.738,
"step": 198
},
{
"epoch": 1.8866750611065846,
"grad_norm": 0.10107421875,
"learning_rate": 6.32833340454571e-05,
"loss": 0.0269,
"step": 199
},
{
"epoch": 1.8961558403081253,
"grad_norm": 0.0751953125,
"learning_rate": 6.232721063648148e-05,
"loss": 0.0279,
"step": 200
},
{
"epoch": 1.905636619509666,
"grad_norm": 0.0791015625,
"learning_rate": 6.137508413414784e-05,
"loss": 0.0301,
"step": 201
},
{
"epoch": 1.9151173987112067,
"grad_norm": 0.0791015625,
"learning_rate": 6.0427055554621913e-05,
"loss": 0.0289,
"step": 202
},
{
"epoch": 1.9245981779127472,
"grad_norm": 0.08349609375,
"learning_rate": 5.948322547929939e-05,
"loss": 0.0239,
"step": 203
},
{
"epoch": 1.9340789571142878,
"grad_norm": 0.07373046875,
"learning_rate": 5.854369404413398e-05,
"loss": 0.023,
"step": 204
},
{
"epoch": 1.9435597363158283,
"grad_norm": 0.080078125,
"learning_rate": 5.7608560929013946e-05,
"loss": 0.0274,
"step": 205
},
{
"epoch": 1.9530405155173691,
"grad_norm": 0.08447265625,
"learning_rate": 5.667792534718639e-05,
"loss": 0.0308,
"step": 206
},
{
"epoch": 1.9625212947189097,
"grad_norm": 0.0732421875,
"learning_rate": 5.5751886034731115e-05,
"loss": 0.0253,
"step": 207
},
{
"epoch": 1.9720020739204505,
"grad_norm": 0.0732421875,
"learning_rate": 5.483054124008528e-05,
"loss": 0.0261,
"step": 208
},
{
"epoch": 1.981482853121991,
"grad_norm": 0.07763671875,
"learning_rate": 5.391398871361972e-05,
"loss": 0.0273,
"step": 209
},
{
"epoch": 1.981482853121991,
"eval_loss": 0.037543829530477524,
"eval_runtime": 36.1289,
"eval_samples_per_second": 19.68,
"eval_steps_per_second": 19.68,
"step": 209
},
{
"epoch": 1.9909636323235316,
"grad_norm": 0.07958984375,
"learning_rate": 5.300232569726804e-05,
"loss": 0.0284,
"step": 210
},
{
"epoch": 2.000444411525072,
"grad_norm": 0.08544921875,
"learning_rate": 5.2095648914209525e-05,
"loss": 0.0338,
"step": 211
},
{
"epoch": 2.0099251907266127,
"grad_norm": 0.06298828125,
"learning_rate": 5.119405455860772e-05,
"loss": 0.0198,
"step": 212
},
{
"epoch": 2.0194059699281537,
"grad_norm": 0.05810546875,
"learning_rate": 5.029763828540419e-05,
"loss": 0.0192,
"step": 213
},
{
"epoch": 2.0288867491296942,
"grad_norm": 0.059326171875,
"learning_rate": 4.940649520017035e-05,
"loss": 0.0202,
"step": 214
},
{
"epoch": 2.038367528331235,
"grad_norm": 0.060546875,
"learning_rate": 4.852071984901696e-05,
"loss": 0.0199,
"step": 215
},
{
"epoch": 2.0478483075327754,
"grad_norm": 0.05078125,
"learning_rate": 4.7640406208563224e-05,
"loss": 0.0169,
"step": 216
},
{
"epoch": 2.057329086734316,
"grad_norm": 0.058349609375,
"learning_rate": 4.676564767596663e-05,
"loss": 0.0193,
"step": 217
},
{
"epoch": 2.0668098659358565,
"grad_norm": 0.05712890625,
"learning_rate": 4.5896537059013536e-05,
"loss": 0.0175,
"step": 218
},
{
"epoch": 2.076290645137397,
"grad_norm": 0.056396484375,
"learning_rate": 4.503316656627294e-05,
"loss": 0.0193,
"step": 219
},
{
"epoch": 2.085771424338938,
"grad_norm": 0.06298828125,
"learning_rate": 4.417562779731355e-05,
"loss": 0.0175,
"step": 220
},
{
"epoch": 2.085771424338938,
"eval_loss": 0.03771064057946205,
"eval_runtime": 36.0692,
"eval_samples_per_second": 19.712,
"eval_steps_per_second": 19.712,
"step": 220
},
{
"epoch": 2.0952522035404786,
"grad_norm": 0.0693359375,
"learning_rate": 4.3324011732985433e-05,
"loss": 0.0205,
"step": 221
},
{
"epoch": 2.104732982742019,
"grad_norm": 0.0693359375,
"learning_rate": 4.247840872576739e-05,
"loss": 0.0184,
"step": 222
},
{
"epoch": 2.1142137619435597,
"grad_norm": 0.0634765625,
"learning_rate": 4.163890849018114e-05,
"loss": 0.0191,
"step": 223
},
{
"epoch": 2.1236945411451003,
"grad_norm": 0.057373046875,
"learning_rate": 4.0805600093272735e-05,
"loss": 0.0157,
"step": 224
},
{
"epoch": 2.133175320346641,
"grad_norm": 0.06640625,
"learning_rate": 3.997857194516319e-05,
"loss": 0.019,
"step": 225
},
{
"epoch": 2.142656099548182,
"grad_norm": 0.06494140625,
"learning_rate": 3.9157911789668525e-05,
"loss": 0.0173,
"step": 226
},
{
"epoch": 2.1521368787497224,
"grad_norm": 0.064453125,
"learning_rate": 3.8343706694990465e-05,
"loss": 0.0169,
"step": 227
},
{
"epoch": 2.161617657951263,
"grad_norm": 0.06689453125,
"learning_rate": 3.753604304447915e-05,
"loss": 0.0166,
"step": 228
},
{
"epoch": 2.1710984371528035,
"grad_norm": 0.076171875,
"learning_rate": 3.6735006527467965e-05,
"loss": 0.0166,
"step": 229
},
{
"epoch": 2.180579216354344,
"grad_norm": 0.064453125,
"learning_rate": 3.594068213018249e-05,
"loss": 0.0172,
"step": 230
},
{
"epoch": 2.1900599955558846,
"grad_norm": 0.06884765625,
"learning_rate": 3.515315412672384e-05,
"loss": 0.0168,
"step": 231
},
{
"epoch": 2.1900599955558846,
"eval_loss": 0.03964013606309891,
"eval_runtime": 36.2659,
"eval_samples_per_second": 19.605,
"eval_steps_per_second": 19.605,
"step": 231
},
{
"epoch": 2.199540774757425,
"grad_norm": 0.0751953125,
"learning_rate": 3.437250607012748e-05,
"loss": 0.0166,
"step": 232
},
{
"epoch": 2.209021553958966,
"grad_norm": 0.072265625,
"learning_rate": 3.359882078349883e-05,
"loss": 0.0181,
"step": 233
},
{
"epoch": 2.2185023331605067,
"grad_norm": 0.0673828125,
"learning_rate": 3.283218035122592e-05,
"loss": 0.0179,
"step": 234
},
{
"epoch": 2.2279831123620473,
"grad_norm": 0.09130859375,
"learning_rate": 3.207266611027069e-05,
"loss": 0.016,
"step": 235
},
{
"epoch": 2.237463891563588,
"grad_norm": 0.08447265625,
"learning_rate": 3.132035864153958e-05,
"loss": 0.0192,
"step": 236
},
{
"epoch": 2.2469446707651284,
"grad_norm": 0.08154296875,
"learning_rate": 3.057533776133421e-05,
"loss": 0.0183,
"step": 237
},
{
"epoch": 2.2564254499666694,
"grad_norm": 0.06640625,
"learning_rate": 2.98376825128832e-05,
"loss": 0.0146,
"step": 238
},
{
"epoch": 2.26590622916821,
"grad_norm": 0.07373046875,
"learning_rate": 2.910747115795628e-05,
"loss": 0.0171,
"step": 239
},
{
"epoch": 2.2753870083697505,
"grad_norm": 0.072265625,
"learning_rate": 2.8384781168560693e-05,
"loss": 0.0168,
"step": 240
},
{
"epoch": 2.284867787571291,
"grad_norm": 0.0859375,
"learning_rate": 2.766968921872213e-05,
"loss": 0.0166,
"step": 241
},
{
"epoch": 2.2943485667728316,
"grad_norm": 0.07470703125,
"learning_rate": 2.6962271176349852e-05,
"loss": 0.0182,
"step": 242
},
{
"epoch": 2.2943485667728316,
"eval_loss": 0.04028235375881195,
"eval_runtime": 36.1866,
"eval_samples_per_second": 19.648,
"eval_steps_per_second": 19.648,
"step": 242
},
{
"epoch": 2.303829345974372,
"grad_norm": 0.06640625,
"learning_rate": 2.626260209518737e-05,
"loss": 0.0159,
"step": 243
},
{
"epoch": 2.3133101251759127,
"grad_norm": 0.06884765625,
"learning_rate": 2.5570756206849832e-05,
"loss": 0.0154,
"step": 244
},
{
"epoch": 2.3227909043774533,
"grad_norm": 0.064453125,
"learning_rate": 2.4886806912948035e-05,
"loss": 0.0148,
"step": 245
},
{
"epoch": 2.3322716835789943,
"grad_norm": 0.07080078125,
"learning_rate": 2.4210826777301153e-05,
"loss": 0.0175,
"step": 246
},
{
"epoch": 2.341752462780535,
"grad_norm": 0.06494140625,
"learning_rate": 2.3542887518238056e-05,
"loss": 0.0155,
"step": 247
},
{
"epoch": 2.3512332419820754,
"grad_norm": 0.078125,
"learning_rate": 2.288306000098811e-05,
"loss": 0.0166,
"step": 248
},
{
"epoch": 2.360714021183616,
"grad_norm": 0.083984375,
"learning_rate": 2.2231414230162894e-05,
"loss": 0.022,
"step": 249
},
{
"epoch": 2.3701948003851565,
"grad_norm": 0.09033203125,
"learning_rate": 2.1588019342328968e-05,
"loss": 0.0234,
"step": 250
},
{
"epoch": 2.3796755795866975,
"grad_norm": 0.07666015625,
"learning_rate": 2.0952943598672846e-05,
"loss": 0.0176,
"step": 251
},
{
"epoch": 2.389156358788238,
"grad_norm": 0.06982421875,
"learning_rate": 2.0326254377758703e-05,
"loss": 0.0189,
"step": 252
},
{
"epoch": 2.3986371379897786,
"grad_norm": 0.0751953125,
"learning_rate": 1.9708018168380037e-05,
"loss": 0.0201,
"step": 253
},
{
"epoch": 2.3986371379897786,
"eval_loss": 0.039680514484643936,
"eval_runtime": 36.0607,
"eval_samples_per_second": 19.717,
"eval_steps_per_second": 19.717,
"step": 253
},
{
"epoch": 2.408117917191319,
"grad_norm": 0.06298828125,
"learning_rate": 1.9098300562505266e-05,
"loss": 0.0147,
"step": 254
},
{
"epoch": 2.4175986963928597,
"grad_norm": 0.0634765625,
"learning_rate": 1.8497166248318876e-05,
"loss": 0.0172,
"step": 255
},
{
"epoch": 2.4270794755944003,
"grad_norm": 0.08349609375,
"learning_rate": 1.7904679003358283e-05,
"loss": 0.0173,
"step": 256
},
{
"epoch": 2.436560254795941,
"grad_norm": 0.07177734375,
"learning_rate": 1.7320901687747292e-05,
"loss": 0.0174,
"step": 257
},
{
"epoch": 2.446041033997482,
"grad_norm": 0.08203125,
"learning_rate": 1.674589623752707e-05,
"loss": 0.0184,
"step": 258
},
{
"epoch": 2.4555218131990224,
"grad_norm": 0.0947265625,
"learning_rate": 1.617972365808481e-05,
"loss": 0.0192,
"step": 259
},
{
"epoch": 2.465002592400563,
"grad_norm": 0.06005859375,
"learning_rate": 1.562244401768144e-05,
"loss": 0.0129,
"step": 260
},
{
"epoch": 2.4744833716021035,
"grad_norm": 0.0791015625,
"learning_rate": 1.507411644107879e-05,
"loss": 0.0215,
"step": 261
},
{
"epoch": 2.483964150803644,
"grad_norm": 0.078125,
"learning_rate": 1.4534799103266505e-05,
"loss": 0.0194,
"step": 262
},
{
"epoch": 2.4934449300051846,
"grad_norm": 0.07275390625,
"learning_rate": 1.4004549223290164e-05,
"loss": 0.0168,
"step": 263
},
{
"epoch": 2.5029257092067256,
"grad_norm": 0.05810546875,
"learning_rate": 1.3483423058180421e-05,
"loss": 0.0138,
"step": 264
},
{
"epoch": 2.5029257092067256,
"eval_loss": 0.039309222251176834,
"eval_runtime": 36.2397,
"eval_samples_per_second": 19.619,
"eval_steps_per_second": 19.619,
"step": 264
},
{
"epoch": 2.512406488408266,
"grad_norm": 0.06787109375,
"learning_rate": 1.2971475896984475e-05,
"loss": 0.0157,
"step": 265
}
],
"logging_steps": 1,
"max_steps": 315,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 53,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.742595805273129e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}