{
"best_metric": 1.9140400886535645,
"best_model_checkpoint": "/home/datta0/models/lora_final/Qwen2-7B_pct_default_r16/checkpoint-376",
"epoch": 0.9893719806763285,
"eval_steps": 8,
"global_step": 384,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0025764895330112722,
"grad_norm": 6.036927223205566,
"learning_rate": 1.25e-05,
"loss": 2.0463,
"step": 1
},
{
"epoch": 0.010305958132045089,
"grad_norm": 4.005067825317383,
"learning_rate": 5e-05,
"loss": 2.1077,
"step": 4
},
{
"epoch": 0.020611916264090178,
"grad_norm": 7.223879814147949,
"learning_rate": 0.0001,
"loss": 2.031,
"step": 8
},
{
"epoch": 0.020611916264090178,
"eval_loss": 1.9518073797225952,
"eval_runtime": 125.9043,
"eval_samples_per_second": 1.946,
"eval_steps_per_second": 0.977,
"step": 8
},
{
"epoch": 0.030917874396135265,
"grad_norm": 2.8582210540771484,
"learning_rate": 9.997266286704631e-05,
"loss": 2.0048,
"step": 12
},
{
"epoch": 0.041223832528180356,
"grad_norm": 2.455995559692383,
"learning_rate": 9.989068136093873e-05,
"loss": 2.0039,
"step": 16
},
{
"epoch": 0.041223832528180356,
"eval_loss": 1.9526941776275635,
"eval_runtime": 107.8801,
"eval_samples_per_second": 2.271,
"eval_steps_per_second": 1.14,
"step": 16
},
{
"epoch": 0.05152979066022544,
"grad_norm": 2.7170777320861816,
"learning_rate": 9.975414512725057e-05,
"loss": 1.9595,
"step": 20
},
{
"epoch": 0.06183574879227053,
"grad_norm": 3.476964235305786,
"learning_rate": 9.956320346634876e-05,
"loss": 2.0258,
"step": 24
},
{
"epoch": 0.06183574879227053,
"eval_loss": 1.9363595247268677,
"eval_runtime": 123.7728,
"eval_samples_per_second": 1.979,
"eval_steps_per_second": 0.994,
"step": 24
},
{
"epoch": 0.07214170692431562,
"grad_norm": 2.7470412254333496,
"learning_rate": 9.931806517013612e-05,
"loss": 2.0143,
"step": 28
},
{
"epoch": 0.08244766505636071,
"grad_norm": 2.5656278133392334,
"learning_rate": 9.901899829374047e-05,
"loss": 1.9476,
"step": 32
},
{
"epoch": 0.08244766505636071,
"eval_loss": 1.9395873546600342,
"eval_runtime": 237.2101,
"eval_samples_per_second": 1.033,
"eval_steps_per_second": 0.519,
"step": 32
},
{
"epoch": 0.0927536231884058,
"grad_norm": 2.428863763809204,
"learning_rate": 9.86663298624003e-05,
"loss": 2.0166,
"step": 36
},
{
"epoch": 0.10305958132045089,
"grad_norm": 2.5703530311584473,
"learning_rate": 9.826044551386744e-05,
"loss": 1.993,
"step": 40
},
{
"epoch": 0.10305958132045089,
"eval_loss": 1.9390852451324463,
"eval_runtime": 125.0953,
"eval_samples_per_second": 1.959,
"eval_steps_per_second": 0.983,
"step": 40
},
{
"epoch": 0.11336553945249597,
"grad_norm": 2.8227152824401855,
"learning_rate": 9.780178907671789e-05,
"loss": 1.9189,
"step": 44
},
{
"epoch": 0.12367149758454106,
"grad_norm": 2.5152082443237305,
"learning_rate": 9.729086208503174e-05,
"loss": 1.9924,
"step": 48
},
{
"epoch": 0.12367149758454106,
"eval_loss": 1.9412620067596436,
"eval_runtime": 125.5452,
"eval_samples_per_second": 1.951,
"eval_steps_per_second": 0.98,
"step": 48
},
{
"epoch": 0.13397745571658615,
"grad_norm": 2.8379340171813965,
"learning_rate": 9.672822322997305e-05,
"loss": 1.9297,
"step": 52
},
{
"epoch": 0.14428341384863125,
"grad_norm": 2.782554864883423,
"learning_rate": 9.611448774886924e-05,
"loss": 2.0036,
"step": 56
},
{
"epoch": 0.14428341384863125,
"eval_loss": 1.9407403469085693,
"eval_runtime": 111.7215,
"eval_samples_per_second": 2.193,
"eval_steps_per_second": 1.101,
"step": 56
},
{
"epoch": 0.15458937198067632,
"grad_norm": 3.028449535369873,
"learning_rate": 9.545032675245813e-05,
"loss": 1.9739,
"step": 60
},
{
"epoch": 0.16489533011272142,
"grad_norm": 3.124444007873535,
"learning_rate": 9.473646649103818e-05,
"loss": 1.9358,
"step": 64
},
{
"epoch": 0.16489533011272142,
"eval_loss": 1.9378271102905273,
"eval_runtime": 122.2691,
"eval_samples_per_second": 2.004,
"eval_steps_per_second": 1.006,
"step": 64
},
{
"epoch": 0.1752012882447665,
"grad_norm": 2.85798716545105,
"learning_rate": 9.397368756032445e-05,
"loss": 2.0492,
"step": 68
},
{
"epoch": 0.1855072463768116,
"grad_norm": 2.860339403152466,
"learning_rate": 9.316282404787871e-05,
"loss": 1.9956,
"step": 72
},
{
"epoch": 0.1855072463768116,
"eval_loss": 1.9400973320007324,
"eval_runtime": 233.619,
"eval_samples_per_second": 1.049,
"eval_steps_per_second": 0.526,
"step": 72
},
{
"epoch": 0.19581320450885667,
"grad_norm": 2.945218563079834,
"learning_rate": 9.230476262104677e-05,
"loss": 2.0209,
"step": 76
},
{
"epoch": 0.20611916264090177,
"grad_norm": 2.412891387939453,
"learning_rate": 9.140044155740101e-05,
"loss": 2.0183,
"step": 80
},
{
"epoch": 0.20611916264090177,
"eval_loss": 1.9399027824401855,
"eval_runtime": 104.912,
"eval_samples_per_second": 2.335,
"eval_steps_per_second": 1.172,
"step": 80
},
{
"epoch": 0.21642512077294687,
"grad_norm": 2.782628059387207,
"learning_rate": 9.045084971874738e-05,
"loss": 2.0703,
"step": 84
},
{
"epoch": 0.22673107890499195,
"grad_norm": 2.9531919956207275,
"learning_rate": 8.945702546981969e-05,
"loss": 1.9952,
"step": 88
},
{
"epoch": 0.22673107890499195,
"eval_loss": 1.9411203861236572,
"eval_runtime": 125.6512,
"eval_samples_per_second": 1.95,
"eval_steps_per_second": 0.979,
"step": 88
},
{
"epoch": 0.23703703703703705,
"grad_norm": 2.6295812129974365,
"learning_rate": 8.842005554284296e-05,
"loss": 1.916,
"step": 92
},
{
"epoch": 0.24734299516908212,
"grad_norm": 2.6426079273223877,
"learning_rate": 8.73410738492077e-05,
"loss": 1.9309,
"step": 96
},
{
"epoch": 0.24734299516908212,
"eval_loss": 1.941253662109375,
"eval_runtime": 116.9383,
"eval_samples_per_second": 2.095,
"eval_steps_per_second": 1.052,
"step": 96
},
{
"epoch": 0.2576489533011272,
"grad_norm": 2.405819892883301,
"learning_rate": 8.622126023955446e-05,
"loss": 2.0233,
"step": 100
},
{
"epoch": 0.2679549114331723,
"grad_norm": 2.601391315460205,
"learning_rate": 8.506183921362443e-05,
"loss": 2.0042,
"step": 104
},
{
"epoch": 0.2679549114331723,
"eval_loss": 1.9425873756408691,
"eval_runtime": 109.1015,
"eval_samples_per_second": 2.246,
"eval_steps_per_second": 1.127,
"step": 104
},
{
"epoch": 0.2782608695652174,
"grad_norm": 2.4709646701812744,
"learning_rate": 8.386407858128706e-05,
"loss": 2.0291,
"step": 108
},
{
"epoch": 0.2885668276972625,
"grad_norm": 2.8112270832061768,
"learning_rate": 8.262928807620843e-05,
"loss": 1.8885,
"step": 112
},
{
"epoch": 0.2885668276972625,
"eval_loss": 1.9404640197753906,
"eval_runtime": 249.4306,
"eval_samples_per_second": 0.982,
"eval_steps_per_second": 0.493,
"step": 112
},
{
"epoch": 0.29887278582930754,
"grad_norm": 2.589830160140991,
"learning_rate": 8.135881792367686e-05,
"loss": 2.0415,
"step": 116
},
{
"epoch": 0.30917874396135264,
"grad_norm": 2.830230712890625,
"learning_rate": 8.005405736415126e-05,
"loss": 1.9462,
"step": 120
},
{
"epoch": 0.30917874396135264,
"eval_loss": 1.9408910274505615,
"eval_runtime": 118.2548,
"eval_samples_per_second": 2.072,
"eval_steps_per_second": 1.04,
"step": 120
},
{
"epoch": 0.31948470209339774,
"grad_norm": 3.2373831272125244,
"learning_rate": 7.871643313414718e-05,
"loss": 2.0322,
"step": 124
},
{
"epoch": 0.32979066022544284,
"grad_norm": 2.929025650024414,
"learning_rate": 7.734740790612136e-05,
"loss": 1.9787,
"step": 128
},
{
"epoch": 0.32979066022544284,
"eval_loss": 1.9440549612045288,
"eval_runtime": 124.819,
"eval_samples_per_second": 1.963,
"eval_steps_per_second": 0.985,
"step": 128
},
{
"epoch": 0.34009661835748795,
"grad_norm": 3.1260762214660645,
"learning_rate": 7.594847868906076e-05,
"loss": 1.9843,
"step": 132
},
{
"epoch": 0.350402576489533,
"grad_norm": 3.138162851333618,
"learning_rate": 7.452117519152542e-05,
"loss": 1.9647,
"step": 136
},
{
"epoch": 0.350402576489533,
"eval_loss": 1.9407563209533691,
"eval_runtime": 113.5508,
"eval_samples_per_second": 2.158,
"eval_steps_per_second": 1.083,
"step": 136
},
{
"epoch": 0.3607085346215781,
"grad_norm": 2.764012336730957,
"learning_rate": 7.30670581489344e-05,
"loss": 1.9292,
"step": 140
},
{
"epoch": 0.3710144927536232,
"grad_norm": 2.7180254459381104,
"learning_rate": 7.158771761692464e-05,
"loss": 1.9391,
"step": 144
},
{
"epoch": 0.3710144927536232,
"eval_loss": 1.9398295879364014,
"eval_runtime": 208.0995,
"eval_samples_per_second": 1.177,
"eval_steps_per_second": 0.591,
"step": 144
},
{
"epoch": 0.3813204508856683,
"grad_norm": 2.800187826156616,
"learning_rate": 7.008477123264848e-05,
"loss": 1.9976,
"step": 148
},
{
"epoch": 0.39162640901771334,
"grad_norm": 2.7626631259918213,
"learning_rate": 6.855986244591104e-05,
"loss": 2.0038,
"step": 152
},
{
"epoch": 0.39162640901771334,
"eval_loss": 1.9389182329177856,
"eval_runtime": 233.4305,
"eval_samples_per_second": 1.05,
"eval_steps_per_second": 0.527,
"step": 152
},
{
"epoch": 0.40193236714975844,
"grad_norm": 2.7909302711486816,
"learning_rate": 6.701465872208216e-05,
"loss": 1.9963,
"step": 156
},
{
"epoch": 0.41223832528180354,
"grad_norm": 2.133431911468506,
"learning_rate": 6.545084971874738e-05,
"loss": 2.0412,
"step": 160
},
{
"epoch": 0.41223832528180354,
"eval_loss": 1.9401731491088867,
"eval_runtime": 120.3714,
"eval_samples_per_second": 2.035,
"eval_steps_per_second": 1.022,
"step": 160
},
{
"epoch": 0.42254428341384864,
"grad_norm": 2.4993631839752197,
"learning_rate": 6.387014543809223e-05,
"loss": 2.0386,
"step": 164
},
{
"epoch": 0.43285024154589374,
"grad_norm": 2.903820753097534,
"learning_rate": 6.227427435703997e-05,
"loss": 2.0523,
"step": 168
},
{
"epoch": 0.43285024154589374,
"eval_loss": 1.9371063709259033,
"eval_runtime": 117.7362,
"eval_samples_per_second": 2.081,
"eval_steps_per_second": 1.045,
"step": 168
},
{
"epoch": 0.4431561996779388,
"grad_norm": 2.638428211212158,
"learning_rate": 6.066498153718735e-05,
"loss": 1.9451,
"step": 172
},
{
"epoch": 0.4534621578099839,
"grad_norm": 2.898460626602173,
"learning_rate": 5.90440267166055e-05,
"loss": 1.9364,
"step": 176
},
{
"epoch": 0.4534621578099839,
"eval_loss": 1.9393686056137085,
"eval_runtime": 130.6388,
"eval_samples_per_second": 1.875,
"eval_steps_per_second": 0.942,
"step": 176
},
{
"epoch": 0.463768115942029,
"grad_norm": 3.116927146911621,
"learning_rate": 5.74131823855921e-05,
"loss": 2.0401,
"step": 180
},
{
"epoch": 0.4740740740740741,
"grad_norm": 2.4770333766937256,
"learning_rate": 5.577423184847932e-05,
"loss": 1.9805,
"step": 184
},
{
"epoch": 0.4740740740740741,
"eval_loss": 1.9394539594650269,
"eval_runtime": 221.6098,
"eval_samples_per_second": 1.106,
"eval_steps_per_second": 0.555,
"step": 184
},
{
"epoch": 0.48438003220611914,
"grad_norm": 2.262666702270508,
"learning_rate": 5.4128967273616625e-05,
"loss": 1.9786,
"step": 188
},
{
"epoch": 0.49468599033816424,
"grad_norm": 2.686546802520752,
"learning_rate": 5.247918773366112e-05,
"loss": 1.9935,
"step": 192
},
{
"epoch": 0.49468599033816424,
"eval_loss": 1.9380260705947876,
"eval_runtime": 121.9702,
"eval_samples_per_second": 2.009,
"eval_steps_per_second": 1.008,
"step": 192
},
{
"epoch": 0.5049919484702093,
"grad_norm": 2.820659875869751,
"learning_rate": 5.0826697238317935e-05,
"loss": 1.9155,
"step": 196
},
{
"epoch": 0.5152979066022544,
"grad_norm": 2.929722785949707,
"learning_rate": 4.917330276168208e-05,
"loss": 1.9342,
"step": 200
},
{
"epoch": 0.5152979066022544,
"eval_loss": 1.9345794916152954,
"eval_runtime": 131.2895,
"eval_samples_per_second": 1.866,
"eval_steps_per_second": 0.937,
"step": 200
},
{
"epoch": 0.5256038647342995,
"grad_norm": 2.6336963176727295,
"learning_rate": 4.7520812266338885e-05,
"loss": 2.0489,
"step": 204
},
{
"epoch": 0.5359098228663446,
"grad_norm": 2.551440477371216,
"learning_rate": 4.5871032726383386e-05,
"loss": 1.9708,
"step": 208
},
{
"epoch": 0.5359098228663446,
"eval_loss": 1.9360543489456177,
"eval_runtime": 119.4716,
"eval_samples_per_second": 2.051,
"eval_steps_per_second": 1.03,
"step": 208
},
{
"epoch": 0.5462157809983897,
"grad_norm": 2.525174856185913,
"learning_rate": 4.4225768151520694e-05,
"loss": 1.9851,
"step": 212
},
{
"epoch": 0.5565217391304348,
"grad_norm": 2.6649765968322754,
"learning_rate": 4.2586817614407895e-05,
"loss": 2.0128,
"step": 216
},
{
"epoch": 0.5565217391304348,
"eval_loss": 1.9354972839355469,
"eval_runtime": 128.1483,
"eval_samples_per_second": 1.912,
"eval_steps_per_second": 0.96,
"step": 216
},
{
"epoch": 0.5668276972624798,
"grad_norm": 2.538203001022339,
"learning_rate": 4.095597328339452e-05,
"loss": 2.0375,
"step": 220
},
{
"epoch": 0.577133655394525,
"grad_norm": 2.639700412750244,
"learning_rate": 3.933501846281267e-05,
"loss": 1.9416,
"step": 224
},
{
"epoch": 0.577133655394525,
"eval_loss": 1.930440902709961,
"eval_runtime": 219.1333,
"eval_samples_per_second": 1.118,
"eval_steps_per_second": 0.561,
"step": 224
},
{
"epoch": 0.58743961352657,
"grad_norm": 2.561579704284668,
"learning_rate": 3.772572564296005e-05,
"loss": 2.022,
"step": 228
},
{
"epoch": 0.5977455716586151,
"grad_norm": 2.42655611038208,
"learning_rate": 3.612985456190778e-05,
"loss": 1.9658,
"step": 232
},
{
"epoch": 0.5977455716586151,
"eval_loss": 1.9349409341812134,
"eval_runtime": 107.6562,
"eval_samples_per_second": 2.276,
"eval_steps_per_second": 1.143,
"step": 232
},
{
"epoch": 0.6080515297906602,
"grad_norm": 2.426743268966675,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.9736,
"step": 236
},
{
"epoch": 0.6183574879227053,
"grad_norm": 2.663156747817993,
"learning_rate": 3.298534127791785e-05,
"loss": 1.9161,
"step": 240
},
{
"epoch": 0.6183574879227053,
"eval_loss": 1.9257853031158447,
"eval_runtime": 50.4859,
"eval_samples_per_second": 4.853,
"eval_steps_per_second": 2.436,
"step": 240
},
{
"epoch": 0.6286634460547504,
"grad_norm": 2.280381679534912,
"learning_rate": 3.144013755408895e-05,
"loss": 1.9576,
"step": 244
},
{
"epoch": 0.6389694041867955,
"grad_norm": 2.5414199829101562,
"learning_rate": 2.991522876735154e-05,
"loss": 1.94,
"step": 248
},
{
"epoch": 0.6389694041867955,
"eval_loss": 1.9257922172546387,
"eval_runtime": 128.998,
"eval_samples_per_second": 1.899,
"eval_steps_per_second": 0.954,
"step": 248
},
{
"epoch": 0.6492753623188405,
"grad_norm": 2.5111727714538574,
"learning_rate": 2.8412282383075363e-05,
"loss": 1.9636,
"step": 252
},
{
"epoch": 0.6595813204508857,
"grad_norm": 2.4180471897125244,
"learning_rate": 2.693294185106562e-05,
"loss": 1.9908,
"step": 256
},
{
"epoch": 0.6595813204508857,
"eval_loss": 1.9244165420532227,
"eval_runtime": 124.6471,
"eval_samples_per_second": 1.966,
"eval_steps_per_second": 0.987,
"step": 256
},
{
"epoch": 0.6698872785829307,
"grad_norm": 2.398568868637085,
"learning_rate": 2.547882480847461e-05,
"loss": 1.9779,
"step": 260
},
{
"epoch": 0.6801932367149759,
"grad_norm": 2.521932601928711,
"learning_rate": 2.405152131093926e-05,
"loss": 1.9169,
"step": 264
},
{
"epoch": 0.6801932367149759,
"eval_loss": 1.9241654872894287,
"eval_runtime": 232.9073,
"eval_samples_per_second": 1.052,
"eval_steps_per_second": 0.528,
"step": 264
},
{
"epoch": 0.6904991948470209,
"grad_norm": 2.5789191722869873,
"learning_rate": 2.2652592093878666e-05,
"loss": 2.015,
"step": 268
},
{
"epoch": 0.700805152979066,
"grad_norm": 2.303483724594116,
"learning_rate": 2.128356686585282e-05,
"loss": 1.9868,
"step": 272
},
{
"epoch": 0.700805152979066,
"eval_loss": 1.921614408493042,
"eval_runtime": 108.603,
"eval_samples_per_second": 2.256,
"eval_steps_per_second": 1.133,
"step": 272
},
{
"epoch": 0.7111111111111111,
"grad_norm": 2.581958293914795,
"learning_rate": 1.9945942635848748e-05,
"loss": 1.9239,
"step": 276
},
{
"epoch": 0.7214170692431562,
"grad_norm": 2.9368388652801514,
"learning_rate": 1.8641182076323148e-05,
"loss": 1.8737,
"step": 280
},
{
"epoch": 0.7214170692431562,
"eval_loss": 1.9208799600601196,
"eval_runtime": 123.5168,
"eval_samples_per_second": 1.984,
"eval_steps_per_second": 0.996,
"step": 280
},
{
"epoch": 0.7317230273752013,
"grad_norm": 2.706059455871582,
"learning_rate": 1.7370711923791567e-05,
"loss": 1.9982,
"step": 284
},
{
"epoch": 0.7420289855072464,
"grad_norm": 2.5310757160186768,
"learning_rate": 1.6135921418712956e-05,
"loss": 2.0166,
"step": 288
},
{
"epoch": 0.7420289855072464,
"eval_loss": 1.919832706451416,
"eval_runtime": 124.8133,
"eval_samples_per_second": 1.963,
"eval_steps_per_second": 0.985,
"step": 288
},
{
"epoch": 0.7523349436392914,
"grad_norm": 2.52554988861084,
"learning_rate": 1.4938160786375572e-05,
"loss": 1.9849,
"step": 292
},
{
"epoch": 0.7626409017713366,
"grad_norm": 2.3502135276794434,
"learning_rate": 1.3778739760445552e-05,
"loss": 1.9246,
"step": 296
},
{
"epoch": 0.7626409017713366,
"eval_loss": 1.918765664100647,
"eval_runtime": 100.865,
"eval_samples_per_second": 2.429,
"eval_steps_per_second": 1.219,
"step": 296
},
{
"epoch": 0.7729468599033816,
"grad_norm": 2.43375301361084,
"learning_rate": 1.2658926150792322e-05,
"loss": 1.9995,
"step": 300
},
{
"epoch": 0.7832528180354267,
"grad_norm": 2.5884594917297363,
"learning_rate": 1.157994445715706e-05,
"loss": 1.9418,
"step": 304
},
{
"epoch": 0.7832528180354267,
"eval_loss": 1.9197624921798706,
"eval_runtime": 223.502,
"eval_samples_per_second": 1.096,
"eval_steps_per_second": 0.55,
"step": 304
},
{
"epoch": 0.7935587761674718,
"grad_norm": 2.3900303840637207,
"learning_rate": 1.0542974530180327e-05,
"loss": 1.9024,
"step": 308
},
{
"epoch": 0.8038647342995169,
"grad_norm": 2.4108939170837402,
"learning_rate": 9.549150281252633e-06,
"loss": 1.9417,
"step": 312
},
{
"epoch": 0.8038647342995169,
"eval_loss": 1.9172273874282837,
"eval_runtime": 126.9286,
"eval_samples_per_second": 1.93,
"eval_steps_per_second": 0.969,
"step": 312
},
{
"epoch": 0.814170692431562,
"grad_norm": 2.105642557144165,
"learning_rate": 8.599558442598998e-06,
"loss": 1.9766,
"step": 316
},
{
"epoch": 0.8244766505636071,
"grad_norm": 2.292396306991577,
"learning_rate": 7.695237378953223e-06,
"loss": 1.9652,
"step": 320
},
{
"epoch": 0.8244766505636071,
"eval_loss": 1.9169241189956665,
"eval_runtime": 118.9904,
"eval_samples_per_second": 2.059,
"eval_steps_per_second": 1.034,
"step": 320
},
{
"epoch": 0.8347826086956521,
"grad_norm": 2.122368574142456,
"learning_rate": 6.837175952121306e-06,
"loss": 2.004,
"step": 324
},
{
"epoch": 0.8450885668276973,
"grad_norm": 2.2409677505493164,
"learning_rate": 6.026312439675552e-06,
"loss": 1.9715,
"step": 328
},
{
"epoch": 0.8450885668276973,
"eval_loss": 1.9170624017715454,
"eval_runtime": 111.3633,
"eval_samples_per_second": 2.2,
"eval_steps_per_second": 1.104,
"step": 328
},
{
"epoch": 0.8553945249597423,
"grad_norm": 2.5156409740448,
"learning_rate": 5.263533508961827e-06,
"loss": 1.9188,
"step": 332
},
{
"epoch": 0.8657004830917875,
"grad_norm": 2.1870505809783936,
"learning_rate": 4.549673247541875e-06,
"loss": 1.9634,
"step": 336
},
{
"epoch": 0.8657004830917875,
"eval_loss": 1.9158947467803955,
"eval_runtime": 234.5094,
"eval_samples_per_second": 1.045,
"eval_steps_per_second": 0.524,
"step": 336
},
{
"epoch": 0.8760064412238325,
"grad_norm": 2.428206443786621,
"learning_rate": 3.885512251130763e-06,
"loss": 1.9557,
"step": 340
},
{
"epoch": 0.8863123993558776,
"grad_norm": 2.5798287391662598,
"learning_rate": 3.271776770026963e-06,
"loss": 1.9566,
"step": 344
},
{
"epoch": 0.8863123993558776,
"eval_loss": 1.9153064489364624,
"eval_runtime": 139.4736,
"eval_samples_per_second": 1.757,
"eval_steps_per_second": 0.882,
"step": 344
},
{
"epoch": 0.8966183574879227,
"grad_norm": 2.345715045928955,
"learning_rate": 2.7091379149682685e-06,
"loss": 1.8952,
"step": 348
},
{
"epoch": 0.9069243156199678,
"grad_norm": 2.8093199729919434,
"learning_rate": 2.1982109232821178e-06,
"loss": 1.9277,
"step": 352
},
{
"epoch": 0.9069243156199678,
"eval_loss": 1.9147199392318726,
"eval_runtime": 148.1878,
"eval_samples_per_second": 1.653,
"eval_steps_per_second": 0.83,
"step": 352
},
{
"epoch": 0.9172302737520129,
"grad_norm": 2.609008312225342,
"learning_rate": 1.7395544861325718e-06,
"loss": 2.0425,
"step": 356
},
{
"epoch": 0.927536231884058,
"grad_norm": 2.2126529216766357,
"learning_rate": 1.333670137599713e-06,
"loss": 2.0087,
"step": 360
},
{
"epoch": 0.927536231884058,
"eval_loss": 1.9142426252365112,
"eval_runtime": 129.4444,
"eval_samples_per_second": 1.893,
"eval_steps_per_second": 0.95,
"step": 360
},
{
"epoch": 0.937842190016103,
"grad_norm": 2.5125489234924316,
"learning_rate": 9.810017062595322e-07,
"loss": 1.9817,
"step": 364
},
{
"epoch": 0.9481481481481482,
"grad_norm": 2.638725996017456,
"learning_rate": 6.819348298638839e-07,
"loss": 1.921,
"step": 368
},
{
"epoch": 0.9481481481481482,
"eval_loss": 1.9142839908599854,
"eval_runtime": 131.7251,
"eval_samples_per_second": 1.86,
"eval_steps_per_second": 0.934,
"step": 368
},
{
"epoch": 0.9584541062801932,
"grad_norm": 2.400455951690674,
"learning_rate": 4.367965336512403e-07,
"loss": 1.8983,
"step": 372
},
{
"epoch": 0.9687600644122383,
"grad_norm": 2.5474154949188232,
"learning_rate": 2.458548727494292e-07,
"loss": 1.9842,
"step": 376
},
{
"epoch": 0.9687600644122383,
"eval_loss": 1.9140400886535645,
"eval_runtime": 251.3639,
"eval_samples_per_second": 0.975,
"eval_steps_per_second": 0.489,
"step": 376
},
{
"epoch": 0.9790660225442834,
"grad_norm": 2.566486120223999,
"learning_rate": 1.0931863906127327e-07,
"loss": 1.8793,
"step": 380
},
{
"epoch": 0.9893719806763285,
"grad_norm": 2.8591597080230713,
"learning_rate": 2.7337132953697554e-08,
"loss": 1.8825,
"step": 384
},
{
"epoch": 0.9893719806763285,
"eval_loss": 1.914077639579773,
"eval_runtime": 110.406,
"eval_samples_per_second": 2.219,
"eval_steps_per_second": 1.114,
"step": 384
}
],
"logging_steps": 4,
"max_steps": 388,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 8,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.37207139891839e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}