mistral_7b_40000_tuned / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.7583963425356075,
"eval_steps": 500,
"global_step": 40000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 1.3358560800552368,
"learning_rate": 2.9999642387366694e-06,
"loss": 1.8182,
"step": 100
},
{
"epoch": 0.01,
"grad_norm": 1.0696768760681152,
"learning_rate": 2.999856956651834e-06,
"loss": 1.5829,
"step": 200
},
{
"epoch": 0.01,
"grad_norm": 1.1315983533859253,
"learning_rate": 2.9996781588608853e-06,
"loss": 1.5088,
"step": 300
},
{
"epoch": 0.02,
"grad_norm": 1.2688828706741333,
"learning_rate": 2.9994278538892026e-06,
"loss": 1.4755,
"step": 400
},
{
"epoch": 0.02,
"grad_norm": 1.3950492143630981,
"learning_rate": 2.999106053671749e-06,
"loss": 1.44,
"step": 500
},
{
"epoch": 0.02,
"eval_loss": 1.4453312158584595,
"eval_runtime": 1866.1237,
"eval_samples_per_second": 5.137,
"eval_steps_per_second": 0.643,
"step": 500
},
{
"epoch": 0.03,
"grad_norm": 1.4245915412902832,
"learning_rate": 2.9987127735525e-06,
"loss": 1.428,
"step": 600
},
{
"epoch": 0.03,
"grad_norm": 1.3934013843536377,
"learning_rate": 2.9982480322837146e-06,
"loss": 1.4205,
"step": 700
},
{
"epoch": 0.04,
"grad_norm": 1.345449447631836,
"learning_rate": 2.99771185202504e-06,
"loss": 1.4117,
"step": 800
},
{
"epoch": 0.04,
"grad_norm": 1.4815948009490967,
"learning_rate": 2.9971042583424534e-06,
"loss": 1.4029,
"step": 900
},
{
"epoch": 0.04,
"grad_norm": 1.416758418083191,
"learning_rate": 2.996425280207045e-06,
"loss": 1.4103,
"step": 1000
},
{
"epoch": 0.04,
"eval_loss": 1.4033704996109009,
"eval_runtime": 1866.432,
"eval_samples_per_second": 5.136,
"eval_steps_per_second": 0.642,
"step": 1000
},
{
"epoch": 0.05,
"grad_norm": 1.5439317226409912,
"learning_rate": 2.995674949993636e-06,
"loss": 1.3992,
"step": 1100
},
{
"epoch": 0.05,
"grad_norm": 1.6817480325698853,
"learning_rate": 2.9948533034792356e-06,
"loss": 1.3857,
"step": 1200
},
{
"epoch": 0.06,
"grad_norm": 1.5548583269119263,
"learning_rate": 2.993960379841333e-06,
"loss": 1.3889,
"step": 1300
},
{
"epoch": 0.06,
"grad_norm": 1.800005316734314,
"learning_rate": 2.992996221656031e-06,
"loss": 1.3798,
"step": 1400
},
{
"epoch": 0.07,
"grad_norm": 1.5900113582611084,
"learning_rate": 2.9919608748960165e-06,
"loss": 1.3915,
"step": 1500
},
{
"epoch": 0.07,
"eval_loss": 1.3856122493743896,
"eval_runtime": 1867.7341,
"eval_samples_per_second": 5.132,
"eval_steps_per_second": 0.642,
"step": 1500
},
{
"epoch": 0.07,
"grad_norm": 1.593215823173523,
"learning_rate": 2.990854388928367e-06,
"loss": 1.3707,
"step": 1600
},
{
"epoch": 0.07,
"grad_norm": 1.6342238187789917,
"learning_rate": 2.9896768165121967e-06,
"loss": 1.3909,
"step": 1700
},
{
"epoch": 0.08,
"grad_norm": 1.7908762693405151,
"learning_rate": 2.9884282137961426e-06,
"loss": 1.3758,
"step": 1800
},
{
"epoch": 0.08,
"grad_norm": 1.5925847291946411,
"learning_rate": 2.987108640315685e-06,
"loss": 1.3816,
"step": 1900
},
{
"epoch": 0.09,
"grad_norm": 1.8841816186904907,
"learning_rate": 2.9857181589903103e-06,
"loss": 1.383,
"step": 2000
},
{
"epoch": 0.09,
"eval_loss": 1.3752671480178833,
"eval_runtime": 1867.3688,
"eval_samples_per_second": 5.133,
"eval_steps_per_second": 0.642,
"step": 2000
},
{
"epoch": 0.09,
"grad_norm": 1.688745379447937,
"learning_rate": 2.984256836120511e-06,
"loss": 1.3796,
"step": 2100
},
{
"epoch": 0.1,
"grad_norm": 1.676340103149414,
"learning_rate": 2.9827247413846217e-06,
"loss": 1.3742,
"step": 2200
},
{
"epoch": 0.1,
"grad_norm": 2.0635907649993896,
"learning_rate": 2.981121947835501e-06,
"loss": 1.3712,
"step": 2300
},
{
"epoch": 0.11,
"grad_norm": 1.799085021018982,
"learning_rate": 2.979448531897045e-06,
"loss": 1.3694,
"step": 2400
},
{
"epoch": 0.11,
"grad_norm": 1.8315014839172363,
"learning_rate": 2.9777045733605437e-06,
"loss": 1.3558,
"step": 2500
},
{
"epoch": 0.11,
"eval_loss": 1.3671655654907227,
"eval_runtime": 1867.272,
"eval_samples_per_second": 5.134,
"eval_steps_per_second": 0.642,
"step": 2500
},
{
"epoch": 0.11,
"grad_norm": 1.8167532682418823,
"learning_rate": 2.9758901553808787e-06,
"loss": 1.3579,
"step": 2600
},
{
"epoch": 0.12,
"grad_norm": 1.8836266994476318,
"learning_rate": 2.9740053644725552e-06,
"loss": 1.3714,
"step": 2700
},
{
"epoch": 0.12,
"grad_norm": 1.823400855064392,
"learning_rate": 2.972050290505579e-06,
"loss": 1.3609,
"step": 2800
},
{
"epoch": 0.13,
"grad_norm": 1.8716390132904053,
"learning_rate": 2.970025026701169e-06,
"loss": 1.3685,
"step": 2900
},
{
"epoch": 0.13,
"grad_norm": 1.7368168830871582,
"learning_rate": 2.967929669627316e-06,
"loss": 1.3665,
"step": 3000
},
{
"epoch": 0.13,
"eval_loss": 1.3610810041427612,
"eval_runtime": 1867.7829,
"eval_samples_per_second": 5.132,
"eval_steps_per_second": 0.642,
"step": 3000
},
{
"epoch": 0.14,
"grad_norm": 1.8250432014465332,
"learning_rate": 2.9657643191941737e-06,
"loss": 1.3549,
"step": 3100
},
{
"epoch": 0.14,
"grad_norm": 1.9420477151870728,
"learning_rate": 2.9635290786492985e-06,
"loss": 1.3531,
"step": 3200
},
{
"epoch": 0.15,
"grad_norm": 2.0373518466949463,
"learning_rate": 2.9612240545727255e-06,
"loss": 1.3619,
"step": 3300
},
{
"epoch": 0.15,
"grad_norm": 1.9139854907989502,
"learning_rate": 2.9588493568718843e-06,
"loss": 1.3665,
"step": 3400
},
{
"epoch": 0.15,
"grad_norm": 1.9374817609786987,
"learning_rate": 2.9564050987763614e-06,
"loss": 1.3619,
"step": 3500
},
{
"epoch": 0.15,
"eval_loss": 1.3560757637023926,
"eval_runtime": 1867.3981,
"eval_samples_per_second": 5.133,
"eval_steps_per_second": 0.642,
"step": 3500
},
{
"epoch": 0.16,
"grad_norm": 1.8168325424194336,
"learning_rate": 2.9538913968325007e-06,
"loss": 1.3527,
"step": 3600
},
{
"epoch": 0.16,
"grad_norm": 1.948146939277649,
"learning_rate": 2.951308370897845e-06,
"loss": 1.3496,
"step": 3700
},
{
"epoch": 0.17,
"grad_norm": 2.068612575531006,
"learning_rate": 2.948656144135421e-06,
"loss": 1.358,
"step": 3800
},
{
"epoch": 0.17,
"grad_norm": 1.9631004333496094,
"learning_rate": 2.945934843007869e-06,
"loss": 1.3504,
"step": 3900
},
{
"epoch": 0.18,
"grad_norm": 2.0034778118133545,
"learning_rate": 2.9431445972714102e-06,
"loss": 1.3582,
"step": 4000
},
{
"epoch": 0.18,
"eval_loss": 1.3521347045898438,
"eval_runtime": 1867.5478,
"eval_samples_per_second": 5.133,
"eval_steps_per_second": 0.642,
"step": 4000
},
{
"epoch": 0.18,
"grad_norm": 1.872603178024292,
"learning_rate": 2.940285539969662e-06,
"loss": 1.3483,
"step": 4100
},
{
"epoch": 0.18,
"grad_norm": 1.8477139472961426,
"learning_rate": 2.9373578074272917e-06,
"loss": 1.3623,
"step": 4200
},
{
"epoch": 0.19,
"grad_norm": 2.0872058868408203,
"learning_rate": 2.9343615392435187e-06,
"loss": 1.3483,
"step": 4300
},
{
"epoch": 0.19,
"grad_norm": 1.931931495666504,
"learning_rate": 2.931296878285457e-06,
"loss": 1.3499,
"step": 4400
},
{
"epoch": 0.2,
"grad_norm": 1.9555952548980713,
"learning_rate": 2.928163970681304e-06,
"loss": 1.3525,
"step": 4500
},
{
"epoch": 0.2,
"eval_loss": 1.348177433013916,
"eval_runtime": 1865.9442,
"eval_samples_per_second": 5.137,
"eval_steps_per_second": 0.643,
"step": 4500
},
{
"epoch": 0.2,
"grad_norm": 1.9107462167739868,
"learning_rate": 2.9249629658133703e-06,
"loss": 1.3446,
"step": 4600
},
{
"epoch": 0.21,
"grad_norm": 2.1549768447875977,
"learning_rate": 2.9216940163109613e-06,
"loss": 1.3485,
"step": 4700
},
{
"epoch": 0.21,
"grad_norm": 1.9788259267807007,
"learning_rate": 2.9183572780430938e-06,
"loss": 1.3386,
"step": 4800
},
{
"epoch": 0.22,
"grad_norm": 2.008772850036621,
"learning_rate": 2.9149529101110707e-06,
"loss": 1.3449,
"step": 4900
},
{
"epoch": 0.22,
"grad_norm": 1.932020664215088,
"learning_rate": 2.9114810748408887e-06,
"loss": 1.3471,
"step": 5000
},
{
"epoch": 0.22,
"eval_loss": 1.3448920249938965,
"eval_runtime": 1866.903,
"eval_samples_per_second": 5.135,
"eval_steps_per_second": 0.642,
"step": 5000
},
{
"epoch": 0.22,
"grad_norm": 1.780934453010559,
"learning_rate": 2.9079419377755016e-06,
"loss": 1.3408,
"step": 5100
},
{
"epoch": 0.23,
"grad_norm": 2.0991992950439453,
"learning_rate": 2.904335667666926e-06,
"loss": 1.3377,
"step": 5200
},
{
"epoch": 0.23,
"grad_norm": 1.7961390018463135,
"learning_rate": 2.9006624364681955e-06,
"loss": 1.3446,
"step": 5300
},
{
"epoch": 0.24,
"grad_norm": 1.951217770576477,
"learning_rate": 2.896922419325161e-06,
"loss": 1.3434,
"step": 5400
},
{
"epoch": 0.24,
"grad_norm": 1.9937087297439575,
"learning_rate": 2.89311579456814e-06,
"loss": 1.3383,
"step": 5500
},
{
"epoch": 0.24,
"eval_loss": 1.3420648574829102,
"eval_runtime": 1864.5499,
"eval_samples_per_second": 5.141,
"eval_steps_per_second": 0.643,
"step": 5500
},
{
"epoch": 0.25,
"grad_norm": 1.9863967895507812,
"learning_rate": 2.889242743703412e-06,
"loss": 1.3553,
"step": 5600
},
{
"epoch": 0.25,
"grad_norm": 2.026629686355591,
"learning_rate": 2.8853034514045667e-06,
"loss": 1.3384,
"step": 5700
},
{
"epoch": 0.25,
"grad_norm": 2.1524720191955566,
"learning_rate": 2.8812981055036967e-06,
"loss": 1.339,
"step": 5800
},
{
"epoch": 0.26,
"grad_norm": 2.011669635772705,
"learning_rate": 2.8772268969824412e-06,
"loss": 1.3449,
"step": 5900
},
{
"epoch": 0.26,
"grad_norm": 1.96876060962677,
"learning_rate": 2.87309001996288e-06,
"loss": 1.3352,
"step": 6000
},
{
"epoch": 0.26,
"eval_loss": 1.3392791748046875,
"eval_runtime": 1863.5996,
"eval_samples_per_second": 5.144,
"eval_steps_per_second": 0.643,
"step": 6000
},
{
"epoch": 0.27,
"grad_norm": 2.030050754547119,
"learning_rate": 2.868887671698278e-06,
"loss": 1.3473,
"step": 6100
},
{
"epoch": 0.27,
"grad_norm": 1.9417366981506348,
"learning_rate": 2.8646200525636784e-06,
"loss": 1.3467,
"step": 6200
},
{
"epoch": 0.28,
"grad_norm": 1.8963119983673096,
"learning_rate": 2.860287366046351e-06,
"loss": 1.3298,
"step": 6300
},
{
"epoch": 0.28,
"grad_norm": 2.0446488857269287,
"learning_rate": 2.855889818736086e-06,
"loss": 1.3364,
"step": 6400
},
{
"epoch": 0.29,
"grad_norm": 1.9520761966705322,
"learning_rate": 2.8514276203153476e-06,
"loss": 1.3328,
"step": 6500
},
{
"epoch": 0.29,
"eval_loss": 1.3369933366775513,
"eval_runtime": 1864.5392,
"eval_samples_per_second": 5.141,
"eval_steps_per_second": 0.643,
"step": 6500
},
{
"epoch": 0.29,
"grad_norm": 1.9833135604858398,
"learning_rate": 2.846900983549272e-06,
"loss": 1.3461,
"step": 6600
},
{
"epoch": 0.29,
"grad_norm": 1.8837144374847412,
"learning_rate": 2.8423101242755264e-06,
"loss": 1.3354,
"step": 6700
},
{
"epoch": 0.3,
"grad_norm": 1.9261667728424072,
"learning_rate": 2.837655261394013e-06,
"loss": 1.3349,
"step": 6800
},
{
"epoch": 0.3,
"grad_norm": 1.8928134441375732,
"learning_rate": 2.832936616856434e-06,
"loss": 1.3247,
"step": 6900
},
{
"epoch": 0.31,
"grad_norm": 2.0842361450195312,
"learning_rate": 2.828154415655711e-06,
"loss": 1.3396,
"step": 7000
},
{
"epoch": 0.31,
"eval_loss": 1.3346595764160156,
"eval_runtime": 1864.6191,
"eval_samples_per_second": 5.141,
"eval_steps_per_second": 0.643,
"step": 7000
},
{
"epoch": 0.31,
"grad_norm": 2.0268609523773193,
"learning_rate": 2.823308885815251e-06,
"loss": 1.3577,
"step": 7100
},
{
"epoch": 0.32,
"grad_norm": 1.9914246797561646,
"learning_rate": 2.8184002583780802e-06,
"loss": 1.3273,
"step": 7200
},
{
"epoch": 0.32,
"grad_norm": 2.065075159072876,
"learning_rate": 2.813428767395822e-06,
"loss": 1.3449,
"step": 7300
},
{
"epoch": 0.33,
"grad_norm": 2.0914437770843506,
"learning_rate": 2.8083946499175413e-06,
"loss": 1.3415,
"step": 7400
},
{
"epoch": 0.33,
"grad_norm": 1.9424386024475098,
"learning_rate": 2.803298145978439e-06,
"loss": 1.3316,
"step": 7500
},
{
"epoch": 0.33,
"eval_loss": 1.3324897289276123,
"eval_runtime": 1864.3018,
"eval_samples_per_second": 5.142,
"eval_steps_per_second": 0.643,
"step": 7500
},
{
"epoch": 0.33,
"grad_norm": 2.083033561706543,
"learning_rate": 2.7981394985884074e-06,
"loss": 1.3213,
"step": 7600
},
{
"epoch": 0.34,
"grad_norm": 1.8912067413330078,
"learning_rate": 2.792918953720444e-06,
"loss": 1.3284,
"step": 7700
},
{
"epoch": 0.34,
"grad_norm": 1.8995610475540161,
"learning_rate": 2.7876367602989206e-06,
"loss": 1.3256,
"step": 7800
},
{
"epoch": 0.35,
"grad_norm": 1.9921075105667114,
"learning_rate": 2.7822931701877186e-06,
"loss": 1.3339,
"step": 7900
},
{
"epoch": 0.35,
"grad_norm": 1.9593900442123413,
"learning_rate": 2.7768884381782147e-06,
"loss": 1.3357,
"step": 8000
},
{
"epoch": 0.35,
"eval_loss": 1.3304702043533325,
"eval_runtime": 1863.8735,
"eval_samples_per_second": 5.143,
"eval_steps_per_second": 0.643,
"step": 8000
},
{
"epoch": 0.36,
"grad_norm": 2.0294156074523926,
"learning_rate": 2.7714228219771354e-06,
"loss": 1.3248,
"step": 8100
},
{
"epoch": 0.36,
"grad_norm": 1.9358181953430176,
"learning_rate": 2.765896582194267e-06,
"loss": 1.3321,
"step": 8200
},
{
"epoch": 0.36,
"grad_norm": 1.9804461002349854,
"learning_rate": 2.7603099823300316e-06,
"loss": 1.328,
"step": 8300
},
{
"epoch": 0.37,
"grad_norm": 1.9724797010421753,
"learning_rate": 2.754663288762921e-06,
"loss": 1.3313,
"step": 8400
},
{
"epoch": 0.37,
"grad_norm": 2.1071040630340576,
"learning_rate": 2.748956770736796e-06,
"loss": 1.3164,
"step": 8500
},
{
"epoch": 0.37,
"eval_loss": 1.3289612531661987,
"eval_runtime": 1864.6483,
"eval_samples_per_second": 5.141,
"eval_steps_per_second": 0.643,
"step": 8500
},
{
"epoch": 0.38,
"grad_norm": 1.8682750463485718,
"learning_rate": 2.743190700348048e-06,
"loss": 1.3318,
"step": 8600
},
{
"epoch": 0.38,
"grad_norm": 2.1705667972564697,
"learning_rate": 2.7373653525326257e-06,
"loss": 1.3203,
"step": 8700
},
{
"epoch": 0.39,
"grad_norm": 2.2123374938964844,
"learning_rate": 2.7314810050529265e-06,
"loss": 1.3279,
"step": 8800
},
{
"epoch": 0.39,
"grad_norm": 2.0958218574523926,
"learning_rate": 2.7255379384845483e-06,
"loss": 1.332,
"step": 8900
},
{
"epoch": 0.4,
"grad_norm": 2.0944671630859375,
"learning_rate": 2.7195364362029173e-06,
"loss": 1.3231,
"step": 9000
},
{
"epoch": 0.4,
"eval_loss": 1.3273080587387085,
"eval_runtime": 1864.6036,
"eval_samples_per_second": 5.141,
"eval_steps_per_second": 0.643,
"step": 9000
},
{
"epoch": 0.4,
"grad_norm": 2.045405149459839,
"learning_rate": 2.713476784369771e-06,
"loss": 1.3179,
"step": 9100
},
{
"epoch": 0.4,
"grad_norm": 1.985823392868042,
"learning_rate": 2.7073592719195155e-06,
"loss": 1.3318,
"step": 9200
},
{
"epoch": 0.41,
"grad_norm": 2.064948081970215,
"learning_rate": 2.7011841905454495e-06,
"loss": 1.3181,
"step": 9300
},
{
"epoch": 0.41,
"grad_norm": 2.136582612991333,
"learning_rate": 2.694951834685854e-06,
"loss": 1.3181,
"step": 9400
},
{
"epoch": 0.42,
"grad_norm": 2.394834041595459,
"learning_rate": 2.6886625015099553e-06,
"loss": 1.3287,
"step": 9500
},
{
"epoch": 0.42,
"eval_loss": 1.3257533311843872,
"eval_runtime": 1871.6529,
"eval_samples_per_second": 5.122,
"eval_steps_per_second": 0.641,
"step": 9500
},
{
"epoch": 0.42,
"grad_norm": 2.0295205116271973,
"learning_rate": 2.682316490903753e-06,
"loss": 1.3316,
"step": 9600
},
{
"epoch": 0.43,
"grad_norm": 1.883170247077942,
"learning_rate": 2.6759141054557214e-06,
"loss": 1.345,
"step": 9700
},
{
"epoch": 0.43,
"grad_norm": 2.172785997390747,
"learning_rate": 2.6694556504423836e-06,
"loss": 1.3197,
"step": 9800
},
{
"epoch": 0.44,
"grad_norm": 2.065781354904175,
"learning_rate": 2.662941433813754e-06,
"loss": 1.3165,
"step": 9900
},
{
"epoch": 0.44,
"grad_norm": 1.825239896774292,
"learning_rate": 2.6563717661786536e-06,
"loss": 1.3308,
"step": 10000
},
{
"epoch": 0.44,
"eval_loss": 1.324156641960144,
"eval_runtime": 1865.6117,
"eval_samples_per_second": 5.138,
"eval_steps_per_second": 0.643,
"step": 10000
},
{
"epoch": 0.44,
"grad_norm": 2.128471612930298,
"learning_rate": 2.649746960789902e-06,
"loss": 1.3176,
"step": 10100
},
{
"epoch": 0.45,
"grad_norm": 1.994718074798584,
"learning_rate": 2.6430673335293788e-06,
"loss": 1.3245,
"step": 10200
},
{
"epoch": 0.45,
"grad_norm": 2.0164434909820557,
"learning_rate": 2.6363332028929633e-06,
"loss": 1.3179,
"step": 10300
},
{
"epoch": 0.46,
"grad_norm": 1.879969835281372,
"learning_rate": 2.629544889975348e-06,
"loss": 1.3346,
"step": 10400
},
{
"epoch": 0.46,
"grad_norm": 1.9689232110977173,
"learning_rate": 2.6227027184547265e-06,
"loss": 1.311,
"step": 10500
},
{
"epoch": 0.46,
"eval_loss": 1.3230102062225342,
"eval_runtime": 1865.9111,
"eval_samples_per_second": 5.137,
"eval_steps_per_second": 0.643,
"step": 10500
},
{
"epoch": 0.47,
"grad_norm": 2.0227608680725098,
"learning_rate": 2.615807014577363e-06,
"loss": 1.3226,
"step": 10600
},
{
"epoch": 0.47,
"grad_norm": 1.9242812395095825,
"learning_rate": 2.608858107142033e-06,
"loss": 1.3254,
"step": 10700
},
{
"epoch": 0.47,
"grad_norm": 1.9587324857711792,
"learning_rate": 2.601856327484348e-06,
"loss": 1.3219,
"step": 10800
},
{
"epoch": 0.48,
"grad_norm": 2.0176897048950195,
"learning_rate": 2.594802009460957e-06,
"loss": 1.3133,
"step": 10900
},
{
"epoch": 0.48,
"grad_norm": 1.871654987335205,
"learning_rate": 2.5876954894336257e-06,
"loss": 1.3242,
"step": 11000
},
{
"epoch": 0.48,
"eval_loss": 1.321611762046814,
"eval_runtime": 1864.3465,
"eval_samples_per_second": 5.142,
"eval_steps_per_second": 0.643,
"step": 11000
},
{
"epoch": 0.49,
"grad_norm": 2.1732594966888428,
"learning_rate": 2.580537106253199e-06,
"loss": 1.3123,
"step": 11100
},
{
"epoch": 0.49,
"grad_norm": 1.9368730783462524,
"learning_rate": 2.5733272012434456e-06,
"loss": 1.323,
"step": 11200
},
{
"epoch": 0.5,
"grad_norm": 2.197732448577881,
"learning_rate": 2.56606611818478e-06,
"loss": 1.316,
"step": 11300
},
{
"epoch": 0.5,
"grad_norm": 2.0765159130096436,
"learning_rate": 2.5587542032978735e-06,
"loss": 1.326,
"step": 11400
},
{
"epoch": 0.51,
"grad_norm": 1.9759124517440796,
"learning_rate": 2.551391805227145e-06,
"loss": 1.319,
"step": 11500
},
{
"epoch": 0.51,
"eval_loss": 1.3201991319656372,
"eval_runtime": 1867.1093,
"eval_samples_per_second": 5.134,
"eval_steps_per_second": 0.642,
"step": 11500
},
{
"epoch": 0.51,
"grad_norm": 2.119062900543213,
"learning_rate": 2.5439792750241362e-06,
"loss": 1.319,
"step": 11600
},
{
"epoch": 0.51,
"grad_norm": 1.9623346328735352,
"learning_rate": 2.5365169661307723e-06,
"loss": 1.3188,
"step": 11700
},
{
"epoch": 0.52,
"grad_norm": 2.069474935531616,
"learning_rate": 2.529005234362512e-06,
"loss": 1.3236,
"step": 11800
},
{
"epoch": 0.52,
"grad_norm": 1.8530821800231934,
"learning_rate": 2.521444437891378e-06,
"loss": 1.3172,
"step": 11900
},
{
"epoch": 0.53,
"grad_norm": 2.1172099113464355,
"learning_rate": 2.513834937228883e-06,
"loss": 1.3183,
"step": 12000
},
{
"epoch": 0.53,
"eval_loss": 1.31929349899292,
"eval_runtime": 1867.4942,
"eval_samples_per_second": 5.133,
"eval_steps_per_second": 0.642,
"step": 12000
},
{
"epoch": 0.53,
"grad_norm": 1.981194019317627,
"learning_rate": 2.5061770952088354e-06,
"loss": 1.3278,
"step": 12100
},
{
"epoch": 0.54,
"grad_norm": 1.9915848970413208,
"learning_rate": 2.4984712769700423e-06,
"loss": 1.3143,
"step": 12200
},
{
"epoch": 0.54,
"grad_norm": 2.114997625350952,
"learning_rate": 2.490717849938897e-06,
"loss": 1.3207,
"step": 12300
},
{
"epoch": 0.55,
"grad_norm": 1.9648516178131104,
"learning_rate": 2.4829171838118613e-06,
"loss": 1.3129,
"step": 12400
},
{
"epoch": 0.55,
"grad_norm": 1.9119199514389038,
"learning_rate": 2.4750696505378355e-06,
"loss": 1.3211,
"step": 12500
},
{
"epoch": 0.55,
"eval_loss": 1.3180123567581177,
"eval_runtime": 1866.9983,
"eval_samples_per_second": 5.134,
"eval_steps_per_second": 0.642,
"step": 12500
},
{
"epoch": 0.55,
"grad_norm": 2.158143997192383,
"learning_rate": 2.4671756243004243e-06,
"loss": 1.3124,
"step": 12600
},
{
"epoch": 0.56,
"grad_norm": 2.059196949005127,
"learning_rate": 2.4592354815000963e-06,
"loss": 1.3229,
"step": 12700
},
{
"epoch": 0.56,
"grad_norm": 1.9812512397766113,
"learning_rate": 2.451249600736235e-06,
"loss": 1.3159,
"step": 12800
},
{
"epoch": 0.57,
"grad_norm": 1.9367738962173462,
"learning_rate": 2.4432183627890867e-06,
"loss": 1.3101,
"step": 12900
},
{
"epoch": 0.57,
"grad_norm": 2.1279842853546143,
"learning_rate": 2.4351421506016047e-06,
"loss": 1.3158,
"step": 13000
},
{
"epoch": 0.57,
"eval_loss": 1.3169833421707153,
"eval_runtime": 1870.0395,
"eval_samples_per_second": 5.126,
"eval_steps_per_second": 0.641,
"step": 13000
},
{
"epoch": 0.58,
"grad_norm": 2.213517189025879,
"learning_rate": 2.4270213492611903e-06,
"loss": 1.3113,
"step": 13100
},
{
"epoch": 0.58,
"grad_norm": 2.060974597930908,
"learning_rate": 2.41885634598133e-06,
"loss": 1.3248,
"step": 13200
},
{
"epoch": 0.58,
"grad_norm": 2.004950523376465,
"learning_rate": 2.4106475300831343e-06,
"loss": 1.321,
"step": 13300
},
{
"epoch": 0.59,
"grad_norm": 2.081263303756714,
"learning_rate": 2.402395292976772e-06,
"loss": 1.3058,
"step": 13400
},
{
"epoch": 0.59,
"grad_norm": 2.141289472579956,
"learning_rate": 2.394100028142809e-06,
"loss": 1.3054,
"step": 13500
},
{
"epoch": 0.59,
"eval_loss": 1.3159672021865845,
"eval_runtime": 1868.4185,
"eval_samples_per_second": 5.131,
"eval_steps_per_second": 0.642,
"step": 13500
},
{
"epoch": 0.6,
"grad_norm": 2.041181802749634,
"learning_rate": 2.3857621311134457e-06,
"loss": 1.324,
"step": 13600
},
{
"epoch": 0.6,
"grad_norm": 1.9553183317184448,
"learning_rate": 2.3773819994536564e-06,
"loss": 1.3115,
"step": 13700
},
{
"epoch": 0.61,
"grad_norm": 2.0408129692077637,
"learning_rate": 2.368960032742235e-06,
"loss": 1.3007,
"step": 13800
},
{
"epoch": 0.61,
"grad_norm": 2.0877633094787598,
"learning_rate": 2.36049663255274e-06,
"loss": 1.3174,
"step": 13900
},
{
"epoch": 0.62,
"grad_norm": 2.013047218322754,
"learning_rate": 2.35199220243435e-06,
"loss": 1.3249,
"step": 14000
},
{
"epoch": 0.62,
"eval_loss": 1.3149954080581665,
"eval_runtime": 1864.575,
"eval_samples_per_second": 5.141,
"eval_steps_per_second": 0.643,
"step": 14000
},
{
"epoch": 0.62,
"grad_norm": 1.9625511169433594,
"learning_rate": 2.3434471478926168e-06,
"loss": 1.3053,
"step": 14100
},
{
"epoch": 0.62,
"grad_norm": 1.9895579814910889,
"learning_rate": 2.334861876370135e-06,
"loss": 1.3097,
"step": 14200
},
{
"epoch": 0.63,
"grad_norm": 2.0086371898651123,
"learning_rate": 2.3262367972271126e-06,
"loss": 1.3075,
"step": 14300
},
{
"epoch": 0.63,
"grad_norm": 2.0822415351867676,
"learning_rate": 2.317572321721851e-06,
"loss": 1.3122,
"step": 14400
},
{
"epoch": 0.64,
"grad_norm": 2.1191747188568115,
"learning_rate": 2.3088688629911378e-06,
"loss": 1.3029,
"step": 14500
},
{
"epoch": 0.64,
"eval_loss": 1.313984990119934,
"eval_runtime": 1868.3094,
"eval_samples_per_second": 5.131,
"eval_steps_per_second": 0.642,
"step": 14500
},
{
"epoch": 0.64,
"grad_norm": 2.0153069496154785,
"learning_rate": 2.3001268360305446e-06,
"loss": 1.31,
"step": 14600
},
{
"epoch": 0.65,
"grad_norm": 2.031879425048828,
"learning_rate": 2.2913466576746433e-06,
"loss": 1.3113,
"step": 14700
},
{
"epoch": 0.65,
"grad_norm": 1.9688141345977783,
"learning_rate": 2.2825287465771276e-06,
"loss": 1.3114,
"step": 14800
},
{
"epoch": 0.66,
"grad_norm": 2.0744152069091797,
"learning_rate": 2.2736735231908515e-06,
"loss": 1.3068,
"step": 14900
},
{
"epoch": 0.66,
"grad_norm": 2.004817485809326,
"learning_rate": 2.2647814097477816e-06,
"loss": 1.3148,
"step": 15000
},
{
"epoch": 0.66,
"eval_loss": 1.3130929470062256,
"eval_runtime": 1864.9246,
"eval_samples_per_second": 5.14,
"eval_steps_per_second": 0.643,
"step": 15000
},
{
"epoch": 0.66,
"grad_norm": 2.018002510070801,
"learning_rate": 2.2558528302388663e-06,
"loss": 1.318,
"step": 15100
},
{
"epoch": 0.67,
"grad_norm": 2.0592522621154785,
"learning_rate": 2.2468882103938155e-06,
"loss": 1.3139,
"step": 15200
},
{
"epoch": 0.67,
"grad_norm": 1.9203455448150635,
"learning_rate": 2.237887977660804e-06,
"loss": 1.3163,
"step": 15300
},
{
"epoch": 0.68,
"grad_norm": 2.1021227836608887,
"learning_rate": 2.2288525611860886e-06,
"loss": 1.3182,
"step": 15400
},
{
"epoch": 0.68,
"grad_norm": 2.050011396408081,
"learning_rate": 2.219782391793547e-06,
"loss": 1.3036,
"step": 15500
},
{
"epoch": 0.68,
"eval_loss": 1.312229871749878,
"eval_runtime": 1865.3014,
"eval_samples_per_second": 5.139,
"eval_steps_per_second": 0.643,
"step": 15500
},
{
"epoch": 0.69,
"grad_norm": 2.0257480144500732,
"learning_rate": 2.2106779019641336e-06,
"loss": 1.2984,
"step": 15600
},
{
"epoch": 0.69,
"grad_norm": 1.9984742403030396,
"learning_rate": 2.2015395258152596e-06,
"loss": 1.3206,
"step": 15700
},
{
"epoch": 0.69,
"grad_norm": 2.094935417175293,
"learning_rate": 2.1923676990800927e-06,
"loss": 1.3157,
"step": 15800
},
{
"epoch": 0.7,
"grad_norm": 2.0469040870666504,
"learning_rate": 2.1831628590867812e-06,
"loss": 1.3048,
"step": 15900
},
{
"epoch": 0.7,
"grad_norm": 2.0622622966766357,
"learning_rate": 2.1739254447376006e-06,
"loss": 1.3138,
"step": 16000
},
{
"epoch": 0.7,
"eval_loss": 1.3114036321640015,
"eval_runtime": 1865.761,
"eval_samples_per_second": 5.138,
"eval_steps_per_second": 0.643,
"step": 16000
},
{
"epoch": 0.71,
"grad_norm": 2.0791163444519043,
"learning_rate": 2.1646558964880277e-06,
"loss": 1.3108,
"step": 16100
},
{
"epoch": 0.71,
"grad_norm": 2.027860403060913,
"learning_rate": 2.1553546563257362e-06,
"loss": 1.3116,
"step": 16200
},
{
"epoch": 0.72,
"grad_norm": 2.0047738552093506,
"learning_rate": 2.146022167749525e-06,
"loss": 1.3048,
"step": 16300
},
{
"epoch": 0.72,
"grad_norm": 2.213186025619507,
"learning_rate": 2.136658875748169e-06,
"loss": 1.3071,
"step": 16400
},
{
"epoch": 0.73,
"grad_norm": 2.0164272785186768,
"learning_rate": 2.1272652267792036e-06,
"loss": 1.314,
"step": 16500
},
{
"epoch": 0.73,
"eval_loss": 1.310592532157898,
"eval_runtime": 1865.0707,
"eval_samples_per_second": 5.14,
"eval_steps_per_second": 0.643,
"step": 16500
},
{
"epoch": 0.73,
"grad_norm": 2.063606023788452,
"learning_rate": 2.117841668747633e-06,
"loss": 1.3171,
"step": 16600
},
{
"epoch": 0.73,
"grad_norm": 2.066218137741089,
"learning_rate": 2.1083886509845794e-06,
"loss": 1.3095,
"step": 16700
},
{
"epoch": 0.74,
"grad_norm": 2.00728702545166,
"learning_rate": 2.098906624225852e-06,
"loss": 1.3082,
"step": 16800
},
{
"epoch": 0.74,
"grad_norm": 1.946075439453125,
"learning_rate": 2.089396040590459e-06,
"loss": 1.3106,
"step": 16900
},
{
"epoch": 0.75,
"grad_norm": 2.2883059978485107,
"learning_rate": 2.079857353559047e-06,
"loss": 1.3045,
"step": 17000
},
{
"epoch": 0.75,
"eval_loss": 1.3099678754806519,
"eval_runtime": 1865.3447,
"eval_samples_per_second": 5.139,
"eval_steps_per_second": 0.643,
"step": 17000
},
{
"epoch": 0.75,
"grad_norm": 1.9902862310409546,
"learning_rate": 2.070291017952282e-06,
"loss": 1.3188,
"step": 17100
},
{
"epoch": 0.76,
"grad_norm": 2.0283963680267334,
"learning_rate": 2.0606974899091596e-06,
"loss": 1.3115,
"step": 17200
},
{
"epoch": 0.76,
"grad_norm": 2.249575138092041,
"learning_rate": 2.0510772268652563e-06,
"loss": 1.3095,
"step": 17300
},
{
"epoch": 0.76,
"grad_norm": 2.022587537765503,
"learning_rate": 2.0414306875309195e-06,
"loss": 1.2989,
"step": 17400
},
{
"epoch": 0.77,
"grad_norm": 2.3016037940979004,
"learning_rate": 2.0317583318693924e-06,
"loss": 1.303,
"step": 17500
},
{
"epoch": 0.77,
"eval_loss": 1.3091822862625122,
"eval_runtime": 1863.7921,
"eval_samples_per_second": 5.143,
"eval_steps_per_second": 0.643,
"step": 17500
},
{
"epoch": 0.77,
"grad_norm": 2.1690895557403564,
"learning_rate": 2.022060621074887e-06,
"loss": 1.3057,
"step": 17600
},
{
"epoch": 0.78,
"grad_norm": 2.1167852878570557,
"learning_rate": 2.012338017550587e-06,
"loss": 1.297,
"step": 17700
},
{
"epoch": 0.78,
"grad_norm": 2.035123109817505,
"learning_rate": 2.0025909848866085e-06,
"loss": 1.305,
"step": 17800
},
{
"epoch": 0.79,
"grad_norm": 1.931913137435913,
"learning_rate": 1.9928199878378854e-06,
"loss": 1.2938,
"step": 17900
},
{
"epoch": 0.79,
"grad_norm": 2.0481088161468506,
"learning_rate": 1.983025492302016e-06,
"loss": 1.2977,
"step": 18000
},
{
"epoch": 0.79,
"eval_loss": 1.3084911108016968,
"eval_runtime": 1866.3126,
"eval_samples_per_second": 5.136,
"eval_steps_per_second": 0.642,
"step": 18000
},
{
"epoch": 0.8,
"grad_norm": 1.9030753374099731,
"learning_rate": 1.973207965297045e-06,
"loss": 1.3115,
"step": 18100
},
{
"epoch": 0.8,
"grad_norm": 1.9687721729278564,
"learning_rate": 1.963367874939198e-06,
"loss": 1.3026,
"step": 18200
},
{
"epoch": 0.8,
"grad_norm": 2.2594008445739746,
"learning_rate": 1.9535056904205588e-06,
"loss": 1.3134,
"step": 18300
},
{
"epoch": 0.81,
"grad_norm": 2.038999080657959,
"learning_rate": 1.943621881986696e-06,
"loss": 1.3039,
"step": 18400
},
{
"epoch": 0.81,
"grad_norm": 1.9501850605010986,
"learning_rate": 1.933716920914245e-06,
"loss": 1.3096,
"step": 18500
},
{
"epoch": 0.81,
"eval_loss": 1.3078255653381348,
"eval_runtime": 1865.4912,
"eval_samples_per_second": 5.139,
"eval_steps_per_second": 0.643,
"step": 18500
},
{
"epoch": 0.82,
"grad_norm": 2.107854127883911,
"learning_rate": 1.923791279488435e-06,
"loss": 1.307,
"step": 18600
},
{
"epoch": 0.82,
"grad_norm": 2.067185640335083,
"learning_rate": 1.913845430980567e-06,
"loss": 1.3225,
"step": 18700
},
{
"epoch": 0.83,
"grad_norm": 1.9632668495178223,
"learning_rate": 1.9038798496254518e-06,
"loss": 1.3095,
"step": 18800
},
{
"epoch": 0.83,
"grad_norm": 2.140648603439331,
"learning_rate": 1.8938950105987948e-06,
"loss": 1.2998,
"step": 18900
},
{
"epoch": 0.84,
"grad_norm": 1.9392528533935547,
"learning_rate": 1.8838913899945394e-06,
"loss": 1.3073,
"step": 19000
},
{
"epoch": 0.84,
"eval_loss": 1.3071683645248413,
"eval_runtime": 1864.0962,
"eval_samples_per_second": 5.142,
"eval_steps_per_second": 0.643,
"step": 19000
},
{
"epoch": 0.84,
"grad_norm": 2.2147011756896973,
"learning_rate": 1.8738694648021666e-06,
"loss": 1.3017,
"step": 19100
},
{
"epoch": 0.84,
"grad_norm": 2.112633228302002,
"learning_rate": 1.863829712883951e-06,
"loss": 1.312,
"step": 19200
},
{
"epoch": 0.85,
"grad_norm": 2.0416507720947266,
"learning_rate": 1.8537726129521755e-06,
"loss": 1.3036,
"step": 19300
},
{
"epoch": 0.85,
"grad_norm": 1.988303303718567,
"learning_rate": 1.8436986445463049e-06,
"loss": 1.3009,
"step": 19400
},
{
"epoch": 0.86,
"grad_norm": 2.023023843765259,
"learning_rate": 1.8336082880101228e-06,
"loss": 1.3137,
"step": 19500
},
{
"epoch": 0.86,
"eval_loss": 1.3065180778503418,
"eval_runtime": 1866.3466,
"eval_samples_per_second": 5.136,
"eval_steps_per_second": 0.642,
"step": 19500
},
{
"epoch": 0.86,
"grad_norm": 2.112020254135132,
"learning_rate": 1.8235020244688246e-06,
"loss": 1.3037,
"step": 19600
},
{
"epoch": 0.87,
"grad_norm": 2.056504249572754,
"learning_rate": 1.8133803358060798e-06,
"loss": 1.3141,
"step": 19700
},
{
"epoch": 0.87,
"grad_norm": 2.0338857173919678,
"learning_rate": 1.8032437046410528e-06,
"loss": 1.3012,
"step": 19800
},
{
"epoch": 0.87,
"grad_norm": 2.071561098098755,
"learning_rate": 1.7930926143053926e-06,
"loss": 1.3063,
"step": 19900
},
{
"epoch": 0.88,
"grad_norm": 2.080360174179077,
"learning_rate": 1.7829275488201849e-06,
"loss": 1.3083,
"step": 20000
},
{
"epoch": 0.88,
"eval_loss": 1.3060280084609985,
"eval_runtime": 1870.2241,
"eval_samples_per_second": 5.126,
"eval_steps_per_second": 0.641,
"step": 20000
},
{
"epoch": 0.88,
"grad_norm": 2.0536909103393555,
"learning_rate": 1.7727489928728747e-06,
"loss": 1.3115,
"step": 20100
},
{
"epoch": 0.89,
"grad_norm": 2.07513689994812,
"learning_rate": 1.762557431794155e-06,
"loss": 1.3028,
"step": 20200
},
{
"epoch": 0.89,
"grad_norm": 2.2155916690826416,
"learning_rate": 1.7523533515348249e-06,
"loss": 1.3061,
"step": 20300
},
{
"epoch": 0.9,
"grad_norm": 2.2243480682373047,
"learning_rate": 1.7421372386426185e-06,
"loss": 1.312,
"step": 20400
},
{
"epoch": 0.9,
"grad_norm": 2.010845184326172,
"learning_rate": 1.7319095802390087e-06,
"loss": 1.3199,
"step": 20500
},
{
"epoch": 0.9,
"eval_loss": 1.3053687810897827,
"eval_runtime": 1865.2464,
"eval_samples_per_second": 5.139,
"eval_steps_per_second": 0.643,
"step": 20500
},
{
"epoch": 0.91,
"grad_norm": 1.9459248781204224,
"learning_rate": 1.7216708639959744e-06,
"loss": 1.3091,
"step": 20600
},
{
"epoch": 0.91,
"grad_norm": 2.0189294815063477,
"learning_rate": 1.711421578112754e-06,
"loss": 1.2958,
"step": 20700
},
{
"epoch": 0.91,
"grad_norm": 2.0753591060638428,
"learning_rate": 1.701162211292561e-06,
"loss": 1.3041,
"step": 20800
},
{
"epoch": 0.92,
"grad_norm": 2.1882078647613525,
"learning_rate": 1.6908932527192886e-06,
"loss": 1.3104,
"step": 20900
},
{
"epoch": 0.92,
"grad_norm": 2.0360772609710693,
"learning_rate": 1.6806151920341779e-06,
"loss": 1.31,
"step": 21000
},
{
"epoch": 0.92,
"eval_loss": 1.3050304651260376,
"eval_runtime": 1868.8769,
"eval_samples_per_second": 5.129,
"eval_steps_per_second": 0.642,
"step": 21000
},
{
"epoch": 0.93,
"grad_norm": 2.137718439102173,
"learning_rate": 1.6703285193124748e-06,
"loss": 1.3027,
"step": 21100
},
{
"epoch": 0.93,
"grad_norm": 2.0401344299316406,
"learning_rate": 1.660033725040063e-06,
"loss": 1.3037,
"step": 21200
},
{
"epoch": 0.94,
"grad_norm": 1.9637211561203003,
"learning_rate": 1.649731300090074e-06,
"loss": 1.2974,
"step": 21300
},
{
"epoch": 0.94,
"grad_norm": 2.090510368347168,
"learning_rate": 1.6394217356994827e-06,
"loss": 1.2934,
"step": 21400
},
{
"epoch": 0.95,
"grad_norm": 2.3479647636413574,
"learning_rate": 1.6291055234456859e-06,
"loss": 1.3201,
"step": 21500
},
{
"epoch": 0.95,
"eval_loss": 1.3043450117111206,
"eval_runtime": 1866.4601,
"eval_samples_per_second": 5.136,
"eval_steps_per_second": 0.642,
"step": 21500
},
{
"epoch": 0.95,
"grad_norm": 2.1579744815826416,
"learning_rate": 1.6187831552230603e-06,
"loss": 1.2983,
"step": 21600
},
{
"epoch": 0.95,
"grad_norm": 2.2169971466064453,
"learning_rate": 1.608455123219511e-06,
"loss": 1.3011,
"step": 21700
},
{
"epoch": 0.96,
"grad_norm": 1.9283170700073242,
"learning_rate": 1.5981219198929995e-06,
"loss": 1.3036,
"step": 21800
},
{
"epoch": 0.96,
"grad_norm": 1.9650486707687378,
"learning_rate": 1.5877840379480678e-06,
"loss": 1.3017,
"step": 21900
},
{
"epoch": 0.97,
"grad_norm": 2.2148048877716064,
"learning_rate": 1.5774419703123393e-06,
"loss": 1.3118,
"step": 22000
},
{
"epoch": 0.97,
"eval_loss": 1.303900122642517,
"eval_runtime": 1863.2857,
"eval_samples_per_second": 5.145,
"eval_steps_per_second": 0.643,
"step": 22000
},
{
"epoch": 0.97,
"grad_norm": 2.119337558746338,
"learning_rate": 1.56709621011302e-06,
"loss": 1.3038,
"step": 22100
},
{
"epoch": 0.98,
"grad_norm": 2.062990427017212,
"learning_rate": 1.5567472506533827e-06,
"loss": 1.2878,
"step": 22200
},
{
"epoch": 0.98,
"grad_norm": 2.1745545864105225,
"learning_rate": 1.546395585389247e-06,
"loss": 1.2964,
"step": 22300
},
{
"epoch": 0.98,
"grad_norm": 2.1263065338134766,
"learning_rate": 1.5360417079054494e-06,
"loss": 1.295,
"step": 22400
},
{
"epoch": 0.99,
"grad_norm": 2.044600248336792,
"learning_rate": 1.5256861118923083e-06,
"loss": 1.3049,
"step": 22500
},
{
"epoch": 0.99,
"eval_loss": 1.303480625152588,
"eval_runtime": 1862.9597,
"eval_samples_per_second": 5.146,
"eval_steps_per_second": 0.644,
"step": 22500
},
{
"epoch": 0.99,
"grad_norm": 2.0431716442108154,
"learning_rate": 1.5153292911220858e-06,
"loss": 1.2992,
"step": 22600
},
{
"epoch": 1.0,
"grad_norm": 2.1952879428863525,
"learning_rate": 1.5049717394254412e-06,
"loss": 1.2952,
"step": 22700
},
{
"epoch": 1.0,
"grad_norm": 1.9967477321624756,
"learning_rate": 1.4946139506678864e-06,
"loss": 1.2967,
"step": 22800
},
{
"epoch": 1.01,
"grad_norm": 2.1949024200439453,
"learning_rate": 1.4842564187262365e-06,
"loss": 1.3107,
"step": 22900
},
{
"epoch": 1.01,
"grad_norm": 2.114365339279175,
"learning_rate": 1.47389963746506e-06,
"loss": 1.2946,
"step": 23000
},
{
"epoch": 1.01,
"eval_loss": 1.3029588460922241,
"eval_runtime": 1862.6203,
"eval_samples_per_second": 5.147,
"eval_steps_per_second": 0.644,
"step": 23000
},
{
"epoch": 1.02,
"grad_norm": 2.3793764114379883,
"learning_rate": 1.4635441007131343e-06,
"loss": 1.3098,
"step": 23100
},
{
"epoch": 1.02,
"grad_norm": 2.0778090953826904,
"learning_rate": 1.453190302239894e-06,
"loss": 1.3051,
"step": 23200
},
{
"epoch": 1.02,
"grad_norm": 2.019946336746216,
"learning_rate": 1.442838735731892e-06,
"loss": 1.2966,
"step": 23300
},
{
"epoch": 1.03,
"grad_norm": 2.1698460578918457,
"learning_rate": 1.432489894769254e-06,
"loss": 1.3029,
"step": 23400
},
{
"epoch": 1.03,
"grad_norm": 2.034184217453003,
"learning_rate": 1.4221442728021506e-06,
"loss": 1.316,
"step": 23500
},
{
"epoch": 1.03,
"eval_loss": 1.3026907444000244,
"eval_runtime": 1863.5533,
"eval_samples_per_second": 5.144,
"eval_steps_per_second": 0.643,
"step": 23500
},
{
"epoch": 1.04,
"grad_norm": 2.025113582611084,
"learning_rate": 1.4118023631272633e-06,
"loss": 1.2935,
"step": 23600
},
{
"epoch": 1.04,
"grad_norm": 2.073225259780884,
"learning_rate": 1.4014646588642656e-06,
"loss": 1.2967,
"step": 23700
},
{
"epoch": 1.05,
"grad_norm": 2.120889186859131,
"learning_rate": 1.391131652932311e-06,
"loss": 1.3013,
"step": 23800
},
{
"epoch": 1.05,
"grad_norm": 2.005122423171997,
"learning_rate": 1.3808038380265258e-06,
"loss": 1.2928,
"step": 23900
},
{
"epoch": 1.06,
"grad_norm": 2.147322654724121,
"learning_rate": 1.3704817065945235e-06,
"loss": 1.3059,
"step": 24000
},
{
"epoch": 1.06,
"eval_loss": 1.3021385669708252,
"eval_runtime": 1865.741,
"eval_samples_per_second": 5.138,
"eval_steps_per_second": 0.643,
"step": 24000
},
{
"epoch": 1.06,
"grad_norm": 2.336851119995117,
"learning_rate": 1.360165750812916e-06,
"loss": 1.2986,
"step": 24100
},
{
"epoch": 1.06,
"grad_norm": 2.077028274536133,
"learning_rate": 1.3498564625638525e-06,
"loss": 1.3062,
"step": 24200
},
{
"epoch": 1.07,
"grad_norm": 2.137953519821167,
"learning_rate": 1.3395543334115616e-06,
"loss": 1.3071,
"step": 24300
},
{
"epoch": 1.07,
"grad_norm": 2.054823875427246,
"learning_rate": 1.3292598545789157e-06,
"loss": 1.2981,
"step": 24400
},
{
"epoch": 1.08,
"grad_norm": 2.045687198638916,
"learning_rate": 1.3189735169240044e-06,
"loss": 1.2971,
"step": 24500
},
{
"epoch": 1.08,
"eval_loss": 1.3017216920852661,
"eval_runtime": 1863.3072,
"eval_samples_per_second": 5.145,
"eval_steps_per_second": 0.643,
"step": 24500
},
{
"epoch": 1.08,
"grad_norm": 2.2879574298858643,
"learning_rate": 1.308695810916735e-06,
"loss": 1.2943,
"step": 24600
},
{
"epoch": 1.09,
"grad_norm": 2.1517672538757324,
"learning_rate": 1.2984272266154414e-06,
"loss": 1.3083,
"step": 24700
},
{
"epoch": 1.09,
"grad_norm": 2.0498697757720947,
"learning_rate": 1.288168253643521e-06,
"loss": 1.2873,
"step": 24800
},
{
"epoch": 1.09,
"grad_norm": 2.1879069805145264,
"learning_rate": 1.2779193811660837e-06,
"loss": 1.2902,
"step": 24900
},
{
"epoch": 1.1,
"grad_norm": 2.2728075981140137,
"learning_rate": 1.2676810978666353e-06,
"loss": 1.3001,
"step": 25000
},
{
"epoch": 1.1,
"eval_loss": 1.3014191389083862,
"eval_runtime": 1863.092,
"eval_samples_per_second": 5.145,
"eval_steps_per_second": 0.644,
"step": 25000
},
{
"epoch": 1.1,
"grad_norm": 2.2328622341156006,
"learning_rate": 1.2574538919237673e-06,
"loss": 1.3028,
"step": 25100
},
{
"epoch": 1.11,
"grad_norm": 2.2706058025360107,
"learning_rate": 1.2472382509878873e-06,
"loss": 1.2931,
"step": 25200
},
{
"epoch": 1.11,
"grad_norm": 2.234330892562866,
"learning_rate": 1.2370346621579623e-06,
"loss": 1.3025,
"step": 25300
},
{
"epoch": 1.12,
"grad_norm": 2.125455379486084,
"learning_rate": 1.2268436119582958e-06,
"loss": 1.3031,
"step": 25400
},
{
"epoch": 1.12,
"grad_norm": 2.0227627754211426,
"learning_rate": 1.2166655863153263e-06,
"loss": 1.2743,
"step": 25500
},
{
"epoch": 1.12,
"eval_loss": 1.3010908365249634,
"eval_runtime": 1862.8931,
"eval_samples_per_second": 5.146,
"eval_steps_per_second": 0.644,
"step": 25500
},
{
"epoch": 1.13,
"grad_norm": 2.0580368041992188,
"learning_rate": 1.2065010705344618e-06,
"loss": 1.3054,
"step": 25600
},
{
"epoch": 1.13,
"grad_norm": 2.1647911071777344,
"learning_rate": 1.1963505492769355e-06,
"loss": 1.2916,
"step": 25700
},
{
"epoch": 1.13,
"grad_norm": 2.0057008266448975,
"learning_rate": 1.1862145065366998e-06,
"loss": 1.3016,
"step": 25800
},
{
"epoch": 1.14,
"grad_norm": 2.097618341445923,
"learning_rate": 1.1760934256173447e-06,
"loss": 1.2946,
"step": 25900
},
{
"epoch": 1.14,
"grad_norm": 2.1340222358703613,
"learning_rate": 1.1659877891090587e-06,
"loss": 1.2983,
"step": 26000
},
{
"epoch": 1.14,
"eval_loss": 1.300696849822998,
"eval_runtime": 1862.6712,
"eval_samples_per_second": 5.146,
"eval_steps_per_second": 0.644,
"step": 26000
},
{
"epoch": 1.15,
"grad_norm": 2.054945468902588,
"learning_rate": 1.155898078865611e-06,
"loss": 1.3035,
"step": 26100
},
{
"epoch": 1.15,
"grad_norm": 2.0940968990325928,
"learning_rate": 1.1458247759813828e-06,
"loss": 1.2955,
"step": 26200
},
{
"epoch": 1.16,
"grad_norm": 2.199674129486084,
"learning_rate": 1.135768360768423e-06,
"loss": 1.3013,
"step": 26300
},
{
"epoch": 1.16,
"grad_norm": 2.2380006313323975,
"learning_rate": 1.125729312733549e-06,
"loss": 1.3017,
"step": 26400
},
{
"epoch": 1.16,
"grad_norm": 2.0469789505004883,
"learning_rate": 1.1157081105554801e-06,
"loss": 1.3048,
"step": 26500
},
{
"epoch": 1.16,
"eval_loss": 1.3003997802734375,
"eval_runtime": 1862.5897,
"eval_samples_per_second": 5.147,
"eval_steps_per_second": 0.644,
"step": 26500
},
{
"epoch": 1.17,
"grad_norm": 2.136064052581787,
"learning_rate": 1.1057052320620172e-06,
"loss": 1.2849,
"step": 26600
},
{
"epoch": 1.17,
"grad_norm": 2.2330269813537598,
"learning_rate": 1.0957211542072556e-06,
"loss": 1.2946,
"step": 26700
},
{
"epoch": 1.18,
"grad_norm": 2.45259690284729,
"learning_rate": 1.085756353048846e-06,
"loss": 1.2956,
"step": 26800
},
{
"epoch": 1.18,
"grad_norm": 2.133361339569092,
"learning_rate": 1.0758113037252912e-06,
"loss": 1.3041,
"step": 26900
},
{
"epoch": 1.19,
"grad_norm": 2.1824896335601807,
"learning_rate": 1.065886480433296e-06,
"loss": 1.2945,
"step": 27000
},
{
"epoch": 1.19,
"eval_loss": 1.2999972105026245,
"eval_runtime": 1863.119,
"eval_samples_per_second": 5.145,
"eval_steps_per_second": 0.644,
"step": 27000
},
{
"epoch": 1.19,
"grad_norm": 2.218424081802368,
"learning_rate": 1.0559823564051523e-06,
"loss": 1.3011,
"step": 27100
},
{
"epoch": 1.2,
"grad_norm": 2.0408236980438232,
"learning_rate": 1.0460994038861768e-06,
"loss": 1.2935,
"step": 27200
},
{
"epoch": 1.2,
"grad_norm": 2.0885488986968994,
"learning_rate": 1.0362380941121925e-06,
"loss": 1.3001,
"step": 27300
},
{
"epoch": 1.2,
"grad_norm": 2.130805015563965,
"learning_rate": 1.0263988972870613e-06,
"loss": 1.3081,
"step": 27400
},
{
"epoch": 1.21,
"grad_norm": 2.0574839115142822,
"learning_rate": 1.0165822825602596e-06,
"loss": 1.3071,
"step": 27500
},
{
"epoch": 1.21,
"eval_loss": 1.2996830940246582,
"eval_runtime": 1863.286,
"eval_samples_per_second": 5.145,
"eval_steps_per_second": 0.643,
"step": 27500
},
{
"epoch": 1.21,
"grad_norm": 2.075486421585083,
"learning_rate": 1.0067887180045145e-06,
"loss": 1.296,
"step": 27600
},
{
"epoch": 1.22,
"grad_norm": 2.07733154296875,
"learning_rate": 9.970186705934803e-07,
"loss": 1.3015,
"step": 27700
},
{
"epoch": 1.22,
"grad_norm": 2.1551625728607178,
"learning_rate": 9.872726061794744e-07,
"loss": 1.2951,
"step": 27800
},
{
"epoch": 1.23,
"grad_norm": 2.174243927001953,
"learning_rate": 9.775509894712656e-07,
"loss": 1.3052,
"step": 27900
},
{
"epoch": 1.23,
"grad_norm": 2.11954665184021,
"learning_rate": 9.678542840119123e-07,
"loss": 1.294,
"step": 28000
},
{
"epoch": 1.23,
"eval_loss": 1.2994180917739868,
"eval_runtime": 1864.2204,
"eval_samples_per_second": 5.142,
"eval_steps_per_second": 0.643,
"step": 28000
},
{
"epoch": 1.24,
"grad_norm": 2.0479559898376465,
"learning_rate": 9.581829521566663e-07,
"loss": 1.3078,
"step": 28100
},
{
"epoch": 1.24,
"grad_norm": 2.1119577884674072,
"learning_rate": 9.485374550509197e-07,
"loss": 1.2878,
"step": 28200
},
{
"epoch": 1.24,
"grad_norm": 2.1557509899139404,
"learning_rate": 9.389182526082229e-07,
"loss": 1.2866,
"step": 28300
},
{
"epoch": 1.25,
"grad_norm": 2.226323127746582,
"learning_rate": 9.293258034883504e-07,
"loss": 1.2977,
"step": 28400
},
{
"epoch": 1.25,
"grad_norm": 2.1167123317718506,
"learning_rate": 9.197605650754353e-07,
"loss": 1.2937,
"step": 28500
},
{
"epoch": 1.25,
"eval_loss": 1.2991981506347656,
"eval_runtime": 1863.4013,
"eval_samples_per_second": 5.144,
"eval_steps_per_second": 0.643,
"step": 28500
},
{
"epoch": 1.26,
"grad_norm": 2.254310131072998,
"learning_rate": 9.10222993456156e-07,
"loss": 1.3016,
"step": 28600
},
{
"epoch": 1.26,
"grad_norm": 2.0988237857818604,
"learning_rate": 9.007135433979935e-07,
"loss": 1.3,
"step": 28700
},
{
"epoch": 1.27,
"grad_norm": 2.0965945720672607,
"learning_rate": 8.912326683275442e-07,
"loss": 1.2877,
"step": 28800
},
{
"epoch": 1.27,
"grad_norm": 2.037484884262085,
"learning_rate": 8.817808203089022e-07,
"loss": 1.3084,
"step": 28900
},
{
"epoch": 1.27,
"grad_norm": 2.0629284381866455,
"learning_rate": 8.723584500221013e-07,
"loss": 1.2857,
"step": 29000
},
{
"epoch": 1.27,
"eval_loss": 1.298945426940918,
"eval_runtime": 1863.2462,
"eval_samples_per_second": 5.145,
"eval_steps_per_second": 0.644,
"step": 29000
},
{
"epoch": 1.28,
"grad_norm": 2.1192641258239746,
"learning_rate": 8.62966006741628e-07,
"loss": 1.3011,
"step": 29100
},
{
"epoch": 1.28,
"grad_norm": 2.2492525577545166,
"learning_rate": 8.536039383150001e-07,
"loss": 1.2882,
"step": 29200
},
{
"epoch": 1.29,
"grad_norm": 2.1896586418151855,
"learning_rate": 8.442726911414094e-07,
"loss": 1.2933,
"step": 29300
},
{
"epoch": 1.29,
"grad_norm": 2.073396921157837,
"learning_rate": 8.349727101504388e-07,
"loss": 1.3119,
"step": 29400
},
{
"epoch": 1.3,
"grad_norm": 2.1311280727386475,
"learning_rate": 8.257044387808482e-07,
"loss": 1.2921,
"step": 29500
},
{
"epoch": 1.3,
"eval_loss": 1.2986724376678467,
"eval_runtime": 1862.6142,
"eval_samples_per_second": 5.147,
"eval_steps_per_second": 0.644,
"step": 29500
},
{
"epoch": 1.3,
"grad_norm": 2.1984448432922363,
"learning_rate": 8.164683189594269e-07,
"loss": 1.2933,
"step": 29600
},
{
"epoch": 1.31,
"grad_norm": 2.1039230823516846,
"learning_rate": 8.072647910799269e-07,
"loss": 1.3129,
"step": 29700
},
{
"epoch": 1.31,
"grad_norm": 1.9746812582015991,
"learning_rate": 7.980942939820596e-07,
"loss": 1.305,
"step": 29800
},
{
"epoch": 1.31,
"grad_norm": 2.099011182785034,
"learning_rate": 7.889572649305736e-07,
"loss": 1.2956,
"step": 29900
},
{
"epoch": 1.32,
"grad_norm": 2.3024837970733643,
"learning_rate": 7.798541395944053e-07,
"loss": 1.2794,
"step": 30000
},
{
"epoch": 1.32,
"eval_loss": 1.2984648942947388,
"eval_runtime": 1861.962,
"eval_samples_per_second": 5.148,
"eval_steps_per_second": 0.644,
"step": 30000
},
{
"epoch": 1.32,
"grad_norm": 2.1850967407226562,
"learning_rate": 7.707853520259052e-07,
"loss": 1.2854,
"step": 30100
},
{
"epoch": 1.33,
"grad_norm": 2.239342212677002,
"learning_rate": 7.617513346401388e-07,
"loss": 1.2919,
"step": 30200
},
{
"epoch": 1.33,
"grad_norm": 2.3949005603790283,
"learning_rate": 7.527525181942738e-07,
"loss": 1.3048,
"step": 30300
},
{
"epoch": 1.34,
"grad_norm": 2.2545089721679688,
"learning_rate": 7.437893317670351e-07,
"loss": 1.2909,
"step": 30400
},
{
"epoch": 1.34,
"grad_norm": 2.2839066982269287,
"learning_rate": 7.348622027382499e-07,
"loss": 1.299,
"step": 30500
},
{
"epoch": 1.34,
"eval_loss": 1.2982053756713867,
"eval_runtime": 1862.6374,
"eval_samples_per_second": 5.146,
"eval_steps_per_second": 0.644,
"step": 30500
},
{
"epoch": 1.35,
"grad_norm": 2.43034029006958,
"learning_rate": 7.259715567684677e-07,
"loss": 1.2973,
"step": 30600
},
{
"epoch": 1.35,
"grad_norm": 2.251255989074707,
"learning_rate": 7.171178177786646e-07,
"loss": 1.2973,
"step": 30700
},
{
"epoch": 1.35,
"grad_norm": 2.132671356201172,
"learning_rate": 7.083014079300282e-07,
"loss": 1.2931,
"step": 30800
},
{
"epoch": 1.36,
"grad_norm": 2.243849277496338,
"learning_rate": 6.995227476038316e-07,
"loss": 1.2967,
"step": 30900
},
{
"epoch": 1.36,
"grad_norm": 2.284914016723633,
"learning_rate": 6.907822553813857e-07,
"loss": 1.2925,
"step": 31000
},
{
"epoch": 1.36,
"eval_loss": 1.2980060577392578,
"eval_runtime": 1862.987,
"eval_samples_per_second": 5.146,
"eval_steps_per_second": 0.644,
"step": 31000
},
{
"epoch": 1.37,
"grad_norm": 2.1762306690216064,
"learning_rate": 6.820803480240832e-07,
"loss": 1.3035,
"step": 31100
},
{
"epoch": 1.37,
"grad_norm": 2.1974294185638428,
"learning_rate": 6.734174404535233e-07,
"loss": 1.2982,
"step": 31200
},
{
"epoch": 1.38,
"grad_norm": 2.1154937744140625,
"learning_rate": 6.647939457317332e-07,
"loss": 1.2987,
"step": 31300
},
{
"epoch": 1.38,
"grad_norm": 2.2262349128723145,
"learning_rate": 6.562102750414655e-07,
"loss": 1.2894,
"step": 31400
},
{
"epoch": 1.38,
"grad_norm": 2.2507781982421875,
"learning_rate": 6.476668376665978e-07,
"loss": 1.3024,
"step": 31500
},
{
"epoch": 1.38,
"eval_loss": 1.2977538108825684,
"eval_runtime": 1863.5376,
"eval_samples_per_second": 5.144,
"eval_steps_per_second": 0.643,
"step": 31500
},
{
"epoch": 1.39,
"grad_norm": 2.0441462993621826,
"learning_rate": 6.391640409726157e-07,
"loss": 1.2939,
"step": 31600
},
{
"epoch": 1.39,
"grad_norm": 2.172423839569092,
"learning_rate": 6.307022903871869e-07,
"loss": 1.3002,
"step": 31700
},
{
"epoch": 1.4,
"grad_norm": 1.932667851448059,
"learning_rate": 6.222819893808335e-07,
"loss": 1.292,
"step": 31800
},
{
"epoch": 1.4,
"grad_norm": 2.173680305480957,
"learning_rate": 6.139035394476905e-07,
"loss": 1.3025,
"step": 31900
},
{
"epoch": 1.41,
"grad_norm": 2.1886558532714844,
"learning_rate": 6.055673400863636e-07,
"loss": 1.3103,
"step": 32000
},
{
"epoch": 1.41,
"eval_loss": 1.2976477146148682,
"eval_runtime": 1862.7508,
"eval_samples_per_second": 5.146,
"eval_steps_per_second": 0.644,
"step": 32000
},
{
"epoch": 1.41,
"grad_norm": 2.3404366970062256,
"learning_rate": 5.972737887808805e-07,
"loss": 1.2915,
"step": 32100
},
{
"epoch": 1.42,
"grad_norm": 2.2503886222839355,
"learning_rate": 5.890232809817378e-07,
"loss": 1.2887,
"step": 32200
},
{
"epoch": 1.42,
"grad_norm": 2.1247682571411133,
"learning_rate": 5.808162100870441e-07,
"loss": 1.297,
"step": 32300
},
{
"epoch": 1.42,
"grad_norm": 2.1436526775360107,
"learning_rate": 5.72652967423765e-07,
"loss": 1.3008,
"step": 32400
},
{
"epoch": 1.43,
"grad_norm": 2.1058590412139893,
"learning_rate": 5.645339422290599e-07,
"loss": 1.2956,
"step": 32500
},
{
"epoch": 1.43,
"eval_loss": 1.2974357604980469,
"eval_runtime": 1866.987,
"eval_samples_per_second": 5.134,
"eval_steps_per_second": 0.642,
"step": 32500
},
{
"epoch": 1.43,
"grad_norm": 2.1536617279052734,
"learning_rate": 5.564595216317266e-07,
"loss": 1.299,
"step": 32600
},
{
"epoch": 1.44,
"grad_norm": 2.031917095184326,
"learning_rate": 5.484300906337403e-07,
"loss": 1.2828,
"step": 32700
},
{
"epoch": 1.44,
"grad_norm": 1.9297816753387451,
"learning_rate": 5.404460320918966e-07,
"loss": 1.2937,
"step": 32800
},
{
"epoch": 1.45,
"grad_norm": 2.253413677215576,
"learning_rate": 5.325077266995542e-07,
"loss": 1.3078,
"step": 32900
},
{
"epoch": 1.45,
"grad_norm": 2.250991106033325,
"learning_rate": 5.24615552968487e-07,
"loss": 1.2788,
"step": 33000
},
{
"epoch": 1.45,
"eval_loss": 1.297324776649475,
"eval_runtime": 1864.5246,
"eval_samples_per_second": 5.141,
"eval_steps_per_second": 0.643,
"step": 33000
},
{
"epoch": 1.46,
"grad_norm": 2.186206579208374,
"learning_rate": 5.167698872108312e-07,
"loss": 1.3048,
"step": 33100
},
{
"epoch": 1.46,
"grad_norm": 2.1334028244018555,
"learning_rate": 5.089711035211465e-07,
"loss": 1.279,
"step": 33200
},
{
"epoch": 1.46,
"grad_norm": 2.334712266921997,
"learning_rate": 5.012195737585756e-07,
"loss": 1.2949,
"step": 33300
},
{
"epoch": 1.47,
"grad_norm": 2.2277867794036865,
"learning_rate": 4.935156675291152e-07,
"loss": 1.2998,
"step": 33400
},
{
"epoch": 1.47,
"grad_norm": 2.1463205814361572,
"learning_rate": 4.858597521679899e-07,
"loss": 1.2878,
"step": 33500
},
{
"epoch": 1.47,
"eval_loss": 1.2971347570419312,
"eval_runtime": 1865.0446,
"eval_samples_per_second": 5.14,
"eval_steps_per_second": 0.643,
"step": 33500
},
{
"epoch": 1.48,
"grad_norm": 2.0899240970611572,
"learning_rate": 4.782521927221414e-07,
"loss": 1.2927,
"step": 33600
},
{
"epoch": 1.48,
"grad_norm": 2.1801438331604004,
"learning_rate": 4.70693351932817e-07,
"loss": 1.2822,
"step": 33700
},
{
"epoch": 1.49,
"grad_norm": 2.0565810203552246,
"learning_rate": 4.631835902182795e-07,
"loss": 1.2998,
"step": 33800
},
{
"epoch": 1.49,
"grad_norm": 2.0865461826324463,
"learning_rate": 4.55723265656616e-07,
"loss": 1.2912,
"step": 33900
},
{
"epoch": 1.49,
"grad_norm": 2.186192750930786,
"learning_rate": 4.483127339686686e-07,
"loss": 1.2893,
"step": 34000
},
{
"epoch": 1.49,
"eval_loss": 1.297006368637085,
"eval_runtime": 1864.1525,
"eval_samples_per_second": 5.142,
"eval_steps_per_second": 0.643,
"step": 34000
},
{
"epoch": 1.5,
"grad_norm": 2.418661594390869,
"learning_rate": 4.409523485010707e-07,
"loss": 1.2823,
"step": 34100
},
{
"epoch": 1.5,
"grad_norm": 2.213181972503662,
"learning_rate": 4.3364246020940003e-07,
"loss": 1.2921,
"step": 34200
},
{
"epoch": 1.51,
"grad_norm": 2.1837573051452637,
"learning_rate": 4.263834176414426e-07,
"loss": 1.2834,
"step": 34300
},
{
"epoch": 1.51,
"grad_norm": 2.2444188594818115,
"learning_rate": 4.191755669205763e-07,
"loss": 1.3072,
"step": 34400
},
{
"epoch": 1.52,
"grad_norm": 2.1974008083343506,
"learning_rate": 4.120192517292637e-07,
"loss": 1.295,
"step": 34500
},
{
"epoch": 1.52,
"eval_loss": 1.2968517541885376,
"eval_runtime": 1863.4227,
"eval_samples_per_second": 5.144,
"eval_steps_per_second": 0.643,
"step": 34500
},
{
"epoch": 1.52,
"grad_norm": 2.2060582637786865,
"learning_rate": 4.0491481329266823e-07,
"loss": 1.294,
"step": 34600
},
{
"epoch": 1.53,
"grad_norm": 2.2055234909057617,
"learning_rate": 3.978625903623813e-07,
"loss": 1.301,
"step": 34700
},
{
"epoch": 1.53,
"grad_norm": 2.3005611896514893,
"learning_rate": 3.9086291920027183e-07,
"loss": 1.2909,
"step": 34800
},
{
"epoch": 1.53,
"grad_norm": 2.2892158031463623,
"learning_rate": 3.839161335624504e-07,
"loss": 1.2898,
"step": 34900
},
{
"epoch": 1.54,
"grad_norm": 2.2120680809020996,
"learning_rate": 3.7702256468335877e-07,
"loss": 1.2937,
"step": 35000
},
{
"epoch": 1.54,
"eval_loss": 1.2967385053634644,
"eval_runtime": 1863.4438,
"eval_samples_per_second": 5.144,
"eval_steps_per_second": 0.643,
"step": 35000
},
{
"epoch": 1.54,
"grad_norm": 2.06485652923584,
"learning_rate": 3.7018254125997167e-07,
"loss": 1.3034,
"step": 35100
},
{
"epoch": 1.55,
"grad_norm": 2.2690818309783936,
"learning_rate": 3.6339638943612876e-07,
"loss": 1.2816,
"step": 35200
},
{
"epoch": 1.55,
"grad_norm": 2.1384482383728027,
"learning_rate": 3.566644327869784e-07,
"loss": 1.2958,
"step": 35300
},
{
"epoch": 1.56,
"grad_norm": 2.262807607650757,
"learning_rate": 3.4998699230355445e-07,
"loss": 1.2906,
"step": 35400
},
{
"epoch": 1.56,
"grad_norm": 2.1555404663085938,
"learning_rate": 3.4336438637746716e-07,
"loss": 1.2911,
"step": 35500
},
{
"epoch": 1.56,
"eval_loss": 1.2966289520263672,
"eval_runtime": 1863.3587,
"eval_samples_per_second": 5.144,
"eval_steps_per_second": 0.643,
"step": 35500
},
{
"epoch": 1.56,
"grad_norm": 2.088890552520752,
"learning_rate": 3.3679693078572135e-07,
"loss": 1.2905,
"step": 35600
},
{
"epoch": 1.57,
"grad_norm": 2.362851142883301,
"learning_rate": 3.302849386756631e-07,
"loss": 1.2877,
"step": 35700
},
{
"epoch": 1.57,
"grad_norm": 2.3154048919677734,
"learning_rate": 3.2382872055004437e-07,
"loss": 1.288,
"step": 35800
},
{
"epoch": 1.58,
"grad_norm": 2.204124927520752,
"learning_rate": 3.1742858425222153e-07,
"loss": 1.2888,
"step": 35900
},
{
"epoch": 1.58,
"grad_norm": 2.0219850540161133,
"learning_rate": 3.1108483495147276e-07,
"loss": 1.2962,
"step": 36000
},
{
"epoch": 1.58,
"eval_loss": 1.2965224981307983,
"eval_runtime": 1863.8916,
"eval_samples_per_second": 5.143,
"eval_steps_per_second": 0.643,
"step": 36000
},
{
"epoch": 1.59,
"grad_norm": 2.456171989440918,
"learning_rate": 3.0479777512845107e-07,
"loss": 1.291,
"step": 36100
},
{
"epoch": 1.59,
"grad_norm": 2.043088674545288,
"learning_rate": 2.9856770456075904e-07,
"loss": 1.2926,
"step": 36200
},
{
"epoch": 1.6,
"grad_norm": 2.3121893405914307,
"learning_rate": 2.9239492030865573e-07,
"loss": 1.2933,
"step": 36300
},
{
"epoch": 1.6,
"grad_norm": 2.117414712905884,
"learning_rate": 2.862797167008913e-07,
"loss": 1.2842,
"step": 36400
},
{
"epoch": 1.6,
"grad_norm": 2.2130415439605713,
"learning_rate": 2.802223853206752e-07,
"loss": 1.2952,
"step": 36500
},
{
"epoch": 1.6,
"eval_loss": 1.2964164018630981,
"eval_runtime": 1863.5413,
"eval_samples_per_second": 5.144,
"eval_steps_per_second": 0.643,
"step": 36500
},
{
"epoch": 1.61,
"grad_norm": 2.1747307777404785,
"learning_rate": 2.7422321499177006e-07,
"loss": 1.2953,
"step": 36600
},
{
"epoch": 1.61,
"grad_norm": 2.238208293914795,
"learning_rate": 2.6828249176472284e-07,
"loss": 1.2901,
"step": 36700
},
{
"epoch": 1.62,
"grad_norm": 2.170766830444336,
"learning_rate": 2.624004989032238e-07,
"loss": 1.3061,
"step": 36800
},
{
"epoch": 1.62,
"grad_norm": 2.2250659465789795,
"learning_rate": 2.565775168706008e-07,
"loss": 1.3015,
"step": 36900
},
{
"epoch": 1.63,
"grad_norm": 2.16519832611084,
"learning_rate": 2.5081382331644484e-07,
"loss": 1.3065,
"step": 37000
},
{
"epoch": 1.63,
"eval_loss": 1.2963379621505737,
"eval_runtime": 1863.6216,
"eval_samples_per_second": 5.144,
"eval_steps_per_second": 0.643,
"step": 37000
},
{
"epoch": 1.63,
"grad_norm": 2.3075404167175293,
"learning_rate": 2.451096930633741e-07,
"loss": 1.2933,
"step": 37100
},
{
"epoch": 1.64,
"grad_norm": 2.204376459121704,
"learning_rate": 2.3946539809392637e-07,
"loss": 1.2938,
"step": 37200
},
{
"epoch": 1.64,
"grad_norm": 2.1348817348480225,
"learning_rate": 2.33881207537594e-07,
"loss": 1.2975,
"step": 37300
},
{
"epoch": 1.64,
"grad_norm": 2.2030017375946045,
"learning_rate": 2.283573876579882e-07,
"loss": 1.2873,
"step": 37400
},
{
"epoch": 1.65,
"grad_norm": 2.280266523361206,
"learning_rate": 2.2289420184014636e-07,
"loss": 1.2862,
"step": 37500
},
{
"epoch": 1.65,
"eval_loss": 1.2962608337402344,
"eval_runtime": 1864.1974,
"eval_samples_per_second": 5.142,
"eval_steps_per_second": 0.643,
"step": 37500
},
{
"epoch": 1.65,
"grad_norm": 2.2561440467834473,
"learning_rate": 2.1749191057796996e-07,
"loss": 1.2926,
"step": 37600
},
{
"epoch": 1.66,
"grad_norm": 2.064223289489746,
"learning_rate": 2.1215077146180688e-07,
"loss": 1.2911,
"step": 37700
},
{
"epoch": 1.66,
"grad_norm": 2.0723884105682373,
"learning_rate": 2.0687103916616612e-07,
"loss": 1.2927,
"step": 37800
},
{
"epoch": 1.67,
"grad_norm": 2.1863532066345215,
"learning_rate": 2.0165296543757777e-07,
"loss": 1.2958,
"step": 37900
},
{
"epoch": 1.67,
"grad_norm": 2.242121458053589,
"learning_rate": 1.9649679908258606e-07,
"loss": 1.2856,
"step": 38000
},
{
"epoch": 1.67,
"eval_loss": 1.2961931228637695,
"eval_runtime": 1863.7107,
"eval_samples_per_second": 5.144,
"eval_steps_per_second": 0.643,
"step": 38000
},
{
"epoch": 1.67,
"grad_norm": 2.2258660793304443,
"learning_rate": 1.914027859558884e-07,
"loss": 1.2895,
"step": 38100
},
{
"epoch": 1.68,
"grad_norm": 2.194655418395996,
"learning_rate": 1.8637116894861117e-07,
"loss": 1.299,
"step": 38200
},
{
"epoch": 1.68,
"grad_norm": 2.083817958831787,
"learning_rate": 1.8140218797672898e-07,
"loss": 1.2868,
"step": 38300
},
{
"epoch": 1.69,
"grad_norm": 2.1037402153015137,
"learning_rate": 1.7649607996962425e-07,
"loss": 1.2833,
"step": 38400
},
{
"epoch": 1.69,
"grad_norm": 2.3146462440490723,
"learning_rate": 1.71653078858791e-07,
"loss": 1.295,
"step": 38500
},
{
"epoch": 1.69,
"eval_loss": 1.2961418628692627,
"eval_runtime": 1863.9344,
"eval_samples_per_second": 5.143,
"eval_steps_per_second": 0.643,
"step": 38500
},
{
"epoch": 1.7,
"grad_norm": 2.178917646408081,
"learning_rate": 1.6687341556667956e-07,
"loss": 1.2967,
"step": 38600
},
{
"epoch": 1.7,
"grad_norm": 2.230252742767334,
"learning_rate": 1.6215731799568707e-07,
"loss": 1.2912,
"step": 38700
},
{
"epoch": 1.71,
"grad_norm": 2.027996301651001,
"learning_rate": 1.575050110172896e-07,
"loss": 1.2931,
"step": 38800
},
{
"epoch": 1.71,
"grad_norm": 2.343449831008911,
"learning_rate": 1.5291671646132082e-07,
"loss": 1.2896,
"step": 38900
},
{
"epoch": 1.71,
"grad_norm": 2.1190574169158936,
"learning_rate": 1.483926531053933e-07,
"loss": 1.2926,
"step": 39000
},
{
"epoch": 1.71,
"eval_loss": 1.2960731983184814,
"eval_runtime": 1864.8274,
"eval_samples_per_second": 5.14,
"eval_steps_per_second": 0.643,
"step": 39000
},
{
"epoch": 1.72,
"grad_norm": 2.0614373683929443,
"learning_rate": 1.4393303666446873e-07,
"loss": 1.2939,
"step": 39100
},
{
"epoch": 1.72,
"grad_norm": 2.185314178466797,
"learning_rate": 1.395380797805706e-07,
"loss": 1.293,
"step": 39200
},
{
"epoch": 1.73,
"grad_norm": 2.1229093074798584,
"learning_rate": 1.3520799201264644e-07,
"loss": 1.3041,
"step": 39300
},
{
"epoch": 1.73,
"grad_norm": 2.213289976119995,
"learning_rate": 1.3094297982657484e-07,
"loss": 1.2956,
"step": 39400
},
{
"epoch": 1.74,
"grad_norm": 2.1949591636657715,
"learning_rate": 1.267432465853209e-07,
"loss": 1.2928,
"step": 39500
},
{
"epoch": 1.74,
"eval_loss": 1.2960156202316284,
"eval_runtime": 1862.2042,
"eval_samples_per_second": 5.148,
"eval_steps_per_second": 0.644,
"step": 39500
},
{
"epoch": 1.74,
"grad_norm": 2.0892333984375,
"learning_rate": 1.2260899253924008e-07,
"loss": 1.2991,
"step": 39600
},
{
"epoch": 1.75,
"grad_norm": 2.1460952758789062,
"learning_rate": 1.1854041481652849e-07,
"loss": 1.2949,
"step": 39700
},
{
"epoch": 1.75,
"grad_norm": 2.241546630859375,
"learning_rate": 1.1453770741382607e-07,
"loss": 1.2836,
"step": 39800
},
{
"epoch": 1.75,
"grad_norm": 2.225529670715332,
"learning_rate": 1.1060106118696345e-07,
"loss": 1.2923,
"step": 39900
},
{
"epoch": 1.76,
"grad_norm": 2.106523036956787,
"learning_rate": 1.0673066384186425e-07,
"loss": 1.2966,
"step": 40000
},
{
"epoch": 1.76,
"eval_loss": 1.295967698097229,
"eval_runtime": 1866.8915,
"eval_samples_per_second": 5.135,
"eval_steps_per_second": 0.642,
"step": 40000
}
],
"logging_steps": 100,
"max_steps": 45496,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"total_flos": 1.403599875735552e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}