gokuls — End of training (commit e569b2c)
{
"best_metric": 5.527246952056885,
"best_model_checkpoint": "bert_12_layer_model_v2_complete_training_new/checkpoint-110000",
"epoch": 0.9261252847007161,
"global_step": 113041,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 5.000000000000001e-07,
"loss": 10.0453,
"step": 500
},
{
"epoch": 0.01,
"learning_rate": 1.0000000000000002e-06,
"loss": 9.2552,
"step": 1000
},
{
"epoch": 0.01,
"learning_rate": 1.5e-06,
"loss": 8.8536,
"step": 1500
},
{
"epoch": 0.02,
"learning_rate": 2.0000000000000003e-06,
"loss": 8.3871,
"step": 2000
},
{
"epoch": 0.02,
"learning_rate": 2.5e-06,
"loss": 7.8881,
"step": 2500
},
{
"epoch": 0.02,
"learning_rate": 3e-06,
"loss": 7.478,
"step": 3000
},
{
"epoch": 0.03,
"learning_rate": 3.5e-06,
"loss": 7.2099,
"step": 3500
},
{
"epoch": 0.03,
"learning_rate": 4.000000000000001e-06,
"loss": 7.0705,
"step": 4000
},
{
"epoch": 0.04,
"learning_rate": 4.5e-06,
"loss": 6.9683,
"step": 4500
},
{
"epoch": 0.04,
"learning_rate": 5e-06,
"loss": 6.9046,
"step": 5000
},
{
"epoch": 0.05,
"learning_rate": 5.500000000000001e-06,
"loss": 6.8445,
"step": 5500
},
{
"epoch": 0.05,
"learning_rate": 6e-06,
"loss": 6.7982,
"step": 6000
},
{
"epoch": 0.05,
"learning_rate": 6.5000000000000004e-06,
"loss": 6.7663,
"step": 6500
},
{
"epoch": 0.06,
"learning_rate": 7e-06,
"loss": 6.7302,
"step": 7000
},
{
"epoch": 0.06,
"learning_rate": 7.500000000000001e-06,
"loss": 6.7009,
"step": 7500
},
{
"epoch": 0.07,
"learning_rate": 8.000000000000001e-06,
"loss": 6.6649,
"step": 8000
},
{
"epoch": 0.07,
"learning_rate": 8.5e-06,
"loss": 6.6373,
"step": 8500
},
{
"epoch": 0.07,
"learning_rate": 9e-06,
"loss": 6.6173,
"step": 9000
},
{
"epoch": 0.08,
"learning_rate": 9.5e-06,
"loss": 6.593,
"step": 9500
},
{
"epoch": 0.08,
"learning_rate": 1e-05,
"loss": 6.5761,
"step": 10000
},
{
"epoch": 0.08,
"eval_accuracy": 0.1269358790943153,
"eval_loss": 6.540441989898682,
"eval_runtime": 1752.9564,
"eval_samples_per_second": 175.906,
"eval_steps_per_second": 3.665,
"step": 10000
},
{
"epoch": 0.09,
"learning_rate": 9.991670692498626e-06,
"loss": 6.5543,
"step": 10500
},
{
"epoch": 0.09,
"learning_rate": 9.983341384997253e-06,
"loss": 6.5419,
"step": 11000
},
{
"epoch": 0.09,
"learning_rate": 9.975012077495878e-06,
"loss": 6.519,
"step": 11500
},
{
"epoch": 0.1,
"learning_rate": 9.966682769994504e-06,
"loss": 6.5036,
"step": 12000
},
{
"epoch": 0.1,
"learning_rate": 9.95835346249313e-06,
"loss": 6.4786,
"step": 12500
},
{
"epoch": 0.11,
"learning_rate": 9.950024154991756e-06,
"loss": 6.4681,
"step": 13000
},
{
"epoch": 0.11,
"learning_rate": 9.941694847490381e-06,
"loss": 6.4575,
"step": 13500
},
{
"epoch": 0.11,
"learning_rate": 9.933365539989006e-06,
"loss": 6.4456,
"step": 14000
},
{
"epoch": 0.12,
"learning_rate": 9.925036232487631e-06,
"loss": 6.4258,
"step": 14500
},
{
"epoch": 0.12,
"learning_rate": 9.916706924986258e-06,
"loss": 6.4151,
"step": 15000
},
{
"epoch": 0.13,
"learning_rate": 9.908377617484883e-06,
"loss": 6.3973,
"step": 15500
},
{
"epoch": 0.13,
"learning_rate": 9.90004830998351e-06,
"loss": 6.3957,
"step": 16000
},
{
"epoch": 0.14,
"learning_rate": 9.891719002482134e-06,
"loss": 6.3917,
"step": 16500
},
{
"epoch": 0.14,
"learning_rate": 9.883389694980761e-06,
"loss": 6.3716,
"step": 17000
},
{
"epoch": 0.14,
"learning_rate": 9.875060387479386e-06,
"loss": 6.3711,
"step": 17500
},
{
"epoch": 0.15,
"learning_rate": 9.866731079978011e-06,
"loss": 6.3624,
"step": 18000
},
{
"epoch": 0.15,
"learning_rate": 9.858401772476636e-06,
"loss": 6.3595,
"step": 18500
},
{
"epoch": 0.16,
"learning_rate": 9.850072464975263e-06,
"loss": 6.3422,
"step": 19000
},
{
"epoch": 0.16,
"learning_rate": 9.84174315747389e-06,
"loss": 6.3341,
"step": 19500
},
{
"epoch": 0.16,
"learning_rate": 9.833413849972514e-06,
"loss": 6.3286,
"step": 20000
},
{
"epoch": 0.16,
"eval_accuracy": 0.14093406733876643,
"eval_loss": 6.3052825927734375,
"eval_runtime": 1753.1305,
"eval_samples_per_second": 175.889,
"eval_steps_per_second": 3.665,
"step": 20000
},
{
"epoch": 0.17,
"learning_rate": 9.82508454247114e-06,
"loss": 6.3209,
"step": 20500
},
{
"epoch": 0.17,
"learning_rate": 9.816755234969766e-06,
"loss": 6.3103,
"step": 21000
},
{
"epoch": 0.18,
"learning_rate": 9.808425927468391e-06,
"loss": 6.3148,
"step": 21500
},
{
"epoch": 0.18,
"learning_rate": 9.800096619967016e-06,
"loss": 6.2979,
"step": 22000
},
{
"epoch": 0.18,
"learning_rate": 9.791767312465643e-06,
"loss": 6.3009,
"step": 22500
},
{
"epoch": 0.19,
"learning_rate": 9.783438004964268e-06,
"loss": 6.2826,
"step": 23000
},
{
"epoch": 0.19,
"learning_rate": 9.775108697462895e-06,
"loss": 6.2787,
"step": 23500
},
{
"epoch": 0.2,
"learning_rate": 9.76677938996152e-06,
"loss": 6.2723,
"step": 24000
},
{
"epoch": 0.2,
"learning_rate": 9.758450082460145e-06,
"loss": 6.2736,
"step": 24500
},
{
"epoch": 0.2,
"learning_rate": 9.750120774958771e-06,
"loss": 6.268,
"step": 25000
},
{
"epoch": 0.21,
"learning_rate": 9.741791467457396e-06,
"loss": 6.2594,
"step": 25500
},
{
"epoch": 0.21,
"learning_rate": 9.733462159956021e-06,
"loss": 6.2566,
"step": 26000
},
{
"epoch": 0.22,
"learning_rate": 9.725132852454648e-06,
"loss": 6.2608,
"step": 26500
},
{
"epoch": 0.22,
"learning_rate": 9.716803544953275e-06,
"loss": 6.2542,
"step": 27000
},
{
"epoch": 0.23,
"learning_rate": 9.7084742374519e-06,
"loss": 6.2447,
"step": 27500
},
{
"epoch": 0.23,
"learning_rate": 9.700144929950525e-06,
"loss": 6.2426,
"step": 28000
},
{
"epoch": 0.23,
"learning_rate": 9.69181562244915e-06,
"loss": 6.241,
"step": 28500
},
{
"epoch": 0.24,
"learning_rate": 9.683486314947776e-06,
"loss": 6.2359,
"step": 29000
},
{
"epoch": 0.24,
"learning_rate": 9.675157007446401e-06,
"loss": 6.2295,
"step": 29500
},
{
"epoch": 0.25,
"learning_rate": 9.666827699945028e-06,
"loss": 6.2283,
"step": 30000
},
{
"epoch": 0.25,
"eval_accuracy": 0.144928378851592,
"eval_loss": 6.213092803955078,
"eval_runtime": 1748.8872,
"eval_samples_per_second": 176.316,
"eval_steps_per_second": 3.674,
"step": 30000
},
{
"epoch": 0.25,
"learning_rate": 9.658498392443653e-06,
"loss": 6.2209,
"step": 30500
},
{
"epoch": 0.25,
"learning_rate": 9.65016908494228e-06,
"loss": 6.2214,
"step": 31000
},
{
"epoch": 0.26,
"learning_rate": 9.641839777440905e-06,
"loss": 6.222,
"step": 31500
},
{
"epoch": 0.26,
"learning_rate": 9.63351046993953e-06,
"loss": 6.2129,
"step": 32000
},
{
"epoch": 0.27,
"learning_rate": 9.625181162438155e-06,
"loss": 6.2089,
"step": 32500
},
{
"epoch": 0.27,
"learning_rate": 9.616851854936781e-06,
"loss": 6.2053,
"step": 33000
},
{
"epoch": 0.27,
"learning_rate": 9.608522547435406e-06,
"loss": 6.2008,
"step": 33500
},
{
"epoch": 0.28,
"learning_rate": 9.600193239934033e-06,
"loss": 6.2002,
"step": 34000
},
{
"epoch": 0.28,
"learning_rate": 9.591863932432658e-06,
"loss": 6.193,
"step": 34500
},
{
"epoch": 0.29,
"learning_rate": 9.583534624931285e-06,
"loss": 6.1947,
"step": 35000
},
{
"epoch": 0.29,
"learning_rate": 9.57520531742991e-06,
"loss": 6.1998,
"step": 35500
},
{
"epoch": 0.29,
"learning_rate": 9.566876009928535e-06,
"loss": 6.1921,
"step": 36000
},
{
"epoch": 0.3,
"learning_rate": 9.55854670242716e-06,
"loss": 6.1891,
"step": 36500
},
{
"epoch": 0.3,
"learning_rate": 9.550217394925787e-06,
"loss": 6.1855,
"step": 37000
},
{
"epoch": 0.31,
"learning_rate": 9.541888087424413e-06,
"loss": 6.1826,
"step": 37500
},
{
"epoch": 0.31,
"learning_rate": 9.533558779923038e-06,
"loss": 6.1716,
"step": 38000
},
{
"epoch": 0.32,
"learning_rate": 9.525229472421663e-06,
"loss": 6.1749,
"step": 38500
},
{
"epoch": 0.32,
"learning_rate": 9.51690016492029e-06,
"loss": 6.1675,
"step": 39000
},
{
"epoch": 0.32,
"learning_rate": 9.508570857418915e-06,
"loss": 6.1713,
"step": 39500
},
{
"epoch": 0.33,
"learning_rate": 9.50024154991754e-06,
"loss": 6.1756,
"step": 40000
},
{
"epoch": 0.33,
"eval_accuracy": 0.14781151384153332,
"eval_loss": 6.153561115264893,
"eval_runtime": 1756.6577,
"eval_samples_per_second": 175.536,
"eval_steps_per_second": 3.658,
"step": 40000
},
{
"epoch": 0.33,
"learning_rate": 9.491912242416167e-06,
"loss": 6.1657,
"step": 40500
},
{
"epoch": 0.34,
"learning_rate": 9.483582934914792e-06,
"loss": 6.1645,
"step": 41000
},
{
"epoch": 0.34,
"learning_rate": 9.475253627413418e-06,
"loss": 6.1614,
"step": 41500
},
{
"epoch": 0.34,
"learning_rate": 9.466924319912043e-06,
"loss": 6.1646,
"step": 42000
},
{
"epoch": 0.35,
"learning_rate": 9.458595012410668e-06,
"loss": 6.1524,
"step": 42500
},
{
"epoch": 0.35,
"learning_rate": 9.450265704909295e-06,
"loss": 6.1514,
"step": 43000
},
{
"epoch": 0.36,
"learning_rate": 9.44193639740792e-06,
"loss": 6.1478,
"step": 43500
},
{
"epoch": 0.36,
"learning_rate": 9.433607089906545e-06,
"loss": 6.1511,
"step": 44000
},
{
"epoch": 0.36,
"learning_rate": 9.425277782405172e-06,
"loss": 6.1472,
"step": 44500
},
{
"epoch": 0.37,
"learning_rate": 9.416948474903798e-06,
"loss": 6.1451,
"step": 45000
},
{
"epoch": 0.37,
"learning_rate": 9.408619167402423e-06,
"loss": 6.1472,
"step": 45500
},
{
"epoch": 0.38,
"learning_rate": 9.400289859901048e-06,
"loss": 6.1363,
"step": 46000
},
{
"epoch": 0.38,
"learning_rate": 9.391960552399673e-06,
"loss": 6.1363,
"step": 46500
},
{
"epoch": 0.39,
"learning_rate": 9.3836312448983e-06,
"loss": 6.1308,
"step": 47000
},
{
"epoch": 0.39,
"learning_rate": 9.375301937396925e-06,
"loss": 6.1324,
"step": 47500
},
{
"epoch": 0.39,
"learning_rate": 9.366972629895552e-06,
"loss": 6.1333,
"step": 48000
},
{
"epoch": 0.4,
"learning_rate": 9.358643322394177e-06,
"loss": 6.1275,
"step": 48500
},
{
"epoch": 0.4,
"learning_rate": 9.350314014892804e-06,
"loss": 6.1279,
"step": 49000
},
{
"epoch": 0.41,
"learning_rate": 9.341984707391429e-06,
"loss": 6.1283,
"step": 49500
},
{
"epoch": 0.41,
"learning_rate": 9.333655399890054e-06,
"loss": 6.1292,
"step": 50000
},
{
"epoch": 0.41,
"eval_accuracy": 0.14872839764273707,
"eval_loss": 6.118557453155518,
"eval_runtime": 1752.9311,
"eval_samples_per_second": 175.909,
"eval_steps_per_second": 3.665,
"step": 50000
},
{
"epoch": 0.41,
"learning_rate": 9.325326092388679e-06,
"loss": 6.1208,
"step": 50500
},
{
"epoch": 0.42,
"learning_rate": 9.316996784887305e-06,
"loss": 6.1215,
"step": 51000
},
{
"epoch": 0.42,
"learning_rate": 9.30866747738593e-06,
"loss": 6.1252,
"step": 51500
},
{
"epoch": 0.43,
"learning_rate": 9.300338169884557e-06,
"loss": 6.12,
"step": 52000
},
{
"epoch": 0.43,
"learning_rate": 9.292008862383182e-06,
"loss": 6.1161,
"step": 52500
},
{
"epoch": 0.43,
"learning_rate": 9.283679554881809e-06,
"loss": 6.12,
"step": 53000
},
{
"epoch": 0.44,
"learning_rate": 9.275350247380434e-06,
"loss": 6.1132,
"step": 53500
},
{
"epoch": 0.44,
"learning_rate": 9.267020939879059e-06,
"loss": 6.1154,
"step": 54000
},
{
"epoch": 0.45,
"learning_rate": 9.258691632377685e-06,
"loss": 6.1148,
"step": 54500
},
{
"epoch": 0.45,
"learning_rate": 9.25036232487631e-06,
"loss": 6.1065,
"step": 55000
},
{
"epoch": 0.45,
"learning_rate": 9.242033017374937e-06,
"loss": 6.1141,
"step": 55500
},
{
"epoch": 0.46,
"learning_rate": 9.233703709873562e-06,
"loss": 6.1072,
"step": 56000
},
{
"epoch": 0.46,
"learning_rate": 9.225374402372189e-06,
"loss": 6.1032,
"step": 56500
},
{
"epoch": 0.47,
"learning_rate": 9.217045094870814e-06,
"loss": 6.0943,
"step": 57000
},
{
"epoch": 0.47,
"learning_rate": 9.208715787369439e-06,
"loss": 6.1058,
"step": 57500
},
{
"epoch": 0.48,
"learning_rate": 9.200386479868064e-06,
"loss": 6.1061,
"step": 58000
},
{
"epoch": 0.48,
"learning_rate": 9.19205717236669e-06,
"loss": 6.1024,
"step": 58500
},
{
"epoch": 0.48,
"learning_rate": 9.183727864865315e-06,
"loss": 6.0978,
"step": 59000
},
{
"epoch": 0.49,
"learning_rate": 9.175398557363942e-06,
"loss": 6.1035,
"step": 59500
},
{
"epoch": 0.49,
"learning_rate": 9.167069249862567e-06,
"loss": 6.1008,
"step": 60000
},
{
"epoch": 0.49,
"eval_accuracy": 0.1493834597831717,
"eval_loss": 6.084464073181152,
"eval_runtime": 1754.0872,
"eval_samples_per_second": 175.793,
"eval_steps_per_second": 3.663,
"step": 60000
},
{
"epoch": 0.5,
"learning_rate": 9.158739942361194e-06,
"loss": 6.0913,
"step": 60500
},
{
"epoch": 0.5,
"learning_rate": 9.150410634859819e-06,
"loss": 6.0896,
"step": 61000
},
{
"epoch": 0.5,
"learning_rate": 9.142081327358444e-06,
"loss": 6.0932,
"step": 61500
},
{
"epoch": 0.51,
"learning_rate": 9.133752019857069e-06,
"loss": 6.0843,
"step": 62000
},
{
"epoch": 0.51,
"learning_rate": 9.125422712355696e-06,
"loss": 6.0898,
"step": 62500
},
{
"epoch": 0.52,
"learning_rate": 9.11709340485432e-06,
"loss": 6.0904,
"step": 63000
},
{
"epoch": 0.52,
"learning_rate": 9.108764097352947e-06,
"loss": 6.0793,
"step": 63500
},
{
"epoch": 0.52,
"learning_rate": 9.100434789851572e-06,
"loss": 6.0804,
"step": 64000
},
{
"epoch": 0.53,
"learning_rate": 9.092105482350199e-06,
"loss": 6.0831,
"step": 64500
},
{
"epoch": 0.53,
"learning_rate": 9.083776174848824e-06,
"loss": 6.0815,
"step": 65000
},
{
"epoch": 0.54,
"learning_rate": 9.075446867347449e-06,
"loss": 6.0844,
"step": 65500
},
{
"epoch": 0.54,
"learning_rate": 9.067117559846076e-06,
"loss": 6.0856,
"step": 66000
},
{
"epoch": 0.54,
"learning_rate": 9.0587882523447e-06,
"loss": 6.0749,
"step": 66500
},
{
"epoch": 0.55,
"learning_rate": 9.050458944843327e-06,
"loss": 6.0756,
"step": 67000
},
{
"epoch": 0.55,
"learning_rate": 9.042129637341952e-06,
"loss": 6.0746,
"step": 67500
},
{
"epoch": 0.56,
"learning_rate": 9.033800329840577e-06,
"loss": 6.08,
"step": 68000
},
{
"epoch": 0.56,
"learning_rate": 9.025471022339204e-06,
"loss": 6.0764,
"step": 68500
},
{
"epoch": 0.57,
"learning_rate": 9.017141714837829e-06,
"loss": 6.0726,
"step": 69000
},
{
"epoch": 0.57,
"learning_rate": 9.008812407336454e-06,
"loss": 6.0764,
"step": 69500
},
{
"epoch": 0.57,
"learning_rate": 9.00048309983508e-06,
"loss": 6.0718,
"step": 70000
},
{
"epoch": 0.57,
"eval_accuracy": 0.15036471447814498,
"eval_loss": 6.060632228851318,
"eval_runtime": 1753.9554,
"eval_samples_per_second": 175.806,
"eval_steps_per_second": 3.663,
"step": 70000
},
{
"epoch": 0.58,
"learning_rate": 8.992153792333706e-06,
"loss": 6.0666,
"step": 70500
},
{
"epoch": 0.58,
"learning_rate": 8.983824484832332e-06,
"loss": 6.06,
"step": 71000
},
{
"epoch": 0.59,
"learning_rate": 8.975495177330957e-06,
"loss": 6.0683,
"step": 71500
},
{
"epoch": 0.59,
"learning_rate": 8.967165869829582e-06,
"loss": 6.0636,
"step": 72000
},
{
"epoch": 0.59,
"learning_rate": 8.958836562328209e-06,
"loss": 6.0573,
"step": 72500
},
{
"epoch": 0.6,
"learning_rate": 8.950507254826834e-06,
"loss": 6.0455,
"step": 73000
},
{
"epoch": 0.6,
"learning_rate": 8.942177947325459e-06,
"loss": 6.0425,
"step": 73500
},
{
"epoch": 0.61,
"learning_rate": 8.933848639824086e-06,
"loss": 6.0225,
"step": 74000
},
{
"epoch": 0.61,
"learning_rate": 8.925519332322713e-06,
"loss": 6.0083,
"step": 74500
},
{
"epoch": 0.61,
"learning_rate": 8.917190024821338e-06,
"loss": 6.0018,
"step": 75000
},
{
"epoch": 0.62,
"learning_rate": 8.908860717319963e-06,
"loss": 5.9809,
"step": 75500
},
{
"epoch": 0.62,
"learning_rate": 8.900531409818588e-06,
"loss": 5.9722,
"step": 76000
},
{
"epoch": 0.63,
"learning_rate": 8.892202102317214e-06,
"loss": 5.9552,
"step": 76500
},
{
"epoch": 0.63,
"learning_rate": 8.88387279481584e-06,
"loss": 5.9461,
"step": 77000
},
{
"epoch": 0.63,
"learning_rate": 8.875543487314466e-06,
"loss": 5.9339,
"step": 77500
},
{
"epoch": 0.64,
"learning_rate": 8.867214179813091e-06,
"loss": 5.925,
"step": 78000
},
{
"epoch": 0.64,
"learning_rate": 8.858884872311718e-06,
"loss": 5.9211,
"step": 78500
},
{
"epoch": 0.65,
"learning_rate": 8.850555564810343e-06,
"loss": 5.9101,
"step": 79000
},
{
"epoch": 0.65,
"learning_rate": 8.842226257308968e-06,
"loss": 5.9076,
"step": 79500
},
{
"epoch": 0.66,
"learning_rate": 8.833896949807593e-06,
"loss": 5.9008,
"step": 80000
},
{
"epoch": 0.66,
"eval_accuracy": 0.15781682199112446,
"eval_loss": 5.865474224090576,
"eval_runtime": 1753.9735,
"eval_samples_per_second": 175.804,
"eval_steps_per_second": 3.663,
"step": 80000
},
{
"epoch": 0.66,
"learning_rate": 8.82556764230622e-06,
"loss": 5.8925,
"step": 80500
},
{
"epoch": 0.66,
"learning_rate": 8.817238334804844e-06,
"loss": 5.8869,
"step": 81000
},
{
"epoch": 0.67,
"learning_rate": 8.808909027303471e-06,
"loss": 5.877,
"step": 81500
},
{
"epoch": 0.67,
"learning_rate": 8.800579719802096e-06,
"loss": 5.8749,
"step": 82000
},
{
"epoch": 0.68,
"learning_rate": 8.792250412300723e-06,
"loss": 5.8628,
"step": 82500
},
{
"epoch": 0.68,
"learning_rate": 8.783921104799348e-06,
"loss": 5.867,
"step": 83000
},
{
"epoch": 0.68,
"learning_rate": 8.775591797297973e-06,
"loss": 5.8569,
"step": 83500
},
{
"epoch": 0.69,
"learning_rate": 8.767262489796598e-06,
"loss": 5.8513,
"step": 84000
},
{
"epoch": 0.69,
"learning_rate": 8.758933182295224e-06,
"loss": 5.8423,
"step": 84500
},
{
"epoch": 0.7,
"learning_rate": 8.750603874793851e-06,
"loss": 5.8422,
"step": 85000
},
{
"epoch": 0.7,
"learning_rate": 8.742274567292476e-06,
"loss": 5.8377,
"step": 85500
},
{
"epoch": 0.7,
"learning_rate": 8.733945259791101e-06,
"loss": 5.8294,
"step": 86000
},
{
"epoch": 0.71,
"learning_rate": 8.725615952289728e-06,
"loss": 5.8303,
"step": 86500
},
{
"epoch": 0.71,
"learning_rate": 8.717286644788353e-06,
"loss": 5.8235,
"step": 87000
},
{
"epoch": 0.72,
"learning_rate": 8.708957337286978e-06,
"loss": 5.8223,
"step": 87500
},
{
"epoch": 0.72,
"learning_rate": 8.700628029785605e-06,
"loss": 5.8133,
"step": 88000
},
{
"epoch": 0.73,
"learning_rate": 8.69229872228423e-06,
"loss": 5.8091,
"step": 88500
},
{
"epoch": 0.73,
"learning_rate": 8.683969414782856e-06,
"loss": 5.8065,
"step": 89000
},
{
"epoch": 0.73,
"learning_rate": 8.675640107281481e-06,
"loss": 5.7994,
"step": 89500
},
{
"epoch": 0.74,
"learning_rate": 8.667310799780106e-06,
"loss": 5.797,
"step": 90000
},
{
"epoch": 0.74,
"eval_accuracy": 0.16953206210188038,
"eval_loss": 5.756062984466553,
"eval_runtime": 1753.1019,
"eval_samples_per_second": 175.892,
"eval_steps_per_second": 3.665,
"step": 90000
},
{
"epoch": 0.74,
"learning_rate": 8.658981492278733e-06,
"loss": 5.7871,
"step": 90500
},
{
"epoch": 0.75,
"learning_rate": 8.650652184777358e-06,
"loss": 5.7853,
"step": 91000
},
{
"epoch": 0.75,
"learning_rate": 8.642322877275983e-06,
"loss": 5.7789,
"step": 91500
},
{
"epoch": 0.75,
"learning_rate": 8.63399356977461e-06,
"loss": 5.7855,
"step": 92000
},
{
"epoch": 0.76,
"learning_rate": 8.625664262273236e-06,
"loss": 5.7698,
"step": 92500
},
{
"epoch": 0.76,
"learning_rate": 8.617334954771861e-06,
"loss": 5.7685,
"step": 93000
},
{
"epoch": 0.77,
"learning_rate": 8.609005647270486e-06,
"loss": 5.759,
"step": 93500
},
{
"epoch": 0.77,
"learning_rate": 8.600676339769111e-06,
"loss": 5.7529,
"step": 94000
},
{
"epoch": 0.77,
"learning_rate": 8.592347032267738e-06,
"loss": 5.7574,
"step": 94500
},
{
"epoch": 0.78,
"learning_rate": 8.584017724766363e-06,
"loss": 5.7455,
"step": 95000
},
{
"epoch": 0.78,
"learning_rate": 8.57568841726499e-06,
"loss": 5.7458,
"step": 95500
},
{
"epoch": 0.79,
"learning_rate": 8.567359109763615e-06,
"loss": 5.743,
"step": 96000
},
{
"epoch": 0.79,
"learning_rate": 8.559029802262241e-06,
"loss": 5.736,
"step": 96500
},
{
"epoch": 0.79,
"learning_rate": 8.550700494760866e-06,
"loss": 5.7298,
"step": 97000
},
{
"epoch": 0.8,
"learning_rate": 8.542371187259491e-06,
"loss": 5.724,
"step": 97500
},
{
"epoch": 0.8,
"learning_rate": 8.534041879758118e-06,
"loss": 5.7211,
"step": 98000
},
{
"epoch": 0.81,
"learning_rate": 8.525712572256743e-06,
"loss": 5.7189,
"step": 98500
},
{
"epoch": 0.81,
"learning_rate": 8.517383264755368e-06,
"loss": 5.7113,
"step": 99000
},
{
"epoch": 0.82,
"learning_rate": 8.509053957253995e-06,
"loss": 5.7023,
"step": 99500
},
{
"epoch": 0.82,
"learning_rate": 8.500724649752622e-06,
"loss": 5.6959,
"step": 100000
},
{
"epoch": 0.82,
"eval_accuracy": 0.1831854186417631,
"eval_loss": 5.644111156463623,
"eval_runtime": 1754.1717,
"eval_samples_per_second": 175.784,
"eval_steps_per_second": 3.663,
"step": 100000
},
{
"epoch": 0.82,
"learning_rate": 8.492395342251247e-06,
"loss": 5.6932,
"step": 100500
},
{
"epoch": 0.83,
"learning_rate": 8.484066034749872e-06,
"loss": 5.685,
"step": 101000
},
{
"epoch": 0.83,
"learning_rate": 8.475736727248497e-06,
"loss": 5.6861,
"step": 101500
},
{
"epoch": 0.84,
"learning_rate": 8.467407419747123e-06,
"loss": 5.6767,
"step": 102000
},
{
"epoch": 0.84,
"learning_rate": 8.459078112245748e-06,
"loss": 5.6742,
"step": 102500
},
{
"epoch": 0.84,
"learning_rate": 8.450748804744375e-06,
"loss": 5.6718,
"step": 103000
},
{
"epoch": 0.85,
"learning_rate": 8.442419497243e-06,
"loss": 5.6643,
"step": 103500
},
{
"epoch": 0.85,
"learning_rate": 8.434090189741627e-06,
"loss": 5.6555,
"step": 104000
},
{
"epoch": 0.86,
"learning_rate": 8.425760882240252e-06,
"loss": 5.6618,
"step": 104500
},
{
"epoch": 0.86,
"learning_rate": 8.417431574738877e-06,
"loss": 5.6493,
"step": 105000
},
{
"epoch": 0.86,
"learning_rate": 8.409102267237502e-06,
"loss": 5.6511,
"step": 105500
},
{
"epoch": 0.87,
"learning_rate": 8.400772959736128e-06,
"loss": 5.6387,
"step": 106000
},
{
"epoch": 0.87,
"learning_rate": 8.392443652234753e-06,
"loss": 5.6329,
"step": 106500
},
{
"epoch": 0.88,
"learning_rate": 8.38411434473338e-06,
"loss": 5.6242,
"step": 107000
},
{
"epoch": 0.88,
"learning_rate": 8.375785037232005e-06,
"loss": 5.6202,
"step": 107500
},
{
"epoch": 0.88,
"learning_rate": 8.367455729730632e-06,
"loss": 5.6192,
"step": 108000
},
{
"epoch": 0.89,
"learning_rate": 8.359126422229257e-06,
"loss": 5.61,
"step": 108500
},
{
"epoch": 0.89,
"learning_rate": 8.350797114727882e-06,
"loss": 5.6059,
"step": 109000
},
{
"epoch": 0.9,
"learning_rate": 8.342467807226507e-06,
"loss": 5.6028,
"step": 109500
},
{
"epoch": 0.9,
"learning_rate": 8.334138499725133e-06,
"loss": 5.5955,
"step": 110000
},
{
"epoch": 0.9,
"eval_accuracy": 0.19825414875990727,
"eval_loss": 5.527246952056885,
"eval_runtime": 1752.8914,
"eval_samples_per_second": 175.913,
"eval_steps_per_second": 3.665,
"step": 110000
},
{
"epoch": 0.91,
"learning_rate": 8.32580919222376e-06,
"loss": 5.5958,
"step": 110500
},
{
"epoch": 0.91,
"learning_rate": 8.317479884722385e-06,
"loss": 5.5888,
"step": 111000
},
{
"epoch": 0.91,
"learning_rate": 8.30915057722101e-06,
"loss": 5.5827,
"step": 111500
},
{
"epoch": 0.92,
"learning_rate": 8.300821269719637e-06,
"loss": 5.5774,
"step": 112000
},
{
"epoch": 0.92,
"learning_rate": 8.292491962218262e-06,
"loss": 5.5786,
"step": 112500
},
{
"epoch": 0.93,
"learning_rate": 8.284162654716887e-06,
"loss": 5.5685,
"step": 113000
},
{
"epoch": 0.93,
"step": 113041,
"total_flos": 1.5885720743544095e+18,
"train_loss": 6.147486028159083,
"train_runtime": 107984.4242,
"train_samples_per_second": 271.278,
"train_steps_per_second": 5.652
}
],
"max_steps": 610290,
"num_train_epochs": 5,
"total_flos": 1.5885720743544095e+18,
"trial_name": null,
"trial_params": null
}
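
For reference, a minimal sketch of how the log above can be inspected programmatically, assuming it is saved locally as trainer_state.json (the filename is an assumption; it matches what the Hugging Face Trainer writes alongside each checkpoint). Only the Python standard library is used.

import json

# Load the trainer state dumped above (the path is an assumption).
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training-loss entries and evaluation entries;
# evaluation entries are the ones carrying an "eval_loss" key.
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval loss: {state['best_metric']:.4f} "
      f"(checkpoint: {state['best_model_checkpoint']})")

# Print the evaluation curve: step, eval loss, and eval accuracy.
for e in eval_logs:
    print(f"step {e['step']:>6}  eval_loss={e['eval_loss']:.4f}  "
          f"eval_accuracy={e['eval_accuracy']:.4f}")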