idefics2_ft_augmented_dataset / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.1695906432748537,
"eval_steps": 100,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005847953216374269,
"grad_norm": 19.25,
"learning_rate": 2.5e-05,
"loss": 1.3094,
"step": 25
},
{
"epoch": 0.011695906432748537,
"grad_norm": 20.515625,
"learning_rate": 2.4874371859296484e-05,
"loss": 0.6781,
"step": 50
},
{
"epoch": 0.017543859649122806,
"grad_norm": 9.015625,
"learning_rate": 2.4748743718592964e-05,
"loss": 0.6127,
"step": 75
},
{
"epoch": 0.023391812865497075,
"grad_norm": 9.4765625,
"learning_rate": 2.462311557788945e-05,
"loss": 0.6338,
"step": 100
},
{
"epoch": 0.023391812865497075,
"eval_loss": 0.6474161744117737,
"eval_runtime": 125.9492,
"eval_samples_per_second": 3.573,
"eval_steps_per_second": 0.453,
"step": 100
},
{
"epoch": 0.029239766081871343,
"grad_norm": 16.71875,
"learning_rate": 2.449748743718593e-05,
"loss": 0.506,
"step": 125
},
{
"epoch": 0.03508771929824561,
"grad_norm": 21.125,
"learning_rate": 2.4371859296482413e-05,
"loss": 0.501,
"step": 150
},
{
"epoch": 0.04093567251461988,
"grad_norm": 17.953125,
"learning_rate": 2.4246231155778896e-05,
"loss": 0.6208,
"step": 175
},
{
"epoch": 0.04678362573099415,
"grad_norm": 7.9609375,
"learning_rate": 2.4120603015075376e-05,
"loss": 0.5748,
"step": 200
},
{
"epoch": 0.04678362573099415,
"eval_loss": 0.5898649096488953,
"eval_runtime": 129.2682,
"eval_samples_per_second": 3.481,
"eval_steps_per_second": 0.441,
"step": 200
},
{
"epoch": 0.05263157894736842,
"grad_norm": 7.4921875,
"learning_rate": 2.3994974874371863e-05,
"loss": 0.5821,
"step": 225
},
{
"epoch": 0.05847953216374269,
"grad_norm": 8.703125,
"learning_rate": 2.3869346733668342e-05,
"loss": 0.5764,
"step": 250
},
{
"epoch": 0.06432748538011696,
"grad_norm": 10.3828125,
"learning_rate": 2.3743718592964825e-05,
"loss": 0.5921,
"step": 275
},
{
"epoch": 0.07017543859649122,
"grad_norm": 14.90625,
"learning_rate": 2.361809045226131e-05,
"loss": 0.4678,
"step": 300
},
{
"epoch": 0.07017543859649122,
"eval_loss": 0.5602560639381409,
"eval_runtime": 129.0871,
"eval_samples_per_second": 3.486,
"eval_steps_per_second": 0.442,
"step": 300
},
{
"epoch": 0.07602339181286549,
"grad_norm": 16.6875,
"learning_rate": 2.3492462311557788e-05,
"loss": 0.5118,
"step": 325
},
{
"epoch": 0.08187134502923976,
"grad_norm": 17.921875,
"learning_rate": 2.3366834170854275e-05,
"loss": 0.5199,
"step": 350
},
{
"epoch": 0.08771929824561403,
"grad_norm": 16.5625,
"learning_rate": 2.3241206030150754e-05,
"loss": 0.4611,
"step": 375
},
{
"epoch": 0.0935672514619883,
"grad_norm": 10.5,
"learning_rate": 2.3115577889447238e-05,
"loss": 0.428,
"step": 400
},
{
"epoch": 0.0935672514619883,
"eval_loss": 0.524933397769928,
"eval_runtime": 127.1828,
"eval_samples_per_second": 3.538,
"eval_steps_per_second": 0.448,
"step": 400
},
{
"epoch": 0.09941520467836257,
"grad_norm": 11.6328125,
"learning_rate": 2.298994974874372e-05,
"loss": 0.4953,
"step": 425
},
{
"epoch": 0.10526315789473684,
"grad_norm": 5.4296875,
"learning_rate": 2.28643216080402e-05,
"loss": 0.4173,
"step": 450
},
{
"epoch": 0.1111111111111111,
"grad_norm": 8.8125,
"learning_rate": 2.2738693467336687e-05,
"loss": 0.4225,
"step": 475
},
{
"epoch": 0.11695906432748537,
"grad_norm": 8.84375,
"learning_rate": 2.2613065326633167e-05,
"loss": 0.3798,
"step": 500
},
{
"epoch": 0.11695906432748537,
"eval_loss": 0.49835124611854553,
"eval_runtime": 128.3365,
"eval_samples_per_second": 3.506,
"eval_steps_per_second": 0.444,
"step": 500
},
{
"epoch": 0.12280701754385964,
"grad_norm": 11.4609375,
"learning_rate": 2.248743718592965e-05,
"loss": 0.4386,
"step": 525
},
{
"epoch": 0.1286549707602339,
"grad_norm": 16.4375,
"learning_rate": 2.2361809045226133e-05,
"loss": 0.4537,
"step": 550
},
{
"epoch": 0.13450292397660818,
"grad_norm": 10.140625,
"learning_rate": 2.2236180904522613e-05,
"loss": 0.3514,
"step": 575
},
{
"epoch": 0.14035087719298245,
"grad_norm": 3.265625,
"learning_rate": 2.21105527638191e-05,
"loss": 0.3665,
"step": 600
},
{
"epoch": 0.14035087719298245,
"eval_loss": 0.47331658005714417,
"eval_runtime": 128.8126,
"eval_samples_per_second": 3.493,
"eval_steps_per_second": 0.443,
"step": 600
},
{
"epoch": 0.14619883040935672,
"grad_norm": 12.3046875,
"learning_rate": 2.198492462311558e-05,
"loss": 0.2409,
"step": 625
},
{
"epoch": 0.15204678362573099,
"grad_norm": 9.703125,
"learning_rate": 2.1859296482412062e-05,
"loss": 0.4553,
"step": 650
},
{
"epoch": 0.15789473684210525,
"grad_norm": 10.65625,
"learning_rate": 2.1733668341708545e-05,
"loss": 0.4242,
"step": 675
},
{
"epoch": 0.16374269005847952,
"grad_norm": 4.96484375,
"learning_rate": 2.1608040201005025e-05,
"loss": 0.4406,
"step": 700
},
{
"epoch": 0.16374269005847952,
"eval_loss": 0.45101454854011536,
"eval_runtime": 129.1251,
"eval_samples_per_second": 3.485,
"eval_steps_per_second": 0.441,
"step": 700
},
{
"epoch": 0.1695906432748538,
"grad_norm": 6.20703125,
"learning_rate": 2.1482412060301508e-05,
"loss": 0.408,
"step": 725
},
{
"epoch": 0.17543859649122806,
"grad_norm": 9.875,
"learning_rate": 2.135678391959799e-05,
"loss": 0.5067,
"step": 750
},
{
"epoch": 0.18128654970760233,
"grad_norm": 7.1875,
"learning_rate": 2.1231155778894474e-05,
"loss": 0.4338,
"step": 775
},
{
"epoch": 0.1871345029239766,
"grad_norm": 12.765625,
"learning_rate": 2.1105527638190957e-05,
"loss": 0.4723,
"step": 800
},
{
"epoch": 0.1871345029239766,
"eval_loss": 0.4244983494281769,
"eval_runtime": 127.3581,
"eval_samples_per_second": 3.533,
"eval_steps_per_second": 0.448,
"step": 800
},
{
"epoch": 0.19298245614035087,
"grad_norm": 26.03125,
"learning_rate": 2.0979899497487437e-05,
"loss": 0.4424,
"step": 825
},
{
"epoch": 0.19883040935672514,
"grad_norm": 5.234375,
"learning_rate": 2.085427135678392e-05,
"loss": 0.3599,
"step": 850
},
{
"epoch": 0.2046783625730994,
"grad_norm": 4.1875,
"learning_rate": 2.0728643216080403e-05,
"loss": 0.3399,
"step": 875
},
{
"epoch": 0.21052631578947367,
"grad_norm": 9.6640625,
"learning_rate": 2.0603015075376886e-05,
"loss": 0.4807,
"step": 900
},
{
"epoch": 0.21052631578947367,
"eval_loss": 0.41580215096473694,
"eval_runtime": 128.5483,
"eval_samples_per_second": 3.501,
"eval_steps_per_second": 0.443,
"step": 900
},
{
"epoch": 0.21637426900584794,
"grad_norm": 9.03125,
"learning_rate": 2.047738693467337e-05,
"loss": 0.3341,
"step": 925
},
{
"epoch": 0.2222222222222222,
"grad_norm": 10.3515625,
"learning_rate": 2.035175879396985e-05,
"loss": 0.4407,
"step": 950
},
{
"epoch": 0.22807017543859648,
"grad_norm": 13.046875,
"learning_rate": 2.0226130653266332e-05,
"loss": 0.391,
"step": 975
},
{
"epoch": 0.23391812865497075,
"grad_norm": 13.0859375,
"learning_rate": 2.0100502512562815e-05,
"loss": 0.4196,
"step": 1000
},
{
"epoch": 0.23391812865497075,
"eval_loss": 0.39709773659706116,
"eval_runtime": 129.6284,
"eval_samples_per_second": 3.471,
"eval_steps_per_second": 0.44,
"step": 1000
},
{
"epoch": 0.23976608187134502,
"grad_norm": 2.962890625,
"learning_rate": 1.9974874371859298e-05,
"loss": 0.3042,
"step": 1025
},
{
"epoch": 0.24561403508771928,
"grad_norm": 11.9765625,
"learning_rate": 1.984924623115578e-05,
"loss": 0.3731,
"step": 1050
},
{
"epoch": 0.25146198830409355,
"grad_norm": 5.42578125,
"learning_rate": 1.972361809045226e-05,
"loss": 0.3032,
"step": 1075
},
{
"epoch": 0.2573099415204678,
"grad_norm": 8.9140625,
"learning_rate": 1.9597989949748744e-05,
"loss": 0.3443,
"step": 1100
},
{
"epoch": 0.2573099415204678,
"eval_loss": 0.3737768530845642,
"eval_runtime": 126.3694,
"eval_samples_per_second": 3.561,
"eval_steps_per_second": 0.451,
"step": 1100
},
{
"epoch": 0.2631578947368421,
"grad_norm": 14.0625,
"learning_rate": 1.9472361809045227e-05,
"loss": 0.2804,
"step": 1125
},
{
"epoch": 0.26900584795321636,
"grad_norm": 5.19921875,
"learning_rate": 1.934673366834171e-05,
"loss": 0.3257,
"step": 1150
},
{
"epoch": 0.27485380116959063,
"grad_norm": 9.78125,
"learning_rate": 1.9221105527638193e-05,
"loss": 0.3625,
"step": 1175
},
{
"epoch": 0.2807017543859649,
"grad_norm": 12.890625,
"learning_rate": 1.9095477386934673e-05,
"loss": 0.4133,
"step": 1200
},
{
"epoch": 0.2807017543859649,
"eval_loss": 0.3631095290184021,
"eval_runtime": 125.638,
"eval_samples_per_second": 3.582,
"eval_steps_per_second": 0.454,
"step": 1200
},
{
"epoch": 0.28654970760233917,
"grad_norm": 7.71875,
"learning_rate": 1.8969849246231156e-05,
"loss": 0.5163,
"step": 1225
},
{
"epoch": 0.29239766081871343,
"grad_norm": 7.25390625,
"learning_rate": 1.884422110552764e-05,
"loss": 0.2991,
"step": 1250
},
{
"epoch": 0.2982456140350877,
"grad_norm": 6.5234375,
"learning_rate": 1.8718592964824123e-05,
"loss": 0.431,
"step": 1275
},
{
"epoch": 0.30409356725146197,
"grad_norm": 3.8515625,
"learning_rate": 1.8592964824120602e-05,
"loss": 0.2838,
"step": 1300
},
{
"epoch": 0.30409356725146197,
"eval_loss": 0.33338162302970886,
"eval_runtime": 127.3115,
"eval_samples_per_second": 3.535,
"eval_steps_per_second": 0.448,
"step": 1300
},
{
"epoch": 0.30994152046783624,
"grad_norm": 9.828125,
"learning_rate": 1.8467336683417085e-05,
"loss": 0.2653,
"step": 1325
},
{
"epoch": 0.3157894736842105,
"grad_norm": 5.34765625,
"learning_rate": 1.834170854271357e-05,
"loss": 0.2515,
"step": 1350
},
{
"epoch": 0.3216374269005848,
"grad_norm": 12.15625,
"learning_rate": 1.821608040201005e-05,
"loss": 0.2983,
"step": 1375
},
{
"epoch": 0.32748538011695905,
"grad_norm": 9.171875,
"learning_rate": 1.8090452261306535e-05,
"loss": 0.4134,
"step": 1400
},
{
"epoch": 0.32748538011695905,
"eval_loss": 0.32642024755477905,
"eval_runtime": 127.196,
"eval_samples_per_second": 3.538,
"eval_steps_per_second": 0.448,
"step": 1400
},
{
"epoch": 0.3333333333333333,
"grad_norm": 7.16015625,
"learning_rate": 1.7964824120603014e-05,
"loss": 0.3015,
"step": 1425
},
{
"epoch": 0.3391812865497076,
"grad_norm": 15.6640625,
"learning_rate": 1.7839195979899497e-05,
"loss": 0.3098,
"step": 1450
},
{
"epoch": 0.34502923976608185,
"grad_norm": 13.8984375,
"learning_rate": 1.771356783919598e-05,
"loss": 0.4022,
"step": 1475
},
{
"epoch": 0.3508771929824561,
"grad_norm": 9.90625,
"learning_rate": 1.7587939698492464e-05,
"loss": 0.2838,
"step": 1500
},
{
"epoch": 0.3508771929824561,
"eval_loss": 0.3124904930591583,
"eval_runtime": 128.6196,
"eval_samples_per_second": 3.499,
"eval_steps_per_second": 0.443,
"step": 1500
},
{
"epoch": 0.3567251461988304,
"grad_norm": 18.234375,
"learning_rate": 1.7462311557788947e-05,
"loss": 0.3792,
"step": 1525
},
{
"epoch": 0.36257309941520466,
"grad_norm": 1.482421875,
"learning_rate": 1.7336683417085427e-05,
"loss": 0.4708,
"step": 1550
},
{
"epoch": 0.3684210526315789,
"grad_norm": 10.8046875,
"learning_rate": 1.721105527638191e-05,
"loss": 0.3695,
"step": 1575
},
{
"epoch": 0.3742690058479532,
"grad_norm": 13.6328125,
"learning_rate": 1.7085427135678393e-05,
"loss": 0.275,
"step": 1600
},
{
"epoch": 0.3742690058479532,
"eval_loss": 0.294376403093338,
"eval_runtime": 125.5965,
"eval_samples_per_second": 3.583,
"eval_steps_per_second": 0.454,
"step": 1600
},
{
"epoch": 0.38011695906432746,
"grad_norm": 10.3359375,
"learning_rate": 1.6959798994974876e-05,
"loss": 0.3134,
"step": 1625
},
{
"epoch": 0.38596491228070173,
"grad_norm": 4.671875,
"learning_rate": 1.683417085427136e-05,
"loss": 0.1961,
"step": 1650
},
{
"epoch": 0.391812865497076,
"grad_norm": 3.734375,
"learning_rate": 1.670854271356784e-05,
"loss": 0.2686,
"step": 1675
},
{
"epoch": 0.39766081871345027,
"grad_norm": 11.5859375,
"learning_rate": 1.6582914572864322e-05,
"loss": 0.4141,
"step": 1700
},
{
"epoch": 0.39766081871345027,
"eval_loss": 0.2839096784591675,
"eval_runtime": 126.4222,
"eval_samples_per_second": 3.56,
"eval_steps_per_second": 0.451,
"step": 1700
},
{
"epoch": 0.40350877192982454,
"grad_norm": 12.0234375,
"learning_rate": 1.6457286432160805e-05,
"loss": 0.2948,
"step": 1725
},
{
"epoch": 0.4093567251461988,
"grad_norm": 6.44140625,
"learning_rate": 1.6331658291457288e-05,
"loss": 0.3546,
"step": 1750
},
{
"epoch": 0.4152046783625731,
"grad_norm": 7.5234375,
"learning_rate": 1.620603015075377e-05,
"loss": 0.3239,
"step": 1775
},
{
"epoch": 0.42105263157894735,
"grad_norm": 4.30078125,
"learning_rate": 1.608040201005025e-05,
"loss": 0.2498,
"step": 1800
},
{
"epoch": 0.42105263157894735,
"eval_loss": 0.2749183773994446,
"eval_runtime": 126.776,
"eval_samples_per_second": 3.55,
"eval_steps_per_second": 0.45,
"step": 1800
},
{
"epoch": 0.4269005847953216,
"grad_norm": 8.4375,
"learning_rate": 1.5954773869346734e-05,
"loss": 0.3026,
"step": 1825
},
{
"epoch": 0.4327485380116959,
"grad_norm": 8.140625,
"learning_rate": 1.5829145728643217e-05,
"loss": 0.3848,
"step": 1850
},
{
"epoch": 0.43859649122807015,
"grad_norm": 10.046875,
"learning_rate": 1.57035175879397e-05,
"loss": 0.2574,
"step": 1875
},
{
"epoch": 0.4444444444444444,
"grad_norm": 9.1640625,
"learning_rate": 1.5577889447236183e-05,
"loss": 0.2817,
"step": 1900
},
{
"epoch": 0.4444444444444444,
"eval_loss": 0.2605937719345093,
"eval_runtime": 129.5889,
"eval_samples_per_second": 3.473,
"eval_steps_per_second": 0.44,
"step": 1900
},
{
"epoch": 0.4502923976608187,
"grad_norm": 11.3828125,
"learning_rate": 1.5452261306532663e-05,
"loss": 0.376,
"step": 1925
},
{
"epoch": 0.45614035087719296,
"grad_norm": 9.6953125,
"learning_rate": 1.5326633165829146e-05,
"loss": 0.2708,
"step": 1950
},
{
"epoch": 0.4619883040935672,
"grad_norm": 10.0859375,
"learning_rate": 1.5201005025125627e-05,
"loss": 0.2509,
"step": 1975
},
{
"epoch": 0.4678362573099415,
"grad_norm": 11.4921875,
"learning_rate": 1.507537688442211e-05,
"loss": 0.2899,
"step": 2000
},
{
"epoch": 0.4678362573099415,
"eval_loss": 0.2526080906391144,
"eval_runtime": 127.4275,
"eval_samples_per_second": 3.531,
"eval_steps_per_second": 0.447,
"step": 2000
},
{
"epoch": 0.47368421052631576,
"grad_norm": 10.1796875,
"learning_rate": 1.4949748743718595e-05,
"loss": 0.3491,
"step": 2025
},
{
"epoch": 0.47953216374269003,
"grad_norm": 4.72265625,
"learning_rate": 1.4824120603015077e-05,
"loss": 0.1925,
"step": 2050
},
{
"epoch": 0.4853801169590643,
"grad_norm": 6.58203125,
"learning_rate": 1.4698492462311558e-05,
"loss": 0.2187,
"step": 2075
},
{
"epoch": 0.49122807017543857,
"grad_norm": 11.4609375,
"learning_rate": 1.457286432160804e-05,
"loss": 0.2695,
"step": 2100
},
{
"epoch": 0.49122807017543857,
"eval_loss": 0.2520677149295807,
"eval_runtime": 129.417,
"eval_samples_per_second": 3.477,
"eval_steps_per_second": 0.44,
"step": 2100
},
{
"epoch": 0.49707602339181284,
"grad_norm": 4.5,
"learning_rate": 1.4447236180904523e-05,
"loss": 0.1898,
"step": 2125
},
{
"epoch": 0.5029239766081871,
"grad_norm": 7.640625,
"learning_rate": 1.4321608040201007e-05,
"loss": 0.3,
"step": 2150
},
{
"epoch": 0.5087719298245614,
"grad_norm": 3.82421875,
"learning_rate": 1.4195979899497489e-05,
"loss": 0.2006,
"step": 2175
},
{
"epoch": 0.5146198830409356,
"grad_norm": 3.509765625,
"learning_rate": 1.407035175879397e-05,
"loss": 0.2619,
"step": 2200
},
{
"epoch": 0.5146198830409356,
"eval_loss": 0.24239127337932587,
"eval_runtime": 125.4976,
"eval_samples_per_second": 3.586,
"eval_steps_per_second": 0.454,
"step": 2200
},
{
"epoch": 0.52046783625731,
"grad_norm": 7.35546875,
"learning_rate": 1.3944723618090452e-05,
"loss": 0.3202,
"step": 2225
},
{
"epoch": 0.5263157894736842,
"grad_norm": 5.37109375,
"learning_rate": 1.3819095477386935e-05,
"loss": 0.143,
"step": 2250
},
{
"epoch": 0.5321637426900585,
"grad_norm": 13.203125,
"learning_rate": 1.369346733668342e-05,
"loss": 0.3726,
"step": 2275
},
{
"epoch": 0.5380116959064327,
"grad_norm": 7.08984375,
"learning_rate": 1.3567839195979901e-05,
"loss": 0.2238,
"step": 2300
},
{
"epoch": 0.5380116959064327,
"eval_loss": 0.2372661828994751,
"eval_runtime": 125.8271,
"eval_samples_per_second": 3.576,
"eval_steps_per_second": 0.453,
"step": 2300
},
{
"epoch": 0.543859649122807,
"grad_norm": 7.73046875,
"learning_rate": 1.3442211055276382e-05,
"loss": 0.2301,
"step": 2325
},
{
"epoch": 0.5497076023391813,
"grad_norm": 1.9326171875,
"learning_rate": 1.3316582914572864e-05,
"loss": 0.2891,
"step": 2350
},
{
"epoch": 0.5555555555555556,
"grad_norm": 5.25,
"learning_rate": 1.3190954773869347e-05,
"loss": 0.1861,
"step": 2375
},
{
"epoch": 0.5614035087719298,
"grad_norm": 15.7265625,
"learning_rate": 1.306532663316583e-05,
"loss": 0.3049,
"step": 2400
},
{
"epoch": 0.5614035087719298,
"eval_loss": 0.23007704317569733,
"eval_runtime": 125.6345,
"eval_samples_per_second": 3.582,
"eval_steps_per_second": 0.454,
"step": 2400
},
{
"epoch": 0.5672514619883041,
"grad_norm": 12.0078125,
"learning_rate": 1.2939698492462313e-05,
"loss": 0.2584,
"step": 2425
},
{
"epoch": 0.5730994152046783,
"grad_norm": 11.8125,
"learning_rate": 1.2814070351758795e-05,
"loss": 0.29,
"step": 2450
},
{
"epoch": 0.5789473684210527,
"grad_norm": 4.7578125,
"learning_rate": 1.2688442211055276e-05,
"loss": 0.2648,
"step": 2475
},
{
"epoch": 0.5847953216374269,
"grad_norm": 10.53125,
"learning_rate": 1.2562814070351759e-05,
"loss": 0.1308,
"step": 2500
},
{
"epoch": 0.5847953216374269,
"eval_loss": 0.2292058765888214,
"eval_runtime": 127.785,
"eval_samples_per_second": 3.522,
"eval_steps_per_second": 0.446,
"step": 2500
},
{
"epoch": 0.5906432748538012,
"grad_norm": 8.421875,
"learning_rate": 1.2437185929648242e-05,
"loss": 0.3249,
"step": 2525
},
{
"epoch": 0.5964912280701754,
"grad_norm": 10.8828125,
"learning_rate": 1.2311557788944725e-05,
"loss": 0.2164,
"step": 2550
},
{
"epoch": 0.6023391812865497,
"grad_norm": 7.421875,
"learning_rate": 1.2185929648241207e-05,
"loss": 0.2512,
"step": 2575
},
{
"epoch": 0.6081871345029239,
"grad_norm": 9.6171875,
"learning_rate": 1.2060301507537688e-05,
"loss": 0.1936,
"step": 2600
},
{
"epoch": 0.6081871345029239,
"eval_loss": 0.21902820467948914,
"eval_runtime": 125.3191,
"eval_samples_per_second": 3.591,
"eval_steps_per_second": 0.455,
"step": 2600
},
{
"epoch": 0.6140350877192983,
"grad_norm": 1.091796875,
"learning_rate": 1.1934673366834171e-05,
"loss": 0.189,
"step": 2625
},
{
"epoch": 0.6198830409356725,
"grad_norm": 7.83203125,
"learning_rate": 1.1809045226130654e-05,
"loss": 0.2179,
"step": 2650
},
{
"epoch": 0.6257309941520468,
"grad_norm": 8.2109375,
"learning_rate": 1.1683417085427137e-05,
"loss": 0.224,
"step": 2675
},
{
"epoch": 0.631578947368421,
"grad_norm": 5.1640625,
"learning_rate": 1.1557788944723619e-05,
"loss": 0.2479,
"step": 2700
},
{
"epoch": 0.631578947368421,
"eval_loss": 0.21907079219818115,
"eval_runtime": 126.3038,
"eval_samples_per_second": 3.563,
"eval_steps_per_second": 0.451,
"step": 2700
},
{
"epoch": 0.6374269005847953,
"grad_norm": 4.47265625,
"learning_rate": 1.14321608040201e-05,
"loss": 0.219,
"step": 2725
},
{
"epoch": 0.6432748538011696,
"grad_norm": 3.30078125,
"learning_rate": 1.1306532663316583e-05,
"loss": 0.2741,
"step": 2750
},
{
"epoch": 0.6491228070175439,
"grad_norm": 4.21484375,
"learning_rate": 1.1180904522613066e-05,
"loss": 0.1153,
"step": 2775
},
{
"epoch": 0.6549707602339181,
"grad_norm": 5.125,
"learning_rate": 1.105527638190955e-05,
"loss": 0.1575,
"step": 2800
},
{
"epoch": 0.6549707602339181,
"eval_loss": 0.2165260761976242,
"eval_runtime": 125.8096,
"eval_samples_per_second": 3.577,
"eval_steps_per_second": 0.453,
"step": 2800
},
{
"epoch": 0.6608187134502924,
"grad_norm": 0.83837890625,
"learning_rate": 1.0929648241206031e-05,
"loss": 0.2238,
"step": 2825
},
{
"epoch": 0.6666666666666666,
"grad_norm": 6.61328125,
"learning_rate": 1.0804020100502512e-05,
"loss": 0.3554,
"step": 2850
},
{
"epoch": 0.672514619883041,
"grad_norm": 12.875,
"learning_rate": 1.0678391959798995e-05,
"loss": 0.096,
"step": 2875
},
{
"epoch": 0.6783625730994152,
"grad_norm": 3.28125,
"learning_rate": 1.0552763819095479e-05,
"loss": 0.193,
"step": 2900
},
{
"epoch": 0.6783625730994152,
"eval_loss": 0.21065188944339752,
"eval_runtime": 125.6087,
"eval_samples_per_second": 3.583,
"eval_steps_per_second": 0.454,
"step": 2900
},
{
"epoch": 0.6842105263157895,
"grad_norm": 8.7265625,
"learning_rate": 1.042713567839196e-05,
"loss": 0.1936,
"step": 2925
},
{
"epoch": 0.6900584795321637,
"grad_norm": 4.0859375,
"learning_rate": 1.0301507537688443e-05,
"loss": 0.2103,
"step": 2950
},
{
"epoch": 0.695906432748538,
"grad_norm": 15.5703125,
"learning_rate": 1.0175879396984924e-05,
"loss": 0.2701,
"step": 2975
},
{
"epoch": 0.7017543859649122,
"grad_norm": 3.73046875,
"learning_rate": 1.0050251256281408e-05,
"loss": 0.2526,
"step": 3000
},
{
"epoch": 0.7017543859649122,
"eval_loss": 0.21144379675388336,
"eval_runtime": 128.1153,
"eval_samples_per_second": 3.512,
"eval_steps_per_second": 0.445,
"step": 3000
},
{
"epoch": 0.7076023391812866,
"grad_norm": 1.537109375,
"learning_rate": 9.92462311557789e-06,
"loss": 0.2087,
"step": 3025
},
{
"epoch": 0.7134502923976608,
"grad_norm": 0.55908203125,
"learning_rate": 9.798994974874372e-06,
"loss": 0.1532,
"step": 3050
},
{
"epoch": 0.7192982456140351,
"grad_norm": 9.7578125,
"learning_rate": 9.673366834170855e-06,
"loss": 0.1985,
"step": 3075
},
{
"epoch": 0.7251461988304093,
"grad_norm": 9.390625,
"learning_rate": 9.547738693467337e-06,
"loss": 0.1574,
"step": 3100
},
{
"epoch": 0.7251461988304093,
"eval_loss": 0.20868618786334991,
"eval_runtime": 129.4237,
"eval_samples_per_second": 3.477,
"eval_steps_per_second": 0.44,
"step": 3100
},
{
"epoch": 0.7309941520467836,
"grad_norm": 4.6171875,
"learning_rate": 9.42211055276382e-06,
"loss": 0.2921,
"step": 3125
},
{
"epoch": 0.7368421052631579,
"grad_norm": 10.859375,
"learning_rate": 9.296482412060301e-06,
"loss": 0.1932,
"step": 3150
},
{
"epoch": 0.7426900584795322,
"grad_norm": 5.97265625,
"learning_rate": 9.170854271356784e-06,
"loss": 0.2999,
"step": 3175
},
{
"epoch": 0.7485380116959064,
"grad_norm": 4.85546875,
"learning_rate": 9.045226130653267e-06,
"loss": 0.1989,
"step": 3200
},
{
"epoch": 0.7485380116959064,
"eval_loss": 0.20511174201965332,
"eval_runtime": 127.5209,
"eval_samples_per_second": 3.529,
"eval_steps_per_second": 0.447,
"step": 3200
},
{
"epoch": 0.7543859649122807,
"grad_norm": 8.7265625,
"learning_rate": 8.919597989949749e-06,
"loss": 0.1795,
"step": 3225
},
{
"epoch": 0.7602339181286549,
"grad_norm": 3.490234375,
"learning_rate": 8.793969849246232e-06,
"loss": 0.2444,
"step": 3250
},
{
"epoch": 0.7660818713450293,
"grad_norm": 6.671875,
"learning_rate": 8.668341708542713e-06,
"loss": 0.2775,
"step": 3275
},
{
"epoch": 0.7719298245614035,
"grad_norm": 5.1796875,
"learning_rate": 8.542713567839196e-06,
"loss": 0.1761,
"step": 3300
},
{
"epoch": 0.7719298245614035,
"eval_loss": 0.20133711397647858,
"eval_runtime": 129.2927,
"eval_samples_per_second": 3.48,
"eval_steps_per_second": 0.441,
"step": 3300
},
{
"epoch": 0.7777777777777778,
"grad_norm": 0.83935546875,
"learning_rate": 8.41708542713568e-06,
"loss": 0.216,
"step": 3325
},
{
"epoch": 0.783625730994152,
"grad_norm": 2.580078125,
"learning_rate": 8.291457286432161e-06,
"loss": 0.0873,
"step": 3350
},
{
"epoch": 0.7894736842105263,
"grad_norm": 5.87109375,
"learning_rate": 8.165829145728644e-06,
"loss": 0.1145,
"step": 3375
},
{
"epoch": 0.7953216374269005,
"grad_norm": 7.125,
"learning_rate": 8.040201005025125e-06,
"loss": 0.2223,
"step": 3400
},
{
"epoch": 0.7953216374269005,
"eval_loss": 0.19959186017513275,
"eval_runtime": 125.4382,
"eval_samples_per_second": 3.587,
"eval_steps_per_second": 0.454,
"step": 3400
},
{
"epoch": 0.8011695906432749,
"grad_norm": 3.94921875,
"learning_rate": 7.914572864321608e-06,
"loss": 0.1845,
"step": 3425
},
{
"epoch": 0.8070175438596491,
"grad_norm": 5.99609375,
"learning_rate": 7.788944723618092e-06,
"loss": 0.104,
"step": 3450
},
{
"epoch": 0.8128654970760234,
"grad_norm": 7.953125,
"learning_rate": 7.663316582914573e-06,
"loss": 0.1119,
"step": 3475
},
{
"epoch": 0.8187134502923976,
"grad_norm": 7.5859375,
"learning_rate": 7.537688442211055e-06,
"loss": 0.2127,
"step": 3500
},
{
"epoch": 0.8187134502923976,
"eval_loss": 0.1966124027967453,
"eval_runtime": 126.749,
"eval_samples_per_second": 3.55,
"eval_steps_per_second": 0.45,
"step": 3500
},
{
"epoch": 0.8245614035087719,
"grad_norm": 25.390625,
"learning_rate": 7.412060301507538e-06,
"loss": 0.1597,
"step": 3525
},
{
"epoch": 0.8304093567251462,
"grad_norm": 7.55078125,
"learning_rate": 7.28643216080402e-06,
"loss": 0.0942,
"step": 3550
},
{
"epoch": 0.8362573099415205,
"grad_norm": 6.5390625,
"learning_rate": 7.160804020100504e-06,
"loss": 0.236,
"step": 3575
},
{
"epoch": 0.8421052631578947,
"grad_norm": 9.390625,
"learning_rate": 7.035175879396985e-06,
"loss": 0.2477,
"step": 3600
},
{
"epoch": 0.8421052631578947,
"eval_loss": 0.1922728419303894,
"eval_runtime": 125.5954,
"eval_samples_per_second": 3.583,
"eval_steps_per_second": 0.454,
"step": 3600
},
{
"epoch": 0.847953216374269,
"grad_norm": 0.7783203125,
"learning_rate": 6.909547738693467e-06,
"loss": 0.3097,
"step": 3625
},
{
"epoch": 0.8538011695906432,
"grad_norm": 5.91796875,
"learning_rate": 6.7839195979899505e-06,
"loss": 0.2097,
"step": 3650
},
{
"epoch": 0.8596491228070176,
"grad_norm": 2.55078125,
"learning_rate": 6.658291457286432e-06,
"loss": 0.1837,
"step": 3675
},
{
"epoch": 0.8654970760233918,
"grad_norm": 5.1015625,
"learning_rate": 6.532663316582915e-06,
"loss": 0.1931,
"step": 3700
},
{
"epoch": 0.8654970760233918,
"eval_loss": 0.1908126324415207,
"eval_runtime": 125.264,
"eval_samples_per_second": 3.592,
"eval_steps_per_second": 0.455,
"step": 3700
},
{
"epoch": 0.8713450292397661,
"grad_norm": 1.0830078125,
"learning_rate": 6.407035175879397e-06,
"loss": 0.1688,
"step": 3725
},
{
"epoch": 0.8771929824561403,
"grad_norm": 2.021484375,
"learning_rate": 6.2814070351758795e-06,
"loss": 0.1635,
"step": 3750
},
{
"epoch": 0.8830409356725146,
"grad_norm": 24.90625,
"learning_rate": 6.155778894472363e-06,
"loss": 0.2588,
"step": 3775
},
{
"epoch": 0.8888888888888888,
"grad_norm": 4.9453125,
"learning_rate": 6.030150753768844e-06,
"loss": 0.182,
"step": 3800
},
{
"epoch": 0.8888888888888888,
"eval_loss": 0.18878790736198425,
"eval_runtime": 126.7176,
"eval_samples_per_second": 3.551,
"eval_steps_per_second": 0.45,
"step": 3800
},
{
"epoch": 0.8947368421052632,
"grad_norm": 7.25,
"learning_rate": 5.904522613065327e-06,
"loss": 0.2793,
"step": 3825
},
{
"epoch": 0.9005847953216374,
"grad_norm": 9.59375,
"learning_rate": 5.778894472361809e-06,
"loss": 0.1608,
"step": 3850
},
{
"epoch": 0.9064327485380117,
"grad_norm": 2.943359375,
"learning_rate": 5.653266331658292e-06,
"loss": 0.2136,
"step": 3875
},
{
"epoch": 0.9122807017543859,
"grad_norm": 0.48681640625,
"learning_rate": 5.527638190954775e-06,
"loss": 0.1693,
"step": 3900
},
{
"epoch": 0.9122807017543859,
"eval_loss": 0.18779730796813965,
"eval_runtime": 126.6799,
"eval_samples_per_second": 3.552,
"eval_steps_per_second": 0.45,
"step": 3900
},
{
"epoch": 0.9181286549707602,
"grad_norm": 3.2109375,
"learning_rate": 5.402010050251256e-06,
"loss": 0.0918,
"step": 3925
},
{
"epoch": 0.9239766081871345,
"grad_norm": 1.541015625,
"learning_rate": 5.276381909547739e-06,
"loss": 0.2076,
"step": 3950
},
{
"epoch": 0.9298245614035088,
"grad_norm": 4.7421875,
"learning_rate": 5.1507537688442215e-06,
"loss": 0.2429,
"step": 3975
},
{
"epoch": 0.935672514619883,
"grad_norm": 4.375,
"learning_rate": 5.025125628140704e-06,
"loss": 0.1346,
"step": 4000
},
{
"epoch": 0.935672514619883,
"eval_loss": 0.18527910113334656,
"eval_runtime": 129.3266,
"eval_samples_per_second": 3.48,
"eval_steps_per_second": 0.441,
"step": 4000
},
{
"epoch": 0.9415204678362573,
"grad_norm": 4.875,
"learning_rate": 4.899497487437186e-06,
"loss": 0.2457,
"step": 4025
},
{
"epoch": 0.9473684210526315,
"grad_norm": 3.013671875,
"learning_rate": 4.773869346733668e-06,
"loss": 0.187,
"step": 4050
},
{
"epoch": 0.9532163742690059,
"grad_norm": 4.3125,
"learning_rate": 4.6482412060301506e-06,
"loss": 0.1546,
"step": 4075
},
{
"epoch": 0.9590643274853801,
"grad_norm": 2.5,
"learning_rate": 4.522613065326634e-06,
"loss": 0.1484,
"step": 4100
},
{
"epoch": 0.9590643274853801,
"eval_loss": 0.18491099774837494,
"eval_runtime": 126.5822,
"eval_samples_per_second": 3.555,
"eval_steps_per_second": 0.45,
"step": 4100
},
{
"epoch": 0.9649122807017544,
"grad_norm": 5.03515625,
"learning_rate": 4.396984924623116e-06,
"loss": 0.1739,
"step": 4125
},
{
"epoch": 0.9707602339181286,
"grad_norm": 7.1015625,
"learning_rate": 4.271356783919598e-06,
"loss": 0.3268,
"step": 4150
},
{
"epoch": 0.9766081871345029,
"grad_norm": 0.397216796875,
"learning_rate": 4.1457286432160804e-06,
"loss": 0.2178,
"step": 4175
},
{
"epoch": 0.9824561403508771,
"grad_norm": 5.00390625,
"learning_rate": 4.020100502512563e-06,
"loss": 0.1217,
"step": 4200
},
{
"epoch": 0.9824561403508771,
"eval_loss": 0.1838068664073944,
"eval_runtime": 127.461,
"eval_samples_per_second": 3.53,
"eval_steps_per_second": 0.447,
"step": 4200
},
{
"epoch": 0.9883040935672515,
"grad_norm": 0.1112060546875,
"learning_rate": 3.894472361809046e-06,
"loss": 0.0894,
"step": 4225
},
{
"epoch": 0.9941520467836257,
"grad_norm": 1.748046875,
"learning_rate": 3.7688442211055276e-06,
"loss": 0.1733,
"step": 4250
},
{
"epoch": 1.0,
"grad_norm": 6.53125,
"learning_rate": 3.64321608040201e-06,
"loss": 0.1281,
"step": 4275
},
{
"epoch": 1.0058479532163742,
"grad_norm": 0.9111328125,
"learning_rate": 3.5175879396984926e-06,
"loss": 0.0669,
"step": 4300
},
{
"epoch": 1.0058479532163742,
"eval_loss": 0.18437370657920837,
"eval_runtime": 128.6228,
"eval_samples_per_second": 3.499,
"eval_steps_per_second": 0.443,
"step": 4300
},
{
"epoch": 1.0116959064327484,
"grad_norm": 0.486328125,
"learning_rate": 3.3919597989949752e-06,
"loss": 0.0748,
"step": 4325
},
{
"epoch": 1.0175438596491229,
"grad_norm": 7.703125,
"learning_rate": 3.2663316582914575e-06,
"loss": 0.071,
"step": 4350
},
{
"epoch": 1.023391812865497,
"grad_norm": 8.9765625,
"learning_rate": 3.1407035175879398e-06,
"loss": 0.1588,
"step": 4375
},
{
"epoch": 1.0292397660818713,
"grad_norm": 0.38427734375,
"learning_rate": 3.015075376884422e-06,
"loss": 0.1292,
"step": 4400
},
{
"epoch": 1.0292397660818713,
"eval_loss": 0.18767422437667847,
"eval_runtime": 126.9252,
"eval_samples_per_second": 3.545,
"eval_steps_per_second": 0.449,
"step": 4400
},
{
"epoch": 1.0350877192982457,
"grad_norm": 6.4375,
"learning_rate": 2.8894472361809047e-06,
"loss": 0.1419,
"step": 4425
},
{
"epoch": 1.04093567251462,
"grad_norm": 13.3515625,
"learning_rate": 2.7638190954773874e-06,
"loss": 0.084,
"step": 4450
},
{
"epoch": 1.0467836257309941,
"grad_norm": 9.203125,
"learning_rate": 2.6381909547738696e-06,
"loss": 0.072,
"step": 4475
},
{
"epoch": 1.0526315789473684,
"grad_norm": 2.439453125,
"learning_rate": 2.512562814070352e-06,
"loss": 0.1106,
"step": 4500
},
{
"epoch": 1.0526315789473684,
"eval_loss": 0.18756501376628876,
"eval_runtime": 128.4204,
"eval_samples_per_second": 3.504,
"eval_steps_per_second": 0.444,
"step": 4500
},
{
"epoch": 1.0584795321637426,
"grad_norm": 4.671875,
"learning_rate": 2.386934673366834e-06,
"loss": 0.1226,
"step": 4525
},
{
"epoch": 1.064327485380117,
"grad_norm": 2.62890625,
"learning_rate": 2.261306532663317e-06,
"loss": 0.123,
"step": 4550
},
{
"epoch": 1.0701754385964912,
"grad_norm": 16.640625,
"learning_rate": 2.135678391959799e-06,
"loss": 0.0593,
"step": 4575
},
{
"epoch": 1.0760233918128654,
"grad_norm": 6.8828125,
"learning_rate": 2.0100502512562813e-06,
"loss": 0.0828,
"step": 4600
},
{
"epoch": 1.0760233918128654,
"eval_loss": 0.1875353455543518,
"eval_runtime": 126.5483,
"eval_samples_per_second": 3.556,
"eval_steps_per_second": 0.45,
"step": 4600
},
{
"epoch": 1.0818713450292399,
"grad_norm": 4.55078125,
"learning_rate": 1.8844221105527638e-06,
"loss": 0.1481,
"step": 4625
},
{
"epoch": 1.087719298245614,
"grad_norm": 3.6796875,
"learning_rate": 1.7587939698492463e-06,
"loss": 0.1275,
"step": 4650
},
{
"epoch": 1.0935672514619883,
"grad_norm": 6.02734375,
"learning_rate": 1.6331658291457288e-06,
"loss": 0.1274,
"step": 4675
},
{
"epoch": 1.0994152046783625,
"grad_norm": 1.4111328125,
"learning_rate": 1.507537688442211e-06,
"loss": 0.0485,
"step": 4700
},
{
"epoch": 1.0994152046783625,
"eval_loss": 0.18709704279899597,
"eval_runtime": 129.1721,
"eval_samples_per_second": 3.484,
"eval_steps_per_second": 0.441,
"step": 4700
},
{
"epoch": 1.1052631578947367,
"grad_norm": 0.74853515625,
"learning_rate": 1.3819095477386937e-06,
"loss": 0.0483,
"step": 4725
},
{
"epoch": 1.1111111111111112,
"grad_norm": 1.044921875,
"learning_rate": 1.256281407035176e-06,
"loss": 0.0799,
"step": 4750
},
{
"epoch": 1.1169590643274854,
"grad_norm": 0.171142578125,
"learning_rate": 1.1306532663316584e-06,
"loss": 0.1273,
"step": 4775
},
{
"epoch": 1.1228070175438596,
"grad_norm": 0.346923828125,
"learning_rate": 1.0050251256281407e-06,
"loss": 0.0624,
"step": 4800
},
{
"epoch": 1.1228070175438596,
"eval_loss": 0.1874168962240219,
"eval_runtime": 128.2151,
"eval_samples_per_second": 3.51,
"eval_steps_per_second": 0.445,
"step": 4800
},
{
"epoch": 1.128654970760234,
"grad_norm": 0.1552734375,
"learning_rate": 8.793969849246231e-07,
"loss": 0.1017,
"step": 4825
},
{
"epoch": 1.1345029239766082,
"grad_norm": 1.619140625,
"learning_rate": 7.537688442211055e-07,
"loss": 0.1556,
"step": 4850
},
{
"epoch": 1.1403508771929824,
"grad_norm": 1.958984375,
"learning_rate": 6.28140703517588e-07,
"loss": 0.1113,
"step": 4875
},
{
"epoch": 1.1461988304093567,
"grad_norm": 5.375,
"learning_rate": 5.025125628140703e-07,
"loss": 0.0895,
"step": 4900
},
{
"epoch": 1.1461988304093567,
"eval_loss": 0.1870710253715515,
"eval_runtime": 126.862,
"eval_samples_per_second": 3.547,
"eval_steps_per_second": 0.449,
"step": 4900
},
{
"epoch": 1.1520467836257309,
"grad_norm": 5.984375,
"learning_rate": 3.7688442211055275e-07,
"loss": 0.1076,
"step": 4925
},
{
"epoch": 1.1578947368421053,
"grad_norm": 1.9619140625,
"learning_rate": 2.5125628140703517e-07,
"loss": 0.1024,
"step": 4950
},
{
"epoch": 1.1637426900584795,
"grad_norm": 6.17578125,
"learning_rate": 1.2562814070351758e-07,
"loss": 0.0698,
"step": 4975
},
{
"epoch": 1.1695906432748537,
"grad_norm": 7.06640625,
"learning_rate": 0.0,
"loss": 0.1,
"step": 5000
},
{
"epoch": 1.1695906432748537,
"eval_loss": 0.1871223896741867,
"eval_runtime": 128.9773,
"eval_samples_per_second": 3.489,
"eval_steps_per_second": 0.442,
"step": 5000
},
{
"epoch": 1.1695906432748537,
"step": 5000,
"total_flos": 2.536648947447456e+17,
"train_loss": 0.27304353165626527,
"train_runtime": 21409.3382,
"train_samples_per_second": 0.934,
"train_steps_per_second": 0.234
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.536648947447456e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
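
The file above is the standard Hugging Face Trainer state dump: each entry in "log_history" records either a training step (loss, grad_norm, learning_rate) or an evaluation run (eval_loss plus runtime and throughput), and the trailing entry summarizes the whole run. As a minimal sketch, assuming the file is saved locally as trainer_state.json and that matplotlib is installed (the file and output names here are illustrative, not part of the original repository), the train and eval loss curves can be extracted and plotted like this:

import json
import matplotlib.pyplot as plt

# Load the Trainer state written at the end of training.
# NOTE: "trainer_state.json" is an assumed local path.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-step entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train_logs],
         [e["loss"] for e in train_logs], label="train loss")
plt.plot([e["step"] for e in eval_logs],
         [e["eval_loss"] for e in eval_logs], label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curve.png")  # assumed output filename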