{
"best_metric": 0.3196151554584503,
"best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s12000/checkpoint-6000",
"epoch": 10.052356020942408,
"eval_steps": 500,
"global_step": 6000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08376963350785341,
"grad_norm": 2.469545841217041,
"learning_rate": 2.4500000000000003e-06,
"loss": 1.0402,
"step": 50
},
{
"epoch": 0.16753926701570682,
"grad_norm": 3.3642780780792236,
"learning_rate": 4.950000000000001e-06,
"loss": 0.8472,
"step": 100
},
{
"epoch": 0.2513089005235602,
"grad_norm": 1.8375087976455688,
"learning_rate": 7.45e-06,
"loss": 0.7331,
"step": 150
},
{
"epoch": 0.33507853403141363,
"grad_norm": 3.700824737548828,
"learning_rate": 9.950000000000001e-06,
"loss": 0.6472,
"step": 200
},
{
"epoch": 0.418848167539267,
"grad_norm": 1.7632888555526733,
"learning_rate": 1.2450000000000001e-05,
"loss": 0.6112,
"step": 250
},
{
"epoch": 0.5026178010471204,
"grad_norm": 3.1861908435821533,
"learning_rate": 1.4950000000000001e-05,
"loss": 0.5865,
"step": 300
},
{
"epoch": 0.5863874345549738,
"grad_norm": 2.2845046520233154,
"learning_rate": 1.745e-05,
"loss": 0.5682,
"step": 350
},
{
"epoch": 0.6701570680628273,
"grad_norm": 2.079210042953491,
"learning_rate": 1.995e-05,
"loss": 0.5465,
"step": 400
},
{
"epoch": 0.7539267015706806,
"grad_norm": 3.669891119003296,
"learning_rate": 2.245e-05,
"loss": 0.5302,
"step": 450
},
{
"epoch": 0.837696335078534,
"grad_norm": 2.4679417610168457,
"learning_rate": 2.495e-05,
"loss": 0.5137,
"step": 500
},
{
"epoch": 0.837696335078534,
"eval_loss": 0.4513999819755554,
"eval_runtime": 277.1074,
"eval_samples_per_second": 30.634,
"eval_steps_per_second": 3.832,
"step": 500
},
{
"epoch": 0.9214659685863874,
"grad_norm": 2.703871726989746,
"learning_rate": 2.7450000000000003e-05,
"loss": 0.5071,
"step": 550
},
{
"epoch": 1.0052356020942408,
"grad_norm": 3.8184635639190674,
"learning_rate": 2.995e-05,
"loss": 0.4971,
"step": 600
},
{
"epoch": 1.0890052356020943,
"grad_norm": 2.2857866287231445,
"learning_rate": 3.245e-05,
"loss": 0.4955,
"step": 650
},
{
"epoch": 1.1727748691099475,
"grad_norm": 3.5974085330963135,
"learning_rate": 3.495e-05,
"loss": 0.4935,
"step": 700
},
{
"epoch": 1.256544502617801,
"grad_norm": 1.720818281173706,
"learning_rate": 3.745e-05,
"loss": 0.4763,
"step": 750
},
{
"epoch": 1.3403141361256545,
"grad_norm": 3.080139636993408,
"learning_rate": 3.995e-05,
"loss": 0.4735,
"step": 800
},
{
"epoch": 1.4240837696335078,
"grad_norm": 4.877579212188721,
"learning_rate": 4.245e-05,
"loss": 0.4654,
"step": 850
},
{
"epoch": 1.5078534031413613,
"grad_norm": 3.383965253829956,
"learning_rate": 4.495e-05,
"loss": 0.4628,
"step": 900
},
{
"epoch": 1.5916230366492146,
"grad_norm": 3.3636982440948486,
"learning_rate": 4.745e-05,
"loss": 0.4541,
"step": 950
},
{
"epoch": 1.675392670157068,
"grad_norm": 1.666568398475647,
"learning_rate": 4.995e-05,
"loss": 0.4565,
"step": 1000
},
{
"epoch": 1.675392670157068,
"eval_loss": 0.4093586802482605,
"eval_runtime": 269.4005,
"eval_samples_per_second": 31.511,
"eval_steps_per_second": 3.942,
"step": 1000
},
{
"epoch": 1.7591623036649215,
"grad_norm": 1.6522510051727295,
"learning_rate": 5.245e-05,
"loss": 0.4541,
"step": 1050
},
{
"epoch": 1.8429319371727748,
"grad_norm": 1.6531606912612915,
"learning_rate": 5.495e-05,
"loss": 0.4448,
"step": 1100
},
{
"epoch": 1.9267015706806283,
"grad_norm": 3.3253750801086426,
"learning_rate": 5.745e-05,
"loss": 0.4346,
"step": 1150
},
{
"epoch": 2.0104712041884816,
"grad_norm": 2.0393073558807373,
"learning_rate": 5.995000000000001e-05,
"loss": 0.4314,
"step": 1200
},
{
"epoch": 2.094240837696335,
"grad_norm": 1.906546950340271,
"learning_rate": 6.245000000000001e-05,
"loss": 0.4327,
"step": 1250
},
{
"epoch": 2.1780104712041886,
"grad_norm": 1.7925021648406982,
"learning_rate": 6.494999999999999e-05,
"loss": 0.4285,
"step": 1300
},
{
"epoch": 2.261780104712042,
"grad_norm": 2.5238988399505615,
"learning_rate": 6.745e-05,
"loss": 0.4251,
"step": 1350
},
{
"epoch": 2.345549738219895,
"grad_norm": 2.53450345993042,
"learning_rate": 6.995e-05,
"loss": 0.4284,
"step": 1400
},
{
"epoch": 2.4293193717277486,
"grad_norm": 1.9077616930007935,
"learning_rate": 7.245000000000001e-05,
"loss": 0.4244,
"step": 1450
},
{
"epoch": 2.513089005235602,
"grad_norm": 1.5720113515853882,
"learning_rate": 7.495e-05,
"loss": 0.4171,
"step": 1500
},
{
"epoch": 2.513089005235602,
"eval_loss": 0.384281188249588,
"eval_runtime": 274.8763,
"eval_samples_per_second": 30.883,
"eval_steps_per_second": 3.864,
"step": 1500
},
{
"epoch": 2.5968586387434556,
"grad_norm": 2.272549867630005,
"learning_rate": 7.745e-05,
"loss": 0.4175,
"step": 1550
},
{
"epoch": 2.680628272251309,
"grad_norm": 1.3440821170806885,
"learning_rate": 7.995e-05,
"loss": 0.4134,
"step": 1600
},
{
"epoch": 2.7643979057591626,
"grad_norm": 1.99045729637146,
"learning_rate": 8.245e-05,
"loss": 0.4117,
"step": 1650
},
{
"epoch": 2.8481675392670156,
"grad_norm": 1.3599165678024292,
"learning_rate": 8.495e-05,
"loss": 0.4101,
"step": 1700
},
{
"epoch": 2.931937172774869,
"grad_norm": 2.3722105026245117,
"learning_rate": 8.745000000000001e-05,
"loss": 0.413,
"step": 1750
},
{
"epoch": 3.0157068062827226,
"grad_norm": 1.5704238414764404,
"learning_rate": 8.995e-05,
"loss": 0.4086,
"step": 1800
},
{
"epoch": 3.099476439790576,
"grad_norm": 3.272968053817749,
"learning_rate": 9.245e-05,
"loss": 0.4057,
"step": 1850
},
{
"epoch": 3.183246073298429,
"grad_norm": 2.658064126968384,
"learning_rate": 9.495e-05,
"loss": 0.4071,
"step": 1900
},
{
"epoch": 3.2670157068062826,
"grad_norm": 1.688971757888794,
"learning_rate": 9.745000000000001e-05,
"loss": 0.3984,
"step": 1950
},
{
"epoch": 3.350785340314136,
"grad_norm": 1.4563082456588745,
"learning_rate": 9.995e-05,
"loss": 0.404,
"step": 2000
},
{
"epoch": 3.350785340314136,
"eval_loss": 0.36596959829330444,
"eval_runtime": 268.3638,
"eval_samples_per_second": 31.632,
"eval_steps_per_second": 3.957,
"step": 2000
},
{
"epoch": 3.4345549738219896,
"grad_norm": 1.6672757863998413,
"learning_rate": 9.951e-05,
"loss": 0.4032,
"step": 2050
},
{
"epoch": 3.518324607329843,
"grad_norm": 2.0084328651428223,
"learning_rate": 9.901e-05,
"loss": 0.4003,
"step": 2100
},
{
"epoch": 3.6020942408376966,
"grad_norm": 1.8289756774902344,
"learning_rate": 9.851e-05,
"loss": 0.3986,
"step": 2150
},
{
"epoch": 3.6858638743455496,
"grad_norm": 1.791348934173584,
"learning_rate": 9.801e-05,
"loss": 0.3928,
"step": 2200
},
{
"epoch": 3.769633507853403,
"grad_norm": 1.9079582691192627,
"learning_rate": 9.751e-05,
"loss": 0.3899,
"step": 2250
},
{
"epoch": 3.8534031413612566,
"grad_norm": 7.154463768005371,
"learning_rate": 9.701e-05,
"loss": 0.3894,
"step": 2300
},
{
"epoch": 3.93717277486911,
"grad_norm": 1.9638899564743042,
"learning_rate": 9.651e-05,
"loss": 0.399,
"step": 2350
},
{
"epoch": 4.020942408376963,
"grad_norm": 2.147676706314087,
"learning_rate": 9.601e-05,
"loss": 0.3902,
"step": 2400
},
{
"epoch": 4.104712041884817,
"grad_norm": 2.282815456390381,
"learning_rate": 9.551e-05,
"loss": 0.3906,
"step": 2450
},
{
"epoch": 4.18848167539267,
"grad_norm": 1.2255228757858276,
"learning_rate": 9.501e-05,
"loss": 0.3851,
"step": 2500
},
{
"epoch": 4.18848167539267,
"eval_loss": 0.354680597782135,
"eval_runtime": 269.5668,
"eval_samples_per_second": 31.491,
"eval_steps_per_second": 3.94,
"step": 2500
},
{
"epoch": 4.272251308900524,
"grad_norm": 1.2088522911071777,
"learning_rate": 9.451000000000002e-05,
"loss": 0.3891,
"step": 2550
},
{
"epoch": 4.356020942408377,
"grad_norm": 1.3467260599136353,
"learning_rate": 9.402000000000001e-05,
"loss": 0.3854,
"step": 2600
},
{
"epoch": 4.439790575916231,
"grad_norm": 1.2227791547775269,
"learning_rate": 9.352000000000001e-05,
"loss": 0.3818,
"step": 2650
},
{
"epoch": 4.523560209424084,
"grad_norm": 1.0672763586044312,
"learning_rate": 9.302e-05,
"loss": 0.389,
"step": 2700
},
{
"epoch": 4.607329842931938,
"grad_norm": 1.6198370456695557,
"learning_rate": 9.252e-05,
"loss": 0.3811,
"step": 2750
},
{
"epoch": 4.69109947643979,
"grad_norm": 1.296200156211853,
"learning_rate": 9.202e-05,
"loss": 0.382,
"step": 2800
},
{
"epoch": 4.774869109947644,
"grad_norm": 1.533375859260559,
"learning_rate": 9.152e-05,
"loss": 0.381,
"step": 2850
},
{
"epoch": 4.858638743455497,
"grad_norm": 2.161743640899658,
"learning_rate": 9.102e-05,
"loss": 0.3829,
"step": 2900
},
{
"epoch": 4.942408376963351,
"grad_norm": 1.2308694124221802,
"learning_rate": 9.052000000000001e-05,
"loss": 0.3806,
"step": 2950
},
{
"epoch": 5.026178010471204,
"grad_norm": 1.2448184490203857,
"learning_rate": 9.002000000000001e-05,
"loss": 0.3808,
"step": 3000
},
{
"epoch": 5.026178010471204,
"eval_loss": 0.3451012670993805,
"eval_runtime": 274.5252,
"eval_samples_per_second": 30.922,
"eval_steps_per_second": 3.868,
"step": 3000
},
{
"epoch": 5.109947643979058,
"grad_norm": 1.4961705207824707,
"learning_rate": 8.952000000000001e-05,
"loss": 0.3755,
"step": 3050
},
{
"epoch": 5.193717277486911,
"grad_norm": 1.2276802062988281,
"learning_rate": 8.902e-05,
"loss": 0.3771,
"step": 3100
},
{
"epoch": 5.277486910994765,
"grad_norm": 1.3186664581298828,
"learning_rate": 8.852e-05,
"loss": 0.3766,
"step": 3150
},
{
"epoch": 5.361256544502618,
"grad_norm": 1.4056960344314575,
"learning_rate": 8.802e-05,
"loss": 0.3761,
"step": 3200
},
{
"epoch": 5.445026178010472,
"grad_norm": 1.4609332084655762,
"learning_rate": 8.752e-05,
"loss": 0.377,
"step": 3250
},
{
"epoch": 5.528795811518324,
"grad_norm": 1.2348676919937134,
"learning_rate": 8.702e-05,
"loss": 0.3761,
"step": 3300
},
{
"epoch": 5.612565445026178,
"grad_norm": 1.2290446758270264,
"learning_rate": 8.652e-05,
"loss": 0.3735,
"step": 3350
},
{
"epoch": 5.696335078534031,
"grad_norm": 1.0205050706863403,
"learning_rate": 8.602e-05,
"loss": 0.3709,
"step": 3400
},
{
"epoch": 5.780104712041885,
"grad_norm": 1.83589768409729,
"learning_rate": 8.552e-05,
"loss": 0.3709,
"step": 3450
},
{
"epoch": 5.863874345549738,
"grad_norm": 1.8419536352157593,
"learning_rate": 8.502e-05,
"loss": 0.3704,
"step": 3500
},
{
"epoch": 5.863874345549738,
"eval_loss": 0.33645617961883545,
"eval_runtime": 267.7957,
"eval_samples_per_second": 31.7,
"eval_steps_per_second": 3.966,
"step": 3500
},
{
"epoch": 5.947643979057592,
"grad_norm": 0.9079850316047668,
"learning_rate": 8.452e-05,
"loss": 0.3674,
"step": 3550
},
{
"epoch": 6.031413612565445,
"grad_norm": 1.6563224792480469,
"learning_rate": 8.402e-05,
"loss": 0.368,
"step": 3600
},
{
"epoch": 6.115183246073299,
"grad_norm": 0.9500299096107483,
"learning_rate": 8.352000000000001e-05,
"loss": 0.3692,
"step": 3650
},
{
"epoch": 6.198952879581152,
"grad_norm": 1.219301700592041,
"learning_rate": 8.302000000000001e-05,
"loss": 0.3664,
"step": 3700
},
{
"epoch": 6.282722513089006,
"grad_norm": 1.466737985610962,
"learning_rate": 8.252e-05,
"loss": 0.3697,
"step": 3750
},
{
"epoch": 6.366492146596858,
"grad_norm": 1.329934000968933,
"learning_rate": 8.202e-05,
"loss": 0.3676,
"step": 3800
},
{
"epoch": 6.450261780104712,
"grad_norm": 1.0545940399169922,
"learning_rate": 8.152e-05,
"loss": 0.3673,
"step": 3850
},
{
"epoch": 6.534031413612565,
"grad_norm": 1.7130441665649414,
"learning_rate": 8.102000000000001e-05,
"loss": 0.3658,
"step": 3900
},
{
"epoch": 6.617801047120419,
"grad_norm": 2.0463407039642334,
"learning_rate": 8.052000000000001e-05,
"loss": 0.3681,
"step": 3950
},
{
"epoch": 6.701570680628272,
"grad_norm": 1.0734078884124756,
"learning_rate": 8.002000000000001e-05,
"loss": 0.3664,
"step": 4000
},
{
"epoch": 6.701570680628272,
"eval_loss": 0.3334435820579529,
"eval_runtime": 269.2566,
"eval_samples_per_second": 31.528,
"eval_steps_per_second": 3.944,
"step": 4000
},
{
"epoch": 6.785340314136126,
"grad_norm": 0.9216086864471436,
"learning_rate": 7.952000000000001e-05,
"loss": 0.3619,
"step": 4050
},
{
"epoch": 6.869109947643979,
"grad_norm": 1.802753210067749,
"learning_rate": 7.902e-05,
"loss": 0.3628,
"step": 4100
},
{
"epoch": 6.952879581151833,
"grad_norm": 1.3391004800796509,
"learning_rate": 7.852e-05,
"loss": 0.3626,
"step": 4150
},
{
"epoch": 7.036649214659686,
"grad_norm": 0.9989505410194397,
"learning_rate": 7.802e-05,
"loss": 0.3641,
"step": 4200
},
{
"epoch": 7.12041884816754,
"grad_norm": 1.318490743637085,
"learning_rate": 7.752e-05,
"loss": 0.3661,
"step": 4250
},
{
"epoch": 7.204188481675392,
"grad_norm": 1.4725680351257324,
"learning_rate": 7.702e-05,
"loss": 0.359,
"step": 4300
},
{
"epoch": 7.287958115183246,
"grad_norm": 1.3948692083358765,
"learning_rate": 7.652e-05,
"loss": 0.3622,
"step": 4350
},
{
"epoch": 7.371727748691099,
"grad_norm": 1.4226194620132446,
"learning_rate": 7.602000000000001e-05,
"loss": 0.3641,
"step": 4400
},
{
"epoch": 7.455497382198953,
"grad_norm": 1.008616328239441,
"learning_rate": 7.552e-05,
"loss": 0.3643,
"step": 4450
},
{
"epoch": 7.539267015706806,
"grad_norm": 1.3399937152862549,
"learning_rate": 7.502e-05,
"loss": 0.3598,
"step": 4500
},
{
"epoch": 7.539267015706806,
"eval_loss": 0.32835692167282104,
"eval_runtime": 275.5633,
"eval_samples_per_second": 30.806,
"eval_steps_per_second": 3.854,
"step": 4500
},
{
"epoch": 7.62303664921466,
"grad_norm": 1.2946890592575073,
"learning_rate": 7.452e-05,
"loss": 0.3595,
"step": 4550
},
{
"epoch": 7.706806282722513,
"grad_norm": 1.672792911529541,
"learning_rate": 7.402e-05,
"loss": 0.3651,
"step": 4600
},
{
"epoch": 7.790575916230367,
"grad_norm": 1.3620250225067139,
"learning_rate": 7.352e-05,
"loss": 0.3666,
"step": 4650
},
{
"epoch": 7.87434554973822,
"grad_norm": 2.236588954925537,
"learning_rate": 7.302e-05,
"loss": 0.365,
"step": 4700
},
{
"epoch": 7.958115183246074,
"grad_norm": 2.052438259124756,
"learning_rate": 7.252e-05,
"loss": 0.3557,
"step": 4750
},
{
"epoch": 8.041884816753926,
"grad_norm": 1.1328259706497192,
"learning_rate": 7.202e-05,
"loss": 0.3573,
"step": 4800
},
{
"epoch": 8.12565445026178,
"grad_norm": 1.4987516403198242,
"learning_rate": 7.151999999999999e-05,
"loss": 0.3547,
"step": 4850
},
{
"epoch": 8.209424083769633,
"grad_norm": 1.502505898475647,
"learning_rate": 7.102000000000001e-05,
"loss": 0.3668,
"step": 4900
},
{
"epoch": 8.293193717277488,
"grad_norm": 1.4446626901626587,
"learning_rate": 7.052000000000001e-05,
"loss": 0.3566,
"step": 4950
},
{
"epoch": 8.37696335078534,
"grad_norm": 0.9949765205383301,
"learning_rate": 7.002000000000001e-05,
"loss": 0.3612,
"step": 5000
},
{
"epoch": 8.37696335078534,
"eval_loss": 0.3273167312145233,
"eval_runtime": 277.1246,
"eval_samples_per_second": 30.632,
"eval_steps_per_second": 3.832,
"step": 5000
},
{
"epoch": 8.460732984293193,
"grad_norm": 1.5191736221313477,
"learning_rate": 6.952000000000001e-05,
"loss": 0.355,
"step": 5050
},
{
"epoch": 8.544502617801047,
"grad_norm": 2.3525736331939697,
"learning_rate": 6.902000000000001e-05,
"loss": 0.3554,
"step": 5100
},
{
"epoch": 8.6282722513089,
"grad_norm": 1.3651047945022583,
"learning_rate": 6.852e-05,
"loss": 0.36,
"step": 5150
},
{
"epoch": 8.712041884816754,
"grad_norm": 1.8448572158813477,
"learning_rate": 6.802e-05,
"loss": 0.3535,
"step": 5200
},
{
"epoch": 8.795811518324607,
"grad_norm": 1.2485219240188599,
"learning_rate": 6.752e-05,
"loss": 0.3564,
"step": 5250
},
{
"epoch": 8.879581151832461,
"grad_norm": 1.3560365438461304,
"learning_rate": 6.702e-05,
"loss": 0.3536,
"step": 5300
},
{
"epoch": 8.963350785340314,
"grad_norm": 1.9484964609146118,
"learning_rate": 6.652000000000001e-05,
"loss": 0.3554,
"step": 5350
},
{
"epoch": 9.047120418848168,
"grad_norm": 1.1570740938186646,
"learning_rate": 6.602000000000001e-05,
"loss": 0.3608,
"step": 5400
},
{
"epoch": 9.13089005235602,
"grad_norm": 2.5024497509002686,
"learning_rate": 6.552000000000001e-05,
"loss": 0.3519,
"step": 5450
},
{
"epoch": 9.214659685863875,
"grad_norm": 1.2458699941635132,
"learning_rate": 6.502e-05,
"loss": 0.3523,
"step": 5500
},
{
"epoch": 9.214659685863875,
"eval_loss": 0.32777872681617737,
"eval_runtime": 273.5101,
"eval_samples_per_second": 31.037,
"eval_steps_per_second": 3.883,
"step": 5500
},
{
"epoch": 9.298429319371728,
"grad_norm": 1.3453543186187744,
"learning_rate": 6.452e-05,
"loss": 0.3534,
"step": 5550
},
{
"epoch": 9.38219895287958,
"grad_norm": 1.2104579210281372,
"learning_rate": 6.402e-05,
"loss": 0.3553,
"step": 5600
},
{
"epoch": 9.465968586387435,
"grad_norm": 1.1557406187057495,
"learning_rate": 6.352e-05,
"loss": 0.3514,
"step": 5650
},
{
"epoch": 9.549738219895287,
"grad_norm": 1.621117353439331,
"learning_rate": 6.302e-05,
"loss": 0.3542,
"step": 5700
},
{
"epoch": 9.633507853403142,
"grad_norm": 0.9746065139770508,
"learning_rate": 6.252e-05,
"loss": 0.3538,
"step": 5750
},
{
"epoch": 9.717277486910994,
"grad_norm": 1.445143222808838,
"learning_rate": 6.202e-05,
"loss": 0.3536,
"step": 5800
},
{
"epoch": 9.801047120418849,
"grad_norm": 1.8927617073059082,
"learning_rate": 6.152e-05,
"loss": 0.3532,
"step": 5850
},
{
"epoch": 9.884816753926701,
"grad_norm": 1.3752628564834595,
"learning_rate": 6.102e-05,
"loss": 0.3479,
"step": 5900
},
{
"epoch": 9.968586387434556,
"grad_norm": 1.8939998149871826,
"learning_rate": 6.0519999999999997e-05,
"loss": 0.3533,
"step": 5950
},
{
"epoch": 10.052356020942408,
"grad_norm": 1.564810037612915,
"learning_rate": 6.002e-05,
"loss": 0.3484,
"step": 6000
},
{
"epoch": 10.052356020942408,
"eval_loss": 0.3196151554584503,
"eval_runtime": 270.4466,
"eval_samples_per_second": 31.389,
"eval_steps_per_second": 3.927,
"step": 6000
}
],
"logging_steps": 50,
"max_steps": 12000,
"num_input_tokens_seen": 0,
"num_train_epochs": 21,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.0787618967689203e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}