{
"best_metric": 0.40983906388282776,
"best_model_checkpoint": "mikhail-panzo/fil_b64_le5_s8000/checkpoint-5500",
"epoch": 347.82608695652175,
"eval_steps": 500,
"global_step": 8000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 2.1739130434782608,
"grad_norm": 4.140745639801025,
"learning_rate": 2.4500000000000004e-07,
"loss": 0.8272,
"step": 50
},
{
"epoch": 4.3478260869565215,
"grad_norm": 1.78262197971344,
"learning_rate": 4.95e-07,
"loss": 0.7463,
"step": 100
},
{
"epoch": 6.521739130434782,
"grad_norm": 4.001489639282227,
"learning_rate": 7.450000000000001e-07,
"loss": 0.7504,
"step": 150
},
{
"epoch": 8.695652173913043,
"grad_norm": 1.0013818740844727,
"learning_rate": 9.950000000000002e-07,
"loss": 0.7295,
"step": 200
},
{
"epoch": 10.869565217391305,
"grad_norm": 1.9014511108398438,
"learning_rate": 1.2450000000000002e-06,
"loss": 0.7182,
"step": 250
},
{
"epoch": 13.043478260869565,
"grad_norm": 13.358899116516113,
"learning_rate": 1.495e-06,
"loss": 0.7037,
"step": 300
},
{
"epoch": 15.217391304347826,
"grad_norm": 1.4039109945297241,
"learning_rate": 1.745e-06,
"loss": 0.6966,
"step": 350
},
{
"epoch": 17.391304347826086,
"grad_norm": 3.0226972103118896,
"learning_rate": 1.9950000000000004e-06,
"loss": 0.6702,
"step": 400
},
{
"epoch": 19.565217391304348,
"grad_norm": 1.716300129890442,
"learning_rate": 2.24e-06,
"loss": 0.6487,
"step": 450
},
{
"epoch": 21.73913043478261,
"grad_norm": 2.9351744651794434,
"learning_rate": 2.4900000000000003e-06,
"loss": 0.6071,
"step": 500
},
{
"epoch": 21.73913043478261,
"eval_loss": 0.521294116973877,
"eval_runtime": 6.6451,
"eval_samples_per_second": 23.927,
"eval_steps_per_second": 3.01,
"step": 500
},
{
"epoch": 23.91304347826087,
"grad_norm": 1.9992552995681763,
"learning_rate": 2.7400000000000004e-06,
"loss": 0.5759,
"step": 550
},
{
"epoch": 26.08695652173913,
"grad_norm": 1.2594470977783203,
"learning_rate": 2.99e-06,
"loss": 0.56,
"step": 600
},
{
"epoch": 28.26086956521739,
"grad_norm": 1.0602363348007202,
"learning_rate": 3.2400000000000003e-06,
"loss": 0.5709,
"step": 650
},
{
"epoch": 30.434782608695652,
"grad_norm": 1.116231083869934,
"learning_rate": 3.49e-06,
"loss": 0.5322,
"step": 700
},
{
"epoch": 32.608695652173914,
"grad_norm": 1.5203620195388794,
"learning_rate": 3.74e-06,
"loss": 0.5264,
"step": 750
},
{
"epoch": 34.78260869565217,
"grad_norm": 1.6139357089996338,
"learning_rate": 3.990000000000001e-06,
"loss": 0.5191,
"step": 800
},
{
"epoch": 36.95652173913044,
"grad_norm": 1.436273217201233,
"learning_rate": 4.24e-06,
"loss": 0.5272,
"step": 850
},
{
"epoch": 39.130434782608695,
"grad_norm": 1.757323145866394,
"learning_rate": 4.49e-06,
"loss": 0.5016,
"step": 900
},
{
"epoch": 41.30434782608695,
"grad_norm": 0.9879368543624878,
"learning_rate": 4.74e-06,
"loss": 0.4975,
"step": 950
},
{
"epoch": 43.47826086956522,
"grad_norm": 1.3402222394943237,
"learning_rate": 4.9900000000000005e-06,
"loss": 0.5126,
"step": 1000
},
{
"epoch": 43.47826086956522,
"eval_loss": 0.4506620466709137,
"eval_runtime": 6.4644,
"eval_samples_per_second": 24.596,
"eval_steps_per_second": 3.094,
"step": 1000
},
{
"epoch": 45.65217391304348,
"grad_norm": 1.9366440773010254,
"learning_rate": 5.240000000000001e-06,
"loss": 0.5043,
"step": 1050
},
{
"epoch": 47.82608695652174,
"grad_norm": 1.5358505249023438,
"learning_rate": 5.490000000000001e-06,
"loss": 0.5034,
"step": 1100
},
{
"epoch": 50.0,
"grad_norm": 1.6836323738098145,
"learning_rate": 5.74e-06,
"loss": 0.499,
"step": 1150
},
{
"epoch": 52.17391304347826,
"grad_norm": 1.5092076063156128,
"learning_rate": 5.99e-06,
"loss": 0.487,
"step": 1200
},
{
"epoch": 54.34782608695652,
"grad_norm": 1.0751235485076904,
"learning_rate": 6.24e-06,
"loss": 0.4871,
"step": 1250
},
{
"epoch": 56.52173913043478,
"grad_norm": 1.327528476715088,
"learning_rate": 6.4900000000000005e-06,
"loss": 0.4911,
"step": 1300
},
{
"epoch": 58.69565217391305,
"grad_norm": 1.7827174663543701,
"learning_rate": 6.740000000000001e-06,
"loss": 0.4772,
"step": 1350
},
{
"epoch": 60.869565217391305,
"grad_norm": 1.2707558870315552,
"learning_rate": 6.99e-06,
"loss": 0.4786,
"step": 1400
},
{
"epoch": 63.04347826086956,
"grad_norm": 1.0794321298599243,
"learning_rate": 7.24e-06,
"loss": 0.4741,
"step": 1450
},
{
"epoch": 65.21739130434783,
"grad_norm": 2.0180580615997314,
"learning_rate": 7.49e-06,
"loss": 0.4749,
"step": 1500
},
{
"epoch": 65.21739130434783,
"eval_loss": 0.4311259388923645,
"eval_runtime": 6.4494,
"eval_samples_per_second": 24.653,
"eval_steps_per_second": 3.101,
"step": 1500
},
{
"epoch": 67.3913043478261,
"grad_norm": 1.2938276529312134,
"learning_rate": 7.74e-06,
"loss": 0.4684,
"step": 1550
},
{
"epoch": 69.56521739130434,
"grad_norm": 1.8100048303604126,
"learning_rate": 7.990000000000001e-06,
"loss": 0.4797,
"step": 1600
},
{
"epoch": 71.73913043478261,
"grad_norm": 1.8357532024383545,
"learning_rate": 8.24e-06,
"loss": 0.4813,
"step": 1650
},
{
"epoch": 73.91304347826087,
"grad_norm": 1.4251928329467773,
"learning_rate": 8.49e-06,
"loss": 0.4622,
"step": 1700
},
{
"epoch": 76.08695652173913,
"grad_norm": 1.1764353513717651,
"learning_rate": 8.740000000000001e-06,
"loss": 0.4642,
"step": 1750
},
{
"epoch": 78.26086956521739,
"grad_norm": 1.6363924741744995,
"learning_rate": 8.99e-06,
"loss": 0.4669,
"step": 1800
},
{
"epoch": 80.43478260869566,
"grad_norm": 1.485127568244934,
"learning_rate": 9.240000000000001e-06,
"loss": 0.4595,
"step": 1850
},
{
"epoch": 82.6086956521739,
"grad_norm": 1.2844778299331665,
"learning_rate": 9.49e-06,
"loss": 0.463,
"step": 1900
},
{
"epoch": 84.78260869565217,
"grad_norm": 1.0181550979614258,
"learning_rate": 9.74e-06,
"loss": 0.4529,
"step": 1950
},
{
"epoch": 86.95652173913044,
"grad_norm": 1.3658286333084106,
"learning_rate": 9.990000000000001e-06,
"loss": 0.454,
"step": 2000
},
{
"epoch": 86.95652173913044,
"eval_loss": 0.4230838716030121,
"eval_runtime": 6.4713,
"eval_samples_per_second": 24.57,
"eval_steps_per_second": 3.091,
"step": 2000
},
{
"epoch": 89.1304347826087,
"grad_norm": 2.8028907775878906,
"learning_rate": 9.920000000000002e-06,
"loss": 0.4694,
"step": 2050
},
{
"epoch": 91.30434782608695,
"grad_norm": 0.8982108235359192,
"learning_rate": 9.836666666666668e-06,
"loss": 0.4495,
"step": 2100
},
{
"epoch": 93.47826086956522,
"grad_norm": 2.500758409500122,
"learning_rate": 9.753333333333335e-06,
"loss": 0.4456,
"step": 2150
},
{
"epoch": 95.65217391304348,
"grad_norm": 1.7864420413970947,
"learning_rate": 9.67e-06,
"loss": 0.4816,
"step": 2200
},
{
"epoch": 97.82608695652173,
"grad_norm": 1.3289756774902344,
"learning_rate": 9.586666666666667e-06,
"loss": 0.4512,
"step": 2250
},
{
"epoch": 100.0,
"grad_norm": 1.5230562686920166,
"learning_rate": 9.503333333333334e-06,
"loss": 0.4564,
"step": 2300
},
{
"epoch": 102.17391304347827,
"grad_norm": 2.709244728088379,
"learning_rate": 9.42e-06,
"loss": 0.4543,
"step": 2350
},
{
"epoch": 104.34782608695652,
"grad_norm": 2.3624541759490967,
"learning_rate": 9.336666666666666e-06,
"loss": 0.4505,
"step": 2400
},
{
"epoch": 106.52173913043478,
"grad_norm": 1.4635056257247925,
"learning_rate": 9.253333333333333e-06,
"loss": 0.4558,
"step": 2450
},
{
"epoch": 108.69565217391305,
"grad_norm": 1.9029183387756348,
"learning_rate": 9.17e-06,
"loss": 0.443,
"step": 2500
},
{
"epoch": 108.69565217391305,
"eval_loss": 0.41728243231773376,
"eval_runtime": 6.4572,
"eval_samples_per_second": 24.624,
"eval_steps_per_second": 3.097,
"step": 2500
},
{
"epoch": 110.8695652173913,
"grad_norm": 1.4863485097885132,
"learning_rate": 9.088333333333334e-06,
"loss": 0.4562,
"step": 2550
},
{
"epoch": 113.04347826086956,
"grad_norm": 1.1933757066726685,
"learning_rate": 9.005000000000001e-06,
"loss": 0.4438,
"step": 2600
},
{
"epoch": 115.21739130434783,
"grad_norm": 2.2257957458496094,
"learning_rate": 8.921666666666668e-06,
"loss": 0.4511,
"step": 2650
},
{
"epoch": 117.3913043478261,
"grad_norm": 1.382228970527649,
"learning_rate": 8.838333333333335e-06,
"loss": 0.4442,
"step": 2700
},
{
"epoch": 119.56521739130434,
"grad_norm": 1.1577272415161133,
"learning_rate": 8.755e-06,
"loss": 0.4376,
"step": 2750
},
{
"epoch": 121.73913043478261,
"grad_norm": 1.16239333152771,
"learning_rate": 8.671666666666667e-06,
"loss": 0.4378,
"step": 2800
},
{
"epoch": 123.91304347826087,
"grad_norm": 1.3845179080963135,
"learning_rate": 8.588333333333334e-06,
"loss": 0.4368,
"step": 2850
},
{
"epoch": 126.08695652173913,
"grad_norm": 1.4513936042785645,
"learning_rate": 8.505e-06,
"loss": 0.4362,
"step": 2900
},
{
"epoch": 128.2608695652174,
"grad_norm": 2.2269773483276367,
"learning_rate": 8.421666666666668e-06,
"loss": 0.4498,
"step": 2950
},
{
"epoch": 130.43478260869566,
"grad_norm": 2.3055992126464844,
"learning_rate": 8.338333333333335e-06,
"loss": 0.4376,
"step": 3000
},
{
"epoch": 130.43478260869566,
"eval_loss": 0.4168698191642761,
"eval_runtime": 6.612,
"eval_samples_per_second": 24.047,
"eval_steps_per_second": 3.025,
"step": 3000
},
{
"epoch": 132.6086956521739,
"grad_norm": 1.1443853378295898,
"learning_rate": 8.255000000000001e-06,
"loss": 0.4369,
"step": 3050
},
{
"epoch": 134.7826086956522,
"grad_norm": 1.3519920110702515,
"learning_rate": 8.171666666666668e-06,
"loss": 0.4322,
"step": 3100
},
{
"epoch": 136.95652173913044,
"grad_norm": 1.4229977130889893,
"learning_rate": 8.088333333333334e-06,
"loss": 0.436,
"step": 3150
},
{
"epoch": 139.1304347826087,
"grad_norm": 3.4992916584014893,
"learning_rate": 8.005e-06,
"loss": 0.4275,
"step": 3200
},
{
"epoch": 141.30434782608697,
"grad_norm": 0.9590508937835693,
"learning_rate": 7.921666666666667e-06,
"loss": 0.4366,
"step": 3250
},
{
"epoch": 143.47826086956522,
"grad_norm": 4.028496265411377,
"learning_rate": 7.838333333333334e-06,
"loss": 0.437,
"step": 3300
},
{
"epoch": 145.65217391304347,
"grad_norm": 0.9438303709030151,
"learning_rate": 7.755000000000001e-06,
"loss": 0.4317,
"step": 3350
},
{
"epoch": 147.82608695652175,
"grad_norm": 1.5847684144973755,
"learning_rate": 7.671666666666668e-06,
"loss": 0.4303,
"step": 3400
},
{
"epoch": 150.0,
"grad_norm": 1.676293969154358,
"learning_rate": 7.588333333333334e-06,
"loss": 0.4312,
"step": 3450
},
{
"epoch": 152.17391304347825,
"grad_norm": 0.9531082510948181,
"learning_rate": 7.505e-06,
"loss": 0.4287,
"step": 3500
},
{
"epoch": 152.17391304347825,
"eval_loss": 0.41326722502708435,
"eval_runtime": 6.485,
"eval_samples_per_second": 24.518,
"eval_steps_per_second": 3.084,
"step": 3500
},
{
"epoch": 154.34782608695653,
"grad_norm": 2.30401611328125,
"learning_rate": 7.421666666666667e-06,
"loss": 0.4248,
"step": 3550
},
{
"epoch": 156.52173913043478,
"grad_norm": 0.9738525152206421,
"learning_rate": 7.338333333333334e-06,
"loss": 0.4354,
"step": 3600
},
{
"epoch": 158.69565217391303,
"grad_norm": 2.276857376098633,
"learning_rate": 7.255000000000001e-06,
"loss": 0.4345,
"step": 3650
},
{
"epoch": 160.8695652173913,
"grad_norm": 1.118604063987732,
"learning_rate": 7.171666666666667e-06,
"loss": 0.4243,
"step": 3700
},
{
"epoch": 163.04347826086956,
"grad_norm": 1.1995043754577637,
"learning_rate": 7.088333333333334e-06,
"loss": 0.4229,
"step": 3750
},
{
"epoch": 165.2173913043478,
"grad_norm": 1.1676712036132812,
"learning_rate": 7.005000000000001e-06,
"loss": 0.4225,
"step": 3800
},
{
"epoch": 167.3913043478261,
"grad_norm": 1.7886515855789185,
"learning_rate": 6.921666666666668e-06,
"loss": 0.4243,
"step": 3850
},
{
"epoch": 169.56521739130434,
"grad_norm": 1.0336685180664062,
"learning_rate": 6.838333333333334e-06,
"loss": 0.4288,
"step": 3900
},
{
"epoch": 171.7391304347826,
"grad_norm": 3.572209596633911,
"learning_rate": 6.7550000000000005e-06,
"loss": 0.4262,
"step": 3950
},
{
"epoch": 173.91304347826087,
"grad_norm": 3.631232261657715,
"learning_rate": 6.6716666666666674e-06,
"loss": 0.4264,
"step": 4000
},
{
"epoch": 173.91304347826087,
"eval_loss": 0.4149872660636902,
"eval_runtime": 6.8222,
"eval_samples_per_second": 23.306,
"eval_steps_per_second": 2.932,
"step": 4000
},
{
"epoch": 176.08695652173913,
"grad_norm": 1.0449178218841553,
"learning_rate": 6.588333333333334e-06,
"loss": 0.4232,
"step": 4050
},
{
"epoch": 178.2608695652174,
"grad_norm": 1.121854305267334,
"learning_rate": 6.505e-06,
"loss": 0.4214,
"step": 4100
},
{
"epoch": 180.43478260869566,
"grad_norm": 1.9070690870285034,
"learning_rate": 6.421666666666667e-06,
"loss": 0.4349,
"step": 4150
},
{
"epoch": 182.6086956521739,
"grad_norm": 1.6927332878112793,
"learning_rate": 6.338333333333334e-06,
"loss": 0.4216,
"step": 4200
},
{
"epoch": 184.7826086956522,
"grad_norm": 1.3572489023208618,
"learning_rate": 6.255e-06,
"loss": 0.4218,
"step": 4250
},
{
"epoch": 186.95652173913044,
"grad_norm": 1.9043023586273193,
"learning_rate": 6.171666666666667e-06,
"loss": 0.4204,
"step": 4300
},
{
"epoch": 189.1304347826087,
"grad_norm": 1.420276165008545,
"learning_rate": 6.088333333333334e-06,
"loss": 0.4259,
"step": 4350
},
{
"epoch": 191.30434782608697,
"grad_norm": 0.8243815302848816,
"learning_rate": 6.005000000000001e-06,
"loss": 0.4178,
"step": 4400
},
{
"epoch": 193.47826086956522,
"grad_norm": 0.995266854763031,
"learning_rate": 5.921666666666667e-06,
"loss": 0.4302,
"step": 4450
},
{
"epoch": 195.65217391304347,
"grad_norm": 1.3305209875106812,
"learning_rate": 5.838333333333334e-06,
"loss": 0.423,
"step": 4500
},
{
"epoch": 195.65217391304347,
"eval_loss": 0.41338014602661133,
"eval_runtime": 6.5844,
"eval_samples_per_second": 24.148,
"eval_steps_per_second": 3.037,
"step": 4500
},
{
"epoch": 197.82608695652175,
"grad_norm": 1.1709681749343872,
"learning_rate": 5.755000000000001e-06,
"loss": 0.4178,
"step": 4550
},
{
"epoch": 200.0,
"grad_norm": 3.5350754261016846,
"learning_rate": 5.671666666666668e-06,
"loss": 0.4211,
"step": 4600
},
{
"epoch": 202.17391304347825,
"grad_norm": 2.050933361053467,
"learning_rate": 5.588333333333334e-06,
"loss": 0.4266,
"step": 4650
},
{
"epoch": 204.34782608695653,
"grad_norm": 1.973078966140747,
"learning_rate": 5.505000000000001e-06,
"loss": 0.4187,
"step": 4700
},
{
"epoch": 206.52173913043478,
"grad_norm": 1.4094629287719727,
"learning_rate": 5.4216666666666676e-06,
"loss": 0.4294,
"step": 4750
},
{
"epoch": 208.69565217391303,
"grad_norm": 0.9872909188270569,
"learning_rate": 5.3383333333333345e-06,
"loss": 0.4239,
"step": 4800
},
{
"epoch": 210.8695652173913,
"grad_norm": 1.4357950687408447,
"learning_rate": 5.2550000000000005e-06,
"loss": 0.4117,
"step": 4850
},
{
"epoch": 213.04347826086956,
"grad_norm": 1.4760295152664185,
"learning_rate": 5.171666666666667e-06,
"loss": 0.4239,
"step": 4900
},
{
"epoch": 215.2173913043478,
"grad_norm": 0.9877930879592896,
"learning_rate": 5.088333333333334e-06,
"loss": 0.4151,
"step": 4950
},
{
"epoch": 217.3913043478261,
"grad_norm": 1.3829108476638794,
"learning_rate": 5.0049999999999995e-06,
"loss": 0.4223,
"step": 5000
},
{
"epoch": 217.3913043478261,
"eval_loss": 0.41126948595046997,
"eval_runtime": 6.7795,
"eval_samples_per_second": 23.453,
"eval_steps_per_second": 2.95,
"step": 5000
},
{
"epoch": 219.56521739130434,
"grad_norm": 1.438590168952942,
"learning_rate": 4.921666666666666e-06,
"loss": 0.4114,
"step": 5050
},
{
"epoch": 221.7391304347826,
"grad_norm": 0.9894844889640808,
"learning_rate": 4.838333333333334e-06,
"loss": 0.413,
"step": 5100
},
{
"epoch": 223.91304347826087,
"grad_norm": 0.9712705612182617,
"learning_rate": 4.755e-06,
"loss": 0.4108,
"step": 5150
},
{
"epoch": 226.08695652173913,
"grad_norm": 0.9972001314163208,
"learning_rate": 4.671666666666667e-06,
"loss": 0.4173,
"step": 5200
},
{
"epoch": 228.2608695652174,
"grad_norm": 0.9916852116584778,
"learning_rate": 4.588333333333333e-06,
"loss": 0.426,
"step": 5250
},
{
"epoch": 230.43478260869566,
"grad_norm": 3.0285840034484863,
"learning_rate": 4.505e-06,
"loss": 0.4211,
"step": 5300
},
{
"epoch": 232.6086956521739,
"grad_norm": 0.8678073883056641,
"learning_rate": 4.421666666666667e-06,
"loss": 0.4165,
"step": 5350
},
{
"epoch": 234.7826086956522,
"grad_norm": 1.2311697006225586,
"learning_rate": 4.338333333333334e-06,
"loss": 0.4192,
"step": 5400
},
{
"epoch": 236.95652173913044,
"grad_norm": 1.1257262229919434,
"learning_rate": 4.255e-06,
"loss": 0.431,
"step": 5450
},
{
"epoch": 239.1304347826087,
"grad_norm": 0.9861388206481934,
"learning_rate": 4.171666666666667e-06,
"loss": 0.4104,
"step": 5500
},
{
"epoch": 239.1304347826087,
"eval_loss": 0.40983906388282776,
"eval_runtime": 6.76,
"eval_samples_per_second": 23.521,
"eval_steps_per_second": 2.959,
"step": 5500
},
{
"epoch": 241.30434782608697,
"grad_norm": 0.9192537665367126,
"learning_rate": 4.088333333333334e-06,
"loss": 0.4105,
"step": 5550
},
{
"epoch": 243.47826086956522,
"grad_norm": 1.0816397666931152,
"learning_rate": 4.005000000000001e-06,
"loss": 0.4116,
"step": 5600
},
{
"epoch": 245.65217391304347,
"grad_norm": 1.2673571109771729,
"learning_rate": 3.921666666666667e-06,
"loss": 0.4122,
"step": 5650
},
{
"epoch": 247.82608695652175,
"grad_norm": 1.131993055343628,
"learning_rate": 3.8383333333333336e-06,
"loss": 0.4097,
"step": 5700
},
{
"epoch": 250.0,
"grad_norm": 2.051192283630371,
"learning_rate": 3.7550000000000005e-06,
"loss": 0.4075,
"step": 5750
},
{
"epoch": 252.17391304347825,
"grad_norm": 0.8863391280174255,
"learning_rate": 3.6716666666666665e-06,
"loss": 0.4077,
"step": 5800
},
{
"epoch": 254.34782608695653,
"grad_norm": 0.8587177395820618,
"learning_rate": 3.588333333333334e-06,
"loss": 0.4047,
"step": 5850
},
{
"epoch": 256.5217391304348,
"grad_norm": 1.0080711841583252,
"learning_rate": 3.505e-06,
"loss": 0.4093,
"step": 5900
},
{
"epoch": 258.69565217391306,
"grad_norm": 1.022581934928894,
"learning_rate": 3.4216666666666672e-06,
"loss": 0.4069,
"step": 5950
},
{
"epoch": 260.8695652173913,
"grad_norm": 1.0036355257034302,
"learning_rate": 3.3383333333333333e-06,
"loss": 0.4192,
"step": 6000
},
{
"epoch": 260.8695652173913,
"eval_loss": 0.41063958406448364,
"eval_runtime": 6.6166,
"eval_samples_per_second": 24.031,
"eval_steps_per_second": 3.023,
"step": 6000
},
{
"epoch": 263.04347826086956,
"grad_norm": 1.1398719549179077,
"learning_rate": 3.255e-06,
"loss": 0.4187,
"step": 6050
},
{
"epoch": 265.2173913043478,
"grad_norm": 0.8705397248268127,
"learning_rate": 3.1716666666666667e-06,
"loss": 0.4189,
"step": 6100
},
{
"epoch": 267.39130434782606,
"grad_norm": 1.1828796863555908,
"learning_rate": 3.0883333333333336e-06,
"loss": 0.412,
"step": 6150
},
{
"epoch": 269.5652173913044,
"grad_norm": 1.2808082103729248,
"learning_rate": 3.005e-06,
"loss": 0.4135,
"step": 6200
},
{
"epoch": 271.7391304347826,
"grad_norm": 1.2082769870758057,
"learning_rate": 2.921666666666667e-06,
"loss": 0.4163,
"step": 6250
},
{
"epoch": 273.9130434782609,
"grad_norm": 1.2765779495239258,
"learning_rate": 2.8383333333333334e-06,
"loss": 0.409,
"step": 6300
},
{
"epoch": 276.0869565217391,
"grad_norm": 1.0085750818252563,
"learning_rate": 2.7550000000000003e-06,
"loss": 0.4168,
"step": 6350
},
{
"epoch": 278.2608695652174,
"grad_norm": 1.2481540441513062,
"learning_rate": 2.6716666666666668e-06,
"loss": 0.4163,
"step": 6400
},
{
"epoch": 280.4347826086956,
"grad_norm": 1.0520100593566895,
"learning_rate": 2.5883333333333337e-06,
"loss": 0.4165,
"step": 6450
},
{
"epoch": 282.60869565217394,
"grad_norm": 1.2364658117294312,
"learning_rate": 2.505e-06,
"loss": 0.4089,
"step": 6500
},
{
"epoch": 282.60869565217394,
"eval_loss": 0.4121515154838562,
"eval_runtime": 6.5878,
"eval_samples_per_second": 24.136,
"eval_steps_per_second": 3.036,
"step": 6500
},
{
"epoch": 284.7826086956522,
"grad_norm": 1.1955006122589111,
"learning_rate": 2.421666666666667e-06,
"loss": 0.4065,
"step": 6550
},
{
"epoch": 286.95652173913044,
"grad_norm": 1.8908520936965942,
"learning_rate": 2.3383333333333335e-06,
"loss": 0.4162,
"step": 6600
},
{
"epoch": 289.1304347826087,
"grad_norm": 1.1614066362380981,
"learning_rate": 2.2550000000000004e-06,
"loss": 0.4099,
"step": 6650
},
{
"epoch": 291.30434782608694,
"grad_norm": 0.8983772993087769,
"learning_rate": 2.171666666666667e-06,
"loss": 0.4121,
"step": 6700
},
{
"epoch": 293.4782608695652,
"grad_norm": 1.2100547552108765,
"learning_rate": 2.088333333333334e-06,
"loss": 0.4041,
"step": 6750
},
{
"epoch": 295.6521739130435,
"grad_norm": 1.1354186534881592,
"learning_rate": 2.006666666666667e-06,
"loss": 0.4127,
"step": 6800
},
{
"epoch": 297.82608695652175,
"grad_norm": 1.1679037809371948,
"learning_rate": 1.9233333333333333e-06,
"loss": 0.4068,
"step": 6850
},
{
"epoch": 300.0,
"grad_norm": 1.502341389656067,
"learning_rate": 1.8400000000000002e-06,
"loss": 0.4147,
"step": 6900
},
{
"epoch": 302.17391304347825,
"grad_norm": 0.9498834013938904,
"learning_rate": 1.7566666666666669e-06,
"loss": 0.4056,
"step": 6950
},
{
"epoch": 304.3478260869565,
"grad_norm": 0.8643518686294556,
"learning_rate": 1.6733333333333335e-06,
"loss": 0.4146,
"step": 7000
},
{
"epoch": 304.3478260869565,
"eval_loss": 0.41151124238967896,
"eval_runtime": 6.5375,
"eval_samples_per_second": 24.321,
"eval_steps_per_second": 3.059,
"step": 7000
},
{
"epoch": 306.5217391304348,
"grad_norm": 0.9846628308296204,
"learning_rate": 1.5900000000000002e-06,
"loss": 0.4115,
"step": 7050
},
{
"epoch": 308.69565217391306,
"grad_norm": 1.0331532955169678,
"learning_rate": 1.506666666666667e-06,
"loss": 0.4143,
"step": 7100
},
{
"epoch": 310.8695652173913,
"grad_norm": 0.9437756538391113,
"learning_rate": 1.4233333333333336e-06,
"loss": 0.4109,
"step": 7150
},
{
"epoch": 313.04347826086956,
"grad_norm": 1.0904934406280518,
"learning_rate": 1.34e-06,
"loss": 0.4109,
"step": 7200
},
{
"epoch": 315.2173913043478,
"grad_norm": 0.9227349162101746,
"learning_rate": 1.2566666666666668e-06,
"loss": 0.406,
"step": 7250
},
{
"epoch": 317.39130434782606,
"grad_norm": 0.991308867931366,
"learning_rate": 1.1733333333333335e-06,
"loss": 0.4161,
"step": 7300
},
{
"epoch": 319.5652173913044,
"grad_norm": 0.9621168375015259,
"learning_rate": 1.0900000000000002e-06,
"loss": 0.4123,
"step": 7350
},
{
"epoch": 321.7391304347826,
"grad_norm": 1.1078619956970215,
"learning_rate": 1.0066666666666668e-06,
"loss": 0.4109,
"step": 7400
},
{
"epoch": 323.9130434782609,
"grad_norm": 0.8910597562789917,
"learning_rate": 9.233333333333334e-07,
"loss": 0.4052,
"step": 7450
},
{
"epoch": 326.0869565217391,
"grad_norm": 0.760857880115509,
"learning_rate": 8.400000000000001e-07,
"loss": 0.4116,
"step": 7500
},
{
"epoch": 326.0869565217391,
"eval_loss": 0.4111355245113373,
"eval_runtime": 6.8924,
"eval_samples_per_second": 23.069,
"eval_steps_per_second": 2.902,
"step": 7500
},
{
"epoch": 328.2608695652174,
"grad_norm": 0.8036932945251465,
"learning_rate": 7.566666666666667e-07,
"loss": 0.4167,
"step": 7550
},
{
"epoch": 330.4347826086956,
"grad_norm": 0.9011595845222473,
"learning_rate": 6.733333333333334e-07,
"loss": 0.4146,
"step": 7600
},
{
"epoch": 332.60869565217394,
"grad_norm": 1.1051392555236816,
"learning_rate": 5.900000000000001e-07,
"loss": 0.414,
"step": 7650
},
{
"epoch": 334.7826086956522,
"grad_norm": 0.8294230103492737,
"learning_rate": 5.066666666666667e-07,
"loss": 0.4136,
"step": 7700
},
{
"epoch": 336.95652173913044,
"grad_norm": 0.8385105729103088,
"learning_rate": 4.233333333333334e-07,
"loss": 0.4031,
"step": 7750
},
{
"epoch": 339.1304347826087,
"grad_norm": 1.015714406967163,
"learning_rate": 3.4000000000000003e-07,
"loss": 0.4013,
"step": 7800
},
{
"epoch": 341.30434782608694,
"grad_norm": 0.8815127611160278,
"learning_rate": 2.566666666666667e-07,
"loss": 0.4067,
"step": 7850
},
{
"epoch": 343.4782608695652,
"grad_norm": 1.0422344207763672,
"learning_rate": 1.7333333333333335e-07,
"loss": 0.4095,
"step": 7900
},
{
"epoch": 345.6521739130435,
"grad_norm": 1.6998138427734375,
"learning_rate": 9e-08,
"loss": 0.4089,
"step": 7950
},
{
"epoch": 347.82608695652175,
"grad_norm": 0.9665780067443848,
"learning_rate": 6.666666666666667e-09,
"loss": 0.4097,
"step": 8000
},
{
"epoch": 347.82608695652175,
"eval_loss": 0.41188350319862366,
"eval_runtime": 6.8472,
"eval_samples_per_second": 23.221,
"eval_steps_per_second": 2.921,
"step": 8000
}
],
"logging_steps": 50,
"max_steps": 8000,
"num_input_tokens_seen": 0,
"num_train_epochs": 348,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.187693654068754e+17,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}