{
"best_metric": 0.31398773193359375,
"best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s8000/checkpoint-8000",
"epoch": 13.403141361256544,
"eval_steps": 500,
"global_step": 8000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08376963350785341,
"grad_norm": 2.9895308017730713,
"learning_rate": 2.4500000000000003e-06,
"loss": 1.0423,
"step": 50
},
{
"epoch": 0.16753926701570682,
"grad_norm": 3.051593542098999,
"learning_rate": 4.950000000000001e-06,
"loss": 0.8473,
"step": 100
},
{
"epoch": 0.2513089005235602,
"grad_norm": 2.0044381618499756,
"learning_rate": 7.45e-06,
"loss": 0.733,
"step": 150
},
{
"epoch": 0.33507853403141363,
"grad_norm": 3.4974701404571533,
"learning_rate": 9.950000000000001e-06,
"loss": 0.6511,
"step": 200
},
{
"epoch": 0.418848167539267,
"grad_norm": 1.854073405265808,
"learning_rate": 1.2450000000000001e-05,
"loss": 0.6143,
"step": 250
},
{
"epoch": 0.5026178010471204,
"grad_norm": 1.737787127494812,
"learning_rate": 1.4950000000000001e-05,
"loss": 0.5909,
"step": 300
},
{
"epoch": 0.5863874345549738,
"grad_norm": 2.0971367359161377,
"learning_rate": 1.745e-05,
"loss": 0.5684,
"step": 350
},
{
"epoch": 0.6701570680628273,
"grad_norm": 1.8380221128463745,
"learning_rate": 1.995e-05,
"loss": 0.5472,
"step": 400
},
{
"epoch": 0.7539267015706806,
"grad_norm": 3.9271857738494873,
"learning_rate": 2.245e-05,
"loss": 0.5287,
"step": 450
},
{
"epoch": 0.837696335078534,
"grad_norm": 7.809891700744629,
"learning_rate": 2.495e-05,
"loss": 0.5174,
"step": 500
},
{
"epoch": 0.837696335078534,
"eval_loss": 0.4793977439403534,
"eval_runtime": 265.0789,
"eval_samples_per_second": 32.024,
"eval_steps_per_second": 4.006,
"step": 500
},
{
"epoch": 0.9214659685863874,
"grad_norm": 2.2309463024139404,
"learning_rate": 2.7450000000000003e-05,
"loss": 0.5084,
"step": 550
},
{
"epoch": 1.0052356020942408,
"grad_norm": 1.8079086542129517,
"learning_rate": 2.995e-05,
"loss": 0.4954,
"step": 600
},
{
"epoch": 1.0890052356020943,
"grad_norm": 6.239879608154297,
"learning_rate": 3.245e-05,
"loss": 0.4954,
"step": 650
},
{
"epoch": 1.1727748691099475,
"grad_norm": 12.593622207641602,
"learning_rate": 3.495e-05,
"loss": 0.4946,
"step": 700
},
{
"epoch": 1.256544502617801,
"grad_norm": 3.1568186283111572,
"learning_rate": 3.745e-05,
"loss": 0.4768,
"step": 750
},
{
"epoch": 1.3403141361256545,
"grad_norm": 3.9486255645751953,
"learning_rate": 3.995e-05,
"loss": 0.4737,
"step": 800
},
{
"epoch": 1.4240837696335078,
"grad_norm": 2.641502618789673,
"learning_rate": 4.245e-05,
"loss": 0.4706,
"step": 850
},
{
"epoch": 1.5078534031413613,
"grad_norm": 1.9798855781555176,
"learning_rate": 4.495e-05,
"loss": 0.4605,
"step": 900
},
{
"epoch": 1.5916230366492146,
"grad_norm": 2.691363573074341,
"learning_rate": 4.745e-05,
"loss": 0.4554,
"step": 950
},
{
"epoch": 1.675392670157068,
"grad_norm": 3.704902410507202,
"learning_rate": 4.995e-05,
"loss": 0.4561,
"step": 1000
},
{
"epoch": 1.675392670157068,
"eval_loss": 0.402164101600647,
"eval_runtime": 265.618,
"eval_samples_per_second": 31.959,
"eval_steps_per_second": 3.998,
"step": 1000
},
{
"epoch": 1.7591623036649215,
"grad_norm": 4.97900390625,
"learning_rate": 5.245e-05,
"loss": 0.4553,
"step": 1050
},
{
"epoch": 1.8429319371727748,
"grad_norm": 1.9889676570892334,
"learning_rate": 5.495e-05,
"loss": 0.449,
"step": 1100
},
{
"epoch": 1.9267015706806283,
"grad_norm": 1.5135546922683716,
"learning_rate": 5.745e-05,
"loss": 0.4353,
"step": 1150
},
{
"epoch": 2.0104712041884816,
"grad_norm": 7.610673904418945,
"learning_rate": 5.995000000000001e-05,
"loss": 0.4311,
"step": 1200
},
{
"epoch": 2.094240837696335,
"grad_norm": 2.049562454223633,
"learning_rate": 6.245000000000001e-05,
"loss": 0.4312,
"step": 1250
},
{
"epoch": 2.1780104712041886,
"grad_norm": 1.4102027416229248,
"learning_rate": 6.494999999999999e-05,
"loss": 0.4282,
"step": 1300
},
{
"epoch": 2.261780104712042,
"grad_norm": 1.701119065284729,
"learning_rate": 6.745e-05,
"loss": 0.4272,
"step": 1350
},
{
"epoch": 2.345549738219895,
"grad_norm": 2.0149667263031006,
"learning_rate": 6.995e-05,
"loss": 0.4277,
"step": 1400
},
{
"epoch": 2.4293193717277486,
"grad_norm": 2.1658883094787598,
"learning_rate": 7.245000000000001e-05,
"loss": 0.4247,
"step": 1450
},
{
"epoch": 2.513089005235602,
"grad_norm": 2.6821463108062744,
"learning_rate": 7.495e-05,
"loss": 0.4169,
"step": 1500
},
{
"epoch": 2.513089005235602,
"eval_loss": 0.3853827118873596,
"eval_runtime": 269.2607,
"eval_samples_per_second": 31.527,
"eval_steps_per_second": 3.944,
"step": 1500
},
{
"epoch": 2.5968586387434556,
"grad_norm": 1.4221985340118408,
"learning_rate": 7.745e-05,
"loss": 0.4145,
"step": 1550
},
{
"epoch": 2.680628272251309,
"grad_norm": 3.651655435562134,
"learning_rate": 7.995e-05,
"loss": 0.4129,
"step": 1600
},
{
"epoch": 2.7643979057591626,
"grad_norm": 1.2700576782226562,
"learning_rate": 8.245e-05,
"loss": 0.4106,
"step": 1650
},
{
"epoch": 2.8481675392670156,
"grad_norm": 1.8690059185028076,
"learning_rate": 8.495e-05,
"loss": 0.4134,
"step": 1700
},
{
"epoch": 2.931937172774869,
"grad_norm": 2.483203649520874,
"learning_rate": 8.745000000000001e-05,
"loss": 0.413,
"step": 1750
},
{
"epoch": 3.0157068062827226,
"grad_norm": 1.738501787185669,
"learning_rate": 8.995e-05,
"loss": 0.4063,
"step": 1800
},
{
"epoch": 3.099476439790576,
"grad_norm": 1.837342619895935,
"learning_rate": 9.245e-05,
"loss": 0.4049,
"step": 1850
},
{
"epoch": 3.183246073298429,
"grad_norm": 1.284974455833435,
"learning_rate": 9.495e-05,
"loss": 0.4065,
"step": 1900
},
{
"epoch": 3.2670157068062826,
"grad_norm": 2.637281656265259,
"learning_rate": 9.745000000000001e-05,
"loss": 0.3986,
"step": 1950
},
{
"epoch": 3.350785340314136,
"grad_norm": 1.4775941371917725,
"learning_rate": 9.995e-05,
"loss": 0.4054,
"step": 2000
},
{
"epoch": 3.350785340314136,
"eval_loss": 0.36490994691848755,
"eval_runtime": 265.9165,
"eval_samples_per_second": 31.924,
"eval_steps_per_second": 3.994,
"step": 2000
},
{
"epoch": 3.4345549738219896,
"grad_norm": 2.121384859085083,
"learning_rate": 9.918333333333334e-05,
"loss": 0.4058,
"step": 2050
},
{
"epoch": 3.518324607329843,
"grad_norm": 1.645984411239624,
"learning_rate": 9.835e-05,
"loss": 0.4021,
"step": 2100
},
{
"epoch": 3.6020942408376966,
"grad_norm": 1.246239185333252,
"learning_rate": 9.751666666666666e-05,
"loss": 0.3991,
"step": 2150
},
{
"epoch": 3.6858638743455496,
"grad_norm": 1.9096795320510864,
"learning_rate": 9.668333333333334e-05,
"loss": 0.3961,
"step": 2200
},
{
"epoch": 3.769633507853403,
"grad_norm": 1.8867601156234741,
"learning_rate": 9.585000000000001e-05,
"loss": 0.3904,
"step": 2250
},
{
"epoch": 3.8534031413612566,
"grad_norm": 1.7438101768493652,
"learning_rate": 9.501666666666668e-05,
"loss": 0.3895,
"step": 2300
},
{
"epoch": 3.93717277486911,
"grad_norm": 1.1799490451812744,
"learning_rate": 9.418333333333334e-05,
"loss": 0.4027,
"step": 2350
},
{
"epoch": 4.020942408376963,
"grad_norm": 1.1952763795852661,
"learning_rate": 9.335e-05,
"loss": 0.3893,
"step": 2400
},
{
"epoch": 4.104712041884817,
"grad_norm": 2.008756160736084,
"learning_rate": 9.251666666666667e-05,
"loss": 0.3878,
"step": 2450
},
{
"epoch": 4.18848167539267,
"grad_norm": 2.2693591117858887,
"learning_rate": 9.168333333333333e-05,
"loss": 0.3863,
"step": 2500
},
{
"epoch": 4.18848167539267,
"eval_loss": 0.3528364896774292,
"eval_runtime": 272.7627,
"eval_samples_per_second": 31.122,
"eval_steps_per_second": 3.893,
"step": 2500
},
{
"epoch": 4.272251308900524,
"grad_norm": 1.0409568548202515,
"learning_rate": 9.086666666666666e-05,
"loss": 0.3894,
"step": 2550
},
{
"epoch": 4.356020942408377,
"grad_norm": 2.047908306121826,
"learning_rate": 9.003333333333333e-05,
"loss": 0.3862,
"step": 2600
},
{
"epoch": 4.439790575916231,
"grad_norm": 1.767168641090393,
"learning_rate": 8.92e-05,
"loss": 0.3832,
"step": 2650
},
{
"epoch": 4.523560209424084,
"grad_norm": 1.1918233633041382,
"learning_rate": 8.836666666666667e-05,
"loss": 0.3839,
"step": 2700
},
{
"epoch": 4.607329842931938,
"grad_norm": 0.9553185105323792,
"learning_rate": 8.753333333333334e-05,
"loss": 0.3807,
"step": 2750
},
{
"epoch": 4.69109947643979,
"grad_norm": 1.1633609533309937,
"learning_rate": 8.67e-05,
"loss": 0.3836,
"step": 2800
},
{
"epoch": 4.774869109947644,
"grad_norm": 1.1633822917938232,
"learning_rate": 8.586666666666668e-05,
"loss": 0.3815,
"step": 2850
},
{
"epoch": 4.858638743455497,
"grad_norm": 1.7152239084243774,
"learning_rate": 8.503333333333334e-05,
"loss": 0.3809,
"step": 2900
},
{
"epoch": 4.942408376963351,
"grad_norm": 1.2379584312438965,
"learning_rate": 8.42e-05,
"loss": 0.3801,
"step": 2950
},
{
"epoch": 5.026178010471204,
"grad_norm": 1.1686451435089111,
"learning_rate": 8.336666666666667e-05,
"loss": 0.3807,
"step": 3000
},
{
"epoch": 5.026178010471204,
"eval_loss": 0.3480445444583893,
"eval_runtime": 272.7064,
"eval_samples_per_second": 31.129,
"eval_steps_per_second": 3.894,
"step": 3000
},
{
"epoch": 5.109947643979058,
"grad_norm": 1.5680384635925293,
"learning_rate": 8.253333333333334e-05,
"loss": 0.3739,
"step": 3050
},
{
"epoch": 5.193717277486911,
"grad_norm": 1.5168578624725342,
"learning_rate": 8.17e-05,
"loss": 0.3756,
"step": 3100
},
{
"epoch": 5.277486910994765,
"grad_norm": 1.0925545692443848,
"learning_rate": 8.086666666666666e-05,
"loss": 0.3739,
"step": 3150
},
{
"epoch": 5.361256544502618,
"grad_norm": 0.8022084832191467,
"learning_rate": 8.003333333333333e-05,
"loss": 0.3736,
"step": 3200
},
{
"epoch": 5.445026178010472,
"grad_norm": 1.6964820623397827,
"learning_rate": 7.920000000000001e-05,
"loss": 0.379,
"step": 3250
},
{
"epoch": 5.528795811518324,
"grad_norm": 1.0717741250991821,
"learning_rate": 7.836666666666667e-05,
"loss": 0.3762,
"step": 3300
},
{
"epoch": 5.612565445026178,
"grad_norm": 1.2660809755325317,
"learning_rate": 7.753333333333334e-05,
"loss": 0.3708,
"step": 3350
},
{
"epoch": 5.696335078534031,
"grad_norm": 1.6065802574157715,
"learning_rate": 7.670000000000001e-05,
"loss": 0.3717,
"step": 3400
},
{
"epoch": 5.780104712041885,
"grad_norm": 1.2089744806289673,
"learning_rate": 7.586666666666668e-05,
"loss": 0.3715,
"step": 3450
},
{
"epoch": 5.863874345549738,
"grad_norm": 3.1524300575256348,
"learning_rate": 7.503333333333333e-05,
"loss": 0.3708,
"step": 3500
},
{
"epoch": 5.863874345549738,
"eval_loss": 0.3431868255138397,
"eval_runtime": 272.9384,
"eval_samples_per_second": 31.102,
"eval_steps_per_second": 3.891,
"step": 3500
},
{
"epoch": 5.947643979057592,
"grad_norm": 1.914294719696045,
"learning_rate": 7.42e-05,
"loss": 0.3673,
"step": 3550
},
{
"epoch": 6.031413612565445,
"grad_norm": 1.3186005353927612,
"learning_rate": 7.336666666666667e-05,
"loss": 0.3665,
"step": 3600
},
{
"epoch": 6.115183246073299,
"grad_norm": 2.019273519515991,
"learning_rate": 7.253333333333334e-05,
"loss": 0.3697,
"step": 3650
},
{
"epoch": 6.198952879581152,
"grad_norm": 1.0517597198486328,
"learning_rate": 7.17e-05,
"loss": 0.3674,
"step": 3700
},
{
"epoch": 6.282722513089006,
"grad_norm": 1.0202686786651611,
"learning_rate": 7.086666666666666e-05,
"loss": 0.3706,
"step": 3750
},
{
"epoch": 6.366492146596858,
"grad_norm": 1.4179818630218506,
"learning_rate": 7.003333333333335e-05,
"loss": 0.3681,
"step": 3800
},
{
"epoch": 6.450261780104712,
"grad_norm": 1.3820505142211914,
"learning_rate": 6.92e-05,
"loss": 0.3671,
"step": 3850
},
{
"epoch": 6.534031413612565,
"grad_norm": 1.3857202529907227,
"learning_rate": 6.836666666666667e-05,
"loss": 0.3641,
"step": 3900
},
{
"epoch": 6.617801047120419,
"grad_norm": 1.0996108055114746,
"learning_rate": 6.753333333333334e-05,
"loss": 0.3685,
"step": 3950
},
{
"epoch": 6.701570680628272,
"grad_norm": 0.9405946731567383,
"learning_rate": 6.670000000000001e-05,
"loss": 0.366,
"step": 4000
},
{
"epoch": 6.701570680628272,
"eval_loss": 0.33282962441444397,
"eval_runtime": 271.6112,
"eval_samples_per_second": 31.254,
"eval_steps_per_second": 3.91,
"step": 4000
},
{
"epoch": 6.785340314136126,
"grad_norm": 0.9167115092277527,
"learning_rate": 6.586666666666666e-05,
"loss": 0.3618,
"step": 4050
},
{
"epoch": 6.869109947643979,
"grad_norm": 1.0496320724487305,
"learning_rate": 6.503333333333333e-05,
"loss": 0.3624,
"step": 4100
},
{
"epoch": 6.952879581151833,
"grad_norm": 1.7927188873291016,
"learning_rate": 6.42e-05,
"loss": 0.3621,
"step": 4150
},
{
"epoch": 7.036649214659686,
"grad_norm": 1.3362383842468262,
"learning_rate": 6.336666666666667e-05,
"loss": 0.3631,
"step": 4200
},
{
"epoch": 7.12041884816754,
"grad_norm": 1.5686593055725098,
"learning_rate": 6.253333333333333e-05,
"loss": 0.3672,
"step": 4250
},
{
"epoch": 7.204188481675392,
"grad_norm": 2.042232036590576,
"learning_rate": 6.170000000000001e-05,
"loss": 0.3598,
"step": 4300
},
{
"epoch": 7.287958115183246,
"grad_norm": 1.2040599584579468,
"learning_rate": 6.086666666666667e-05,
"loss": 0.3628,
"step": 4350
},
{
"epoch": 7.371727748691099,
"grad_norm": 1.3659878969192505,
"learning_rate": 6.003333333333334e-05,
"loss": 0.3626,
"step": 4400
},
{
"epoch": 7.455497382198953,
"grad_norm": 1.9217562675476074,
"learning_rate": 5.92e-05,
"loss": 0.3636,
"step": 4450
},
{
"epoch": 7.539267015706806,
"grad_norm": 1.5920127630233765,
"learning_rate": 5.836666666666667e-05,
"loss": 0.3575,
"step": 4500
},
{
"epoch": 7.539267015706806,
"eval_loss": 0.33003953099250793,
"eval_runtime": 274.282,
"eval_samples_per_second": 30.95,
"eval_steps_per_second": 3.872,
"step": 4500
},
{
"epoch": 7.62303664921466,
"grad_norm": 1.3059043884277344,
"learning_rate": 5.753333333333334e-05,
"loss": 0.3603,
"step": 4550
},
{
"epoch": 7.706806282722513,
"grad_norm": 1.6313170194625854,
"learning_rate": 5.6699999999999996e-05,
"loss": 0.3622,
"step": 4600
},
{
"epoch": 7.790575916230367,
"grad_norm": 1.7790875434875488,
"learning_rate": 5.5866666666666665e-05,
"loss": 0.3635,
"step": 4650
},
{
"epoch": 7.87434554973822,
"grad_norm": 1.1874761581420898,
"learning_rate": 5.5033333333333334e-05,
"loss": 0.3635,
"step": 4700
},
{
"epoch": 7.958115183246074,
"grad_norm": 0.9745219349861145,
"learning_rate": 5.420000000000001e-05,
"loss": 0.357,
"step": 4750
},
{
"epoch": 8.041884816753926,
"grad_norm": 1.3074321746826172,
"learning_rate": 5.3366666666666665e-05,
"loss": 0.3543,
"step": 4800
},
{
"epoch": 8.12565445026178,
"grad_norm": 1.3384989500045776,
"learning_rate": 5.2533333333333334e-05,
"loss": 0.3556,
"step": 4850
},
{
"epoch": 8.209424083769633,
"grad_norm": 1.0133076906204224,
"learning_rate": 5.17e-05,
"loss": 0.3654,
"step": 4900
},
{
"epoch": 8.293193717277488,
"grad_norm": 1.194360375404358,
"learning_rate": 5.086666666666667e-05,
"loss": 0.3547,
"step": 4950
},
{
"epoch": 8.37696335078534,
"grad_norm": 1.049672245979309,
"learning_rate": 5.0033333333333334e-05,
"loss": 0.3607,
"step": 5000
},
{
"epoch": 8.37696335078534,
"eval_loss": 0.3284249007701874,
"eval_runtime": 274.7134,
"eval_samples_per_second": 30.901,
"eval_steps_per_second": 3.866,
"step": 5000
},
{
"epoch": 8.460732984293193,
"grad_norm": 1.1301287412643433,
"learning_rate": 4.92e-05,
"loss": 0.3542,
"step": 5050
},
{
"epoch": 8.544502617801047,
"grad_norm": 1.0920559167861938,
"learning_rate": 4.836666666666667e-05,
"loss": 0.3534,
"step": 5100
},
{
"epoch": 8.6282722513089,
"grad_norm": 1.1312081813812256,
"learning_rate": 4.7533333333333334e-05,
"loss": 0.3598,
"step": 5150
},
{
"epoch": 8.712041884816754,
"grad_norm": 1.5819182395935059,
"learning_rate": 4.6700000000000003e-05,
"loss": 0.3537,
"step": 5200
},
{
"epoch": 8.795811518324607,
"grad_norm": 1.0059967041015625,
"learning_rate": 4.5866666666666666e-05,
"loss": 0.3553,
"step": 5250
},
{
"epoch": 8.879581151832461,
"grad_norm": 1.1407588720321655,
"learning_rate": 4.5033333333333335e-05,
"loss": 0.3557,
"step": 5300
},
{
"epoch": 8.963350785340314,
"grad_norm": 1.296221137046814,
"learning_rate": 4.4200000000000004e-05,
"loss": 0.3545,
"step": 5350
},
{
"epoch": 9.047120418848168,
"grad_norm": 1.4306052923202515,
"learning_rate": 4.3366666666666666e-05,
"loss": 0.3583,
"step": 5400
},
{
"epoch": 9.13089005235602,
"grad_norm": 1.1458420753479004,
"learning_rate": 4.2533333333333335e-05,
"loss": 0.3503,
"step": 5450
},
{
"epoch": 9.214659685863875,
"grad_norm": 0.9508205056190491,
"learning_rate": 4.17e-05,
"loss": 0.352,
"step": 5500
},
{
"epoch": 9.214659685863875,
"eval_loss": 0.3231986165046692,
"eval_runtime": 273.3343,
"eval_samples_per_second": 31.057,
"eval_steps_per_second": 3.885,
"step": 5500
},
{
"epoch": 9.298429319371728,
"grad_norm": 2.127049446105957,
"learning_rate": 4.086666666666667e-05,
"loss": 0.3547,
"step": 5550
},
{
"epoch": 9.38219895287958,
"grad_norm": 1.5531483888626099,
"learning_rate": 4.0033333333333335e-05,
"loss": 0.3576,
"step": 5600
},
{
"epoch": 9.465968586387435,
"grad_norm": 1.5712753534317017,
"learning_rate": 3.9200000000000004e-05,
"loss": 0.3492,
"step": 5650
},
{
"epoch": 9.549738219895287,
"grad_norm": 1.1668370962142944,
"learning_rate": 3.8366666666666666e-05,
"loss": 0.3529,
"step": 5700
},
{
"epoch": 9.633507853403142,
"grad_norm": 1.1639858484268188,
"learning_rate": 3.7533333333333335e-05,
"loss": 0.3526,
"step": 5750
},
{
"epoch": 9.717277486910994,
"grad_norm": 1.4188268184661865,
"learning_rate": 3.6700000000000004e-05,
"loss": 0.3527,
"step": 5800
},
{
"epoch": 9.801047120418849,
"grad_norm": 1.8606890439987183,
"learning_rate": 3.586666666666667e-05,
"loss": 0.3517,
"step": 5850
},
{
"epoch": 9.884816753926701,
"grad_norm": 1.2933155298233032,
"learning_rate": 3.5033333333333336e-05,
"loss": 0.3463,
"step": 5900
},
{
"epoch": 9.968586387434556,
"grad_norm": 1.4728389978408813,
"learning_rate": 3.4200000000000005e-05,
"loss": 0.3505,
"step": 5950
},
{
"epoch": 10.052356020942408,
"grad_norm": 1.6055335998535156,
"learning_rate": 3.336666666666667e-05,
"loss": 0.3471,
"step": 6000
},
{
"epoch": 10.052356020942408,
"eval_loss": 0.3189197778701782,
"eval_runtime": 273.3525,
"eval_samples_per_second": 31.055,
"eval_steps_per_second": 3.885,
"step": 6000
},
{
"epoch": 10.136125654450261,
"grad_norm": 1.572178840637207,
"learning_rate": 3.253333333333333e-05,
"loss": 0.3492,
"step": 6050
},
{
"epoch": 10.219895287958115,
"grad_norm": 1.063959002494812,
"learning_rate": 3.1700000000000005e-05,
"loss": 0.3525,
"step": 6100
},
{
"epoch": 10.303664921465968,
"grad_norm": 1.1579703092575073,
"learning_rate": 3.086666666666667e-05,
"loss": 0.3486,
"step": 6150
},
{
"epoch": 10.387434554973822,
"grad_norm": 1.4260714054107666,
"learning_rate": 3.0033333333333336e-05,
"loss": 0.3483,
"step": 6200
},
{
"epoch": 10.471204188481675,
"grad_norm": 1.453321099281311,
"learning_rate": 2.9199999999999998e-05,
"loss": 0.3481,
"step": 6250
},
{
"epoch": 10.55497382198953,
"grad_norm": 1.8545498847961426,
"learning_rate": 2.836666666666667e-05,
"loss": 0.3482,
"step": 6300
},
{
"epoch": 10.638743455497382,
"grad_norm": 1.073957920074463,
"learning_rate": 2.7533333333333333e-05,
"loss": 0.348,
"step": 6350
},
{
"epoch": 10.722513089005236,
"grad_norm": 1.0049316883087158,
"learning_rate": 2.6700000000000002e-05,
"loss": 0.3487,
"step": 6400
},
{
"epoch": 10.806282722513089,
"grad_norm": 1.4970500469207764,
"learning_rate": 2.5866666666666667e-05,
"loss": 0.3468,
"step": 6450
},
{
"epoch": 10.890052356020943,
"grad_norm": 1.6566526889801025,
"learning_rate": 2.5033333333333336e-05,
"loss": 0.3469,
"step": 6500
},
{
"epoch": 10.890052356020943,
"eval_loss": 0.31707289814949036,
"eval_runtime": 275.2143,
"eval_samples_per_second": 30.845,
"eval_steps_per_second": 3.859,
"step": 6500
},
{
"epoch": 10.973821989528796,
"grad_norm": 1.1960996389389038,
"learning_rate": 2.4200000000000002e-05,
"loss": 0.3478,
"step": 6550
},
{
"epoch": 11.057591623036648,
"grad_norm": 0.97001713514328,
"learning_rate": 2.3366666666666668e-05,
"loss": 0.3475,
"step": 6600
},
{
"epoch": 11.141361256544503,
"grad_norm": 1.1384519338607788,
"learning_rate": 2.2533333333333333e-05,
"loss": 0.3484,
"step": 6650
},
{
"epoch": 11.225130890052355,
"grad_norm": 0.9649496078491211,
"learning_rate": 2.1700000000000002e-05,
"loss": 0.3454,
"step": 6700
},
{
"epoch": 11.30890052356021,
"grad_norm": 1.0407809019088745,
"learning_rate": 2.0866666666666668e-05,
"loss": 0.3446,
"step": 6750
},
{
"epoch": 11.392670157068062,
"grad_norm": 1.087108850479126,
"learning_rate": 2.0033333333333334e-05,
"loss": 0.3475,
"step": 6800
},
{
"epoch": 11.476439790575917,
"grad_norm": 0.8870049715042114,
"learning_rate": 1.9200000000000003e-05,
"loss": 0.3454,
"step": 6850
},
{
"epoch": 11.56020942408377,
"grad_norm": 1.0377373695373535,
"learning_rate": 1.8366666666666668e-05,
"loss": 0.3447,
"step": 6900
},
{
"epoch": 11.643979057591624,
"grad_norm": 1.138604760169983,
"learning_rate": 1.7533333333333334e-05,
"loss": 0.345,
"step": 6950
},
{
"epoch": 11.727748691099476,
"grad_norm": 1.6464053392410278,
"learning_rate": 1.6700000000000003e-05,
"loss": 0.3465,
"step": 7000
},
{
"epoch": 11.727748691099476,
"eval_loss": 0.31611359119415283,
"eval_runtime": 271.5426,
"eval_samples_per_second": 31.262,
"eval_steps_per_second": 3.911,
"step": 7000
},
{
"epoch": 11.81151832460733,
"grad_norm": 0.9007428288459778,
"learning_rate": 1.586666666666667e-05,
"loss": 0.3461,
"step": 7050
},
{
"epoch": 11.895287958115183,
"grad_norm": 1.1172393560409546,
"learning_rate": 1.5033333333333336e-05,
"loss": 0.3489,
"step": 7100
},
{
"epoch": 11.979057591623036,
"grad_norm": 1.0454591512680054,
"learning_rate": 1.42e-05,
"loss": 0.3429,
"step": 7150
},
{
"epoch": 12.06282722513089,
"grad_norm": 1.1494097709655762,
"learning_rate": 1.3383333333333335e-05,
"loss": 0.3523,
"step": 7200
},
{
"epoch": 12.146596858638743,
"grad_norm": 0.8459360003471375,
"learning_rate": 1.255e-05,
"loss": 0.3469,
"step": 7250
},
{
"epoch": 12.230366492146597,
"grad_norm": 1.034494161605835,
"learning_rate": 1.1716666666666667e-05,
"loss": 0.3451,
"step": 7300
},
{
"epoch": 12.31413612565445,
"grad_norm": 1.1340311765670776,
"learning_rate": 1.0883333333333335e-05,
"loss": 0.3441,
"step": 7350
},
{
"epoch": 12.397905759162304,
"grad_norm": 1.4063786268234253,
"learning_rate": 1.005e-05,
"loss": 0.3472,
"step": 7400
},
{
"epoch": 12.481675392670157,
"grad_norm": 1.271904468536377,
"learning_rate": 9.216666666666666e-06,
"loss": 0.3458,
"step": 7450
},
{
"epoch": 12.565445026178011,
"grad_norm": 1.2097103595733643,
"learning_rate": 8.383333333333333e-06,
"loss": 0.3443,
"step": 7500
},
{
"epoch": 12.565445026178011,
"eval_loss": 0.3158954679965973,
"eval_runtime": 271.3497,
"eval_samples_per_second": 31.284,
"eval_steps_per_second": 3.914,
"step": 7500
},
{
"epoch": 12.649214659685864,
"grad_norm": 1.0162887573242188,
"learning_rate": 7.55e-06,
"loss": 0.3432,
"step": 7550
},
{
"epoch": 12.732984293193716,
"grad_norm": 0.977488100528717,
"learning_rate": 6.716666666666667e-06,
"loss": 0.3457,
"step": 7600
},
{
"epoch": 12.81675392670157,
"grad_norm": 0.8293308019638062,
"learning_rate": 5.8833333333333335e-06,
"loss": 0.3445,
"step": 7650
},
{
"epoch": 12.900523560209423,
"grad_norm": 0.9540147185325623,
"learning_rate": 5.050000000000001e-06,
"loss": 0.3444,
"step": 7700
},
{
"epoch": 12.984293193717278,
"grad_norm": 1.6243274211883545,
"learning_rate": 4.216666666666666e-06,
"loss": 0.3481,
"step": 7750
},
{
"epoch": 13.06806282722513,
"grad_norm": 0.8080894351005554,
"learning_rate": 3.3833333333333337e-06,
"loss": 0.3416,
"step": 7800
},
{
"epoch": 13.151832460732985,
"grad_norm": 0.8952440023422241,
"learning_rate": 2.55e-06,
"loss": 0.3423,
"step": 7850
},
{
"epoch": 13.235602094240837,
"grad_norm": 0.6296280026435852,
"learning_rate": 1.7166666666666668e-06,
"loss": 0.3454,
"step": 7900
},
{
"epoch": 13.319371727748692,
"grad_norm": 1.5492085218429565,
"learning_rate": 8.833333333333334e-07,
"loss": 0.344,
"step": 7950
},
{
"epoch": 13.403141361256544,
"grad_norm": 0.6316419839859009,
"learning_rate": 5.0000000000000004e-08,
"loss": 0.3432,
"step": 8000
},
{
"epoch": 13.403141361256544,
"eval_loss": 0.31398773193359375,
"eval_runtime": 271.4112,
"eval_samples_per_second": 31.277,
"eval_steps_per_second": 3.913,
"step": 8000
}
],
"logging_steps": 50,
"max_steps": 8000,
"num_input_tokens_seen": 0,
"num_train_epochs": 14,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.4332007404692806e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}