{
  "best_metric": 0.41181260347366333,
  "best_model_checkpoint": "mikhail-panzo/zlm-fil_b64_le5_s8000/checkpoint-5000",
  "epoch": 244.44444444444446,
  "eval_steps": 500,
  "global_step": 5500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 4.803515434265137,
      "learning_rate": 2.5000000000000004e-07,
      "loss": 0.8968,
      "step": 50
    },
    {
      "epoch": 4.444444444444445,
      "grad_norm": 2.6444506645202637,
      "learning_rate": 5.000000000000001e-07,
      "loss": 0.8539,
      "step": 100
    },
    {
      "epoch": 6.666666666666667,
      "grad_norm": 1.9820537567138672,
      "learning_rate": 7.5e-07,
      "loss": 0.7856,
      "step": 150
    },
    {
      "epoch": 8.88888888888889,
      "grad_norm": 1.2031773328781128,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 0.7306,
      "step": 200
    },
    {
      "epoch": 11.11111111111111,
      "grad_norm": 1.728398084640503,
      "learning_rate": 1.25e-06,
      "loss": 0.6821,
      "step": 250
    },
    {
      "epoch": 13.333333333333334,
      "grad_norm": 1.232366681098938,
      "learning_rate": 1.5e-06,
      "loss": 0.6348,
      "step": 300
    },
    {
      "epoch": 15.555555555555555,
      "grad_norm": 1.4039260149002075,
      "learning_rate": 1.75e-06,
      "loss": 0.5962,
      "step": 350
    },
    {
      "epoch": 17.77777777777778,
      "grad_norm": 1.1848849058151245,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.5758,
      "step": 400
    },
    {
      "epoch": 20.0,
      "grad_norm": 0.9365352392196655,
      "learning_rate": 2.25e-06,
      "loss": 0.5636,
      "step": 450
    },
    {
      "epoch": 22.22222222222222,
      "grad_norm": 0.9893051981925964,
      "learning_rate": 2.5e-06,
      "loss": 0.5529,
      "step": 500
    },
    {
      "epoch": 22.22222222222222,
      "eval_loss": 0.4999743402004242,
      "eval_runtime": 6.8701,
      "eval_samples_per_second": 23.144,
      "eval_steps_per_second": 2.911,
      "step": 500
    },
    {
      "epoch": 24.444444444444443,
      "grad_norm": 0.7535696029663086,
      "learning_rate": 2.7500000000000004e-06,
      "loss": 0.5476,
      "step": 550
    },
    {
      "epoch": 26.666666666666668,
      "grad_norm": 0.7487667202949524,
      "learning_rate": 3e-06,
      "loss": 0.536,
      "step": 600
    },
    {
      "epoch": 28.88888888888889,
      "grad_norm": 0.9480589628219604,
      "learning_rate": 3.2500000000000002e-06,
      "loss": 0.5271,
      "step": 650
    },
    {
      "epoch": 31.11111111111111,
      "grad_norm": 1.0027987957000732,
      "learning_rate": 3.5e-06,
      "loss": 0.5202,
      "step": 700
    },
    {
      "epoch": 33.333333333333336,
      "grad_norm": 0.7353075742721558,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 0.5128,
      "step": 750
    },
    {
      "epoch": 35.55555555555556,
      "grad_norm": 1.2223567962646484,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.5159,
      "step": 800
    },
    {
      "epoch": 37.77777777777778,
      "grad_norm": 0.835096001625061,
      "learning_rate": 4.25e-06,
      "loss": 0.5051,
      "step": 850
    },
    {
      "epoch": 40.0,
      "grad_norm": 0.8878389000892639,
      "learning_rate": 4.5e-06,
      "loss": 0.5044,
      "step": 900
    },
    {
      "epoch": 42.22222222222222,
      "grad_norm": 0.8464574813842773,
      "learning_rate": 4.75e-06,
      "loss": 0.4984,
      "step": 950
    },
    {
      "epoch": 44.44444444444444,
      "grad_norm": 0.8119956254959106,
      "learning_rate": 5e-06,
      "loss": 0.4974,
      "step": 1000
    },
    {
      "epoch": 44.44444444444444,
      "eval_loss": 0.4557439386844635,
      "eval_runtime": 6.6345,
      "eval_samples_per_second": 23.965,
      "eval_steps_per_second": 3.015,
      "step": 1000
    },
    {
      "epoch": 46.666666666666664,
      "grad_norm": 1.2427846193313599,
      "learning_rate": 5.2500000000000006e-06,
      "loss": 0.4943,
      "step": 1050
    },
    {
      "epoch": 48.888888888888886,
      "grad_norm": 0.9691639542579651,
      "learning_rate": 5.500000000000001e-06,
      "loss": 0.4879,
      "step": 1100
    },
    {
      "epoch": 51.111111111111114,
      "grad_norm": 0.815934419631958,
      "learning_rate": 5.75e-06,
      "loss": 0.4825,
      "step": 1150
    },
    {
      "epoch": 53.333333333333336,
      "grad_norm": 1.090553641319275,
      "learning_rate": 6e-06,
      "loss": 0.4875,
      "step": 1200
    },
    {
      "epoch": 55.55555555555556,
      "grad_norm": 1.090770959854126,
      "learning_rate": 6.25e-06,
      "loss": 0.4821,
      "step": 1250
    },
    {
      "epoch": 57.77777777777778,
      "grad_norm": 1.2514219284057617,
      "learning_rate": 6.5000000000000004e-06,
      "loss": 0.4752,
      "step": 1300
    },
    {
      "epoch": 60.0,
      "grad_norm": 1.0580047369003296,
      "learning_rate": 6.750000000000001e-06,
      "loss": 0.4749,
      "step": 1350
    },
    {
      "epoch": 62.22222222222222,
      "grad_norm": 1.2864220142364502,
      "learning_rate": 7e-06,
      "loss": 0.4727,
      "step": 1400
    },
    {
      "epoch": 64.44444444444444,
      "grad_norm": 1.0031774044036865,
      "learning_rate": 7.25e-06,
      "loss": 0.4706,
      "step": 1450
    },
    {
      "epoch": 66.66666666666667,
      "grad_norm": 0.7133861184120178,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.4716,
      "step": 1500
    },
    {
      "epoch": 66.66666666666667,
      "eval_loss": 0.43592509627342224,
      "eval_runtime": 6.6514,
      "eval_samples_per_second": 23.905,
      "eval_steps_per_second": 3.007,
      "step": 1500
    },
    {
      "epoch": 68.88888888888889,
      "grad_norm": 0.8695252537727356,
      "learning_rate": 7.75e-06,
      "loss": 0.464,
      "step": 1550
    },
    {
      "epoch": 71.11111111111111,
      "grad_norm": 2.2705039978027344,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.4677,
      "step": 1600
    },
    {
      "epoch": 73.33333333333333,
      "grad_norm": 0.8814280033111572,
      "learning_rate": 8.25e-06,
      "loss": 0.4628,
      "step": 1650
    },
    {
      "epoch": 75.55555555555556,
      "grad_norm": 0.6686401963233948,
      "learning_rate": 8.5e-06,
      "loss": 0.4642,
      "step": 1700
    },
    {
      "epoch": 77.77777777777777,
      "grad_norm": 1.1037653684616089,
      "learning_rate": 8.750000000000001e-06,
      "loss": 0.4582,
      "step": 1750
    },
    {
      "epoch": 80.0,
      "grad_norm": 1.0339733362197876,
      "learning_rate": 9e-06,
      "loss": 0.4595,
      "step": 1800
    },
    {
      "epoch": 82.22222222222223,
      "grad_norm": 0.715323269367218,
      "learning_rate": 9.250000000000001e-06,
      "loss": 0.4621,
      "step": 1850
    },
    {
      "epoch": 84.44444444444444,
      "grad_norm": 0.872926652431488,
      "learning_rate": 9.5e-06,
      "loss": 0.4553,
      "step": 1900
    },
    {
      "epoch": 86.66666666666667,
      "grad_norm": 0.8996181488037109,
      "learning_rate": 9.75e-06,
      "loss": 0.4528,
      "step": 1950
    },
    {
      "epoch": 88.88888888888889,
      "grad_norm": 1.3289371728897095,
      "learning_rate": 1e-05,
      "loss": 0.453,
      "step": 2000
    },
    {
      "epoch": 88.88888888888889,
      "eval_loss": 0.4245700240135193,
      "eval_runtime": 6.6886,
      "eval_samples_per_second": 23.772,
      "eval_steps_per_second": 2.99,
      "step": 2000
    },
    {
      "epoch": 91.11111111111111,
      "grad_norm": 0.9035242795944214,
      "learning_rate": 9.916666666666668e-06,
      "loss": 0.4504,
      "step": 2050
    },
    {
      "epoch": 93.33333333333333,
      "grad_norm": 1.091410756111145,
      "learning_rate": 9.833333333333333e-06,
      "loss": 0.4528,
      "step": 2100
    },
    {
      "epoch": 95.55555555555556,
      "grad_norm": 0.7232839465141296,
      "learning_rate": 9.75e-06,
      "loss": 0.4481,
      "step": 2150
    },
    {
      "epoch": 97.77777777777777,
      "grad_norm": 0.9006613492965698,
      "learning_rate": 9.666666666666667e-06,
      "loss": 0.444,
      "step": 2200
    },
    {
      "epoch": 100.0,
      "grad_norm": 0.9088640809059143,
      "learning_rate": 9.583333333333335e-06,
      "loss": 0.4445,
      "step": 2250
    },
    {
      "epoch": 102.22222222222223,
      "grad_norm": 1.2409088611602783,
      "learning_rate": 9.5e-06,
      "loss": 0.4457,
      "step": 2300
    },
    {
      "epoch": 104.44444444444444,
      "grad_norm": 0.749260663986206,
      "learning_rate": 9.416666666666667e-06,
      "loss": 0.4453,
      "step": 2350
    },
    {
      "epoch": 106.66666666666667,
      "grad_norm": 0.7552911639213562,
      "learning_rate": 9.333333333333334e-06,
      "loss": 0.4402,
      "step": 2400
    },
    {
      "epoch": 108.88888888888889,
      "grad_norm": 1.0751075744628906,
      "learning_rate": 9.250000000000001e-06,
      "loss": 0.4417,
      "step": 2450
    },
    {
      "epoch": 111.11111111111111,
      "grad_norm": 0.8774231672286987,
      "learning_rate": 9.166666666666666e-06,
      "loss": 0.4428,
      "step": 2500
    },
    {
      "epoch": 111.11111111111111,
      "eval_loss": 0.41958874464035034,
      "eval_runtime": 6.7008,
      "eval_samples_per_second": 23.728,
      "eval_steps_per_second": 2.985,
      "step": 2500
    },
    {
      "epoch": 113.33333333333333,
      "grad_norm": 1.3239281177520752,
      "learning_rate": 9.083333333333333e-06,
      "loss": 0.4399,
      "step": 2550
    },
    {
      "epoch": 115.55555555555556,
      "grad_norm": 0.9855658411979675,
      "learning_rate": 9e-06,
      "loss": 0.4376,
      "step": 2600
    },
    {
      "epoch": 117.77777777777777,
      "grad_norm": 0.9498355984687805,
      "learning_rate": 8.916666666666667e-06,
      "loss": 0.4427,
      "step": 2650
    },
    {
      "epoch": 120.0,
      "grad_norm": 1.3080662488937378,
      "learning_rate": 8.833333333333334e-06,
      "loss": 0.437,
      "step": 2700
    },
    {
      "epoch": 122.22222222222223,
      "grad_norm": 0.6745712757110596,
      "learning_rate": 8.750000000000001e-06,
      "loss": 0.4405,
      "step": 2750
    },
    {
      "epoch": 124.44444444444444,
      "grad_norm": 1.136643409729004,
      "learning_rate": 8.666666666666668e-06,
      "loss": 0.4329,
      "step": 2800
    },
    {
      "epoch": 126.66666666666667,
      "grad_norm": 0.8538027405738831,
      "learning_rate": 8.583333333333333e-06,
      "loss": 0.4313,
      "step": 2850
    },
    {
      "epoch": 128.88888888888889,
      "grad_norm": 0.8776795268058777,
      "learning_rate": 8.5e-06,
      "loss": 0.43,
      "step": 2900
    },
    {
      "epoch": 131.11111111111111,
      "grad_norm": 0.987410306930542,
      "learning_rate": 8.416666666666667e-06,
      "loss": 0.4346,
      "step": 2950
    },
    {
      "epoch": 133.33333333333334,
      "grad_norm": 1.061345100402832,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.4332,
      "step": 3000
    },
    {
      "epoch": 133.33333333333334,
      "eval_loss": 0.4170692563056946,
      "eval_runtime": 6.8806,
      "eval_samples_per_second": 23.108,
      "eval_steps_per_second": 2.907,
      "step": 3000
    },
    {
      "epoch": 135.55555555555554,
      "grad_norm": 0.9580554962158203,
      "learning_rate": 8.25e-06,
      "loss": 0.4344,
      "step": 3050
    },
    {
      "epoch": 137.77777777777777,
      "grad_norm": 0.8951635956764221,
      "learning_rate": 8.166666666666668e-06,
      "loss": 0.4303,
      "step": 3100
    },
    {
      "epoch": 140.0,
      "grad_norm": 1.0797936916351318,
      "learning_rate": 8.083333333333334e-06,
      "loss": 0.4331,
      "step": 3150
    },
    {
      "epoch": 142.22222222222223,
      "grad_norm": 1.1288533210754395,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.425,
      "step": 3200
    },
    {
      "epoch": 144.44444444444446,
      "grad_norm": 0.9219405055046082,
      "learning_rate": 7.916666666666667e-06,
      "loss": 0.4335,
      "step": 3250
    },
    {
      "epoch": 146.66666666666666,
      "grad_norm": 0.8145209550857544,
      "learning_rate": 7.833333333333333e-06,
      "loss": 0.4249,
      "step": 3300
    },
    {
      "epoch": 148.88888888888889,
      "grad_norm": 0.7597942352294922,
      "learning_rate": 7.75e-06,
      "loss": 0.4284,
      "step": 3350
    },
    {
      "epoch": 151.11111111111111,
      "grad_norm": 1.0502862930297852,
      "learning_rate": 7.666666666666667e-06,
      "loss": 0.4285,
      "step": 3400
    },
    {
      "epoch": 153.33333333333334,
      "grad_norm": 0.9932737350463867,
      "learning_rate": 7.583333333333333e-06,
      "loss": 0.4263,
      "step": 3450
    },
    {
      "epoch": 155.55555555555554,
      "grad_norm": 0.9701213836669922,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.4246,
      "step": 3500
    },
    {
      "epoch": 155.55555555555554,
      "eval_loss": 0.41539278626441956,
      "eval_runtime": 6.9499,
      "eval_samples_per_second": 22.878,
      "eval_steps_per_second": 2.878,
      "step": 3500
    },
    {
      "epoch": 157.77777777777777,
      "grad_norm": 1.0768437385559082,
      "learning_rate": 7.416666666666668e-06,
      "loss": 0.4252,
      "step": 3550
    },
    {
      "epoch": 160.0,
      "grad_norm": 0.7991815805435181,
      "learning_rate": 7.333333333333333e-06,
      "loss": 0.4219,
      "step": 3600
    },
    {
      "epoch": 162.22222222222223,
      "grad_norm": 1.1402180194854736,
      "learning_rate": 7.25e-06,
      "loss": 0.4247,
      "step": 3650
    },
    {
      "epoch": 164.44444444444446,
      "grad_norm": 0.8970450758934021,
      "learning_rate": 7.166666666666667e-06,
      "loss": 0.4252,
      "step": 3700
    },
    {
      "epoch": 166.66666666666666,
      "grad_norm": 0.733568549156189,
      "learning_rate": 7.083333333333335e-06,
      "loss": 0.4238,
      "step": 3750
    },
    {
      "epoch": 168.88888888888889,
      "grad_norm": 1.1045589447021484,
      "learning_rate": 7e-06,
      "loss": 0.4275,
      "step": 3800
    },
    {
      "epoch": 171.11111111111111,
      "grad_norm": 1.0485199689865112,
      "learning_rate": 6.916666666666667e-06,
      "loss": 0.4295,
      "step": 3850
    },
    {
      "epoch": 173.33333333333334,
      "grad_norm": 0.9441601634025574,
      "learning_rate": 6.833333333333334e-06,
      "loss": 0.4206,
      "step": 3900
    },
    {
      "epoch": 175.55555555555554,
      "grad_norm": 0.7985955476760864,
      "learning_rate": 6.750000000000001e-06,
      "loss": 0.4263,
      "step": 3950
    },
    {
      "epoch": 177.77777777777777,
      "grad_norm": 0.6838188171386719,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.4202,
      "step": 4000
    },
    {
      "epoch": 177.77777777777777,
      "eval_loss": 0.41334837675094604,
      "eval_runtime": 6.7658,
      "eval_samples_per_second": 23.5,
      "eval_steps_per_second": 2.956,
      "step": 4000
    },
    {
      "epoch": 180.0,
      "grad_norm": 1.0729726552963257,
      "learning_rate": 6.5833333333333335e-06,
      "loss": 0.4218,
      "step": 4050
    },
    {
      "epoch": 182.22222222222223,
      "grad_norm": 0.8395955562591553,
      "learning_rate": 6.5000000000000004e-06,
      "loss": 0.4172,
      "step": 4100
    },
    {
      "epoch": 184.44444444444446,
      "grad_norm": 0.7987414002418518,
      "learning_rate": 6.416666666666667e-06,
      "loss": 0.4222,
      "step": 4150
    },
    {
      "epoch": 186.66666666666666,
      "grad_norm": 0.7480828762054443,
      "learning_rate": 6.333333333333333e-06,
      "loss": 0.4178,
      "step": 4200
    },
    {
      "epoch": 188.88888888888889,
      "grad_norm": 1.003481149673462,
      "learning_rate": 6.25e-06,
      "loss": 0.4183,
      "step": 4250
    },
    {
      "epoch": 191.11111111111111,
      "grad_norm": 1.080949306488037,
      "learning_rate": 6.168333333333334e-06,
      "loss": 0.4207,
      "step": 4300
    },
    {
      "epoch": 193.33333333333334,
      "grad_norm": 1.0521224737167358,
      "learning_rate": 6.085000000000001e-06,
      "loss": 0.4186,
      "step": 4350
    },
    {
      "epoch": 195.55555555555554,
      "grad_norm": 0.9048583507537842,
      "learning_rate": 6.001666666666667e-06,
      "loss": 0.418,
      "step": 4400
    },
    {
      "epoch": 197.77777777777777,
      "grad_norm": 1.4653879404067993,
      "learning_rate": 5.918333333333334e-06,
      "loss": 0.414,
      "step": 4450
    },
    {
      "epoch": 200.0,
      "grad_norm": 1.5214589834213257,
      "learning_rate": 5.835000000000001e-06,
      "loss": 0.4223,
      "step": 4500
    },
    {
      "epoch": 200.0,
      "eval_loss": 0.41453027725219727,
      "eval_runtime": 6.6498,
      "eval_samples_per_second": 23.91,
      "eval_steps_per_second": 3.008,
      "step": 4500
    },
    {
      "epoch": 202.22222222222223,
      "grad_norm": 1.1058125495910645,
      "learning_rate": 5.751666666666668e-06,
      "loss": 0.4164,
      "step": 4550
    },
    {
      "epoch": 204.44444444444446,
      "grad_norm": 0.8250144124031067,
      "learning_rate": 5.668333333333334e-06,
      "loss": 0.418,
      "step": 4600
    },
    {
      "epoch": 206.66666666666666,
      "grad_norm": 0.710696816444397,
      "learning_rate": 5.585000000000001e-06,
      "loss": 0.4195,
      "step": 4650
    },
    {
      "epoch": 208.88888888888889,
      "grad_norm": 0.8722075819969177,
      "learning_rate": 5.501666666666668e-06,
      "loss": 0.4127,
      "step": 4700
    },
    {
      "epoch": 211.11111111111111,
      "grad_norm": 1.548494815826416,
      "learning_rate": 5.418333333333333e-06,
      "loss": 0.4175,
      "step": 4750
    },
    {
      "epoch": 213.33333333333334,
      "grad_norm": 1.0577216148376465,
      "learning_rate": 5.335000000000001e-06,
      "loss": 0.4142,
      "step": 4800
    },
    {
      "epoch": 215.55555555555554,
      "grad_norm": 0.8775042295455933,
      "learning_rate": 5.2516666666666675e-06,
      "loss": 0.4185,
      "step": 4850
    },
    {
      "epoch": 217.77777777777777,
      "grad_norm": 0.8470151424407959,
      "learning_rate": 5.168333333333334e-06,
      "loss": 0.4107,
      "step": 4900
    },
    {
      "epoch": 220.0,
      "grad_norm": 0.9309485554695129,
      "learning_rate": 5.085e-06,
      "loss": 0.4138,
      "step": 4950
    },
    {
      "epoch": 222.22222222222223,
      "grad_norm": 0.7797917127609253,
      "learning_rate": 5.0016666666666665e-06,
      "loss": 0.4127,
      "step": 5000
    },
    {
      "epoch": 222.22222222222223,
      "eval_loss": 0.41181260347366333,
      "eval_runtime": 6.6484,
      "eval_samples_per_second": 23.915,
      "eval_steps_per_second": 3.008,
      "step": 5000
    },
    {
      "epoch": 224.44444444444446,
      "grad_norm": 0.7094744443893433,
      "learning_rate": 4.918333333333334e-06,
      "loss": 0.418,
      "step": 5050
    },
    {
      "epoch": 226.66666666666666,
      "grad_norm": 0.798126220703125,
      "learning_rate": 4.835e-06,
      "loss": 0.4099,
      "step": 5100
    },
    {
      "epoch": 228.88888888888889,
      "grad_norm": 0.8107690811157227,
      "learning_rate": 4.751666666666667e-06,
      "loss": 0.4115,
      "step": 5150
    },
    {
      "epoch": 231.11111111111111,
      "grad_norm": 0.9693463444709778,
      "learning_rate": 4.668333333333333e-06,
      "loss": 0.4096,
      "step": 5200
    },
    {
      "epoch": 233.33333333333334,
      "grad_norm": 0.6815030574798584,
      "learning_rate": 4.585e-06,
      "loss": 0.416,
      "step": 5250
    },
    {
      "epoch": 235.55555555555554,
      "grad_norm": 0.9453757405281067,
      "learning_rate": 4.501666666666667e-06,
      "loss": 0.412,
      "step": 5300
    },
    {
      "epoch": 237.77777777777777,
      "grad_norm": 0.744594395160675,
      "learning_rate": 4.418333333333334e-06,
      "loss": 0.41,
      "step": 5350
    },
    {
      "epoch": 240.0,
      "grad_norm": 0.7826279401779175,
      "learning_rate": 4.335e-06,
      "loss": 0.4174,
      "step": 5400
    },
    {
      "epoch": 242.22222222222223,
      "grad_norm": 0.8793866634368896,
      "learning_rate": 4.253333333333334e-06,
      "loss": 0.4189,
      "step": 5450
    },
    {
      "epoch": 244.44444444444446,
      "grad_norm": 0.7588925361633301,
      "learning_rate": 4.17e-06,
      "loss": 0.418,
      "step": 5500
    },
    {
      "epoch": 244.44444444444446,
      "eval_loss": 0.41301560401916504,
      "eval_runtime": 6.6915,
      "eval_samples_per_second": 23.761,
      "eval_steps_per_second": 2.989,
      "step": 5500
    }
  ],
  "logging_steps": 50,
  "max_steps": 8000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 364,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.00460473760985e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}