{
"best_metric": 0.40593138337135315,
"best_model_checkpoint": "mikhail_panzo/zlm-fil_b64_le5_s8000/checkpoint-7000",
"epoch": 347.82608695652175,
"eval_steps": 500,
"global_step": 8000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 2.1739130434782608,
"grad_norm": 5.992666721343994,
"learning_rate": 2.5000000000000004e-07,
"loss": 0.9248,
"step": 50
},
{
"epoch": 4.3478260869565215,
"grad_norm": 3.4944448471069336,
"learning_rate": 4.95e-07,
"loss": 0.8709,
"step": 100
},
{
"epoch": 6.521739130434782,
"grad_norm": 1.9902064800262451,
"learning_rate": 7.450000000000001e-07,
"loss": 0.8005,
"step": 150
},
{
"epoch": 8.695652173913043,
"grad_norm": 1.4649031162261963,
"learning_rate": 9.950000000000002e-07,
"loss": 0.733,
"step": 200
},
{
"epoch": 10.869565217391305,
"grad_norm": 1.4645055532455444,
"learning_rate": 1.2450000000000002e-06,
"loss": 0.6758,
"step": 250
},
{
"epoch": 13.043478260869565,
"grad_norm": 1.5862590074539185,
"learning_rate": 1.495e-06,
"loss": 0.6246,
"step": 300
},
{
"epoch": 15.217391304347826,
"grad_norm": 1.198890209197998,
"learning_rate": 1.745e-06,
"loss": 0.5917,
"step": 350
},
{
"epoch": 17.391304347826086,
"grad_norm": 1.324212908744812,
"learning_rate": 1.9950000000000004e-06,
"loss": 0.5721,
"step": 400
},
{
"epoch": 19.565217391304348,
"grad_norm": 1.1638662815093994,
"learning_rate": 2.245e-06,
"loss": 0.5635,
"step": 450
},
{
"epoch": 21.73913043478261,
"grad_norm": 0.9293428063392639,
"learning_rate": 2.4950000000000003e-06,
"loss": 0.5541,
"step": 500
},
{
"epoch": 21.73913043478261,
"eval_loss": 0.49769046902656555,
"eval_runtime": 12.8521,
"eval_samples_per_second": 12.527,
"eval_steps_per_second": 1.634,
"step": 500
},
{
"epoch": 23.91304347826087,
"grad_norm": 0.7982977628707886,
"learning_rate": 2.7450000000000004e-06,
"loss": 0.5487,
"step": 550
},
{
"epoch": 26.08695652173913,
"grad_norm": 0.8491495251655579,
"learning_rate": 2.995e-06,
"loss": 0.5373,
"step": 600
},
{
"epoch": 28.26086956521739,
"grad_norm": 0.7158030867576599,
"learning_rate": 3.2450000000000003e-06,
"loss": 0.5271,
"step": 650
},
{
"epoch": 30.434782608695652,
"grad_norm": 1.2020411491394043,
"learning_rate": 3.495e-06,
"loss": 0.5201,
"step": 700
},
{
"epoch": 32.608695652173914,
"grad_norm": 0.7339745163917542,
"learning_rate": 3.745e-06,
"loss": 0.5126,
"step": 750
},
{
"epoch": 34.78260869565217,
"grad_norm": 1.1129218339920044,
"learning_rate": 3.995000000000001e-06,
"loss": 0.5136,
"step": 800
},
{
"epoch": 36.95652173913044,
"grad_norm": 0.7733961939811707,
"learning_rate": 4.245e-06,
"loss": 0.5099,
"step": 850
},
{
"epoch": 39.130434782608695,
"grad_norm": 1.1559356451034546,
"learning_rate": 4.495e-06,
"loss": 0.5047,
"step": 900
},
{
"epoch": 41.30434782608695,
"grad_norm": 0.873518705368042,
"learning_rate": 4.745e-06,
"loss": 0.4955,
"step": 950
},
{
"epoch": 43.47826086956522,
"grad_norm": 0.703760027885437,
"learning_rate": 4.9950000000000005e-06,
"loss": 0.4931,
"step": 1000
},
{
"epoch": 43.47826086956522,
"eval_loss": 0.4528730511665344,
"eval_runtime": 12.5404,
"eval_samples_per_second": 12.838,
"eval_steps_per_second": 1.675,
"step": 1000
},
{
"epoch": 45.65217391304348,
"grad_norm": 0.9317522644996643,
"learning_rate": 5.245e-06,
"loss": 0.4922,
"step": 1050
},
{
"epoch": 47.82608695652174,
"grad_norm": 0.9118973016738892,
"learning_rate": 5.495000000000001e-06,
"loss": 0.4855,
"step": 1100
},
{
"epoch": 50.0,
"grad_norm": 2.0197670459747314,
"learning_rate": 5.745000000000001e-06,
"loss": 0.485,
"step": 1150
},
{
"epoch": 52.17391304347826,
"grad_norm": 1.1067094802856445,
"learning_rate": 5.995000000000001e-06,
"loss": 0.4872,
"step": 1200
},
{
"epoch": 54.34782608695652,
"grad_norm": 1.20389723777771,
"learning_rate": 6.245000000000001e-06,
"loss": 0.4836,
"step": 1250
},
{
"epoch": 56.52173913043478,
"grad_norm": 0.9784926176071167,
"learning_rate": 6.4950000000000005e-06,
"loss": 0.4793,
"step": 1300
},
{
"epoch": 58.69565217391305,
"grad_norm": 1.4276039600372314,
"learning_rate": 6.745000000000001e-06,
"loss": 0.4774,
"step": 1350
},
{
"epoch": 60.869565217391305,
"grad_norm": 1.117233157157898,
"learning_rate": 6.995000000000001e-06,
"loss": 0.4776,
"step": 1400
},
{
"epoch": 63.04347826086956,
"grad_norm": 0.9267759919166565,
"learning_rate": 7.245000000000001e-06,
"loss": 0.4682,
"step": 1450
},
{
"epoch": 65.21739130434783,
"grad_norm": 0.8514929413795471,
"learning_rate": 7.495000000000001e-06,
"loss": 0.4695,
"step": 1500
},
{
"epoch": 65.21739130434783,
"eval_loss": 0.43296581506729126,
"eval_runtime": 12.858,
"eval_samples_per_second": 12.521,
"eval_steps_per_second": 1.633,
"step": 1500
},
{
"epoch": 67.3913043478261,
"grad_norm": 1.6638849973678589,
"learning_rate": 7.745e-06,
"loss": 0.4674,
"step": 1550
},
{
"epoch": 69.56521739130434,
"grad_norm": 0.8030112981796265,
"learning_rate": 7.995e-06,
"loss": 0.4652,
"step": 1600
},
{
"epoch": 71.73913043478261,
"grad_norm": 1.2407337427139282,
"learning_rate": 8.245000000000002e-06,
"loss": 0.4638,
"step": 1650
},
{
"epoch": 73.91304347826087,
"grad_norm": 1.0331224203109741,
"learning_rate": 8.495e-06,
"loss": 0.4685,
"step": 1700
},
{
"epoch": 76.08695652173913,
"grad_norm": 2.4681615829467773,
"learning_rate": 8.745000000000002e-06,
"loss": 0.4627,
"step": 1750
},
{
"epoch": 78.26086956521739,
"grad_norm": 0.9344178438186646,
"learning_rate": 8.995000000000001e-06,
"loss": 0.4575,
"step": 1800
},
{
"epoch": 80.43478260869566,
"grad_norm": 1.0561341047286987,
"learning_rate": 9.245e-06,
"loss": 0.4621,
"step": 1850
},
{
"epoch": 82.6086956521739,
"grad_norm": 0.9816989302635193,
"learning_rate": 9.495000000000001e-06,
"loss": 0.4544,
"step": 1900
},
{
"epoch": 84.78260869565217,
"grad_norm": 1.4779256582260132,
"learning_rate": 9.745e-06,
"loss": 0.4579,
"step": 1950
},
{
"epoch": 86.95652173913044,
"grad_norm": 1.1634950637817383,
"learning_rate": 9.995000000000002e-06,
"loss": 0.4518,
"step": 2000
},
{
"epoch": 86.95652173913044,
"eval_loss": 0.42301931977272034,
"eval_runtime": 12.725,
"eval_samples_per_second": 12.652,
"eval_steps_per_second": 1.65,
"step": 2000
},
{
"epoch": 89.1304347826087,
"grad_norm": 1.0197867155075073,
"learning_rate": 9.918333333333335e-06,
"loss": 0.4501,
"step": 2050
},
{
"epoch": 91.30434782608695,
"grad_norm": 1.2543045282363892,
"learning_rate": 9.835000000000002e-06,
"loss": 0.4499,
"step": 2100
},
{
"epoch": 93.47826086956522,
"grad_norm": 0.8574827909469604,
"learning_rate": 9.751666666666667e-06,
"loss": 0.4485,
"step": 2150
},
{
"epoch": 95.65217391304348,
"grad_norm": 1.3784998655319214,
"learning_rate": 9.668333333333334e-06,
"loss": 0.4507,
"step": 2200
},
{
"epoch": 97.82608695652173,
"grad_norm": 0.9518347978591919,
"learning_rate": 9.585e-06,
"loss": 0.4486,
"step": 2250
},
{
"epoch": 100.0,
"grad_norm": 2.422320604324341,
"learning_rate": 9.501666666666667e-06,
"loss": 0.4455,
"step": 2300
},
{
"epoch": 102.17391304347827,
"grad_norm": 1.3441377878189087,
"learning_rate": 9.418333333333334e-06,
"loss": 0.45,
"step": 2350
},
{
"epoch": 104.34782608695652,
"grad_norm": 0.969160795211792,
"learning_rate": 9.335000000000001e-06,
"loss": 0.4454,
"step": 2400
},
{
"epoch": 106.52173913043478,
"grad_norm": 2.1275179386138916,
"learning_rate": 9.251666666666668e-06,
"loss": 0.4459,
"step": 2450
},
{
"epoch": 108.69565217391305,
"grad_norm": 1.3673362731933594,
"learning_rate": 9.168333333333333e-06,
"loss": 0.4442,
"step": 2500
},
{
"epoch": 108.69565217391305,
"eval_loss": 0.4178585410118103,
"eval_runtime": 13.0534,
"eval_samples_per_second": 12.334,
"eval_steps_per_second": 1.609,
"step": 2500
},
{
"epoch": 110.8695652173913,
"grad_norm": 1.3769398927688599,
"learning_rate": 9.085e-06,
"loss": 0.4406,
"step": 2550
},
{
"epoch": 113.04347826086956,
"grad_norm": 0.9913681745529175,
"learning_rate": 9.001666666666667e-06,
"loss": 0.4389,
"step": 2600
},
{
"epoch": 115.21739130434783,
"grad_norm": 1.1747106313705444,
"learning_rate": 8.918333333333334e-06,
"loss": 0.4386,
"step": 2650
},
{
"epoch": 117.3913043478261,
"grad_norm": 1.0514781475067139,
"learning_rate": 8.835000000000001e-06,
"loss": 0.4393,
"step": 2700
},
{
"epoch": 119.56521739130434,
"grad_norm": 1.8967915773391724,
"learning_rate": 8.751666666666668e-06,
"loss": 0.4421,
"step": 2750
},
{
"epoch": 121.73913043478261,
"grad_norm": 0.8795832395553589,
"learning_rate": 8.668333333333335e-06,
"loss": 0.436,
"step": 2800
},
{
"epoch": 123.91304347826087,
"grad_norm": 0.7928704023361206,
"learning_rate": 8.585000000000002e-06,
"loss": 0.4333,
"step": 2850
},
{
"epoch": 126.08695652173913,
"grad_norm": 1.2805510759353638,
"learning_rate": 8.501666666666667e-06,
"loss": 0.4326,
"step": 2900
},
{
"epoch": 128.2608695652174,
"grad_norm": 1.420920968055725,
"learning_rate": 8.418333333333334e-06,
"loss": 0.4317,
"step": 2950
},
{
"epoch": 130.43478260869566,
"grad_norm": 0.7063888907432556,
"learning_rate": 8.335e-06,
"loss": 0.4344,
"step": 3000
},
{
"epoch": 130.43478260869566,
"eval_loss": 0.41350311040878296,
"eval_runtime": 12.8066,
"eval_samples_per_second": 12.572,
"eval_steps_per_second": 1.64,
"step": 3000
},
{
"epoch": 132.6086956521739,
"grad_norm": 1.8065855503082275,
"learning_rate": 8.251666666666668e-06,
"loss": 0.4361,
"step": 3050
},
{
"epoch": 134.7826086956522,
"grad_norm": 0.8073704838752747,
"learning_rate": 8.168333333333334e-06,
"loss": 0.4339,
"step": 3100
},
{
"epoch": 136.95652173913044,
"grad_norm": 1.2890065908432007,
"learning_rate": 8.085000000000001e-06,
"loss": 0.4325,
"step": 3150
},
{
"epoch": 139.1304347826087,
"grad_norm": 1.336401104927063,
"learning_rate": 8.001666666666668e-06,
"loss": 0.4334,
"step": 3200
},
{
"epoch": 141.30434782608697,
"grad_norm": 1.2965891361236572,
"learning_rate": 7.918333333333333e-06,
"loss": 0.4298,
"step": 3250
},
{
"epoch": 143.47826086956522,
"grad_norm": 0.8761409521102905,
"learning_rate": 7.835e-06,
"loss": 0.4231,
"step": 3300
},
{
"epoch": 145.65217391304347,
"grad_norm": 1.1475930213928223,
"learning_rate": 7.751666666666667e-06,
"loss": 0.4316,
"step": 3350
},
{
"epoch": 147.82608695652175,
"grad_norm": 0.8305974006652832,
"learning_rate": 7.668333333333334e-06,
"loss": 0.4277,
"step": 3400
},
{
"epoch": 150.0,
"grad_norm": 1.6335935592651367,
"learning_rate": 7.585e-06,
"loss": 0.4248,
"step": 3450
},
{
"epoch": 152.17391304347825,
"grad_norm": 1.1171984672546387,
"learning_rate": 7.501666666666667e-06,
"loss": 0.4318,
"step": 3500
},
{
"epoch": 152.17391304347825,
"eval_loss": 0.4111216962337494,
"eval_runtime": 13.0991,
"eval_samples_per_second": 12.291,
"eval_steps_per_second": 1.603,
"step": 3500
},
{
"epoch": 154.34782608695653,
"grad_norm": 0.8932999968528748,
"learning_rate": 7.418333333333334e-06,
"loss": 0.4287,
"step": 3550
},
{
"epoch": 156.52173913043478,
"grad_norm": 0.9644981622695923,
"learning_rate": 7.335000000000001e-06,
"loss": 0.4253,
"step": 3600
},
{
"epoch": 158.69565217391303,
"grad_norm": 1.5559085607528687,
"learning_rate": 7.251666666666667e-06,
"loss": 0.4282,
"step": 3650
},
{
"epoch": 160.8695652173913,
"grad_norm": 1.0696306228637695,
"learning_rate": 7.168333333333334e-06,
"loss": 0.4221,
"step": 3700
},
{
"epoch": 163.04347826086956,
"grad_norm": 1.0170419216156006,
"learning_rate": 7.085000000000001e-06,
"loss": 0.4254,
"step": 3750
},
{
"epoch": 165.2173913043478,
"grad_norm": 0.7487155795097351,
"learning_rate": 7.001666666666668e-06,
"loss": 0.4269,
"step": 3800
},
{
"epoch": 167.3913043478261,
"grad_norm": 0.7589255571365356,
"learning_rate": 6.918333333333334e-06,
"loss": 0.4252,
"step": 3850
},
{
"epoch": 169.56521739130434,
"grad_norm": 0.9557852745056152,
"learning_rate": 6.835000000000001e-06,
"loss": 0.4224,
"step": 3900
},
{
"epoch": 171.7391304347826,
"grad_norm": 0.7511025667190552,
"learning_rate": 6.7516666666666675e-06,
"loss": 0.4201,
"step": 3950
},
{
"epoch": 173.91304347826087,
"grad_norm": 0.9032562375068665,
"learning_rate": 6.668333333333334e-06,
"loss": 0.4201,
"step": 4000
},
{
"epoch": 173.91304347826087,
"eval_loss": 0.41097140312194824,
"eval_runtime": 12.8806,
"eval_samples_per_second": 12.499,
"eval_steps_per_second": 1.63,
"step": 4000
},
{
"epoch": 176.08695652173913,
"grad_norm": 0.9379887580871582,
"learning_rate": 6.5866666666666666e-06,
"loss": 0.426,
"step": 4050
},
{
"epoch": 178.2608695652174,
"grad_norm": 1.2516026496887207,
"learning_rate": 6.5033333333333335e-06,
"loss": 0.4244,
"step": 4100
},
{
"epoch": 180.43478260869566,
"grad_norm": 0.898951530456543,
"learning_rate": 6.42e-06,
"loss": 0.4212,
"step": 4150
},
{
"epoch": 182.6086956521739,
"grad_norm": 0.810229480266571,
"learning_rate": 6.336666666666667e-06,
"loss": 0.4188,
"step": 4200
},
{
"epoch": 184.7826086956522,
"grad_norm": 0.8796959519386292,
"learning_rate": 6.253333333333333e-06,
"loss": 0.4196,
"step": 4250
},
{
"epoch": 186.95652173913044,
"grad_norm": 0.8476919531822205,
"learning_rate": 6.17e-06,
"loss": 0.4215,
"step": 4300
},
{
"epoch": 189.1304347826087,
"grad_norm": 0.8933534622192383,
"learning_rate": 6.086666666666667e-06,
"loss": 0.4209,
"step": 4350
},
{
"epoch": 191.30434782608697,
"grad_norm": 0.965189516544342,
"learning_rate": 6.003333333333334e-06,
"loss": 0.4157,
"step": 4400
},
{
"epoch": 193.47826086956522,
"grad_norm": 0.6911047101020813,
"learning_rate": 5.92e-06,
"loss": 0.4156,
"step": 4450
},
{
"epoch": 195.65217391304347,
"grad_norm": 0.98875492811203,
"learning_rate": 5.836666666666667e-06,
"loss": 0.4185,
"step": 4500
},
{
"epoch": 195.65217391304347,
"eval_loss": 0.40913259983062744,
"eval_runtime": 13.1748,
"eval_samples_per_second": 12.22,
"eval_steps_per_second": 1.594,
"step": 4500
},
{
"epoch": 197.82608695652175,
"grad_norm": 0.8193331360816956,
"learning_rate": 5.753333333333334e-06,
"loss": 0.4171,
"step": 4550
},
{
"epoch": 200.0,
"grad_norm": 2.4391345977783203,
"learning_rate": 5.67e-06,
"loss": 0.4156,
"step": 4600
},
{
"epoch": 202.17391304347825,
"grad_norm": 0.7653400301933289,
"learning_rate": 5.586666666666667e-06,
"loss": 0.4198,
"step": 4650
},
{
"epoch": 204.34782608695653,
"grad_norm": 0.9531933069229126,
"learning_rate": 5.503333333333334e-06,
"loss": 0.4165,
"step": 4700
},
{
"epoch": 206.52173913043478,
"grad_norm": 1.7444751262664795,
"learning_rate": 5.420000000000001e-06,
"loss": 0.417,
"step": 4750
},
{
"epoch": 208.69565217391303,
"grad_norm": 0.9747681021690369,
"learning_rate": 5.336666666666667e-06,
"loss": 0.4133,
"step": 4800
},
{
"epoch": 210.8695652173913,
"grad_norm": 1.063536524772644,
"learning_rate": 5.2533333333333336e-06,
"loss": 0.4153,
"step": 4850
},
{
"epoch": 213.04347826086956,
"grad_norm": 0.8719898462295532,
"learning_rate": 5.1700000000000005e-06,
"loss": 0.4104,
"step": 4900
},
{
"epoch": 215.2173913043478,
"grad_norm": 0.7953347563743591,
"learning_rate": 5.086666666666667e-06,
"loss": 0.4204,
"step": 4950
},
{
"epoch": 217.3913043478261,
"grad_norm": 0.9870523810386658,
"learning_rate": 5.0033333333333334e-06,
"loss": 0.4153,
"step": 5000
},
{
"epoch": 217.3913043478261,
"eval_loss": 0.4096957743167877,
"eval_runtime": 13.2951,
"eval_samples_per_second": 12.11,
"eval_steps_per_second": 1.58,
"step": 5000
},
{
"epoch": 219.56521739130434,
"grad_norm": 0.9140251874923706,
"learning_rate": 4.92e-06,
"loss": 0.4142,
"step": 5050
},
{
"epoch": 221.7391304347826,
"grad_norm": 0.7339205741882324,
"learning_rate": 4.836666666666667e-06,
"loss": 0.4126,
"step": 5100
},
{
"epoch": 223.91304347826087,
"grad_norm": 0.8861171007156372,
"learning_rate": 4.753333333333333e-06,
"loss": 0.4189,
"step": 5150
},
{
"epoch": 226.08695652173913,
"grad_norm": 0.7763547301292419,
"learning_rate": 4.670000000000001e-06,
"loss": 0.415,
"step": 5200
},
{
"epoch": 228.2608695652174,
"grad_norm": 0.913635790348053,
"learning_rate": 4.586666666666667e-06,
"loss": 0.413,
"step": 5250
},
{
"epoch": 230.43478260869566,
"grad_norm": 1.0177322626113892,
"learning_rate": 4.503333333333333e-06,
"loss": 0.4119,
"step": 5300
},
{
"epoch": 232.6086956521739,
"grad_norm": 1.0655298233032227,
"learning_rate": 4.42e-06,
"loss": 0.4122,
"step": 5350
},
{
"epoch": 234.7826086956522,
"grad_norm": 0.84236079454422,
"learning_rate": 4.336666666666667e-06,
"loss": 0.4104,
"step": 5400
},
{
"epoch": 236.95652173913044,
"grad_norm": 1.0191460847854614,
"learning_rate": 4.253333333333334e-06,
"loss": 0.4128,
"step": 5450
},
{
"epoch": 239.1304347826087,
"grad_norm": 0.7806400060653687,
"learning_rate": 4.17e-06,
"loss": 0.414,
"step": 5500
},
{
"epoch": 239.1304347826087,
"eval_loss": 0.4069381058216095,
"eval_runtime": 13.5672,
"eval_samples_per_second": 11.867,
"eval_steps_per_second": 1.548,
"step": 5500
},
{
"epoch": 241.30434782608697,
"grad_norm": 0.866113007068634,
"learning_rate": 4.086666666666667e-06,
"loss": 0.4117,
"step": 5550
},
{
"epoch": 243.47826086956522,
"grad_norm": 0.8896365165710449,
"learning_rate": 4.003333333333334e-06,
"loss": 0.4161,
"step": 5600
},
{
"epoch": 245.65217391304347,
"grad_norm": 0.8311805129051208,
"learning_rate": 3.920000000000001e-06,
"loss": 0.4094,
"step": 5650
},
{
"epoch": 247.82608695652175,
"grad_norm": 1.686919927597046,
"learning_rate": 3.836666666666667e-06,
"loss": 0.4128,
"step": 5700
},
{
"epoch": 250.0,
"grad_norm": 1.9034643173217773,
"learning_rate": 3.753333333333334e-06,
"loss": 0.4143,
"step": 5750
},
{
"epoch": 252.17391304347825,
"grad_norm": 0.8296219706535339,
"learning_rate": 3.6700000000000004e-06,
"loss": 0.4123,
"step": 5800
},
{
"epoch": 254.34782608695653,
"grad_norm": 0.6855341792106628,
"learning_rate": 3.5866666666666673e-06,
"loss": 0.4099,
"step": 5850
},
{
"epoch": 256.5217391304348,
"grad_norm": 1.0125635862350464,
"learning_rate": 3.5033333333333334e-06,
"loss": 0.4104,
"step": 5900
},
{
"epoch": 258.69565217391306,
"grad_norm": 0.8589877486228943,
"learning_rate": 3.4200000000000007e-06,
"loss": 0.4069,
"step": 5950
},
{
"epoch": 260.8695652173913,
"grad_norm": 0.9898667931556702,
"learning_rate": 3.3366666666666668e-06,
"loss": 0.4113,
"step": 6000
},
{
"epoch": 260.8695652173913,
"eval_loss": 0.40804117918014526,
"eval_runtime": 13.2074,
"eval_samples_per_second": 12.19,
"eval_steps_per_second": 1.59,
"step": 6000
},
{
"epoch": 263.04347826086956,
"grad_norm": 1.1999317407608032,
"learning_rate": 3.2533333333333332e-06,
"loss": 0.4121,
"step": 6050
},
{
"epoch": 265.2173913043478,
"grad_norm": 0.8878545761108398,
"learning_rate": 3.17e-06,
"loss": 0.4112,
"step": 6100
},
{
"epoch": 267.39130434782606,
"grad_norm": 0.7173246145248413,
"learning_rate": 3.0866666666666666e-06,
"loss": 0.4079,
"step": 6150
},
{
"epoch": 269.5652173913044,
"grad_norm": 0.8617014288902283,
"learning_rate": 3.0033333333333335e-06,
"loss": 0.4095,
"step": 6200
},
{
"epoch": 271.7391304347826,
"grad_norm": 1.0037391185760498,
"learning_rate": 2.92e-06,
"loss": 0.4086,
"step": 6250
},
{
"epoch": 273.9130434782609,
"grad_norm": 1.068651795387268,
"learning_rate": 2.836666666666667e-06,
"loss": 0.4136,
"step": 6300
},
{
"epoch": 276.0869565217391,
"grad_norm": 0.7301667332649231,
"learning_rate": 2.7533333333333334e-06,
"loss": 0.4094,
"step": 6350
},
{
"epoch": 278.2608695652174,
"grad_norm": 0.7777795195579529,
"learning_rate": 2.6700000000000003e-06,
"loss": 0.4099,
"step": 6400
},
{
"epoch": 280.4347826086956,
"grad_norm": 2.096015453338623,
"learning_rate": 2.5866666666666667e-06,
"loss": 0.4142,
"step": 6450
},
{
"epoch": 282.60869565217394,
"grad_norm": 1.2232917547225952,
"learning_rate": 2.5033333333333336e-06,
"loss": 0.4133,
"step": 6500
},
{
"epoch": 282.60869565217394,
"eval_loss": 0.40729132294654846,
"eval_runtime": 13.1317,
"eval_samples_per_second": 12.26,
"eval_steps_per_second": 1.599,
"step": 6500
},
{
"epoch": 284.7826086956522,
"grad_norm": 0.8479878902435303,
"learning_rate": 2.42e-06,
"loss": 0.4069,
"step": 6550
},
{
"epoch": 286.95652173913044,
"grad_norm": 0.9611658453941345,
"learning_rate": 2.3366666666666666e-06,
"loss": 0.4115,
"step": 6600
},
{
"epoch": 289.1304347826087,
"grad_norm": 0.9590737819671631,
"learning_rate": 2.2533333333333335e-06,
"loss": 0.4115,
"step": 6650
},
{
"epoch": 291.30434782608694,
"grad_norm": 0.8980545401573181,
"learning_rate": 2.17e-06,
"loss": 0.4075,
"step": 6700
},
{
"epoch": 293.4782608695652,
"grad_norm": 1.0965752601623535,
"learning_rate": 2.086666666666667e-06,
"loss": 0.4097,
"step": 6750
},
{
"epoch": 295.6521739130435,
"grad_norm": 0.7770458459854126,
"learning_rate": 2.0033333333333334e-06,
"loss": 0.4122,
"step": 6800
},
{
"epoch": 297.82608695652175,
"grad_norm": 0.7583262920379639,
"learning_rate": 1.9200000000000003e-06,
"loss": 0.4072,
"step": 6850
},
{
"epoch": 300.0,
"grad_norm": 1.9386942386627197,
"learning_rate": 1.836666666666667e-06,
"loss": 0.4096,
"step": 6900
},
{
"epoch": 302.17391304347825,
"grad_norm": 0.9514946341514587,
"learning_rate": 1.7533333333333336e-06,
"loss": 0.4101,
"step": 6950
},
{
"epoch": 304.3478260869565,
"grad_norm": 1.0851093530654907,
"learning_rate": 1.6716666666666666e-06,
"loss": 0.4095,
"step": 7000
},
{
"epoch": 304.3478260869565,
"eval_loss": 0.40593138337135315,
"eval_runtime": 13.2236,
"eval_samples_per_second": 12.175,
"eval_steps_per_second": 1.588,
"step": 7000
},
{
"epoch": 306.5217391304348,
"grad_norm": 0.8590102791786194,
"learning_rate": 1.5883333333333333e-06,
"loss": 0.4127,
"step": 7050
},
{
"epoch": 308.69565217391306,
"grad_norm": 0.9405089616775513,
"learning_rate": 1.505e-06,
"loss": 0.413,
"step": 7100
},
{
"epoch": 310.8695652173913,
"grad_norm": 0.7493429780006409,
"learning_rate": 1.4216666666666667e-06,
"loss": 0.4137,
"step": 7150
},
{
"epoch": 313.04347826086956,
"grad_norm": 0.8461248278617859,
"learning_rate": 1.3383333333333334e-06,
"loss": 0.4104,
"step": 7200
},
{
"epoch": 315.2173913043478,
"grad_norm": 0.6727828979492188,
"learning_rate": 1.255e-06,
"loss": 0.4071,
"step": 7250
},
{
"epoch": 317.39130434782606,
"grad_norm": 0.7143705487251282,
"learning_rate": 1.1716666666666667e-06,
"loss": 0.4086,
"step": 7300
},
{
"epoch": 319.5652173913044,
"grad_norm": 0.8034409284591675,
"learning_rate": 1.0883333333333334e-06,
"loss": 0.4051,
"step": 7350
},
{
"epoch": 321.7391304347826,
"grad_norm": 0.7853546738624573,
"learning_rate": 1.0050000000000001e-06,
"loss": 0.4067,
"step": 7400
},
{
"epoch": 323.9130434782609,
"grad_norm": 0.8830783367156982,
"learning_rate": 9.216666666666667e-07,
"loss": 0.4089,
"step": 7450
},
{
"epoch": 326.0869565217391,
"grad_norm": 0.8115708231925964,
"learning_rate": 8.383333333333334e-07,
"loss": 0.4129,
"step": 7500
},
{
"epoch": 326.0869565217391,
"eval_loss": 0.4083060622215271,
"eval_runtime": 12.9398,
"eval_samples_per_second": 12.442,
"eval_steps_per_second": 1.623,
"step": 7500
},
{
"epoch": 328.2608695652174,
"grad_norm": 0.6245155334472656,
"learning_rate": 7.550000000000001e-07,
"loss": 0.4067,
"step": 7550
},
{
"epoch": 330.4347826086956,
"grad_norm": 0.7212103009223938,
"learning_rate": 6.716666666666668e-07,
"loss": 0.4038,
"step": 7600
},
{
"epoch": 332.60869565217394,
"grad_norm": 0.8665474057197571,
"learning_rate": 5.883333333333333e-07,
"loss": 0.4048,
"step": 7650
},
{
"epoch": 334.7826086956522,
"grad_norm": 0.7773168087005615,
"learning_rate": 5.05e-07,
"loss": 0.4054,
"step": 7700
},
{
"epoch": 336.95652173913044,
"grad_norm": 0.7131752967834473,
"learning_rate": 4.2166666666666667e-07,
"loss": 0.4058,
"step": 7750
},
{
"epoch": 339.1304347826087,
"grad_norm": 0.7420967221260071,
"learning_rate": 3.3833333333333336e-07,
"loss": 0.409,
"step": 7800
},
{
"epoch": 341.30434782608694,
"grad_norm": 0.866454541683197,
"learning_rate": 2.55e-07,
"loss": 0.4071,
"step": 7850
},
{
"epoch": 343.4782608695652,
"grad_norm": 0.6852492094039917,
"learning_rate": 1.7166666666666668e-07,
"loss": 0.4054,
"step": 7900
},
{
"epoch": 345.6521739130435,
"grad_norm": 0.8399931788444519,
"learning_rate": 8.833333333333334e-08,
"loss": 0.4033,
"step": 7950
},
{
"epoch": 347.82608695652175,
"grad_norm": 0.8192908763885498,
"learning_rate": 5e-09,
"loss": 0.4035,
"step": 8000
},
{
"epoch": 347.82608695652175,
"eval_loss": 0.40772464871406555,
"eval_runtime": 13.1445,
"eval_samples_per_second": 12.248,
"eval_steps_per_second": 1.598,
"step": 8000
}
],
"logging_steps": 50,
"max_steps": 8000,
"num_input_tokens_seen": 0,
"num_train_epochs": 348,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.695483990986939e+16,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}