{
"best_metric": 0.402925044298172,
"best_model_checkpoint": "mikhail-panzo/fil_b128_le5_s8000/checkpoint-4500",
"epoch": 652.1739130434783,
"eval_steps": 500,
"global_step": 7500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 4.3478260869565215,
"grad_norm": 4.460227966308594,
"learning_rate": 2.5000000000000004e-07,
"loss": 0.814,
"step": 50
},
{
"epoch": 8.695652173913043,
"grad_norm": 2.496962785720825,
"learning_rate": 5.000000000000001e-07,
"loss": 0.7805,
"step": 100
},
{
"epoch": 13.043478260869565,
"grad_norm": 2.0162875652313232,
"learning_rate": 7.5e-07,
"loss": 0.7591,
"step": 150
},
{
"epoch": 17.391304347826086,
"grad_norm": 1.1545039415359497,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.7243,
"step": 200
},
{
"epoch": 21.73913043478261,
"grad_norm": 1.3521718978881836,
"learning_rate": 1.25e-06,
"loss": 0.7066,
"step": 250
},
{
"epoch": 26.08695652173913,
"grad_norm": 2.4172327518463135,
"learning_rate": 1.5e-06,
"loss": 0.6976,
"step": 300
},
{
"epoch": 30.434782608695652,
"grad_norm": 0.9816296696662903,
"learning_rate": 1.75e-06,
"loss": 0.6674,
"step": 350
},
{
"epoch": 34.78260869565217,
"grad_norm": 2.370589017868042,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.6427,
"step": 400
},
{
"epoch": 39.130434782608695,
"grad_norm": 1.0166665315628052,
"learning_rate": 2.25e-06,
"loss": 0.5999,
"step": 450
},
{
"epoch": 43.47826086956522,
"grad_norm": 0.9980425238609314,
"learning_rate": 2.5e-06,
"loss": 0.5575,
"step": 500
},
{
"epoch": 43.47826086956522,
"eval_loss": 0.4795418679714203,
"eval_runtime": 12.6598,
"eval_samples_per_second": 12.717,
"eval_steps_per_second": 1.659,
"step": 500
},
{
"epoch": 47.82608695652174,
"grad_norm": 1.1796629428863525,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.5467,
"step": 550
},
{
"epoch": 52.17391304347826,
"grad_norm": 1.1312341690063477,
"learning_rate": 3e-06,
"loss": 0.5345,
"step": 600
},
{
"epoch": 56.52173913043478,
"grad_norm": 0.855106770992279,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.5315,
"step": 650
},
{
"epoch": 60.869565217391305,
"grad_norm": 1.4999840259552002,
"learning_rate": 3.5e-06,
"loss": 0.5258,
"step": 700
},
{
"epoch": 65.21739130434783,
"grad_norm": 1.2242575883865356,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.5167,
"step": 750
},
{
"epoch": 69.56521739130434,
"grad_norm": 0.9953985810279846,
"learning_rate": 4.000000000000001e-06,
"loss": 0.5032,
"step": 800
},
{
"epoch": 73.91304347826087,
"grad_norm": 0.9109128713607788,
"learning_rate": 4.25e-06,
"loss": 0.5094,
"step": 850
},
{
"epoch": 78.26086956521739,
"grad_norm": 1.020887851715088,
"learning_rate": 4.5e-06,
"loss": 0.4988,
"step": 900
},
{
"epoch": 82.6086956521739,
"grad_norm": 1.0142054557800293,
"learning_rate": 4.75e-06,
"loss": 0.4911,
"step": 950
},
{
"epoch": 86.95652173913044,
"grad_norm": 0.9454975128173828,
"learning_rate": 5e-06,
"loss": 0.4877,
"step": 1000
},
{
"epoch": 86.95652173913044,
"eval_loss": 0.4371128976345062,
"eval_runtime": 12.7935,
"eval_samples_per_second": 12.585,
"eval_steps_per_second": 1.641,
"step": 1000
},
{
"epoch": 91.30434782608695,
"grad_norm": 1.0903185606002808,
"learning_rate": 5.2500000000000006e-06,
"loss": 0.4903,
"step": 1050
},
{
"epoch": 95.65217391304348,
"grad_norm": 1.2410277128219604,
"learning_rate": 5.500000000000001e-06,
"loss": 0.4831,
"step": 1100
},
{
"epoch": 100.0,
"grad_norm": 1.5670720338821411,
"learning_rate": 5.75e-06,
"loss": 0.4772,
"step": 1150
},
{
"epoch": 104.34782608695652,
"grad_norm": 0.8733631372451782,
"learning_rate": 6e-06,
"loss": 0.4797,
"step": 1200
},
{
"epoch": 108.69565217391305,
"grad_norm": 1.095543384552002,
"learning_rate": 6.25e-06,
"loss": 0.4785,
"step": 1250
},
{
"epoch": 113.04347826086956,
"grad_norm": 1.7245070934295654,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.4734,
"step": 1300
},
{
"epoch": 117.3913043478261,
"grad_norm": 1.1396911144256592,
"learning_rate": 6.750000000000001e-06,
"loss": 0.4742,
"step": 1350
},
{
"epoch": 121.73913043478261,
"grad_norm": 1.195086121559143,
"learning_rate": 7e-06,
"loss": 0.4711,
"step": 1400
},
{
"epoch": 126.08695652173913,
"grad_norm": 0.9940504431724548,
"learning_rate": 7.25e-06,
"loss": 0.4613,
"step": 1450
},
{
"epoch": 130.43478260869566,
"grad_norm": 0.9867850542068481,
"learning_rate": 7.500000000000001e-06,
"loss": 0.4582,
"step": 1500
},
{
"epoch": 130.43478260869566,
"eval_loss": 0.4196487069129944,
"eval_runtime": 12.7443,
"eval_samples_per_second": 12.633,
"eval_steps_per_second": 1.648,
"step": 1500
},
{
"epoch": 134.7826086956522,
"grad_norm": 0.9487758874893188,
"learning_rate": 7.75e-06,
"loss": 0.4613,
"step": 1550
},
{
"epoch": 139.1304347826087,
"grad_norm": 1.4387928247451782,
"learning_rate": 8.000000000000001e-06,
"loss": 0.4602,
"step": 1600
},
{
"epoch": 143.47826086956522,
"grad_norm": 1.1203278303146362,
"learning_rate": 8.25e-06,
"loss": 0.4583,
"step": 1650
},
{
"epoch": 147.82608695652175,
"grad_norm": 0.8609073162078857,
"learning_rate": 8.5e-06,
"loss": 0.4535,
"step": 1700
},
{
"epoch": 152.17391304347825,
"grad_norm": 0.8195172548294067,
"learning_rate": 8.750000000000001e-06,
"loss": 0.4546,
"step": 1750
},
{
"epoch": 156.52173913043478,
"grad_norm": 0.9077767729759216,
"learning_rate": 9e-06,
"loss": 0.4466,
"step": 1800
},
{
"epoch": 160.8695652173913,
"grad_norm": 0.9574931263923645,
"learning_rate": 9.250000000000001e-06,
"loss": 0.4537,
"step": 1850
},
{
"epoch": 165.2173913043478,
"grad_norm": 0.8706139326095581,
"learning_rate": 9.5e-06,
"loss": 0.449,
"step": 1900
},
{
"epoch": 169.56521739130434,
"grad_norm": 1.2113181352615356,
"learning_rate": 9.75e-06,
"loss": 0.4513,
"step": 1950
},
{
"epoch": 173.91304347826087,
"grad_norm": 1.0548679828643799,
"learning_rate": 1e-05,
"loss": 0.4486,
"step": 2000
},
{
"epoch": 173.91304347826087,
"eval_loss": 0.41451236605644226,
"eval_runtime": 13.2202,
"eval_samples_per_second": 12.178,
"eval_steps_per_second": 1.588,
"step": 2000
},
{
"epoch": 178.2608695652174,
"grad_norm": 1.1025744676589966,
"learning_rate": 9.916666666666668e-06,
"loss": 0.4463,
"step": 2050
},
{
"epoch": 182.6086956521739,
"grad_norm": 1.0485080480575562,
"learning_rate": 9.833333333333333e-06,
"loss": 0.4452,
"step": 2100
},
{
"epoch": 186.95652173913044,
"grad_norm": 0.9438555240631104,
"learning_rate": 9.751666666666667e-06,
"loss": 0.4401,
"step": 2150
},
{
"epoch": 191.30434782608697,
"grad_norm": 0.8600581884384155,
"learning_rate": 9.668333333333334e-06,
"loss": 0.4406,
"step": 2200
},
{
"epoch": 195.65217391304347,
"grad_norm": 0.915958046913147,
"learning_rate": 9.585e-06,
"loss": 0.4402,
"step": 2250
},
{
"epoch": 200.0,
"grad_norm": 1.931151032447815,
"learning_rate": 9.501666666666667e-06,
"loss": 0.439,
"step": 2300
},
{
"epoch": 204.34782608695653,
"grad_norm": 1.006366491317749,
"learning_rate": 9.418333333333334e-06,
"loss": 0.4386,
"step": 2350
},
{
"epoch": 208.69565217391303,
"grad_norm": 1.2179653644561768,
"learning_rate": 9.335000000000001e-06,
"loss": 0.4381,
"step": 2400
},
{
"epoch": 213.04347826086956,
"grad_norm": 1.779303789138794,
"learning_rate": 9.251666666666668e-06,
"loss": 0.4342,
"step": 2450
},
{
"epoch": 217.3913043478261,
"grad_norm": 0.9080890417098999,
"learning_rate": 9.168333333333333e-06,
"loss": 0.4353,
"step": 2500
},
{
"epoch": 217.3913043478261,
"eval_loss": 0.40957868099212646,
"eval_runtime": 13.4977,
"eval_samples_per_second": 11.928,
"eval_steps_per_second": 1.556,
"step": 2500
},
{
"epoch": 221.7391304347826,
"grad_norm": 0.7047795057296753,
"learning_rate": 9.085e-06,
"loss": 0.4285,
"step": 2550
},
{
"epoch": 226.08695652173913,
"grad_norm": 1.3410165309906006,
"learning_rate": 9.001666666666667e-06,
"loss": 0.4294,
"step": 2600
},
{
"epoch": 230.43478260869566,
"grad_norm": 1.68880033493042,
"learning_rate": 8.918333333333334e-06,
"loss": 0.437,
"step": 2650
},
{
"epoch": 234.7826086956522,
"grad_norm": 0.8228254914283752,
"learning_rate": 8.835000000000001e-06,
"loss": 0.4305,
"step": 2700
},
{
"epoch": 239.1304347826087,
"grad_norm": 1.1968687772750854,
"learning_rate": 8.751666666666668e-06,
"loss": 0.432,
"step": 2750
},
{
"epoch": 243.47826086956522,
"grad_norm": 0.6908459663391113,
"learning_rate": 8.668333333333335e-06,
"loss": 0.4282,
"step": 2800
},
{
"epoch": 247.82608695652175,
"grad_norm": 0.7675824165344238,
"learning_rate": 8.585000000000002e-06,
"loss": 0.427,
"step": 2850
},
{
"epoch": 252.17391304347825,
"grad_norm": 0.893234133720398,
"learning_rate": 8.501666666666667e-06,
"loss": 0.4278,
"step": 2900
},
{
"epoch": 256.5217391304348,
"grad_norm": 0.8852680921554565,
"learning_rate": 8.418333333333334e-06,
"loss": 0.4275,
"step": 2950
},
{
"epoch": 260.8695652173913,
"grad_norm": 0.8615354299545288,
"learning_rate": 8.335e-06,
"loss": 0.4261,
"step": 3000
},
{
"epoch": 260.8695652173913,
"eval_loss": 0.40570691227912903,
"eval_runtime": 13.5692,
"eval_samples_per_second": 11.865,
"eval_steps_per_second": 1.548,
"step": 3000
},
{
"epoch": 265.2173913043478,
"grad_norm": 1.5136363506317139,
"learning_rate": 8.251666666666668e-06,
"loss": 0.426,
"step": 3050
},
{
"epoch": 269.5652173913044,
"grad_norm": 1.0311447381973267,
"learning_rate": 8.168333333333334e-06,
"loss": 0.4225,
"step": 3100
},
{
"epoch": 273.9130434782609,
"grad_norm": 2.1306591033935547,
"learning_rate": 8.085000000000001e-06,
"loss": 0.4225,
"step": 3150
},
{
"epoch": 278.2608695652174,
"grad_norm": 0.9035950303077698,
"learning_rate": 8.001666666666668e-06,
"loss": 0.4227,
"step": 3200
},
{
"epoch": 282.60869565217394,
"grad_norm": 1.382899284362793,
"learning_rate": 7.918333333333333e-06,
"loss": 0.4222,
"step": 3250
},
{
"epoch": 286.95652173913044,
"grad_norm": 0.9895361065864563,
"learning_rate": 7.835e-06,
"loss": 0.4215,
"step": 3300
},
{
"epoch": 291.30434782608694,
"grad_norm": 0.7423685789108276,
"learning_rate": 7.751666666666667e-06,
"loss": 0.4218,
"step": 3350
},
{
"epoch": 295.6521739130435,
"grad_norm": 1.230416178703308,
"learning_rate": 7.668333333333334e-06,
"loss": 0.4224,
"step": 3400
},
{
"epoch": 300.0,
"grad_norm": 1.3802359104156494,
"learning_rate": 7.585e-06,
"loss": 0.4205,
"step": 3450
},
{
"epoch": 304.3478260869565,
"grad_norm": 0.758198618888855,
"learning_rate": 7.501666666666667e-06,
"loss": 0.4206,
"step": 3500
},
{
"epoch": 304.3478260869565,
"eval_loss": 0.4048587381839752,
"eval_runtime": 14.3547,
"eval_samples_per_second": 11.216,
"eval_steps_per_second": 1.463,
"step": 3500
},
{
"epoch": 308.69565217391306,
"grad_norm": 0.9321383833885193,
"learning_rate": 7.418333333333334e-06,
"loss": 0.4189,
"step": 3550
},
{
"epoch": 313.04347826086956,
"grad_norm": 1.2097365856170654,
"learning_rate": 7.335000000000001e-06,
"loss": 0.4183,
"step": 3600
},
{
"epoch": 317.39130434782606,
"grad_norm": 0.8793150782585144,
"learning_rate": 7.251666666666667e-06,
"loss": 0.4192,
"step": 3650
},
{
"epoch": 321.7391304347826,
"grad_norm": 1.274627923965454,
"learning_rate": 7.168333333333334e-06,
"loss": 0.4131,
"step": 3700
},
{
"epoch": 326.0869565217391,
"grad_norm": 0.7330735921859741,
"learning_rate": 7.085000000000001e-06,
"loss": 0.4173,
"step": 3750
},
{
"epoch": 330.4347826086956,
"grad_norm": 0.9770796895027161,
"learning_rate": 7.001666666666668e-06,
"loss": 0.4136,
"step": 3800
},
{
"epoch": 334.7826086956522,
"grad_norm": 0.8670146465301514,
"learning_rate": 6.918333333333334e-06,
"loss": 0.4124,
"step": 3850
},
{
"epoch": 339.1304347826087,
"grad_norm": 1.0471419095993042,
"learning_rate": 6.835000000000001e-06,
"loss": 0.4152,
"step": 3900
},
{
"epoch": 343.4782608695652,
"grad_norm": 0.6883171796798706,
"learning_rate": 6.7516666666666675e-06,
"loss": 0.4149,
"step": 3950
},
{
"epoch": 347.82608695652175,
"grad_norm": 0.8158336877822876,
"learning_rate": 6.668333333333334e-06,
"loss": 0.4117,
"step": 4000
},
{
"epoch": 347.82608695652175,
"eval_loss": 0.40652915835380554,
"eval_runtime": 13.3468,
"eval_samples_per_second": 12.063,
"eval_steps_per_second": 1.573,
"step": 4000
},
{
"epoch": 352.17391304347825,
"grad_norm": 0.8498961925506592,
"learning_rate": 6.5850000000000005e-06,
"loss": 0.4104,
"step": 4050
},
{
"epoch": 356.5217391304348,
"grad_norm": 0.8166142702102661,
"learning_rate": 6.501666666666667e-06,
"loss": 0.4124,
"step": 4100
},
{
"epoch": 360.8695652173913,
"grad_norm": 1.0088080167770386,
"learning_rate": 6.418333333333334e-06,
"loss": 0.4093,
"step": 4150
},
{
"epoch": 365.2173913043478,
"grad_norm": 0.7711963653564453,
"learning_rate": 6.335e-06,
"loss": 0.4123,
"step": 4200
},
{
"epoch": 369.5652173913044,
"grad_norm": 0.6871639490127563,
"learning_rate": 6.253333333333333e-06,
"loss": 0.4173,
"step": 4250
},
{
"epoch": 373.9130434782609,
"grad_norm": 0.8275335431098938,
"learning_rate": 6.17e-06,
"loss": 0.4151,
"step": 4300
},
{
"epoch": 378.2608695652174,
"grad_norm": 0.7653161883354187,
"learning_rate": 6.086666666666667e-06,
"loss": 0.4129,
"step": 4350
},
{
"epoch": 382.60869565217394,
"grad_norm": 0.8653395771980286,
"learning_rate": 6.003333333333334e-06,
"loss": 0.4109,
"step": 4400
},
{
"epoch": 386.95652173913044,
"grad_norm": 0.8151092529296875,
"learning_rate": 5.92e-06,
"loss": 0.4141,
"step": 4450
},
{
"epoch": 391.30434782608694,
"grad_norm": 0.7493378520011902,
"learning_rate": 5.836666666666667e-06,
"loss": 0.4123,
"step": 4500
},
{
"epoch": 391.30434782608694,
"eval_loss": 0.402925044298172,
"eval_runtime": 13.3404,
"eval_samples_per_second": 12.069,
"eval_steps_per_second": 1.574,
"step": 4500
},
{
"epoch": 395.6521739130435,
"grad_norm": 0.9856454133987427,
"learning_rate": 5.753333333333334e-06,
"loss": 0.4176,
"step": 4550
},
{
"epoch": 400.0,
"grad_norm": 1.347724437713623,
"learning_rate": 5.67e-06,
"loss": 0.4077,
"step": 4600
},
{
"epoch": 404.3478260869565,
"grad_norm": 0.6690952777862549,
"learning_rate": 5.586666666666667e-06,
"loss": 0.4099,
"step": 4650
},
{
"epoch": 408.69565217391306,
"grad_norm": 0.7244478464126587,
"learning_rate": 5.503333333333334e-06,
"loss": 0.411,
"step": 4700
},
{
"epoch": 413.04347826086956,
"grad_norm": 2.4231059551239014,
"learning_rate": 5.420000000000001e-06,
"loss": 0.4133,
"step": 4750
},
{
"epoch": 417.39130434782606,
"grad_norm": 0.7757388353347778,
"learning_rate": 5.336666666666667e-06,
"loss": 0.4121,
"step": 4800
},
{
"epoch": 421.7391304347826,
"grad_norm": 0.9179409146308899,
"learning_rate": 5.2533333333333336e-06,
"loss": 0.4089,
"step": 4850
},
{
"epoch": 426.0869565217391,
"grad_norm": 1.120680809020996,
"learning_rate": 5.1700000000000005e-06,
"loss": 0.4141,
"step": 4900
},
{
"epoch": 430.4347826086956,
"grad_norm": 0.9229013323783875,
"learning_rate": 5.086666666666667e-06,
"loss": 0.4086,
"step": 4950
},
{
"epoch": 434.7826086956522,
"grad_norm": 0.7975584864616394,
"learning_rate": 5.0033333333333334e-06,
"loss": 0.4117,
"step": 5000
},
{
"epoch": 434.7826086956522,
"eval_loss": 0.40458473563194275,
"eval_runtime": 13.8621,
"eval_samples_per_second": 11.614,
"eval_steps_per_second": 1.515,
"step": 5000
},
{
"epoch": 439.1304347826087,
"grad_norm": 0.6378179788589478,
"learning_rate": 4.92e-06,
"loss": 0.4081,
"step": 5050
},
{
"epoch": 443.4782608695652,
"grad_norm": 1.1477278470993042,
"learning_rate": 4.836666666666667e-06,
"loss": 0.4077,
"step": 5100
},
{
"epoch": 447.82608695652175,
"grad_norm": 0.8010798692703247,
"learning_rate": 4.753333333333333e-06,
"loss": 0.41,
"step": 5150
},
{
"epoch": 452.17391304347825,
"grad_norm": 0.8638696074485779,
"learning_rate": 4.670000000000001e-06,
"loss": 0.41,
"step": 5200
},
{
"epoch": 456.5217391304348,
"grad_norm": 0.6933819055557251,
"learning_rate": 4.586666666666667e-06,
"loss": 0.4063,
"step": 5250
},
{
"epoch": 460.8695652173913,
"grad_norm": 0.8501341938972473,
"learning_rate": 4.503333333333333e-06,
"loss": 0.4077,
"step": 5300
},
{
"epoch": 465.2173913043478,
"grad_norm": 0.6721881031990051,
"learning_rate": 4.42e-06,
"loss": 0.4069,
"step": 5350
},
{
"epoch": 469.5652173913044,
"grad_norm": 1.053196668624878,
"learning_rate": 4.336666666666667e-06,
"loss": 0.4045,
"step": 5400
},
{
"epoch": 473.9130434782609,
"grad_norm": 1.0177547931671143,
"learning_rate": 4.253333333333334e-06,
"loss": 0.4038,
"step": 5450
},
{
"epoch": 478.2608695652174,
"grad_norm": 0.8582258820533752,
"learning_rate": 4.17e-06,
"loss": 0.4053,
"step": 5500
},
{
"epoch": 478.2608695652174,
"eval_loss": 0.4044737219810486,
"eval_runtime": 12.7138,
"eval_samples_per_second": 12.663,
"eval_steps_per_second": 1.652,
"step": 5500
},
{
"epoch": 482.60869565217394,
"grad_norm": 0.7471197843551636,
"learning_rate": 4.086666666666667e-06,
"loss": 0.4008,
"step": 5550
},
{
"epoch": 486.95652173913044,
"grad_norm": 0.7691963315010071,
"learning_rate": 4.003333333333334e-06,
"loss": 0.4076,
"step": 5600
},
{
"epoch": 491.30434782608694,
"grad_norm": 0.7329992651939392,
"learning_rate": 3.920000000000001e-06,
"loss": 0.4039,
"step": 5650
},
{
"epoch": 495.6521739130435,
"grad_norm": 1.2654510736465454,
"learning_rate": 3.836666666666667e-06,
"loss": 0.4057,
"step": 5700
},
{
"epoch": 500.0,
"grad_norm": 2.19535756111145,
"learning_rate": 3.753333333333334e-06,
"loss": 0.4082,
"step": 5750
},
{
"epoch": 504.3478260869565,
"grad_norm": 0.962538480758667,
"learning_rate": 3.6700000000000004e-06,
"loss": 0.4043,
"step": 5800
},
{
"epoch": 508.69565217391306,
"grad_norm": 0.9239832162857056,
"learning_rate": 3.5866666666666673e-06,
"loss": 0.4069,
"step": 5850
},
{
"epoch": 513.0434782608696,
"grad_norm": 2.786410331726074,
"learning_rate": 3.5033333333333334e-06,
"loss": 0.4081,
"step": 5900
},
{
"epoch": 517.3913043478261,
"grad_norm": 0.7412209510803223,
"learning_rate": 3.4200000000000007e-06,
"loss": 0.4028,
"step": 5950
},
{
"epoch": 521.7391304347826,
"grad_norm": 0.8102017045021057,
"learning_rate": 3.3366666666666668e-06,
"loss": 0.4064,
"step": 6000
},
{
"epoch": 521.7391304347826,
"eval_loss": 0.4046972692012787,
"eval_runtime": 12.7264,
"eval_samples_per_second": 12.651,
"eval_steps_per_second": 1.65,
"step": 6000
},
{
"epoch": 526.0869565217391,
"grad_norm": 1.0029244422912598,
"learning_rate": 3.2533333333333332e-06,
"loss": 0.4032,
"step": 6050
},
{
"epoch": 530.4347826086956,
"grad_norm": 0.7244696617126465,
"learning_rate": 3.17e-06,
"loss": 0.4082,
"step": 6100
},
{
"epoch": 534.7826086956521,
"grad_norm": 0.7756295204162598,
"learning_rate": 3.0866666666666666e-06,
"loss": 0.4063,
"step": 6150
},
{
"epoch": 539.1304347826087,
"grad_norm": 0.6352535486221313,
"learning_rate": 3.0033333333333335e-06,
"loss": 0.408,
"step": 6200
},
{
"epoch": 543.4782608695652,
"grad_norm": 0.8275020122528076,
"learning_rate": 2.92e-06,
"loss": 0.4047,
"step": 6250
},
{
"epoch": 547.8260869565217,
"grad_norm": 0.6523252129554749,
"learning_rate": 2.836666666666667e-06,
"loss": 0.4054,
"step": 6300
},
{
"epoch": 552.1739130434783,
"grad_norm": 0.864088237285614,
"learning_rate": 2.7533333333333334e-06,
"loss": 0.4043,
"step": 6350
},
{
"epoch": 556.5217391304348,
"grad_norm": 0.6934670805931091,
"learning_rate": 2.6700000000000003e-06,
"loss": 0.4014,
"step": 6400
},
{
"epoch": 560.8695652173913,
"grad_norm": 0.6325249671936035,
"learning_rate": 2.5866666666666667e-06,
"loss": 0.4002,
"step": 6450
},
{
"epoch": 565.2173913043479,
"grad_norm": 0.6945725083351135,
"learning_rate": 2.505e-06,
"loss": 0.4058,
"step": 6500
},
{
"epoch": 565.2173913043479,
"eval_loss": 0.4041679799556732,
"eval_runtime": 12.8417,
"eval_samples_per_second": 12.537,
"eval_steps_per_second": 1.635,
"step": 6500
},
{
"epoch": 569.5652173913044,
"grad_norm": 0.7380744218826294,
"learning_rate": 2.421666666666667e-06,
"loss": 0.407,
"step": 6550
},
{
"epoch": 573.9130434782609,
"grad_norm": 2.16170597076416,
"learning_rate": 2.3383333333333335e-06,
"loss": 0.3985,
"step": 6600
},
{
"epoch": 578.2608695652174,
"grad_norm": 0.753318727016449,
"learning_rate": 2.2550000000000004e-06,
"loss": 0.4015,
"step": 6650
},
{
"epoch": 582.6086956521739,
"grad_norm": 0.8654139637947083,
"learning_rate": 2.171666666666667e-06,
"loss": 0.4024,
"step": 6700
},
{
"epoch": 586.9565217391304,
"grad_norm": 0.6627745032310486,
"learning_rate": 2.088333333333334e-06,
"loss": 0.4013,
"step": 6750
},
{
"epoch": 591.304347826087,
"grad_norm": 0.7147239446640015,
"learning_rate": 2.0050000000000003e-06,
"loss": 0.3997,
"step": 6800
},
{
"epoch": 595.6521739130435,
"grad_norm": 0.6943681836128235,
"learning_rate": 1.9216666666666668e-06,
"loss": 0.4043,
"step": 6850
},
{
"epoch": 600.0,
"grad_norm": 0.94109046459198,
"learning_rate": 1.8383333333333334e-06,
"loss": 0.4031,
"step": 6900
},
{
"epoch": 604.3478260869565,
"grad_norm": 0.6653702855110168,
"learning_rate": 1.7550000000000001e-06,
"loss": 0.3996,
"step": 6950
},
{
"epoch": 608.695652173913,
"grad_norm": 0.8962767720222473,
"learning_rate": 1.6716666666666666e-06,
"loss": 0.3995,
"step": 7000
},
{
"epoch": 608.695652173913,
"eval_loss": 0.4058188796043396,
"eval_runtime": 12.7074,
"eval_samples_per_second": 12.67,
"eval_steps_per_second": 1.653,
"step": 7000
},
{
"epoch": 613.0434782608696,
"grad_norm": 1.6197575330734253,
"learning_rate": 1.5883333333333333e-06,
"loss": 0.4003,
"step": 7050
},
{
"epoch": 617.3913043478261,
"grad_norm": 0.6977828741073608,
"learning_rate": 1.505e-06,
"loss": 0.3998,
"step": 7100
},
{
"epoch": 621.7391304347826,
"grad_norm": 0.665290355682373,
"learning_rate": 1.4216666666666667e-06,
"loss": 0.4003,
"step": 7150
},
{
"epoch": 626.0869565217391,
"grad_norm": 0.7739102840423584,
"learning_rate": 1.3383333333333334e-06,
"loss": 0.4033,
"step": 7200
},
{
"epoch": 630.4347826086956,
"grad_norm": 0.6389193534851074,
"learning_rate": 1.255e-06,
"loss": 0.4039,
"step": 7250
},
{
"epoch": 634.7826086956521,
"grad_norm": 0.6950168609619141,
"learning_rate": 1.1716666666666667e-06,
"loss": 0.4017,
"step": 7300
},
{
"epoch": 639.1304347826087,
"grad_norm": 0.6477563381195068,
"learning_rate": 1.0883333333333334e-06,
"loss": 0.3986,
"step": 7350
},
{
"epoch": 643.4782608695652,
"grad_norm": 0.5801939964294434,
"learning_rate": 1.0050000000000001e-06,
"loss": 0.3996,
"step": 7400
},
{
"epoch": 647.8260869565217,
"grad_norm": 0.6561626195907593,
"learning_rate": 9.216666666666667e-07,
"loss": 0.4035,
"step": 7450
},
{
"epoch": 652.1739130434783,
"grad_norm": 0.6258853077888489,
"learning_rate": 8.383333333333334e-07,
"loss": 0.4076,
"step": 7500
},
{
"epoch": 652.1739130434783,
"eval_loss": 0.40540868043899536,
"eval_runtime": 13.1806,
"eval_samples_per_second": 12.215,
"eval_steps_per_second": 1.593,
"step": 7500
}
],
"logging_steps": 50,
"max_steps": 8000,
"num_input_tokens_seen": 0,
"num_train_epochs": 728,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.817971026659378e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}