{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 3694,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0027070925825663237,
"grad_norm": 0.65234375,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.7066,
"step": 10
},
{
"epoch": 0.005414185165132647,
"grad_norm": 0.4765625,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.7067,
"step": 20
},
{
"epoch": 0.008121277747698972,
"grad_norm": 0.71484375,
"learning_rate": 3e-06,
"loss": 0.7044,
"step": 30
},
{
"epoch": 0.010828370330265295,
"grad_norm": 0.345703125,
"learning_rate": 4.000000000000001e-06,
"loss": 0.7024,
"step": 40
},
{
"epoch": 0.01353546291283162,
"grad_norm": 0.3671875,
"learning_rate": 5e-06,
"loss": 0.7002,
"step": 50
},
{
"epoch": 0.016242555495397944,
"grad_norm": 0.2333984375,
"learning_rate": 6e-06,
"loss": 0.6987,
"step": 60
},
{
"epoch": 0.018949648077964266,
"grad_norm": 0.220703125,
"learning_rate": 7.000000000000001e-06,
"loss": 0.6983,
"step": 70
},
{
"epoch": 0.02165674066053059,
"grad_norm": 0.1884765625,
"learning_rate": 8.000000000000001e-06,
"loss": 0.6971,
"step": 80
},
{
"epoch": 0.024363833243096916,
"grad_norm": 0.208984375,
"learning_rate": 9e-06,
"loss": 0.6968,
"step": 90
},
{
"epoch": 0.02707092582566324,
"grad_norm": 0.1572265625,
"learning_rate": 1e-05,
"loss": 0.6964,
"step": 100
},
{
"epoch": 0.02707092582566324,
"eval_loss": 0.6960552334785461,
"eval_runtime": 96.0637,
"eval_samples_per_second": 5.205,
"eval_steps_per_second": 0.167,
"step": 100
},
{
"epoch": 0.02977801840822956,
"grad_norm": 0.16796875,
"learning_rate": 1.1000000000000001e-05,
"loss": 0.6959,
"step": 110
},
{
"epoch": 0.03248511099079589,
"grad_norm": 0.1103515625,
"learning_rate": 1.2e-05,
"loss": 0.6958,
"step": 120
},
{
"epoch": 0.03519220357336221,
"grad_norm": 0.11279296875,
"learning_rate": 1.3000000000000001e-05,
"loss": 0.6954,
"step": 130
},
{
"epoch": 0.03789929615592853,
"grad_norm": 0.1455078125,
"learning_rate": 1.4000000000000001e-05,
"loss": 0.6952,
"step": 140
},
{
"epoch": 0.040606388738494856,
"grad_norm": 0.12158203125,
"learning_rate": 1.5e-05,
"loss": 0.6951,
"step": 150
},
{
"epoch": 0.04331348132106118,
"grad_norm": 0.1005859375,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.6951,
"step": 160
},
{
"epoch": 0.0460205739036275,
"grad_norm": 0.1279296875,
"learning_rate": 1.7000000000000003e-05,
"loss": 0.6949,
"step": 170
},
{
"epoch": 0.04872766648619383,
"grad_norm": 0.11328125,
"learning_rate": 1.8e-05,
"loss": 0.6947,
"step": 180
},
{
"epoch": 0.051434759068760154,
"grad_norm": 0.17578125,
"learning_rate": 1.9e-05,
"loss": 0.6947,
"step": 190
},
{
"epoch": 0.05414185165132648,
"grad_norm": 0.0849609375,
"learning_rate": 2e-05,
"loss": 0.6944,
"step": 200
},
{
"epoch": 0.05414185165132648,
"eval_loss": 0.6944612860679626,
"eval_runtime": 70.079,
"eval_samples_per_second": 7.135,
"eval_steps_per_second": 0.228,
"step": 200
},
{
"epoch": 0.0568489442338928,
"grad_norm": 0.1396484375,
"learning_rate": 2.1e-05,
"loss": 0.6943,
"step": 210
},
{
"epoch": 0.05955603681645912,
"grad_norm": 0.0849609375,
"learning_rate": 2.2000000000000003e-05,
"loss": 0.6943,
"step": 220
},
{
"epoch": 0.062263129399025445,
"grad_norm": 0.06396484375,
"learning_rate": 2.3000000000000003e-05,
"loss": 0.6943,
"step": 230
},
{
"epoch": 0.06497022198159177,
"grad_norm": 0.123046875,
"learning_rate": 2.4e-05,
"loss": 0.6944,
"step": 240
},
{
"epoch": 0.0676773145641581,
"grad_norm": 0.068359375,
"learning_rate": 2.5e-05,
"loss": 0.6943,
"step": 250
},
{
"epoch": 0.07038440714672442,
"grad_norm": 0.08349609375,
"learning_rate": 2.6000000000000002e-05,
"loss": 0.6942,
"step": 260
},
{
"epoch": 0.07309149972929074,
"grad_norm": 0.08447265625,
"learning_rate": 2.7000000000000002e-05,
"loss": 0.6941,
"step": 270
},
{
"epoch": 0.07579859231185707,
"grad_norm": 0.11279296875,
"learning_rate": 2.8000000000000003e-05,
"loss": 0.694,
"step": 280
},
{
"epoch": 0.07850568489442339,
"grad_norm": 0.055419921875,
"learning_rate": 2.9e-05,
"loss": 0.6939,
"step": 290
},
{
"epoch": 0.08121277747698971,
"grad_norm": 0.0654296875,
"learning_rate": 3e-05,
"loss": 0.6939,
"step": 300
},
{
"epoch": 0.08121277747698971,
"eval_loss": 0.69379723072052,
"eval_runtime": 71.3483,
"eval_samples_per_second": 7.008,
"eval_steps_per_second": 0.224,
"step": 300
},
{
"epoch": 0.08391987005955603,
"grad_norm": 0.0966796875,
"learning_rate": 3.1e-05,
"loss": 0.6939,
"step": 310
},
{
"epoch": 0.08662696264212236,
"grad_norm": 0.0693359375,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.6936,
"step": 320
},
{
"epoch": 0.08933405522468868,
"grad_norm": 0.06396484375,
"learning_rate": 3.3e-05,
"loss": 0.6931,
"step": 330
},
{
"epoch": 0.092041147807255,
"grad_norm": 0.09765625,
"learning_rate": 3.4000000000000007e-05,
"loss": 0.6916,
"step": 340
},
{
"epoch": 0.09474824038982133,
"grad_norm": 0.2060546875,
"learning_rate": 3.5e-05,
"loss": 0.6835,
"step": 350
},
{
"epoch": 0.09745533297238766,
"grad_norm": 0.349609375,
"learning_rate": 3.6e-05,
"loss": 0.6623,
"step": 360
},
{
"epoch": 0.10016242555495398,
"grad_norm": 0.375,
"learning_rate": 3.7e-05,
"loss": 0.6366,
"step": 370
},
{
"epoch": 0.10286951813752031,
"grad_norm": 0.345703125,
"learning_rate": 3.8e-05,
"loss": 0.6162,
"step": 380
},
{
"epoch": 0.10557661072008663,
"grad_norm": 0.5390625,
"learning_rate": 3.9000000000000006e-05,
"loss": 0.5946,
"step": 390
},
{
"epoch": 0.10828370330265295,
"grad_norm": 0.337890625,
"learning_rate": 4e-05,
"loss": 0.5911,
"step": 400
},
{
"epoch": 0.10828370330265295,
"eval_loss": 0.5952291488647461,
"eval_runtime": 69.8814,
"eval_samples_per_second": 7.155,
"eval_steps_per_second": 0.229,
"step": 400
},
{
"epoch": 0.11099079588521928,
"grad_norm": 0.349609375,
"learning_rate": 4.1e-05,
"loss": 0.5777,
"step": 410
},
{
"epoch": 0.1136978884677856,
"grad_norm": 0.328125,
"learning_rate": 4.2e-05,
"loss": 0.5755,
"step": 420
},
{
"epoch": 0.11640498105035192,
"grad_norm": 0.314453125,
"learning_rate": 4.3e-05,
"loss": 0.5732,
"step": 430
},
{
"epoch": 0.11911207363291824,
"grad_norm": 0.31640625,
"learning_rate": 4.4000000000000006e-05,
"loss": 0.5729,
"step": 440
},
{
"epoch": 0.12181916621548457,
"grad_norm": 0.31640625,
"learning_rate": 4.5e-05,
"loss": 0.569,
"step": 450
},
{
"epoch": 0.12452625879805089,
"grad_norm": 0.328125,
"learning_rate": 4.600000000000001e-05,
"loss": 0.5648,
"step": 460
},
{
"epoch": 0.12723335138061723,
"grad_norm": 0.29296875,
"learning_rate": 4.7e-05,
"loss": 0.5778,
"step": 470
},
{
"epoch": 0.12994044396318355,
"grad_norm": 0.373046875,
"learning_rate": 4.8e-05,
"loss": 0.5705,
"step": 480
},
{
"epoch": 0.13264753654574987,
"grad_norm": 0.3671875,
"learning_rate": 4.9e-05,
"loss": 0.5675,
"step": 490
},
{
"epoch": 0.1353546291283162,
"grad_norm": 0.328125,
"learning_rate": 5e-05,
"loss": 0.5752,
"step": 500
},
{
"epoch": 0.1353546291283162,
"eval_loss": 0.5701072812080383,
"eval_runtime": 70.1857,
"eval_samples_per_second": 7.124,
"eval_steps_per_second": 0.228,
"step": 500
},
{
"epoch": 0.13806172171088252,
"grad_norm": 0.33984375,
"learning_rate": 4.984345648090169e-05,
"loss": 0.5513,
"step": 510
},
{
"epoch": 0.14076881429344884,
"grad_norm": 0.431640625,
"learning_rate": 4.9686912961803384e-05,
"loss": 0.5644,
"step": 520
},
{
"epoch": 0.14347590687601516,
"grad_norm": 0.318359375,
"learning_rate": 4.9530369442705075e-05,
"loss": 0.567,
"step": 530
},
{
"epoch": 0.1461829994585815,
"grad_norm": 0.33984375,
"learning_rate": 4.9373825923606765e-05,
"loss": 0.5639,
"step": 540
},
{
"epoch": 0.1488900920411478,
"grad_norm": 0.333984375,
"learning_rate": 4.9217282404508456e-05,
"loss": 0.556,
"step": 550
},
{
"epoch": 0.15159718462371413,
"grad_norm": 0.35546875,
"learning_rate": 4.906073888541015e-05,
"loss": 0.5697,
"step": 560
},
{
"epoch": 0.15430427720628045,
"grad_norm": 0.3359375,
"learning_rate": 4.890419536631184e-05,
"loss": 0.5458,
"step": 570
},
{
"epoch": 0.15701136978884678,
"grad_norm": 0.345703125,
"learning_rate": 4.874765184721353e-05,
"loss": 0.5701,
"step": 580
},
{
"epoch": 0.1597184623714131,
"grad_norm": 0.3984375,
"learning_rate": 4.859110832811522e-05,
"loss": 0.5665,
"step": 590
},
{
"epoch": 0.16242555495397942,
"grad_norm": 0.390625,
"learning_rate": 4.843456480901691e-05,
"loss": 0.55,
"step": 600
},
{
"epoch": 0.16242555495397942,
"eval_loss": 0.5633333325386047,
"eval_runtime": 75.1182,
"eval_samples_per_second": 6.656,
"eval_steps_per_second": 0.213,
"step": 600
},
{
"epoch": 0.16513264753654575,
"grad_norm": 0.345703125,
"learning_rate": 4.82780212899186e-05,
"loss": 0.5584,
"step": 610
},
{
"epoch": 0.16783974011911207,
"grad_norm": 0.306640625,
"learning_rate": 4.812147777082029e-05,
"loss": 0.5505,
"step": 620
},
{
"epoch": 0.1705468327016784,
"grad_norm": 0.38671875,
"learning_rate": 4.796493425172198e-05,
"loss": 0.5526,
"step": 630
},
{
"epoch": 0.17325392528424471,
"grad_norm": 0.40625,
"learning_rate": 4.780839073262367e-05,
"loss": 0.5579,
"step": 640
},
{
"epoch": 0.17596101786681104,
"grad_norm": 0.373046875,
"learning_rate": 4.765184721352536e-05,
"loss": 0.5546,
"step": 650
},
{
"epoch": 0.17866811044937736,
"grad_norm": 0.30078125,
"learning_rate": 4.7495303694427054e-05,
"loss": 0.5467,
"step": 660
},
{
"epoch": 0.18137520303194368,
"grad_norm": 0.328125,
"learning_rate": 4.7338760175328744e-05,
"loss": 0.5496,
"step": 670
},
{
"epoch": 0.18408229561451,
"grad_norm": 0.404296875,
"learning_rate": 4.7182216656230435e-05,
"loss": 0.563,
"step": 680
},
{
"epoch": 0.18678938819707633,
"grad_norm": 0.341796875,
"learning_rate": 4.7025673137132126e-05,
"loss": 0.5599,
"step": 690
},
{
"epoch": 0.18949648077964265,
"grad_norm": 0.310546875,
"learning_rate": 4.6869129618033816e-05,
"loss": 0.5542,
"step": 700
},
{
"epoch": 0.18949648077964265,
"eval_loss": 0.5565248727798462,
"eval_runtime": 70.5824,
"eval_samples_per_second": 7.084,
"eval_steps_per_second": 0.227,
"step": 700
},
{
"epoch": 0.19220357336220897,
"grad_norm": 0.33203125,
"learning_rate": 4.671258609893551e-05,
"loss": 0.5479,
"step": 710
},
{
"epoch": 0.19491066594477532,
"grad_norm": 0.333984375,
"learning_rate": 4.65560425798372e-05,
"loss": 0.5443,
"step": 720
},
{
"epoch": 0.19761775852734165,
"grad_norm": 0.3359375,
"learning_rate": 4.639949906073889e-05,
"loss": 0.549,
"step": 730
},
{
"epoch": 0.20032485110990797,
"grad_norm": 0.37890625,
"learning_rate": 4.624295554164057e-05,
"loss": 0.5605,
"step": 740
},
{
"epoch": 0.2030319436924743,
"grad_norm": 0.369140625,
"learning_rate": 4.608641202254227e-05,
"loss": 0.5498,
"step": 750
},
{
"epoch": 0.20573903627504062,
"grad_norm": 0.318359375,
"learning_rate": 4.5929868503443954e-05,
"loss": 0.5541,
"step": 760
},
{
"epoch": 0.20844612885760694,
"grad_norm": 0.392578125,
"learning_rate": 4.577332498434565e-05,
"loss": 0.5444,
"step": 770
},
{
"epoch": 0.21115322144017326,
"grad_norm": 0.3671875,
"learning_rate": 4.561678146524734e-05,
"loss": 0.541,
"step": 780
},
{
"epoch": 0.21386031402273958,
"grad_norm": 0.353515625,
"learning_rate": 4.546023794614903e-05,
"loss": 0.5646,
"step": 790
},
{
"epoch": 0.2165674066053059,
"grad_norm": 0.349609375,
"learning_rate": 4.5303694427050724e-05,
"loss": 0.543,
"step": 800
},
{
"epoch": 0.2165674066053059,
"eval_loss": 0.5533172488212585,
"eval_runtime": 70.7826,
"eval_samples_per_second": 7.064,
"eval_steps_per_second": 0.226,
"step": 800
},
{
"epoch": 0.21927449918787223,
"grad_norm": 0.302734375,
"learning_rate": 4.5147150907952414e-05,
"loss": 0.5387,
"step": 810
},
{
"epoch": 0.22198159177043855,
"grad_norm": 0.392578125,
"learning_rate": 4.4990607388854105e-05,
"loss": 0.5447,
"step": 820
},
{
"epoch": 0.22468868435300487,
"grad_norm": 0.37109375,
"learning_rate": 4.4834063869755796e-05,
"loss": 0.5397,
"step": 830
},
{
"epoch": 0.2273957769355712,
"grad_norm": 0.326171875,
"learning_rate": 4.4677520350657486e-05,
"loss": 0.5482,
"step": 840
},
{
"epoch": 0.23010286951813752,
"grad_norm": 0.322265625,
"learning_rate": 4.452097683155918e-05,
"loss": 0.5553,
"step": 850
},
{
"epoch": 0.23280996210070384,
"grad_norm": 0.388671875,
"learning_rate": 4.436443331246087e-05,
"loss": 0.5456,
"step": 860
},
{
"epoch": 0.23551705468327017,
"grad_norm": 0.431640625,
"learning_rate": 4.420788979336256e-05,
"loss": 0.5541,
"step": 870
},
{
"epoch": 0.2382241472658365,
"grad_norm": 0.330078125,
"learning_rate": 4.405134627426425e-05,
"loss": 0.5377,
"step": 880
},
{
"epoch": 0.2409312398484028,
"grad_norm": 0.361328125,
"learning_rate": 4.389480275516594e-05,
"loss": 0.5376,
"step": 890
},
{
"epoch": 0.24363833243096913,
"grad_norm": 0.35546875,
"learning_rate": 4.373825923606763e-05,
"loss": 0.5324,
"step": 900
},
{
"epoch": 0.24363833243096913,
"eval_loss": 0.5525312423706055,
"eval_runtime": 71.8319,
"eval_samples_per_second": 6.961,
"eval_steps_per_second": 0.223,
"step": 900
},
{
"epoch": 0.24634542501353546,
"grad_norm": 0.333984375,
"learning_rate": 4.358171571696932e-05,
"loss": 0.5521,
"step": 910
},
{
"epoch": 0.24905251759610178,
"grad_norm": 0.369140625,
"learning_rate": 4.342517219787101e-05,
"loss": 0.543,
"step": 920
},
{
"epoch": 0.2517596101786681,
"grad_norm": 0.322265625,
"learning_rate": 4.3268628678772696e-05,
"loss": 0.5553,
"step": 930
},
{
"epoch": 0.25446670276123445,
"grad_norm": 0.392578125,
"learning_rate": 4.3112085159674393e-05,
"loss": 0.5289,
"step": 940
},
{
"epoch": 0.25717379534380075,
"grad_norm": 0.294921875,
"learning_rate": 4.295554164057608e-05,
"loss": 0.5355,
"step": 950
},
{
"epoch": 0.2598808879263671,
"grad_norm": 0.36328125,
"learning_rate": 4.2798998121477775e-05,
"loss": 0.5488,
"step": 960
},
{
"epoch": 0.2625879805089334,
"grad_norm": 0.42578125,
"learning_rate": 4.264245460237946e-05,
"loss": 0.549,
"step": 970
},
{
"epoch": 0.26529507309149974,
"grad_norm": 0.3359375,
"learning_rate": 4.2485911083281156e-05,
"loss": 0.5395,
"step": 980
},
{
"epoch": 0.26800216567406604,
"grad_norm": 0.373046875,
"learning_rate": 4.232936756418284e-05,
"loss": 0.5408,
"step": 990
},
{
"epoch": 0.2707092582566324,
"grad_norm": 0.298828125,
"learning_rate": 4.217282404508454e-05,
"loss": 0.5399,
"step": 1000
},
{
"epoch": 0.2707092582566324,
"eval_loss": 0.5489118695259094,
"eval_runtime": 70.8351,
"eval_samples_per_second": 7.059,
"eval_steps_per_second": 0.226,
"step": 1000
},
{
"epoch": 0.2734163508391987,
"grad_norm": 0.306640625,
"learning_rate": 4.201628052598622e-05,
"loss": 0.5416,
"step": 1010
},
{
"epoch": 0.27612344342176504,
"grad_norm": 0.306640625,
"learning_rate": 4.185973700688792e-05,
"loss": 0.5422,
"step": 1020
},
{
"epoch": 0.27883053600433133,
"grad_norm": 0.359375,
"learning_rate": 4.170319348778961e-05,
"loss": 0.5417,
"step": 1030
},
{
"epoch": 0.2815376285868977,
"grad_norm": 0.3125,
"learning_rate": 4.15466499686913e-05,
"loss": 0.5441,
"step": 1040
},
{
"epoch": 0.284244721169464,
"grad_norm": 0.369140625,
"learning_rate": 4.139010644959299e-05,
"loss": 0.5457,
"step": 1050
},
{
"epoch": 0.2869518137520303,
"grad_norm": 0.326171875,
"learning_rate": 4.123356293049468e-05,
"loss": 0.5341,
"step": 1060
},
{
"epoch": 0.2896589063345966,
"grad_norm": 0.3515625,
"learning_rate": 4.107701941139637e-05,
"loss": 0.5428,
"step": 1070
},
{
"epoch": 0.292365998917163,
"grad_norm": 0.345703125,
"learning_rate": 4.092047589229806e-05,
"loss": 0.5404,
"step": 1080
},
{
"epoch": 0.29507309149972927,
"grad_norm": 0.369140625,
"learning_rate": 4.0763932373199754e-05,
"loss": 0.5378,
"step": 1090
},
{
"epoch": 0.2977801840822956,
"grad_norm": 0.345703125,
"learning_rate": 4.0607388854101445e-05,
"loss": 0.549,
"step": 1100
},
{
"epoch": 0.2977801840822956,
"eval_loss": 0.5469087362289429,
"eval_runtime": 69.8693,
"eval_samples_per_second": 7.156,
"eval_steps_per_second": 0.229,
"step": 1100
},
{
"epoch": 0.3004872766648619,
"grad_norm": 0.390625,
"learning_rate": 4.0450845335003135e-05,
"loss": 0.5505,
"step": 1110
},
{
"epoch": 0.30319436924742826,
"grad_norm": 0.353515625,
"learning_rate": 4.029430181590482e-05,
"loss": 0.5404,
"step": 1120
},
{
"epoch": 0.30590146182999456,
"grad_norm": 0.4375,
"learning_rate": 4.013775829680652e-05,
"loss": 0.5411,
"step": 1130
},
{
"epoch": 0.3086085544125609,
"grad_norm": 0.337890625,
"learning_rate": 3.99812147777082e-05,
"loss": 0.5404,
"step": 1140
},
{
"epoch": 0.31131564699512726,
"grad_norm": 0.34375,
"learning_rate": 3.98246712586099e-05,
"loss": 0.5309,
"step": 1150
},
{
"epoch": 0.31402273957769355,
"grad_norm": 0.369140625,
"learning_rate": 3.966812773951158e-05,
"loss": 0.5448,
"step": 1160
},
{
"epoch": 0.3167298321602599,
"grad_norm": 0.375,
"learning_rate": 3.951158422041328e-05,
"loss": 0.5374,
"step": 1170
},
{
"epoch": 0.3194369247428262,
"grad_norm": 0.3515625,
"learning_rate": 3.9355040701314964e-05,
"loss": 0.5482,
"step": 1180
},
{
"epoch": 0.32214401732539255,
"grad_norm": 0.330078125,
"learning_rate": 3.919849718221666e-05,
"loss": 0.5411,
"step": 1190
},
{
"epoch": 0.32485110990795885,
"grad_norm": 0.35546875,
"learning_rate": 3.9041953663118345e-05,
"loss": 0.5409,
"step": 1200
},
{
"epoch": 0.32485110990795885,
"eval_loss": 0.546714186668396,
"eval_runtime": 71.2431,
"eval_samples_per_second": 7.018,
"eval_steps_per_second": 0.225,
"step": 1200
},
{
"epoch": 0.3275582024905252,
"grad_norm": 0.431640625,
"learning_rate": 3.888541014402004e-05,
"loss": 0.5334,
"step": 1210
},
{
"epoch": 0.3302652950730915,
"grad_norm": 0.359375,
"learning_rate": 3.8728866624921726e-05,
"loss": 0.5338,
"step": 1220
},
{
"epoch": 0.33297238765565784,
"grad_norm": 0.326171875,
"learning_rate": 3.8572323105823424e-05,
"loss": 0.5433,
"step": 1230
},
{
"epoch": 0.33567948023822414,
"grad_norm": 0.3515625,
"learning_rate": 3.841577958672511e-05,
"loss": 0.5372,
"step": 1240
},
{
"epoch": 0.3383865728207905,
"grad_norm": 0.380859375,
"learning_rate": 3.8259236067626805e-05,
"loss": 0.5421,
"step": 1250
},
{
"epoch": 0.3410936654033568,
"grad_norm": 0.31640625,
"learning_rate": 3.810269254852849e-05,
"loss": 0.5436,
"step": 1260
},
{
"epoch": 0.34380075798592313,
"grad_norm": 0.361328125,
"learning_rate": 3.794614902943019e-05,
"loss": 0.5408,
"step": 1270
},
{
"epoch": 0.34650785056848943,
"grad_norm": 0.365234375,
"learning_rate": 3.778960551033187e-05,
"loss": 0.5433,
"step": 1280
},
{
"epoch": 0.3492149431510558,
"grad_norm": 0.3515625,
"learning_rate": 3.763306199123356e-05,
"loss": 0.5396,
"step": 1290
},
{
"epoch": 0.3519220357336221,
"grad_norm": 0.421875,
"learning_rate": 3.747651847213526e-05,
"loss": 0.5438,
"step": 1300
},
{
"epoch": 0.3519220357336221,
"eval_loss": 0.5430516004562378,
"eval_runtime": 70.6664,
"eval_samples_per_second": 7.075,
"eval_steps_per_second": 0.226,
"step": 1300
},
{
"epoch": 0.3546291283161884,
"grad_norm": 0.310546875,
"learning_rate": 3.731997495303694e-05,
"loss": 0.5328,
"step": 1310
},
{
"epoch": 0.3573362208987547,
"grad_norm": 0.390625,
"learning_rate": 3.716343143393864e-05,
"loss": 0.5399,
"step": 1320
},
{
"epoch": 0.36004331348132107,
"grad_norm": 0.408203125,
"learning_rate": 3.7006887914840324e-05,
"loss": 0.5301,
"step": 1330
},
{
"epoch": 0.36275040606388737,
"grad_norm": 0.359375,
"learning_rate": 3.685034439574202e-05,
"loss": 0.5356,
"step": 1340
},
{
"epoch": 0.3654574986464537,
"grad_norm": 0.341796875,
"learning_rate": 3.6693800876643706e-05,
"loss": 0.5431,
"step": 1350
},
{
"epoch": 0.36816459122902,
"grad_norm": 0.353515625,
"learning_rate": 3.65372573575454e-05,
"loss": 0.5278,
"step": 1360
},
{
"epoch": 0.37087168381158636,
"grad_norm": 0.376953125,
"learning_rate": 3.638071383844709e-05,
"loss": 0.5382,
"step": 1370
},
{
"epoch": 0.37357877639415266,
"grad_norm": 0.341796875,
"learning_rate": 3.6224170319348784e-05,
"loss": 0.5361,
"step": 1380
},
{
"epoch": 0.376285868976719,
"grad_norm": 0.35546875,
"learning_rate": 3.606762680025047e-05,
"loss": 0.5397,
"step": 1390
},
{
"epoch": 0.3789929615592853,
"grad_norm": 0.390625,
"learning_rate": 3.5911083281152166e-05,
"loss": 0.5467,
"step": 1400
},
{
"epoch": 0.3789929615592853,
"eval_loss": 0.5431599020957947,
"eval_runtime": 70.1255,
"eval_samples_per_second": 7.13,
"eval_steps_per_second": 0.228,
"step": 1400
},
{
"epoch": 0.38170005414185165,
"grad_norm": 0.369140625,
"learning_rate": 3.575453976205385e-05,
"loss": 0.5332,
"step": 1410
},
{
"epoch": 0.38440714672441795,
"grad_norm": 0.37890625,
"learning_rate": 3.559799624295555e-05,
"loss": 0.542,
"step": 1420
},
{
"epoch": 0.3871142393069843,
"grad_norm": 0.365234375,
"learning_rate": 3.544145272385723e-05,
"loss": 0.5312,
"step": 1430
},
{
"epoch": 0.38982133188955065,
"grad_norm": 0.3671875,
"learning_rate": 3.528490920475893e-05,
"loss": 0.5375,
"step": 1440
},
{
"epoch": 0.39252842447211694,
"grad_norm": 0.306640625,
"learning_rate": 3.512836568566061e-05,
"loss": 0.5287,
"step": 1450
},
{
"epoch": 0.3952355170546833,
"grad_norm": 0.353515625,
"learning_rate": 3.497182216656231e-05,
"loss": 0.5203,
"step": 1460
},
{
"epoch": 0.3979426096372496,
"grad_norm": 0.42578125,
"learning_rate": 3.4815278647463994e-05,
"loss": 0.5135,
"step": 1470
},
{
"epoch": 0.40064970221981594,
"grad_norm": 0.412109375,
"learning_rate": 3.4658735128365685e-05,
"loss": 0.549,
"step": 1480
},
{
"epoch": 0.40335679480238223,
"grad_norm": 0.330078125,
"learning_rate": 3.4502191609267375e-05,
"loss": 0.5543,
"step": 1490
},
{
"epoch": 0.4060638873849486,
"grad_norm": 0.369140625,
"learning_rate": 3.4345648090169066e-05,
"loss": 0.5466,
"step": 1500
},
{
"epoch": 0.4060638873849486,
"eval_loss": 0.5431920289993286,
"eval_runtime": 71.0212,
"eval_samples_per_second": 7.04,
"eval_steps_per_second": 0.225,
"step": 1500
},
{
"epoch": 0.4087709799675149,
"grad_norm": 0.353515625,
"learning_rate": 3.418910457107076e-05,
"loss": 0.5372,
"step": 1510
},
{
"epoch": 0.41147807255008123,
"grad_norm": 0.369140625,
"learning_rate": 3.403256105197245e-05,
"loss": 0.5361,
"step": 1520
},
{
"epoch": 0.4141851651326475,
"grad_norm": 0.357421875,
"learning_rate": 3.387601753287414e-05,
"loss": 0.5377,
"step": 1530
},
{
"epoch": 0.4168922577152139,
"grad_norm": 0.38671875,
"learning_rate": 3.371947401377583e-05,
"loss": 0.5281,
"step": 1540
},
{
"epoch": 0.41959935029778017,
"grad_norm": 0.380859375,
"learning_rate": 3.3562930494677526e-05,
"loss": 0.5284,
"step": 1550
},
{
"epoch": 0.4223064428803465,
"grad_norm": 0.34765625,
"learning_rate": 3.340638697557921e-05,
"loss": 0.5338,
"step": 1560
},
{
"epoch": 0.4250135354629128,
"grad_norm": 0.34765625,
"learning_rate": 3.324984345648091e-05,
"loss": 0.5275,
"step": 1570
},
{
"epoch": 0.42772062804547917,
"grad_norm": 0.3359375,
"learning_rate": 3.309329993738259e-05,
"loss": 0.5402,
"step": 1580
},
{
"epoch": 0.43042772062804546,
"grad_norm": 0.34765625,
"learning_rate": 3.293675641828429e-05,
"loss": 0.5393,
"step": 1590
},
{
"epoch": 0.4331348132106118,
"grad_norm": 0.392578125,
"learning_rate": 3.278021289918597e-05,
"loss": 0.5309,
"step": 1600
},
{
"epoch": 0.4331348132106118,
"eval_loss": 0.5397661328315735,
"eval_runtime": 70.9009,
"eval_samples_per_second": 7.052,
"eval_steps_per_second": 0.226,
"step": 1600
},
{
"epoch": 0.4358419057931781,
"grad_norm": 0.4296875,
"learning_rate": 3.262366938008767e-05,
"loss": 0.537,
"step": 1610
},
{
"epoch": 0.43854899837574446,
"grad_norm": 0.40625,
"learning_rate": 3.2467125860989355e-05,
"loss": 0.5405,
"step": 1620
},
{
"epoch": 0.44125609095831075,
"grad_norm": 0.40234375,
"learning_rate": 3.231058234189105e-05,
"loss": 0.5228,
"step": 1630
},
{
"epoch": 0.4439631835408771,
"grad_norm": 0.390625,
"learning_rate": 3.2154038822792736e-05,
"loss": 0.5315,
"step": 1640
},
{
"epoch": 0.4466702761234434,
"grad_norm": 0.412109375,
"learning_rate": 3.1997495303694433e-05,
"loss": 0.5531,
"step": 1650
},
{
"epoch": 0.44937736870600975,
"grad_norm": 0.3984375,
"learning_rate": 3.184095178459612e-05,
"loss": 0.5467,
"step": 1660
},
{
"epoch": 0.45208446128857604,
"grad_norm": 0.38671875,
"learning_rate": 3.168440826549781e-05,
"loss": 0.5302,
"step": 1670
},
{
"epoch": 0.4547915538711424,
"grad_norm": 0.482421875,
"learning_rate": 3.15278647463995e-05,
"loss": 0.5465,
"step": 1680
},
{
"epoch": 0.4574986464537087,
"grad_norm": 0.380859375,
"learning_rate": 3.137132122730119e-05,
"loss": 0.5241,
"step": 1690
},
{
"epoch": 0.46020573903627504,
"grad_norm": 0.3515625,
"learning_rate": 3.121477770820288e-05,
"loss": 0.5241,
"step": 1700
},
{
"epoch": 0.46020573903627504,
"eval_loss": 0.540625810623169,
"eval_runtime": 70.5131,
"eval_samples_per_second": 7.091,
"eval_steps_per_second": 0.227,
"step": 1700
},
{
"epoch": 0.4629128316188414,
"grad_norm": 0.43359375,
"learning_rate": 3.105823418910457e-05,
"loss": 0.5276,
"step": 1710
},
{
"epoch": 0.4656199242014077,
"grad_norm": 0.392578125,
"learning_rate": 3.090169067000626e-05,
"loss": 0.5396,
"step": 1720
},
{
"epoch": 0.46832701678397404,
"grad_norm": 0.439453125,
"learning_rate": 3.074514715090795e-05,
"loss": 0.5386,
"step": 1730
},
{
"epoch": 0.47103410936654033,
"grad_norm": 0.423828125,
"learning_rate": 3.058860363180964e-05,
"loss": 0.5324,
"step": 1740
},
{
"epoch": 0.4737412019491067,
"grad_norm": 0.435546875,
"learning_rate": 3.0432060112711337e-05,
"loss": 0.5328,
"step": 1750
},
{
"epoch": 0.476448294531673,
"grad_norm": 0.375,
"learning_rate": 3.0275516593613024e-05,
"loss": 0.5314,
"step": 1760
},
{
"epoch": 0.47915538711423933,
"grad_norm": 0.396484375,
"learning_rate": 3.011897307451472e-05,
"loss": 0.5327,
"step": 1770
},
{
"epoch": 0.4818624796968056,
"grad_norm": 0.365234375,
"learning_rate": 2.9962429555416406e-05,
"loss": 0.5435,
"step": 1780
},
{
"epoch": 0.484569572279372,
"grad_norm": 0.353515625,
"learning_rate": 2.9805886036318097e-05,
"loss": 0.5261,
"step": 1790
},
{
"epoch": 0.48727666486193827,
"grad_norm": 0.421875,
"learning_rate": 2.9649342517219787e-05,
"loss": 0.523,
"step": 1800
},
{
"epoch": 0.48727666486193827,
"eval_loss": 0.5400844812393188,
"eval_runtime": 70.1604,
"eval_samples_per_second": 7.127,
"eval_steps_per_second": 0.228,
"step": 1800
},
{
"epoch": 0.4899837574445046,
"grad_norm": 0.361328125,
"learning_rate": 2.9492798998121478e-05,
"loss": 0.5265,
"step": 1810
},
{
"epoch": 0.4926908500270709,
"grad_norm": 0.3984375,
"learning_rate": 2.9336255479023172e-05,
"loss": 0.5389,
"step": 1820
},
{
"epoch": 0.49539794260963727,
"grad_norm": 0.4375,
"learning_rate": 2.917971195992486e-05,
"loss": 0.5239,
"step": 1830
},
{
"epoch": 0.49810503519220356,
"grad_norm": 0.494140625,
"learning_rate": 2.9023168440826553e-05,
"loss": 0.5212,
"step": 1840
},
{
"epoch": 0.5008121277747699,
"grad_norm": 0.40625,
"learning_rate": 2.886662492172824e-05,
"loss": 0.5437,
"step": 1850
},
{
"epoch": 0.5035192203573362,
"grad_norm": 0.423828125,
"learning_rate": 2.8710081402629935e-05,
"loss": 0.5356,
"step": 1860
},
{
"epoch": 0.5062263129399025,
"grad_norm": 0.37109375,
"learning_rate": 2.8553537883531622e-05,
"loss": 0.5233,
"step": 1870
},
{
"epoch": 0.5089334055224689,
"grad_norm": 0.38671875,
"learning_rate": 2.8396994364433316e-05,
"loss": 0.5255,
"step": 1880
},
{
"epoch": 0.5116404981050352,
"grad_norm": 0.431640625,
"learning_rate": 2.8240450845335004e-05,
"loss": 0.5206,
"step": 1890
},
{
"epoch": 0.5143475906876015,
"grad_norm": 0.365234375,
"learning_rate": 2.8083907326236698e-05,
"loss": 0.5241,
"step": 1900
},
{
"epoch": 0.5143475906876015,
"eval_loss": 0.5394680500030518,
"eval_runtime": 70.7559,
"eval_samples_per_second": 7.067,
"eval_steps_per_second": 0.226,
"step": 1900
},
{
"epoch": 0.5170546832701678,
"grad_norm": 0.419921875,
"learning_rate": 2.7927363807138385e-05,
"loss": 0.5319,
"step": 1910
},
{
"epoch": 0.5197617758527342,
"grad_norm": 0.431640625,
"learning_rate": 2.777082028804008e-05,
"loss": 0.5351,
"step": 1920
},
{
"epoch": 0.5224688684353005,
"grad_norm": 0.43359375,
"learning_rate": 2.7614276768941766e-05,
"loss": 0.5284,
"step": 1930
},
{
"epoch": 0.5251759610178668,
"grad_norm": 0.447265625,
"learning_rate": 2.745773324984346e-05,
"loss": 0.531,
"step": 1940
},
{
"epoch": 0.5278830536004331,
"grad_norm": 0.451171875,
"learning_rate": 2.7301189730745148e-05,
"loss": 0.533,
"step": 1950
},
{
"epoch": 0.5305901461829995,
"grad_norm": 0.421875,
"learning_rate": 2.7144646211646842e-05,
"loss": 0.5226,
"step": 1960
},
{
"epoch": 0.5332972387655658,
"grad_norm": 0.486328125,
"learning_rate": 2.698810269254853e-05,
"loss": 0.5311,
"step": 1970
},
{
"epoch": 0.5360043313481321,
"grad_norm": 0.466796875,
"learning_rate": 2.683155917345022e-05,
"loss": 0.5244,
"step": 1980
},
{
"epoch": 0.5387114239306985,
"grad_norm": 0.388671875,
"learning_rate": 2.667501565435191e-05,
"loss": 0.5393,
"step": 1990
},
{
"epoch": 0.5414185165132648,
"grad_norm": 0.453125,
"learning_rate": 2.65184721352536e-05,
"loss": 0.5365,
"step": 2000
},
{
"epoch": 0.5414185165132648,
"eval_loss": 0.5388503670692444,
"eval_runtime": 70.6892,
"eval_samples_per_second": 7.073,
"eval_steps_per_second": 0.226,
"step": 2000
},
{
"epoch": 0.5441256090958311,
"grad_norm": 0.404296875,
"learning_rate": 2.636192861615529e-05,
"loss": 0.5295,
"step": 2010
},
{
"epoch": 0.5468327016783974,
"grad_norm": 0.345703125,
"learning_rate": 2.6205385097056983e-05,
"loss": 0.5302,
"step": 2020
},
{
"epoch": 0.5495397942609638,
"grad_norm": 0.40234375,
"learning_rate": 2.604884157795867e-05,
"loss": 0.5305,
"step": 2030
},
{
"epoch": 0.5522468868435301,
"grad_norm": 0.423828125,
"learning_rate": 2.5892298058860364e-05,
"loss": 0.5196,
"step": 2040
},
{
"epoch": 0.5549539794260964,
"grad_norm": 0.384765625,
"learning_rate": 2.573575453976205e-05,
"loss": 0.5223,
"step": 2050
},
{
"epoch": 0.5576610720086627,
"grad_norm": 0.34765625,
"learning_rate": 2.5579211020663746e-05,
"loss": 0.5312,
"step": 2060
},
{
"epoch": 0.5603681645912291,
"grad_norm": 0.431640625,
"learning_rate": 2.5422667501565433e-05,
"loss": 0.532,
"step": 2070
},
{
"epoch": 0.5630752571737954,
"grad_norm": 0.396484375,
"learning_rate": 2.5266123982467127e-05,
"loss": 0.5216,
"step": 2080
},
{
"epoch": 0.5657823497563617,
"grad_norm": 0.39453125,
"learning_rate": 2.510958046336882e-05,
"loss": 0.5334,
"step": 2090
},
{
"epoch": 0.568489442338928,
"grad_norm": 0.435546875,
"learning_rate": 2.495303694427051e-05,
"loss": 0.5296,
"step": 2100
},
{
"epoch": 0.568489442338928,
"eval_loss": 0.5370610952377319,
"eval_runtime": 71.1301,
"eval_samples_per_second": 7.029,
"eval_steps_per_second": 0.225,
"step": 2100
},
{
"epoch": 0.5711965349214944,
"grad_norm": 0.3359375,
"learning_rate": 2.47964934251722e-05,
"loss": 0.5315,
"step": 2110
},
{
"epoch": 0.5739036275040607,
"grad_norm": 0.3671875,
"learning_rate": 2.463994990607389e-05,
"loss": 0.51,
"step": 2120
},
{
"epoch": 0.576610720086627,
"grad_norm": 0.423828125,
"learning_rate": 2.448340638697558e-05,
"loss": 0.5339,
"step": 2130
},
{
"epoch": 0.5793178126691932,
"grad_norm": 0.375,
"learning_rate": 2.432686286787727e-05,
"loss": 0.5215,
"step": 2140
},
{
"epoch": 0.5820249052517596,
"grad_norm": 0.392578125,
"learning_rate": 2.4170319348778962e-05,
"loss": 0.5235,
"step": 2150
},
{
"epoch": 0.584731997834326,
"grad_norm": 0.40625,
"learning_rate": 2.4013775829680653e-05,
"loss": 0.5246,
"step": 2160
},
{
"epoch": 0.5874390904168922,
"grad_norm": 0.4453125,
"learning_rate": 2.3857232310582343e-05,
"loss": 0.528,
"step": 2170
},
{
"epoch": 0.5901461829994585,
"grad_norm": 0.447265625,
"learning_rate": 2.3700688791484034e-05,
"loss": 0.532,
"step": 2180
},
{
"epoch": 0.5928532755820249,
"grad_norm": 0.38671875,
"learning_rate": 2.3544145272385725e-05,
"loss": 0.5283,
"step": 2190
},
{
"epoch": 0.5955603681645912,
"grad_norm": 0.396484375,
"learning_rate": 2.3387601753287412e-05,
"loss": 0.5256,
"step": 2200
},
{
"epoch": 0.5955603681645912,
"eval_loss": 0.5376315712928772,
"eval_runtime": 70.6924,
"eval_samples_per_second": 7.073,
"eval_steps_per_second": 0.226,
"step": 2200
},
{
"epoch": 0.5982674607471575,
"grad_norm": 0.373046875,
"learning_rate": 2.3231058234189106e-05,
"loss": 0.5264,
"step": 2210
},
{
"epoch": 0.6009745533297238,
"grad_norm": 0.40625,
"learning_rate": 2.3074514715090797e-05,
"loss": 0.5271,
"step": 2220
},
{
"epoch": 0.6036816459122902,
"grad_norm": 0.39453125,
"learning_rate": 2.2917971195992488e-05,
"loss": 0.5483,
"step": 2230
},
{
"epoch": 0.6063887384948565,
"grad_norm": 0.37109375,
"learning_rate": 2.2761427676894178e-05,
"loss": 0.5243,
"step": 2240
},
{
"epoch": 0.6090958310774228,
"grad_norm": 0.37109375,
"learning_rate": 2.260488415779587e-05,
"loss": 0.5223,
"step": 2250
},
{
"epoch": 0.6118029236599891,
"grad_norm": 0.44140625,
"learning_rate": 2.244834063869756e-05,
"loss": 0.5245,
"step": 2260
},
{
"epoch": 0.6145100162425555,
"grad_norm": 0.53125,
"learning_rate": 2.229179711959925e-05,
"loss": 0.5253,
"step": 2270
},
{
"epoch": 0.6172171088251218,
"grad_norm": 0.5390625,
"learning_rate": 2.213525360050094e-05,
"loss": 0.5339,
"step": 2280
},
{
"epoch": 0.6199242014076881,
"grad_norm": 0.431640625,
"learning_rate": 2.1978710081402632e-05,
"loss": 0.5269,
"step": 2290
},
{
"epoch": 0.6226312939902545,
"grad_norm": 0.404296875,
"learning_rate": 2.1822166562304323e-05,
"loss": 0.5319,
"step": 2300
},
{
"epoch": 0.6226312939902545,
"eval_loss": 0.5361697673797607,
"eval_runtime": 70.5195,
"eval_samples_per_second": 7.09,
"eval_steps_per_second": 0.227,
"step": 2300
},
{
"epoch": 0.6253383865728208,
"grad_norm": 0.486328125,
"learning_rate": 2.1665623043206013e-05,
"loss": 0.5139,
"step": 2310
},
{
"epoch": 0.6280454791553871,
"grad_norm": 0.51953125,
"learning_rate": 2.1509079524107704e-05,
"loss": 0.5373,
"step": 2320
},
{
"epoch": 0.6307525717379534,
"grad_norm": 0.458984375,
"learning_rate": 2.1352536005009395e-05,
"loss": 0.53,
"step": 2330
},
{
"epoch": 0.6334596643205198,
"grad_norm": 0.37890625,
"learning_rate": 2.1195992485911085e-05,
"loss": 0.5282,
"step": 2340
},
{
"epoch": 0.6361667569030861,
"grad_norm": 0.4296875,
"learning_rate": 2.1039448966812776e-05,
"loss": 0.5211,
"step": 2350
},
{
"epoch": 0.6388738494856524,
"grad_norm": 0.388671875,
"learning_rate": 2.0882905447714467e-05,
"loss": 0.5268,
"step": 2360
},
{
"epoch": 0.6415809420682187,
"grad_norm": 0.369140625,
"learning_rate": 2.0726361928616157e-05,
"loss": 0.5222,
"step": 2370
},
{
"epoch": 0.6442880346507851,
"grad_norm": 0.52734375,
"learning_rate": 2.0569818409517845e-05,
"loss": 0.5249,
"step": 2380
},
{
"epoch": 0.6469951272333514,
"grad_norm": 0.41015625,
"learning_rate": 2.0413274890419535e-05,
"loss": 0.542,
"step": 2390
},
{
"epoch": 0.6497022198159177,
"grad_norm": 0.396484375,
"learning_rate": 2.0256731371321226e-05,
"loss": 0.5172,
"step": 2400
},
{
"epoch": 0.6497022198159177,
"eval_loss": 0.5371243953704834,
"eval_runtime": 70.8703,
"eval_samples_per_second": 7.055,
"eval_steps_per_second": 0.226,
"step": 2400
},
{
"epoch": 0.652409312398484,
"grad_norm": 0.494140625,
"learning_rate": 2.0100187852222917e-05,
"loss": 0.5347,
"step": 2410
},
{
"epoch": 0.6551164049810504,
"grad_norm": 0.443359375,
"learning_rate": 1.9943644333124608e-05,
"loss": 0.5288,
"step": 2420
},
{
"epoch": 0.6578234975636167,
"grad_norm": 0.4453125,
"learning_rate": 1.9787100814026298e-05,
"loss": 0.5282,
"step": 2430
},
{
"epoch": 0.660530590146183,
"grad_norm": 0.4453125,
"learning_rate": 1.963055729492799e-05,
"loss": 0.5151,
"step": 2440
},
{
"epoch": 0.6632376827287493,
"grad_norm": 0.470703125,
"learning_rate": 1.947401377582968e-05,
"loss": 0.5339,
"step": 2450
},
{
"epoch": 0.6659447753113157,
"grad_norm": 0.412109375,
"learning_rate": 1.931747025673137e-05,
"loss": 0.5322,
"step": 2460
},
{
"epoch": 0.668651867893882,
"grad_norm": 0.4375,
"learning_rate": 1.9160926737633064e-05,
"loss": 0.512,
"step": 2470
},
{
"epoch": 0.6713589604764483,
"grad_norm": 0.421875,
"learning_rate": 1.9004383218534755e-05,
"loss": 0.5334,
"step": 2480
},
{
"epoch": 0.6740660530590146,
"grad_norm": 0.3828125,
"learning_rate": 1.8847839699436446e-05,
"loss": 0.5265,
"step": 2490
},
{
"epoch": 0.676773145641581,
"grad_norm": 0.39453125,
"learning_rate": 1.8691296180338137e-05,
"loss": 0.5355,
"step": 2500
},
{
"epoch": 0.676773145641581,
"eval_loss": 0.5358985066413879,
"eval_runtime": 72.2037,
"eval_samples_per_second": 6.925,
"eval_steps_per_second": 0.222,
"step": 2500
},
{
"epoch": 0.6794802382241473,
"grad_norm": 0.412109375,
"learning_rate": 1.8534752661239827e-05,
"loss": 0.534,
"step": 2510
},
{
"epoch": 0.6821873308067136,
"grad_norm": 0.388671875,
"learning_rate": 1.8378209142141518e-05,
"loss": 0.5373,
"step": 2520
},
{
"epoch": 0.6848944233892799,
"grad_norm": 0.447265625,
"learning_rate": 1.822166562304321e-05,
"loss": 0.5381,
"step": 2530
},
{
"epoch": 0.6876015159718463,
"grad_norm": 0.427734375,
"learning_rate": 1.80651221039449e-05,
"loss": 0.5087,
"step": 2540
},
{
"epoch": 0.6903086085544126,
"grad_norm": 0.427734375,
"learning_rate": 1.790857858484659e-05,
"loss": 0.5328,
"step": 2550
},
{
"epoch": 0.6930157011369789,
"grad_norm": 0.39453125,
"learning_rate": 1.775203506574828e-05,
"loss": 0.515,
"step": 2560
},
{
"epoch": 0.6957227937195453,
"grad_norm": 0.33984375,
"learning_rate": 1.7595491546649968e-05,
"loss": 0.5161,
"step": 2570
},
{
"epoch": 0.6984298863021116,
"grad_norm": 0.388671875,
"learning_rate": 1.743894802755166e-05,
"loss": 0.5295,
"step": 2580
},
{
"epoch": 0.7011369788846779,
"grad_norm": 0.396484375,
"learning_rate": 1.728240450845335e-05,
"loss": 0.5188,
"step": 2590
},
{
"epoch": 0.7038440714672441,
"grad_norm": 0.408203125,
"learning_rate": 1.712586098935504e-05,
"loss": 0.5081,
"step": 2600
},
{
"epoch": 0.7038440714672441,
"eval_loss": 0.5356553792953491,
"eval_runtime": 70.9086,
"eval_samples_per_second": 7.051,
"eval_steps_per_second": 0.226,
"step": 2600
},
{
"epoch": 0.7065511640498106,
"grad_norm": 0.404296875,
"learning_rate": 1.696931747025673e-05,
"loss": 0.5228,
"step": 2610
},
{
"epoch": 0.7092582566323768,
"grad_norm": 0.41015625,
"learning_rate": 1.681277395115842e-05,
"loss": 0.5256,
"step": 2620
},
{
"epoch": 0.7119653492149431,
"grad_norm": 0.515625,
"learning_rate": 1.6656230432060112e-05,
"loss": 0.5379,
"step": 2630
},
{
"epoch": 0.7146724417975094,
"grad_norm": 0.421875,
"learning_rate": 1.6499686912961803e-05,
"loss": 0.5289,
"step": 2640
},
{
"epoch": 0.7173795343800758,
"grad_norm": 0.40625,
"learning_rate": 1.6343143393863494e-05,
"loss": 0.5217,
"step": 2650
},
{
"epoch": 0.7200866269626421,
"grad_norm": 0.44921875,
"learning_rate": 1.6186599874765184e-05,
"loss": 0.5284,
"step": 2660
},
{
"epoch": 0.7227937195452084,
"grad_norm": 0.376953125,
"learning_rate": 1.6030056355666875e-05,
"loss": 0.5143,
"step": 2670
},
{
"epoch": 0.7255008121277747,
"grad_norm": 0.4296875,
"learning_rate": 1.5873512836568566e-05,
"loss": 0.5426,
"step": 2680
},
{
"epoch": 0.7282079047103411,
"grad_norm": 0.392578125,
"learning_rate": 1.5716969317470257e-05,
"loss": 0.5211,
"step": 2690
},
{
"epoch": 0.7309149972929074,
"grad_norm": 0.4609375,
"learning_rate": 1.5560425798371947e-05,
"loss": 0.5318,
"step": 2700
},
{
"epoch": 0.7309149972929074,
"eval_loss": 0.5345706343650818,
"eval_runtime": 71.1706,
"eval_samples_per_second": 7.025,
"eval_steps_per_second": 0.225,
"step": 2700
},
{
"epoch": 0.7336220898754737,
"grad_norm": 0.466796875,
"learning_rate": 1.5403882279273638e-05,
"loss": 0.5184,
"step": 2710
},
{
"epoch": 0.73632918245804,
"grad_norm": 0.47265625,
"learning_rate": 1.5247338760175329e-05,
"loss": 0.5234,
"step": 2720
},
{
"epoch": 0.7390362750406064,
"grad_norm": 0.427734375,
"learning_rate": 1.5090795241077021e-05,
"loss": 0.523,
"step": 2730
},
{
"epoch": 0.7417433676231727,
"grad_norm": 0.376953125,
"learning_rate": 1.4934251721978712e-05,
"loss": 0.5274,
"step": 2740
},
{
"epoch": 0.744450460205739,
"grad_norm": 0.482421875,
"learning_rate": 1.4777708202880403e-05,
"loss": 0.5179,
"step": 2750
},
{
"epoch": 0.7471575527883053,
"grad_norm": 0.421875,
"learning_rate": 1.4621164683782093e-05,
"loss": 0.5255,
"step": 2760
},
{
"epoch": 0.7498646453708717,
"grad_norm": 0.4375,
"learning_rate": 1.4464621164683784e-05,
"loss": 0.5413,
"step": 2770
},
{
"epoch": 0.752571737953438,
"grad_norm": 0.435546875,
"learning_rate": 1.4308077645585475e-05,
"loss": 0.5164,
"step": 2780
},
{
"epoch": 0.7552788305360043,
"grad_norm": 0.349609375,
"learning_rate": 1.4151534126487165e-05,
"loss": 0.5266,
"step": 2790
},
{
"epoch": 0.7579859231185706,
"grad_norm": 0.435546875,
"learning_rate": 1.3994990607388856e-05,
"loss": 0.5436,
"step": 2800
},
{
"epoch": 0.7579859231185706,
"eval_loss": 0.5348847508430481,
"eval_runtime": 70.2342,
"eval_samples_per_second": 7.119,
"eval_steps_per_second": 0.228,
"step": 2800
},
{
"epoch": 0.760693015701137,
"grad_norm": 0.4140625,
"learning_rate": 1.3838447088290547e-05,
"loss": 0.5208,
"step": 2810
},
{
"epoch": 0.7634001082837033,
"grad_norm": 0.40234375,
"learning_rate": 1.3681903569192236e-05,
"loss": 0.5145,
"step": 2820
},
{
"epoch": 0.7661072008662696,
"grad_norm": 0.375,
"learning_rate": 1.3525360050093926e-05,
"loss": 0.5099,
"step": 2830
},
{
"epoch": 0.7688142934488359,
"grad_norm": 0.380859375,
"learning_rate": 1.3368816530995617e-05,
"loss": 0.5377,
"step": 2840
},
{
"epoch": 0.7715213860314023,
"grad_norm": 0.392578125,
"learning_rate": 1.3212273011897308e-05,
"loss": 0.5156,
"step": 2850
},
{
"epoch": 0.7742284786139686,
"grad_norm": 0.466796875,
"learning_rate": 1.3055729492798999e-05,
"loss": 0.5171,
"step": 2860
},
{
"epoch": 0.7769355711965349,
"grad_norm": 0.5078125,
"learning_rate": 1.289918597370069e-05,
"loss": 0.5226,
"step": 2870
},
{
"epoch": 0.7796426637791013,
"grad_norm": 0.333984375,
"learning_rate": 1.274264245460238e-05,
"loss": 0.5347,
"step": 2880
},
{
"epoch": 0.7823497563616676,
"grad_norm": 0.458984375,
"learning_rate": 1.258609893550407e-05,
"loss": 0.5331,
"step": 2890
},
{
"epoch": 0.7850568489442339,
"grad_norm": 0.470703125,
"learning_rate": 1.2429555416405761e-05,
"loss": 0.5124,
"step": 2900
},
{
"epoch": 0.7850568489442339,
"eval_loss": 0.5337508916854858,
"eval_runtime": 70.6912,
"eval_samples_per_second": 7.073,
"eval_steps_per_second": 0.226,
"step": 2900
},
{
"epoch": 0.7877639415268002,
"grad_norm": 0.427734375,
"learning_rate": 1.2273011897307452e-05,
"loss": 0.5211,
"step": 2910
},
{
"epoch": 0.7904710341093666,
"grad_norm": 0.45703125,
"learning_rate": 1.2116468378209143e-05,
"loss": 0.5151,
"step": 2920
},
{
"epoch": 0.7931781266919329,
"grad_norm": 0.400390625,
"learning_rate": 1.1959924859110834e-05,
"loss": 0.5144,
"step": 2930
},
{
"epoch": 0.7958852192744992,
"grad_norm": 0.455078125,
"learning_rate": 1.1803381340012524e-05,
"loss": 0.5349,
"step": 2940
},
{
"epoch": 0.7985923118570655,
"grad_norm": 0.458984375,
"learning_rate": 1.1646837820914215e-05,
"loss": 0.5272,
"step": 2950
},
{
"epoch": 0.8012994044396319,
"grad_norm": 0.40234375,
"learning_rate": 1.1490294301815906e-05,
"loss": 0.5262,
"step": 2960
},
{
"epoch": 0.8040064970221982,
"grad_norm": 0.40234375,
"learning_rate": 1.1333750782717596e-05,
"loss": 0.5352,
"step": 2970
},
{
"epoch": 0.8067135896047645,
"grad_norm": 0.359375,
"learning_rate": 1.1177207263619287e-05,
"loss": 0.528,
"step": 2980
},
{
"epoch": 0.8094206821873308,
"grad_norm": 0.435546875,
"learning_rate": 1.1020663744520978e-05,
"loss": 0.5194,
"step": 2990
},
{
"epoch": 0.8121277747698972,
"grad_norm": 0.5078125,
"learning_rate": 1.0864120225422668e-05,
"loss": 0.5224,
"step": 3000
},
{
"epoch": 0.8121277747698972,
"eval_loss": 0.532988429069519,
"eval_runtime": 70.1018,
"eval_samples_per_second": 7.132,
"eval_steps_per_second": 0.228,
"step": 3000
},
{
"epoch": 0.8148348673524635,
"grad_norm": 0.447265625,
"learning_rate": 1.070757670632436e-05,
"loss": 0.5239,
"step": 3010
},
{
"epoch": 0.8175419599350298,
"grad_norm": 0.37890625,
"learning_rate": 1.0551033187226048e-05,
"loss": 0.5323,
"step": 3020
},
{
"epoch": 0.8202490525175961,
"grad_norm": 0.388671875,
"learning_rate": 1.0394489668127739e-05,
"loss": 0.5334,
"step": 3030
},
{
"epoch": 0.8229561451001625,
"grad_norm": 0.390625,
"learning_rate": 1.023794614902943e-05,
"loss": 0.5318,
"step": 3040
},
{
"epoch": 0.8256632376827288,
"grad_norm": 0.45703125,
"learning_rate": 1.008140262993112e-05,
"loss": 0.5202,
"step": 3050
},
{
"epoch": 0.828370330265295,
"grad_norm": 0.41796875,
"learning_rate": 9.924859110832813e-06,
"loss": 0.5239,
"step": 3060
},
{
"epoch": 0.8310774228478613,
"grad_norm": 0.462890625,
"learning_rate": 9.768315591734503e-06,
"loss": 0.5261,
"step": 3070
},
{
"epoch": 0.8337845154304278,
"grad_norm": 0.384765625,
"learning_rate": 9.611772072636194e-06,
"loss": 0.5242,
"step": 3080
},
{
"epoch": 0.836491608012994,
"grad_norm": 0.421875,
"learning_rate": 9.455228553537885e-06,
"loss": 0.5327,
"step": 3090
},
{
"epoch": 0.8391987005955603,
"grad_norm": 0.4296875,
"learning_rate": 9.298685034439576e-06,
"loss": 0.5204,
"step": 3100
},
{
"epoch": 0.8391987005955603,
"eval_loss": 0.5333003997802734,
"eval_runtime": 71.1268,
"eval_samples_per_second": 7.03,
"eval_steps_per_second": 0.225,
"step": 3100
},
{
"epoch": 0.8419057931781266,
"grad_norm": 0.4765625,
"learning_rate": 9.142141515341266e-06,
"loss": 0.5437,
"step": 3110
},
{
"epoch": 0.844612885760693,
"grad_norm": 0.462890625,
"learning_rate": 8.985597996242955e-06,
"loss": 0.5297,
"step": 3120
},
{
"epoch": 0.8473199783432593,
"grad_norm": 0.4296875,
"learning_rate": 8.829054477144646e-06,
"loss": 0.5305,
"step": 3130
},
{
"epoch": 0.8500270709258256,
"grad_norm": 0.400390625,
"learning_rate": 8.672510958046337e-06,
"loss": 0.5486,
"step": 3140
},
{
"epoch": 0.852734163508392,
"grad_norm": 0.40234375,
"learning_rate": 8.515967438948027e-06,
"loss": 0.5296,
"step": 3150
},
{
"epoch": 0.8554412560909583,
"grad_norm": 0.376953125,
"learning_rate": 8.359423919849718e-06,
"loss": 0.5323,
"step": 3160
},
{
"epoch": 0.8581483486735246,
"grad_norm": 0.427734375,
"learning_rate": 8.202880400751409e-06,
"loss": 0.5381,
"step": 3170
},
{
"epoch": 0.8608554412560909,
"grad_norm": 0.44140625,
"learning_rate": 8.0463368816531e-06,
"loss": 0.5278,
"step": 3180
},
{
"epoch": 0.8635625338386573,
"grad_norm": 0.51953125,
"learning_rate": 7.889793362554792e-06,
"loss": 0.5252,
"step": 3190
},
{
"epoch": 0.8662696264212236,
"grad_norm": 0.447265625,
"learning_rate": 7.733249843456483e-06,
"loss": 0.5215,
"step": 3200
},
{
"epoch": 0.8662696264212236,
"eval_loss": 0.5333092212677002,
"eval_runtime": 70.1888,
"eval_samples_per_second": 7.124,
"eval_steps_per_second": 0.228,
"step": 3200
},
{
"epoch": 0.8689767190037899,
"grad_norm": 0.453125,
"learning_rate": 7.576706324358172e-06,
"loss": 0.5226,
"step": 3210
},
{
"epoch": 0.8716838115863562,
"grad_norm": 0.416015625,
"learning_rate": 7.420162805259863e-06,
"loss": 0.5325,
"step": 3220
},
{
"epoch": 0.8743909041689226,
"grad_norm": 0.4375,
"learning_rate": 7.263619286161554e-06,
"loss": 0.5318,
"step": 3230
},
{
"epoch": 0.8770979967514889,
"grad_norm": 0.451171875,
"learning_rate": 7.107075767063244e-06,
"loss": 0.5224,
"step": 3240
},
{
"epoch": 0.8798050893340552,
"grad_norm": 0.42578125,
"learning_rate": 6.950532247964934e-06,
"loss": 0.5196,
"step": 3250
},
{
"epoch": 0.8825121819166215,
"grad_norm": 0.5546875,
"learning_rate": 6.793988728866625e-06,
"loss": 0.5205,
"step": 3260
},
{
"epoch": 0.8852192744991879,
"grad_norm": 0.408203125,
"learning_rate": 6.637445209768316e-06,
"loss": 0.5291,
"step": 3270
},
{
"epoch": 0.8879263670817542,
"grad_norm": 0.435546875,
"learning_rate": 6.4809016906700065e-06,
"loss": 0.5229,
"step": 3280
},
{
"epoch": 0.8906334596643205,
"grad_norm": 0.439453125,
"learning_rate": 6.324358171571697e-06,
"loss": 0.5286,
"step": 3290
},
{
"epoch": 0.8933405522468868,
"grad_norm": 0.47265625,
"learning_rate": 6.167814652473388e-06,
"loss": 0.5373,
"step": 3300
},
{
"epoch": 0.8933405522468868,
"eval_loss": 0.5331239104270935,
"eval_runtime": 70.3149,
"eval_samples_per_second": 7.111,
"eval_steps_per_second": 0.228,
"step": 3300
},
{
"epoch": 0.8960476448294532,
"grad_norm": 0.408203125,
"learning_rate": 6.011271133375079e-06,
"loss": 0.5325,
"step": 3310
},
{
"epoch": 0.8987547374120195,
"grad_norm": 0.455078125,
"learning_rate": 5.854727614276769e-06,
"loss": 0.5176,
"step": 3320
},
{
"epoch": 0.9014618299945858,
"grad_norm": 0.435546875,
"learning_rate": 5.69818409517846e-06,
"loss": 0.5185,
"step": 3330
},
{
"epoch": 0.9041689225771521,
"grad_norm": 0.48046875,
"learning_rate": 5.54164057608015e-06,
"loss": 0.5155,
"step": 3340
},
{
"epoch": 0.9068760151597185,
"grad_norm": 0.36328125,
"learning_rate": 5.3850970569818414e-06,
"loss": 0.5176,
"step": 3350
},
{
"epoch": 0.9095831077422848,
"grad_norm": 0.45703125,
"learning_rate": 5.228553537883532e-06,
"loss": 0.5201,
"step": 3360
},
{
"epoch": 0.9122902003248511,
"grad_norm": 0.40625,
"learning_rate": 5.072010018785223e-06,
"loss": 0.5267,
"step": 3370
},
{
"epoch": 0.9149972929074174,
"grad_norm": 0.421875,
"learning_rate": 4.9154664996869136e-06,
"loss": 0.5171,
"step": 3380
},
{
"epoch": 0.9177043854899838,
"grad_norm": 0.4140625,
"learning_rate": 4.758922980588603e-06,
"loss": 0.517,
"step": 3390
},
{
"epoch": 0.9204114780725501,
"grad_norm": 0.44140625,
"learning_rate": 4.602379461490294e-06,
"loss": 0.5138,
"step": 3400
},
{
"epoch": 0.9204114780725501,
"eval_loss": 0.5331882238388062,
"eval_runtime": 70.6838,
"eval_samples_per_second": 7.074,
"eval_steps_per_second": 0.226,
"step": 3400
},
{
"epoch": 0.9231185706551164,
"grad_norm": 0.384765625,
"learning_rate": 4.445835942391985e-06,
"loss": 0.5122,
"step": 3410
},
{
"epoch": 0.9258256632376828,
"grad_norm": 0.49609375,
"learning_rate": 4.289292423293676e-06,
"loss": 0.5288,
"step": 3420
},
{
"epoch": 0.9285327558202491,
"grad_norm": 0.3984375,
"learning_rate": 4.132748904195367e-06,
"loss": 0.5262,
"step": 3430
},
{
"epoch": 0.9312398484028154,
"grad_norm": 0.462890625,
"learning_rate": 3.976205385097057e-06,
"loss": 0.5329,
"step": 3440
},
{
"epoch": 0.9339469409853817,
"grad_norm": 0.37890625,
"learning_rate": 3.819661865998748e-06,
"loss": 0.5339,
"step": 3450
},
{
"epoch": 0.9366540335679481,
"grad_norm": 0.431640625,
"learning_rate": 3.6631183469004384e-06,
"loss": 0.5403,
"step": 3460
},
{
"epoch": 0.9393611261505144,
"grad_norm": 0.39453125,
"learning_rate": 3.506574827802129e-06,
"loss": 0.5145,
"step": 3470
},
{
"epoch": 0.9420682187330807,
"grad_norm": 0.474609375,
"learning_rate": 3.35003130870382e-06,
"loss": 0.5253,
"step": 3480
},
{
"epoch": 0.944775311315647,
"grad_norm": 0.4140625,
"learning_rate": 3.193487789605511e-06,
"loss": 0.5183,
"step": 3490
},
{
"epoch": 0.9474824038982134,
"grad_norm": 0.408203125,
"learning_rate": 3.036944270507201e-06,
"loss": 0.5153,
"step": 3500
},
{
"epoch": 0.9474824038982134,
"eval_loss": 0.5332643985748291,
"eval_runtime": 71.6079,
"eval_samples_per_second": 6.982,
"eval_steps_per_second": 0.223,
"step": 3500
},
{
"epoch": 0.9501894964807797,
"grad_norm": 0.5234375,
"learning_rate": 2.880400751408892e-06,
"loss": 0.5083,
"step": 3510
},
{
"epoch": 0.952896589063346,
"grad_norm": 0.57421875,
"learning_rate": 2.7238572323105826e-06,
"loss": 0.5247,
"step": 3520
},
{
"epoch": 0.9556036816459123,
"grad_norm": 0.5234375,
"learning_rate": 2.5673137132122733e-06,
"loss": 0.5215,
"step": 3530
},
{
"epoch": 0.9583107742284787,
"grad_norm": 0.453125,
"learning_rate": 2.410770194113964e-06,
"loss": 0.5254,
"step": 3540
},
{
"epoch": 0.961017866811045,
"grad_norm": 0.42578125,
"learning_rate": 2.2542266750156543e-06,
"loss": 0.5253,
"step": 3550
},
{
"epoch": 0.9637249593936112,
"grad_norm": 0.4609375,
"learning_rate": 2.0976831559173454e-06,
"loss": 0.5354,
"step": 3560
},
{
"epoch": 0.9664320519761775,
"grad_norm": 0.458984375,
"learning_rate": 1.9411396368190357e-06,
"loss": 0.5227,
"step": 3570
},
{
"epoch": 0.969139144558744,
"grad_norm": 0.44140625,
"learning_rate": 1.7845961177207264e-06,
"loss": 0.5226,
"step": 3580
},
{
"epoch": 0.9718462371413102,
"grad_norm": 0.46875,
"learning_rate": 1.6280525986224169e-06,
"loss": 0.5331,
"step": 3590
},
{
"epoch": 0.9745533297238765,
"grad_norm": 0.43359375,
"learning_rate": 1.4715090795241078e-06,
"loss": 0.5319,
"step": 3600
},
{
"epoch": 0.9745533297238765,
"eval_loss": 0.5332371592521667,
"eval_runtime": 70.7142,
"eval_samples_per_second": 7.071,
"eval_steps_per_second": 0.226,
"step": 3600
},
{
"epoch": 0.9772604223064428,
"grad_norm": 0.40234375,
"learning_rate": 1.3149655604257985e-06,
"loss": 0.5263,
"step": 3610
},
{
"epoch": 0.9799675148890092,
"grad_norm": 0.439453125,
"learning_rate": 1.1584220413274892e-06,
"loss": 0.5123,
"step": 3620
},
{
"epoch": 0.9826746074715755,
"grad_norm": 0.392578125,
"learning_rate": 1.0018785222291797e-06,
"loss": 0.5197,
"step": 3630
},
{
"epoch": 0.9853817000541418,
"grad_norm": 0.51953125,
"learning_rate": 8.453350031308704e-07,
"loss": 0.5253,
"step": 3640
},
{
"epoch": 0.9880887926367081,
"grad_norm": 0.37890625,
"learning_rate": 6.887914840325611e-07,
"loss": 0.5301,
"step": 3650
},
{
"epoch": 0.9907958852192745,
"grad_norm": 0.46875,
"learning_rate": 5.322479649342517e-07,
"loss": 0.532,
"step": 3660
},
{
"epoch": 0.9935029778018408,
"grad_norm": 0.419921875,
"learning_rate": 3.757044458359424e-07,
"loss": 0.5349,
"step": 3670
},
{
"epoch": 0.9962100703844071,
"grad_norm": 0.478515625,
"learning_rate": 2.1916092673763307e-07,
"loss": 0.5278,
"step": 3680
},
{
"epoch": 0.9989171629669734,
"grad_norm": 0.392578125,
"learning_rate": 6.261740763932373e-08,
"loss": 0.5235,
"step": 3690
}
],
"logging_steps": 10,
"max_steps": 3694,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0993863336142579e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}