colpali-v1.1 / checkpoint-3500 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9474824038982134,
"eval_steps": 50,
"global_step": 3500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0027070925825663237,
"grad_norm": 0.81640625,
"learning_rate": 5e-06,
"loss": 0.7309,
"step": 10
},
{
"epoch": 0.005414185165132647,
"grad_norm": 1.2578125,
"learning_rate": 1e-05,
"loss": 0.7211,
"step": 20
},
{
"epoch": 0.008121277747698972,
"grad_norm": 0.7265625,
"learning_rate": 1.5e-05,
"loss": 0.7223,
"step": 30
},
{
"epoch": 0.010828370330265295,
"grad_norm": 0.640625,
"learning_rate": 2e-05,
"loss": 0.7158,
"step": 40
},
{
"epoch": 0.01353546291283162,
"grad_norm": 0.427734375,
"learning_rate": 2.5e-05,
"loss": 0.7123,
"step": 50
},
{
"epoch": 0.01353546291283162,
"eval_loss": 0.711525022983551,
"eval_runtime": 105.29,
"eval_samples_per_second": 4.749,
"eval_steps_per_second": 0.152,
"step": 50
},
{
"epoch": 0.016242555495397944,
"grad_norm": 0.8125,
"learning_rate": 3e-05,
"loss": 0.7142,
"step": 60
},
{
"epoch": 0.018949648077964266,
"grad_norm": 0.58203125,
"learning_rate": 3.5e-05,
"loss": 0.7052,
"step": 70
},
{
"epoch": 0.02165674066053059,
"grad_norm": 0.62109375,
"learning_rate": 4e-05,
"loss": 0.7083,
"step": 80
},
{
"epoch": 0.024363833243096916,
"grad_norm": 0.98046875,
"learning_rate": 4.5e-05,
"loss": 0.7036,
"step": 90
},
{
"epoch": 0.02707092582566324,
"grad_norm": 0.6640625,
"learning_rate": 5e-05,
"loss": 0.6903,
"step": 100
},
{
"epoch": 0.02707092582566324,
"eval_loss": 0.6922177672386169,
"eval_runtime": 63.6777,
"eval_samples_per_second": 7.852,
"eval_steps_per_second": 0.251,
"step": 100
},
{
"epoch": 0.02977801840822956,
"grad_norm": 0.96875,
"learning_rate": 4.986087924318308e-05,
"loss": 0.6695,
"step": 110
},
{
"epoch": 0.03248511099079589,
"grad_norm": 1.6484375,
"learning_rate": 4.972175848636616e-05,
"loss": 0.6261,
"step": 120
},
{
"epoch": 0.03519220357336221,
"grad_norm": 1.453125,
"learning_rate": 4.958263772954925e-05,
"loss": 0.5779,
"step": 130
},
{
"epoch": 0.03789929615592853,
"grad_norm": 3.09375,
"learning_rate": 4.944351697273234e-05,
"loss": 0.5185,
"step": 140
},
{
"epoch": 0.040606388738494856,
"grad_norm": 2.375,
"learning_rate": 4.930439621591542e-05,
"loss": 0.4695,
"step": 150
},
{
"epoch": 0.040606388738494856,
"eval_loss": 0.45527857542037964,
"eval_runtime": 64.9387,
"eval_samples_per_second": 7.7,
"eval_steps_per_second": 0.246,
"step": 150
},
{
"epoch": 0.04331348132106118,
"grad_norm": 3.984375,
"learning_rate": 4.91652754590985e-05,
"loss": 0.4536,
"step": 160
},
{
"epoch": 0.0460205739036275,
"grad_norm": 3.671875,
"learning_rate": 4.9026154702281585e-05,
"loss": 0.4113,
"step": 170
},
{
"epoch": 0.04872766648619383,
"grad_norm": 2.71875,
"learning_rate": 4.8887033945464666e-05,
"loss": 0.3874,
"step": 180
},
{
"epoch": 0.051434759068760154,
"grad_norm": 1.6640625,
"learning_rate": 4.8747913188647746e-05,
"loss": 0.3714,
"step": 190
},
{
"epoch": 0.05414185165132648,
"grad_norm": 4.1875,
"learning_rate": 4.8608792431830826e-05,
"loss": 0.294,
"step": 200
},
{
"epoch": 0.05414185165132648,
"eval_loss": 0.3422486186027527,
"eval_runtime": 64.2742,
"eval_samples_per_second": 7.779,
"eval_steps_per_second": 0.249,
"step": 200
},
{
"epoch": 0.0568489442338928,
"grad_norm": 3.625,
"learning_rate": 4.8469671675013914e-05,
"loss": 0.4054,
"step": 210
},
{
"epoch": 0.05955603681645912,
"grad_norm": 2.984375,
"learning_rate": 4.8330550918197e-05,
"loss": 0.3213,
"step": 220
},
{
"epoch": 0.062263129399025445,
"grad_norm": 3.46875,
"learning_rate": 4.819143016138008e-05,
"loss": 0.3261,
"step": 230
},
{
"epoch": 0.06497022198159177,
"grad_norm": 2.328125,
"learning_rate": 4.805230940456316e-05,
"loss": 0.2817,
"step": 240
},
{
"epoch": 0.0676773145641581,
"grad_norm": 2.59375,
"learning_rate": 4.791318864774624e-05,
"loss": 0.2502,
"step": 250
},
{
"epoch": 0.0676773145641581,
"eval_loss": 0.2796708643436432,
"eval_runtime": 63.6818,
"eval_samples_per_second": 7.852,
"eval_steps_per_second": 0.251,
"step": 250
},
{
"epoch": 0.07038440714672442,
"grad_norm": 3.765625,
"learning_rate": 4.777406789092933e-05,
"loss": 0.2768,
"step": 260
},
{
"epoch": 0.07309149972929074,
"grad_norm": 6.375,
"learning_rate": 4.763494713411241e-05,
"loss": 0.2615,
"step": 270
},
{
"epoch": 0.07579859231185707,
"grad_norm": 3.328125,
"learning_rate": 4.7495826377295496e-05,
"loss": 0.248,
"step": 280
},
{
"epoch": 0.07850568489442339,
"grad_norm": 2.109375,
"learning_rate": 4.735670562047858e-05,
"loss": 0.1904,
"step": 290
},
{
"epoch": 0.08121277747698971,
"grad_norm": 1.7734375,
"learning_rate": 4.7217584863661664e-05,
"loss": 0.2066,
"step": 300
},
{
"epoch": 0.08121277747698971,
"eval_loss": 0.24386852979660034,
"eval_runtime": 63.7131,
"eval_samples_per_second": 7.848,
"eval_steps_per_second": 0.251,
"step": 300
},
{
"epoch": 0.08391987005955603,
"grad_norm": 2.515625,
"learning_rate": 4.7078464106844744e-05,
"loss": 0.2251,
"step": 310
},
{
"epoch": 0.08662696264212236,
"grad_norm": 1.9453125,
"learning_rate": 4.6939343350027825e-05,
"loss": 0.2066,
"step": 320
},
{
"epoch": 0.08933405522468868,
"grad_norm": 1.8515625,
"learning_rate": 4.6800222593210905e-05,
"loss": 0.1942,
"step": 330
},
{
"epoch": 0.092041147807255,
"grad_norm": 2.3125,
"learning_rate": 4.666110183639399e-05,
"loss": 0.2184,
"step": 340
},
{
"epoch": 0.09474824038982133,
"grad_norm": 2.265625,
"learning_rate": 4.652198107957708e-05,
"loss": 0.2343,
"step": 350
},
{
"epoch": 0.09474824038982133,
"eval_loss": 0.23647841811180115,
"eval_runtime": 63.9433,
"eval_samples_per_second": 7.819,
"eval_steps_per_second": 0.25,
"step": 350
},
{
"epoch": 0.09745533297238766,
"grad_norm": 2.09375,
"learning_rate": 4.638286032276016e-05,
"loss": 0.2554,
"step": 360
},
{
"epoch": 0.10016242555495398,
"grad_norm": 1.828125,
"learning_rate": 4.624373956594324e-05,
"loss": 0.2288,
"step": 370
},
{
"epoch": 0.10286951813752031,
"grad_norm": 2.328125,
"learning_rate": 4.610461880912633e-05,
"loss": 0.2159,
"step": 380
},
{
"epoch": 0.10557661072008663,
"grad_norm": 2.1875,
"learning_rate": 4.596549805230941e-05,
"loss": 0.1825,
"step": 390
},
{
"epoch": 0.10828370330265295,
"grad_norm": 2.4375,
"learning_rate": 4.582637729549249e-05,
"loss": 0.2525,
"step": 400
},
{
"epoch": 0.10828370330265295,
"eval_loss": 0.21003711223602295,
"eval_runtime": 64.3464,
"eval_samples_per_second": 7.77,
"eval_steps_per_second": 0.249,
"step": 400
},
{
"epoch": 0.11099079588521928,
"grad_norm": 2.4375,
"learning_rate": 4.568725653867557e-05,
"loss": 0.1708,
"step": 410
},
{
"epoch": 0.1136978884677856,
"grad_norm": 3.140625,
"learning_rate": 4.5548135781858655e-05,
"loss": 0.1939,
"step": 420
},
{
"epoch": 0.11640498105035192,
"grad_norm": 2.5625,
"learning_rate": 4.540901502504174e-05,
"loss": 0.1732,
"step": 430
},
{
"epoch": 0.11911207363291824,
"grad_norm": 1.3984375,
"learning_rate": 4.526989426822482e-05,
"loss": 0.2078,
"step": 440
},
{
"epoch": 0.12181916621548457,
"grad_norm": 7.5,
"learning_rate": 4.51307735114079e-05,
"loss": 0.1984,
"step": 450
},
{
"epoch": 0.12181916621548457,
"eval_loss": 0.2016001045703888,
"eval_runtime": 98.9371,
"eval_samples_per_second": 5.054,
"eval_steps_per_second": 0.162,
"step": 450
},
{
"epoch": 0.12452625879805089,
"grad_norm": 2.78125,
"learning_rate": 4.4991652754590984e-05,
"loss": 0.1846,
"step": 460
},
{
"epoch": 0.12723335138061723,
"grad_norm": 1.0,
"learning_rate": 4.485253199777407e-05,
"loss": 0.1628,
"step": 470
},
{
"epoch": 0.12994044396318355,
"grad_norm": 0.65625,
"learning_rate": 4.471341124095715e-05,
"loss": 0.1627,
"step": 480
},
{
"epoch": 0.13264753654574987,
"grad_norm": 1.1953125,
"learning_rate": 4.457429048414024e-05,
"loss": 0.1694,
"step": 490
},
{
"epoch": 0.1353546291283162,
"grad_norm": 1.5078125,
"learning_rate": 4.443516972732332e-05,
"loss": 0.2156,
"step": 500
},
{
"epoch": 0.1353546291283162,
"eval_loss": 0.19197307527065277,
"eval_runtime": 72.4725,
"eval_samples_per_second": 6.899,
"eval_steps_per_second": 0.221,
"step": 500
},
{
"epoch": 0.13806172171088252,
"grad_norm": 3.140625,
"learning_rate": 4.4296048970506406e-05,
"loss": 0.1641,
"step": 510
},
{
"epoch": 0.14076881429344884,
"grad_norm": 2.359375,
"learning_rate": 4.4156928213689486e-05,
"loss": 0.1925,
"step": 520
},
{
"epoch": 0.14347590687601516,
"grad_norm": 1.375,
"learning_rate": 4.4017807456872566e-05,
"loss": 0.1794,
"step": 530
},
{
"epoch": 0.1461829994585815,
"grad_norm": 4.96875,
"learning_rate": 4.387868670005565e-05,
"loss": 0.197,
"step": 540
},
{
"epoch": 0.1488900920411478,
"grad_norm": 1.1015625,
"learning_rate": 4.373956594323873e-05,
"loss": 0.1423,
"step": 550
},
{
"epoch": 0.1488900920411478,
"eval_loss": 0.19690875709056854,
"eval_runtime": 62.7419,
"eval_samples_per_second": 7.969,
"eval_steps_per_second": 0.255,
"step": 550
},
{
"epoch": 0.15159718462371413,
"grad_norm": 1.5234375,
"learning_rate": 4.360044518642182e-05,
"loss": 0.1976,
"step": 560
},
{
"epoch": 0.15430427720628045,
"grad_norm": 0.57421875,
"learning_rate": 4.34613244296049e-05,
"loss": 0.14,
"step": 570
},
{
"epoch": 0.15701136978884678,
"grad_norm": 2.890625,
"learning_rate": 4.332220367278798e-05,
"loss": 0.2063,
"step": 580
},
{
"epoch": 0.1597184623714131,
"grad_norm": 2.15625,
"learning_rate": 4.318308291597106e-05,
"loss": 0.2194,
"step": 590
},
{
"epoch": 0.16242555495397942,
"grad_norm": 1.40625,
"learning_rate": 4.304396215915415e-05,
"loss": 0.1308,
"step": 600
},
{
"epoch": 0.16242555495397942,
"eval_loss": 0.1980336606502533,
"eval_runtime": 62.7479,
"eval_samples_per_second": 7.968,
"eval_steps_per_second": 0.255,
"step": 600
},
{
"epoch": 0.16513264753654575,
"grad_norm": 0.8046875,
"learning_rate": 4.290484140233723e-05,
"loss": 0.1557,
"step": 610
},
{
"epoch": 0.16783974011911207,
"grad_norm": 2.21875,
"learning_rate": 4.276572064552031e-05,
"loss": 0.1923,
"step": 620
},
{
"epoch": 0.1705468327016784,
"grad_norm": 0.73828125,
"learning_rate": 4.26265998887034e-05,
"loss": 0.1714,
"step": 630
},
{
"epoch": 0.17325392528424471,
"grad_norm": 3.578125,
"learning_rate": 4.2487479131886484e-05,
"loss": 0.184,
"step": 640
},
{
"epoch": 0.17596101786681104,
"grad_norm": 1.8125,
"learning_rate": 4.2348358375069565e-05,
"loss": 0.2067,
"step": 650
},
{
"epoch": 0.17596101786681104,
"eval_loss": 0.19718270003795624,
"eval_runtime": 62.7349,
"eval_samples_per_second": 7.97,
"eval_steps_per_second": 0.255,
"step": 650
},
{
"epoch": 0.17866811044937736,
"grad_norm": 2.734375,
"learning_rate": 4.2209237618252645e-05,
"loss": 0.156,
"step": 660
},
{
"epoch": 0.18137520303194368,
"grad_norm": 1.6328125,
"learning_rate": 4.2070116861435725e-05,
"loss": 0.1309,
"step": 670
},
{
"epoch": 0.18408229561451,
"grad_norm": 3.28125,
"learning_rate": 4.193099610461881e-05,
"loss": 0.1983,
"step": 680
},
{
"epoch": 0.18678938819707633,
"grad_norm": 1.765625,
"learning_rate": 4.179187534780189e-05,
"loss": 0.159,
"step": 690
},
{
"epoch": 0.18949648077964265,
"grad_norm": 0.62109375,
"learning_rate": 4.165275459098498e-05,
"loss": 0.1481,
"step": 700
},
{
"epoch": 0.18949648077964265,
"eval_loss": 0.1895207166671753,
"eval_runtime": 62.6131,
"eval_samples_per_second": 7.986,
"eval_steps_per_second": 0.256,
"step": 700
},
{
"epoch": 0.19220357336220897,
"grad_norm": 1.984375,
"learning_rate": 4.151363383416806e-05,
"loss": 0.2162,
"step": 710
},
{
"epoch": 0.19491066594477532,
"grad_norm": 1.1875,
"learning_rate": 4.137451307735114e-05,
"loss": 0.1278,
"step": 720
},
{
"epoch": 0.19761775852734165,
"grad_norm": 0.7734375,
"learning_rate": 4.123539232053423e-05,
"loss": 0.1714,
"step": 730
},
{
"epoch": 0.20032485110990797,
"grad_norm": 1.90625,
"learning_rate": 4.109627156371731e-05,
"loss": 0.1764,
"step": 740
},
{
"epoch": 0.2030319436924743,
"grad_norm": 1.9765625,
"learning_rate": 4.095715080690039e-05,
"loss": 0.161,
"step": 750
},
{
"epoch": 0.2030319436924743,
"eval_loss": 0.18931728601455688,
"eval_runtime": 63.0862,
"eval_samples_per_second": 7.926,
"eval_steps_per_second": 0.254,
"step": 750
},
{
"epoch": 0.20573903627504062,
"grad_norm": 0.859375,
"learning_rate": 4.081803005008347e-05,
"loss": 0.1913,
"step": 760
},
{
"epoch": 0.20844612885760694,
"grad_norm": 1.0234375,
"learning_rate": 4.0678909293266556e-05,
"loss": 0.1447,
"step": 770
},
{
"epoch": 0.21115322144017326,
"grad_norm": 1.265625,
"learning_rate": 4.053978853644964e-05,
"loss": 0.1265,
"step": 780
},
{
"epoch": 0.21386031402273958,
"grad_norm": 3.5,
"learning_rate": 4.0400667779632724e-05,
"loss": 0.1812,
"step": 790
},
{
"epoch": 0.2165674066053059,
"grad_norm": 1.484375,
"learning_rate": 4.0261547022815804e-05,
"loss": 0.1352,
"step": 800
},
{
"epoch": 0.2165674066053059,
"eval_loss": 0.1901209056377411,
"eval_runtime": 63.1622,
"eval_samples_per_second": 7.916,
"eval_steps_per_second": 0.253,
"step": 800
},
{
"epoch": 0.21927449918787223,
"grad_norm": 2.390625,
"learning_rate": 4.012242626599889e-05,
"loss": 0.1266,
"step": 810
},
{
"epoch": 0.22198159177043855,
"grad_norm": 2.71875,
"learning_rate": 3.998330550918197e-05,
"loss": 0.1408,
"step": 820
},
{
"epoch": 0.22468868435300487,
"grad_norm": 1.6875,
"learning_rate": 3.984418475236505e-05,
"loss": 0.1268,
"step": 830
},
{
"epoch": 0.2273957769355712,
"grad_norm": 1.5234375,
"learning_rate": 3.970506399554814e-05,
"loss": 0.1397,
"step": 840
},
{
"epoch": 0.23010286951813752,
"grad_norm": 1.421875,
"learning_rate": 3.956594323873122e-05,
"loss": 0.2107,
"step": 850
},
{
"epoch": 0.23010286951813752,
"eval_loss": 0.18407949805259705,
"eval_runtime": 62.4768,
"eval_samples_per_second": 8.003,
"eval_steps_per_second": 0.256,
"step": 850
},
{
"epoch": 0.23280996210070384,
"grad_norm": 2.515625,
"learning_rate": 3.9426822481914307e-05,
"loss": 0.1519,
"step": 860
},
{
"epoch": 0.23551705468327017,
"grad_norm": 2.3125,
"learning_rate": 3.928770172509739e-05,
"loss": 0.1877,
"step": 870
},
{
"epoch": 0.2382241472658365,
"grad_norm": 1.453125,
"learning_rate": 3.914858096828047e-05,
"loss": 0.099,
"step": 880
},
{
"epoch": 0.2409312398484028,
"grad_norm": 1.171875,
"learning_rate": 3.900946021146355e-05,
"loss": 0.1565,
"step": 890
},
{
"epoch": 0.24363833243096913,
"grad_norm": 0.44921875,
"learning_rate": 3.8870339454646635e-05,
"loss": 0.1352,
"step": 900
},
{
"epoch": 0.24363833243096913,
"eval_loss": 0.18001192808151245,
"eval_runtime": 62.7885,
"eval_samples_per_second": 7.963,
"eval_steps_per_second": 0.255,
"step": 900
},
{
"epoch": 0.24634542501353546,
"grad_norm": 2.140625,
"learning_rate": 3.873121869782972e-05,
"loss": 0.1469,
"step": 910
},
{
"epoch": 0.24905251759610178,
"grad_norm": 1.453125,
"learning_rate": 3.85920979410128e-05,
"loss": 0.1918,
"step": 920
},
{
"epoch": 0.2517596101786681,
"grad_norm": 1.4609375,
"learning_rate": 3.845297718419588e-05,
"loss": 0.1836,
"step": 930
},
{
"epoch": 0.25446670276123445,
"grad_norm": 2.625,
"learning_rate": 3.831385642737897e-05,
"loss": 0.1588,
"step": 940
},
{
"epoch": 0.25717379534380075,
"grad_norm": 0.6953125,
"learning_rate": 3.817473567056205e-05,
"loss": 0.1503,
"step": 950
},
{
"epoch": 0.25717379534380075,
"eval_loss": 0.17602640390396118,
"eval_runtime": 63.175,
"eval_samples_per_second": 7.915,
"eval_steps_per_second": 0.253,
"step": 950
},
{
"epoch": 0.2598808879263671,
"grad_norm": 2.0,
"learning_rate": 3.803561491374513e-05,
"loss": 0.1356,
"step": 960
},
{
"epoch": 0.2625879805089334,
"grad_norm": 0.9375,
"learning_rate": 3.789649415692821e-05,
"loss": 0.1702,
"step": 970
},
{
"epoch": 0.26529507309149974,
"grad_norm": 0.92578125,
"learning_rate": 3.77573734001113e-05,
"loss": 0.1545,
"step": 980
},
{
"epoch": 0.26800216567406604,
"grad_norm": 0.6640625,
"learning_rate": 3.7618252643294385e-05,
"loss": 0.1421,
"step": 990
},
{
"epoch": 0.2707092582566324,
"grad_norm": 3.96875,
"learning_rate": 3.7479131886477466e-05,
"loss": 0.1255,
"step": 1000
},
{
"epoch": 0.2707092582566324,
"eval_loss": 0.18262001872062683,
"eval_runtime": 78.6472,
"eval_samples_per_second": 6.358,
"eval_steps_per_second": 0.203,
"step": 1000
},
{
"epoch": 0.2734163508391987,
"grad_norm": 1.2734375,
"learning_rate": 3.7340011129660546e-05,
"loss": 0.144,
"step": 1010
},
{
"epoch": 0.27612344342176504,
"grad_norm": 1.7109375,
"learning_rate": 3.7200890372843626e-05,
"loss": 0.108,
"step": 1020
},
{
"epoch": 0.27883053600433133,
"grad_norm": 2.9375,
"learning_rate": 3.7061769616026713e-05,
"loss": 0.1255,
"step": 1030
},
{
"epoch": 0.2815376285868977,
"grad_norm": 1.25,
"learning_rate": 3.6922648859209794e-05,
"loss": 0.1421,
"step": 1040
},
{
"epoch": 0.284244721169464,
"grad_norm": 4.625,
"learning_rate": 3.678352810239288e-05,
"loss": 0.1686,
"step": 1050
},
{
"epoch": 0.284244721169464,
"eval_loss": 0.18178632855415344,
"eval_runtime": 62.9144,
"eval_samples_per_second": 7.947,
"eval_steps_per_second": 0.254,
"step": 1050
},
{
"epoch": 0.2869518137520303,
"grad_norm": 1.1328125,
"learning_rate": 3.664440734557596e-05,
"loss": 0.1147,
"step": 1060
},
{
"epoch": 0.2896589063345966,
"grad_norm": 1.703125,
"learning_rate": 3.650528658875905e-05,
"loss": 0.1517,
"step": 1070
},
{
"epoch": 0.292365998917163,
"grad_norm": 2.328125,
"learning_rate": 3.636616583194213e-05,
"loss": 0.1545,
"step": 1080
},
{
"epoch": 0.29507309149972927,
"grad_norm": 1.984375,
"learning_rate": 3.622704507512521e-05,
"loss": 0.1471,
"step": 1090
},
{
"epoch": 0.2977801840822956,
"grad_norm": 2.515625,
"learning_rate": 3.608792431830829e-05,
"loss": 0.1522,
"step": 1100
},
{
"epoch": 0.2977801840822956,
"eval_loss": 0.17702646553516388,
"eval_runtime": 63.2535,
"eval_samples_per_second": 7.905,
"eval_steps_per_second": 0.253,
"step": 1100
},
{
"epoch": 0.3004872766648619,
"grad_norm": 1.5859375,
"learning_rate": 3.594880356149138e-05,
"loss": 0.1772,
"step": 1110
},
{
"epoch": 0.30319436924742826,
"grad_norm": 0.77734375,
"learning_rate": 3.580968280467446e-05,
"loss": 0.1361,
"step": 1120
},
{
"epoch": 0.30590146182999456,
"grad_norm": 0.427734375,
"learning_rate": 3.5670562047857544e-05,
"loss": 0.128,
"step": 1130
},
{
"epoch": 0.3086085544125609,
"grad_norm": 2.328125,
"learning_rate": 3.5531441291040625e-05,
"loss": 0.1313,
"step": 1140
},
{
"epoch": 0.31131564699512726,
"grad_norm": 1.34375,
"learning_rate": 3.5392320534223705e-05,
"loss": 0.1238,
"step": 1150
},
{
"epoch": 0.31131564699512726,
"eval_loss": 0.17454275488853455,
"eval_runtime": 62.5874,
"eval_samples_per_second": 7.989,
"eval_steps_per_second": 0.256,
"step": 1150
},
{
"epoch": 0.31402273957769355,
"grad_norm": 0.451171875,
"learning_rate": 3.525319977740679e-05,
"loss": 0.122,
"step": 1160
},
{
"epoch": 0.3167298321602599,
"grad_norm": 2.59375,
"learning_rate": 3.511407902058987e-05,
"loss": 0.174,
"step": 1170
},
{
"epoch": 0.3194369247428262,
"grad_norm": 1.8671875,
"learning_rate": 3.497495826377295e-05,
"loss": 0.1741,
"step": 1180
},
{
"epoch": 0.32214401732539255,
"grad_norm": 1.5,
"learning_rate": 3.483583750695604e-05,
"loss": 0.1116,
"step": 1190
},
{
"epoch": 0.32485110990795885,
"grad_norm": 3.875,
"learning_rate": 3.469671675013913e-05,
"loss": 0.1179,
"step": 1200
},
{
"epoch": 0.32485110990795885,
"eval_loss": 0.17608264088630676,
"eval_runtime": 62.6246,
"eval_samples_per_second": 7.984,
"eval_steps_per_second": 0.255,
"step": 1200
},
{
"epoch": 0.3275582024905252,
"grad_norm": 1.7421875,
"learning_rate": 3.455759599332221e-05,
"loss": 0.1332,
"step": 1210
},
{
"epoch": 0.3302652950730915,
"grad_norm": 2.578125,
"learning_rate": 3.441847523650529e-05,
"loss": 0.1425,
"step": 1220
},
{
"epoch": 0.33297238765565784,
"grad_norm": 2.734375,
"learning_rate": 3.427935447968837e-05,
"loss": 0.2023,
"step": 1230
},
{
"epoch": 0.33567948023822414,
"grad_norm": 0.84375,
"learning_rate": 3.4140233722871455e-05,
"loss": 0.1373,
"step": 1240
},
{
"epoch": 0.3383865728207905,
"grad_norm": 1.203125,
"learning_rate": 3.4001112966054536e-05,
"loss": 0.123,
"step": 1250
},
{
"epoch": 0.3383865728207905,
"eval_loss": 0.16774284839630127,
"eval_runtime": 62.784,
"eval_samples_per_second": 7.964,
"eval_steps_per_second": 0.255,
"step": 1250
},
{
"epoch": 0.3410936654033568,
"grad_norm": 2.6875,
"learning_rate": 3.386199220923762e-05,
"loss": 0.1289,
"step": 1260
},
{
"epoch": 0.34380075798592313,
"grad_norm": 3.28125,
"learning_rate": 3.37228714524207e-05,
"loss": 0.1533,
"step": 1270
},
{
"epoch": 0.34650785056848943,
"grad_norm": 1.359375,
"learning_rate": 3.358375069560379e-05,
"loss": 0.1829,
"step": 1280
},
{
"epoch": 0.3492149431510558,
"grad_norm": 2.15625,
"learning_rate": 3.344462993878687e-05,
"loss": 0.128,
"step": 1290
},
{
"epoch": 0.3519220357336221,
"grad_norm": 2.21875,
"learning_rate": 3.330550918196995e-05,
"loss": 0.1807,
"step": 1300
},
{
"epoch": 0.3519220357336221,
"eval_loss": 0.15718665719032288,
"eval_runtime": 62.4512,
"eval_samples_per_second": 8.006,
"eval_steps_per_second": 0.256,
"step": 1300
},
{
"epoch": 0.3546291283161884,
"grad_norm": 0.5390625,
"learning_rate": 3.316638842515303e-05,
"loss": 0.1276,
"step": 1310
},
{
"epoch": 0.3573362208987547,
"grad_norm": 1.7265625,
"learning_rate": 3.302726766833611e-05,
"loss": 0.1422,
"step": 1320
},
{
"epoch": 0.36004331348132107,
"grad_norm": 3.125,
"learning_rate": 3.28881469115192e-05,
"loss": 0.2032,
"step": 1330
},
{
"epoch": 0.36275040606388737,
"grad_norm": 1.8515625,
"learning_rate": 3.2749026154702286e-05,
"loss": 0.1726,
"step": 1340
},
{
"epoch": 0.3654574986464537,
"grad_norm": 1.0625,
"learning_rate": 3.2609905397885366e-05,
"loss": 0.1432,
"step": 1350
},
{
"epoch": 0.3654574986464537,
"eval_loss": 0.15765319764614105,
"eval_runtime": 63.3444,
"eval_samples_per_second": 7.893,
"eval_steps_per_second": 0.253,
"step": 1350
},
{
"epoch": 0.36816459122902,
"grad_norm": 1.125,
"learning_rate": 3.247078464106845e-05,
"loss": 0.1008,
"step": 1360
},
{
"epoch": 0.37087168381158636,
"grad_norm": 1.734375,
"learning_rate": 3.2331663884251534e-05,
"loss": 0.1397,
"step": 1370
},
{
"epoch": 0.37357877639415266,
"grad_norm": 1.6484375,
"learning_rate": 3.2192543127434614e-05,
"loss": 0.1342,
"step": 1380
},
{
"epoch": 0.376285868976719,
"grad_norm": 1.0859375,
"learning_rate": 3.2053422370617695e-05,
"loss": 0.1528,
"step": 1390
},
{
"epoch": 0.3789929615592853,
"grad_norm": 2.421875,
"learning_rate": 3.191430161380078e-05,
"loss": 0.1313,
"step": 1400
},
{
"epoch": 0.3789929615592853,
"eval_loss": 0.16292478144168854,
"eval_runtime": 64.15,
"eval_samples_per_second": 7.794,
"eval_steps_per_second": 0.249,
"step": 1400
},
{
"epoch": 0.38170005414185165,
"grad_norm": 2.09375,
"learning_rate": 3.177518085698387e-05,
"loss": 0.1358,
"step": 1410
},
{
"epoch": 0.38440714672441795,
"grad_norm": 3.0,
"learning_rate": 3.163606010016695e-05,
"loss": 0.1683,
"step": 1420
},
{
"epoch": 0.3871142393069843,
"grad_norm": 2.109375,
"learning_rate": 3.149693934335003e-05,
"loss": 0.136,
"step": 1430
},
{
"epoch": 0.38982133188955065,
"grad_norm": 2.15625,
"learning_rate": 3.135781858653311e-05,
"loss": 0.1435,
"step": 1440
},
{
"epoch": 0.39252842447211694,
"grad_norm": 2.046875,
"learning_rate": 3.121869782971619e-05,
"loss": 0.1711,
"step": 1450
},
{
"epoch": 0.39252842447211694,
"eval_loss": 0.15946637094020844,
"eval_runtime": 65.0021,
"eval_samples_per_second": 7.692,
"eval_steps_per_second": 0.246,
"step": 1450
},
{
"epoch": 0.3952355170546833,
"grad_norm": 0.80078125,
"learning_rate": 3.107957707289928e-05,
"loss": 0.1672,
"step": 1460
},
{
"epoch": 0.3979426096372496,
"grad_norm": 0.703125,
"learning_rate": 3.0940456316082365e-05,
"loss": 0.1152,
"step": 1470
},
{
"epoch": 0.40064970221981594,
"grad_norm": 2.203125,
"learning_rate": 3.0801335559265445e-05,
"loss": 0.1628,
"step": 1480
},
{
"epoch": 0.40335679480238223,
"grad_norm": 1.7734375,
"learning_rate": 3.0662214802448525e-05,
"loss": 0.1474,
"step": 1490
},
{
"epoch": 0.4060638873849486,
"grad_norm": 1.015625,
"learning_rate": 3.052309404563161e-05,
"loss": 0.1211,
"step": 1500
},
{
"epoch": 0.4060638873849486,
"eval_loss": 0.16578222811222076,
"eval_runtime": 77.637,
"eval_samples_per_second": 6.44,
"eval_steps_per_second": 0.206,
"step": 1500
},
{
"epoch": 0.4087709799675149,
"grad_norm": 2.0625,
"learning_rate": 3.0383973288814693e-05,
"loss": 0.1388,
"step": 1510
},
{
"epoch": 0.41147807255008123,
"grad_norm": 2.671875,
"learning_rate": 3.0244852531997773e-05,
"loss": 0.1518,
"step": 1520
},
{
"epoch": 0.4141851651326475,
"grad_norm": 2.046875,
"learning_rate": 3.0105731775180857e-05,
"loss": 0.1536,
"step": 1530
},
{
"epoch": 0.4168922577152139,
"grad_norm": 2.0625,
"learning_rate": 2.9966611018363944e-05,
"loss": 0.1464,
"step": 1540
},
{
"epoch": 0.41959935029778017,
"grad_norm": 1.9609375,
"learning_rate": 2.9827490261547024e-05,
"loss": 0.1585,
"step": 1550
},
{
"epoch": 0.41959935029778017,
"eval_loss": 0.16232775151729584,
"eval_runtime": 64.4195,
"eval_samples_per_second": 7.762,
"eval_steps_per_second": 0.248,
"step": 1550
},
{
"epoch": 0.4223064428803465,
"grad_norm": 1.1640625,
"learning_rate": 2.9688369504730108e-05,
"loss": 0.1078,
"step": 1560
},
{
"epoch": 0.4250135354629128,
"grad_norm": 0.9453125,
"learning_rate": 2.954924874791319e-05,
"loss": 0.1191,
"step": 1570
},
{
"epoch": 0.42772062804547917,
"grad_norm": 2.328125,
"learning_rate": 2.9410127991096276e-05,
"loss": 0.1366,
"step": 1580
},
{
"epoch": 0.43042772062804546,
"grad_norm": 1.0390625,
"learning_rate": 2.9271007234279356e-05,
"loss": 0.1382,
"step": 1590
},
{
"epoch": 0.4331348132106118,
"grad_norm": 1.03125,
"learning_rate": 2.913188647746244e-05,
"loss": 0.1545,
"step": 1600
},
{
"epoch": 0.4331348132106118,
"eval_loss": 0.16547426581382751,
"eval_runtime": 64.8149,
"eval_samples_per_second": 7.714,
"eval_steps_per_second": 0.247,
"step": 1600
},
{
"epoch": 0.4358419057931781,
"grad_norm": 1.0390625,
"learning_rate": 2.899276572064552e-05,
"loss": 0.1232,
"step": 1610
},
{
"epoch": 0.43854899837574446,
"grad_norm": 0.45703125,
"learning_rate": 2.88536449638286e-05,
"loss": 0.1371,
"step": 1620
},
{
"epoch": 0.44125609095831075,
"grad_norm": 3.28125,
"learning_rate": 2.8714524207011688e-05,
"loss": 0.1301,
"step": 1630
},
{
"epoch": 0.4439631835408771,
"grad_norm": 2.703125,
"learning_rate": 2.857540345019477e-05,
"loss": 0.122,
"step": 1640
},
{
"epoch": 0.4466702761234434,
"grad_norm": 1.203125,
"learning_rate": 2.8436282693377852e-05,
"loss": 0.1649,
"step": 1650
},
{
"epoch": 0.4466702761234434,
"eval_loss": 0.15991100668907166,
"eval_runtime": 64.6415,
"eval_samples_per_second": 7.735,
"eval_steps_per_second": 0.248,
"step": 1650
},
{
"epoch": 0.44937736870600975,
"grad_norm": 1.28125,
"learning_rate": 2.8297161936560936e-05,
"loss": 0.158,
"step": 1660
},
{
"epoch": 0.45208446128857604,
"grad_norm": 0.9375,
"learning_rate": 2.8158041179744023e-05,
"loss": 0.1427,
"step": 1670
},
{
"epoch": 0.4547915538711424,
"grad_norm": 1.6796875,
"learning_rate": 2.8018920422927103e-05,
"loss": 0.1528,
"step": 1680
},
{
"epoch": 0.4574986464537087,
"grad_norm": 0.78125,
"learning_rate": 2.7879799666110183e-05,
"loss": 0.1117,
"step": 1690
},
{
"epoch": 0.46020573903627504,
"grad_norm": 1.3203125,
"learning_rate": 2.7740678909293267e-05,
"loss": 0.1069,
"step": 1700
},
{
"epoch": 0.46020573903627504,
"eval_loss": 0.16090016067028046,
"eval_runtime": 64.8286,
"eval_samples_per_second": 7.713,
"eval_steps_per_second": 0.247,
"step": 1700
},
{
"epoch": 0.4629128316188414,
"grad_norm": 1.703125,
"learning_rate": 2.7601558152476354e-05,
"loss": 0.1165,
"step": 1710
},
{
"epoch": 0.4656199242014077,
"grad_norm": 2.015625,
"learning_rate": 2.7462437395659435e-05,
"loss": 0.1347,
"step": 1720
},
{
"epoch": 0.46832701678397404,
"grad_norm": 2.25,
"learning_rate": 2.7323316638842515e-05,
"loss": 0.2037,
"step": 1730
},
{
"epoch": 0.47103410936654033,
"grad_norm": 0.7890625,
"learning_rate": 2.71841958820256e-05,
"loss": 0.1338,
"step": 1740
},
{
"epoch": 0.4737412019491067,
"grad_norm": 1.21875,
"learning_rate": 2.704507512520868e-05,
"loss": 0.1245,
"step": 1750
},
{
"epoch": 0.4737412019491067,
"eval_loss": 0.16117550432682037,
"eval_runtime": 64.6119,
"eval_samples_per_second": 7.739,
"eval_steps_per_second": 0.248,
"step": 1750
},
{
"epoch": 0.476448294531673,
"grad_norm": 1.796875,
"learning_rate": 2.6905954368391766e-05,
"loss": 0.1182,
"step": 1760
},
{
"epoch": 0.47915538711423933,
"grad_norm": 2.78125,
"learning_rate": 2.676683361157485e-05,
"loss": 0.1662,
"step": 1770
},
{
"epoch": 0.4818624796968056,
"grad_norm": 1.0546875,
"learning_rate": 2.662771285475793e-05,
"loss": 0.1597,
"step": 1780
},
{
"epoch": 0.484569572279372,
"grad_norm": 1.5703125,
"learning_rate": 2.648859209794101e-05,
"loss": 0.104,
"step": 1790
},
{
"epoch": 0.48727666486193827,
"grad_norm": 3.109375,
"learning_rate": 2.6349471341124098e-05,
"loss": 0.1149,
"step": 1800
},
{
"epoch": 0.48727666486193827,
"eval_loss": 0.1653529703617096,
"eval_runtime": 65.0831,
"eval_samples_per_second": 7.682,
"eval_steps_per_second": 0.246,
"step": 1800
},
{
"epoch": 0.4899837574445046,
"grad_norm": 0.60546875,
"learning_rate": 2.6210350584307182e-05,
"loss": 0.121,
"step": 1810
},
{
"epoch": 0.4926908500270709,
"grad_norm": 2.625,
"learning_rate": 2.6071229827490262e-05,
"loss": 0.1797,
"step": 1820
},
{
"epoch": 0.49539794260963727,
"grad_norm": 1.9609375,
"learning_rate": 2.5932109070673342e-05,
"loss": 0.1159,
"step": 1830
},
{
"epoch": 0.49810503519220356,
"grad_norm": 1.546875,
"learning_rate": 2.579298831385643e-05,
"loss": 0.1119,
"step": 1840
},
{
"epoch": 0.5008121277747699,
"grad_norm": 2.90625,
"learning_rate": 2.5653867557039513e-05,
"loss": 0.1367,
"step": 1850
},
{
"epoch": 0.5008121277747699,
"eval_loss": 0.16627325117588043,
"eval_runtime": 64.4654,
"eval_samples_per_second": 7.756,
"eval_steps_per_second": 0.248,
"step": 1850
},
{
"epoch": 0.5035192203573362,
"grad_norm": 1.1640625,
"learning_rate": 2.5514746800222594e-05,
"loss": 0.1591,
"step": 1860
},
{
"epoch": 0.5062263129399025,
"grad_norm": 2.4375,
"learning_rate": 2.5375626043405677e-05,
"loss": 0.1047,
"step": 1870
},
{
"epoch": 0.5089334055224689,
"grad_norm": 1.78125,
"learning_rate": 2.5236505286588765e-05,
"loss": 0.1172,
"step": 1880
},
{
"epoch": 0.5116404981050352,
"grad_norm": 1.234375,
"learning_rate": 2.5097384529771845e-05,
"loss": 0.1211,
"step": 1890
},
{
"epoch": 0.5143475906876015,
"grad_norm": 3.0,
"learning_rate": 2.4958263772954925e-05,
"loss": 0.1294,
"step": 1900
},
{
"epoch": 0.5143475906876015,
"eval_loss": 0.1589047610759735,
"eval_runtime": 63.4959,
"eval_samples_per_second": 7.875,
"eval_steps_per_second": 0.252,
"step": 1900
},
{
"epoch": 0.5170546832701678,
"grad_norm": 1.8984375,
"learning_rate": 2.481914301613801e-05,
"loss": 0.1343,
"step": 1910
},
{
"epoch": 0.5197617758527342,
"grad_norm": 2.0,
"learning_rate": 2.4680022259321093e-05,
"loss": 0.1357,
"step": 1920
},
{
"epoch": 0.5224688684353005,
"grad_norm": 2.671875,
"learning_rate": 2.4540901502504173e-05,
"loss": 0.143,
"step": 1930
},
{
"epoch": 0.5251759610178668,
"grad_norm": 1.3828125,
"learning_rate": 2.4401780745687257e-05,
"loss": 0.1321,
"step": 1940
},
{
"epoch": 0.5278830536004331,
"grad_norm": 2.046875,
"learning_rate": 2.426265998887034e-05,
"loss": 0.1663,
"step": 1950
},
{
"epoch": 0.5278830536004331,
"eval_loss": 0.15560173988342285,
"eval_runtime": 63.7793,
"eval_samples_per_second": 7.84,
"eval_steps_per_second": 0.251,
"step": 1950
},
{
"epoch": 0.5305901461829995,
"grad_norm": 0.9296875,
"learning_rate": 2.4123539232053424e-05,
"loss": 0.0997,
"step": 1960
},
{
"epoch": 0.5332972387655658,
"grad_norm": 2.015625,
"learning_rate": 2.3984418475236505e-05,
"loss": 0.1283,
"step": 1970
},
{
"epoch": 0.5360043313481321,
"grad_norm": 0.89453125,
"learning_rate": 2.3845297718419592e-05,
"loss": 0.1407,
"step": 1980
},
{
"epoch": 0.5387114239306985,
"grad_norm": 0.8046875,
"learning_rate": 2.3706176961602672e-05,
"loss": 0.1171,
"step": 1990
},
{
"epoch": 0.5414185165132648,
"grad_norm": 1.296875,
"learning_rate": 2.3567056204785756e-05,
"loss": 0.1288,
"step": 2000
},
{
"epoch": 0.5414185165132648,
"eval_loss": 0.15494494140148163,
"eval_runtime": 79.2552,
"eval_samples_per_second": 6.309,
"eval_steps_per_second": 0.202,
"step": 2000
},
{
"epoch": 0.5441256090958311,
"grad_norm": 2.015625,
"learning_rate": 2.3427935447968836e-05,
"loss": 0.1055,
"step": 2010
},
{
"epoch": 0.5468327016783974,
"grad_norm": 1.140625,
"learning_rate": 2.3288814691151924e-05,
"loss": 0.1142,
"step": 2020
},
{
"epoch": 0.5495397942609638,
"grad_norm": 1.5703125,
"learning_rate": 2.3149693934335004e-05,
"loss": 0.1415,
"step": 2030
},
{
"epoch": 0.5522468868435301,
"grad_norm": 0.609375,
"learning_rate": 2.3010573177518084e-05,
"loss": 0.1086,
"step": 2040
},
{
"epoch": 0.5549539794260964,
"grad_norm": 2.921875,
"learning_rate": 2.287145242070117e-05,
"loss": 0.1028,
"step": 2050
},
{
"epoch": 0.5549539794260964,
"eval_loss": 0.1615983545780182,
"eval_runtime": 64.4361,
"eval_samples_per_second": 7.76,
"eval_steps_per_second": 0.248,
"step": 2050
},
{
"epoch": 0.5576610720086627,
"grad_norm": 1.6640625,
"learning_rate": 2.2732331663884252e-05,
"loss": 0.1268,
"step": 2060
},
{
"epoch": 0.5603681645912291,
"grad_norm": 0.55078125,
"learning_rate": 2.2593210907067336e-05,
"loss": 0.1187,
"step": 2070
},
{
"epoch": 0.5630752571737954,
"grad_norm": 1.703125,
"learning_rate": 2.2454090150250416e-05,
"loss": 0.1135,
"step": 2080
},
{
"epoch": 0.5657823497563617,
"grad_norm": 2.71875,
"learning_rate": 2.2314969393433503e-05,
"loss": 0.1625,
"step": 2090
},
{
"epoch": 0.568489442338928,
"grad_norm": 1.3125,
"learning_rate": 2.2175848636616583e-05,
"loss": 0.1095,
"step": 2100
},
{
"epoch": 0.568489442338928,
"eval_loss": 0.16485995054244995,
"eval_runtime": 63.0678,
"eval_samples_per_second": 7.928,
"eval_steps_per_second": 0.254,
"step": 2100
},
{
"epoch": 0.5711965349214944,
"grad_norm": 0.890625,
"learning_rate": 2.2036727879799667e-05,
"loss": 0.1131,
"step": 2110
},
{
"epoch": 0.5739036275040607,
"grad_norm": 2.296875,
"learning_rate": 2.189760712298275e-05,
"loss": 0.0784,
"step": 2120
},
{
"epoch": 0.576610720086627,
"grad_norm": 3.4375,
"learning_rate": 2.1758486366165835e-05,
"loss": 0.1548,
"step": 2130
},
{
"epoch": 0.5793178126691932,
"grad_norm": 0.671875,
"learning_rate": 2.1619365609348915e-05,
"loss": 0.1285,
"step": 2140
},
{
"epoch": 0.5820249052517596,
"grad_norm": 2.265625,
"learning_rate": 2.1480244852532e-05,
"loss": 0.1333,
"step": 2150
},
{
"epoch": 0.5820249052517596,
"eval_loss": 0.1646082103252411,
"eval_runtime": 64.2796,
"eval_samples_per_second": 7.779,
"eval_steps_per_second": 0.249,
"step": 2150
},
{
"epoch": 0.584731997834326,
"grad_norm": 1.71875,
"learning_rate": 2.1341124095715083e-05,
"loss": 0.1197,
"step": 2160
},
{
"epoch": 0.5874390904168922,
"grad_norm": 0.5703125,
"learning_rate": 2.1202003338898166e-05,
"loss": 0.151,
"step": 2170
},
{
"epoch": 0.5901461829994585,
"grad_norm": 1.828125,
"learning_rate": 2.1062882582081247e-05,
"loss": 0.0958,
"step": 2180
},
{
"epoch": 0.5928532755820249,
"grad_norm": 3.15625,
"learning_rate": 2.092376182526433e-05,
"loss": 0.1853,
"step": 2190
},
{
"epoch": 0.5955603681645912,
"grad_norm": 2.34375,
"learning_rate": 2.0784641068447414e-05,
"loss": 0.1199,
"step": 2200
},
{
"epoch": 0.5955603681645912,
"eval_loss": 0.162201389670372,
"eval_runtime": 64.0196,
"eval_samples_per_second": 7.81,
"eval_steps_per_second": 0.25,
"step": 2200
},
{
"epoch": 0.5982674607471575,
"grad_norm": 1.34375,
"learning_rate": 2.0645520311630495e-05,
"loss": 0.1661,
"step": 2210
},
{
"epoch": 0.6009745533297238,
"grad_norm": 1.5703125,
"learning_rate": 2.0506399554813578e-05,
"loss": 0.1106,
"step": 2220
},
{
"epoch": 0.6036816459122902,
"grad_norm": 3.65625,
"learning_rate": 2.0367278797996662e-05,
"loss": 0.1747,
"step": 2230
},
{
"epoch": 0.6063887384948565,
"grad_norm": 0.828125,
"learning_rate": 2.0228158041179746e-05,
"loss": 0.1247,
"step": 2240
},
{
"epoch": 0.6090958310774228,
"grad_norm": 0.7734375,
"learning_rate": 2.0089037284362826e-05,
"loss": 0.0886,
"step": 2250
},
{
"epoch": 0.6090958310774228,
"eval_loss": 0.15883322060108185,
"eval_runtime": 63.686,
"eval_samples_per_second": 7.851,
"eval_steps_per_second": 0.251,
"step": 2250
},
{
"epoch": 0.6118029236599891,
"grad_norm": 2.046875,
"learning_rate": 1.994991652754591e-05,
"loss": 0.1134,
"step": 2260
},
{
"epoch": 0.6145100162425555,
"grad_norm": 2.296875,
"learning_rate": 1.9810795770728994e-05,
"loss": 0.1273,
"step": 2270
},
{
"epoch": 0.6172171088251218,
"grad_norm": 1.3671875,
"learning_rate": 1.9671675013912077e-05,
"loss": 0.118,
"step": 2280
},
{
"epoch": 0.6199242014076881,
"grad_norm": 1.3203125,
"learning_rate": 1.9532554257095158e-05,
"loss": 0.1127,
"step": 2290
},
{
"epoch": 0.6226312939902545,
"grad_norm": 1.703125,
"learning_rate": 1.9393433500278245e-05,
"loss": 0.1319,
"step": 2300
},
{
"epoch": 0.6226312939902545,
"eval_loss": 0.1598823368549347,
"eval_runtime": 63.542,
"eval_samples_per_second": 7.869,
"eval_steps_per_second": 0.252,
"step": 2300
},
{
"epoch": 0.6253383865728208,
"grad_norm": 1.75,
"learning_rate": 1.9254312743461325e-05,
"loss": 0.1115,
"step": 2310
},
{
"epoch": 0.6280454791553871,
"grad_norm": 2.3125,
"learning_rate": 1.911519198664441e-05,
"loss": 0.1411,
"step": 2320
},
{
"epoch": 0.6307525717379534,
"grad_norm": 1.1484375,
"learning_rate": 1.8976071229827493e-05,
"loss": 0.1097,
"step": 2330
},
{
"epoch": 0.6334596643205198,
"grad_norm": 3.5,
"learning_rate": 1.8836950473010573e-05,
"loss": 0.1072,
"step": 2340
},
{
"epoch": 0.6361667569030861,
"grad_norm": 1.09375,
"learning_rate": 1.8697829716193657e-05,
"loss": 0.1215,
"step": 2350
},
{
"epoch": 0.6361667569030861,
"eval_loss": 0.15923769772052765,
"eval_runtime": 62.8745,
"eval_samples_per_second": 7.952,
"eval_steps_per_second": 0.254,
"step": 2350
},
{
"epoch": 0.6388738494856524,
"grad_norm": 2.78125,
"learning_rate": 1.8558708959376737e-05,
"loss": 0.0999,
"step": 2360
},
{
"epoch": 0.6415809420682187,
"grad_norm": 0.5390625,
"learning_rate": 1.8419588202559824e-05,
"loss": 0.134,
"step": 2370
},
{
"epoch": 0.6442880346507851,
"grad_norm": 3.703125,
"learning_rate": 1.8280467445742905e-05,
"loss": 0.1648,
"step": 2380
},
{
"epoch": 0.6469951272333514,
"grad_norm": 2.046875,
"learning_rate": 1.814134668892599e-05,
"loss": 0.1272,
"step": 2390
},
{
"epoch": 0.6497022198159177,
"grad_norm": 0.91796875,
"learning_rate": 1.8002225932109072e-05,
"loss": 0.1202,
"step": 2400
},
{
"epoch": 0.6497022198159177,
"eval_loss": 0.156494602560997,
"eval_runtime": 63.7775,
"eval_samples_per_second": 7.84,
"eval_steps_per_second": 0.251,
"step": 2400
},
{
"epoch": 0.652409312398484,
"grad_norm": 6.3125,
"learning_rate": 1.7863105175292156e-05,
"loss": 0.1491,
"step": 2410
},
{
"epoch": 0.6551164049810504,
"grad_norm": 1.65625,
"learning_rate": 1.7723984418475236e-05,
"loss": 0.1209,
"step": 2420
},
{
"epoch": 0.6578234975636167,
"grad_norm": 0.73828125,
"learning_rate": 1.758486366165832e-05,
"loss": 0.1295,
"step": 2430
},
{
"epoch": 0.660530590146183,
"grad_norm": 2.765625,
"learning_rate": 1.7445742904841404e-05,
"loss": 0.1013,
"step": 2440
},
{
"epoch": 0.6632376827287493,
"grad_norm": 0.53515625,
"learning_rate": 1.7306622148024488e-05,
"loss": 0.1537,
"step": 2450
},
{
"epoch": 0.6632376827287493,
"eval_loss": 0.15601959824562073,
"eval_runtime": 64.3267,
"eval_samples_per_second": 7.773,
"eval_steps_per_second": 0.249,
"step": 2450
},
{
"epoch": 0.6659447753113157,
"grad_norm": 2.984375,
"learning_rate": 1.7167501391207568e-05,
"loss": 0.1364,
"step": 2460
},
{
"epoch": 0.668651867893882,
"grad_norm": 1.8828125,
"learning_rate": 1.7028380634390652e-05,
"loss": 0.1121,
"step": 2470
},
{
"epoch": 0.6713589604764483,
"grad_norm": 1.40625,
"learning_rate": 1.6889259877573735e-05,
"loss": 0.135,
"step": 2480
},
{
"epoch": 0.6740660530590146,
"grad_norm": 2.609375,
"learning_rate": 1.6750139120756816e-05,
"loss": 0.1312,
"step": 2490
},
{
"epoch": 0.676773145641581,
"grad_norm": 1.1015625,
"learning_rate": 1.66110183639399e-05,
"loss": 0.1146,
"step": 2500
},
{
"epoch": 0.676773145641581,
"eval_loss": 0.15720410645008087,
"eval_runtime": 82.5469,
"eval_samples_per_second": 6.057,
"eval_steps_per_second": 0.194,
"step": 2500
},
{
"epoch": 0.6794802382241473,
"grad_norm": 2.921875,
"learning_rate": 1.6471897607122983e-05,
"loss": 0.132,
"step": 2510
},
{
"epoch": 0.6821873308067136,
"grad_norm": 1.109375,
"learning_rate": 1.6332776850306067e-05,
"loss": 0.1216,
"step": 2520
},
{
"epoch": 0.6848944233892799,
"grad_norm": 1.0625,
"learning_rate": 1.6193656093489147e-05,
"loss": 0.1181,
"step": 2530
},
{
"epoch": 0.6876015159718463,
"grad_norm": 0.236328125,
"learning_rate": 1.605453533667223e-05,
"loss": 0.1067,
"step": 2540
},
{
"epoch": 0.6903086085544126,
"grad_norm": 1.5,
"learning_rate": 1.5915414579855315e-05,
"loss": 0.1363,
"step": 2550
},
{
"epoch": 0.6903086085544126,
"eval_loss": 0.15796752274036407,
"eval_runtime": 64.1468,
"eval_samples_per_second": 7.795,
"eval_steps_per_second": 0.249,
"step": 2550
},
{
"epoch": 0.6930157011369789,
"grad_norm": 1.5703125,
"learning_rate": 1.57762938230384e-05,
"loss": 0.0929,
"step": 2560
},
{
"epoch": 0.6957227937195453,
"grad_norm": 0.94140625,
"learning_rate": 1.563717306622148e-05,
"loss": 0.1125,
"step": 2570
},
{
"epoch": 0.6984298863021116,
"grad_norm": 2.140625,
"learning_rate": 1.5498052309404566e-05,
"loss": 0.1053,
"step": 2580
},
{
"epoch": 0.7011369788846779,
"grad_norm": 2.578125,
"learning_rate": 1.5358931552587647e-05,
"loss": 0.111,
"step": 2590
},
{
"epoch": 0.7038440714672441,
"grad_norm": 2.484375,
"learning_rate": 1.521981079577073e-05,
"loss": 0.111,
"step": 2600
},
{
"epoch": 0.7038440714672441,
"eval_loss": 0.15660835802555084,
"eval_runtime": 64.7015,
"eval_samples_per_second": 7.728,
"eval_steps_per_second": 0.247,
"step": 2600
},
{
"epoch": 0.7065511640498106,
"grad_norm": 0.69921875,
"learning_rate": 1.5080690038953812e-05,
"loss": 0.1276,
"step": 2610
},
{
"epoch": 0.7092582566323768,
"grad_norm": 1.6328125,
"learning_rate": 1.4941569282136896e-05,
"loss": 0.1508,
"step": 2620
},
{
"epoch": 0.7119653492149431,
"grad_norm": 2.265625,
"learning_rate": 1.4802448525319978e-05,
"loss": 0.1717,
"step": 2630
},
{
"epoch": 0.7146724417975094,
"grad_norm": 1.9765625,
"learning_rate": 1.466332776850306e-05,
"loss": 0.1422,
"step": 2640
},
{
"epoch": 0.7173795343800758,
"grad_norm": 1.1796875,
"learning_rate": 1.4524207011686144e-05,
"loss": 0.1003,
"step": 2650
},
{
"epoch": 0.7173795343800758,
"eval_loss": 0.15460146963596344,
"eval_runtime": 65.1755,
"eval_samples_per_second": 7.672,
"eval_steps_per_second": 0.245,
"step": 2650
},
{
"epoch": 0.7200866269626421,
"grad_norm": 1.796875,
"learning_rate": 1.4385086254869226e-05,
"loss": 0.1366,
"step": 2660
},
{
"epoch": 0.7227937195452084,
"grad_norm": 1.59375,
"learning_rate": 1.424596549805231e-05,
"loss": 0.1316,
"step": 2670
},
{
"epoch": 0.7255008121277747,
"grad_norm": 2.671875,
"learning_rate": 1.4106844741235392e-05,
"loss": 0.1367,
"step": 2680
},
{
"epoch": 0.7282079047103411,
"grad_norm": 1.75,
"learning_rate": 1.3967723984418477e-05,
"loss": 0.13,
"step": 2690
},
{
"epoch": 0.7309149972929074,
"grad_norm": 2.96875,
"learning_rate": 1.3828603227601558e-05,
"loss": 0.1213,
"step": 2700
},
{
"epoch": 0.7309149972929074,
"eval_loss": 0.15699392557144165,
"eval_runtime": 64.8654,
"eval_samples_per_second": 7.708,
"eval_steps_per_second": 0.247,
"step": 2700
},
{
"epoch": 0.7336220898754737,
"grad_norm": 3.171875,
"learning_rate": 1.3689482470784643e-05,
"loss": 0.1303,
"step": 2710
},
{
"epoch": 0.73632918245804,
"grad_norm": 0.78125,
"learning_rate": 1.3550361713967724e-05,
"loss": 0.1496,
"step": 2720
},
{
"epoch": 0.7390362750406064,
"grad_norm": 1.2421875,
"learning_rate": 1.3411240957150809e-05,
"loss": 0.1129,
"step": 2730
},
{
"epoch": 0.7417433676231727,
"grad_norm": 2.703125,
"learning_rate": 1.327212020033389e-05,
"loss": 0.128,
"step": 2740
},
{
"epoch": 0.744450460205739,
"grad_norm": 2.328125,
"learning_rate": 1.3132999443516975e-05,
"loss": 0.1081,
"step": 2750
},
{
"epoch": 0.744450460205739,
"eval_loss": 0.1565748006105423,
"eval_runtime": 64.732,
"eval_samples_per_second": 7.724,
"eval_steps_per_second": 0.247,
"step": 2750
},
{
"epoch": 0.7471575527883053,
"grad_norm": 1.6015625,
"learning_rate": 1.2993878686700057e-05,
"loss": 0.1355,
"step": 2760
},
{
"epoch": 0.7498646453708717,
"grad_norm": 1.1015625,
"learning_rate": 1.285475792988314e-05,
"loss": 0.1341,
"step": 2770
},
{
"epoch": 0.752571737953438,
"grad_norm": 0.98828125,
"learning_rate": 1.2715637173066223e-05,
"loss": 0.1169,
"step": 2780
},
{
"epoch": 0.7552788305360043,
"grad_norm": 0.60546875,
"learning_rate": 1.2576516416249303e-05,
"loss": 0.0924,
"step": 2790
},
{
"epoch": 0.7579859231185706,
"grad_norm": 1.4921875,
"learning_rate": 1.2437395659432388e-05,
"loss": 0.1634,
"step": 2800
},
{
"epoch": 0.7579859231185706,
"eval_loss": 0.1555679589509964,
"eval_runtime": 65.0072,
"eval_samples_per_second": 7.691,
"eval_steps_per_second": 0.246,
"step": 2800
},
{
"epoch": 0.760693015701137,
"grad_norm": 1.6796875,
"learning_rate": 1.229827490261547e-05,
"loss": 0.1255,
"step": 2810
},
{
"epoch": 0.7634001082837033,
"grad_norm": 0.83984375,
"learning_rate": 1.2159154145798554e-05,
"loss": 0.0838,
"step": 2820
},
{
"epoch": 0.7661072008662696,
"grad_norm": 3.375,
"learning_rate": 1.2020033388981636e-05,
"loss": 0.1031,
"step": 2830
},
{
"epoch": 0.7688142934488359,
"grad_norm": 1.5,
"learning_rate": 1.188091263216472e-05,
"loss": 0.1125,
"step": 2840
},
{
"epoch": 0.7715213860314023,
"grad_norm": 1.7421875,
"learning_rate": 1.1741791875347802e-05,
"loss": 0.1427,
"step": 2850
},
{
"epoch": 0.7715213860314023,
"eval_loss": 0.15634971857070923,
"eval_runtime": 64.5468,
"eval_samples_per_second": 7.746,
"eval_steps_per_second": 0.248,
"step": 2850
},
{
"epoch": 0.7742284786139686,
"grad_norm": 2.484375,
"learning_rate": 1.1602671118530884e-05,
"loss": 0.1233,
"step": 2860
},
{
"epoch": 0.7769355711965349,
"grad_norm": 0.95703125,
"learning_rate": 1.1463550361713968e-05,
"loss": 0.1146,
"step": 2870
},
{
"epoch": 0.7796426637791013,
"grad_norm": 1.5703125,
"learning_rate": 1.132442960489705e-05,
"loss": 0.1301,
"step": 2880
},
{
"epoch": 0.7823497563616676,
"grad_norm": 1.515625,
"learning_rate": 1.1185308848080134e-05,
"loss": 0.1238,
"step": 2890
},
{
"epoch": 0.7850568489442339,
"grad_norm": 1.828125,
"learning_rate": 1.1046188091263218e-05,
"loss": 0.1161,
"step": 2900
},
{
"epoch": 0.7850568489442339,
"eval_loss": 0.1560017168521881,
"eval_runtime": 64.4442,
"eval_samples_per_second": 7.759,
"eval_steps_per_second": 0.248,
"step": 2900
},
{
"epoch": 0.7877639415268002,
"grad_norm": 1.03125,
"learning_rate": 1.09070673344463e-05,
"loss": 0.0932,
"step": 2910
},
{
"epoch": 0.7904710341093666,
"grad_norm": 1.1171875,
"learning_rate": 1.0767946577629383e-05,
"loss": 0.1132,
"step": 2920
},
{
"epoch": 0.7931781266919329,
"grad_norm": 1.3125,
"learning_rate": 1.0628825820812465e-05,
"loss": 0.1272,
"step": 2930
},
{
"epoch": 0.7958852192744992,
"grad_norm": 2.109375,
"learning_rate": 1.0489705063995549e-05,
"loss": 0.1251,
"step": 2940
},
{
"epoch": 0.7985923118570655,
"grad_norm": 1.7734375,
"learning_rate": 1.0350584307178631e-05,
"loss": 0.1188,
"step": 2950
},
{
"epoch": 0.7985923118570655,
"eval_loss": 0.1543656885623932,
"eval_runtime": 63.1528,
"eval_samples_per_second": 7.917,
"eval_steps_per_second": 0.253,
"step": 2950
},
{
"epoch": 0.8012994044396319,
"grad_norm": 1.796875,
"learning_rate": 1.0211463550361715e-05,
"loss": 0.1261,
"step": 2960
},
{
"epoch": 0.8040064970221982,
"grad_norm": 0.67578125,
"learning_rate": 1.0072342793544797e-05,
"loss": 0.1121,
"step": 2970
},
{
"epoch": 0.8067135896047645,
"grad_norm": 1.0390625,
"learning_rate": 9.93322203672788e-06,
"loss": 0.1267,
"step": 2980
},
{
"epoch": 0.8094206821873308,
"grad_norm": 1.8125,
"learning_rate": 9.794101279910965e-06,
"loss": 0.0998,
"step": 2990
},
{
"epoch": 0.8121277747698972,
"grad_norm": 1.8984375,
"learning_rate": 9.654980523094045e-06,
"loss": 0.1153,
"step": 3000
},
{
"epoch": 0.8121277747698972,
"eval_loss": 0.15634942054748535,
"eval_runtime": 78.3242,
"eval_samples_per_second": 6.384,
"eval_steps_per_second": 0.204,
"step": 3000
},
{
"epoch": 0.8148348673524635,
"grad_norm": 0.8515625,
"learning_rate": 9.515859766277129e-06,
"loss": 0.1116,
"step": 3010
},
{
"epoch": 0.8175419599350298,
"grad_norm": 1.5859375,
"learning_rate": 9.37673900946021e-06,
"loss": 0.1083,
"step": 3020
},
{
"epoch": 0.8202490525175961,
"grad_norm": 1.15625,
"learning_rate": 9.237618252643294e-06,
"loss": 0.1535,
"step": 3030
},
{
"epoch": 0.8229561451001625,
"grad_norm": 1.1796875,
"learning_rate": 9.098497495826378e-06,
"loss": 0.1179,
"step": 3040
},
{
"epoch": 0.8256632376827288,
"grad_norm": 1.1875,
"learning_rate": 8.95937673900946e-06,
"loss": 0.1154,
"step": 3050
},
{
"epoch": 0.8256632376827288,
"eval_loss": 0.15425148606300354,
"eval_runtime": 62.8908,
"eval_samples_per_second": 7.95,
"eval_steps_per_second": 0.254,
"step": 3050
},
{
"epoch": 0.828370330265295,
"grad_norm": 4.1875,
"learning_rate": 8.820255982192544e-06,
"loss": 0.1071,
"step": 3060
},
{
"epoch": 0.8310774228478613,
"grad_norm": 6.0625,
"learning_rate": 8.681135225375626e-06,
"loss": 0.1563,
"step": 3070
},
{
"epoch": 0.8337845154304278,
"grad_norm": 2.234375,
"learning_rate": 8.54201446855871e-06,
"loss": 0.1412,
"step": 3080
},
{
"epoch": 0.836491608012994,
"grad_norm": 1.2421875,
"learning_rate": 8.402893711741792e-06,
"loss": 0.0991,
"step": 3090
},
{
"epoch": 0.8391987005955603,
"grad_norm": 1.234375,
"learning_rate": 8.263772954924876e-06,
"loss": 0.1098,
"step": 3100
},
{
"epoch": 0.8391987005955603,
"eval_loss": 0.15568658709526062,
"eval_runtime": 63.402,
"eval_samples_per_second": 7.886,
"eval_steps_per_second": 0.252,
"step": 3100
},
{
"epoch": 0.8419057931781266,
"grad_norm": 2.015625,
"learning_rate": 8.124652198107958e-06,
"loss": 0.1556,
"step": 3110
},
{
"epoch": 0.844612885760693,
"grad_norm": 4.3125,
"learning_rate": 7.985531441291041e-06,
"loss": 0.1596,
"step": 3120
},
{
"epoch": 0.8473199783432593,
"grad_norm": 2.046875,
"learning_rate": 7.846410684474125e-06,
"loss": 0.1437,
"step": 3130
},
{
"epoch": 0.8500270709258256,
"grad_norm": 1.15625,
"learning_rate": 7.707289927657207e-06,
"loss": 0.1511,
"step": 3140
},
{
"epoch": 0.852734163508392,
"grad_norm": 1.9765625,
"learning_rate": 7.568169170840289e-06,
"loss": 0.1559,
"step": 3150
},
{
"epoch": 0.852734163508392,
"eval_loss": 0.15518584847450256,
"eval_runtime": 63.1606,
"eval_samples_per_second": 7.916,
"eval_steps_per_second": 0.253,
"step": 3150
},
{
"epoch": 0.8554412560909583,
"grad_norm": 1.9921875,
"learning_rate": 7.429048414023372e-06,
"loss": 0.1392,
"step": 3160
},
{
"epoch": 0.8581483486735246,
"grad_norm": 2.6875,
"learning_rate": 7.289927657206455e-06,
"loss": 0.1623,
"step": 3170
},
{
"epoch": 0.8608554412560909,
"grad_norm": 3.0625,
"learning_rate": 7.150806900389538e-06,
"loss": 0.1358,
"step": 3180
},
{
"epoch": 0.8635625338386573,
"grad_norm": 2.375,
"learning_rate": 7.011686143572621e-06,
"loss": 0.1483,
"step": 3190
},
{
"epoch": 0.8662696264212236,
"grad_norm": 2.578125,
"learning_rate": 6.872565386755704e-06,
"loss": 0.1207,
"step": 3200
},
{
"epoch": 0.8662696264212236,
"eval_loss": 0.15542198717594147,
"eval_runtime": 63.3581,
"eval_samples_per_second": 7.892,
"eval_steps_per_second": 0.253,
"step": 3200
},
{
"epoch": 0.8689767190037899,
"grad_norm": 1.078125,
"learning_rate": 6.733444629938787e-06,
"loss": 0.1102,
"step": 3210
},
{
"epoch": 0.8716838115863562,
"grad_norm": 1.2421875,
"learning_rate": 6.5943238731218705e-06,
"loss": 0.1394,
"step": 3220
},
{
"epoch": 0.8743909041689226,
"grad_norm": 2.0625,
"learning_rate": 6.455203116304953e-06,
"loss": 0.1495,
"step": 3230
},
{
"epoch": 0.8770979967514889,
"grad_norm": 0.7890625,
"learning_rate": 6.316082359488036e-06,
"loss": 0.1472,
"step": 3240
},
{
"epoch": 0.8798050893340552,
"grad_norm": 6.09375,
"learning_rate": 6.176961602671119e-06,
"loss": 0.1169,
"step": 3250
},
{
"epoch": 0.8798050893340552,
"eval_loss": 0.15612711012363434,
"eval_runtime": 62.8499,
"eval_samples_per_second": 7.955,
"eval_steps_per_second": 0.255,
"step": 3250
},
{
"epoch": 0.8825121819166215,
"grad_norm": 1.1328125,
"learning_rate": 6.037840845854201e-06,
"loss": 0.1208,
"step": 3260
},
{
"epoch": 0.8852192744991879,
"grad_norm": 1.78125,
"learning_rate": 5.898720089037284e-06,
"loss": 0.1145,
"step": 3270
},
{
"epoch": 0.8879263670817542,
"grad_norm": 3.21875,
"learning_rate": 5.759599332220367e-06,
"loss": 0.1341,
"step": 3280
},
{
"epoch": 0.8906334596643205,
"grad_norm": 2.265625,
"learning_rate": 5.620478575403451e-06,
"loss": 0.1278,
"step": 3290
},
{
"epoch": 0.8933405522468868,
"grad_norm": 2.65625,
"learning_rate": 5.481357818586534e-06,
"loss": 0.1932,
"step": 3300
},
{
"epoch": 0.8933405522468868,
"eval_loss": 0.15443623065948486,
"eval_runtime": 63.1624,
"eval_samples_per_second": 7.916,
"eval_steps_per_second": 0.253,
"step": 3300
},
{
"epoch": 0.8960476448294532,
"grad_norm": 2.3125,
"learning_rate": 5.342237061769617e-06,
"loss": 0.113,
"step": 3310
},
{
"epoch": 0.8987547374120195,
"grad_norm": 1.921875,
"learning_rate": 5.2031163049526995e-06,
"loss": 0.0939,
"step": 3320
},
{
"epoch": 0.9014618299945858,
"grad_norm": 3.125,
"learning_rate": 5.0639955481357824e-06,
"loss": 0.122,
"step": 3330
},
{
"epoch": 0.9041689225771521,
"grad_norm": 0.84765625,
"learning_rate": 4.9248747913188645e-06,
"loss": 0.0583,
"step": 3340
},
{
"epoch": 0.9068760151597185,
"grad_norm": 1.3125,
"learning_rate": 4.785754034501947e-06,
"loss": 0.0985,
"step": 3350
},
{
"epoch": 0.9068760151597185,
"eval_loss": 0.1550075113773346,
"eval_runtime": 63.4539,
"eval_samples_per_second": 7.88,
"eval_steps_per_second": 0.252,
"step": 3350
},
{
"epoch": 0.9095831077422848,
"grad_norm": 1.765625,
"learning_rate": 4.646633277685031e-06,
"loss": 0.1216,
"step": 3360
},
{
"epoch": 0.9122902003248511,
"grad_norm": 1.25,
"learning_rate": 4.507512520868114e-06,
"loss": 0.1355,
"step": 3370
},
{
"epoch": 0.9149972929074174,
"grad_norm": 0.2236328125,
"learning_rate": 4.368391764051197e-06,
"loss": 0.1101,
"step": 3380
},
{
"epoch": 0.9177043854899838,
"grad_norm": 1.7421875,
"learning_rate": 4.22927100723428e-06,
"loss": 0.0976,
"step": 3390
},
{
"epoch": 0.9204114780725501,
"grad_norm": 0.83984375,
"learning_rate": 4.090150250417363e-06,
"loss": 0.1194,
"step": 3400
},
{
"epoch": 0.9204114780725501,
"eval_loss": 0.15643475949764252,
"eval_runtime": 63.0287,
"eval_samples_per_second": 7.933,
"eval_steps_per_second": 0.254,
"step": 3400
},
{
"epoch": 0.9231185706551164,
"grad_norm": 3.0625,
"learning_rate": 3.951029493600445e-06,
"loss": 0.1401,
"step": 3410
},
{
"epoch": 0.9258256632376828,
"grad_norm": 1.2421875,
"learning_rate": 3.811908736783528e-06,
"loss": 0.1137,
"step": 3420
},
{
"epoch": 0.9285327558202491,
"grad_norm": 3.109375,
"learning_rate": 3.672787979966611e-06,
"loss": 0.1236,
"step": 3430
},
{
"epoch": 0.9312398484028154,
"grad_norm": 3.109375,
"learning_rate": 3.533667223149694e-06,
"loss": 0.1108,
"step": 3440
},
{
"epoch": 0.9339469409853817,
"grad_norm": 2.796875,
"learning_rate": 3.3945464663327773e-06,
"loss": 0.1309,
"step": 3450
},
{
"epoch": 0.9339469409853817,
"eval_loss": 0.1561814844608307,
"eval_runtime": 63.1871,
"eval_samples_per_second": 7.913,
"eval_steps_per_second": 0.253,
"step": 3450
},
{
"epoch": 0.9366540335679481,
"grad_norm": 2.265625,
"learning_rate": 3.25542570951586e-06,
"loss": 0.197,
"step": 3460
},
{
"epoch": 0.9393611261505144,
"grad_norm": 2.796875,
"learning_rate": 3.1163049526989427e-06,
"loss": 0.1253,
"step": 3470
},
{
"epoch": 0.9420682187330807,
"grad_norm": 2.125,
"learning_rate": 2.9771841958820256e-06,
"loss": 0.1318,
"step": 3480
},
{
"epoch": 0.944775311315647,
"grad_norm": 5.21875,
"learning_rate": 2.838063439065109e-06,
"loss": 0.1233,
"step": 3490
},
{
"epoch": 0.9474824038982134,
"grad_norm": 0.703125,
"learning_rate": 2.6989426822481914e-06,
"loss": 0.1058,
"step": 3500
},
{
"epoch": 0.9474824038982134,
"eval_loss": 0.1561388522386551,
"eval_runtime": 73.2191,
"eval_samples_per_second": 6.829,
"eval_steps_per_second": 0.219,
"step": 3500
}
],
"logging_steps": 10,
"max_steps": 3694,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.05068095973888e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
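
A minimal sketch of how the log above can be consumed, assuming the file is saved locally as trainer_state.json (the local path and the use of matplotlib are assumptions, not part of this checkpoint): it splits log_history into training entries (which carry "loss") and evaluation entries (which carry "eval_loss") and reports the lowest logged eval_loss.

```python
# Minimal sketch: parse trainer_state.json and summarize/plot the loss curves.
# Assumptions: the file sits next to this script; matplotlib is installed.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# log_history interleaves training logs ("loss") and eval logs ("eval_loss"),
# both keyed by the global "step".
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

best_step, best_eval = min(evals, key=lambda se: se[1])
print(f"{len(train)} train logs, {len(evals)} eval logs up to step {state['global_step']}")
print(f"lowest logged eval_loss: {best_eval:.4f} at step {best_step}")

# Optional plot of both curves against the global step.
import matplotlib.pyplot as plt
plt.plot(*zip(*train), label="train loss")
plt.plot(*zip(*evals), label="eval loss")
plt.xlabel("step"); plt.ylabel("loss"); plt.legend()
plt.savefig("loss_curves.png")
```

Run against this file, the summary would report the lowest logged eval_loss at roughly 0.154 around step 3050; note that "best_metric" and "best_model_checkpoint" are null above, so the Trainer itself was not tracking a best checkpoint.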