colpali2-3b-pt-448 / checkpoint-4620 / trainer_state.json
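What follows is the raw Hugging Face Trainer state for this checkpoint: "log_history" interleaves training entries (loss, learning_rate, grad_norm logged every 10 steps) with evaluation entries (eval_loss logged every 100 steps, per "eval_steps": 100). A minimal sketch for inspecting the log, assuming the file has been downloaded locally as trainer_state.json (the local filename is an assumption, not part of the repo layout):

import json
import matplotlib.pyplot as plt

# Load the trainer state; assumes trainer_state.json sits in the working directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries (keyed by "loss") and evaluation
# entries (keyed by "eval_loss"); split them by which key is present.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Plot train and eval loss against the global step.
plt.plot([e["step"] for e in train_logs], [e["loss"] for e in train_logs], label="train loss")
plt.plot([e["step"] for e in eval_logs], [e["eval_loss"] for e in eval_logs], label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.show()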
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 100,
"global_step": 4620,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010822510822510822,
"grad_norm": 101.5,
"learning_rate": 5e-06,
"loss": 1.9407,
"step": 10
},
{
"epoch": 0.021645021645021644,
"grad_norm": 30.625,
"learning_rate": 1e-05,
"loss": 1.5034,
"step": 20
},
{
"epoch": 0.032467532467532464,
"grad_norm": 22.125,
"learning_rate": 1.5e-05,
"loss": 1.0526,
"step": 30
},
{
"epoch": 0.04329004329004329,
"grad_norm": 8.375,
"learning_rate": 2e-05,
"loss": 0.9051,
"step": 40
},
{
"epoch": 0.05411255411255411,
"grad_norm": 3.75,
"learning_rate": 2.5e-05,
"loss": 0.7809,
"step": 50
},
{
"epoch": 0.06493506493506493,
"grad_norm": 1.625,
"learning_rate": 3e-05,
"loss": 0.7329,
"step": 60
},
{
"epoch": 0.07575757575757576,
"grad_norm": 1.96875,
"learning_rate": 3.5e-05,
"loss": 0.717,
"step": 70
},
{
"epoch": 0.08658008658008658,
"grad_norm": 1.5546875,
"learning_rate": 4e-05,
"loss": 0.6936,
"step": 80
},
{
"epoch": 0.09740259740259741,
"grad_norm": 3.75,
"learning_rate": 4.5e-05,
"loss": 0.664,
"step": 90
},
{
"epoch": 0.10822510822510822,
"grad_norm": 3.5,
"learning_rate": 5e-05,
"loss": 0.5702,
"step": 100
},
{
"epoch": 0.10822510822510822,
"eval_loss": 0.5504737496376038,
"eval_runtime": 14.4773,
"eval_samples_per_second": 34.537,
"eval_steps_per_second": 0.276,
"step": 100
},
{
"epoch": 0.11904761904761904,
"grad_norm": 3.125,
"learning_rate": 4.9889380530973454e-05,
"loss": 0.4897,
"step": 110
},
{
"epoch": 0.12987012987012986,
"grad_norm": 2.96875,
"learning_rate": 4.9778761061946906e-05,
"loss": 0.4462,
"step": 120
},
{
"epoch": 0.1406926406926407,
"grad_norm": 2.5625,
"learning_rate": 4.966814159292036e-05,
"loss": 0.4034,
"step": 130
},
{
"epoch": 0.15151515151515152,
"grad_norm": 2.484375,
"learning_rate": 4.955752212389381e-05,
"loss": 0.3774,
"step": 140
},
{
"epoch": 0.16233766233766234,
"grad_norm": 3.171875,
"learning_rate": 4.944690265486726e-05,
"loss": 0.3513,
"step": 150
},
{
"epoch": 0.17316017316017315,
"grad_norm": 2.15625,
"learning_rate": 4.9336283185840707e-05,
"loss": 0.341,
"step": 160
},
{
"epoch": 0.18398268398268397,
"grad_norm": 2.515625,
"learning_rate": 4.9225663716814165e-05,
"loss": 0.3309,
"step": 170
},
{
"epoch": 0.19480519480519481,
"grad_norm": 1.9375,
"learning_rate": 4.911504424778761e-05,
"loss": 0.3161,
"step": 180
},
{
"epoch": 0.20562770562770563,
"grad_norm": 1.78125,
"learning_rate": 4.900442477876107e-05,
"loss": 0.3011,
"step": 190
},
{
"epoch": 0.21645021645021645,
"grad_norm": 1.6640625,
"learning_rate": 4.8893805309734514e-05,
"loss": 0.2863,
"step": 200
},
{
"epoch": 0.21645021645021645,
"eval_loss": 0.3175188899040222,
"eval_runtime": 14.5331,
"eval_samples_per_second": 34.404,
"eval_steps_per_second": 0.275,
"step": 200
},
{
"epoch": 0.22727272727272727,
"grad_norm": 2.140625,
"learning_rate": 4.8783185840707966e-05,
"loss": 0.2608,
"step": 210
},
{
"epoch": 0.23809523809523808,
"grad_norm": 1.328125,
"learning_rate": 4.867256637168142e-05,
"loss": 0.2908,
"step": 220
},
{
"epoch": 0.24891774891774893,
"grad_norm": 2.421875,
"learning_rate": 4.856194690265487e-05,
"loss": 0.2613,
"step": 230
},
{
"epoch": 0.2597402597402597,
"grad_norm": 1.859375,
"learning_rate": 4.845132743362832e-05,
"loss": 0.2535,
"step": 240
},
{
"epoch": 0.27056277056277056,
"grad_norm": 1.4609375,
"learning_rate": 4.834070796460177e-05,
"loss": 0.2738,
"step": 250
},
{
"epoch": 0.2813852813852814,
"grad_norm": 2.265625,
"learning_rate": 4.823008849557522e-05,
"loss": 0.2413,
"step": 260
},
{
"epoch": 0.2922077922077922,
"grad_norm": 1.84375,
"learning_rate": 4.8119469026548677e-05,
"loss": 0.2608,
"step": 270
},
{
"epoch": 0.30303030303030304,
"grad_norm": 1.28125,
"learning_rate": 4.800884955752213e-05,
"loss": 0.2711,
"step": 280
},
{
"epoch": 0.31385281385281383,
"grad_norm": 1.453125,
"learning_rate": 4.789823008849558e-05,
"loss": 0.2417,
"step": 290
},
{
"epoch": 0.3246753246753247,
"grad_norm": 1.59375,
"learning_rate": 4.778761061946903e-05,
"loss": 0.267,
"step": 300
},
{
"epoch": 0.3246753246753247,
"eval_loss": 0.279142290353775,
"eval_runtime": 14.1782,
"eval_samples_per_second": 35.265,
"eval_steps_per_second": 0.282,
"step": 300
},
{
"epoch": 0.3354978354978355,
"grad_norm": 1.40625,
"learning_rate": 4.767699115044248e-05,
"loss": 0.2589,
"step": 310
},
{
"epoch": 0.3463203463203463,
"grad_norm": 7.9375,
"learning_rate": 4.7566371681415936e-05,
"loss": 0.253,
"step": 320
},
{
"epoch": 0.35714285714285715,
"grad_norm": 1.921875,
"learning_rate": 4.745575221238938e-05,
"loss": 0.2683,
"step": 330
},
{
"epoch": 0.36796536796536794,
"grad_norm": 1.3125,
"learning_rate": 4.734513274336283e-05,
"loss": 0.2483,
"step": 340
},
{
"epoch": 0.3787878787878788,
"grad_norm": 2.5,
"learning_rate": 4.7234513274336284e-05,
"loss": 0.2512,
"step": 350
},
{
"epoch": 0.38961038961038963,
"grad_norm": 1.421875,
"learning_rate": 4.7123893805309736e-05,
"loss": 0.2346,
"step": 360
},
{
"epoch": 0.4004329004329004,
"grad_norm": 1.8828125,
"learning_rate": 4.701327433628319e-05,
"loss": 0.248,
"step": 370
},
{
"epoch": 0.41125541125541126,
"grad_norm": 1.5859375,
"learning_rate": 4.690265486725664e-05,
"loss": 0.2663,
"step": 380
},
{
"epoch": 0.42207792207792205,
"grad_norm": 1.6171875,
"learning_rate": 4.679203539823009e-05,
"loss": 0.24,
"step": 390
},
{
"epoch": 0.4329004329004329,
"grad_norm": 1.5,
"learning_rate": 4.668141592920354e-05,
"loss": 0.2425,
"step": 400
},
{
"epoch": 0.4329004329004329,
"eval_loss": 0.2609393000602722,
"eval_runtime": 14.168,
"eval_samples_per_second": 35.291,
"eval_steps_per_second": 0.282,
"step": 400
},
{
"epoch": 0.44372294372294374,
"grad_norm": 2.234375,
"learning_rate": 4.657079646017699e-05,
"loss": 0.2377,
"step": 410
},
{
"epoch": 0.45454545454545453,
"grad_norm": 1.5625,
"learning_rate": 4.646017699115045e-05,
"loss": 0.2552,
"step": 420
},
{
"epoch": 0.4653679653679654,
"grad_norm": 2.171875,
"learning_rate": 4.63495575221239e-05,
"loss": 0.2185,
"step": 430
},
{
"epoch": 0.47619047619047616,
"grad_norm": 1.796875,
"learning_rate": 4.6238938053097344e-05,
"loss": 0.2484,
"step": 440
},
{
"epoch": 0.487012987012987,
"grad_norm": 1.15625,
"learning_rate": 4.61283185840708e-05,
"loss": 0.2421,
"step": 450
},
{
"epoch": 0.49783549783549785,
"grad_norm": 1.7265625,
"learning_rate": 4.601769911504425e-05,
"loss": 0.2274,
"step": 460
},
{
"epoch": 0.5086580086580087,
"grad_norm": 1.828125,
"learning_rate": 4.5907079646017706e-05,
"loss": 0.2337,
"step": 470
},
{
"epoch": 0.5194805194805194,
"grad_norm": 1.6328125,
"learning_rate": 4.579646017699115e-05,
"loss": 0.234,
"step": 480
},
{
"epoch": 0.5303030303030303,
"grad_norm": 2.171875,
"learning_rate": 4.56858407079646e-05,
"loss": 0.226,
"step": 490
},
{
"epoch": 0.5411255411255411,
"grad_norm": 1.75,
"learning_rate": 4.5575221238938055e-05,
"loss": 0.2318,
"step": 500
},
{
"epoch": 0.5411255411255411,
"eval_loss": 0.2462214082479477,
"eval_runtime": 14.0685,
"eval_samples_per_second": 35.541,
"eval_steps_per_second": 0.284,
"step": 500
},
{
"epoch": 0.551948051948052,
"grad_norm": 1.6953125,
"learning_rate": 4.5464601769911507e-05,
"loss": 0.2078,
"step": 510
},
{
"epoch": 0.5627705627705628,
"grad_norm": 1.6640625,
"learning_rate": 4.535398230088496e-05,
"loss": 0.2114,
"step": 520
},
{
"epoch": 0.5735930735930735,
"grad_norm": 1.46875,
"learning_rate": 4.524336283185841e-05,
"loss": 0.21,
"step": 530
},
{
"epoch": 0.5844155844155844,
"grad_norm": 1.46875,
"learning_rate": 4.5132743362831855e-05,
"loss": 0.2178,
"step": 540
},
{
"epoch": 0.5952380952380952,
"grad_norm": 1.65625,
"learning_rate": 4.5022123893805314e-05,
"loss": 0.2263,
"step": 550
},
{
"epoch": 0.6060606060606061,
"grad_norm": 1.8671875,
"learning_rate": 4.491150442477876e-05,
"loss": 0.2351,
"step": 560
},
{
"epoch": 0.6168831168831169,
"grad_norm": 1.25,
"learning_rate": 4.480088495575222e-05,
"loss": 0.2111,
"step": 570
},
{
"epoch": 0.6277056277056277,
"grad_norm": 1.578125,
"learning_rate": 4.469026548672566e-05,
"loss": 0.2109,
"step": 580
},
{
"epoch": 0.6385281385281385,
"grad_norm": 1.515625,
"learning_rate": 4.4579646017699114e-05,
"loss": 0.211,
"step": 590
},
{
"epoch": 0.6493506493506493,
"grad_norm": 1.3359375,
"learning_rate": 4.446902654867257e-05,
"loss": 0.2237,
"step": 600
},
{
"epoch": 0.6493506493506493,
"eval_loss": 0.22987733781337738,
"eval_runtime": 14.0366,
"eval_samples_per_second": 35.621,
"eval_steps_per_second": 0.285,
"step": 600
},
{
"epoch": 0.6601731601731602,
"grad_norm": 1.3828125,
"learning_rate": 4.435840707964602e-05,
"loss": 0.2215,
"step": 610
},
{
"epoch": 0.670995670995671,
"grad_norm": 1.7578125,
"learning_rate": 4.4247787610619477e-05,
"loss": 0.2084,
"step": 620
},
{
"epoch": 0.6818181818181818,
"grad_norm": 1.0859375,
"learning_rate": 4.413716814159292e-05,
"loss": 0.2222,
"step": 630
},
{
"epoch": 0.6926406926406926,
"grad_norm": 1.1875,
"learning_rate": 4.4026548672566373e-05,
"loss": 0.1977,
"step": 640
},
{
"epoch": 0.7034632034632035,
"grad_norm": 1.953125,
"learning_rate": 4.3915929203539825e-05,
"loss": 0.1945,
"step": 650
},
{
"epoch": 0.7142857142857143,
"grad_norm": 1.6484375,
"learning_rate": 4.380530973451328e-05,
"loss": 0.2288,
"step": 660
},
{
"epoch": 0.7251082251082251,
"grad_norm": 1.3828125,
"learning_rate": 4.369469026548673e-05,
"loss": 0.219,
"step": 670
},
{
"epoch": 0.7359307359307359,
"grad_norm": 1.6796875,
"learning_rate": 4.358407079646018e-05,
"loss": 0.2072,
"step": 680
},
{
"epoch": 0.7467532467532467,
"grad_norm": 1.3984375,
"learning_rate": 4.3473451327433626e-05,
"loss": 0.2161,
"step": 690
},
{
"epoch": 0.7575757575757576,
"grad_norm": 1.3671875,
"learning_rate": 4.3362831858407084e-05,
"loss": 0.2288,
"step": 700
},
{
"epoch": 0.7575757575757576,
"eval_loss": 0.22867494821548462,
"eval_runtime": 13.9772,
"eval_samples_per_second": 35.772,
"eval_steps_per_second": 0.286,
"step": 700
},
{
"epoch": 0.7683982683982684,
"grad_norm": 1.921875,
"learning_rate": 4.325221238938053e-05,
"loss": 0.1962,
"step": 710
},
{
"epoch": 0.7792207792207793,
"grad_norm": 1.5625,
"learning_rate": 4.314159292035399e-05,
"loss": 0.2086,
"step": 720
},
{
"epoch": 0.79004329004329,
"grad_norm": 1.2734375,
"learning_rate": 4.303097345132743e-05,
"loss": 0.1968,
"step": 730
},
{
"epoch": 0.8008658008658008,
"grad_norm": 1.6328125,
"learning_rate": 4.2920353982300885e-05,
"loss": 0.2114,
"step": 740
},
{
"epoch": 0.8116883116883117,
"grad_norm": 1.8671875,
"learning_rate": 4.280973451327434e-05,
"loss": 0.2008,
"step": 750
},
{
"epoch": 0.8225108225108225,
"grad_norm": 1.9296875,
"learning_rate": 4.269911504424779e-05,
"loss": 0.206,
"step": 760
},
{
"epoch": 0.8333333333333334,
"grad_norm": 1.484375,
"learning_rate": 4.258849557522124e-05,
"loss": 0.2091,
"step": 770
},
{
"epoch": 0.8441558441558441,
"grad_norm": 1.625,
"learning_rate": 4.247787610619469e-05,
"loss": 0.2137,
"step": 780
},
{
"epoch": 0.854978354978355,
"grad_norm": 1.78125,
"learning_rate": 4.2367256637168144e-05,
"loss": 0.2205,
"step": 790
},
{
"epoch": 0.8658008658008658,
"grad_norm": 1.0859375,
"learning_rate": 4.2256637168141596e-05,
"loss": 0.2096,
"step": 800
},
{
"epoch": 0.8658008658008658,
"eval_loss": 0.2200011909008026,
"eval_runtime": 13.8226,
"eval_samples_per_second": 36.173,
"eval_steps_per_second": 0.289,
"step": 800
},
{
"epoch": 0.8766233766233766,
"grad_norm": 1.5234375,
"learning_rate": 4.214601769911505e-05,
"loss": 0.2114,
"step": 810
},
{
"epoch": 0.8874458874458875,
"grad_norm": 1.84375,
"learning_rate": 4.20353982300885e-05,
"loss": 0.1894,
"step": 820
},
{
"epoch": 0.8982683982683982,
"grad_norm": 1.6484375,
"learning_rate": 4.192477876106195e-05,
"loss": 0.2055,
"step": 830
},
{
"epoch": 0.9090909090909091,
"grad_norm": 1.3359375,
"learning_rate": 4.1814159292035396e-05,
"loss": 0.1917,
"step": 840
},
{
"epoch": 0.9199134199134199,
"grad_norm": 1.1171875,
"learning_rate": 4.1703539823008855e-05,
"loss": 0.1775,
"step": 850
},
{
"epoch": 0.9307359307359307,
"grad_norm": 0.91796875,
"learning_rate": 4.15929203539823e-05,
"loss": 0.1924,
"step": 860
},
{
"epoch": 0.9415584415584416,
"grad_norm": 2.015625,
"learning_rate": 4.148230088495575e-05,
"loss": 0.2056,
"step": 870
},
{
"epoch": 0.9523809523809523,
"grad_norm": 1.6015625,
"learning_rate": 4.1371681415929203e-05,
"loss": 0.1973,
"step": 880
},
{
"epoch": 0.9632034632034632,
"grad_norm": 1.5703125,
"learning_rate": 4.1261061946902655e-05,
"loss": 0.2009,
"step": 890
},
{
"epoch": 0.974025974025974,
"grad_norm": 1.5546875,
"learning_rate": 4.115044247787611e-05,
"loss": 0.2166,
"step": 900
},
{
"epoch": 0.974025974025974,
"eval_loss": 0.2135138362646103,
"eval_runtime": 13.5879,
"eval_samples_per_second": 36.798,
"eval_steps_per_second": 0.294,
"step": 900
},
{
"epoch": 0.9848484848484849,
"grad_norm": 1.2109375,
"learning_rate": 4.103982300884956e-05,
"loss": 0.1889,
"step": 910
},
{
"epoch": 0.9956709956709957,
"grad_norm": 4.125,
"learning_rate": 4.092920353982301e-05,
"loss": 0.1955,
"step": 920
},
{
"epoch": 1.0064935064935066,
"grad_norm": 1.625,
"learning_rate": 4.081858407079646e-05,
"loss": 0.1949,
"step": 930
},
{
"epoch": 1.0173160173160174,
"grad_norm": 1.578125,
"learning_rate": 4.0707964601769914e-05,
"loss": 0.171,
"step": 940
},
{
"epoch": 1.0281385281385282,
"grad_norm": 1.6484375,
"learning_rate": 4.0597345132743366e-05,
"loss": 0.193,
"step": 950
},
{
"epoch": 1.0389610389610389,
"grad_norm": 1.5390625,
"learning_rate": 4.048672566371682e-05,
"loss": 0.1908,
"step": 960
},
{
"epoch": 1.0497835497835497,
"grad_norm": 1.40625,
"learning_rate": 4.037610619469026e-05,
"loss": 0.1824,
"step": 970
},
{
"epoch": 1.0606060606060606,
"grad_norm": 1.4921875,
"learning_rate": 4.026548672566372e-05,
"loss": 0.1822,
"step": 980
},
{
"epoch": 1.0714285714285714,
"grad_norm": 1.8203125,
"learning_rate": 4.015486725663717e-05,
"loss": 0.198,
"step": 990
},
{
"epoch": 1.0822510822510822,
"grad_norm": 1.2578125,
"learning_rate": 4.0044247787610625e-05,
"loss": 0.1972,
"step": 1000
},
{
"epoch": 1.0822510822510822,
"eval_loss": 0.20468579232692719,
"eval_runtime": 13.8662,
"eval_samples_per_second": 36.059,
"eval_steps_per_second": 0.288,
"step": 1000
},
{
"epoch": 1.093073593073593,
"grad_norm": 1.8515625,
"learning_rate": 3.993362831858407e-05,
"loss": 0.1843,
"step": 1010
},
{
"epoch": 1.103896103896104,
"grad_norm": 1.65625,
"learning_rate": 3.982300884955752e-05,
"loss": 0.1706,
"step": 1020
},
{
"epoch": 1.1147186147186148,
"grad_norm": 1.4296875,
"learning_rate": 3.9712389380530974e-05,
"loss": 0.1684,
"step": 1030
},
{
"epoch": 1.1255411255411256,
"grad_norm": 2.03125,
"learning_rate": 3.9601769911504426e-05,
"loss": 0.1877,
"step": 1040
},
{
"epoch": 1.1363636363636362,
"grad_norm": 1.5078125,
"learning_rate": 3.949115044247788e-05,
"loss": 0.1948,
"step": 1050
},
{
"epoch": 1.1471861471861473,
"grad_norm": 1.8046875,
"learning_rate": 3.938053097345133e-05,
"loss": 0.1846,
"step": 1060
},
{
"epoch": 1.158008658008658,
"grad_norm": 1.7421875,
"learning_rate": 3.926991150442478e-05,
"loss": 0.1812,
"step": 1070
},
{
"epoch": 1.1688311688311688,
"grad_norm": 1.296875,
"learning_rate": 3.915929203539823e-05,
"loss": 0.1773,
"step": 1080
},
{
"epoch": 1.1796536796536796,
"grad_norm": 2.078125,
"learning_rate": 3.9048672566371685e-05,
"loss": 0.1716,
"step": 1090
},
{
"epoch": 1.1904761904761905,
"grad_norm": 1.484375,
"learning_rate": 3.893805309734514e-05,
"loss": 0.1912,
"step": 1100
},
{
"epoch": 1.1904761904761905,
"eval_loss": 0.21200305223464966,
"eval_runtime": 18.2843,
"eval_samples_per_second": 27.346,
"eval_steps_per_second": 0.219,
"step": 1100
},
{
"epoch": 1.2012987012987013,
"grad_norm": 1.7265625,
"learning_rate": 3.882743362831859e-05,
"loss": 0.1739,
"step": 1110
},
{
"epoch": 1.2121212121212122,
"grad_norm": 1.7109375,
"learning_rate": 3.8716814159292034e-05,
"loss": 0.1747,
"step": 1120
},
{
"epoch": 1.222943722943723,
"grad_norm": 1.46875,
"learning_rate": 3.860619469026549e-05,
"loss": 0.1631,
"step": 1130
},
{
"epoch": 1.2337662337662338,
"grad_norm": 1.609375,
"learning_rate": 3.849557522123894e-05,
"loss": 0.1722,
"step": 1140
},
{
"epoch": 1.2445887445887447,
"grad_norm": 2.015625,
"learning_rate": 3.8384955752212396e-05,
"loss": 0.1911,
"step": 1150
},
{
"epoch": 1.2554112554112553,
"grad_norm": 1.3828125,
"learning_rate": 3.827433628318584e-05,
"loss": 0.1557,
"step": 1160
},
{
"epoch": 1.2662337662337662,
"grad_norm": 1.9140625,
"learning_rate": 3.816371681415929e-05,
"loss": 0.1621,
"step": 1170
},
{
"epoch": 1.277056277056277,
"grad_norm": 1.6171875,
"learning_rate": 3.8053097345132744e-05,
"loss": 0.1694,
"step": 1180
},
{
"epoch": 1.2878787878787878,
"grad_norm": 3.1875,
"learning_rate": 3.7942477876106196e-05,
"loss": 0.1774,
"step": 1190
},
{
"epoch": 1.2987012987012987,
"grad_norm": 1.1015625,
"learning_rate": 3.783185840707965e-05,
"loss": 0.181,
"step": 1200
},
{
"epoch": 1.2987012987012987,
"eval_loss": 0.20562683045864105,
"eval_runtime": 14.2589,
"eval_samples_per_second": 35.066,
"eval_steps_per_second": 0.281,
"step": 1200
},
{
"epoch": 1.3095238095238095,
"grad_norm": 2.09375,
"learning_rate": 3.77212389380531e-05,
"loss": 0.1686,
"step": 1210
},
{
"epoch": 1.3203463203463204,
"grad_norm": 1.2734375,
"learning_rate": 3.7610619469026545e-05,
"loss": 0.1714,
"step": 1220
},
{
"epoch": 1.3311688311688312,
"grad_norm": 1.109375,
"learning_rate": 3.7500000000000003e-05,
"loss": 0.1869,
"step": 1230
},
{
"epoch": 1.341991341991342,
"grad_norm": 1.9296875,
"learning_rate": 3.7389380530973455e-05,
"loss": 0.1684,
"step": 1240
},
{
"epoch": 1.3528138528138527,
"grad_norm": 1.6328125,
"learning_rate": 3.727876106194691e-05,
"loss": 0.1747,
"step": 1250
},
{
"epoch": 1.3636363636363638,
"grad_norm": 1.0859375,
"learning_rate": 3.716814159292036e-05,
"loss": 0.1637,
"step": 1260
},
{
"epoch": 1.3744588744588744,
"grad_norm": 1.390625,
"learning_rate": 3.7057522123893804e-05,
"loss": 0.1684,
"step": 1270
},
{
"epoch": 1.3852813852813852,
"grad_norm": 1.453125,
"learning_rate": 3.694690265486726e-05,
"loss": 0.1849,
"step": 1280
},
{
"epoch": 1.396103896103896,
"grad_norm": 1.3046875,
"learning_rate": 3.683628318584071e-05,
"loss": 0.182,
"step": 1290
},
{
"epoch": 1.406926406926407,
"grad_norm": 1.640625,
"learning_rate": 3.672566371681416e-05,
"loss": 0.1756,
"step": 1300
},
{
"epoch": 1.406926406926407,
"eval_loss": 0.1982562243938446,
"eval_runtime": 13.8702,
"eval_samples_per_second": 36.049,
"eval_steps_per_second": 0.288,
"step": 1300
},
{
"epoch": 1.4177489177489178,
"grad_norm": 1.9375,
"learning_rate": 3.661504424778761e-05,
"loss": 0.1787,
"step": 1310
},
{
"epoch": 1.4285714285714286,
"grad_norm": 1.453125,
"learning_rate": 3.650442477876106e-05,
"loss": 0.176,
"step": 1320
},
{
"epoch": 1.4393939393939394,
"grad_norm": 1.6796875,
"learning_rate": 3.6393805309734515e-05,
"loss": 0.1639,
"step": 1330
},
{
"epoch": 1.4502164502164503,
"grad_norm": 1.7421875,
"learning_rate": 3.628318584070797e-05,
"loss": 0.1786,
"step": 1340
},
{
"epoch": 1.4610389610389611,
"grad_norm": 1.4609375,
"learning_rate": 3.617256637168142e-05,
"loss": 0.1705,
"step": 1350
},
{
"epoch": 1.4718614718614718,
"grad_norm": 2.03125,
"learning_rate": 3.606194690265487e-05,
"loss": 0.1619,
"step": 1360
},
{
"epoch": 1.4826839826839826,
"grad_norm": 1.609375,
"learning_rate": 3.5951327433628315e-05,
"loss": 0.1931,
"step": 1370
},
{
"epoch": 1.4935064935064934,
"grad_norm": 1.1484375,
"learning_rate": 3.5840707964601774e-05,
"loss": 0.1693,
"step": 1380
},
{
"epoch": 1.5043290043290043,
"grad_norm": 1.6328125,
"learning_rate": 3.573008849557522e-05,
"loss": 0.1718,
"step": 1390
},
{
"epoch": 1.5151515151515151,
"grad_norm": 1.375,
"learning_rate": 3.561946902654867e-05,
"loss": 0.181,
"step": 1400
},
{
"epoch": 1.5151515151515151,
"eval_loss": 0.21516482532024384,
"eval_runtime": 13.8304,
"eval_samples_per_second": 36.152,
"eval_steps_per_second": 0.289,
"step": 1400
},
{
"epoch": 1.525974025974026,
"grad_norm": 1.078125,
"learning_rate": 3.550884955752213e-05,
"loss": 0.1718,
"step": 1410
},
{
"epoch": 1.5367965367965368,
"grad_norm": 1.2265625,
"learning_rate": 3.5398230088495574e-05,
"loss": 0.1546,
"step": 1420
},
{
"epoch": 1.5476190476190477,
"grad_norm": 1.3671875,
"learning_rate": 3.528761061946903e-05,
"loss": 0.1706,
"step": 1430
},
{
"epoch": 1.5584415584415585,
"grad_norm": 0.91796875,
"learning_rate": 3.517699115044248e-05,
"loss": 0.1608,
"step": 1440
},
{
"epoch": 1.5692640692640691,
"grad_norm": 1.8046875,
"learning_rate": 3.506637168141593e-05,
"loss": 0.1707,
"step": 1450
},
{
"epoch": 1.5800865800865802,
"grad_norm": 1.6484375,
"learning_rate": 3.495575221238938e-05,
"loss": 0.1776,
"step": 1460
},
{
"epoch": 1.5909090909090908,
"grad_norm": 1.1640625,
"learning_rate": 3.4845132743362834e-05,
"loss": 0.1546,
"step": 1470
},
{
"epoch": 1.601731601731602,
"grad_norm": 1.703125,
"learning_rate": 3.4734513274336285e-05,
"loss": 0.1669,
"step": 1480
},
{
"epoch": 1.6125541125541125,
"grad_norm": 1.578125,
"learning_rate": 3.462389380530974e-05,
"loss": 0.1771,
"step": 1490
},
{
"epoch": 1.6233766233766234,
"grad_norm": 1.6953125,
"learning_rate": 3.451327433628319e-05,
"loss": 0.1834,
"step": 1500
},
{
"epoch": 1.6233766233766234,
"eval_loss": 0.19372014701366425,
"eval_runtime": 13.9577,
"eval_samples_per_second": 35.823,
"eval_steps_per_second": 0.287,
"step": 1500
},
{
"epoch": 1.6341991341991342,
"grad_norm": 3.09375,
"learning_rate": 3.440265486725664e-05,
"loss": 0.1717,
"step": 1510
},
{
"epoch": 1.645021645021645,
"grad_norm": 2.234375,
"learning_rate": 3.4292035398230086e-05,
"loss": 0.1786,
"step": 1520
},
{
"epoch": 1.655844155844156,
"grad_norm": 1.3046875,
"learning_rate": 3.4181415929203544e-05,
"loss": 0.1683,
"step": 1530
},
{
"epoch": 1.6666666666666665,
"grad_norm": 1.5859375,
"learning_rate": 3.407079646017699e-05,
"loss": 0.1661,
"step": 1540
},
{
"epoch": 1.6774891774891776,
"grad_norm": 2.109375,
"learning_rate": 3.396017699115044e-05,
"loss": 0.165,
"step": 1550
},
{
"epoch": 1.6883116883116882,
"grad_norm": 2.203125,
"learning_rate": 3.38495575221239e-05,
"loss": 0.1542,
"step": 1560
},
{
"epoch": 1.6991341991341993,
"grad_norm": 1.609375,
"learning_rate": 3.3738938053097345e-05,
"loss": 0.1689,
"step": 1570
},
{
"epoch": 1.70995670995671,
"grad_norm": 1.8203125,
"learning_rate": 3.3628318584070804e-05,
"loss": 0.1721,
"step": 1580
},
{
"epoch": 1.7207792207792207,
"grad_norm": 1.3515625,
"learning_rate": 3.351769911504425e-05,
"loss": 0.1731,
"step": 1590
},
{
"epoch": 1.7316017316017316,
"grad_norm": 1.1796875,
"learning_rate": 3.34070796460177e-05,
"loss": 0.1761,
"step": 1600
},
{
"epoch": 1.7316017316017316,
"eval_loss": 0.18740878999233246,
"eval_runtime": 13.8261,
"eval_samples_per_second": 36.164,
"eval_steps_per_second": 0.289,
"step": 1600
},
{
"epoch": 1.7424242424242424,
"grad_norm": 1.6953125,
"learning_rate": 3.329646017699115e-05,
"loss": 0.1666,
"step": 1610
},
{
"epoch": 1.7532467532467533,
"grad_norm": 1.0625,
"learning_rate": 3.3185840707964604e-05,
"loss": 0.1578,
"step": 1620
},
{
"epoch": 1.7640692640692641,
"grad_norm": 1.4296875,
"learning_rate": 3.3075221238938056e-05,
"loss": 0.1613,
"step": 1630
},
{
"epoch": 1.774891774891775,
"grad_norm": 1.1484375,
"learning_rate": 3.296460176991151e-05,
"loss": 0.1673,
"step": 1640
},
{
"epoch": 1.7857142857142856,
"grad_norm": 1.1640625,
"learning_rate": 3.285398230088495e-05,
"loss": 0.1868,
"step": 1650
},
{
"epoch": 1.7965367965367967,
"grad_norm": 1.3046875,
"learning_rate": 3.274336283185841e-05,
"loss": 0.1648,
"step": 1660
},
{
"epoch": 1.8073593073593073,
"grad_norm": 1.609375,
"learning_rate": 3.2632743362831856e-05,
"loss": 0.1768,
"step": 1670
},
{
"epoch": 1.8181818181818183,
"grad_norm": 1.7578125,
"learning_rate": 3.2522123893805315e-05,
"loss": 0.1665,
"step": 1680
},
{
"epoch": 1.829004329004329,
"grad_norm": 1.484375,
"learning_rate": 3.241150442477876e-05,
"loss": 0.153,
"step": 1690
},
{
"epoch": 1.8398268398268398,
"grad_norm": 1.5703125,
"learning_rate": 3.230088495575221e-05,
"loss": 0.1525,
"step": 1700
},
{
"epoch": 1.8398268398268398,
"eval_loss": 0.1896573305130005,
"eval_runtime": 13.7312,
"eval_samples_per_second": 36.413,
"eval_steps_per_second": 0.291,
"step": 1700
},
{
"epoch": 1.8506493506493507,
"grad_norm": 2.4375,
"learning_rate": 3.2190265486725664e-05,
"loss": 0.1614,
"step": 1710
},
{
"epoch": 1.8614718614718615,
"grad_norm": 1.0625,
"learning_rate": 3.2079646017699115e-05,
"loss": 0.186,
"step": 1720
},
{
"epoch": 1.8722943722943723,
"grad_norm": 2.046875,
"learning_rate": 3.196902654867257e-05,
"loss": 0.1611,
"step": 1730
},
{
"epoch": 1.883116883116883,
"grad_norm": 1.828125,
"learning_rate": 3.185840707964602e-05,
"loss": 0.1767,
"step": 1740
},
{
"epoch": 1.893939393939394,
"grad_norm": 1.484375,
"learning_rate": 3.174778761061947e-05,
"loss": 0.1726,
"step": 1750
},
{
"epoch": 1.9047619047619047,
"grad_norm": 1.640625,
"learning_rate": 3.163716814159292e-05,
"loss": 0.1444,
"step": 1760
},
{
"epoch": 1.9155844155844157,
"grad_norm": 2.0625,
"learning_rate": 3.1526548672566374e-05,
"loss": 0.1609,
"step": 1770
},
{
"epoch": 1.9264069264069263,
"grad_norm": 1.5078125,
"learning_rate": 3.1415929203539826e-05,
"loss": 0.1694,
"step": 1780
},
{
"epoch": 1.9372294372294372,
"grad_norm": 1.3984375,
"learning_rate": 3.130530973451328e-05,
"loss": 0.1718,
"step": 1790
},
{
"epoch": 1.948051948051948,
"grad_norm": 1.71875,
"learning_rate": 3.119469026548672e-05,
"loss": 0.1669,
"step": 1800
},
{
"epoch": 1.948051948051948,
"eval_loss": 0.18646453320980072,
"eval_runtime": 14.1377,
"eval_samples_per_second": 35.366,
"eval_steps_per_second": 0.283,
"step": 1800
},
{
"epoch": 1.9588744588744589,
"grad_norm": 1.90625,
"learning_rate": 3.108407079646018e-05,
"loss": 0.1804,
"step": 1810
},
{
"epoch": 1.9696969696969697,
"grad_norm": 1.609375,
"learning_rate": 3.097345132743363e-05,
"loss": 0.1594,
"step": 1820
},
{
"epoch": 1.9805194805194806,
"grad_norm": 1.4921875,
"learning_rate": 3.086283185840708e-05,
"loss": 0.1542,
"step": 1830
},
{
"epoch": 1.9913419913419914,
"grad_norm": 2.046875,
"learning_rate": 3.075221238938053e-05,
"loss": 0.1462,
"step": 1840
},
{
"epoch": 2.002164502164502,
"grad_norm": 1.0703125,
"learning_rate": 3.064159292035398e-05,
"loss": 0.1446,
"step": 1850
},
{
"epoch": 2.012987012987013,
"grad_norm": 1.578125,
"learning_rate": 3.0530973451327434e-05,
"loss": 0.148,
"step": 1860
},
{
"epoch": 2.0238095238095237,
"grad_norm": 1.5546875,
"learning_rate": 3.0420353982300886e-05,
"loss": 0.1437,
"step": 1870
},
{
"epoch": 2.034632034632035,
"grad_norm": 1.015625,
"learning_rate": 3.030973451327434e-05,
"loss": 0.1519,
"step": 1880
},
{
"epoch": 2.0454545454545454,
"grad_norm": 1.203125,
"learning_rate": 3.019911504424779e-05,
"loss": 0.1573,
"step": 1890
},
{
"epoch": 2.0562770562770565,
"grad_norm": 1.4296875,
"learning_rate": 3.008849557522124e-05,
"loss": 0.1319,
"step": 1900
},
{
"epoch": 2.0562770562770565,
"eval_loss": 0.1948573887348175,
"eval_runtime": 14.0173,
"eval_samples_per_second": 35.67,
"eval_steps_per_second": 0.285,
"step": 1900
},
{
"epoch": 2.067099567099567,
"grad_norm": 1.3828125,
"learning_rate": 2.997787610619469e-05,
"loss": 0.1526,
"step": 1910
},
{
"epoch": 2.0779220779220777,
"grad_norm": 2.03125,
"learning_rate": 2.9867256637168145e-05,
"loss": 0.1444,
"step": 1920
},
{
"epoch": 2.088744588744589,
"grad_norm": 1.59375,
"learning_rate": 2.9756637168141593e-05,
"loss": 0.1437,
"step": 1930
},
{
"epoch": 2.0995670995670994,
"grad_norm": 1.2890625,
"learning_rate": 2.964601769911505e-05,
"loss": 0.1447,
"step": 1940
},
{
"epoch": 2.1103896103896105,
"grad_norm": 1.59375,
"learning_rate": 2.9535398230088497e-05,
"loss": 0.1597,
"step": 1950
},
{
"epoch": 2.121212121212121,
"grad_norm": 1.5703125,
"learning_rate": 2.942477876106195e-05,
"loss": 0.1416,
"step": 1960
},
{
"epoch": 2.132034632034632,
"grad_norm": 0.98828125,
"learning_rate": 2.9314159292035397e-05,
"loss": 0.1451,
"step": 1970
},
{
"epoch": 2.142857142857143,
"grad_norm": 0.90625,
"learning_rate": 2.9203539823008852e-05,
"loss": 0.1385,
"step": 1980
},
{
"epoch": 2.153679653679654,
"grad_norm": 1.515625,
"learning_rate": 2.90929203539823e-05,
"loss": 0.1527,
"step": 1990
},
{
"epoch": 2.1645021645021645,
"grad_norm": 1.453125,
"learning_rate": 2.8982300884955753e-05,
"loss": 0.1472,
"step": 2000
},
{
"epoch": 2.1645021645021645,
"eval_loss": 0.19946011900901794,
"eval_runtime": 14.0627,
"eval_samples_per_second": 35.555,
"eval_steps_per_second": 0.284,
"step": 2000
},
{
"epoch": 2.175324675324675,
"grad_norm": 0.87890625,
"learning_rate": 2.88716814159292e-05,
"loss": 0.126,
"step": 2010
},
{
"epoch": 2.186147186147186,
"grad_norm": 1.4296875,
"learning_rate": 2.8761061946902656e-05,
"loss": 0.1473,
"step": 2020
},
{
"epoch": 2.196969696969697,
"grad_norm": 1.21875,
"learning_rate": 2.8650442477876105e-05,
"loss": 0.1394,
"step": 2030
},
{
"epoch": 2.207792207792208,
"grad_norm": 1.3359375,
"learning_rate": 2.853982300884956e-05,
"loss": 0.1382,
"step": 2040
},
{
"epoch": 2.2186147186147185,
"grad_norm": 1.34375,
"learning_rate": 2.8429203539823012e-05,
"loss": 0.1289,
"step": 2050
},
{
"epoch": 2.2294372294372296,
"grad_norm": 1.3828125,
"learning_rate": 2.831858407079646e-05,
"loss": 0.1518,
"step": 2060
},
{
"epoch": 2.24025974025974,
"grad_norm": 2.265625,
"learning_rate": 2.8207964601769915e-05,
"loss": 0.1473,
"step": 2070
},
{
"epoch": 2.2510822510822512,
"grad_norm": 1.3828125,
"learning_rate": 2.8097345132743364e-05,
"loss": 0.1191,
"step": 2080
},
{
"epoch": 2.261904761904762,
"grad_norm": 1.203125,
"learning_rate": 2.7986725663716816e-05,
"loss": 0.1379,
"step": 2090
},
{
"epoch": 2.2727272727272725,
"grad_norm": 1.7265625,
"learning_rate": 2.7876106194690264e-05,
"loss": 0.157,
"step": 2100
},
{
"epoch": 2.2727272727272725,
"eval_loss": 0.19301347434520721,
"eval_runtime": 13.8998,
"eval_samples_per_second": 35.972,
"eval_steps_per_second": 0.288,
"step": 2100
},
{
"epoch": 2.2835497835497836,
"grad_norm": 1.21875,
"learning_rate": 2.776548672566372e-05,
"loss": 0.1494,
"step": 2110
},
{
"epoch": 2.2943722943722946,
"grad_norm": 0.98046875,
"learning_rate": 2.7654867256637168e-05,
"loss": 0.1341,
"step": 2120
},
{
"epoch": 2.3051948051948052,
"grad_norm": 1.7890625,
"learning_rate": 2.7544247787610623e-05,
"loss": 0.1517,
"step": 2130
},
{
"epoch": 2.316017316017316,
"grad_norm": 1.453125,
"learning_rate": 2.743362831858407e-05,
"loss": 0.1403,
"step": 2140
},
{
"epoch": 2.326839826839827,
"grad_norm": 1.3125,
"learning_rate": 2.7323008849557523e-05,
"loss": 0.1442,
"step": 2150
},
{
"epoch": 2.3376623376623376,
"grad_norm": 2.03125,
"learning_rate": 2.721238938053097e-05,
"loss": 0.1426,
"step": 2160
},
{
"epoch": 2.3484848484848486,
"grad_norm": 1.09375,
"learning_rate": 2.7101769911504427e-05,
"loss": 0.1341,
"step": 2170
},
{
"epoch": 2.3593073593073592,
"grad_norm": 1.6796875,
"learning_rate": 2.6991150442477875e-05,
"loss": 0.1492,
"step": 2180
},
{
"epoch": 2.3701298701298703,
"grad_norm": 1.8984375,
"learning_rate": 2.688053097345133e-05,
"loss": 0.1393,
"step": 2190
},
{
"epoch": 2.380952380952381,
"grad_norm": 1.7890625,
"learning_rate": 2.6769911504424782e-05,
"loss": 0.1522,
"step": 2200
},
{
"epoch": 2.380952380952381,
"eval_loss": 0.18994522094726562,
"eval_runtime": 17.3426,
"eval_samples_per_second": 28.831,
"eval_steps_per_second": 0.231,
"step": 2200
},
{
"epoch": 2.391774891774892,
"grad_norm": 1.3515625,
"learning_rate": 2.665929203539823e-05,
"loss": 0.1365,
"step": 2210
},
{
"epoch": 2.4025974025974026,
"grad_norm": 0.96875,
"learning_rate": 2.6548672566371686e-05,
"loss": 0.144,
"step": 2220
},
{
"epoch": 2.4134199134199132,
"grad_norm": 1.8828125,
"learning_rate": 2.6438053097345134e-05,
"loss": 0.1443,
"step": 2230
},
{
"epoch": 2.4242424242424243,
"grad_norm": 1.3671875,
"learning_rate": 2.6327433628318586e-05,
"loss": 0.148,
"step": 2240
},
{
"epoch": 2.435064935064935,
"grad_norm": 1.6484375,
"learning_rate": 2.6216814159292035e-05,
"loss": 0.1364,
"step": 2250
},
{
"epoch": 2.445887445887446,
"grad_norm": 1.21875,
"learning_rate": 2.610619469026549e-05,
"loss": 0.1517,
"step": 2260
},
{
"epoch": 2.4567099567099566,
"grad_norm": 1.546875,
"learning_rate": 2.5995575221238938e-05,
"loss": 0.152,
"step": 2270
},
{
"epoch": 2.4675324675324677,
"grad_norm": 1.21875,
"learning_rate": 2.5884955752212393e-05,
"loss": 0.1321,
"step": 2280
},
{
"epoch": 2.4783549783549783,
"grad_norm": 1.375,
"learning_rate": 2.5774336283185842e-05,
"loss": 0.1395,
"step": 2290
},
{
"epoch": 2.4891774891774894,
"grad_norm": 1.671875,
"learning_rate": 2.5663716814159294e-05,
"loss": 0.1533,
"step": 2300
},
{
"epoch": 2.4891774891774894,
"eval_loss": 0.19859272241592407,
"eval_runtime": 14.0876,
"eval_samples_per_second": 35.492,
"eval_steps_per_second": 0.284,
"step": 2300
},
{
"epoch": 2.5,
"grad_norm": 3.46875,
"learning_rate": 2.5553097345132742e-05,
"loss": 0.1539,
"step": 2310
},
{
"epoch": 2.5108225108225106,
"grad_norm": 1.5234375,
"learning_rate": 2.5442477876106197e-05,
"loss": 0.1437,
"step": 2320
},
{
"epoch": 2.5216450216450217,
"grad_norm": 1.4609375,
"learning_rate": 2.5331858407079646e-05,
"loss": 0.1275,
"step": 2330
},
{
"epoch": 2.5324675324675323,
"grad_norm": 1.6640625,
"learning_rate": 2.5221238938053098e-05,
"loss": 0.1534,
"step": 2340
},
{
"epoch": 2.5432900432900434,
"grad_norm": 2.453125,
"learning_rate": 2.5110619469026546e-05,
"loss": 0.1234,
"step": 2350
},
{
"epoch": 2.554112554112554,
"grad_norm": 1.8984375,
"learning_rate": 2.5e-05,
"loss": 0.138,
"step": 2360
},
{
"epoch": 2.564935064935065,
"grad_norm": 1.8984375,
"learning_rate": 2.4889380530973453e-05,
"loss": 0.1451,
"step": 2370
},
{
"epoch": 2.5757575757575757,
"grad_norm": 1.1015625,
"learning_rate": 2.4778761061946905e-05,
"loss": 0.1459,
"step": 2380
},
{
"epoch": 2.5865800865800868,
"grad_norm": 1.0625,
"learning_rate": 2.4668141592920353e-05,
"loss": 0.1495,
"step": 2390
},
{
"epoch": 2.5974025974025974,
"grad_norm": 1.640625,
"learning_rate": 2.4557522123893805e-05,
"loss": 0.1586,
"step": 2400
},
{
"epoch": 2.5974025974025974,
"eval_loss": 0.1879378855228424,
"eval_runtime": 14.0304,
"eval_samples_per_second": 35.637,
"eval_steps_per_second": 0.285,
"step": 2400
},
{
"epoch": 2.608225108225108,
"grad_norm": 1.5234375,
"learning_rate": 2.4446902654867257e-05,
"loss": 0.1375,
"step": 2410
},
{
"epoch": 2.619047619047619,
"grad_norm": 1.3671875,
"learning_rate": 2.433628318584071e-05,
"loss": 0.1358,
"step": 2420
},
{
"epoch": 2.62987012987013,
"grad_norm": 1.5546875,
"learning_rate": 2.422566371681416e-05,
"loss": 0.1368,
"step": 2430
},
{
"epoch": 2.6406926406926408,
"grad_norm": 0.7890625,
"learning_rate": 2.411504424778761e-05,
"loss": 0.1326,
"step": 2440
},
{
"epoch": 2.6515151515151514,
"grad_norm": 2.140625,
"learning_rate": 2.4004424778761064e-05,
"loss": 0.1396,
"step": 2450
},
{
"epoch": 2.6623376623376624,
"grad_norm": 0.96484375,
"learning_rate": 2.3893805309734516e-05,
"loss": 0.1164,
"step": 2460
},
{
"epoch": 2.673160173160173,
"grad_norm": 3.53125,
"learning_rate": 2.3783185840707968e-05,
"loss": 0.1333,
"step": 2470
},
{
"epoch": 2.683982683982684,
"grad_norm": 1.3046875,
"learning_rate": 2.3672566371681416e-05,
"loss": 0.1507,
"step": 2480
},
{
"epoch": 2.6948051948051948,
"grad_norm": 1.1015625,
"learning_rate": 2.3561946902654868e-05,
"loss": 0.1384,
"step": 2490
},
{
"epoch": 2.7056277056277054,
"grad_norm": 1.53125,
"learning_rate": 2.345132743362832e-05,
"loss": 0.1698,
"step": 2500
},
{
"epoch": 2.7056277056277054,
"eval_loss": 0.19169363379478455,
"eval_runtime": 14.0148,
"eval_samples_per_second": 35.676,
"eval_steps_per_second": 0.285,
"step": 2500
},
{
"epoch": 2.7164502164502164,
"grad_norm": 1.53125,
"learning_rate": 2.334070796460177e-05,
"loss": 0.1406,
"step": 2510
},
{
"epoch": 2.7272727272727275,
"grad_norm": 0.94140625,
"learning_rate": 2.3230088495575223e-05,
"loss": 0.1476,
"step": 2520
},
{
"epoch": 2.738095238095238,
"grad_norm": 1.3515625,
"learning_rate": 2.3119469026548672e-05,
"loss": 0.129,
"step": 2530
},
{
"epoch": 2.7489177489177488,
"grad_norm": 1.4765625,
"learning_rate": 2.3008849557522124e-05,
"loss": 0.136,
"step": 2540
},
{
"epoch": 2.75974025974026,
"grad_norm": 1.8125,
"learning_rate": 2.2898230088495576e-05,
"loss": 0.1436,
"step": 2550
},
{
"epoch": 2.7705627705627704,
"grad_norm": 1.34375,
"learning_rate": 2.2787610619469027e-05,
"loss": 0.1356,
"step": 2560
},
{
"epoch": 2.7813852813852815,
"grad_norm": 2.28125,
"learning_rate": 2.267699115044248e-05,
"loss": 0.1455,
"step": 2570
},
{
"epoch": 2.792207792207792,
"grad_norm": 1.140625,
"learning_rate": 2.2566371681415928e-05,
"loss": 0.1369,
"step": 2580
},
{
"epoch": 2.8030303030303028,
"grad_norm": 2.09375,
"learning_rate": 2.245575221238938e-05,
"loss": 0.1433,
"step": 2590
},
{
"epoch": 2.813852813852814,
"grad_norm": 1.2421875,
"learning_rate": 2.234513274336283e-05,
"loss": 0.1215,
"step": 2600
},
{
"epoch": 2.813852813852814,
"eval_loss": 0.1945955455303192,
"eval_runtime": 14.0272,
"eval_samples_per_second": 35.645,
"eval_steps_per_second": 0.285,
"step": 2600
},
{
"epoch": 2.824675324675325,
"grad_norm": 1.5,
"learning_rate": 2.2234513274336286e-05,
"loss": 0.1342,
"step": 2610
},
{
"epoch": 2.8354978354978355,
"grad_norm": 1.3828125,
"learning_rate": 2.2123893805309738e-05,
"loss": 0.134,
"step": 2620
},
{
"epoch": 2.846320346320346,
"grad_norm": 1.4140625,
"learning_rate": 2.2013274336283187e-05,
"loss": 0.1443,
"step": 2630
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.94140625,
"learning_rate": 2.190265486725664e-05,
"loss": 0.1287,
"step": 2640
},
{
"epoch": 2.867965367965368,
"grad_norm": 1.2578125,
"learning_rate": 2.179203539823009e-05,
"loss": 0.1423,
"step": 2650
},
{
"epoch": 2.878787878787879,
"grad_norm": 1.8203125,
"learning_rate": 2.1681415929203542e-05,
"loss": 0.1428,
"step": 2660
},
{
"epoch": 2.8896103896103895,
"grad_norm": 1.578125,
"learning_rate": 2.1570796460176994e-05,
"loss": 0.1324,
"step": 2670
},
{
"epoch": 2.9004329004329006,
"grad_norm": 1.234375,
"learning_rate": 2.1460176991150442e-05,
"loss": 0.1405,
"step": 2680
},
{
"epoch": 2.911255411255411,
"grad_norm": 1.3203125,
"learning_rate": 2.1349557522123894e-05,
"loss": 0.1545,
"step": 2690
},
{
"epoch": 2.9220779220779223,
"grad_norm": 1.703125,
"learning_rate": 2.1238938053097346e-05,
"loss": 0.1354,
"step": 2700
},
{
"epoch": 2.9220779220779223,
"eval_loss": 0.1839994490146637,
"eval_runtime": 13.8953,
"eval_samples_per_second": 35.983,
"eval_steps_per_second": 0.288,
"step": 2700
},
{
"epoch": 2.932900432900433,
"grad_norm": 1.5859375,
"learning_rate": 2.1128318584070798e-05,
"loss": 0.1467,
"step": 2710
},
{
"epoch": 2.9437229437229435,
"grad_norm": 1.59375,
"learning_rate": 2.101769911504425e-05,
"loss": 0.1548,
"step": 2720
},
{
"epoch": 2.9545454545454546,
"grad_norm": 1.3984375,
"learning_rate": 2.0907079646017698e-05,
"loss": 0.1498,
"step": 2730
},
{
"epoch": 2.965367965367965,
"grad_norm": 1.5859375,
"learning_rate": 2.079646017699115e-05,
"loss": 0.1484,
"step": 2740
},
{
"epoch": 2.9761904761904763,
"grad_norm": 1.5546875,
"learning_rate": 2.0685840707964602e-05,
"loss": 0.1409,
"step": 2750
},
{
"epoch": 2.987012987012987,
"grad_norm": 1.7421875,
"learning_rate": 2.0575221238938054e-05,
"loss": 0.1482,
"step": 2760
},
{
"epoch": 2.997835497835498,
"grad_norm": 2.140625,
"learning_rate": 2.0464601769911505e-05,
"loss": 0.1456,
"step": 2770
},
{
"epoch": 3.0086580086580086,
"grad_norm": 1.4765625,
"learning_rate": 2.0353982300884957e-05,
"loss": 0.111,
"step": 2780
},
{
"epoch": 3.0194805194805197,
"grad_norm": 1.625,
"learning_rate": 2.024336283185841e-05,
"loss": 0.1259,
"step": 2790
},
{
"epoch": 3.0303030303030303,
"grad_norm": 1.359375,
"learning_rate": 2.013274336283186e-05,
"loss": 0.1234,
"step": 2800
},
{
"epoch": 3.0303030303030303,
"eval_loss": 0.18937236070632935,
"eval_runtime": 13.8239,
"eval_samples_per_second": 36.169,
"eval_steps_per_second": 0.289,
"step": 2800
},
{
"epoch": 3.0411255411255413,
"grad_norm": 1.2734375,
"learning_rate": 2.0022123893805313e-05,
"loss": 0.1339,
"step": 2810
},
{
"epoch": 3.051948051948052,
"grad_norm": 1.609375,
"learning_rate": 1.991150442477876e-05,
"loss": 0.1276,
"step": 2820
},
{
"epoch": 3.0627705627705626,
"grad_norm": 1.875,
"learning_rate": 1.9800884955752213e-05,
"loss": 0.1226,
"step": 2830
},
{
"epoch": 3.0735930735930737,
"grad_norm": 1.078125,
"learning_rate": 1.9690265486725665e-05,
"loss": 0.1273,
"step": 2840
},
{
"epoch": 3.0844155844155843,
"grad_norm": 1.4765625,
"learning_rate": 1.9579646017699117e-05,
"loss": 0.1352,
"step": 2850
},
{
"epoch": 3.0952380952380953,
"grad_norm": 1.3203125,
"learning_rate": 1.946902654867257e-05,
"loss": 0.1124,
"step": 2860
},
{
"epoch": 3.106060606060606,
"grad_norm": 1.125,
"learning_rate": 1.9358407079646017e-05,
"loss": 0.1174,
"step": 2870
},
{
"epoch": 3.116883116883117,
"grad_norm": 1.59375,
"learning_rate": 1.924778761061947e-05,
"loss": 0.1409,
"step": 2880
},
{
"epoch": 3.1277056277056277,
"grad_norm": 1.34375,
"learning_rate": 1.913716814159292e-05,
"loss": 0.1205,
"step": 2890
},
{
"epoch": 3.1385281385281387,
"grad_norm": 1.2109375,
"learning_rate": 1.9026548672566372e-05,
"loss": 0.1217,
"step": 2900
},
{
"epoch": 3.1385281385281387,
"eval_loss": 0.18834540247917175,
"eval_runtime": 13.8797,
"eval_samples_per_second": 36.024,
"eval_steps_per_second": 0.288,
"step": 2900
},
{
"epoch": 3.1493506493506493,
"grad_norm": 1.7734375,
"learning_rate": 1.8915929203539824e-05,
"loss": 0.1373,
"step": 2910
},
{
"epoch": 3.16017316017316,
"grad_norm": 1.203125,
"learning_rate": 1.8805309734513272e-05,
"loss": 0.1187,
"step": 2920
},
{
"epoch": 3.170995670995671,
"grad_norm": 1.421875,
"learning_rate": 1.8694690265486728e-05,
"loss": 0.1196,
"step": 2930
},
{
"epoch": 3.1818181818181817,
"grad_norm": 1.671875,
"learning_rate": 1.858407079646018e-05,
"loss": 0.1343,
"step": 2940
},
{
"epoch": 3.1926406926406927,
"grad_norm": 1.453125,
"learning_rate": 1.847345132743363e-05,
"loss": 0.1267,
"step": 2950
},
{
"epoch": 3.2034632034632033,
"grad_norm": 1.8984375,
"learning_rate": 1.836283185840708e-05,
"loss": 0.1261,
"step": 2960
},
{
"epoch": 3.2142857142857144,
"grad_norm": 1.484375,
"learning_rate": 1.825221238938053e-05,
"loss": 0.1202,
"step": 2970
},
{
"epoch": 3.225108225108225,
"grad_norm": 1.1640625,
"learning_rate": 1.8141592920353983e-05,
"loss": 0.1259,
"step": 2980
},
{
"epoch": 3.235930735930736,
"grad_norm": 1.515625,
"learning_rate": 1.8030973451327435e-05,
"loss": 0.1208,
"step": 2990
},
{
"epoch": 3.2467532467532467,
"grad_norm": 1.1875,
"learning_rate": 1.7920353982300887e-05,
"loss": 0.1363,
"step": 3000
},
{
"epoch": 3.2467532467532467,
"eval_loss": 0.19392167031764984,
"eval_runtime": 14.0702,
"eval_samples_per_second": 35.536,
"eval_steps_per_second": 0.284,
"step": 3000
},
{
"epoch": 3.257575757575758,
"grad_norm": 1.2265625,
"learning_rate": 1.7809734513274335e-05,
"loss": 0.1169,
"step": 3010
},
{
"epoch": 3.2683982683982684,
"grad_norm": 1.765625,
"learning_rate": 1.7699115044247787e-05,
"loss": 0.1279,
"step": 3020
},
{
"epoch": 3.279220779220779,
"grad_norm": 1.4140625,
"learning_rate": 1.758849557522124e-05,
"loss": 0.127,
"step": 3030
},
{
"epoch": 3.29004329004329,
"grad_norm": 1.53125,
"learning_rate": 1.747787610619469e-05,
"loss": 0.1294,
"step": 3040
},
{
"epoch": 3.3008658008658007,
"grad_norm": 1.9765625,
"learning_rate": 1.7367256637168143e-05,
"loss": 0.1244,
"step": 3050
},
{
"epoch": 3.311688311688312,
"grad_norm": 0.7109375,
"learning_rate": 1.7256637168141594e-05,
"loss": 0.1194,
"step": 3060
},
{
"epoch": 3.3225108225108224,
"grad_norm": 1.296875,
"learning_rate": 1.7146017699115043e-05,
"loss": 0.1284,
"step": 3070
},
{
"epoch": 3.3333333333333335,
"grad_norm": 0.93359375,
"learning_rate": 1.7035398230088495e-05,
"loss": 0.1359,
"step": 3080
},
{
"epoch": 3.344155844155844,
"grad_norm": 1.203125,
"learning_rate": 1.692477876106195e-05,
"loss": 0.1318,
"step": 3090
},
{
"epoch": 3.354978354978355,
"grad_norm": 1.4453125,
"learning_rate": 1.6814159292035402e-05,
"loss": 0.1277,
"step": 3100
},
{
"epoch": 3.354978354978355,
"eval_loss": 0.18987847864627838,
"eval_runtime": 13.8933,
"eval_samples_per_second": 35.989,
"eval_steps_per_second": 0.288,
"step": 3100
},
{
"epoch": 3.365800865800866,
"grad_norm": 1.25,
"learning_rate": 1.670353982300885e-05,
"loss": 0.1164,
"step": 3110
},
{
"epoch": 3.3766233766233764,
"grad_norm": 1.3125,
"learning_rate": 1.6592920353982302e-05,
"loss": 0.125,
"step": 3120
},
{
"epoch": 3.3874458874458875,
"grad_norm": 1.1796875,
"learning_rate": 1.6482300884955754e-05,
"loss": 0.1261,
"step": 3130
},
{
"epoch": 3.398268398268398,
"grad_norm": 1.25,
"learning_rate": 1.6371681415929206e-05,
"loss": 0.126,
"step": 3140
},
{
"epoch": 3.409090909090909,
"grad_norm": 1.59375,
"learning_rate": 1.6261061946902657e-05,
"loss": 0.1268,
"step": 3150
},
{
"epoch": 3.41991341991342,
"grad_norm": 1.5390625,
"learning_rate": 1.6150442477876106e-05,
"loss": 0.1239,
"step": 3160
},
{
"epoch": 3.430735930735931,
"grad_norm": 1.859375,
"learning_rate": 1.6039823008849558e-05,
"loss": 0.11,
"step": 3170
},
{
"epoch": 3.4415584415584415,
"grad_norm": 0.82421875,
"learning_rate": 1.592920353982301e-05,
"loss": 0.1243,
"step": 3180
},
{
"epoch": 3.4523809523809526,
"grad_norm": 1.4140625,
"learning_rate": 1.581858407079646e-05,
"loss": 0.1257,
"step": 3190
},
{
"epoch": 3.463203463203463,
"grad_norm": 1.0078125,
"learning_rate": 1.5707964601769913e-05,
"loss": 0.1231,
"step": 3200
},
{
"epoch": 3.463203463203463,
"eval_loss": 0.19035659730434418,
"eval_runtime": 14.1973,
"eval_samples_per_second": 35.218,
"eval_steps_per_second": 0.282,
"step": 3200
},
{
"epoch": 3.474025974025974,
"grad_norm": 1.4921875,
"learning_rate": 1.559734513274336e-05,
"loss": 0.1161,
"step": 3210
},
{
"epoch": 3.484848484848485,
"grad_norm": 1.6484375,
"learning_rate": 1.5486725663716813e-05,
"loss": 0.1275,
"step": 3220
},
{
"epoch": 3.4956709956709955,
"grad_norm": 1.171875,
"learning_rate": 1.5376106194690265e-05,
"loss": 0.1177,
"step": 3230
},
{
"epoch": 3.5064935064935066,
"grad_norm": 1.25,
"learning_rate": 1.5265486725663717e-05,
"loss": 0.1269,
"step": 3240
},
{
"epoch": 3.517316017316017,
"grad_norm": 1.4375,
"learning_rate": 1.515486725663717e-05,
"loss": 0.1295,
"step": 3250
},
{
"epoch": 3.5281385281385282,
"grad_norm": 1.34375,
"learning_rate": 1.504424778761062e-05,
"loss": 0.1183,
"step": 3260
},
{
"epoch": 3.538961038961039,
"grad_norm": 1.4921875,
"learning_rate": 1.4933628318584072e-05,
"loss": 0.1146,
"step": 3270
},
{
"epoch": 3.54978354978355,
"grad_norm": 1.1796875,
"learning_rate": 1.4823008849557524e-05,
"loss": 0.1149,
"step": 3280
},
{
"epoch": 3.5606060606060606,
"grad_norm": 1.2421875,
"learning_rate": 1.4712389380530974e-05,
"loss": 0.1276,
"step": 3290
},
{
"epoch": 3.571428571428571,
"grad_norm": 0.86328125,
"learning_rate": 1.4601769911504426e-05,
"loss": 0.0958,
"step": 3300
},
{
"epoch": 3.571428571428571,
"eval_loss": 0.18995501101016998,
"eval_runtime": 18.7862,
"eval_samples_per_second": 26.615,
"eval_steps_per_second": 0.213,
"step": 3300
},
{
"epoch": 3.5822510822510822,
"grad_norm": 1.4609375,
"learning_rate": 1.4491150442477876e-05,
"loss": 0.1257,
"step": 3310
},
{
"epoch": 3.5930735930735933,
"grad_norm": 1.75,
"learning_rate": 1.4380530973451328e-05,
"loss": 0.1379,
"step": 3320
},
{
"epoch": 3.603896103896104,
"grad_norm": 1.359375,
"learning_rate": 1.426991150442478e-05,
"loss": 0.1359,
"step": 3330
},
{
"epoch": 3.6147186147186146,
"grad_norm": 0.97265625,
"learning_rate": 1.415929203539823e-05,
"loss": 0.1071,
"step": 3340
},
{
"epoch": 3.6255411255411256,
"grad_norm": 2.625,
"learning_rate": 1.4048672566371682e-05,
"loss": 0.1231,
"step": 3350
},
{
"epoch": 3.6363636363636362,
"grad_norm": 1.59375,
"learning_rate": 1.3938053097345132e-05,
"loss": 0.1234,
"step": 3360
},
{
"epoch": 3.6471861471861473,
"grad_norm": 1.140625,
"learning_rate": 1.3827433628318584e-05,
"loss": 0.1287,
"step": 3370
},
{
"epoch": 3.658008658008658,
"grad_norm": 1.3984375,
"learning_rate": 1.3716814159292036e-05,
"loss": 0.1345,
"step": 3380
},
{
"epoch": 3.6688311688311686,
"grad_norm": 1.640625,
"learning_rate": 1.3606194690265486e-05,
"loss": 0.146,
"step": 3390
},
{
"epoch": 3.6796536796536796,
"grad_norm": 1.640625,
"learning_rate": 1.3495575221238938e-05,
"loss": 0.1321,
"step": 3400
},
{
"epoch": 3.6796536796536796,
"eval_loss": 0.19218742847442627,
"eval_runtime": 13.835,
"eval_samples_per_second": 36.14,
"eval_steps_per_second": 0.289,
"step": 3400
},
{
"epoch": 3.6904761904761907,
"grad_norm": 1.3984375,
"learning_rate": 1.3384955752212391e-05,
"loss": 0.1269,
"step": 3410
},
{
"epoch": 3.7012987012987013,
"grad_norm": 1.9296875,
"learning_rate": 1.3274336283185843e-05,
"loss": 0.1216,
"step": 3420
},
{
"epoch": 3.712121212121212,
"grad_norm": 1.0234375,
"learning_rate": 1.3163716814159293e-05,
"loss": 0.1096,
"step": 3430
},
{
"epoch": 3.722943722943723,
"grad_norm": 1.3125,
"learning_rate": 1.3053097345132745e-05,
"loss": 0.1422,
"step": 3440
},
{
"epoch": 3.7337662337662336,
"grad_norm": 1.046875,
"learning_rate": 1.2942477876106197e-05,
"loss": 0.1316,
"step": 3450
},
{
"epoch": 3.7445887445887447,
"grad_norm": 1.6015625,
"learning_rate": 1.2831858407079647e-05,
"loss": 0.1198,
"step": 3460
},
{
"epoch": 3.7554112554112553,
"grad_norm": 1.5078125,
"learning_rate": 1.2721238938053099e-05,
"loss": 0.1251,
"step": 3470
},
{
"epoch": 3.7662337662337664,
"grad_norm": 1.1015625,
"learning_rate": 1.2610619469026549e-05,
"loss": 0.1198,
"step": 3480
},
{
"epoch": 3.777056277056277,
"grad_norm": 0.93359375,
"learning_rate": 1.25e-05,
"loss": 0.1195,
"step": 3490
},
{
"epoch": 3.787878787878788,
"grad_norm": 1.7265625,
"learning_rate": 1.2389380530973452e-05,
"loss": 0.1052,
"step": 3500
},
{
"epoch": 3.787878787878788,
"eval_loss": 0.19286279380321503,
"eval_runtime": 14.0626,
"eval_samples_per_second": 35.555,
"eval_steps_per_second": 0.284,
"step": 3500
},
{
"epoch": 3.7987012987012987,
"grad_norm": 1.3046875,
"learning_rate": 1.2278761061946903e-05,
"loss": 0.1246,
"step": 3510
},
{
"epoch": 3.8095238095238093,
"grad_norm": 1.0859375,
"learning_rate": 1.2168141592920354e-05,
"loss": 0.1159,
"step": 3520
},
{
"epoch": 3.8203463203463204,
"grad_norm": 1.3828125,
"learning_rate": 1.2057522123893804e-05,
"loss": 0.129,
"step": 3530
},
{
"epoch": 3.8311688311688314,
"grad_norm": 2.40625,
"learning_rate": 1.1946902654867258e-05,
"loss": 0.1177,
"step": 3540
},
{
"epoch": 3.841991341991342,
"grad_norm": 1.453125,
"learning_rate": 1.1836283185840708e-05,
"loss": 0.1333,
"step": 3550
},
{
"epoch": 3.8528138528138527,
"grad_norm": 1.296875,
"learning_rate": 1.172566371681416e-05,
"loss": 0.1287,
"step": 3560
},
{
"epoch": 3.8636363636363638,
"grad_norm": 2.234375,
"learning_rate": 1.1615044247787612e-05,
"loss": 0.1284,
"step": 3570
},
{
"epoch": 3.8744588744588744,
"grad_norm": 1.4921875,
"learning_rate": 1.1504424778761062e-05,
"loss": 0.1257,
"step": 3580
},
{
"epoch": 3.8852813852813854,
"grad_norm": 1.5625,
"learning_rate": 1.1393805309734514e-05,
"loss": 0.1157,
"step": 3590
},
{
"epoch": 3.896103896103896,
"grad_norm": 1.421875,
"learning_rate": 1.1283185840707964e-05,
"loss": 0.1106,
"step": 3600
},
{
"epoch": 3.896103896103896,
"eval_loss": 0.19091036915779114,
"eval_runtime": 14.4935,
"eval_samples_per_second": 34.498,
"eval_steps_per_second": 0.276,
"step": 3600
},
{
"epoch": 3.9069264069264067,
"grad_norm": 1.5859375,
"learning_rate": 1.1172566371681416e-05,
"loss": 0.123,
"step": 3610
},
{
"epoch": 3.9177489177489178,
"grad_norm": 1.8046875,
"learning_rate": 1.1061946902654869e-05,
"loss": 0.1299,
"step": 3620
},
{
"epoch": 3.928571428571429,
"grad_norm": 1.5234375,
"learning_rate": 1.095132743362832e-05,
"loss": 0.1227,
"step": 3630
},
{
"epoch": 3.9393939393939394,
"grad_norm": 1.421875,
"learning_rate": 1.0840707964601771e-05,
"loss": 0.1239,
"step": 3640
},
{
"epoch": 3.95021645021645,
"grad_norm": 0.92578125,
"learning_rate": 1.0730088495575221e-05,
"loss": 0.1251,
"step": 3650
},
{
"epoch": 3.961038961038961,
"grad_norm": 0.80078125,
"learning_rate": 1.0619469026548673e-05,
"loss": 0.1121,
"step": 3660
},
{
"epoch": 3.9718614718614718,
"grad_norm": 1.4609375,
"learning_rate": 1.0508849557522125e-05,
"loss": 0.1298,
"step": 3670
},
{
"epoch": 3.982683982683983,
"grad_norm": 1.1953125,
"learning_rate": 1.0398230088495575e-05,
"loss": 0.1129,
"step": 3680
},
{
"epoch": 3.9935064935064934,
"grad_norm": 1.9765625,
"learning_rate": 1.0287610619469027e-05,
"loss": 0.1214,
"step": 3690
},
{
"epoch": 4.004329004329004,
"grad_norm": 1.15625,
"learning_rate": 1.0176991150442479e-05,
"loss": 0.1207,
"step": 3700
},
{
"epoch": 4.004329004329004,
"eval_loss": 0.19235384464263916,
"eval_runtime": 14.3348,
"eval_samples_per_second": 34.88,
"eval_steps_per_second": 0.279,
"step": 3700
},
{
"epoch": 4.015151515151516,
"grad_norm": 1.6015625,
"learning_rate": 1.006637168141593e-05,
"loss": 0.1167,
"step": 3710
},
{
"epoch": 4.025974025974026,
"grad_norm": 1.1875,
"learning_rate": 9.95575221238938e-06,
"loss": 0.1195,
"step": 3720
},
{
"epoch": 4.036796536796537,
"grad_norm": 2.03125,
"learning_rate": 9.845132743362832e-06,
"loss": 0.1171,
"step": 3730
},
{
"epoch": 4.0476190476190474,
"grad_norm": 1.3828125,
"learning_rate": 9.734513274336284e-06,
"loss": 0.1176,
"step": 3740
},
{
"epoch": 4.058441558441558,
"grad_norm": 1.1953125,
"learning_rate": 9.623893805309734e-06,
"loss": 0.12,
"step": 3750
},
{
"epoch": 4.06926406926407,
"grad_norm": 1.3515625,
"learning_rate": 9.513274336283186e-06,
"loss": 0.1174,
"step": 3760
},
{
"epoch": 4.08008658008658,
"grad_norm": 1.0859375,
"learning_rate": 9.402654867256636e-06,
"loss": 0.1096,
"step": 3770
},
{
"epoch": 4.090909090909091,
"grad_norm": 1.546875,
"learning_rate": 9.29203539823009e-06,
"loss": 0.1109,
"step": 3780
},
{
"epoch": 4.1017316017316015,
"grad_norm": 1.4453125,
"learning_rate": 9.18141592920354e-06,
"loss": 0.1263,
"step": 3790
},
{
"epoch": 4.112554112554113,
"grad_norm": 1.8046875,
"learning_rate": 9.070796460176992e-06,
"loss": 0.1067,
"step": 3800
},
{
"epoch": 4.112554112554113,
"eval_loss": 0.19082804024219513,
"eval_runtime": 14.0192,
"eval_samples_per_second": 35.665,
"eval_steps_per_second": 0.285,
"step": 3800
},
{
"epoch": 4.123376623376624,
"grad_norm": 1.4375,
"learning_rate": 8.960176991150443e-06,
"loss": 0.1079,
"step": 3810
},
{
"epoch": 4.134199134199134,
"grad_norm": 1.796875,
"learning_rate": 8.849557522123894e-06,
"loss": 0.1139,
"step": 3820
},
{
"epoch": 4.145021645021645,
"grad_norm": 1.1875,
"learning_rate": 8.738938053097345e-06,
"loss": 0.1107,
"step": 3830
},
{
"epoch": 4.1558441558441555,
"grad_norm": 1.0859375,
"learning_rate": 8.628318584070797e-06,
"loss": 0.1227,
"step": 3840
},
{
"epoch": 4.166666666666667,
"grad_norm": 1.6875,
"learning_rate": 8.517699115044247e-06,
"loss": 0.1258,
"step": 3850
},
{
"epoch": 4.177489177489178,
"grad_norm": 1.734375,
"learning_rate": 8.407079646017701e-06,
"loss": 0.1186,
"step": 3860
},
{
"epoch": 4.188311688311688,
"grad_norm": 1.40625,
"learning_rate": 8.296460176991151e-06,
"loss": 0.1181,
"step": 3870
},
{
"epoch": 4.199134199134199,
"grad_norm": 2.171875,
"learning_rate": 8.185840707964603e-06,
"loss": 0.1302,
"step": 3880
},
{
"epoch": 4.20995670995671,
"grad_norm": 1.2578125,
"learning_rate": 8.075221238938053e-06,
"loss": 0.1215,
"step": 3890
},
{
"epoch": 4.220779220779221,
"grad_norm": 1.890625,
"learning_rate": 7.964601769911505e-06,
"loss": 0.1092,
"step": 3900
},
{
"epoch": 4.220779220779221,
"eval_loss": 0.19130083918571472,
"eval_runtime": 13.9613,
"eval_samples_per_second": 35.813,
"eval_steps_per_second": 0.287,
"step": 3900
},
{
"epoch": 4.231601731601732,
"grad_norm": 1.4296875,
"learning_rate": 7.853982300884957e-06,
"loss": 0.1256,
"step": 3910
},
{
"epoch": 4.242424242424242,
"grad_norm": 2.09375,
"learning_rate": 7.743362831858407e-06,
"loss": 0.1098,
"step": 3920
},
{
"epoch": 4.253246753246753,
"grad_norm": 1.4921875,
"learning_rate": 7.632743362831859e-06,
"loss": 0.1216,
"step": 3930
},
{
"epoch": 4.264069264069264,
"grad_norm": 0.74609375,
"learning_rate": 7.52212389380531e-06,
"loss": 0.1131,
"step": 3940
},
{
"epoch": 4.274891774891775,
"grad_norm": 1.640625,
"learning_rate": 7.411504424778762e-06,
"loss": 0.1289,
"step": 3950
},
{
"epoch": 4.285714285714286,
"grad_norm": 2.375,
"learning_rate": 7.300884955752213e-06,
"loss": 0.117,
"step": 3960
},
{
"epoch": 4.296536796536796,
"grad_norm": 0.81640625,
"learning_rate": 7.190265486725664e-06,
"loss": 0.1107,
"step": 3970
},
{
"epoch": 4.307359307359308,
"grad_norm": 1.2734375,
"learning_rate": 7.079646017699115e-06,
"loss": 0.1169,
"step": 3980
},
{
"epoch": 4.318181818181818,
"grad_norm": 1.296875,
"learning_rate": 6.969026548672566e-06,
"loss": 0.1262,
"step": 3990
},
{
"epoch": 4.329004329004329,
"grad_norm": 1.0546875,
"learning_rate": 6.858407079646018e-06,
"loss": 0.0927,
"step": 4000
},
{
"epoch": 4.329004329004329,
"eval_loss": 0.19239334762096405,
"eval_runtime": 14.0391,
"eval_samples_per_second": 35.615,
"eval_steps_per_second": 0.285,
"step": 4000
},
{
"epoch": 4.33982683982684,
"grad_norm": 1.4375,
"learning_rate": 6.747787610619469e-06,
"loss": 0.1088,
"step": 4010
},
{
"epoch": 4.35064935064935,
"grad_norm": 1.2265625,
"learning_rate": 6.6371681415929215e-06,
"loss": 0.1177,
"step": 4020
},
{
"epoch": 4.361471861471862,
"grad_norm": 1.359375,
"learning_rate": 6.5265486725663725e-06,
"loss": 0.1173,
"step": 4030
},
{
"epoch": 4.372294372294372,
"grad_norm": 1.4140625,
"learning_rate": 6.415929203539823e-06,
"loss": 0.1168,
"step": 4040
},
{
"epoch": 4.383116883116883,
"grad_norm": 0.83203125,
"learning_rate": 6.305309734513274e-06,
"loss": 0.1145,
"step": 4050
},
{
"epoch": 4.393939393939394,
"grad_norm": 1.2421875,
"learning_rate": 6.194690265486726e-06,
"loss": 0.1025,
"step": 4060
},
{
"epoch": 4.404761904761905,
"grad_norm": 1.1640625,
"learning_rate": 6.084070796460177e-06,
"loss": 0.1016,
"step": 4070
},
{
"epoch": 4.415584415584416,
"grad_norm": 1.2890625,
"learning_rate": 5.973451327433629e-06,
"loss": 0.1161,
"step": 4080
},
{
"epoch": 4.426406926406926,
"grad_norm": 1.1484375,
"learning_rate": 5.86283185840708e-06,
"loss": 0.1159,
"step": 4090
},
{
"epoch": 4.437229437229437,
"grad_norm": 0.98046875,
"learning_rate": 5.752212389380531e-06,
"loss": 0.1038,
"step": 4100
},
{
"epoch": 4.437229437229437,
"eval_loss": 0.19425031542778015,
"eval_runtime": 14.1296,
"eval_samples_per_second": 35.387,
"eval_steps_per_second": 0.283,
"step": 4100
},
{
"epoch": 4.448051948051948,
"grad_norm": 2.078125,
"learning_rate": 5.641592920353982e-06,
"loss": 0.1281,
"step": 4110
},
{
"epoch": 4.458874458874459,
"grad_norm": 1.4453125,
"learning_rate": 5.5309734513274346e-06,
"loss": 0.112,
"step": 4120
},
{
"epoch": 4.46969696969697,
"grad_norm": 1.2578125,
"learning_rate": 5.4203539823008855e-06,
"loss": 0.1206,
"step": 4130
},
{
"epoch": 4.48051948051948,
"grad_norm": 1.21875,
"learning_rate": 5.3097345132743365e-06,
"loss": 0.1147,
"step": 4140
},
{
"epoch": 4.491341991341991,
"grad_norm": 1.3984375,
"learning_rate": 5.1991150442477875e-06,
"loss": 0.1248,
"step": 4150
},
{
"epoch": 4.5021645021645025,
"grad_norm": 1.6171875,
"learning_rate": 5.088495575221239e-06,
"loss": 0.1165,
"step": 4160
},
{
"epoch": 4.512987012987013,
"grad_norm": 3.703125,
"learning_rate": 4.97787610619469e-06,
"loss": 0.1067,
"step": 4170
},
{
"epoch": 4.523809523809524,
"grad_norm": 1.9921875,
"learning_rate": 4.867256637168142e-06,
"loss": 0.1109,
"step": 4180
},
{
"epoch": 4.534632034632034,
"grad_norm": 0.89453125,
"learning_rate": 4.756637168141593e-06,
"loss": 0.1026,
"step": 4190
},
{
"epoch": 4.545454545454545,
"grad_norm": 1.90625,
"learning_rate": 4.646017699115045e-06,
"loss": 0.1231,
"step": 4200
},
{
"epoch": 4.545454545454545,
"eval_loss": 0.1931437849998474,
"eval_runtime": 14.0341,
"eval_samples_per_second": 35.627,
"eval_steps_per_second": 0.285,
"step": 4200
},
{
"epoch": 4.5562770562770565,
"grad_norm": 0.98046875,
"learning_rate": 4.535398230088496e-06,
"loss": 0.1016,
"step": 4210
},
{
"epoch": 4.567099567099567,
"grad_norm": 1.9375,
"learning_rate": 4.424778761061947e-06,
"loss": 0.115,
"step": 4220
},
{
"epoch": 4.577922077922078,
"grad_norm": 1.0546875,
"learning_rate": 4.314159292035399e-06,
"loss": 0.122,
"step": 4230
},
{
"epoch": 4.588744588744589,
"grad_norm": 1.578125,
"learning_rate": 4.2035398230088504e-06,
"loss": 0.1178,
"step": 4240
},
{
"epoch": 4.5995670995671,
"grad_norm": 1.125,
"learning_rate": 4.092920353982301e-06,
"loss": 0.1179,
"step": 4250
},
{
"epoch": 4.6103896103896105,
"grad_norm": 1.0859375,
"learning_rate": 3.982300884955752e-06,
"loss": 0.1109,
"step": 4260
},
{
"epoch": 4.621212121212121,
"grad_norm": 1.0,
"learning_rate": 3.871681415929203e-06,
"loss": 0.1036,
"step": 4270
},
{
"epoch": 4.632034632034632,
"grad_norm": 1.7734375,
"learning_rate": 3.761061946902655e-06,
"loss": 0.1102,
"step": 4280
},
{
"epoch": 4.642857142857143,
"grad_norm": 1.4375,
"learning_rate": 3.6504424778761066e-06,
"loss": 0.1154,
"step": 4290
},
{
"epoch": 4.653679653679654,
"grad_norm": 1.6328125,
"learning_rate": 3.5398230088495575e-06,
"loss": 0.1277,
"step": 4300
},
{
"epoch": 4.653679653679654,
"eval_loss": 0.19161994755268097,
"eval_runtime": 14.064,
"eval_samples_per_second": 35.552,
"eval_steps_per_second": 0.284,
"step": 4300
},
{
"epoch": 4.6645021645021645,
"grad_norm": 1.578125,
"learning_rate": 3.429203539823009e-06,
"loss": 0.1143,
"step": 4310
},
{
"epoch": 4.675324675324675,
"grad_norm": 4.0625,
"learning_rate": 3.3185840707964607e-06,
"loss": 0.1256,
"step": 4320
},
{
"epoch": 4.686147186147187,
"grad_norm": 1.5625,
"learning_rate": 3.2079646017699117e-06,
"loss": 0.1275,
"step": 4330
},
{
"epoch": 4.696969696969697,
"grad_norm": 1.4453125,
"learning_rate": 3.097345132743363e-06,
"loss": 0.1211,
"step": 4340
},
{
"epoch": 4.707792207792208,
"grad_norm": 1.4296875,
"learning_rate": 2.9867256637168145e-06,
"loss": 0.1093,
"step": 4350
},
{
"epoch": 4.7186147186147185,
"grad_norm": 1.7109375,
"learning_rate": 2.8761061946902655e-06,
"loss": 0.114,
"step": 4360
},
{
"epoch": 4.729437229437229,
"grad_norm": 1.4453125,
"learning_rate": 2.7654867256637173e-06,
"loss": 0.1224,
"step": 4370
},
{
"epoch": 4.740259740259741,
"grad_norm": 1.609375,
"learning_rate": 2.6548672566371683e-06,
"loss": 0.1259,
"step": 4380
},
{
"epoch": 4.751082251082251,
"grad_norm": 1.3984375,
"learning_rate": 2.5442477876106196e-06,
"loss": 0.1126,
"step": 4390
},
{
"epoch": 4.761904761904762,
"grad_norm": 1.265625,
"learning_rate": 2.433628318584071e-06,
"loss": 0.1023,
"step": 4400
},
{
"epoch": 4.761904761904762,
"eval_loss": 0.1923452615737915,
"eval_runtime": 19.6377,
"eval_samples_per_second": 25.461,
"eval_steps_per_second": 0.204,
"step": 4400
},
{
"epoch": 4.7727272727272725,
"grad_norm": 1.8671875,
"learning_rate": 2.3230088495575224e-06,
"loss": 0.1191,
"step": 4410
},
{
"epoch": 4.783549783549784,
"grad_norm": 1.5859375,
"learning_rate": 2.2123893805309734e-06,
"loss": 0.113,
"step": 4420
},
{
"epoch": 4.794372294372295,
"grad_norm": 0.8828125,
"learning_rate": 2.1017699115044252e-06,
"loss": 0.1147,
"step": 4430
},
{
"epoch": 4.805194805194805,
"grad_norm": 1.5703125,
"learning_rate": 1.991150442477876e-06,
"loss": 0.1159,
"step": 4440
},
{
"epoch": 4.816017316017316,
"grad_norm": 1.203125,
"learning_rate": 1.8805309734513276e-06,
"loss": 0.1171,
"step": 4450
},
{
"epoch": 4.8268398268398265,
"grad_norm": 1.3984375,
"learning_rate": 1.7699115044247788e-06,
"loss": 0.1183,
"step": 4460
},
{
"epoch": 4.837662337662338,
"grad_norm": 1.359375,
"learning_rate": 1.6592920353982304e-06,
"loss": 0.1101,
"step": 4470
},
{
"epoch": 4.848484848484849,
"grad_norm": 0.87890625,
"learning_rate": 1.5486725663716816e-06,
"loss": 0.1138,
"step": 4480
},
{
"epoch": 4.859307359307359,
"grad_norm": 1.390625,
"learning_rate": 1.4380530973451327e-06,
"loss": 0.1277,
"step": 4490
},
{
"epoch": 4.87012987012987,
"grad_norm": 1.5703125,
"learning_rate": 1.3274336283185841e-06,
"loss": 0.113,
"step": 4500
},
{
"epoch": 4.87012987012987,
"eval_loss": 0.1925743669271469,
"eval_runtime": 14.1551,
"eval_samples_per_second": 35.323,
"eval_steps_per_second": 0.283,
"step": 4500
},
{
"epoch": 4.880952380952381,
"grad_norm": 1.6328125,
"learning_rate": 1.2168141592920355e-06,
"loss": 0.1012,
"step": 4510
},
{
"epoch": 4.891774891774892,
"grad_norm": 1.0625,
"learning_rate": 1.1061946902654867e-06,
"loss": 0.109,
"step": 4520
},
{
"epoch": 4.902597402597403,
"grad_norm": 0.86328125,
"learning_rate": 9.95575221238938e-07,
"loss": 0.1191,
"step": 4530
},
{
"epoch": 4.913419913419913,
"grad_norm": 1.390625,
"learning_rate": 8.849557522123894e-07,
"loss": 0.1144,
"step": 4540
},
{
"epoch": 4.924242424242424,
"grad_norm": 0.94140625,
"learning_rate": 7.743362831858408e-07,
"loss": 0.1167,
"step": 4550
},
{
"epoch": 4.935064935064935,
"grad_norm": 1.3046875,
"learning_rate": 6.637168141592921e-07,
"loss": 0.1131,
"step": 4560
},
{
"epoch": 4.945887445887446,
"grad_norm": 1.0234375,
"learning_rate": 5.530973451327434e-07,
"loss": 0.1185,
"step": 4570
},
{
"epoch": 4.956709956709957,
"grad_norm": 1.140625,
"learning_rate": 4.424778761061947e-07,
"loss": 0.1302,
"step": 4580
},
{
"epoch": 4.967532467532467,
"grad_norm": 1.875,
"learning_rate": 3.3185840707964603e-07,
"loss": 0.1145,
"step": 4590
},
{
"epoch": 4.978354978354979,
"grad_norm": 1.3125,
"learning_rate": 2.2123893805309735e-07,
"loss": 0.1136,
"step": 4600
},
{
"epoch": 4.978354978354979,
"eval_loss": 0.19142386317253113,
"eval_runtime": 13.9692,
"eval_samples_per_second": 35.793,
"eval_steps_per_second": 0.286,
"step": 4600
},
{
"epoch": 4.989177489177489,
"grad_norm": 1.1015625,
"learning_rate": 1.1061946902654867e-07,
"loss": 0.1204,
"step": 4610
},
{
"epoch": 5.0,
"grad_norm": 1.625,
"learning_rate": 0.0,
"loss": 0.11,
"step": 4620
}
],
"logging_steps": 10,
"max_steps": 4620,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.062004698834473e+18,
"train_batch_size": 128,
"trial_name": null,
"trial_params": null
}
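
A minimal sketch for inspecting this file after download, assuming it is saved locally as trainer_state.json (the filename and the use of matplotlib are assumptions; the keys read below — log_history, step, loss, eval_loss — are the ones present in the record above, where training loss is logged every logging_steps (10) steps and eval_loss every 100 steps):

    # Sketch: load the Trainer state and plot training vs. eval loss.
    import json

    import matplotlib.pyplot as plt

    with open("trainer_state.json") as f:   # assumed local path
        state = json.load(f)

    train_steps, train_loss = [], []
    eval_steps, eval_loss = [], []
    for entry in state["log_history"]:
        if "loss" in entry:        # training loss, logged every 10 steps
            train_steps.append(entry["step"])
            train_loss.append(entry["loss"])
        if "eval_loss" in entry:   # eval loss, logged every 100 steps
            eval_steps.append(entry["step"])
            eval_loss.append(entry["eval_loss"])

    plt.plot(train_steps, train_loss, label="train loss")
    plt.plot(eval_steps, eval_loss, label="eval loss")
    plt.xlabel("global step")
    plt.ylabel("loss")
    plt.legend()
    plt.savefig("loss_curves.png")

The same loop can be extended to learning_rate or grad_norm to check the schedule and gradient behaviour over the final epoch.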