videomae-base-finetuned-scratch_1 / trainer_state.json
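The JSON below is the `trainer_state.json` that the Hugging Face `Trainer` writes alongside each checkpoint: the top-level fields record the best metric and best checkpoint, and `log_history` interleaves training logs (`loss`, `learning_rate`, `grad_norm` at each logging step) with periodic evaluation logs (`eval_accuracy`, `eval_loss`, runtime). A minimal sketch of how one might read it, assuming the checkpoint path taken from `best_model_checkpoint` below (adjust it to wherever the checkpoint actually lives):

```python
import json

# Hypothetical path; this run's best_model_checkpoint points here.
path = "videomae-base-finetuned-scratch_1/checkpoint-5280/trainer_state.json"

with open(path) as f:
    state = json.load(f)

print("best metric     :", state["best_metric"])            # eval accuracy of the best checkpoint
print("best checkpoint :", state["best_model_checkpoint"])

# log_history mixes training and evaluation entries; filter on the key present
# only in evaluation logs.
eval_logs = [entry for entry in state["log_history"] if "eval_accuracy" in entry]
for entry in eval_logs:
    print(f"step {entry['step']:>5}  "
          f"eval_accuracy={entry['eval_accuracy']:.4f}  "
          f"eval_loss={entry['eval_loss']:.4f}")
```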
{
"best_metric": 0.8209169054441261,
"best_model_checkpoint": "videomae-base-finetuned-scratch_1/checkpoint-5280",
"epoch": 35.02580971659919,
"eval_steps": 500,
"global_step": 11856,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 5.349636554718018,
"learning_rate": 4.2158516020236085e-07,
"loss": 0.7141,
"step": 10
},
{
"epoch": 0.0,
"grad_norm": 5.897022724151611,
"learning_rate": 8.431703204047217e-07,
"loss": 0.7044,
"step": 20
},
{
"epoch": 0.0,
"grad_norm": 4.595437526702881,
"learning_rate": 1.2647554806070827e-06,
"loss": 0.7118,
"step": 30
},
{
"epoch": 0.0,
"grad_norm": 4.464962959289551,
"learning_rate": 1.6863406408094434e-06,
"loss": 0.7121,
"step": 40
},
{
"epoch": 0.0,
"grad_norm": 4.585201263427734,
"learning_rate": 2.1079258010118047e-06,
"loss": 0.7018,
"step": 50
},
{
"epoch": 0.01,
"grad_norm": 4.25459098815918,
"learning_rate": 2.5295109612141654e-06,
"loss": 0.6952,
"step": 60
},
{
"epoch": 0.01,
"grad_norm": 3.792253017425537,
"learning_rate": 2.951096121416526e-06,
"loss": 0.6813,
"step": 70
},
{
"epoch": 0.01,
"grad_norm": 3.621083974838257,
"learning_rate": 3.372681281618887e-06,
"loss": 0.6814,
"step": 80
},
{
"epoch": 0.01,
"grad_norm": 4.006009578704834,
"learning_rate": 3.794266441821248e-06,
"loss": 0.6896,
"step": 90
},
{
"epoch": 0.01,
"grad_norm": 3.3775813579559326,
"learning_rate": 4.2158516020236095e-06,
"loss": 0.6803,
"step": 100
},
{
"epoch": 0.01,
"grad_norm": 5.32543420791626,
"learning_rate": 4.63743676222597e-06,
"loss": 0.6793,
"step": 110
},
{
"epoch": 0.01,
"grad_norm": 4.859120845794678,
"learning_rate": 5.059021922428331e-06,
"loss": 0.6715,
"step": 120
},
{
"epoch": 0.01,
"grad_norm": 3.3114655017852783,
"learning_rate": 5.480607082630692e-06,
"loss": 0.705,
"step": 130
},
{
"epoch": 0.01,
"grad_norm": 4.681263446807861,
"learning_rate": 5.902192242833052e-06,
"loss": 0.7032,
"step": 140
},
{
"epoch": 0.01,
"grad_norm": 3.5089430809020996,
"learning_rate": 6.323777403035413e-06,
"loss": 0.6903,
"step": 150
},
{
"epoch": 0.01,
"grad_norm": 4.625184059143066,
"learning_rate": 6.745362563237774e-06,
"loss": 0.6885,
"step": 160
},
{
"epoch": 0.01,
"grad_norm": 3.5083889961242676,
"learning_rate": 7.166947723440136e-06,
"loss": 0.6671,
"step": 170
},
{
"epoch": 0.02,
"grad_norm": 6.028371334075928,
"learning_rate": 7.588532883642496e-06,
"loss": 0.6822,
"step": 180
},
{
"epoch": 0.02,
"grad_norm": 4.046874046325684,
"learning_rate": 8.010118043844857e-06,
"loss": 0.676,
"step": 190
},
{
"epoch": 0.02,
"grad_norm": 7.515069961547852,
"learning_rate": 8.431703204047219e-06,
"loss": 0.6649,
"step": 200
},
{
"epoch": 0.02,
"grad_norm": 4.611435890197754,
"learning_rate": 8.85328836424958e-06,
"loss": 0.6726,
"step": 210
},
{
"epoch": 0.02,
"grad_norm": 5.385730743408203,
"learning_rate": 9.27487352445194e-06,
"loss": 0.6771,
"step": 220
},
{
"epoch": 0.02,
"grad_norm": 4.537276268005371,
"learning_rate": 9.696458684654301e-06,
"loss": 0.6543,
"step": 230
},
{
"epoch": 0.02,
"grad_norm": 4.257579803466797,
"learning_rate": 1.0118043844856662e-05,
"loss": 0.6602,
"step": 240
},
{
"epoch": 0.02,
"grad_norm": 5.346296787261963,
"learning_rate": 1.0539629005059022e-05,
"loss": 0.6293,
"step": 250
},
{
"epoch": 0.02,
"grad_norm": 8.624234199523926,
"learning_rate": 1.0961214165261384e-05,
"loss": 0.6735,
"step": 260
},
{
"epoch": 0.02,
"grad_norm": 5.386282920837402,
"learning_rate": 1.1382799325463744e-05,
"loss": 0.6612,
"step": 270
},
{
"epoch": 0.02,
"grad_norm": 9.212226867675781,
"learning_rate": 1.1804384485666105e-05,
"loss": 0.673,
"step": 280
},
{
"epoch": 0.02,
"grad_norm": 6.993302822113037,
"learning_rate": 1.2225969645868467e-05,
"loss": 0.6551,
"step": 290
},
{
"epoch": 0.03,
"grad_norm": 5.731637954711914,
"learning_rate": 1.2647554806070827e-05,
"loss": 0.6432,
"step": 300
},
{
"epoch": 0.03,
"grad_norm": 5.617886543273926,
"learning_rate": 1.3069139966273189e-05,
"loss": 0.6777,
"step": 310
},
{
"epoch": 0.03,
"grad_norm": 7.462168216705322,
"learning_rate": 1.3490725126475547e-05,
"loss": 0.6153,
"step": 320
},
{
"epoch": 0.03,
"grad_norm": 9.60923957824707,
"learning_rate": 1.391231028667791e-05,
"loss": 0.6811,
"step": 330
},
{
"epoch": 0.03,
"eval_accuracy": 0.6547277936962751,
"eval_loss": 0.6314294338226318,
"eval_runtime": 33.9302,
"eval_samples_per_second": 20.572,
"eval_steps_per_second": 1.739,
"step": 330
},
{
"epoch": 1.0,
"grad_norm": 9.263134002685547,
"learning_rate": 1.4333895446880271e-05,
"loss": 0.6244,
"step": 340
},
{
"epoch": 1.0,
"grad_norm": 8.611780166625977,
"learning_rate": 1.4755480607082632e-05,
"loss": 0.6358,
"step": 350
},
{
"epoch": 1.0,
"grad_norm": 8.437411308288574,
"learning_rate": 1.5177065767284992e-05,
"loss": 0.6273,
"step": 360
},
{
"epoch": 1.0,
"grad_norm": 6.759949207305908,
"learning_rate": 1.5598650927487355e-05,
"loss": 0.6529,
"step": 370
},
{
"epoch": 1.0,
"grad_norm": 15.71631908416748,
"learning_rate": 1.6020236087689714e-05,
"loss": 0.6066,
"step": 380
},
{
"epoch": 1.01,
"grad_norm": 17.849533081054688,
"learning_rate": 1.6441821247892076e-05,
"loss": 0.5587,
"step": 390
},
{
"epoch": 1.01,
"grad_norm": 4.601895809173584,
"learning_rate": 1.6863406408094438e-05,
"loss": 0.6334,
"step": 400
},
{
"epoch": 1.01,
"grad_norm": 6.037390232086182,
"learning_rate": 1.7284991568296797e-05,
"loss": 0.7324,
"step": 410
},
{
"epoch": 1.01,
"grad_norm": 4.270590782165527,
"learning_rate": 1.770657672849916e-05,
"loss": 0.6552,
"step": 420
},
{
"epoch": 1.01,
"grad_norm": 10.071660995483398,
"learning_rate": 1.812816188870152e-05,
"loss": 0.6441,
"step": 430
},
{
"epoch": 1.01,
"grad_norm": 7.655844211578369,
"learning_rate": 1.854974704890388e-05,
"loss": 0.6271,
"step": 440
},
{
"epoch": 1.01,
"grad_norm": 7.267734050750732,
"learning_rate": 1.897133220910624e-05,
"loss": 0.6048,
"step": 450
},
{
"epoch": 1.01,
"grad_norm": 6.196805953979492,
"learning_rate": 1.9392917369308603e-05,
"loss": 0.6371,
"step": 460
},
{
"epoch": 1.01,
"grad_norm": 5.247162342071533,
"learning_rate": 1.981450252951096e-05,
"loss": 0.5485,
"step": 470
},
{
"epoch": 1.01,
"grad_norm": 7.14028787612915,
"learning_rate": 2.0236087689713324e-05,
"loss": 0.6503,
"step": 480
},
{
"epoch": 1.01,
"grad_norm": 7.4398722648620605,
"learning_rate": 2.0657672849915685e-05,
"loss": 0.5997,
"step": 490
},
{
"epoch": 1.01,
"grad_norm": 4.917046070098877,
"learning_rate": 2.1079258010118044e-05,
"loss": 0.6271,
"step": 500
},
{
"epoch": 1.02,
"grad_norm": 7.6909565925598145,
"learning_rate": 2.1500843170320406e-05,
"loss": 0.5816,
"step": 510
},
{
"epoch": 1.02,
"grad_norm": 5.249167442321777,
"learning_rate": 2.1922428330522768e-05,
"loss": 0.6534,
"step": 520
},
{
"epoch": 1.02,
"grad_norm": 5.298259735107422,
"learning_rate": 2.2344013490725127e-05,
"loss": 0.6713,
"step": 530
},
{
"epoch": 1.02,
"grad_norm": 8.135736465454102,
"learning_rate": 2.276559865092749e-05,
"loss": 0.5999,
"step": 540
},
{
"epoch": 1.02,
"grad_norm": 9.37923812866211,
"learning_rate": 2.318718381112985e-05,
"loss": 0.7861,
"step": 550
},
{
"epoch": 1.02,
"grad_norm": 4.831663131713867,
"learning_rate": 2.360876897133221e-05,
"loss": 0.605,
"step": 560
},
{
"epoch": 1.02,
"grad_norm": 5.473945617675781,
"learning_rate": 2.403035413153457e-05,
"loss": 0.6066,
"step": 570
},
{
"epoch": 1.02,
"grad_norm": 5.188632488250732,
"learning_rate": 2.4451939291736933e-05,
"loss": 0.6189,
"step": 580
},
{
"epoch": 1.02,
"grad_norm": 4.069911956787109,
"learning_rate": 2.487352445193929e-05,
"loss": 0.6705,
"step": 590
},
{
"epoch": 1.02,
"grad_norm": 8.71951675415039,
"learning_rate": 2.5295109612141654e-05,
"loss": 0.6168,
"step": 600
},
{
"epoch": 1.02,
"grad_norm": 6.839282989501953,
"learning_rate": 2.5716694772344012e-05,
"loss": 0.6465,
"step": 610
},
{
"epoch": 1.02,
"grad_norm": 5.155991077423096,
"learning_rate": 2.6138279932546377e-05,
"loss": 0.6007,
"step": 620
},
{
"epoch": 1.03,
"grad_norm": 4.38847541809082,
"learning_rate": 2.6559865092748736e-05,
"loss": 0.6253,
"step": 630
},
{
"epoch": 1.03,
"grad_norm": 6.741366386413574,
"learning_rate": 2.6981450252951095e-05,
"loss": 0.614,
"step": 640
},
{
"epoch": 1.03,
"grad_norm": 6.325467586517334,
"learning_rate": 2.740303541315346e-05,
"loss": 0.6444,
"step": 650
},
{
"epoch": 1.03,
"grad_norm": 7.080298900604248,
"learning_rate": 2.782462057335582e-05,
"loss": 0.6706,
"step": 660
},
{
"epoch": 1.03,
"eval_accuracy": 0.6318051575931232,
"eval_loss": 0.635435938835144,
"eval_runtime": 36.8826,
"eval_samples_per_second": 18.925,
"eval_steps_per_second": 1.6,
"step": 660
},
{
"epoch": 2.0,
"grad_norm": 8.330774307250977,
"learning_rate": 2.8246205733558177e-05,
"loss": 0.5594,
"step": 670
},
{
"epoch": 2.0,
"grad_norm": 5.559295654296875,
"learning_rate": 2.8667790893760543e-05,
"loss": 0.6125,
"step": 680
},
{
"epoch": 2.0,
"grad_norm": 7.404927730560303,
"learning_rate": 2.90893760539629e-05,
"loss": 0.5856,
"step": 690
},
{
"epoch": 2.0,
"grad_norm": 13.15217113494873,
"learning_rate": 2.9510961214165263e-05,
"loss": 0.5817,
"step": 700
},
{
"epoch": 2.0,
"grad_norm": 8.084522247314453,
"learning_rate": 2.9932546374367625e-05,
"loss": 0.646,
"step": 710
},
{
"epoch": 2.01,
"grad_norm": 10.898458480834961,
"learning_rate": 3.0354131534569984e-05,
"loss": 0.5814,
"step": 720
},
{
"epoch": 2.01,
"grad_norm": 4.796360015869141,
"learning_rate": 3.0775716694772346e-05,
"loss": 0.563,
"step": 730
},
{
"epoch": 2.01,
"grad_norm": 6.336984157562256,
"learning_rate": 3.119730185497471e-05,
"loss": 0.5837,
"step": 740
},
{
"epoch": 2.01,
"grad_norm": 6.342469692230225,
"learning_rate": 3.161888701517707e-05,
"loss": 0.5755,
"step": 750
},
{
"epoch": 2.01,
"grad_norm": 3.3841326236724854,
"learning_rate": 3.204047217537943e-05,
"loss": 0.6185,
"step": 760
},
{
"epoch": 2.01,
"grad_norm": 3.3901569843292236,
"learning_rate": 3.2462057335581793e-05,
"loss": 0.6117,
"step": 770
},
{
"epoch": 2.01,
"grad_norm": 5.979400634765625,
"learning_rate": 3.288364249578415e-05,
"loss": 0.5974,
"step": 780
},
{
"epoch": 2.01,
"grad_norm": 8.773338317871094,
"learning_rate": 3.330522765598651e-05,
"loss": 0.6024,
"step": 790
},
{
"epoch": 2.01,
"grad_norm": 7.4114298820495605,
"learning_rate": 3.3726812816188876e-05,
"loss": 0.5838,
"step": 800
},
{
"epoch": 2.01,
"grad_norm": 8.659492492675781,
"learning_rate": 3.4148397976391235e-05,
"loss": 0.6818,
"step": 810
},
{
"epoch": 2.01,
"grad_norm": 4.71567440032959,
"learning_rate": 3.456998313659359e-05,
"loss": 0.6604,
"step": 820
},
{
"epoch": 2.01,
"grad_norm": 4.012688159942627,
"learning_rate": 3.499156829679596e-05,
"loss": 0.6041,
"step": 830
},
{
"epoch": 2.02,
"grad_norm": 5.472045421600342,
"learning_rate": 3.541315345699832e-05,
"loss": 0.573,
"step": 840
},
{
"epoch": 2.02,
"grad_norm": 3.6241841316223145,
"learning_rate": 3.5834738617200676e-05,
"loss": 0.6385,
"step": 850
},
{
"epoch": 2.02,
"grad_norm": 3.782322645187378,
"learning_rate": 3.625632377740304e-05,
"loss": 0.6425,
"step": 860
},
{
"epoch": 2.02,
"grad_norm": 5.188979148864746,
"learning_rate": 3.66779089376054e-05,
"loss": 0.5915,
"step": 870
},
{
"epoch": 2.02,
"grad_norm": 7.798283576965332,
"learning_rate": 3.709949409780776e-05,
"loss": 0.6863,
"step": 880
},
{
"epoch": 2.02,
"grad_norm": 4.644458293914795,
"learning_rate": 3.7521079258010123e-05,
"loss": 0.6003,
"step": 890
},
{
"epoch": 2.02,
"grad_norm": 7.098191738128662,
"learning_rate": 3.794266441821248e-05,
"loss": 0.5639,
"step": 900
},
{
"epoch": 2.02,
"grad_norm": 5.175333499908447,
"learning_rate": 3.836424957841484e-05,
"loss": 0.5598,
"step": 910
},
{
"epoch": 2.02,
"grad_norm": 7.205776691436768,
"learning_rate": 3.8785834738617206e-05,
"loss": 0.6401,
"step": 920
},
{
"epoch": 2.02,
"grad_norm": 4.420816898345947,
"learning_rate": 3.9207419898819565e-05,
"loss": 0.6388,
"step": 930
},
{
"epoch": 2.02,
"grad_norm": 3.2338483333587646,
"learning_rate": 3.962900505902192e-05,
"loss": 0.5893,
"step": 940
},
{
"epoch": 2.02,
"grad_norm": 3.816267251968384,
"learning_rate": 4.005059021922429e-05,
"loss": 0.6419,
"step": 950
},
{
"epoch": 2.03,
"grad_norm": 3.6634039878845215,
"learning_rate": 4.047217537942665e-05,
"loss": 0.659,
"step": 960
},
{
"epoch": 2.03,
"grad_norm": 2.776705503463745,
"learning_rate": 4.0893760539629006e-05,
"loss": 0.6155,
"step": 970
},
{
"epoch": 2.03,
"grad_norm": 3.137341260910034,
"learning_rate": 4.131534569983137e-05,
"loss": 0.6158,
"step": 980
},
{
"epoch": 2.03,
"grad_norm": 6.1107707023620605,
"learning_rate": 4.173693086003373e-05,
"loss": 0.6581,
"step": 990
},
{
"epoch": 2.03,
"eval_accuracy": 0.6375358166189111,
"eval_loss": 0.6327700018882751,
"eval_runtime": 33.7114,
"eval_samples_per_second": 20.705,
"eval_steps_per_second": 1.75,
"step": 990
},
{
"epoch": 3.0,
"grad_norm": 3.0415008068084717,
"learning_rate": 4.215851602023609e-05,
"loss": 0.6034,
"step": 1000
},
{
"epoch": 3.0,
"grad_norm": 6.006179332733154,
"learning_rate": 4.2580101180438453e-05,
"loss": 0.4955,
"step": 1010
},
{
"epoch": 3.0,
"grad_norm": 6.862912654876709,
"learning_rate": 4.300168634064081e-05,
"loss": 0.4918,
"step": 1020
},
{
"epoch": 3.0,
"grad_norm": 11.918879508972168,
"learning_rate": 4.342327150084317e-05,
"loss": 0.5643,
"step": 1030
},
{
"epoch": 3.0,
"grad_norm": 8.16517448425293,
"learning_rate": 4.3844856661045536e-05,
"loss": 0.638,
"step": 1040
},
{
"epoch": 3.01,
"grad_norm": 6.384884357452393,
"learning_rate": 4.4266441821247895e-05,
"loss": 0.5995,
"step": 1050
},
{
"epoch": 3.01,
"grad_norm": 5.2973127365112305,
"learning_rate": 4.468802698145025e-05,
"loss": 0.639,
"step": 1060
},
{
"epoch": 3.01,
"grad_norm": 3.8324005603790283,
"learning_rate": 4.510961214165262e-05,
"loss": 0.572,
"step": 1070
},
{
"epoch": 3.01,
"grad_norm": 4.97128963470459,
"learning_rate": 4.553119730185498e-05,
"loss": 0.597,
"step": 1080
},
{
"epoch": 3.01,
"grad_norm": 5.064922332763672,
"learning_rate": 4.5952782462057336e-05,
"loss": 0.5617,
"step": 1090
},
{
"epoch": 3.01,
"grad_norm": 3.734870672225952,
"learning_rate": 4.63743676222597e-05,
"loss": 0.5439,
"step": 1100
},
{
"epoch": 3.01,
"grad_norm": 12.495979309082031,
"learning_rate": 4.679595278246206e-05,
"loss": 0.5659,
"step": 1110
},
{
"epoch": 3.01,
"grad_norm": 3.5089731216430664,
"learning_rate": 4.721753794266442e-05,
"loss": 0.517,
"step": 1120
},
{
"epoch": 3.01,
"grad_norm": 5.993117332458496,
"learning_rate": 4.7639123102866784e-05,
"loss": 0.6202,
"step": 1130
},
{
"epoch": 3.01,
"grad_norm": 3.431788444519043,
"learning_rate": 4.806070826306914e-05,
"loss": 0.5767,
"step": 1140
},
{
"epoch": 3.01,
"grad_norm": 5.00049352645874,
"learning_rate": 4.84822934232715e-05,
"loss": 0.5623,
"step": 1150
},
{
"epoch": 3.01,
"grad_norm": 5.571552276611328,
"learning_rate": 4.8903878583473866e-05,
"loss": 0.5445,
"step": 1160
},
{
"epoch": 3.02,
"grad_norm": 3.7177913188934326,
"learning_rate": 4.9325463743676225e-05,
"loss": 0.5748,
"step": 1170
},
{
"epoch": 3.02,
"grad_norm": 4.8672194480896,
"learning_rate": 4.974704890387858e-05,
"loss": 0.6161,
"step": 1180
},
{
"epoch": 3.02,
"grad_norm": 4.830544948577881,
"learning_rate": 4.9981255857544517e-05,
"loss": 0.587,
"step": 1190
},
{
"epoch": 3.02,
"grad_norm": 5.276002407073975,
"learning_rate": 4.993439550140581e-05,
"loss": 0.5564,
"step": 1200
},
{
"epoch": 3.02,
"grad_norm": 6.24776029586792,
"learning_rate": 4.988753514526711e-05,
"loss": 0.5164,
"step": 1210
},
{
"epoch": 3.02,
"grad_norm": 4.341228008270264,
"learning_rate": 4.98406747891284e-05,
"loss": 0.501,
"step": 1220
},
{
"epoch": 3.02,
"grad_norm": 3.2242419719696045,
"learning_rate": 4.979381443298969e-05,
"loss": 0.5489,
"step": 1230
},
{
"epoch": 3.02,
"grad_norm": 4.790765285491943,
"learning_rate": 4.9746954076850985e-05,
"loss": 0.5591,
"step": 1240
},
{
"epoch": 3.02,
"grad_norm": 3.7129054069519043,
"learning_rate": 4.9700093720712284e-05,
"loss": 0.612,
"step": 1250
},
{
"epoch": 3.02,
"grad_norm": 7.660614013671875,
"learning_rate": 4.9653233364573576e-05,
"loss": 0.5194,
"step": 1260
},
{
"epoch": 3.02,
"grad_norm": 5.343811511993408,
"learning_rate": 4.960637300843486e-05,
"loss": 0.6074,
"step": 1270
},
{
"epoch": 3.02,
"grad_norm": 4.8525590896606445,
"learning_rate": 4.955951265229616e-05,
"loss": 0.548,
"step": 1280
},
{
"epoch": 3.03,
"grad_norm": 4.513533592224121,
"learning_rate": 4.951265229615745e-05,
"loss": 0.5818,
"step": 1290
},
{
"epoch": 3.03,
"grad_norm": 3.897047996520996,
"learning_rate": 4.9465791940018746e-05,
"loss": 0.5784,
"step": 1300
},
{
"epoch": 3.03,
"grad_norm": 3.294161796569824,
"learning_rate": 4.941893158388004e-05,
"loss": 0.5605,
"step": 1310
},
{
"epoch": 3.03,
"grad_norm": 6.77754545211792,
"learning_rate": 4.937207122774134e-05,
"loss": 0.5614,
"step": 1320
},
{
"epoch": 3.03,
"eval_accuracy": 0.7134670487106017,
"eval_loss": 0.5506558418273926,
"eval_runtime": 34.5926,
"eval_samples_per_second": 20.178,
"eval_steps_per_second": 1.706,
"step": 1320
},
{
"epoch": 4.0,
"grad_norm": 4.854128360748291,
"learning_rate": 4.932521087160263e-05,
"loss": 0.5509,
"step": 1330
},
{
"epoch": 4.0,
"grad_norm": 6.327508449554443,
"learning_rate": 4.927835051546392e-05,
"loss": 0.5109,
"step": 1340
},
{
"epoch": 4.0,
"grad_norm": 5.180001258850098,
"learning_rate": 4.9231490159325214e-05,
"loss": 0.5522,
"step": 1350
},
{
"epoch": 4.0,
"grad_norm": 6.649283409118652,
"learning_rate": 4.9184629803186506e-05,
"loss": 0.4932,
"step": 1360
},
{
"epoch": 4.0,
"grad_norm": 4.401059150695801,
"learning_rate": 4.91377694470478e-05,
"loss": 0.5446,
"step": 1370
},
{
"epoch": 4.01,
"grad_norm": 4.1620635986328125,
"learning_rate": 4.909090909090909e-05,
"loss": 0.4964,
"step": 1380
},
{
"epoch": 4.01,
"grad_norm": 5.955656051635742,
"learning_rate": 4.904404873477039e-05,
"loss": 0.4685,
"step": 1390
},
{
"epoch": 4.01,
"grad_norm": 2.9708614349365234,
"learning_rate": 4.899718837863168e-05,
"loss": 0.4874,
"step": 1400
},
{
"epoch": 4.01,
"grad_norm": 4.681861877441406,
"learning_rate": 4.8950328022492975e-05,
"loss": 0.5001,
"step": 1410
},
{
"epoch": 4.01,
"grad_norm": 10.97381591796875,
"learning_rate": 4.890346766635427e-05,
"loss": 0.4289,
"step": 1420
},
{
"epoch": 4.01,
"grad_norm": 4.6500349044799805,
"learning_rate": 4.885660731021556e-05,
"loss": 0.4725,
"step": 1430
},
{
"epoch": 4.01,
"grad_norm": 7.461334705352783,
"learning_rate": 4.880974695407685e-05,
"loss": 0.5846,
"step": 1440
},
{
"epoch": 4.01,
"grad_norm": 4.3084940910339355,
"learning_rate": 4.8762886597938144e-05,
"loss": 0.5864,
"step": 1450
},
{
"epoch": 4.01,
"grad_norm": 8.88397216796875,
"learning_rate": 4.8716026241799436e-05,
"loss": 0.5392,
"step": 1460
},
{
"epoch": 4.01,
"grad_norm": 6.398833274841309,
"learning_rate": 4.8669165885660735e-05,
"loss": 0.5501,
"step": 1470
},
{
"epoch": 4.01,
"grad_norm": 6.638328552246094,
"learning_rate": 4.862230552952203e-05,
"loss": 0.5494,
"step": 1480
},
{
"epoch": 4.01,
"grad_norm": 5.006091594696045,
"learning_rate": 4.857544517338332e-05,
"loss": 0.6367,
"step": 1490
},
{
"epoch": 4.02,
"grad_norm": 5.209989547729492,
"learning_rate": 4.852858481724461e-05,
"loss": 0.5773,
"step": 1500
},
{
"epoch": 4.02,
"grad_norm": 5.720169544219971,
"learning_rate": 4.8481724461105905e-05,
"loss": 0.5022,
"step": 1510
},
{
"epoch": 4.02,
"grad_norm": 6.709425449371338,
"learning_rate": 4.84348641049672e-05,
"loss": 0.4968,
"step": 1520
},
{
"epoch": 4.02,
"grad_norm": 6.903029918670654,
"learning_rate": 4.838800374882849e-05,
"loss": 0.5384,
"step": 1530
},
{
"epoch": 4.02,
"grad_norm": 3.307621479034424,
"learning_rate": 4.834114339268979e-05,
"loss": 0.5675,
"step": 1540
},
{
"epoch": 4.02,
"grad_norm": 6.119607925415039,
"learning_rate": 4.829428303655108e-05,
"loss": 0.535,
"step": 1550
},
{
"epoch": 4.02,
"grad_norm": 2.2412047386169434,
"learning_rate": 4.824742268041237e-05,
"loss": 0.4636,
"step": 1560
},
{
"epoch": 4.02,
"grad_norm": 3.960838794708252,
"learning_rate": 4.8200562324273665e-05,
"loss": 0.4383,
"step": 1570
},
{
"epoch": 4.02,
"grad_norm": 3.922579765319824,
"learning_rate": 4.8153701968134964e-05,
"loss": 0.4583,
"step": 1580
},
{
"epoch": 4.02,
"grad_norm": 6.935645580291748,
"learning_rate": 4.810684161199626e-05,
"loss": 0.4757,
"step": 1590
},
{
"epoch": 4.02,
"grad_norm": 6.558530807495117,
"learning_rate": 4.805998125585754e-05,
"loss": 0.5097,
"step": 1600
},
{
"epoch": 4.02,
"grad_norm": 4.9284186363220215,
"learning_rate": 4.801312089971884e-05,
"loss": 0.5218,
"step": 1610
},
{
"epoch": 4.03,
"grad_norm": 8.183663368225098,
"learning_rate": 4.7966260543580134e-05,
"loss": 0.4462,
"step": 1620
},
{
"epoch": 4.03,
"grad_norm": 6.276401996612549,
"learning_rate": 4.7919400187441426e-05,
"loss": 0.7016,
"step": 1630
},
{
"epoch": 4.03,
"grad_norm": 4.575777530670166,
"learning_rate": 4.787253983130272e-05,
"loss": 0.4815,
"step": 1640
},
{
"epoch": 4.03,
"grad_norm": 2.3570098876953125,
"learning_rate": 4.782567947516402e-05,
"loss": 0.438,
"step": 1650
},
{
"epoch": 4.03,
"eval_accuracy": 0.7263610315186246,
"eval_loss": 0.5439700484275818,
"eval_runtime": 33.443,
"eval_samples_per_second": 20.871,
"eval_steps_per_second": 1.764,
"step": 1650
},
{
"epoch": 5.0,
"grad_norm": 7.316202163696289,
"learning_rate": 4.777881911902531e-05,
"loss": 0.3581,
"step": 1660
},
{
"epoch": 5.0,
"grad_norm": 6.561455249786377,
"learning_rate": 4.77319587628866e-05,
"loss": 0.6762,
"step": 1670
},
{
"epoch": 5.0,
"grad_norm": 3.9346327781677246,
"learning_rate": 4.7685098406747894e-05,
"loss": 0.4135,
"step": 1680
},
{
"epoch": 5.0,
"grad_norm": 3.1179192066192627,
"learning_rate": 4.7638238050609187e-05,
"loss": 0.4336,
"step": 1690
},
{
"epoch": 5.0,
"grad_norm": 6.217395305633545,
"learning_rate": 4.759137769447048e-05,
"loss": 0.4875,
"step": 1700
},
{
"epoch": 5.01,
"grad_norm": 5.203670978546143,
"learning_rate": 4.754451733833177e-05,
"loss": 0.4241,
"step": 1710
},
{
"epoch": 5.01,
"grad_norm": 6.5499420166015625,
"learning_rate": 4.749765698219307e-05,
"loss": 0.4823,
"step": 1720
},
{
"epoch": 5.01,
"grad_norm": 4.842621803283691,
"learning_rate": 4.745079662605436e-05,
"loss": 0.3871,
"step": 1730
},
{
"epoch": 5.01,
"grad_norm": 5.887164115905762,
"learning_rate": 4.7403936269915655e-05,
"loss": 0.4075,
"step": 1740
},
{
"epoch": 5.01,
"grad_norm": 7.392530918121338,
"learning_rate": 4.735707591377695e-05,
"loss": 0.4504,
"step": 1750
},
{
"epoch": 5.01,
"grad_norm": 6.812521934509277,
"learning_rate": 4.731021555763824e-05,
"loss": 0.5216,
"step": 1760
},
{
"epoch": 5.01,
"grad_norm": 5.895148754119873,
"learning_rate": 4.726335520149953e-05,
"loss": 0.455,
"step": 1770
},
{
"epoch": 5.01,
"grad_norm": 7.757108688354492,
"learning_rate": 4.7216494845360824e-05,
"loss": 0.488,
"step": 1780
},
{
"epoch": 5.01,
"grad_norm": 9.14256477355957,
"learning_rate": 4.7169634489222116e-05,
"loss": 0.5042,
"step": 1790
},
{
"epoch": 5.01,
"grad_norm": 5.17966890335083,
"learning_rate": 4.7122774133083416e-05,
"loss": 0.4534,
"step": 1800
},
{
"epoch": 5.01,
"grad_norm": 6.081084728240967,
"learning_rate": 4.707591377694471e-05,
"loss": 0.4221,
"step": 1810
},
{
"epoch": 5.01,
"grad_norm": 3.0460875034332275,
"learning_rate": 4.7029053420806e-05,
"loss": 0.4783,
"step": 1820
},
{
"epoch": 5.02,
"grad_norm": 5.692960262298584,
"learning_rate": 4.698219306466729e-05,
"loss": 0.5559,
"step": 1830
},
{
"epoch": 5.02,
"grad_norm": 5.824710845947266,
"learning_rate": 4.6935332708528585e-05,
"loss": 0.486,
"step": 1840
},
{
"epoch": 5.02,
"grad_norm": 8.523110389709473,
"learning_rate": 4.688847235238988e-05,
"loss": 0.4036,
"step": 1850
},
{
"epoch": 5.02,
"grad_norm": 13.942612648010254,
"learning_rate": 4.684161199625117e-05,
"loss": 0.4903,
"step": 1860
},
{
"epoch": 5.02,
"grad_norm": 6.322441577911377,
"learning_rate": 4.679475164011247e-05,
"loss": 0.4573,
"step": 1870
},
{
"epoch": 5.02,
"grad_norm": 3.1088461875915527,
"learning_rate": 4.674789128397376e-05,
"loss": 0.3915,
"step": 1880
},
{
"epoch": 5.02,
"grad_norm": 7.44355583190918,
"learning_rate": 4.670103092783505e-05,
"loss": 0.4966,
"step": 1890
},
{
"epoch": 5.02,
"grad_norm": 5.484576225280762,
"learning_rate": 4.6654170571696345e-05,
"loss": 0.4336,
"step": 1900
},
{
"epoch": 5.02,
"grad_norm": 6.856348037719727,
"learning_rate": 4.6607310215557645e-05,
"loss": 0.5151,
"step": 1910
},
{
"epoch": 5.02,
"grad_norm": 6.7076849937438965,
"learning_rate": 4.656044985941894e-05,
"loss": 0.4332,
"step": 1920
},
{
"epoch": 5.02,
"grad_norm": 6.39366340637207,
"learning_rate": 4.651358950328022e-05,
"loss": 0.4858,
"step": 1930
},
{
"epoch": 5.02,
"grad_norm": 7.002589225769043,
"learning_rate": 4.646672914714152e-05,
"loss": 0.4684,
"step": 1940
},
{
"epoch": 5.03,
"grad_norm": 10.67752456665039,
"learning_rate": 4.6419868791002814e-05,
"loss": 0.3976,
"step": 1950
},
{
"epoch": 5.03,
"grad_norm": 12.617927551269531,
"learning_rate": 4.6373008434864106e-05,
"loss": 0.4468,
"step": 1960
},
{
"epoch": 5.03,
"grad_norm": 6.047290802001953,
"learning_rate": 4.63261480787254e-05,
"loss": 0.5431,
"step": 1970
},
{
"epoch": 5.03,
"grad_norm": 6.107527256011963,
"learning_rate": 4.62792877225867e-05,
"loss": 0.4569,
"step": 1980
},
{
"epoch": 5.03,
"eval_accuracy": 0.7277936962750716,
"eval_loss": 0.5531865954399109,
"eval_runtime": 34.546,
"eval_samples_per_second": 20.205,
"eval_steps_per_second": 1.708,
"step": 1980
},
{
"epoch": 6.0,
"grad_norm": 5.367207050323486,
"learning_rate": 4.623242736644799e-05,
"loss": 0.37,
"step": 1990
},
{
"epoch": 6.0,
"grad_norm": 5.771437644958496,
"learning_rate": 4.618556701030928e-05,
"loss": 0.3602,
"step": 2000
},
{
"epoch": 6.0,
"grad_norm": 9.664875984191895,
"learning_rate": 4.6138706654170575e-05,
"loss": 0.3343,
"step": 2010
},
{
"epoch": 6.0,
"grad_norm": 6.788029193878174,
"learning_rate": 4.609184629803187e-05,
"loss": 0.4078,
"step": 2020
},
{
"epoch": 6.0,
"grad_norm": 7.247564315795898,
"learning_rate": 4.604498594189316e-05,
"loss": 0.4155,
"step": 2030
},
{
"epoch": 6.01,
"grad_norm": 4.327235698699951,
"learning_rate": 4.599812558575445e-05,
"loss": 0.3966,
"step": 2040
},
{
"epoch": 6.01,
"grad_norm": 7.491323947906494,
"learning_rate": 4.595126522961575e-05,
"loss": 0.349,
"step": 2050
},
{
"epoch": 6.01,
"grad_norm": 9.212530136108398,
"learning_rate": 4.590440487347704e-05,
"loss": 0.4265,
"step": 2060
},
{
"epoch": 6.01,
"grad_norm": 13.466367721557617,
"learning_rate": 4.5857544517338335e-05,
"loss": 0.4375,
"step": 2070
},
{
"epoch": 6.01,
"grad_norm": 7.694874286651611,
"learning_rate": 4.581068416119963e-05,
"loss": 0.3759,
"step": 2080
},
{
"epoch": 6.01,
"grad_norm": 8.024492263793945,
"learning_rate": 4.576382380506092e-05,
"loss": 0.3936,
"step": 2090
},
{
"epoch": 6.01,
"grad_norm": 5.053406238555908,
"learning_rate": 4.571696344892221e-05,
"loss": 0.3063,
"step": 2100
},
{
"epoch": 6.01,
"grad_norm": 7.138169288635254,
"learning_rate": 4.5670103092783504e-05,
"loss": 0.4692,
"step": 2110
},
{
"epoch": 6.01,
"grad_norm": 3.2747809886932373,
"learning_rate": 4.56232427366448e-05,
"loss": 0.3421,
"step": 2120
},
{
"epoch": 6.01,
"grad_norm": 5.716726303100586,
"learning_rate": 4.5576382380506096e-05,
"loss": 0.4158,
"step": 2130
},
{
"epoch": 6.01,
"grad_norm": 5.238852500915527,
"learning_rate": 4.552952202436739e-05,
"loss": 0.3768,
"step": 2140
},
{
"epoch": 6.01,
"grad_norm": 9.766545295715332,
"learning_rate": 4.548266166822868e-05,
"loss": 0.3791,
"step": 2150
},
{
"epoch": 6.02,
"grad_norm": 3.8693816661834717,
"learning_rate": 4.543580131208997e-05,
"loss": 0.334,
"step": 2160
},
{
"epoch": 6.02,
"grad_norm": 9.960877418518066,
"learning_rate": 4.538894095595127e-05,
"loss": 0.4487,
"step": 2170
},
{
"epoch": 6.02,
"grad_norm": 4.830340385437012,
"learning_rate": 4.534208059981256e-05,
"loss": 0.4052,
"step": 2180
},
{
"epoch": 6.02,
"grad_norm": 4.533899307250977,
"learning_rate": 4.529522024367385e-05,
"loss": 0.3292,
"step": 2190
},
{
"epoch": 6.02,
"grad_norm": 4.12489652633667,
"learning_rate": 4.524835988753515e-05,
"loss": 0.342,
"step": 2200
},
{
"epoch": 6.02,
"grad_norm": 7.824032306671143,
"learning_rate": 4.520149953139644e-05,
"loss": 0.4713,
"step": 2210
},
{
"epoch": 6.02,
"grad_norm": 6.992900371551514,
"learning_rate": 4.5154639175257733e-05,
"loss": 0.4517,
"step": 2220
},
{
"epoch": 6.02,
"grad_norm": 2.9177560806274414,
"learning_rate": 4.5107778819119026e-05,
"loss": 0.346,
"step": 2230
},
{
"epoch": 6.02,
"grad_norm": 5.294494152069092,
"learning_rate": 4.5060918462980325e-05,
"loss": 0.4555,
"step": 2240
},
{
"epoch": 6.02,
"grad_norm": 3.4280591011047363,
"learning_rate": 4.501405810684162e-05,
"loss": 0.3906,
"step": 2250
},
{
"epoch": 6.02,
"grad_norm": 5.682974338531494,
"learning_rate": 4.49671977507029e-05,
"loss": 0.3501,
"step": 2260
},
{
"epoch": 6.02,
"grad_norm": 14.726093292236328,
"learning_rate": 4.49203373945642e-05,
"loss": 0.6106,
"step": 2270
},
{
"epoch": 6.03,
"grad_norm": 6.266767501831055,
"learning_rate": 4.4873477038425494e-05,
"loss": 0.4206,
"step": 2280
},
{
"epoch": 6.03,
"grad_norm": 5.161693572998047,
"learning_rate": 4.4826616682286786e-05,
"loss": 0.5073,
"step": 2290
},
{
"epoch": 6.03,
"grad_norm": 3.678534746170044,
"learning_rate": 4.477975632614808e-05,
"loss": 0.4903,
"step": 2300
},
{
"epoch": 6.03,
"grad_norm": 3.0561232566833496,
"learning_rate": 4.473289597000938e-05,
"loss": 0.3614,
"step": 2310
},
{
"epoch": 6.03,
"eval_accuracy": 0.7363896848137536,
"eval_loss": 0.5283002853393555,
"eval_runtime": 34.2167,
"eval_samples_per_second": 20.399,
"eval_steps_per_second": 1.724,
"step": 2310
},
{
"epoch": 7.0,
"grad_norm": 4.80795431137085,
"learning_rate": 4.468603561387067e-05,
"loss": 0.342,
"step": 2320
},
{
"epoch": 7.0,
"grad_norm": 5.0758748054504395,
"learning_rate": 4.463917525773196e-05,
"loss": 0.3799,
"step": 2330
},
{
"epoch": 7.0,
"grad_norm": 7.944527626037598,
"learning_rate": 4.4592314901593255e-05,
"loss": 0.3295,
"step": 2340
},
{
"epoch": 7.0,
"grad_norm": 8.439969062805176,
"learning_rate": 4.454545454545455e-05,
"loss": 0.4199,
"step": 2350
},
{
"epoch": 7.0,
"grad_norm": 3.974719762802124,
"learning_rate": 4.449859418931584e-05,
"loss": 0.3785,
"step": 2360
},
{
"epoch": 7.01,
"grad_norm": 6.712285995483398,
"learning_rate": 4.445173383317713e-05,
"loss": 0.3202,
"step": 2370
},
{
"epoch": 7.01,
"grad_norm": 0.3726706802845001,
"learning_rate": 4.440487347703843e-05,
"loss": 0.1409,
"step": 2380
},
{
"epoch": 7.01,
"grad_norm": 9.558029174804688,
"learning_rate": 4.435801312089972e-05,
"loss": 0.4147,
"step": 2390
},
{
"epoch": 7.01,
"grad_norm": 3.2590224742889404,
"learning_rate": 4.4311152764761015e-05,
"loss": 0.2469,
"step": 2400
},
{
"epoch": 7.01,
"grad_norm": 8.232924461364746,
"learning_rate": 4.426429240862231e-05,
"loss": 0.229,
"step": 2410
},
{
"epoch": 7.01,
"grad_norm": 11.747199058532715,
"learning_rate": 4.42174320524836e-05,
"loss": 0.3219,
"step": 2420
},
{
"epoch": 7.01,
"grad_norm": 12.6619234085083,
"learning_rate": 4.417057169634489e-05,
"loss": 0.4654,
"step": 2430
},
{
"epoch": 7.01,
"grad_norm": 10.572305679321289,
"learning_rate": 4.4123711340206185e-05,
"loss": 0.4145,
"step": 2440
},
{
"epoch": 7.01,
"grad_norm": 7.648813247680664,
"learning_rate": 4.407685098406748e-05,
"loss": 0.3939,
"step": 2450
},
{
"epoch": 7.01,
"grad_norm": 7.876673698425293,
"learning_rate": 4.4029990627928776e-05,
"loss": 0.4002,
"step": 2460
},
{
"epoch": 7.01,
"grad_norm": 2.9009244441986084,
"learning_rate": 4.398313027179007e-05,
"loss": 0.3665,
"step": 2470
},
{
"epoch": 7.01,
"grad_norm": 4.1332597732543945,
"learning_rate": 4.393626991565136e-05,
"loss": 0.315,
"step": 2480
},
{
"epoch": 7.02,
"grad_norm": 8.535113334655762,
"learning_rate": 4.388940955951265e-05,
"loss": 0.3637,
"step": 2490
},
{
"epoch": 7.02,
"grad_norm": 6.837998390197754,
"learning_rate": 4.384254920337395e-05,
"loss": 0.2805,
"step": 2500
},
{
"epoch": 7.02,
"grad_norm": 9.94541072845459,
"learning_rate": 4.379568884723524e-05,
"loss": 0.3765,
"step": 2510
},
{
"epoch": 7.02,
"grad_norm": 8.577096939086914,
"learning_rate": 4.374882849109653e-05,
"loss": 0.3092,
"step": 2520
},
{
"epoch": 7.02,
"grad_norm": 4.8793253898620605,
"learning_rate": 4.370196813495783e-05,
"loss": 0.2715,
"step": 2530
},
{
"epoch": 7.02,
"grad_norm": 6.965157508850098,
"learning_rate": 4.365510777881912e-05,
"loss": 0.4944,
"step": 2540
},
{
"epoch": 7.02,
"grad_norm": 2.5626721382141113,
"learning_rate": 4.3608247422680414e-05,
"loss": 0.311,
"step": 2550
},
{
"epoch": 7.02,
"grad_norm": 5.375543594360352,
"learning_rate": 4.3561387066541706e-05,
"loss": 0.4302,
"step": 2560
},
{
"epoch": 7.02,
"grad_norm": 8.762489318847656,
"learning_rate": 4.3514526710403005e-05,
"loss": 0.2687,
"step": 2570
},
{
"epoch": 7.02,
"grad_norm": 6.056619167327881,
"learning_rate": 4.34676663542643e-05,
"loss": 0.3575,
"step": 2580
},
{
"epoch": 7.02,
"grad_norm": 5.398069858551025,
"learning_rate": 4.342080599812558e-05,
"loss": 0.2445,
"step": 2590
},
{
"epoch": 7.02,
"grad_norm": 6.936893939971924,
"learning_rate": 4.337394564198688e-05,
"loss": 0.3377,
"step": 2600
},
{
"epoch": 7.03,
"grad_norm": 6.378572463989258,
"learning_rate": 4.3327085285848174e-05,
"loss": 0.3935,
"step": 2610
},
{
"epoch": 7.03,
"grad_norm": 15.028035163879395,
"learning_rate": 4.328022492970947e-05,
"loss": 0.238,
"step": 2620
},
{
"epoch": 7.03,
"grad_norm": 4.001184463500977,
"learning_rate": 4.323336457357076e-05,
"loss": 0.3672,
"step": 2630
},
{
"epoch": 7.03,
"grad_norm": 0.5959993600845337,
"learning_rate": 4.318650421743206e-05,
"loss": 0.3514,
"step": 2640
},
{
"epoch": 7.03,
"eval_accuracy": 0.7406876790830945,
"eval_loss": 0.6699703335762024,
"eval_runtime": 33.8438,
"eval_samples_per_second": 20.624,
"eval_steps_per_second": 1.743,
"step": 2640
},
{
"epoch": 8.0,
"grad_norm": 6.682882308959961,
"learning_rate": 4.313964386129335e-05,
"loss": 0.3878,
"step": 2650
},
{
"epoch": 8.0,
"grad_norm": 8.971117973327637,
"learning_rate": 4.309278350515464e-05,
"loss": 0.3186,
"step": 2660
},
{
"epoch": 8.0,
"grad_norm": 7.282140731811523,
"learning_rate": 4.3045923149015935e-05,
"loss": 0.2842,
"step": 2670
},
{
"epoch": 8.0,
"grad_norm": 10.455528259277344,
"learning_rate": 4.299906279287723e-05,
"loss": 0.3175,
"step": 2680
},
{
"epoch": 8.0,
"grad_norm": 8.218666076660156,
"learning_rate": 4.295220243673852e-05,
"loss": 0.3408,
"step": 2690
},
{
"epoch": 8.01,
"grad_norm": 8.701309204101562,
"learning_rate": 4.290534208059981e-05,
"loss": 0.2705,
"step": 2700
},
{
"epoch": 8.01,
"grad_norm": 7.51228666305542,
"learning_rate": 4.285848172446111e-05,
"loss": 0.3086,
"step": 2710
},
{
"epoch": 8.01,
"grad_norm": 1.298295497894287,
"learning_rate": 4.2811621368322403e-05,
"loss": 0.2298,
"step": 2720
},
{
"epoch": 8.01,
"grad_norm": 9.289319038391113,
"learning_rate": 4.2764761012183696e-05,
"loss": 0.2307,
"step": 2730
},
{
"epoch": 8.01,
"grad_norm": 10.216160774230957,
"learning_rate": 4.271790065604499e-05,
"loss": 0.2715,
"step": 2740
},
{
"epoch": 8.01,
"grad_norm": 0.6326087117195129,
"learning_rate": 4.267104029990628e-05,
"loss": 0.224,
"step": 2750
},
{
"epoch": 8.01,
"grad_norm": 0.4399392306804657,
"learning_rate": 4.262417994376757e-05,
"loss": 0.2329,
"step": 2760
},
{
"epoch": 8.01,
"grad_norm": 18.67351722717285,
"learning_rate": 4.2577319587628865e-05,
"loss": 0.3202,
"step": 2770
},
{
"epoch": 8.01,
"grad_norm": 11.18542194366455,
"learning_rate": 4.253045923149016e-05,
"loss": 0.3705,
"step": 2780
},
{
"epoch": 8.01,
"grad_norm": 1.94206702709198,
"learning_rate": 4.2483598875351456e-05,
"loss": 0.4143,
"step": 2790
},
{
"epoch": 8.01,
"grad_norm": 6.226709365844727,
"learning_rate": 4.243673851921275e-05,
"loss": 0.31,
"step": 2800
},
{
"epoch": 8.01,
"grad_norm": 14.290252685546875,
"learning_rate": 4.238987816307404e-05,
"loss": 0.2697,
"step": 2810
},
{
"epoch": 8.02,
"grad_norm": 14.262323379516602,
"learning_rate": 4.234301780693533e-05,
"loss": 0.3874,
"step": 2820
},
{
"epoch": 8.02,
"grad_norm": 9.656635284423828,
"learning_rate": 4.229615745079663e-05,
"loss": 0.2919,
"step": 2830
},
{
"epoch": 8.02,
"grad_norm": 3.8038530349731445,
"learning_rate": 4.224929709465792e-05,
"loss": 0.3939,
"step": 2840
},
{
"epoch": 8.02,
"grad_norm": 3.729642152786255,
"learning_rate": 4.220243673851921e-05,
"loss": 0.2423,
"step": 2850
},
{
"epoch": 8.02,
"grad_norm": 4.79857873916626,
"learning_rate": 4.215557638238051e-05,
"loss": 0.3128,
"step": 2860
},
{
"epoch": 8.02,
"grad_norm": 6.748260498046875,
"learning_rate": 4.21087160262418e-05,
"loss": 0.3235,
"step": 2870
},
{
"epoch": 8.02,
"grad_norm": 10.759824752807617,
"learning_rate": 4.2061855670103094e-05,
"loss": 0.285,
"step": 2880
},
{
"epoch": 8.02,
"grad_norm": 6.050924777984619,
"learning_rate": 4.2014995313964386e-05,
"loss": 0.3038,
"step": 2890
},
{
"epoch": 8.02,
"grad_norm": 3.649509906768799,
"learning_rate": 4.1968134957825685e-05,
"loss": 0.2936,
"step": 2900
},
{
"epoch": 8.02,
"grad_norm": 6.332077980041504,
"learning_rate": 4.192127460168698e-05,
"loss": 0.3153,
"step": 2910
},
{
"epoch": 8.02,
"grad_norm": 3.2886719703674316,
"learning_rate": 4.187441424554826e-05,
"loss": 0.2492,
"step": 2920
},
{
"epoch": 8.02,
"grad_norm": 11.733613014221191,
"learning_rate": 4.182755388940956e-05,
"loss": 0.2669,
"step": 2930
},
{
"epoch": 8.03,
"grad_norm": 3.867345094680786,
"learning_rate": 4.1780693533270855e-05,
"loss": 0.3001,
"step": 2940
},
{
"epoch": 8.03,
"grad_norm": 7.281511306762695,
"learning_rate": 4.173383317713215e-05,
"loss": 0.3297,
"step": 2950
},
{
"epoch": 8.03,
"grad_norm": 10.021977424621582,
"learning_rate": 4.168697282099344e-05,
"loss": 0.2637,
"step": 2960
},
{
"epoch": 8.03,
"grad_norm": 7.1175971031188965,
"learning_rate": 4.164011246485474e-05,
"loss": 0.2796,
"step": 2970
},
{
"epoch": 8.03,
"eval_accuracy": 0.7177650429799427,
"eval_loss": 0.7974393367767334,
"eval_runtime": 34.0885,
"eval_samples_per_second": 20.476,
"eval_steps_per_second": 1.731,
"step": 2970
},
{
"epoch": 9.0,
"grad_norm": 7.273170471191406,
"learning_rate": 4.159325210871603e-05,
"loss": 0.2985,
"step": 2980
},
{
"epoch": 9.0,
"grad_norm": 12.718881607055664,
"learning_rate": 4.154639175257732e-05,
"loss": 0.2799,
"step": 2990
},
{
"epoch": 9.0,
"grad_norm": 12.384184837341309,
"learning_rate": 4.1499531396438615e-05,
"loss": 0.2361,
"step": 3000
},
{
"epoch": 9.0,
"grad_norm": 4.5740156173706055,
"learning_rate": 4.145267104029991e-05,
"loss": 0.1935,
"step": 3010
},
{
"epoch": 9.0,
"grad_norm": 4.967354774475098,
"learning_rate": 4.14058106841612e-05,
"loss": 0.2195,
"step": 3020
},
{
"epoch": 9.01,
"grad_norm": 2.65854811668396,
"learning_rate": 4.135895032802249e-05,
"loss": 0.1492,
"step": 3030
},
{
"epoch": 9.01,
"grad_norm": 22.392955780029297,
"learning_rate": 4.131208997188379e-05,
"loss": 0.3631,
"step": 3040
},
{
"epoch": 9.01,
"grad_norm": 8.278757095336914,
"learning_rate": 4.1265229615745084e-05,
"loss": 0.201,
"step": 3050
},
{
"epoch": 9.01,
"grad_norm": 10.572164535522461,
"learning_rate": 4.1218369259606376e-05,
"loss": 0.285,
"step": 3060
},
{
"epoch": 9.01,
"grad_norm": 7.999050617218018,
"learning_rate": 4.117150890346767e-05,
"loss": 0.202,
"step": 3070
},
{
"epoch": 9.01,
"grad_norm": 0.6098045110702515,
"learning_rate": 4.112464854732897e-05,
"loss": 0.3136,
"step": 3080
},
{
"epoch": 9.01,
"grad_norm": 13.311070442199707,
"learning_rate": 4.107778819119025e-05,
"loss": 0.2286,
"step": 3090
},
{
"epoch": 9.01,
"grad_norm": 14.218550682067871,
"learning_rate": 4.1030927835051545e-05,
"loss": 0.1248,
"step": 3100
},
{
"epoch": 9.01,
"grad_norm": 19.886507034301758,
"learning_rate": 4.098406747891284e-05,
"loss": 0.2682,
"step": 3110
},
{
"epoch": 9.01,
"grad_norm": 15.638948440551758,
"learning_rate": 4.093720712277414e-05,
"loss": 0.2751,
"step": 3120
},
{
"epoch": 9.01,
"grad_norm": 16.060192108154297,
"learning_rate": 4.089034676663543e-05,
"loss": 0.1631,
"step": 3130
},
{
"epoch": 9.01,
"grad_norm": 16.14665412902832,
"learning_rate": 4.084348641049672e-05,
"loss": 0.21,
"step": 3140
},
{
"epoch": 9.02,
"grad_norm": 12.38498592376709,
"learning_rate": 4.079662605435802e-05,
"loss": 0.2409,
"step": 3150
},
{
"epoch": 9.02,
"grad_norm": 21.296875,
"learning_rate": 4.074976569821931e-05,
"loss": 0.3526,
"step": 3160
},
{
"epoch": 9.02,
"grad_norm": 12.729432106018066,
"learning_rate": 4.07029053420806e-05,
"loss": 0.3236,
"step": 3170
},
{
"epoch": 9.02,
"grad_norm": 5.113853931427002,
"learning_rate": 4.065604498594189e-05,
"loss": 0.116,
"step": 3180
},
{
"epoch": 9.02,
"grad_norm": 15.853752136230469,
"learning_rate": 4.060918462980319e-05,
"loss": 0.3694,
"step": 3190
},
{
"epoch": 9.02,
"grad_norm": 3.868764877319336,
"learning_rate": 4.056232427366448e-05,
"loss": 0.1406,
"step": 3200
},
{
"epoch": 9.02,
"grad_norm": 3.905461072921753,
"learning_rate": 4.0515463917525774e-05,
"loss": 0.2263,
"step": 3210
},
{
"epoch": 9.02,
"grad_norm": 9.867708206176758,
"learning_rate": 4.0468603561387067e-05,
"loss": 0.241,
"step": 3220
},
{
"epoch": 9.02,
"grad_norm": 7.449389457702637,
"learning_rate": 4.0421743205248366e-05,
"loss": 0.3362,
"step": 3230
},
{
"epoch": 9.02,
"grad_norm": 6.9868292808532715,
"learning_rate": 4.037488284910966e-05,
"loss": 0.2709,
"step": 3240
},
{
"epoch": 9.02,
"grad_norm": 9.967850685119629,
"learning_rate": 4.0328022492970944e-05,
"loss": 0.3062,
"step": 3250
},
{
"epoch": 9.02,
"grad_norm": 6.262727737426758,
"learning_rate": 4.028116213683224e-05,
"loss": 0.2394,
"step": 3260
},
{
"epoch": 9.03,
"grad_norm": 12.531270980834961,
"learning_rate": 4.0234301780693535e-05,
"loss": 0.3621,
"step": 3270
},
{
"epoch": 9.03,
"grad_norm": 8.992213249206543,
"learning_rate": 4.018744142455483e-05,
"loss": 0.1948,
"step": 3280
},
{
"epoch": 9.03,
"grad_norm": 6.379027843475342,
"learning_rate": 4.014058106841612e-05,
"loss": 0.1911,
"step": 3290
},
{
"epoch": 9.03,
"grad_norm": 23.431751251220703,
"learning_rate": 4.009372071227742e-05,
"loss": 0.236,
"step": 3300
},
{
"epoch": 9.03,
"eval_accuracy": 0.7722063037249284,
"eval_loss": 0.6850898265838623,
"eval_runtime": 33.99,
"eval_samples_per_second": 20.535,
"eval_steps_per_second": 1.736,
"step": 3300
},
{
"epoch": 10.0,
"grad_norm": 14.146215438842773,
"learning_rate": 4.004686035613871e-05,
"loss": 0.2476,
"step": 3310
},
{
"epoch": 10.0,
"grad_norm": 6.308194160461426,
"learning_rate": 4e-05,
"loss": 0.1314,
"step": 3320
},
{
"epoch": 10.0,
"grad_norm": 12.289512634277344,
"learning_rate": 3.9953139643861296e-05,
"loss": 0.1203,
"step": 3330
},
{
"epoch": 10.0,
"grad_norm": 15.842235565185547,
"learning_rate": 3.990627928772259e-05,
"loss": 0.2797,
"step": 3340
},
{
"epoch": 10.0,
"grad_norm": 1.5130534172058105,
"learning_rate": 3.985941893158388e-05,
"loss": 0.1283,
"step": 3350
},
{
"epoch": 10.01,
"grad_norm": 11.302055358886719,
"learning_rate": 3.981255857544517e-05,
"loss": 0.3701,
"step": 3360
},
{
"epoch": 10.01,
"grad_norm": 15.355897903442383,
"learning_rate": 3.976569821930647e-05,
"loss": 0.4371,
"step": 3370
},
{
"epoch": 10.01,
"grad_norm": 12.716096878051758,
"learning_rate": 3.9718837863167764e-05,
"loss": 0.2433,
"step": 3380
},
{
"epoch": 10.01,
"grad_norm": 3.5532474517822266,
"learning_rate": 3.9671977507029056e-05,
"loss": 0.0855,
"step": 3390
},
{
"epoch": 10.01,
"grad_norm": 1.783897876739502,
"learning_rate": 3.962511715089035e-05,
"loss": 0.1193,
"step": 3400
},
{
"epoch": 10.01,
"grad_norm": 8.46875286102295,
"learning_rate": 3.957825679475165e-05,
"loss": 0.2303,
"step": 3410
},
{
"epoch": 10.01,
"grad_norm": 16.5199031829834,
"learning_rate": 3.953139643861293e-05,
"loss": 0.254,
"step": 3420
},
{
"epoch": 10.01,
"grad_norm": 0.3168381452560425,
"learning_rate": 3.9484536082474226e-05,
"loss": 0.1953,
"step": 3430
},
{
"epoch": 10.01,
"grad_norm": 16.903932571411133,
"learning_rate": 3.943767572633552e-05,
"loss": 0.35,
"step": 3440
},
{
"epoch": 10.01,
"grad_norm": 8.773260116577148,
"learning_rate": 3.939081537019682e-05,
"loss": 0.2684,
"step": 3450
},
{
"epoch": 10.01,
"grad_norm": 12.934499740600586,
"learning_rate": 3.934395501405811e-05,
"loss": 0.2493,
"step": 3460
},
{
"epoch": 10.01,
"grad_norm": 11.468120574951172,
"learning_rate": 3.92970946579194e-05,
"loss": 0.154,
"step": 3470
},
{
"epoch": 10.02,
"grad_norm": 0.31763386726379395,
"learning_rate": 3.92502343017807e-05,
"loss": 0.1635,
"step": 3480
},
{
"epoch": 10.02,
"grad_norm": 0.5305169224739075,
"learning_rate": 3.920337394564199e-05,
"loss": 0.1666,
"step": 3490
},
{
"epoch": 10.02,
"grad_norm": 23.478593826293945,
"learning_rate": 3.915651358950328e-05,
"loss": 0.1499,
"step": 3500
},
{
"epoch": 10.02,
"grad_norm": 0.9787814021110535,
"learning_rate": 3.910965323336457e-05,
"loss": 0.3277,
"step": 3510
},
{
"epoch": 10.02,
"grad_norm": 10.264555931091309,
"learning_rate": 3.906279287722587e-05,
"loss": 0.2312,
"step": 3520
},
{
"epoch": 10.02,
"grad_norm": 1.865168571472168,
"learning_rate": 3.901593252108716e-05,
"loss": 0.2381,
"step": 3530
},
{
"epoch": 10.02,
"grad_norm": 13.4818754196167,
"learning_rate": 3.8969072164948455e-05,
"loss": 0.263,
"step": 3540
},
{
"epoch": 10.02,
"grad_norm": 11.58323860168457,
"learning_rate": 3.892221180880975e-05,
"loss": 0.2925,
"step": 3550
},
{
"epoch": 10.02,
"grad_norm": 6.375723838806152,
"learning_rate": 3.8875351452671046e-05,
"loss": 0.2063,
"step": 3560
},
{
"epoch": 10.02,
"grad_norm": 6.340160846710205,
"learning_rate": 3.882849109653234e-05,
"loss": 0.179,
"step": 3570
},
{
"epoch": 10.02,
"grad_norm": 1.2288185358047485,
"learning_rate": 3.8781630740393624e-05,
"loss": 0.1231,
"step": 3580
},
{
"epoch": 10.02,
"grad_norm": 4.222806453704834,
"learning_rate": 3.873477038425492e-05,
"loss": 0.198,
"step": 3590
},
{
"epoch": 10.03,
"grad_norm": 12.188648223876953,
"learning_rate": 3.8687910028116215e-05,
"loss": 0.0827,
"step": 3600
},
{
"epoch": 10.03,
"grad_norm": 16.154399871826172,
"learning_rate": 3.864104967197751e-05,
"loss": 0.2194,
"step": 3610
},
{
"epoch": 10.03,
"grad_norm": 25.385496139526367,
"learning_rate": 3.85941893158388e-05,
"loss": 0.2496,
"step": 3620
},
{
"epoch": 10.03,
"grad_norm": 0.44989219307899475,
"learning_rate": 3.85473289597001e-05,
"loss": 0.2066,
"step": 3630
},
{
"epoch": 10.03,
"eval_accuracy": 0.7707736389684814,
"eval_loss": 0.7625312805175781,
"eval_runtime": 34.4357,
"eval_samples_per_second": 20.27,
"eval_steps_per_second": 1.713,
"step": 3630
},
{
"epoch": 11.0,
"grad_norm": 15.097335815429688,
"learning_rate": 3.850046860356139e-05,
"loss": 0.1422,
"step": 3640
},
{
"epoch": 11.0,
"grad_norm": 8.706366539001465,
"learning_rate": 3.8453608247422684e-05,
"loss": 0.1621,
"step": 3650
},
{
"epoch": 11.0,
"grad_norm": 6.461037635803223,
"learning_rate": 3.8406747891283976e-05,
"loss": 0.2252,
"step": 3660
},
{
"epoch": 11.0,
"grad_norm": 18.602127075195312,
"learning_rate": 3.835988753514527e-05,
"loss": 0.3387,
"step": 3670
},
{
"epoch": 11.0,
"grad_norm": 13.997271537780762,
"learning_rate": 3.831302717900656e-05,
"loss": 0.2751,
"step": 3680
},
{
"epoch": 11.01,
"grad_norm": 6.304378986358643,
"learning_rate": 3.826616682286785e-05,
"loss": 0.0713,
"step": 3690
},
{
"epoch": 11.01,
"grad_norm": 14.0118408203125,
"learning_rate": 3.821930646672915e-05,
"loss": 0.1648,
"step": 3700
},
{
"epoch": 11.01,
"grad_norm": 22.292020797729492,
"learning_rate": 3.8172446110590444e-05,
"loss": 0.187,
"step": 3710
},
{
"epoch": 11.01,
"grad_norm": 3.986797332763672,
"learning_rate": 3.8125585754451737e-05,
"loss": 0.244,
"step": 3720
},
{
"epoch": 11.01,
"grad_norm": 14.314979553222656,
"learning_rate": 3.807872539831303e-05,
"loss": 0.2893,
"step": 3730
},
{
"epoch": 11.01,
"grad_norm": 5.869822978973389,
"learning_rate": 3.803186504217433e-05,
"loss": 0.2231,
"step": 3740
},
{
"epoch": 11.01,
"grad_norm": 2.076101303100586,
"learning_rate": 3.7985004686035613e-05,
"loss": 0.2073,
"step": 3750
},
{
"epoch": 11.01,
"grad_norm": 0.970169186592102,
"learning_rate": 3.7938144329896906e-05,
"loss": 0.0698,
"step": 3760
},
{
"epoch": 11.01,
"grad_norm": 6.208909034729004,
"learning_rate": 3.7891283973758205e-05,
"loss": 0.1289,
"step": 3770
},
{
"epoch": 11.01,
"grad_norm": 17.37769317626953,
"learning_rate": 3.78444236176195e-05,
"loss": 0.1264,
"step": 3780
},
{
"epoch": 11.01,
"grad_norm": 4.121638298034668,
"learning_rate": 3.779756326148079e-05,
"loss": 0.2226,
"step": 3790
},
{
"epoch": 11.01,
"grad_norm": 0.10288643091917038,
"learning_rate": 3.775070290534208e-05,
"loss": 0.2066,
"step": 3800
},
{
"epoch": 11.02,
"grad_norm": 7.0988240242004395,
"learning_rate": 3.770384254920338e-05,
"loss": 0.2174,
"step": 3810
},
{
"epoch": 11.02,
"grad_norm": 10.114404678344727,
"learning_rate": 3.765698219306467e-05,
"loss": 0.3379,
"step": 3820
},
{
"epoch": 11.02,
"grad_norm": 4.689489364624023,
"learning_rate": 3.761012183692596e-05,
"loss": 0.1015,
"step": 3830
},
{
"epoch": 11.02,
"grad_norm": 16.416460037231445,
"learning_rate": 3.756326148078725e-05,
"loss": 0.1955,
"step": 3840
},
{
"epoch": 11.02,
"grad_norm": 23.091764450073242,
"learning_rate": 3.751640112464855e-05,
"loss": 0.3026,
"step": 3850
},
{
"epoch": 11.02,
"grad_norm": 8.67167854309082,
"learning_rate": 3.746954076850984e-05,
"loss": 0.197,
"step": 3860
},
{
"epoch": 11.02,
"grad_norm": 6.001295566558838,
"learning_rate": 3.7422680412371135e-05,
"loss": 0.2166,
"step": 3870
},
{
"epoch": 11.02,
"grad_norm": 7.23835563659668,
"learning_rate": 3.737582005623243e-05,
"loss": 0.3391,
"step": 3880
},
{
"epoch": 11.02,
"grad_norm": 5.40344762802124,
"learning_rate": 3.7328959700093726e-05,
"loss": 0.1672,
"step": 3890
},
{
"epoch": 11.02,
"grad_norm": 6.540796756744385,
"learning_rate": 3.728209934395502e-05,
"loss": 0.0776,
"step": 3900
},
{
"epoch": 11.02,
"grad_norm": 0.964383065700531,
"learning_rate": 3.7235238987816304e-05,
"loss": 0.1802,
"step": 3910
},
{
"epoch": 11.02,
"grad_norm": 0.4158385396003723,
"learning_rate": 3.71883786316776e-05,
"loss": 0.0695,
"step": 3920
},
{
"epoch": 11.03,
"grad_norm": 17.69159698486328,
"learning_rate": 3.7141518275538895e-05,
"loss": 0.2484,
"step": 3930
},
{
"epoch": 11.03,
"grad_norm": 13.761899948120117,
"learning_rate": 3.709465791940019e-05,
"loss": 0.2431,
"step": 3940
},
{
"epoch": 11.03,
"grad_norm": 0.3444725275039673,
"learning_rate": 3.704779756326148e-05,
"loss": 0.2122,
"step": 3950
},
{
"epoch": 11.03,
"grad_norm": 3.0498321056365967,
"learning_rate": 3.700093720712278e-05,
"loss": 0.2831,
"step": 3960
},
{
"epoch": 11.03,
"eval_accuracy": 0.7707736389684814,
"eval_loss": 0.79632568359375,
"eval_runtime": 33.5427,
"eval_samples_per_second": 20.809,
"eval_steps_per_second": 1.759,
"step": 3960
},
{
"epoch": 12.0,
"grad_norm": 0.13173483312129974,
"learning_rate": 3.695407685098407e-05,
"loss": 0.0471,
"step": 3970
},
{
"epoch": 12.0,
"grad_norm": 18.985239028930664,
"learning_rate": 3.6907216494845364e-05,
"loss": 0.0531,
"step": 3980
},
{
"epoch": 12.0,
"grad_norm": 9.530562400817871,
"learning_rate": 3.6860356138706656e-05,
"loss": 0.2334,
"step": 3990
},
{
"epoch": 12.0,
"grad_norm": 4.545018672943115,
"learning_rate": 3.681349578256795e-05,
"loss": 0.0164,
"step": 4000
},
{
"epoch": 12.0,
"grad_norm": 11.210469245910645,
"learning_rate": 3.676663542642924e-05,
"loss": 0.2759,
"step": 4010
},
{
"epoch": 12.01,
"grad_norm": 15.611169815063477,
"learning_rate": 3.671977507029053e-05,
"loss": 0.1851,
"step": 4020
},
{
"epoch": 12.01,
"grad_norm": 12.470063209533691,
"learning_rate": 3.667291471415183e-05,
"loss": 0.2275,
"step": 4030
},
{
"epoch": 12.01,
"grad_norm": 9.493141174316406,
"learning_rate": 3.6626054358013124e-05,
"loss": 0.1243,
"step": 4040
},
{
"epoch": 12.01,
"grad_norm": 0.25132325291633606,
"learning_rate": 3.657919400187442e-05,
"loss": 0.0885,
"step": 4050
},
{
"epoch": 12.01,
"grad_norm": 28.46393394470215,
"learning_rate": 3.653233364573571e-05,
"loss": 0.261,
"step": 4060
},
{
"epoch": 12.01,
"grad_norm": 21.371795654296875,
"learning_rate": 3.648547328959701e-05,
"loss": 0.1305,
"step": 4070
},
{
"epoch": 12.01,
"grad_norm": 0.9401485323905945,
"learning_rate": 3.6438612933458294e-05,
"loss": 0.1798,
"step": 4080
},
{
"epoch": 12.01,
"grad_norm": 23.651334762573242,
"learning_rate": 3.6391752577319586e-05,
"loss": 0.1997,
"step": 4090
},
{
"epoch": 12.01,
"grad_norm": 0.2696411609649658,
"learning_rate": 3.6344892221180885e-05,
"loss": 0.1279,
"step": 4100
},
{
"epoch": 12.01,
"grad_norm": 26.360898971557617,
"learning_rate": 3.629803186504218e-05,
"loss": 0.2128,
"step": 4110
},
{
"epoch": 12.01,
"grad_norm": 5.624836444854736,
"learning_rate": 3.625117150890347e-05,
"loss": 0.2319,
"step": 4120
},
{
"epoch": 12.01,
"grad_norm": 9.12028980255127,
"learning_rate": 3.620431115276476e-05,
"loss": 0.1358,
"step": 4130
},
{
"epoch": 12.02,
"grad_norm": 11.657383918762207,
"learning_rate": 3.615745079662606e-05,
"loss": 0.0597,
"step": 4140
},
{
"epoch": 12.02,
"grad_norm": 9.352867126464844,
"learning_rate": 3.6110590440487353e-05,
"loss": 0.1819,
"step": 4150
},
{
"epoch": 12.02,
"grad_norm": 33.10483932495117,
"learning_rate": 3.606373008434864e-05,
"loss": 0.2577,
"step": 4160
},
{
"epoch": 12.02,
"grad_norm": 7.759758472442627,
"learning_rate": 3.601686972820993e-05,
"loss": 0.1209,
"step": 4170
},
{
"epoch": 12.02,
"grad_norm": 1.226152777671814,
"learning_rate": 3.597000937207123e-05,
"loss": 0.2366,
"step": 4180
},
{
"epoch": 12.02,
"grad_norm": 4.4371185302734375,
"learning_rate": 3.592314901593252e-05,
"loss": 0.1248,
"step": 4190
},
{
"epoch": 12.02,
"grad_norm": 29.921714782714844,
"learning_rate": 3.5876288659793815e-05,
"loss": 0.3106,
"step": 4200
},
{
"epoch": 12.02,
"grad_norm": 12.740315437316895,
"learning_rate": 3.582942830365511e-05,
"loss": 0.1804,
"step": 4210
},
{
"epoch": 12.02,
"grad_norm": 23.39232635498047,
"learning_rate": 3.5782567947516406e-05,
"loss": 0.134,
"step": 4220
},
{
"epoch": 12.02,
"grad_norm": 5.175128936767578,
"learning_rate": 3.57357075913777e-05,
"loss": 0.2728,
"step": 4230
},
{
"epoch": 12.02,
"grad_norm": 18.326560974121094,
"learning_rate": 3.5688847235238984e-05,
"loss": 0.1816,
"step": 4240
},
{
"epoch": 12.02,
"grad_norm": 0.42216312885284424,
"learning_rate": 3.5641986879100283e-05,
"loss": 0.0879,
"step": 4250
},
{
"epoch": 12.03,
"grad_norm": 26.220693588256836,
"learning_rate": 3.5595126522961576e-05,
"loss": 0.094,
"step": 4260
},
{
"epoch": 12.03,
"grad_norm": 2.731444835662842,
"learning_rate": 3.554826616682287e-05,
"loss": 0.0832,
"step": 4270
},
{
"epoch": 12.03,
"grad_norm": 0.9680123925209045,
"learning_rate": 3.550140581068416e-05,
"loss": 0.0811,
"step": 4280
},
{
"epoch": 12.03,
"grad_norm": 69.98845672607422,
"learning_rate": 3.545454545454546e-05,
"loss": 0.1903,
"step": 4290
},
{
"epoch": 12.03,
"eval_accuracy": 0.7722063037249284,
"eval_loss": 1.034330129623413,
"eval_runtime": 34.5588,
"eval_samples_per_second": 20.197,
"eval_steps_per_second": 1.707,
"step": 4290
},
{
"epoch": 13.0,
"grad_norm": 25.59967041015625,
"learning_rate": 3.540768509840675e-05,
"loss": 0.0995,
"step": 4300
},
{
"epoch": 13.0,
"grad_norm": 24.92293357849121,
"learning_rate": 3.5360824742268044e-05,
"loss": 0.3308,
"step": 4310
},
{
"epoch": 13.0,
"grad_norm": 30.706470489501953,
"learning_rate": 3.5313964386129336e-05,
"loss": 0.1114,
"step": 4320
},
{
"epoch": 13.0,
"grad_norm": 1.1878864765167236,
"learning_rate": 3.526710402999063e-05,
"loss": 0.0588,
"step": 4330
},
{
"epoch": 13.0,
"grad_norm": 31.14521026611328,
"learning_rate": 3.522024367385192e-05,
"loss": 0.1238,
"step": 4340
},
{
"epoch": 13.01,
"grad_norm": 5.118488311767578,
"learning_rate": 3.517338331771321e-05,
"loss": 0.1793,
"step": 4350
},
{
"epoch": 13.01,
"grad_norm": 30.64320182800293,
"learning_rate": 3.512652296157451e-05,
"loss": 0.1733,
"step": 4360
},
{
"epoch": 13.01,
"grad_norm": 0.029796045273542404,
"learning_rate": 3.5079662605435805e-05,
"loss": 0.2219,
"step": 4370
},
{
"epoch": 13.01,
"grad_norm": 12.55972671508789,
"learning_rate": 3.50328022492971e-05,
"loss": 0.1043,
"step": 4380
},
{
"epoch": 13.01,
"grad_norm": 6.064866065979004,
"learning_rate": 3.498594189315839e-05,
"loss": 0.1679,
"step": 4390
},
{
"epoch": 13.01,
"grad_norm": 0.1219923123717308,
"learning_rate": 3.493908153701969e-05,
"loss": 0.1371,
"step": 4400
},
{
"epoch": 13.01,
"grad_norm": 7.214325904846191,
"learning_rate": 3.4892221180880974e-05,
"loss": 0.05,
"step": 4410
},
{
"epoch": 13.01,
"grad_norm": 54.42354965209961,
"learning_rate": 3.4845360824742266e-05,
"loss": 0.1519,
"step": 4420
},
{
"epoch": 13.01,
"grad_norm": 0.27510225772857666,
"learning_rate": 3.4798500468603565e-05,
"loss": 0.1608,
"step": 4430
},
{
"epoch": 13.01,
"grad_norm": 0.35241690278053284,
"learning_rate": 3.475164011246486e-05,
"loss": 0.1467,
"step": 4440
},
{
"epoch": 13.01,
"grad_norm": 17.937379837036133,
"learning_rate": 3.470477975632615e-05,
"loss": 0.1619,
"step": 4450
},
{
"epoch": 13.01,
"grad_norm": 13.159626960754395,
"learning_rate": 3.465791940018744e-05,
"loss": 0.1981,
"step": 4460
},
{
"epoch": 13.02,
"grad_norm": 19.368940353393555,
"learning_rate": 3.461105904404874e-05,
"loss": 0.3174,
"step": 4470
},
{
"epoch": 13.02,
"grad_norm": 0.7653603553771973,
"learning_rate": 3.4564198687910034e-05,
"loss": 0.2421,
"step": 4480
},
{
"epoch": 13.02,
"grad_norm": 2.392252206802368,
"learning_rate": 3.451733833177132e-05,
"loss": 0.248,
"step": 4490
},
{
"epoch": 13.02,
"grad_norm": 1.3327248096466064,
"learning_rate": 3.447047797563261e-05,
"loss": 0.0903,
"step": 4500
},
{
"epoch": 13.02,
"grad_norm": 8.378661155700684,
"learning_rate": 3.442361761949391e-05,
"loss": 0.1196,
"step": 4510
},
{
"epoch": 13.02,
"grad_norm": 17.592124938964844,
"learning_rate": 3.43767572633552e-05,
"loss": 0.1543,
"step": 4520
},
{
"epoch": 13.02,
"grad_norm": 22.954425811767578,
"learning_rate": 3.4329896907216495e-05,
"loss": 0.1726,
"step": 4530
},
{
"epoch": 13.02,
"grad_norm": 11.798051834106445,
"learning_rate": 3.428303655107779e-05,
"loss": 0.3255,
"step": 4540
},
{
"epoch": 13.02,
"grad_norm": 20.77471923828125,
"learning_rate": 3.423617619493909e-05,
"loss": 0.2043,
"step": 4550
},
{
"epoch": 13.02,
"grad_norm": 5.149378299713135,
"learning_rate": 3.418931583880038e-05,
"loss": 0.0589,
"step": 4560
},
{
"epoch": 13.02,
"grad_norm": 8.951346397399902,
"learning_rate": 3.4142455482661665e-05,
"loss": 0.207,
"step": 4570
},
{
"epoch": 13.02,
"grad_norm": 0.9169898629188538,
"learning_rate": 3.4095595126522964e-05,
"loss": 0.1265,
"step": 4580
},
{
"epoch": 13.03,
"grad_norm": 13.90592098236084,
"learning_rate": 3.4048734770384256e-05,
"loss": 0.2189,
"step": 4590
},
{
"epoch": 13.03,
"grad_norm": 23.327816009521484,
"learning_rate": 3.400187441424555e-05,
"loss": 0.184,
"step": 4600
},
{
"epoch": 13.03,
"grad_norm": 14.074636459350586,
"learning_rate": 3.395501405810684e-05,
"loss": 0.1103,
"step": 4610
},
{
"epoch": 13.03,
"grad_norm": 9.187418937683105,
"learning_rate": 3.390815370196814e-05,
"loss": 0.1169,
"step": 4620
},
{
"epoch": 13.03,
"eval_accuracy": 0.7865329512893983,
"eval_loss": 0.8527703285217285,
"eval_runtime": 34.2781,
"eval_samples_per_second": 20.363,
"eval_steps_per_second": 1.721,
"step": 4620
},
{
"epoch": 14.0,
"grad_norm": 11.900031089782715,
"learning_rate": 3.386129334582943e-05,
"loss": 0.074,
"step": 4630
},
{
"epoch": 14.0,
"grad_norm": 11.011795043945312,
"learning_rate": 3.3814432989690724e-05,
"loss": 0.126,
"step": 4640
},
{
"epoch": 14.0,
"grad_norm": 23.668609619140625,
"learning_rate": 3.376757263355202e-05,
"loss": 0.0747,
"step": 4650
},
{
"epoch": 14.0,
"grad_norm": 0.17650875449180603,
"learning_rate": 3.372071227741331e-05,
"loss": 0.0751,
"step": 4660
},
{
"epoch": 14.0,
"grad_norm": 5.125329971313477,
"learning_rate": 3.36738519212746e-05,
"loss": 0.2435,
"step": 4670
},
{
"epoch": 14.01,
"grad_norm": 21.372135162353516,
"learning_rate": 3.3626991565135894e-05,
"loss": 0.1759,
"step": 4680
},
{
"epoch": 14.01,
"grad_norm": 0.13896770775318146,
"learning_rate": 3.358013120899719e-05,
"loss": 0.0661,
"step": 4690
},
{
"epoch": 14.01,
"grad_norm": 0.020779293030500412,
"learning_rate": 3.3533270852858485e-05,
"loss": 0.1733,
"step": 4700
},
{
"epoch": 14.01,
"grad_norm": 14.163589477539062,
"learning_rate": 3.348641049671978e-05,
"loss": 0.1317,
"step": 4710
},
{
"epoch": 14.01,
"grad_norm": 1.3063100576400757,
"learning_rate": 3.343955014058107e-05,
"loss": 0.1116,
"step": 4720
},
{
"epoch": 14.01,
"grad_norm": 2.3075990676879883,
"learning_rate": 3.339268978444237e-05,
"loss": 0.0674,
"step": 4730
},
{
"epoch": 14.01,
"grad_norm": 10.278874397277832,
"learning_rate": 3.3345829428303654e-05,
"loss": 0.2285,
"step": 4740
},
{
"epoch": 14.01,
"grad_norm": 28.758888244628906,
"learning_rate": 3.3298969072164947e-05,
"loss": 0.0825,
"step": 4750
},
{
"epoch": 14.01,
"grad_norm": 10.479151725769043,
"learning_rate": 3.3252108716026246e-05,
"loss": 0.1451,
"step": 4760
},
{
"epoch": 14.01,
"grad_norm": 0.03889832645654678,
"learning_rate": 3.320524835988754e-05,
"loss": 0.0653,
"step": 4770
},
{
"epoch": 14.01,
"grad_norm": 1.4351927042007446,
"learning_rate": 3.315838800374883e-05,
"loss": 0.1306,
"step": 4780
},
{
"epoch": 14.01,
"grad_norm": 28.180889129638672,
"learning_rate": 3.311152764761012e-05,
"loss": 0.2123,
"step": 4790
},
{
"epoch": 14.02,
"grad_norm": 17.614906311035156,
"learning_rate": 3.306466729147142e-05,
"loss": 0.0832,
"step": 4800
},
{
"epoch": 14.02,
"grad_norm": 0.06731338798999786,
"learning_rate": 3.3017806935332714e-05,
"loss": 0.0583,
"step": 4810
},
{
"epoch": 14.02,
"grad_norm": 1.4568157196044922,
"learning_rate": 3.2970946579194e-05,
"loss": 0.2127,
"step": 4820
},
{
"epoch": 14.02,
"grad_norm": 7.513918876647949,
"learning_rate": 3.292408622305529e-05,
"loss": 0.1357,
"step": 4830
},
{
"epoch": 14.02,
"grad_norm": 0.8259130120277405,
"learning_rate": 3.287722586691659e-05,
"loss": 0.04,
"step": 4840
},
{
"epoch": 14.02,
"grad_norm": 0.1765613704919815,
"learning_rate": 3.283036551077788e-05,
"loss": 0.086,
"step": 4850
},
{
"epoch": 14.02,
"grad_norm": 15.376742362976074,
"learning_rate": 3.2783505154639176e-05,
"loss": 0.2212,
"step": 4860
},
{
"epoch": 14.02,
"grad_norm": 0.4169343411922455,
"learning_rate": 3.273664479850047e-05,
"loss": 0.0478,
"step": 4870
},
{
"epoch": 14.02,
"grad_norm": 43.11495590209961,
"learning_rate": 3.268978444236177e-05,
"loss": 0.096,
"step": 4880
},
{
"epoch": 14.02,
"grad_norm": 15.717057228088379,
"learning_rate": 3.264292408622306e-05,
"loss": 0.154,
"step": 4890
},
{
"epoch": 14.02,
"grad_norm": 24.132003784179688,
"learning_rate": 3.2596063730084345e-05,
"loss": 0.1402,
"step": 4900
},
{
"epoch": 14.02,
"grad_norm": 9.180221557617188,
"learning_rate": 3.2549203373945644e-05,
"loss": 0.1438,
"step": 4910
},
{
"epoch": 14.03,
"grad_norm": 17.912752151489258,
"learning_rate": 3.2502343017806936e-05,
"loss": 0.3277,
"step": 4920
},
{
"epoch": 14.03,
"grad_norm": 23.05970573425293,
"learning_rate": 3.245548266166823e-05,
"loss": 0.1899,
"step": 4930
},
{
"epoch": 14.03,
"grad_norm": 5.295262813568115,
"learning_rate": 3.240862230552952e-05,
"loss": 0.2962,
"step": 4940
},
{
"epoch": 14.03,
"grad_norm": 0.08439239114522934,
"learning_rate": 3.236176194939082e-05,
"loss": 0.3502,
"step": 4950
},
{
"epoch": 14.03,
"eval_accuracy": 0.7965616045845272,
"eval_loss": 0.9265322089195251,
"eval_runtime": 33.6194,
"eval_samples_per_second": 20.762,
"eval_steps_per_second": 1.755,
"step": 4950
},
{
"epoch": 15.0,
"grad_norm": 27.462636947631836,
"learning_rate": 3.231490159325211e-05,
"loss": 0.1507,
"step": 4960
},
{
"epoch": 15.0,
"grad_norm": 0.03183213621377945,
"learning_rate": 3.2268041237113405e-05,
"loss": 0.0413,
"step": 4970
},
{
"epoch": 15.0,
"grad_norm": 24.36420440673828,
"learning_rate": 3.22211808809747e-05,
"loss": 0.1499,
"step": 4980
},
{
"epoch": 15.0,
"grad_norm": 2.348003387451172,
"learning_rate": 3.217432052483599e-05,
"loss": 0.2091,
"step": 4990
},
{
"epoch": 15.0,
"grad_norm": 14.074554443359375,
"learning_rate": 3.212746016869728e-05,
"loss": 0.1966,
"step": 5000
},
{
"epoch": 15.01,
"grad_norm": 13.325398445129395,
"learning_rate": 3.2080599812558574e-05,
"loss": 0.0982,
"step": 5010
},
{
"epoch": 15.01,
"grad_norm": 0.06735904514789581,
"learning_rate": 3.203373945641987e-05,
"loss": 0.0819,
"step": 5020
},
{
"epoch": 15.01,
"grad_norm": 7.472801685333252,
"learning_rate": 3.1986879100281165e-05,
"loss": 0.1468,
"step": 5030
},
{
"epoch": 15.01,
"grad_norm": 4.233068943023682,
"learning_rate": 3.194001874414246e-05,
"loss": 0.03,
"step": 5040
},
{
"epoch": 15.01,
"grad_norm": 9.645890235900879,
"learning_rate": 3.189315838800375e-05,
"loss": 0.1579,
"step": 5050
},
{
"epoch": 15.01,
"grad_norm": 18.024099349975586,
"learning_rate": 3.184629803186505e-05,
"loss": 0.1401,
"step": 5060
},
{
"epoch": 15.01,
"grad_norm": 38.932167053222656,
"learning_rate": 3.1799437675726335e-05,
"loss": 0.1337,
"step": 5070
},
{
"epoch": 15.01,
"grad_norm": 0.6836027503013611,
"learning_rate": 3.175257731958763e-05,
"loss": 0.1067,
"step": 5080
},
{
"epoch": 15.01,
"grad_norm": 13.472026824951172,
"learning_rate": 3.1705716963448926e-05,
"loss": 0.137,
"step": 5090
},
{
"epoch": 15.01,
"grad_norm": 49.68353271484375,
"learning_rate": 3.165885660731022e-05,
"loss": 0.2201,
"step": 5100
},
{
"epoch": 15.01,
"grad_norm": 0.10768305510282516,
"learning_rate": 3.161199625117151e-05,
"loss": 0.0503,
"step": 5110
},
{
"epoch": 15.01,
"grad_norm": 0.02888442948460579,
"learning_rate": 3.15651358950328e-05,
"loss": 0.2246,
"step": 5120
},
{
"epoch": 15.02,
"grad_norm": 11.260727882385254,
"learning_rate": 3.15182755388941e-05,
"loss": 0.1655,
"step": 5130
},
{
"epoch": 15.02,
"grad_norm": 0.16832049190998077,
"learning_rate": 3.1471415182755394e-05,
"loss": 0.2107,
"step": 5140
},
{
"epoch": 15.02,
"grad_norm": 0.5747199058532715,
"learning_rate": 3.142455482661668e-05,
"loss": 0.1302,
"step": 5150
},
{
"epoch": 15.02,
"grad_norm": 6.570047378540039,
"learning_rate": 3.137769447047797e-05,
"loss": 0.1102,
"step": 5160
},
{
"epoch": 15.02,
"grad_norm": 0.02458810992538929,
"learning_rate": 3.133083411433927e-05,
"loss": 0.0711,
"step": 5170
},
{
"epoch": 15.02,
"grad_norm": 28.233722686767578,
"learning_rate": 3.1283973758200564e-05,
"loss": 0.1243,
"step": 5180
},
{
"epoch": 15.02,
"grad_norm": 34.339900970458984,
"learning_rate": 3.1237113402061856e-05,
"loss": 0.2122,
"step": 5190
},
{
"epoch": 15.02,
"grad_norm": 0.042488761246204376,
"learning_rate": 3.119025304592315e-05,
"loss": 0.0696,
"step": 5200
},
{
"epoch": 15.02,
"grad_norm": 10.747623443603516,
"learning_rate": 3.114339268978445e-05,
"loss": 0.0881,
"step": 5210
},
{
"epoch": 15.02,
"grad_norm": 0.14769670367240906,
"learning_rate": 3.109653233364574e-05,
"loss": 0.2615,
"step": 5220
},
{
"epoch": 15.02,
"grad_norm": 6.627384662628174,
"learning_rate": 3.104967197750703e-05,
"loss": 0.105,
"step": 5230
},
{
"epoch": 15.02,
"grad_norm": 0.06998647749423981,
"learning_rate": 3.1002811621368324e-05,
"loss": 0.1129,
"step": 5240
},
{
"epoch": 15.03,
"grad_norm": 14.90441608428955,
"learning_rate": 3.0955951265229617e-05,
"loss": 0.1303,
"step": 5250
},
{
"epoch": 15.03,
"grad_norm": 0.6457139849662781,
"learning_rate": 3.090909090909091e-05,
"loss": 0.0176,
"step": 5260
},
{
"epoch": 15.03,
"grad_norm": 28.25990867614746,
"learning_rate": 3.08622305529522e-05,
"loss": 0.1868,
"step": 5270
},
{
"epoch": 15.03,
"grad_norm": 18.335277557373047,
"learning_rate": 3.08153701968135e-05,
"loss": 0.1728,
"step": 5280
},
{
"epoch": 15.03,
"eval_accuracy": 0.8209169054441261,
"eval_loss": 0.8522208333015442,
"eval_runtime": 33.7106,
"eval_samples_per_second": 20.706,
"eval_steps_per_second": 1.75,
"step": 5280
},
{
"epoch": 16.0,
"grad_norm": 0.026595309376716614,
"learning_rate": 3.076850984067479e-05,
"loss": 0.0047,
"step": 5290
},
{
"epoch": 16.0,
"grad_norm": 16.674278259277344,
"learning_rate": 3.0721649484536085e-05,
"loss": 0.1096,
"step": 5300
},
{
"epoch": 16.0,
"grad_norm": 1.7340277433395386,
"learning_rate": 3.067478912839738e-05,
"loss": 0.1344,
"step": 5310
},
{
"epoch": 16.0,
"grad_norm": 0.22693070769309998,
"learning_rate": 3.062792877225867e-05,
"loss": 0.1013,
"step": 5320
},
{
"epoch": 16.0,
"grad_norm": 0.07706239074468613,
"learning_rate": 3.058106841611996e-05,
"loss": 0.1392,
"step": 5330
},
{
"epoch": 16.01,
"grad_norm": 18.134891510009766,
"learning_rate": 3.0534208059981254e-05,
"loss": 0.06,
"step": 5340
},
{
"epoch": 16.01,
"grad_norm": 1.382936716079712,
"learning_rate": 3.048734770384255e-05,
"loss": 0.1596,
"step": 5350
},
{
"epoch": 16.01,
"grad_norm": 20.703399658203125,
"learning_rate": 3.0440487347703846e-05,
"loss": 0.0695,
"step": 5360
},
{
"epoch": 16.01,
"grad_norm": 1.053179144859314,
"learning_rate": 3.0393626991565138e-05,
"loss": 0.1434,
"step": 5370
},
{
"epoch": 16.01,
"grad_norm": 0.017900103703141212,
"learning_rate": 3.0346766635426434e-05,
"loss": 0.0591,
"step": 5380
},
{
"epoch": 16.01,
"grad_norm": 0.056633152067661285,
"learning_rate": 3.0299906279287726e-05,
"loss": 0.1733,
"step": 5390
},
{
"epoch": 16.01,
"grad_norm": 0.11656715720891953,
"learning_rate": 3.0253045923149015e-05,
"loss": 0.1159,
"step": 5400
},
{
"epoch": 16.01,
"grad_norm": 2.425400972366333,
"learning_rate": 3.020618556701031e-05,
"loss": 0.1666,
"step": 5410
},
{
"epoch": 16.01,
"grad_norm": 4.859916687011719,
"learning_rate": 3.0159325210871603e-05,
"loss": 0.0466,
"step": 5420
},
{
"epoch": 16.01,
"grad_norm": 29.941722869873047,
"learning_rate": 3.01124648547329e-05,
"loss": 0.1313,
"step": 5430
},
{
"epoch": 16.01,
"grad_norm": 1.4672462940216064,
"learning_rate": 3.006560449859419e-05,
"loss": 0.066,
"step": 5440
},
{
"epoch": 16.01,
"grad_norm": 20.58746910095215,
"learning_rate": 3.0018744142455487e-05,
"loss": 0.0488,
"step": 5450
},
{
"epoch": 16.02,
"grad_norm": 19.143203735351562,
"learning_rate": 2.997188378631678e-05,
"loss": 0.1403,
"step": 5460
},
{
"epoch": 16.02,
"grad_norm": 21.062692642211914,
"learning_rate": 2.9925023430178075e-05,
"loss": 0.0366,
"step": 5470
},
{
"epoch": 16.02,
"grad_norm": 1.4022785425186157,
"learning_rate": 2.987816307403936e-05,
"loss": 0.0344,
"step": 5480
},
{
"epoch": 16.02,
"grad_norm": 0.7755998373031616,
"learning_rate": 2.9831302717900656e-05,
"loss": 0.1005,
"step": 5490
},
{
"epoch": 16.02,
"grad_norm": 0.02092009223997593,
"learning_rate": 2.9784442361761948e-05,
"loss": 0.1796,
"step": 5500
},
{
"epoch": 16.02,
"grad_norm": 27.31386375427246,
"learning_rate": 2.9737582005623244e-05,
"loss": 0.1263,
"step": 5510
},
{
"epoch": 16.02,
"grad_norm": 3.5492141246795654,
"learning_rate": 2.9690721649484536e-05,
"loss": 0.2096,
"step": 5520
},
{
"epoch": 16.02,
"grad_norm": 23.727418899536133,
"learning_rate": 2.9643861293345832e-05,
"loss": 0.1176,
"step": 5530
},
{
"epoch": 16.02,
"grad_norm": 2.065145492553711,
"learning_rate": 2.9597000937207124e-05,
"loss": 0.0873,
"step": 5540
},
{
"epoch": 16.02,
"grad_norm": 1.5445265769958496,
"learning_rate": 2.955014058106842e-05,
"loss": 0.1622,
"step": 5550
},
{
"epoch": 16.02,
"grad_norm": 3.4327638149261475,
"learning_rate": 2.9503280224929712e-05,
"loss": 0.1375,
"step": 5560
},
{
"epoch": 16.02,
"grad_norm": 0.018016502261161804,
"learning_rate": 2.9456419868791e-05,
"loss": 0.0712,
"step": 5570
},
{
"epoch": 16.03,
"grad_norm": 11.649871826171875,
"learning_rate": 2.9409559512652297e-05,
"loss": 0.1606,
"step": 5580
},
{
"epoch": 16.03,
"grad_norm": 17.827857971191406,
"learning_rate": 2.936269915651359e-05,
"loss": 0.1209,
"step": 5590
},
{
"epoch": 16.03,
"grad_norm": 0.01631985232234001,
"learning_rate": 2.9315838800374885e-05,
"loss": 0.0295,
"step": 5600
},
{
"epoch": 16.03,
"grad_norm": 48.93087387084961,
"learning_rate": 2.9268978444236177e-05,
"loss": 0.0542,
"step": 5610
},
{
"epoch": 16.03,
"eval_accuracy": 0.8051575931232091,
"eval_loss": 1.0106927156448364,
"eval_runtime": 33.4281,
"eval_samples_per_second": 20.881,
"eval_steps_per_second": 1.765,
"step": 5610
},
{
"epoch": 17.0,
"grad_norm": 3.7330472469329834,
"learning_rate": 2.9222118088097473e-05,
"loss": 0.1299,
"step": 5620
},
{
"epoch": 17.0,
"grad_norm": 1.1770386695861816,
"learning_rate": 2.9175257731958765e-05,
"loss": 0.1494,
"step": 5630
},
{
"epoch": 17.0,
"grad_norm": 6.161425590515137,
"learning_rate": 2.912839737582006e-05,
"loss": 0.0633,
"step": 5640
},
{
"epoch": 17.0,
"grad_norm": 0.5936900973320007,
"learning_rate": 2.908153701968135e-05,
"loss": 0.0759,
"step": 5650
},
{
"epoch": 17.0,
"grad_norm": 0.04893672466278076,
"learning_rate": 2.9034676663542642e-05,
"loss": 0.0792,
"step": 5660
},
{
"epoch": 17.01,
"grad_norm": 6.019880294799805,
"learning_rate": 2.8987816307403938e-05,
"loss": 0.1079,
"step": 5670
},
{
"epoch": 17.01,
"grad_norm": 45.10232162475586,
"learning_rate": 2.894095595126523e-05,
"loss": 0.0538,
"step": 5680
},
{
"epoch": 17.01,
"grad_norm": 52.10890197753906,
"learning_rate": 2.8894095595126526e-05,
"loss": 0.0725,
"step": 5690
},
{
"epoch": 17.01,
"grad_norm": 0.03488897159695625,
"learning_rate": 2.8847235238987818e-05,
"loss": 0.1523,
"step": 5700
},
{
"epoch": 17.01,
"grad_norm": 0.07146196067333221,
"learning_rate": 2.8800374882849114e-05,
"loss": 0.0272,
"step": 5710
},
{
"epoch": 17.01,
"grad_norm": 9.103580474853516,
"learning_rate": 2.8753514526710406e-05,
"loss": 0.0956,
"step": 5720
},
{
"epoch": 17.01,
"grad_norm": 0.010441714897751808,
"learning_rate": 2.8706654170571695e-05,
"loss": 0.1186,
"step": 5730
},
{
"epoch": 17.01,
"grad_norm": 51.32048797607422,
"learning_rate": 2.865979381443299e-05,
"loss": 0.0991,
"step": 5740
},
{
"epoch": 17.01,
"grad_norm": 17.720178604125977,
"learning_rate": 2.8612933458294283e-05,
"loss": 0.0702,
"step": 5750
},
{
"epoch": 17.01,
"grad_norm": 31.01833724975586,
"learning_rate": 2.856607310215558e-05,
"loss": 0.175,
"step": 5760
},
{
"epoch": 17.01,
"grad_norm": 0.02792373113334179,
"learning_rate": 2.851921274601687e-05,
"loss": 0.0858,
"step": 5770
},
{
"epoch": 17.01,
"grad_norm": 48.3105583190918,
"learning_rate": 2.8472352389878167e-05,
"loss": 0.2053,
"step": 5780
},
{
"epoch": 17.02,
"grad_norm": 9.974466323852539,
"learning_rate": 2.842549203373946e-05,
"loss": 0.0544,
"step": 5790
},
{
"epoch": 17.02,
"grad_norm": 0.7237229347229004,
"learning_rate": 2.8378631677600755e-05,
"loss": 0.1306,
"step": 5800
},
{
"epoch": 17.02,
"grad_norm": 14.930671691894531,
"learning_rate": 2.833177132146204e-05,
"loss": 0.1952,
"step": 5810
},
{
"epoch": 17.02,
"grad_norm": 0.019687309861183167,
"learning_rate": 2.8284910965323336e-05,
"loss": 0.1069,
"step": 5820
},
{
"epoch": 17.02,
"grad_norm": 5.928595066070557,
"learning_rate": 2.823805060918463e-05,
"loss": 0.0161,
"step": 5830
},
{
"epoch": 17.02,
"grad_norm": 0.03599075973033905,
"learning_rate": 2.8191190253045924e-05,
"loss": 0.09,
"step": 5840
},
{
"epoch": 17.02,
"grad_norm": 0.05025621876120567,
"learning_rate": 2.8144329896907216e-05,
"loss": 0.0732,
"step": 5850
},
{
"epoch": 17.02,
"grad_norm": 40.76129150390625,
"learning_rate": 2.8097469540768512e-05,
"loss": 0.0456,
"step": 5860
},
{
"epoch": 17.02,
"grad_norm": 1.6205233335494995,
"learning_rate": 2.8050609184629804e-05,
"loss": 0.0213,
"step": 5870
},
{
"epoch": 17.02,
"grad_norm": 56.61774444580078,
"learning_rate": 2.80037488284911e-05,
"loss": 0.2101,
"step": 5880
},
{
"epoch": 17.02,
"grad_norm": 0.4398052394390106,
"learning_rate": 2.7956888472352392e-05,
"loss": 0.1504,
"step": 5890
},
{
"epoch": 17.02,
"grad_norm": 3.440358877182007,
"learning_rate": 2.791002811621368e-05,
"loss": 0.0786,
"step": 5900
},
{
"epoch": 17.03,
"grad_norm": 0.02337775193154812,
"learning_rate": 2.7863167760074977e-05,
"loss": 0.1404,
"step": 5910
},
{
"epoch": 17.03,
"grad_norm": 36.56870651245117,
"learning_rate": 2.781630740393627e-05,
"loss": 0.0849,
"step": 5920
},
{
"epoch": 17.03,
"grad_norm": 0.021380068734288216,
"learning_rate": 2.7769447047797565e-05,
"loss": 0.0257,
"step": 5930
},
{
"epoch": 17.03,
"grad_norm": 0.030316907912492752,
"learning_rate": 2.7722586691658857e-05,
"loss": 0.0711,
"step": 5940
},
{
"epoch": 17.03,
"eval_accuracy": 0.8080229226361032,
"eval_loss": 0.9795148968696594,
"eval_runtime": 34.1521,
"eval_samples_per_second": 20.438,
"eval_steps_per_second": 1.728,
"step": 5940
},
{
"epoch": 18.0,
"grad_norm": 0.007565322797745466,
"learning_rate": 2.7675726335520153e-05,
"loss": 0.0015,
"step": 5950
},
{
"epoch": 18.0,
"grad_norm": 0.006239545065909624,
"learning_rate": 2.7628865979381445e-05,
"loss": 0.0589,
"step": 5960
},
{
"epoch": 18.0,
"grad_norm": 0.024090183898806572,
"learning_rate": 2.758200562324274e-05,
"loss": 0.0892,
"step": 5970
},
{
"epoch": 18.0,
"grad_norm": 8.415689468383789,
"learning_rate": 2.753514526710403e-05,
"loss": 0.1953,
"step": 5980
},
{
"epoch": 18.0,
"grad_norm": 0.46405917406082153,
"learning_rate": 2.7488284910965322e-05,
"loss": 0.0365,
"step": 5990
},
{
"epoch": 18.01,
"grad_norm": 0.320950984954834,
"learning_rate": 2.7441424554826618e-05,
"loss": 0.1011,
"step": 6000
},
{
"epoch": 18.01,
"grad_norm": 25.849971771240234,
"learning_rate": 2.739456419868791e-05,
"loss": 0.0787,
"step": 6010
},
{
"epoch": 18.01,
"grad_norm": 0.018421674147248268,
"learning_rate": 2.7347703842549206e-05,
"loss": 0.0032,
"step": 6020
},
{
"epoch": 18.01,
"grad_norm": 0.02502196654677391,
"learning_rate": 2.73008434864105e-05,
"loss": 0.173,
"step": 6030
},
{
"epoch": 18.01,
"grad_norm": 0.05541960895061493,
"learning_rate": 2.7253983130271794e-05,
"loss": 0.1086,
"step": 6040
},
{
"epoch": 18.01,
"grad_norm": 1.3273464441299438,
"learning_rate": 2.7207122774133086e-05,
"loss": 0.0214,
"step": 6050
},
{
"epoch": 18.01,
"grad_norm": 27.578462600708008,
"learning_rate": 2.7160262417994375e-05,
"loss": 0.0767,
"step": 6060
},
{
"epoch": 18.01,
"grad_norm": 1.713114857673645,
"learning_rate": 2.711340206185567e-05,
"loss": 0.1884,
"step": 6070
},
{
"epoch": 18.01,
"grad_norm": 0.0781659409403801,
"learning_rate": 2.7066541705716963e-05,
"loss": 0.1062,
"step": 6080
},
{
"epoch": 18.01,
"grad_norm": 61.0845832824707,
"learning_rate": 2.701968134957826e-05,
"loss": 0.0516,
"step": 6090
},
{
"epoch": 18.01,
"grad_norm": 9.772195816040039,
"learning_rate": 2.697282099343955e-05,
"loss": 0.0589,
"step": 6100
},
{
"epoch": 18.01,
"grad_norm": 0.18929840624332428,
"learning_rate": 2.6925960637300847e-05,
"loss": 0.1057,
"step": 6110
},
{
"epoch": 18.02,
"grad_norm": 0.020669307559728622,
"learning_rate": 2.687910028116214e-05,
"loss": 0.0687,
"step": 6120
},
{
"epoch": 18.02,
"grad_norm": 5.4998016357421875,
"learning_rate": 2.6832239925023435e-05,
"loss": 0.2466,
"step": 6130
},
{
"epoch": 18.02,
"grad_norm": 2.288482427597046,
"learning_rate": 2.6785379568884727e-05,
"loss": 0.0868,
"step": 6140
},
{
"epoch": 18.02,
"grad_norm": 0.02619881182909012,
"learning_rate": 2.6738519212746016e-05,
"loss": 0.0071,
"step": 6150
},
{
"epoch": 18.02,
"grad_norm": 0.011477050371468067,
"learning_rate": 2.669165885660731e-05,
"loss": 0.0421,
"step": 6160
},
{
"epoch": 18.02,
"grad_norm": 0.03269781917333603,
"learning_rate": 2.6644798500468604e-05,
"loss": 0.0465,
"step": 6170
},
{
"epoch": 18.02,
"grad_norm": 0.03204688802361488,
"learning_rate": 2.6597938144329897e-05,
"loss": 0.0769,
"step": 6180
},
{
"epoch": 18.02,
"grad_norm": 28.57611656188965,
"learning_rate": 2.6551077788191192e-05,
"loss": 0.0488,
"step": 6190
},
{
"epoch": 18.02,
"grad_norm": 27.48614501953125,
"learning_rate": 2.6504217432052485e-05,
"loss": 0.0817,
"step": 6200
},
{
"epoch": 18.02,
"grad_norm": 37.647117614746094,
"learning_rate": 2.645735707591378e-05,
"loss": 0.0928,
"step": 6210
},
{
"epoch": 18.02,
"grad_norm": 0.01060717087239027,
"learning_rate": 2.6410496719775073e-05,
"loss": 0.034,
"step": 6220
},
{
"epoch": 18.02,
"grad_norm": 0.24268437922000885,
"learning_rate": 2.636363636363636e-05,
"loss": 0.108,
"step": 6230
},
{
"epoch": 18.03,
"grad_norm": 0.00554469833150506,
"learning_rate": 2.6316776007497657e-05,
"loss": 0.1068,
"step": 6240
},
{
"epoch": 18.03,
"grad_norm": 10.710355758666992,
"learning_rate": 2.626991565135895e-05,
"loss": 0.0637,
"step": 6250
},
{
"epoch": 18.03,
"grad_norm": 23.989526748657227,
"learning_rate": 2.6223055295220245e-05,
"loss": 0.0926,
"step": 6260
},
{
"epoch": 18.03,
"grad_norm": 0.2239922732114792,
"learning_rate": 2.6176194939081538e-05,
"loss": 0.0287,
"step": 6270
},
{
"epoch": 18.03,
"eval_accuracy": 0.8094555873925502,
"eval_loss": 1.1470834016799927,
"eval_runtime": 34.1919,
"eval_samples_per_second": 20.414,
"eval_steps_per_second": 1.726,
"step": 6270
},
{
"epoch": 19.0,
"grad_norm": 0.3733896315097809,
"learning_rate": 2.6129334582942833e-05,
"loss": 0.0541,
"step": 6280
},
{
"epoch": 19.0,
"grad_norm": 0.011687755584716797,
"learning_rate": 2.6082474226804126e-05,
"loss": 0.0725,
"step": 6290
},
{
"epoch": 19.0,
"grad_norm": 15.612820625305176,
"learning_rate": 2.603561387066542e-05,
"loss": 0.0287,
"step": 6300
},
{
"epoch": 19.0,
"grad_norm": 0.0473637618124485,
"learning_rate": 2.598875351452671e-05,
"loss": 0.128,
"step": 6310
},
{
"epoch": 19.0,
"grad_norm": 14.158834457397461,
"learning_rate": 2.5941893158388003e-05,
"loss": 0.0102,
"step": 6320
},
{
"epoch": 19.01,
"grad_norm": 0.03251667320728302,
"learning_rate": 2.58950328022493e-05,
"loss": 0.0536,
"step": 6330
},
{
"epoch": 19.01,
"grad_norm": 2.301870822906494,
"learning_rate": 2.584817244611059e-05,
"loss": 0.1364,
"step": 6340
},
{
"epoch": 19.01,
"grad_norm": 36.87099075317383,
"learning_rate": 2.5801312089971886e-05,
"loss": 0.1997,
"step": 6350
},
{
"epoch": 19.01,
"grad_norm": 0.012598090805113316,
"learning_rate": 2.575445173383318e-05,
"loss": 0.1696,
"step": 6360
},
{
"epoch": 19.01,
"grad_norm": 0.016358235850930214,
"learning_rate": 2.5707591377694474e-05,
"loss": 0.0671,
"step": 6370
},
{
"epoch": 19.01,
"grad_norm": 3.8293418884277344,
"learning_rate": 2.5660731021555767e-05,
"loss": 0.0023,
"step": 6380
},
{
"epoch": 19.01,
"grad_norm": 32.61289978027344,
"learning_rate": 2.5613870665417056e-05,
"loss": 0.0762,
"step": 6390
},
{
"epoch": 19.01,
"grad_norm": 0.12384048849344254,
"learning_rate": 2.556701030927835e-05,
"loss": 0.1048,
"step": 6400
},
{
"epoch": 19.01,
"grad_norm": 34.71640396118164,
"learning_rate": 2.5520149953139644e-05,
"loss": 0.075,
"step": 6410
},
{
"epoch": 19.01,
"grad_norm": 0.02384701929986477,
"learning_rate": 2.547328959700094e-05,
"loss": 0.0328,
"step": 6420
},
{
"epoch": 19.01,
"grad_norm": 0.028293780982494354,
"learning_rate": 2.542642924086223e-05,
"loss": 0.0533,
"step": 6430
},
{
"epoch": 19.01,
"grad_norm": 0.06875205785036087,
"learning_rate": 2.5379568884723527e-05,
"loss": 0.0736,
"step": 6440
},
{
"epoch": 19.02,
"grad_norm": 0.020702671259641647,
"learning_rate": 2.533270852858482e-05,
"loss": 0.1326,
"step": 6450
},
{
"epoch": 19.02,
"grad_norm": 27.13255500793457,
"learning_rate": 2.5285848172446115e-05,
"loss": 0.1638,
"step": 6460
},
{
"epoch": 19.02,
"grad_norm": 0.04475679248571396,
"learning_rate": 2.5238987816307408e-05,
"loss": 0.0483,
"step": 6470
},
{
"epoch": 19.02,
"grad_norm": 0.3056570291519165,
"learning_rate": 2.5192127460168697e-05,
"loss": 0.0657,
"step": 6480
},
{
"epoch": 19.02,
"grad_norm": 0.01589689962565899,
"learning_rate": 2.514526710402999e-05,
"loss": 0.033,
"step": 6490
},
{
"epoch": 19.02,
"grad_norm": 0.02305714786052704,
"learning_rate": 2.5098406747891285e-05,
"loss": 0.1051,
"step": 6500
},
{
"epoch": 19.02,
"grad_norm": 0.0253700353205204,
"learning_rate": 2.5051546391752577e-05,
"loss": 0.0328,
"step": 6510
},
{
"epoch": 19.02,
"grad_norm": 46.60805892944336,
"learning_rate": 2.5004686035613873e-05,
"loss": 0.0829,
"step": 6520
},
{
"epoch": 19.02,
"grad_norm": 0.07593350857496262,
"learning_rate": 2.4957825679475165e-05,
"loss": 0.0278,
"step": 6530
},
{
"epoch": 19.02,
"grad_norm": 0.011305336840450764,
"learning_rate": 2.4910965323336457e-05,
"loss": 0.1844,
"step": 6540
},
{
"epoch": 19.02,
"grad_norm": 0.02104657143354416,
"learning_rate": 2.4864104967197753e-05,
"loss": 0.0412,
"step": 6550
},
{
"epoch": 19.02,
"grad_norm": 0.05700293555855751,
"learning_rate": 2.4817244611059045e-05,
"loss": 0.004,
"step": 6560
},
{
"epoch": 19.03,
"grad_norm": 5.577692985534668,
"learning_rate": 2.477038425492034e-05,
"loss": 0.1151,
"step": 6570
},
{
"epoch": 19.03,
"grad_norm": 0.020554441958665848,
"learning_rate": 2.472352389878163e-05,
"loss": 0.0564,
"step": 6580
},
{
"epoch": 19.03,
"grad_norm": 0.04367370158433914,
"learning_rate": 2.4676663542642926e-05,
"loss": 0.1458,
"step": 6590
},
{
"epoch": 19.03,
"grad_norm": 0.026535367593169212,
"learning_rate": 2.4629803186504218e-05,
"loss": 0.1011,
"step": 6600
},
{
"epoch": 19.03,
"eval_accuracy": 0.7851002865329513,
"eval_loss": 1.0893527269363403,
"eval_runtime": 33.9177,
"eval_samples_per_second": 20.579,
"eval_steps_per_second": 1.74,
"step": 6600
},
{
"epoch": 20.0,
"grad_norm": 10.764158248901367,
"learning_rate": 2.4582942830365514e-05,
"loss": 0.159,
"step": 6610
},
{
"epoch": 20.0,
"grad_norm": 0.06043768674135208,
"learning_rate": 2.4536082474226803e-05,
"loss": 0.003,
"step": 6620
},
{
"epoch": 20.0,
"grad_norm": 0.006598788313567638,
"learning_rate": 2.4489222118088098e-05,
"loss": 0.0761,
"step": 6630
},
{
"epoch": 20.0,
"grad_norm": 0.02938709408044815,
"learning_rate": 2.444236176194939e-05,
"loss": 0.069,
"step": 6640
},
{
"epoch": 20.0,
"grad_norm": 0.7408674955368042,
"learning_rate": 2.4395501405810686e-05,
"loss": 0.0754,
"step": 6650
},
{
"epoch": 20.01,
"grad_norm": 14.34585952758789,
"learning_rate": 2.434864104967198e-05,
"loss": 0.0301,
"step": 6660
},
{
"epoch": 20.01,
"grad_norm": 0.12531670928001404,
"learning_rate": 2.430178069353327e-05,
"loss": 0.0021,
"step": 6670
},
{
"epoch": 20.01,
"grad_norm": 0.11880137771368027,
"learning_rate": 2.4254920337394567e-05,
"loss": 0.0008,
"step": 6680
},
{
"epoch": 20.01,
"grad_norm": 15.903475761413574,
"learning_rate": 2.420805998125586e-05,
"loss": 0.0479,
"step": 6690
},
{
"epoch": 20.01,
"grad_norm": 29.952171325683594,
"learning_rate": 2.416119962511715e-05,
"loss": 0.1002,
"step": 6700
},
{
"epoch": 20.01,
"grad_norm": 0.30753955245018005,
"learning_rate": 2.4114339268978444e-05,
"loss": 0.0304,
"step": 6710
},
{
"epoch": 20.01,
"grad_norm": 0.014064520597457886,
"learning_rate": 2.406747891283974e-05,
"loss": 0.1304,
"step": 6720
},
{
"epoch": 20.01,
"grad_norm": 0.4187520146369934,
"learning_rate": 2.402061855670103e-05,
"loss": 0.1015,
"step": 6730
},
{
"epoch": 20.01,
"grad_norm": 0.02327924221754074,
"learning_rate": 2.3973758200562327e-05,
"loss": 0.0053,
"step": 6740
},
{
"epoch": 20.01,
"grad_norm": 16.745342254638672,
"learning_rate": 2.392689784442362e-05,
"loss": 0.2538,
"step": 6750
},
{
"epoch": 20.01,
"grad_norm": 0.24011173844337463,
"learning_rate": 2.3880037488284912e-05,
"loss": 0.0485,
"step": 6760
},
{
"epoch": 20.01,
"grad_norm": 29.788806915283203,
"learning_rate": 2.3833177132146208e-05,
"loss": 0.0324,
"step": 6770
},
{
"epoch": 20.02,
"grad_norm": 0.7102867960929871,
"learning_rate": 2.37863167760075e-05,
"loss": 0.1429,
"step": 6780
},
{
"epoch": 20.02,
"grad_norm": 0.08376511186361313,
"learning_rate": 2.3739456419868792e-05,
"loss": 0.0972,
"step": 6790
},
{
"epoch": 20.02,
"grad_norm": 0.335637629032135,
"learning_rate": 2.3692596063730085e-05,
"loss": 0.062,
"step": 6800
},
{
"epoch": 20.02,
"grad_norm": 28.6505069732666,
"learning_rate": 2.364573570759138e-05,
"loss": 0.2001,
"step": 6810
},
{
"epoch": 20.02,
"grad_norm": 29.769094467163086,
"learning_rate": 2.3598875351452673e-05,
"loss": 0.1555,
"step": 6820
},
{
"epoch": 20.02,
"grad_norm": 52.66693878173828,
"learning_rate": 2.3552014995313965e-05,
"loss": 0.0873,
"step": 6830
},
{
"epoch": 20.02,
"grad_norm": 0.011765277944505215,
"learning_rate": 2.3505154639175257e-05,
"loss": 0.0721,
"step": 6840
},
{
"epoch": 20.02,
"grad_norm": 0.08442965894937515,
"learning_rate": 2.3458294283036553e-05,
"loss": 0.0497,
"step": 6850
},
{
"epoch": 20.02,
"grad_norm": 0.017878804355859756,
"learning_rate": 2.3411433926897845e-05,
"loss": 0.0389,
"step": 6860
},
{
"epoch": 20.02,
"grad_norm": 0.017131801694631577,
"learning_rate": 2.3364573570759138e-05,
"loss": 0.1382,
"step": 6870
},
{
"epoch": 20.02,
"grad_norm": 2.3948495388031006,
"learning_rate": 2.3317713214620433e-05,
"loss": 0.0425,
"step": 6880
},
{
"epoch": 20.02,
"grad_norm": 9.163774490356445,
"learning_rate": 2.3270852858481726e-05,
"loss": 0.0738,
"step": 6890
},
{
"epoch": 20.03,
"grad_norm": 0.009526137262582779,
"learning_rate": 2.322399250234302e-05,
"loss": 0.1145,
"step": 6900
},
{
"epoch": 20.03,
"grad_norm": 0.11400933563709259,
"learning_rate": 2.317713214620431e-05,
"loss": 0.0527,
"step": 6910
},
{
"epoch": 20.03,
"grad_norm": 1.8810824155807495,
"learning_rate": 2.3130271790065606e-05,
"loss": 0.0034,
"step": 6920
},
{
"epoch": 20.03,
"grad_norm": 0.02618522383272648,
"learning_rate": 2.3083411433926898e-05,
"loss": 0.0424,
"step": 6930
},
{
"epoch": 20.03,
"eval_accuracy": 0.7822349570200573,
"eval_loss": 1.144364356994629,
"eval_runtime": 34.682,
"eval_samples_per_second": 20.126,
"eval_steps_per_second": 1.701,
"step": 6930
},
{
"epoch": 21.0,
"grad_norm": 0.017015540972352028,
"learning_rate": 2.3036551077788194e-05,
"loss": 0.0144,
"step": 6940
},
{
"epoch": 21.0,
"grad_norm": 1.5228683948516846,
"learning_rate": 2.2989690721649483e-05,
"loss": 0.049,
"step": 6950
},
{
"epoch": 21.0,
"grad_norm": 0.00678257504478097,
"learning_rate": 2.294283036551078e-05,
"loss": 0.0779,
"step": 6960
},
{
"epoch": 21.0,
"grad_norm": 0.011534970253705978,
"learning_rate": 2.289597000937207e-05,
"loss": 0.1179,
"step": 6970
},
{
"epoch": 21.0,
"grad_norm": 22.936405181884766,
"learning_rate": 2.2849109653233367e-05,
"loss": 0.0972,
"step": 6980
},
{
"epoch": 21.01,
"grad_norm": 0.06261157989501953,
"learning_rate": 2.280224929709466e-05,
"loss": 0.0625,
"step": 6990
},
{
"epoch": 21.01,
"grad_norm": 9.488935470581055,
"learning_rate": 2.275538894095595e-05,
"loss": 0.0421,
"step": 7000
},
{
"epoch": 21.01,
"grad_norm": 11.307842254638672,
"learning_rate": 2.2708528584817247e-05,
"loss": 0.043,
"step": 7010
},
{
"epoch": 21.01,
"grad_norm": 0.03459528461098671,
"learning_rate": 2.266166822867854e-05,
"loss": 0.0242,
"step": 7020
},
{
"epoch": 21.01,
"grad_norm": 9.111845016479492,
"learning_rate": 2.261480787253983e-05,
"loss": 0.0573,
"step": 7030
},
{
"epoch": 21.01,
"grad_norm": 0.01025596633553505,
"learning_rate": 2.2567947516401124e-05,
"loss": 0.0159,
"step": 7040
},
{
"epoch": 21.01,
"grad_norm": 2.14906907081604,
"learning_rate": 2.252108716026242e-05,
"loss": 0.0856,
"step": 7050
},
{
"epoch": 21.01,
"grad_norm": 11.889404296875,
"learning_rate": 2.2474226804123712e-05,
"loss": 0.0287,
"step": 7060
},
{
"epoch": 21.01,
"grad_norm": 0.046489182859659195,
"learning_rate": 2.2427366447985008e-05,
"loss": 0.0729,
"step": 7070
},
{
"epoch": 21.01,
"grad_norm": 0.009697903878986835,
"learning_rate": 2.23805060918463e-05,
"loss": 0.0428,
"step": 7080
},
{
"epoch": 21.01,
"grad_norm": 0.03072419762611389,
"learning_rate": 2.2333645735707592e-05,
"loss": 0.0601,
"step": 7090
},
{
"epoch": 21.01,
"grad_norm": 10.731389045715332,
"learning_rate": 2.2286785379568888e-05,
"loss": 0.0125,
"step": 7100
},
{
"epoch": 21.02,
"grad_norm": 0.04367499053478241,
"learning_rate": 2.223992502343018e-05,
"loss": 0.0223,
"step": 7110
},
{
"epoch": 21.02,
"grad_norm": 0.013104673475027084,
"learning_rate": 2.2193064667291473e-05,
"loss": 0.1196,
"step": 7120
},
{
"epoch": 21.02,
"grad_norm": 0.5789304375648499,
"learning_rate": 2.2146204311152765e-05,
"loss": 0.0593,
"step": 7130
},
{
"epoch": 21.02,
"grad_norm": 0.005018654279410839,
"learning_rate": 2.209934395501406e-05,
"loss": 0.0508,
"step": 7140
},
{
"epoch": 21.02,
"grad_norm": 2.954718828201294,
"learning_rate": 2.2052483598875353e-05,
"loss": 0.0327,
"step": 7150
},
{
"epoch": 21.02,
"grad_norm": 9.058338165283203,
"learning_rate": 2.2005623242736645e-05,
"loss": 0.0686,
"step": 7160
},
{
"epoch": 21.02,
"grad_norm": 0.009537008590996265,
"learning_rate": 2.1958762886597937e-05,
"loss": 0.0273,
"step": 7170
},
{
"epoch": 21.02,
"grad_norm": 0.009294161573052406,
"learning_rate": 2.1911902530459233e-05,
"loss": 0.0853,
"step": 7180
},
{
"epoch": 21.02,
"grad_norm": 0.31812822818756104,
"learning_rate": 2.1865042174320525e-05,
"loss": 0.0038,
"step": 7190
},
{
"epoch": 21.02,
"grad_norm": 21.410701751708984,
"learning_rate": 2.1818181818181818e-05,
"loss": 0.053,
"step": 7200
},
{
"epoch": 21.02,
"grad_norm": 0.6843286752700806,
"learning_rate": 2.1771321462043114e-05,
"loss": 0.0013,
"step": 7210
},
{
"epoch": 21.02,
"grad_norm": 0.013939457014203072,
"learning_rate": 2.1724461105904406e-05,
"loss": 0.0543,
"step": 7220
},
{
"epoch": 21.03,
"grad_norm": 0.0053044771775603294,
"learning_rate": 2.16776007497657e-05,
"loss": 0.0194,
"step": 7230
},
{
"epoch": 21.03,
"grad_norm": 73.98345947265625,
"learning_rate": 2.163074039362699e-05,
"loss": 0.0579,
"step": 7240
},
{
"epoch": 21.03,
"grad_norm": 0.03918803855776787,
"learning_rate": 2.1583880037488286e-05,
"loss": 0.0161,
"step": 7250
},
{
"epoch": 21.03,
"grad_norm": 0.0033793123438954353,
"learning_rate": 2.153701968134958e-05,
"loss": 0.0229,
"step": 7260
},
{
"epoch": 21.03,
"eval_accuracy": 0.7822349570200573,
"eval_loss": 1.3766086101531982,
"eval_runtime": 34.2388,
"eval_samples_per_second": 20.386,
"eval_steps_per_second": 1.723,
"step": 7260
},
{
"epoch": 22.0,
"grad_norm": 0.02534145675599575,
"learning_rate": 2.1490159325210874e-05,
"loss": 0.0177,
"step": 7270
},
{
"epoch": 22.0,
"grad_norm": 27.288881301879883,
"learning_rate": 2.1443298969072163e-05,
"loss": 0.0594,
"step": 7280
},
{
"epoch": 22.0,
"grad_norm": 3.099937677383423,
"learning_rate": 2.139643861293346e-05,
"loss": 0.07,
"step": 7290
},
{
"epoch": 22.0,
"grad_norm": 0.0036865780130028725,
"learning_rate": 2.134957825679475e-05,
"loss": 0.006,
"step": 7300
},
{
"epoch": 22.0,
"grad_norm": 59.88230514526367,
"learning_rate": 2.1302717900656047e-05,
"loss": 0.1675,
"step": 7310
},
{
"epoch": 22.01,
"grad_norm": 0.12187007069587708,
"learning_rate": 2.125585754451734e-05,
"loss": 0.0348,
"step": 7320
},
{
"epoch": 22.01,
"grad_norm": 0.011614816263318062,
"learning_rate": 2.120899718837863e-05,
"loss": 0.0227,
"step": 7330
},
{
"epoch": 22.01,
"grad_norm": 0.013845368288457394,
"learning_rate": 2.1162136832239927e-05,
"loss": 0.0653,
"step": 7340
},
{
"epoch": 22.01,
"grad_norm": 0.009227645583450794,
"learning_rate": 2.111527647610122e-05,
"loss": 0.141,
"step": 7350
},
{
"epoch": 22.01,
"grad_norm": 0.5920379161834717,
"learning_rate": 2.1068416119962515e-05,
"loss": 0.0401,
"step": 7360
},
{
"epoch": 22.01,
"grad_norm": 0.006525806616991758,
"learning_rate": 2.1021555763823804e-05,
"loss": 0.0885,
"step": 7370
},
{
"epoch": 22.01,
"grad_norm": 0.006711127702146769,
"learning_rate": 2.09746954076851e-05,
"loss": 0.0618,
"step": 7380
},
{
"epoch": 22.01,
"grad_norm": 0.004596900660544634,
"learning_rate": 2.0927835051546392e-05,
"loss": 0.0039,
"step": 7390
},
{
"epoch": 22.01,
"grad_norm": 0.22587594389915466,
"learning_rate": 2.0880974695407688e-05,
"loss": 0.0897,
"step": 7400
},
{
"epoch": 22.01,
"grad_norm": 0.007293607573956251,
"learning_rate": 2.083411433926898e-05,
"loss": 0.1129,
"step": 7410
},
{
"epoch": 22.01,
"grad_norm": 0.02033209055662155,
"learning_rate": 2.0787253983130272e-05,
"loss": 0.0583,
"step": 7420
},
{
"epoch": 22.01,
"grad_norm": 0.006020266562700272,
"learning_rate": 2.0740393626991568e-05,
"loss": 0.0519,
"step": 7430
},
{
"epoch": 22.02,
"grad_norm": 0.050974782556295395,
"learning_rate": 2.069353327085286e-05,
"loss": 0.0972,
"step": 7440
},
{
"epoch": 22.02,
"grad_norm": 15.67349624633789,
"learning_rate": 2.0646672914714153e-05,
"loss": 0.0047,
"step": 7450
},
{
"epoch": 22.02,
"grad_norm": 54.079017639160156,
"learning_rate": 2.0599812558575445e-05,
"loss": 0.0765,
"step": 7460
},
{
"epoch": 22.02,
"grad_norm": 0.28876176476478577,
"learning_rate": 2.055295220243674e-05,
"loss": 0.0463,
"step": 7470
},
{
"epoch": 22.02,
"grad_norm": 0.01849912479519844,
"learning_rate": 2.0506091846298033e-05,
"loss": 0.0064,
"step": 7480
},
{
"epoch": 22.02,
"grad_norm": 0.014657631516456604,
"learning_rate": 2.0459231490159325e-05,
"loss": 0.0517,
"step": 7490
},
{
"epoch": 22.02,
"grad_norm": 0.023860439658164978,
"learning_rate": 2.0412371134020618e-05,
"loss": 0.0165,
"step": 7500
},
{
"epoch": 22.02,
"grad_norm": 0.274640828371048,
"learning_rate": 2.0365510777881913e-05,
"loss": 0.0707,
"step": 7510
},
{
"epoch": 22.02,
"grad_norm": 8.938105583190918,
"learning_rate": 2.0318650421743206e-05,
"loss": 0.0653,
"step": 7520
},
{
"epoch": 22.02,
"grad_norm": 0.023853203281760216,
"learning_rate": 2.0271790065604498e-05,
"loss": 0.1298,
"step": 7530
},
{
"epoch": 22.02,
"grad_norm": 0.01518749725073576,
"learning_rate": 2.0224929709465794e-05,
"loss": 0.054,
"step": 7540
},
{
"epoch": 22.02,
"grad_norm": 0.2764262855052948,
"learning_rate": 2.0178069353327086e-05,
"loss": 0.1653,
"step": 7550
},
{
"epoch": 22.03,
"grad_norm": 0.01938408799469471,
"learning_rate": 2.0131208997188382e-05,
"loss": 0.0305,
"step": 7560
},
{
"epoch": 22.03,
"grad_norm": 0.06406080722808838,
"learning_rate": 2.008434864104967e-05,
"loss": 0.0038,
"step": 7570
},
{
"epoch": 22.03,
"grad_norm": 0.08752384036779404,
"learning_rate": 2.0037488284910966e-05,
"loss": 0.0272,
"step": 7580
},
{
"epoch": 22.03,
"grad_norm": 0.00483914278447628,
"learning_rate": 1.999062792877226e-05,
"loss": 0.058,
"step": 7590
},
{
"epoch": 22.03,
"eval_accuracy": 0.7893982808022922,
"eval_loss": 1.279589056968689,
"eval_runtime": 34.2577,
"eval_samples_per_second": 20.375,
"eval_steps_per_second": 1.722,
"step": 7590
},
{
"epoch": 23.0,
"grad_norm": 1.573994755744934,
"learning_rate": 1.9943767572633554e-05,
"loss": 0.0124,
"step": 7600
},
{
"epoch": 23.0,
"grad_norm": 0.014693894423544407,
"learning_rate": 1.9896907216494843e-05,
"loss": 0.0532,
"step": 7610
},
{
"epoch": 23.0,
"grad_norm": 0.06175214797258377,
"learning_rate": 1.985004686035614e-05,
"loss": 0.0272,
"step": 7620
},
{
"epoch": 23.0,
"grad_norm": 0.1148892492055893,
"learning_rate": 1.980318650421743e-05,
"loss": 0.0026,
"step": 7630
},
{
"epoch": 23.0,
"grad_norm": 0.019487930461764336,
"learning_rate": 1.9756326148078727e-05,
"loss": 0.0537,
"step": 7640
},
{
"epoch": 23.01,
"grad_norm": 34.296722412109375,
"learning_rate": 1.970946579194002e-05,
"loss": 0.0903,
"step": 7650
},
{
"epoch": 23.01,
"grad_norm": 0.38383758068084717,
"learning_rate": 1.9662605435801312e-05,
"loss": 0.0016,
"step": 7660
},
{
"epoch": 23.01,
"grad_norm": 7.190293788909912,
"learning_rate": 1.9615745079662607e-05,
"loss": 0.1071,
"step": 7670
},
{
"epoch": 23.01,
"grad_norm": 0.01017380878329277,
"learning_rate": 1.95688847235239e-05,
"loss": 0.0267,
"step": 7680
},
{
"epoch": 23.01,
"grad_norm": 18.401382446289062,
"learning_rate": 1.9522024367385195e-05,
"loss": 0.0738,
"step": 7690
},
{
"epoch": 23.01,
"grad_norm": 0.0242378581315279,
"learning_rate": 1.9475164011246484e-05,
"loss": 0.1412,
"step": 7700
},
{
"epoch": 23.01,
"grad_norm": 0.023785755038261414,
"learning_rate": 1.942830365510778e-05,
"loss": 0.0955,
"step": 7710
},
{
"epoch": 23.01,
"grad_norm": 0.02424936555325985,
"learning_rate": 1.9381443298969072e-05,
"loss": 0.0043,
"step": 7720
},
{
"epoch": 23.01,
"grad_norm": 0.38176319003105164,
"learning_rate": 1.9334582942830368e-05,
"loss": 0.0326,
"step": 7730
},
{
"epoch": 23.01,
"grad_norm": 0.011367129161953926,
"learning_rate": 1.928772258669166e-05,
"loss": 0.0404,
"step": 7740
},
{
"epoch": 23.01,
"grad_norm": 0.5925320386886597,
"learning_rate": 1.9240862230552953e-05,
"loss": 0.0306,
"step": 7750
},
{
"epoch": 23.01,
"grad_norm": 0.07246335595846176,
"learning_rate": 1.919400187441425e-05,
"loss": 0.069,
"step": 7760
},
{
"epoch": 23.02,
"grad_norm": 0.29946988821029663,
"learning_rate": 1.914714151827554e-05,
"loss": 0.0996,
"step": 7770
},
{
"epoch": 23.02,
"grad_norm": 0.035218510776758194,
"learning_rate": 1.9100281162136833e-05,
"loss": 0.0348,
"step": 7780
},
{
"epoch": 23.02,
"grad_norm": 0.035237617790699005,
"learning_rate": 1.9053420805998125e-05,
"loss": 0.0059,
"step": 7790
},
{
"epoch": 23.02,
"grad_norm": 28.824886322021484,
"learning_rate": 1.900656044985942e-05,
"loss": 0.1736,
"step": 7800
},
{
"epoch": 23.02,
"grad_norm": 0.12462026625871658,
"learning_rate": 1.8959700093720713e-05,
"loss": 0.003,
"step": 7810
},
{
"epoch": 23.02,
"grad_norm": 0.6549420952796936,
"learning_rate": 1.8912839737582006e-05,
"loss": 0.0087,
"step": 7820
},
{
"epoch": 23.02,
"grad_norm": 0.05357836186885834,
"learning_rate": 1.8865979381443298e-05,
"loss": 0.0214,
"step": 7830
},
{
"epoch": 23.02,
"grad_norm": 0.006000145338475704,
"learning_rate": 1.8819119025304594e-05,
"loss": 0.0458,
"step": 7840
},
{
"epoch": 23.02,
"grad_norm": 0.006179989781230688,
"learning_rate": 1.8772258669165886e-05,
"loss": 0.1173,
"step": 7850
},
{
"epoch": 23.02,
"grad_norm": 0.336616575717926,
"learning_rate": 1.872539831302718e-05,
"loss": 0.1676,
"step": 7860
},
{
"epoch": 23.02,
"grad_norm": 0.0050712330266833305,
"learning_rate": 1.8678537956888474e-05,
"loss": 0.0059,
"step": 7870
},
{
"epoch": 23.02,
"grad_norm": 0.005236570257693529,
"learning_rate": 1.8631677600749766e-05,
"loss": 0.1685,
"step": 7880
},
{
"epoch": 23.03,
"grad_norm": 0.06752946227788925,
"learning_rate": 1.8584817244611062e-05,
"loss": 0.0035,
"step": 7890
},
{
"epoch": 23.03,
"grad_norm": 0.4543127119541168,
"learning_rate": 1.853795688847235e-05,
"loss": 0.1258,
"step": 7900
},
{
"epoch": 23.03,
"grad_norm": 0.022716745734214783,
"learning_rate": 1.8491096532333647e-05,
"loss": 0.1408,
"step": 7910
},
{
"epoch": 23.03,
"grad_norm": 0.02292322926223278,
"learning_rate": 1.844423617619494e-05,
"loss": 0.1045,
"step": 7920
},
{
"epoch": 23.03,
"eval_accuracy": 0.7750716332378224,
"eval_loss": 1.3584957122802734,
"eval_runtime": 34.3543,
"eval_samples_per_second": 20.318,
"eval_steps_per_second": 1.717,
"step": 7920
},
{
"epoch": 24.0,
"grad_norm": 0.01888859085738659,
"learning_rate": 1.8397375820056235e-05,
"loss": 0.0033,
"step": 7930
},
{
"epoch": 24.0,
"grad_norm": 2.7452802658081055,
"learning_rate": 1.8350515463917524e-05,
"loss": 0.0672,
"step": 7940
},
{
"epoch": 24.0,
"grad_norm": 17.95488166809082,
"learning_rate": 1.830365510777882e-05,
"loss": 0.0579,
"step": 7950
},
{
"epoch": 24.0,
"grad_norm": 28.065513610839844,
"learning_rate": 1.825679475164011e-05,
"loss": 0.0383,
"step": 7960
},
{
"epoch": 24.0,
"grad_norm": 0.6805478930473328,
"learning_rate": 1.8209934395501407e-05,
"loss": 0.0037,
"step": 7970
},
{
"epoch": 24.01,
"grad_norm": 0.018596457317471504,
"learning_rate": 1.81630740393627e-05,
"loss": 0.0333,
"step": 7980
},
{
"epoch": 24.01,
"grad_norm": 0.8254786133766174,
"learning_rate": 1.8116213683223992e-05,
"loss": 0.0006,
"step": 7990
},
{
"epoch": 24.01,
"grad_norm": 0.014021596871316433,
"learning_rate": 1.8069353327085288e-05,
"loss": 0.0004,
"step": 8000
},
{
"epoch": 24.01,
"grad_norm": 0.009306724183261395,
"learning_rate": 1.802249297094658e-05,
"loss": 0.0018,
"step": 8010
},
{
"epoch": 24.01,
"grad_norm": 0.005670236889272928,
"learning_rate": 1.7975632614807876e-05,
"loss": 0.1225,
"step": 8020
},
{
"epoch": 24.01,
"grad_norm": 0.01667461171746254,
"learning_rate": 1.7928772258669165e-05,
"loss": 0.0701,
"step": 8030
},
{
"epoch": 24.01,
"grad_norm": 0.0773952454328537,
"learning_rate": 1.788191190253046e-05,
"loss": 0.0048,
"step": 8040
},
{
"epoch": 24.01,
"grad_norm": 57.535404205322266,
"learning_rate": 1.7835051546391753e-05,
"loss": 0.027,
"step": 8050
},
{
"epoch": 24.01,
"grad_norm": 0.01095606479793787,
"learning_rate": 1.778819119025305e-05,
"loss": 0.0294,
"step": 8060
},
{
"epoch": 24.01,
"grad_norm": 0.006405522581189871,
"learning_rate": 1.774133083411434e-05,
"loss": 0.0722,
"step": 8070
},
{
"epoch": 24.01,
"grad_norm": 0.003576676594093442,
"learning_rate": 1.7694470477975633e-05,
"loss": 0.0024,
"step": 8080
},
{
"epoch": 24.01,
"grad_norm": 43.07078552246094,
"learning_rate": 1.764761012183693e-05,
"loss": 0.0385,
"step": 8090
},
{
"epoch": 24.02,
"grad_norm": 47.99566650390625,
"learning_rate": 1.760074976569822e-05,
"loss": 0.1164,
"step": 8100
},
{
"epoch": 24.02,
"grad_norm": 0.029569357633590698,
"learning_rate": 1.7553889409559513e-05,
"loss": 0.0441,
"step": 8110
},
{
"epoch": 24.02,
"grad_norm": 0.020374421030282974,
"learning_rate": 1.7507029053420806e-05,
"loss": 0.0203,
"step": 8120
},
{
"epoch": 24.02,
"grad_norm": 0.2725122570991516,
"learning_rate": 1.74601686972821e-05,
"loss": 0.0015,
"step": 8130
},
{
"epoch": 24.02,
"grad_norm": 25.096572875976562,
"learning_rate": 1.7413308341143394e-05,
"loss": 0.0571,
"step": 8140
},
{
"epoch": 24.02,
"grad_norm": 0.2794142961502075,
"learning_rate": 1.7366447985004686e-05,
"loss": 0.0592,
"step": 8150
},
{
"epoch": 24.02,
"grad_norm": 0.007156948558986187,
"learning_rate": 1.7319587628865978e-05,
"loss": 0.0116,
"step": 8160
},
{
"epoch": 24.02,
"grad_norm": 0.06009415537118912,
"learning_rate": 1.7272727272727274e-05,
"loss": 0.0774,
"step": 8170
},
{
"epoch": 24.02,
"grad_norm": 0.0062246439047157764,
"learning_rate": 1.7225866916588566e-05,
"loss": 0.007,
"step": 8180
},
{
"epoch": 24.02,
"grad_norm": 0.0417780727148056,
"learning_rate": 1.717900656044986e-05,
"loss": 0.1236,
"step": 8190
},
{
"epoch": 24.02,
"grad_norm": 53.34578323364258,
"learning_rate": 1.7132146204311154e-05,
"loss": 0.0527,
"step": 8200
},
{
"epoch": 24.02,
"grad_norm": 0.20870938897132874,
"learning_rate": 1.7085285848172447e-05,
"loss": 0.0519,
"step": 8210
},
{
"epoch": 24.03,
"grad_norm": 0.011034387163817883,
"learning_rate": 1.7038425492033742e-05,
"loss": 0.0005,
"step": 8220
},
{
"epoch": 24.03,
"grad_norm": 0.004511510953307152,
"learning_rate": 1.699156513589503e-05,
"loss": 0.0125,
"step": 8230
},
{
"epoch": 24.03,
"grad_norm": 0.4468887746334076,
"learning_rate": 1.6944704779756327e-05,
"loss": 0.0015,
"step": 8240
},
{
"epoch": 24.03,
"grad_norm": 0.002701952587813139,
"learning_rate": 1.689784442361762e-05,
"loss": 0.0379,
"step": 8250
},
{
"epoch": 24.03,
"eval_accuracy": 0.7836676217765043,
"eval_loss": 1.315584421157837,
"eval_runtime": 33.9983,
"eval_samples_per_second": 20.53,
"eval_steps_per_second": 1.735,
"step": 8250
},
{
"epoch": 25.0,
"grad_norm": 0.0075071449391543865,
"learning_rate": 1.6850984067478915e-05,
"loss": 0.0626,
"step": 8260
},
{
"epoch": 25.0,
"grad_norm": 0.23369351029396057,
"learning_rate": 1.6804123711340207e-05,
"loss": 0.0197,
"step": 8270
},
{
"epoch": 25.0,
"grad_norm": 11.089585304260254,
"learning_rate": 1.67572633552015e-05,
"loss": 0.0019,
"step": 8280
},
{
"epoch": 25.0,
"grad_norm": 0.0038492237217724323,
"learning_rate": 1.6710402999062792e-05,
"loss": 0.0002,
"step": 8290
},
{
"epoch": 25.0,
"grad_norm": 0.003863664111122489,
"learning_rate": 1.6663542642924088e-05,
"loss": 0.0012,
"step": 8300
},
{
"epoch": 25.01,
"grad_norm": 0.013411330990493298,
"learning_rate": 1.661668228678538e-05,
"loss": 0.0003,
"step": 8310
},
{
"epoch": 25.01,
"grad_norm": 0.028628764674067497,
"learning_rate": 1.6569821930646672e-05,
"loss": 0.0005,
"step": 8320
},
{
"epoch": 25.01,
"grad_norm": 0.010359793901443481,
"learning_rate": 1.6522961574507968e-05,
"loss": 0.0662,
"step": 8330
},
{
"epoch": 25.01,
"grad_norm": 4.15015983581543,
"learning_rate": 1.647610121836926e-05,
"loss": 0.0023,
"step": 8340
},
{
"epoch": 25.01,
"grad_norm": 34.17851257324219,
"learning_rate": 1.6429240862230556e-05,
"loss": 0.0411,
"step": 8350
},
{
"epoch": 25.01,
"grad_norm": 0.0031898904126137495,
"learning_rate": 1.6382380506091845e-05,
"loss": 0.0003,
"step": 8360
},
{
"epoch": 25.01,
"grad_norm": 0.002583961235359311,
"learning_rate": 1.633552014995314e-05,
"loss": 0.0485,
"step": 8370
},
{
"epoch": 25.01,
"grad_norm": 18.96380615234375,
"learning_rate": 1.6288659793814433e-05,
"loss": 0.1036,
"step": 8380
},
{
"epoch": 25.01,
"grad_norm": 45.696075439453125,
"learning_rate": 1.624179943767573e-05,
"loss": 0.0844,
"step": 8390
},
{
"epoch": 25.01,
"grad_norm": 0.00760071724653244,
"learning_rate": 1.619493908153702e-05,
"loss": 0.0098,
"step": 8400
},
{
"epoch": 25.01,
"grad_norm": 0.019091518595814705,
"learning_rate": 1.6148078725398313e-05,
"loss": 0.0273,
"step": 8410
},
{
"epoch": 25.01,
"grad_norm": 0.004600458778440952,
"learning_rate": 1.610121836925961e-05,
"loss": 0.0008,
"step": 8420
},
{
"epoch": 25.02,
"grad_norm": 22.48053741455078,
"learning_rate": 1.60543580131209e-05,
"loss": 0.0469,
"step": 8430
},
{
"epoch": 25.02,
"grad_norm": 0.011721034534275532,
"learning_rate": 1.6007497656982194e-05,
"loss": 0.046,
"step": 8440
},
{
"epoch": 25.02,
"grad_norm": 0.004451930057257414,
"learning_rate": 1.5960637300843486e-05,
"loss": 0.0038,
"step": 8450
},
{
"epoch": 25.02,
"grad_norm": 0.010986747220158577,
"learning_rate": 1.591377694470478e-05,
"loss": 0.125,
"step": 8460
},
{
"epoch": 25.02,
"grad_norm": 41.15037155151367,
"learning_rate": 1.5866916588566074e-05,
"loss": 0.0238,
"step": 8470
},
{
"epoch": 25.02,
"grad_norm": 0.006322337780147791,
"learning_rate": 1.5820056232427366e-05,
"loss": 0.0012,
"step": 8480
},
{
"epoch": 25.02,
"grad_norm": 0.007438218221068382,
"learning_rate": 1.577319587628866e-05,
"loss": 0.0558,
"step": 8490
},
{
"epoch": 25.02,
"grad_norm": 0.007493423763662577,
"learning_rate": 1.5726335520149954e-05,
"loss": 0.0244,
"step": 8500
},
{
"epoch": 25.02,
"grad_norm": 0.005765008274465799,
"learning_rate": 1.5679475164011247e-05,
"loss": 0.0097,
"step": 8510
},
{
"epoch": 25.02,
"grad_norm": 0.3964434266090393,
"learning_rate": 1.563261480787254e-05,
"loss": 0.0018,
"step": 8520
},
{
"epoch": 25.02,
"grad_norm": 2.117600440979004,
"learning_rate": 1.5585754451733835e-05,
"loss": 0.0519,
"step": 8530
},
{
"epoch": 25.02,
"grad_norm": 0.10996006429195404,
"learning_rate": 1.5538894095595127e-05,
"loss": 0.0021,
"step": 8540
},
{
"epoch": 25.03,
"grad_norm": 0.0033693662844598293,
"learning_rate": 1.5492033739456423e-05,
"loss": 0.0811,
"step": 8550
},
{
"epoch": 25.03,
"grad_norm": 10.247265815734863,
"learning_rate": 1.544517338331771e-05,
"loss": 0.0058,
"step": 8560
},
{
"epoch": 25.03,
"grad_norm": 0.2538563013076782,
"learning_rate": 1.5398313027179007e-05,
"loss": 0.03,
"step": 8570
},
{
"epoch": 25.03,
"grad_norm": 0.00820246897637844,
"learning_rate": 1.53514526710403e-05,
"loss": 0.0945,
"step": 8580
},
{
"epoch": 25.03,
"eval_accuracy": 0.7922636103151862,
"eval_loss": 1.3046531677246094,
"eval_runtime": 33.5684,
"eval_samples_per_second": 20.793,
"eval_steps_per_second": 1.758,
"step": 8580
},
{
"epoch": 26.0,
"grad_norm": 0.0038312424439936876,
"learning_rate": 1.5304592314901595e-05,
"loss": 0.0008,
"step": 8590
},
{
"epoch": 26.0,
"grad_norm": 0.06059965118765831,
"learning_rate": 1.525773195876289e-05,
"loss": 0.0115,
"step": 8600
},
{
"epoch": 26.0,
"grad_norm": 0.004089924972504377,
"learning_rate": 1.521087160262418e-05,
"loss": 0.0002,
"step": 8610
},
{
"epoch": 26.0,
"grad_norm": 0.0043753357604146,
"learning_rate": 1.5164011246485474e-05,
"loss": 0.0727,
"step": 8620
},
{
"epoch": 26.0,
"grad_norm": 5.026497840881348,
"learning_rate": 1.5117150890346768e-05,
"loss": 0.0639,
"step": 8630
},
{
"epoch": 26.01,
"grad_norm": 0.014356585219502449,
"learning_rate": 1.5070290534208062e-05,
"loss": 0.036,
"step": 8640
},
{
"epoch": 26.01,
"grad_norm": 0.24706010520458221,
"learning_rate": 1.5023430178069353e-05,
"loss": 0.001,
"step": 8650
},
{
"epoch": 26.01,
"grad_norm": 0.06222040206193924,
"learning_rate": 1.4976569821930647e-05,
"loss": 0.0436,
"step": 8660
},
{
"epoch": 26.01,
"grad_norm": 0.0071958452463150024,
"learning_rate": 1.492970946579194e-05,
"loss": 0.0006,
"step": 8670
},
{
"epoch": 26.01,
"grad_norm": 0.011005638167262077,
"learning_rate": 1.4882849109653235e-05,
"loss": 0.0528,
"step": 8680
},
{
"epoch": 26.01,
"grad_norm": 0.09897544980049133,
"learning_rate": 1.4835988753514527e-05,
"loss": 0.0099,
"step": 8690
},
{
"epoch": 26.01,
"grad_norm": 1.022418737411499,
"learning_rate": 1.4789128397375821e-05,
"loss": 0.006,
"step": 8700
},
{
"epoch": 26.01,
"grad_norm": 0.0073012434877455235,
"learning_rate": 1.4742268041237115e-05,
"loss": 0.0008,
"step": 8710
},
{
"epoch": 26.01,
"grad_norm": 0.07601054012775421,
"learning_rate": 1.4695407685098409e-05,
"loss": 0.0256,
"step": 8720
},
{
"epoch": 26.01,
"grad_norm": 0.010327538475394249,
"learning_rate": 1.46485473289597e-05,
"loss": 0.0105,
"step": 8730
},
{
"epoch": 26.01,
"grad_norm": 0.4567221999168396,
"learning_rate": 1.4601686972820994e-05,
"loss": 0.0338,
"step": 8740
},
{
"epoch": 26.01,
"grad_norm": 0.03632340207695961,
"learning_rate": 1.4554826616682288e-05,
"loss": 0.0004,
"step": 8750
},
{
"epoch": 26.02,
"grad_norm": 0.005994404200464487,
"learning_rate": 1.4507966260543582e-05,
"loss": 0.0004,
"step": 8760
},
{
"epoch": 26.02,
"grad_norm": 0.004464009311050177,
"learning_rate": 1.4461105904404872e-05,
"loss": 0.0654,
"step": 8770
},
{
"epoch": 26.02,
"grad_norm": 0.5976660251617432,
"learning_rate": 1.4414245548266168e-05,
"loss": 0.0374,
"step": 8780
},
{
"epoch": 26.02,
"grad_norm": 0.003634576452895999,
"learning_rate": 1.4367385192127462e-05,
"loss": 0.0921,
"step": 8790
},
{
"epoch": 26.02,
"grad_norm": 0.005951224360615015,
"learning_rate": 1.4320524835988756e-05,
"loss": 0.0096,
"step": 8800
},
{
"epoch": 26.02,
"grad_norm": 0.0037890600506216288,
"learning_rate": 1.4273664479850047e-05,
"loss": 0.0005,
"step": 8810
},
{
"epoch": 26.02,
"grad_norm": 0.00492624519392848,
"learning_rate": 1.422680412371134e-05,
"loss": 0.0106,
"step": 8820
},
{
"epoch": 26.02,
"grad_norm": 0.0032419913914054632,
"learning_rate": 1.4179943767572635e-05,
"loss": 0.0068,
"step": 8830
},
{
"epoch": 26.02,
"grad_norm": 0.006293057929724455,
"learning_rate": 1.4133083411433929e-05,
"loss": 0.0646,
"step": 8840
},
{
"epoch": 26.02,
"grad_norm": 0.00998301524668932,
"learning_rate": 1.4086223055295219e-05,
"loss": 0.0163,
"step": 8850
},
{
"epoch": 26.02,
"grad_norm": 0.08308320492506027,
"learning_rate": 1.4039362699156513e-05,
"loss": 0.0003,
"step": 8860
},
{
"epoch": 26.02,
"grad_norm": 0.0030508143827319145,
"learning_rate": 1.3992502343017807e-05,
"loss": 0.064,
"step": 8870
},
{
"epoch": 26.03,
"grad_norm": 0.3473857045173645,
"learning_rate": 1.3945641986879101e-05,
"loss": 0.0006,
"step": 8880
},
{
"epoch": 26.03,
"grad_norm": 0.002809175057336688,
"learning_rate": 1.3898781630740395e-05,
"loss": 0.0004,
"step": 8890
},
{
"epoch": 26.03,
"grad_norm": 1.4648064374923706,
"learning_rate": 1.3851921274601688e-05,
"loss": 0.0478,
"step": 8900
},
{
"epoch": 26.03,
"grad_norm": 0.008449913933873177,
"learning_rate": 1.3805060918462982e-05,
"loss": 0.0528,
"step": 8910
},
{
"epoch": 26.03,
"eval_accuracy": 0.7893982808022922,
"eval_loss": 1.3669614791870117,
"eval_runtime": 34.4841,
"eval_samples_per_second": 20.241,
"eval_steps_per_second": 1.711,
"step": 8910
},
{
"epoch": 27.0,
"grad_norm": 0.009650280699133873,
"learning_rate": 1.3758200562324276e-05,
"loss": 0.0131,
"step": 8920
},
{
"epoch": 27.0,
"grad_norm": 0.00797184742987156,
"learning_rate": 1.371134020618557e-05,
"loss": 0.0228,
"step": 8930
},
{
"epoch": 27.0,
"grad_norm": 0.004884254653006792,
"learning_rate": 1.366447985004686e-05,
"loss": 0.0002,
"step": 8940
},
{
"epoch": 27.0,
"grad_norm": 0.0034428162034600973,
"learning_rate": 1.3617619493908154e-05,
"loss": 0.0017,
"step": 8950
},
{
"epoch": 27.0,
"grad_norm": 0.004850251600146294,
"learning_rate": 1.3570759137769448e-05,
"loss": 0.0611,
"step": 8960
},
{
"epoch": 27.01,
"grad_norm": 0.028377506881952286,
"learning_rate": 1.3523898781630742e-05,
"loss": 0.0002,
"step": 8970
},
{
"epoch": 27.01,
"grad_norm": 0.0027832870837301016,
"learning_rate": 1.3477038425492033e-05,
"loss": 0.0475,
"step": 8980
},
{
"epoch": 27.01,
"grad_norm": 0.11803574860095978,
"learning_rate": 1.3430178069353327e-05,
"loss": 0.0005,
"step": 8990
},
{
"epoch": 27.01,
"grad_norm": 0.23726528882980347,
"learning_rate": 1.338331771321462e-05,
"loss": 0.0003,
"step": 9000
},
{
"epoch": 27.01,
"grad_norm": 0.00947210006415844,
"learning_rate": 1.3336457357075915e-05,
"loss": 0.0004,
"step": 9010
},
{
"epoch": 27.01,
"grad_norm": 0.006413722410798073,
"learning_rate": 1.3289597000937207e-05,
"loss": 0.0002,
"step": 9020
},
{
"epoch": 27.01,
"grad_norm": 0.0032037473283708096,
"learning_rate": 1.3242736644798501e-05,
"loss": 0.0022,
"step": 9030
},
{
"epoch": 27.01,
"grad_norm": 0.0032357927411794662,
"learning_rate": 1.3195876288659795e-05,
"loss": 0.0336,
"step": 9040
},
{
"epoch": 27.01,
"grad_norm": 1.7443645000457764,
"learning_rate": 1.314901593252109e-05,
"loss": 0.0008,
"step": 9050
},
{
"epoch": 27.01,
"grad_norm": 0.002807668410241604,
"learning_rate": 1.310215557638238e-05,
"loss": 0.0002,
"step": 9060
},
{
"epoch": 27.01,
"grad_norm": 0.0025303384754806757,
"learning_rate": 1.3055295220243674e-05,
"loss": 0.0005,
"step": 9070
},
{
"epoch": 27.01,
"grad_norm": 0.37589800357818604,
"learning_rate": 1.3008434864104968e-05,
"loss": 0.0713,
"step": 9080
},
{
"epoch": 27.02,
"grad_norm": 0.002870983211323619,
"learning_rate": 1.2961574507966262e-05,
"loss": 0.0204,
"step": 9090
},
{
"epoch": 27.02,
"grad_norm": 0.017298812046647072,
"learning_rate": 1.2914714151827554e-05,
"loss": 0.0268,
"step": 9100
},
{
"epoch": 27.02,
"grad_norm": 0.00800339411944151,
"learning_rate": 1.2867853795688848e-05,
"loss": 0.1058,
"step": 9110
},
{
"epoch": 27.02,
"grad_norm": 0.0037049567326903343,
"learning_rate": 1.2820993439550142e-05,
"loss": 0.0002,
"step": 9120
},
{
"epoch": 27.02,
"grad_norm": 0.0456349216401577,
"learning_rate": 1.2774133083411436e-05,
"loss": 0.013,
"step": 9130
},
{
"epoch": 27.02,
"grad_norm": 0.003435475053265691,
"learning_rate": 1.2727272727272727e-05,
"loss": 0.0392,
"step": 9140
},
{
"epoch": 27.02,
"grad_norm": 0.0036417213268578053,
"learning_rate": 1.268041237113402e-05,
"loss": 0.0003,
"step": 9150
},
{
"epoch": 27.02,
"grad_norm": 0.01712891273200512,
"learning_rate": 1.2633552014995315e-05,
"loss": 0.0151,
"step": 9160
},
{
"epoch": 27.02,
"grad_norm": 0.22050367295742035,
"learning_rate": 1.2586691658856609e-05,
"loss": 0.0004,
"step": 9170
},
{
"epoch": 27.02,
"grad_norm": 0.07363563030958176,
"learning_rate": 1.2539831302717903e-05,
"loss": 0.0084,
"step": 9180
},
{
"epoch": 27.02,
"grad_norm": 0.0359061174094677,
"learning_rate": 1.2492970946579195e-05,
"loss": 0.0008,
"step": 9190
},
{
"epoch": 27.02,
"grad_norm": 0.007665242068469524,
"learning_rate": 1.2446110590440487e-05,
"loss": 0.0623,
"step": 9200
},
{
"epoch": 27.03,
"grad_norm": 0.0135026965290308,
"learning_rate": 1.2399250234301781e-05,
"loss": 0.011,
"step": 9210
},
{
"epoch": 27.03,
"grad_norm": 3.0960659980773926,
"learning_rate": 1.2352389878163074e-05,
"loss": 0.05,
"step": 9220
},
{
"epoch": 27.03,
"grad_norm": 0.01721220277249813,
"learning_rate": 1.2305529522024368e-05,
"loss": 0.0707,
"step": 9230
},
{
"epoch": 27.03,
"grad_norm": 0.004766266793012619,
"learning_rate": 1.2258669165885662e-05,
"loss": 0.0002,
"step": 9240
},
{
"epoch": 27.03,
"eval_accuracy": 0.7965616045845272,
"eval_loss": 1.3398090600967407,
"eval_runtime": 33.8835,
"eval_samples_per_second": 20.6,
"eval_steps_per_second": 1.741,
"step": 9240
},
{
"epoch": 28.0,
"grad_norm": 0.0029590607155114412,
"learning_rate": 1.2211808809746956e-05,
"loss": 0.0831,
"step": 9250
},
{
"epoch": 28.0,
"grad_norm": 0.0406717024743557,
"learning_rate": 1.2164948453608248e-05,
"loss": 0.0004,
"step": 9260
},
{
"epoch": 28.0,
"grad_norm": 0.03818991780281067,
"learning_rate": 1.2118088097469542e-05,
"loss": 0.0003,
"step": 9270
},
{
"epoch": 28.0,
"grad_norm": 0.007692787330597639,
"learning_rate": 1.2071227741330834e-05,
"loss": 0.0096,
"step": 9280
},
{
"epoch": 28.0,
"grad_norm": 0.08763420581817627,
"learning_rate": 1.2024367385192128e-05,
"loss": 0.0663,
"step": 9290
},
{
"epoch": 28.01,
"grad_norm": 0.00407880125567317,
"learning_rate": 1.197750702905342e-05,
"loss": 0.0031,
"step": 9300
},
{
"epoch": 28.01,
"grad_norm": 0.006175840273499489,
"learning_rate": 1.1930646672914715e-05,
"loss": 0.0865,
"step": 9310
},
{
"epoch": 28.01,
"grad_norm": 6.280770301818848,
"learning_rate": 1.1883786316776007e-05,
"loss": 0.0024,
"step": 9320
},
{
"epoch": 28.01,
"grad_norm": 12.87985610961914,
"learning_rate": 1.1836925960637301e-05,
"loss": 0.0468,
"step": 9330
},
{
"epoch": 28.01,
"grad_norm": 0.04478368163108826,
"learning_rate": 1.1790065604498595e-05,
"loss": 0.0002,
"step": 9340
},
{
"epoch": 28.01,
"grad_norm": 0.005779411643743515,
"learning_rate": 1.1743205248359889e-05,
"loss": 0.0002,
"step": 9350
},
{
"epoch": 28.01,
"grad_norm": 0.0021354747004806995,
"learning_rate": 1.1696344892221181e-05,
"loss": 0.0002,
"step": 9360
},
{
"epoch": 28.01,
"grad_norm": 5.800276756286621,
"learning_rate": 1.1649484536082475e-05,
"loss": 0.01,
"step": 9370
},
{
"epoch": 28.01,
"grad_norm": 0.0021325971465557814,
"learning_rate": 1.1602624179943768e-05,
"loss": 0.0133,
"step": 9380
},
{
"epoch": 28.01,
"grad_norm": 0.003749684663489461,
"learning_rate": 1.1555763823805062e-05,
"loss": 0.0003,
"step": 9390
},
{
"epoch": 28.01,
"grad_norm": 0.006699393503367901,
"learning_rate": 1.1508903467666354e-05,
"loss": 0.0058,
"step": 9400
},
{
"epoch": 28.01,
"grad_norm": 0.0032480955123901367,
"learning_rate": 1.1462043111527648e-05,
"loss": 0.0184,
"step": 9410
},
{
"epoch": 28.02,
"grad_norm": 0.0017323597567155957,
"learning_rate": 1.141518275538894e-05,
"loss": 0.0002,
"step": 9420
},
{
"epoch": 28.02,
"grad_norm": 0.0023274635896086693,
"learning_rate": 1.1368322399250234e-05,
"loss": 0.0002,
"step": 9430
},
{
"epoch": 28.02,
"grad_norm": 0.003413414815440774,
"learning_rate": 1.1321462043111528e-05,
"loss": 0.0014,
"step": 9440
},
{
"epoch": 28.02,
"grad_norm": 0.01558777131140232,
"learning_rate": 1.1274601686972822e-05,
"loss": 0.042,
"step": 9450
},
{
"epoch": 28.02,
"grad_norm": 0.003739135107025504,
"learning_rate": 1.1227741330834115e-05,
"loss": 0.0136,
"step": 9460
},
{
"epoch": 28.02,
"grad_norm": 0.043951474130153656,
"learning_rate": 1.1180880974695409e-05,
"loss": 0.0804,
"step": 9470
},
{
"epoch": 28.02,
"grad_norm": 0.0028257304802536964,
"learning_rate": 1.1134020618556703e-05,
"loss": 0.0002,
"step": 9480
},
{
"epoch": 28.02,
"grad_norm": 0.0037733283825218678,
"learning_rate": 1.1087160262417995e-05,
"loss": 0.0004,
"step": 9490
},
{
"epoch": 28.02,
"grad_norm": 0.056008391082286835,
"learning_rate": 1.1040299906279289e-05,
"loss": 0.0319,
"step": 9500
},
{
"epoch": 28.02,
"grad_norm": 0.0031692145857959986,
"learning_rate": 1.0993439550140581e-05,
"loss": 0.0685,
"step": 9510
},
{
"epoch": 28.02,
"grad_norm": 0.004492649342864752,
"learning_rate": 1.0946579194001875e-05,
"loss": 0.0137,
"step": 9520
},
{
"epoch": 28.02,
"grad_norm": 0.0059276544488966465,
"learning_rate": 1.0899718837863168e-05,
"loss": 0.0007,
"step": 9530
},
{
"epoch": 28.03,
"grad_norm": 0.004752593580633402,
"learning_rate": 1.0852858481724462e-05,
"loss": 0.0193,
"step": 9540
},
{
"epoch": 28.03,
"grad_norm": 0.006295239552855492,
"learning_rate": 1.0805998125585754e-05,
"loss": 0.0037,
"step": 9550
},
{
"epoch": 28.03,
"grad_norm": 41.76048278808594,
"learning_rate": 1.0759137769447048e-05,
"loss": 0.0485,
"step": 9560
},
{
"epoch": 28.03,
"grad_norm": 0.0014590555801987648,
"learning_rate": 1.0712277413308342e-05,
"loss": 0.0562,
"step": 9570
},
{
"epoch": 28.03,
"eval_accuracy": 0.7979942693409742,
"eval_loss": 1.3444451093673706,
"eval_runtime": 34.4815,
"eval_samples_per_second": 20.243,
"eval_steps_per_second": 1.711,
"step": 9570
},
{
"epoch": 29.0,
"grad_norm": 0.00453572254627943,
"learning_rate": 1.0665417057169636e-05,
"loss": 0.0002,
"step": 9580
},
{
"epoch": 29.0,
"grad_norm": 0.04278896749019623,
"learning_rate": 1.0618556701030928e-05,
"loss": 0.0332,
"step": 9590
},
{
"epoch": 29.0,
"grad_norm": 0.0018639545887708664,
"learning_rate": 1.0571696344892222e-05,
"loss": 0.0002,
"step": 9600
},
{
"epoch": 29.0,
"grad_norm": 0.005231024231761694,
"learning_rate": 1.0524835988753515e-05,
"loss": 0.0009,
"step": 9610
},
{
"epoch": 29.0,
"grad_norm": 0.0017461972311139107,
"learning_rate": 1.0477975632614809e-05,
"loss": 0.0001,
"step": 9620
},
{
"epoch": 29.01,
"grad_norm": 7.924661636352539,
"learning_rate": 1.0431115276476101e-05,
"loss": 0.0308,
"step": 9630
},
{
"epoch": 29.01,
"grad_norm": 0.012182795442640781,
"learning_rate": 1.0384254920337395e-05,
"loss": 0.0043,
"step": 9640
},
{
"epoch": 29.01,
"grad_norm": 0.0014665969647467136,
"learning_rate": 1.0337394564198687e-05,
"loss": 0.0002,
"step": 9650
},
{
"epoch": 29.01,
"grad_norm": 0.0027665491215884686,
"learning_rate": 1.0290534208059981e-05,
"loss": 0.0097,
"step": 9660
},
{
"epoch": 29.01,
"grad_norm": 0.0026171233039349318,
"learning_rate": 1.0243673851921275e-05,
"loss": 0.0543,
"step": 9670
},
{
"epoch": 29.01,
"grad_norm": 0.005574346520006657,
"learning_rate": 1.019681349578257e-05,
"loss": 0.0001,
"step": 9680
},
{
"epoch": 29.01,
"grad_norm": 0.07008225470781326,
"learning_rate": 1.0149953139643862e-05,
"loss": 0.0321,
"step": 9690
},
{
"epoch": 29.01,
"grad_norm": 58.78215408325195,
"learning_rate": 1.0103092783505156e-05,
"loss": 0.106,
"step": 9700
},
{
"epoch": 29.01,
"grad_norm": 0.010860403068363667,
"learning_rate": 1.0056232427366448e-05,
"loss": 0.0625,
"step": 9710
},
{
"epoch": 29.01,
"grad_norm": 0.09613881260156631,
"learning_rate": 1.0009372071227742e-05,
"loss": 0.007,
"step": 9720
},
{
"epoch": 29.01,
"grad_norm": 0.01900539919734001,
"learning_rate": 9.962511715089034e-06,
"loss": 0.0109,
"step": 9730
},
{
"epoch": 29.01,
"grad_norm": 0.009411687031388283,
"learning_rate": 9.915651358950328e-06,
"loss": 0.0018,
"step": 9740
},
{
"epoch": 29.02,
"grad_norm": 16.337764739990234,
"learning_rate": 9.86879100281162e-06,
"loss": 0.0593,
"step": 9750
},
{
"epoch": 29.02,
"grad_norm": 0.028839366510510445,
"learning_rate": 9.821930646672915e-06,
"loss": 0.0604,
"step": 9760
},
{
"epoch": 29.02,
"grad_norm": 0.017779843881726265,
"learning_rate": 9.775070290534209e-06,
"loss": 0.0365,
"step": 9770
},
{
"epoch": 29.02,
"grad_norm": 0.003166783368214965,
"learning_rate": 9.728209934395503e-06,
"loss": 0.0053,
"step": 9780
},
{
"epoch": 29.02,
"grad_norm": 0.0021175253205001354,
"learning_rate": 9.681349578256797e-06,
"loss": 0.0001,
"step": 9790
},
{
"epoch": 29.02,
"grad_norm": 0.20540370047092438,
"learning_rate": 9.634489222118089e-06,
"loss": 0.0003,
"step": 9800
},
{
"epoch": 29.02,
"grad_norm": 0.002469886327162385,
"learning_rate": 9.587628865979383e-06,
"loss": 0.0002,
"step": 9810
},
{
"epoch": 29.02,
"grad_norm": 0.012953460216522217,
"learning_rate": 9.540768509840675e-06,
"loss": 0.0271,
"step": 9820
},
{
"epoch": 29.02,
"grad_norm": 0.02899201773107052,
"learning_rate": 9.49390815370197e-06,
"loss": 0.0414,
"step": 9830
},
{
"epoch": 29.02,
"grad_norm": 0.0029837109614163637,
"learning_rate": 9.447047797563262e-06,
"loss": 0.0002,
"step": 9840
},
{
"epoch": 29.02,
"grad_norm": 61.894805908203125,
"learning_rate": 9.400187441424556e-06,
"loss": 0.0608,
"step": 9850
},
{
"epoch": 29.02,
"grad_norm": 0.003072848543524742,
"learning_rate": 9.353327085285848e-06,
"loss": 0.0014,
"step": 9860
},
{
"epoch": 29.03,
"grad_norm": 0.0036488294135779142,
"learning_rate": 9.306466729147142e-06,
"loss": 0.001,
"step": 9870
},
{
"epoch": 29.03,
"grad_norm": 0.002994240028783679,
"learning_rate": 9.259606373008434e-06,
"loss": 0.0007,
"step": 9880
},
{
"epoch": 29.03,
"grad_norm": 0.01359494123607874,
"learning_rate": 9.212746016869728e-06,
"loss": 0.0007,
"step": 9890
},
{
"epoch": 29.03,
"grad_norm": 0.0029486478306353092,
"learning_rate": 9.165885660731022e-06,
"loss": 0.0002,
"step": 9900
},
{
"epoch": 29.03,
"eval_accuracy": 0.8166189111747851,
"eval_loss": 1.2678812742233276,
"eval_runtime": 33.8703,
"eval_samples_per_second": 20.608,
"eval_steps_per_second": 1.742,
"step": 9900
},
{
"epoch": 30.0,
"grad_norm": 0.0020161494612693787,
"learning_rate": 9.119025304592316e-06,
"loss": 0.0002,
"step": 9910
},
{
"epoch": 30.0,
"grad_norm": 0.001892567495815456,
"learning_rate": 9.072164948453609e-06,
"loss": 0.0069,
"step": 9920
},
{
"epoch": 30.0,
"grad_norm": 0.003333768341690302,
"learning_rate": 9.025304592314903e-06,
"loss": 0.0017,
"step": 9930
},
{
"epoch": 30.0,
"grad_norm": 5.158819675445557,
"learning_rate": 8.978444236176195e-06,
"loss": 0.0472,
"step": 9940
},
{
"epoch": 30.0,
"grad_norm": 0.010453186929225922,
"learning_rate": 8.931583880037489e-06,
"loss": 0.0017,
"step": 9950
},
{
"epoch": 30.01,
"grad_norm": 0.006460112985223532,
"learning_rate": 8.884723523898781e-06,
"loss": 0.0028,
"step": 9960
},
{
"epoch": 30.01,
"grad_norm": 1.868633508682251,
"learning_rate": 8.837863167760075e-06,
"loss": 0.0004,
"step": 9970
},
{
"epoch": 30.01,
"grad_norm": 0.0062409755773842335,
"learning_rate": 8.791002811621368e-06,
"loss": 0.0182,
"step": 9980
},
{
"epoch": 30.01,
"grad_norm": 0.002611867617815733,
"learning_rate": 8.744142455482662e-06,
"loss": 0.0002,
"step": 9990
},
{
"epoch": 30.01,
"grad_norm": 0.003704243106767535,
"learning_rate": 8.697282099343956e-06,
"loss": 0.0001,
"step": 10000
},
{
"epoch": 30.01,
"grad_norm": 0.0019702170975506306,
"learning_rate": 8.65042174320525e-06,
"loss": 0.0574,
"step": 10010
},
{
"epoch": 30.01,
"grad_norm": 0.006285363808274269,
"learning_rate": 8.603561387066542e-06,
"loss": 0.0001,
"step": 10020
},
{
"epoch": 30.01,
"grad_norm": 0.0028792780358344316,
"learning_rate": 8.556701030927836e-06,
"loss": 0.0059,
"step": 10030
},
{
"epoch": 30.01,
"grad_norm": 0.0020247281063348055,
"learning_rate": 8.509840674789128e-06,
"loss": 0.0005,
"step": 10040
},
{
"epoch": 30.01,
"grad_norm": 0.0075753917917609215,
"learning_rate": 8.462980318650422e-06,
"loss": 0.0039,
"step": 10050
},
{
"epoch": 30.01,
"grad_norm": 0.0014275303110480309,
"learning_rate": 8.416119962511715e-06,
"loss": 0.0304,
"step": 10060
},
{
"epoch": 30.01,
"grad_norm": 0.0023228314239531755,
"learning_rate": 8.369259606373009e-06,
"loss": 0.0002,
"step": 10070
},
{
"epoch": 30.02,
"grad_norm": 0.2118721902370453,
"learning_rate": 8.322399250234301e-06,
"loss": 0.0763,
"step": 10080
},
{
"epoch": 30.02,
"grad_norm": 0.12885122001171112,
"learning_rate": 8.275538894095595e-06,
"loss": 0.0003,
"step": 10090
},
{
"epoch": 30.02,
"grad_norm": 0.01231481321156025,
"learning_rate": 8.228678537956889e-06,
"loss": 0.0004,
"step": 10100
},
{
"epoch": 30.02,
"grad_norm": 0.003690751502290368,
"learning_rate": 8.181818181818183e-06,
"loss": 0.0019,
"step": 10110
},
{
"epoch": 30.02,
"grad_norm": 0.0029746764339506626,
"learning_rate": 8.134957825679477e-06,
"loss": 0.058,
"step": 10120
},
{
"epoch": 30.02,
"grad_norm": 0.003089478937909007,
"learning_rate": 8.08809746954077e-06,
"loss": 0.0789,
"step": 10130
},
{
"epoch": 30.02,
"grad_norm": 0.0020090111065655947,
"learning_rate": 8.041237113402063e-06,
"loss": 0.0001,
"step": 10140
},
{
"epoch": 30.02,
"grad_norm": 0.23149579763412476,
"learning_rate": 7.994376757263356e-06,
"loss": 0.0285,
"step": 10150
},
{
"epoch": 30.02,
"grad_norm": 0.006695437245070934,
"learning_rate": 7.94751640112465e-06,
"loss": 0.0811,
"step": 10160
},
{
"epoch": 30.02,
"grad_norm": 0.03278821334242821,
"learning_rate": 7.900656044985942e-06,
"loss": 0.0118,
"step": 10170
},
{
"epoch": 30.02,
"grad_norm": 0.002686940599232912,
"learning_rate": 7.853795688847236e-06,
"loss": 0.0013,
"step": 10180
},
{
"epoch": 30.02,
"grad_norm": 0.004242202267050743,
"learning_rate": 7.806935332708528e-06,
"loss": 0.0229,
"step": 10190
},
{
"epoch": 30.03,
"grad_norm": 0.12063409388065338,
"learning_rate": 7.760074976569822e-06,
"loss": 0.0447,
"step": 10200
},
{
"epoch": 30.03,
"grad_norm": 0.0015561155742034316,
"learning_rate": 7.713214620431115e-06,
"loss": 0.0445,
"step": 10210
},
{
"epoch": 30.03,
"grad_norm": 0.003858069656416774,
"learning_rate": 7.666354264292409e-06,
"loss": 0.0108,
"step": 10220
},
{
"epoch": 30.03,
"grad_norm": 0.001974466722458601,
"learning_rate": 7.619493908153702e-06,
"loss": 0.0018,
"step": 10230
},
{
"epoch": 30.03,
"eval_accuracy": 0.7965616045845272,
"eval_loss": 1.3747639656066895,
"eval_runtime": 33.9072,
"eval_samples_per_second": 20.586,
"eval_steps_per_second": 1.74,
"step": 10230
},
{
"epoch": 31.0,
"grad_norm": 0.0030528667848557234,
"learning_rate": 7.572633552014996e-06,
"loss": 0.0006,
"step": 10240
},
{
"epoch": 31.0,
"grad_norm": 0.07026159763336182,
"learning_rate": 7.525773195876289e-06,
"loss": 0.0002,
"step": 10250
},
{
"epoch": 31.0,
"grad_norm": 0.013108909130096436,
"learning_rate": 7.478912839737583e-06,
"loss": 0.0083,
"step": 10260
},
{
"epoch": 31.0,
"grad_norm": 0.003818312892690301,
"learning_rate": 7.432052483598875e-06,
"loss": 0.0001,
"step": 10270
},
{
"epoch": 31.0,
"grad_norm": 53.924110412597656,
"learning_rate": 7.385192127460169e-06,
"loss": 0.0049,
"step": 10280
},
{
"epoch": 31.01,
"grad_norm": 6.479648590087891,
"learning_rate": 7.3383317713214616e-06,
"loss": 0.066,
"step": 10290
},
{
"epoch": 31.01,
"grad_norm": 0.005377662368118763,
"learning_rate": 7.2914714151827556e-06,
"loss": 0.0042,
"step": 10300
},
{
"epoch": 31.01,
"grad_norm": 0.0028942872304469347,
"learning_rate": 7.244611059044049e-06,
"loss": 0.0006,
"step": 10310
},
{
"epoch": 31.01,
"grad_norm": 0.0026112585328519344,
"learning_rate": 7.197750702905343e-06,
"loss": 0.0074,
"step": 10320
},
{
"epoch": 31.01,
"grad_norm": 0.00254084006883204,
"learning_rate": 7.150890346766635e-06,
"loss": 0.0001,
"step": 10330
},
{
"epoch": 31.01,
"grad_norm": 0.027123264968395233,
"learning_rate": 7.104029990627929e-06,
"loss": 0.0002,
"step": 10340
},
{
"epoch": 31.01,
"grad_norm": 0.0028091182466596365,
"learning_rate": 7.057169634489222e-06,
"loss": 0.0005,
"step": 10350
},
{
"epoch": 31.01,
"grad_norm": 0.0014785215025767684,
"learning_rate": 7.010309278350516e-06,
"loss": 0.0001,
"step": 10360
},
{
"epoch": 31.01,
"grad_norm": 0.002326256362721324,
"learning_rate": 6.9634489222118085e-06,
"loss": 0.0312,
"step": 10370
},
{
"epoch": 31.01,
"grad_norm": 0.020408490672707558,
"learning_rate": 6.9165885660731026e-06,
"loss": 0.0289,
"step": 10380
},
{
"epoch": 31.01,
"grad_norm": 0.0020945239812135696,
"learning_rate": 6.869728209934395e-06,
"loss": 0.0003,
"step": 10390
},
{
"epoch": 31.01,
"grad_norm": 0.050324421375989914,
"learning_rate": 6.822867853795689e-06,
"loss": 0.0001,
"step": 10400
},
{
"epoch": 31.02,
"grad_norm": 0.09508836269378662,
"learning_rate": 6.776007497656983e-06,
"loss": 0.0003,
"step": 10410
},
{
"epoch": 31.02,
"grad_norm": 0.0019429631065577269,
"learning_rate": 6.729147141518276e-06,
"loss": 0.0076,
"step": 10420
},
{
"epoch": 31.02,
"grad_norm": 0.0016007090453058481,
"learning_rate": 6.68228678537957e-06,
"loss": 0.0132,
"step": 10430
},
{
"epoch": 31.02,
"grad_norm": 4.64786958694458,
"learning_rate": 6.635426429240862e-06,
"loss": 0.0006,
"step": 10440
},
{
"epoch": 31.02,
"grad_norm": 63.0573616027832,
"learning_rate": 6.588566073102156e-06,
"loss": 0.0605,
"step": 10450
},
{
"epoch": 31.02,
"grad_norm": 0.001429658499546349,
"learning_rate": 6.541705716963449e-06,
"loss": 0.0001,
"step": 10460
},
{
"epoch": 31.02,
"grad_norm": 0.012436199001967907,
"learning_rate": 6.494845360824743e-06,
"loss": 0.0055,
"step": 10470
},
{
"epoch": 31.02,
"grad_norm": 0.002089001704007387,
"learning_rate": 6.447985004686036e-06,
"loss": 0.0004,
"step": 10480
},
{
"epoch": 31.02,
"grad_norm": 0.006997866556048393,
"learning_rate": 6.40112464854733e-06,
"loss": 0.1141,
"step": 10490
},
{
"epoch": 31.02,
"grad_norm": 0.018327118828892708,
"learning_rate": 6.354264292408622e-06,
"loss": 0.0071,
"step": 10500
},
{
"epoch": 31.02,
"grad_norm": 0.30546697974205017,
"learning_rate": 6.307403936269916e-06,
"loss": 0.0002,
"step": 10510
},
{
"epoch": 31.02,
"grad_norm": 64.27317810058594,
"learning_rate": 6.260543580131209e-06,
"loss": 0.0119,
"step": 10520
},
{
"epoch": 31.03,
"grad_norm": 0.0014340607449412346,
"learning_rate": 6.2136832239925025e-06,
"loss": 0.0002,
"step": 10530
},
{
"epoch": 31.03,
"grad_norm": 0.012228988111019135,
"learning_rate": 6.166822867853796e-06,
"loss": 0.0011,
"step": 10540
},
{
"epoch": 31.03,
"grad_norm": 0.0018130154348909855,
"learning_rate": 6.11996251171509e-06,
"loss": 0.0012,
"step": 10550
},
{
"epoch": 31.03,
"grad_norm": 0.0017277301521971822,
"learning_rate": 6.073102155576383e-06,
"loss": 0.0371,
"step": 10560
},
{
"epoch": 31.03,
"eval_accuracy": 0.8080229226361032,
"eval_loss": 1.40940523147583,
"eval_runtime": 33.9429,
"eval_samples_per_second": 20.564,
"eval_steps_per_second": 1.738,
"step": 10560
},
{
"epoch": 32.0,
"grad_norm": 0.0025587286800146103,
"learning_rate": 6.026241799437676e-06,
"loss": 0.0003,
"step": 10570
},
{
"epoch": 32.0,
"grad_norm": 0.002236501080915332,
"learning_rate": 5.97938144329897e-06,
"loss": 0.009,
"step": 10580
},
{
"epoch": 32.0,
"grad_norm": 0.00986202247440815,
"learning_rate": 5.932521087160263e-06,
"loss": 0.0003,
"step": 10590
},
{
"epoch": 32.0,
"grad_norm": 0.001730900607071817,
"learning_rate": 5.885660731021556e-06,
"loss": 0.0176,
"step": 10600
},
{
"epoch": 32.0,
"grad_norm": 0.004606322385370731,
"learning_rate": 5.8388003748828495e-06,
"loss": 0.0001,
"step": 10610
},
{
"epoch": 32.01,
"grad_norm": 0.00272651226259768,
"learning_rate": 5.791940018744143e-06,
"loss": 0.0002,
"step": 10620
},
{
"epoch": 32.01,
"grad_norm": 0.0064768255688250065,
"learning_rate": 5.745079662605436e-06,
"loss": 0.0012,
"step": 10630
},
{
"epoch": 32.01,
"grad_norm": 0.002017725259065628,
"learning_rate": 5.69821930646673e-06,
"loss": 0.0001,
"step": 10640
},
{
"epoch": 32.01,
"grad_norm": 0.0018518833676353097,
"learning_rate": 5.651358950328023e-06,
"loss": 0.0001,
"step": 10650
},
{
"epoch": 32.01,
"grad_norm": 0.002825092989951372,
"learning_rate": 5.604498594189316e-06,
"loss": 0.0002,
"step": 10660
},
{
"epoch": 32.01,
"grad_norm": 0.0021178831811994314,
"learning_rate": 5.557638238050609e-06,
"loss": 0.0013,
"step": 10670
},
{
"epoch": 32.01,
"grad_norm": 0.0012423048028722405,
"learning_rate": 5.5107778819119025e-06,
"loss": 0.072,
"step": 10680
},
{
"epoch": 32.01,
"grad_norm": 0.002971925074234605,
"learning_rate": 5.4639175257731965e-06,
"loss": 0.0001,
"step": 10690
},
{
"epoch": 32.01,
"grad_norm": 0.001466022222302854,
"learning_rate": 5.41705716963449e-06,
"loss": 0.0001,
"step": 10700
},
{
"epoch": 32.01,
"grad_norm": 0.002207581652328372,
"learning_rate": 5.370196813495783e-06,
"loss": 0.0001,
"step": 10710
},
{
"epoch": 32.01,
"grad_norm": 0.0013427763478830457,
"learning_rate": 5.323336457357076e-06,
"loss": 0.0543,
"step": 10720
},
{
"epoch": 32.01,
"grad_norm": 0.0013656431110575795,
"learning_rate": 5.276476101218369e-06,
"loss": 0.0191,
"step": 10730
},
{
"epoch": 32.02,
"grad_norm": 0.5649217367172241,
"learning_rate": 5.229615745079663e-06,
"loss": 0.0178,
"step": 10740
},
{
"epoch": 32.02,
"grad_norm": 0.0019292469369247556,
"learning_rate": 5.182755388940956e-06,
"loss": 0.008,
"step": 10750
},
{
"epoch": 32.02,
"grad_norm": 0.005805399268865585,
"learning_rate": 5.1358950328022495e-06,
"loss": 0.0001,
"step": 10760
},
{
"epoch": 32.02,
"grad_norm": 1.1440871953964233,
"learning_rate": 5.089034676663543e-06,
"loss": 0.067,
"step": 10770
},
{
"epoch": 32.02,
"grad_norm": 0.0036515570245683193,
"learning_rate": 5.042174320524836e-06,
"loss": 0.0027,
"step": 10780
},
{
"epoch": 32.02,
"grad_norm": 0.0021176172886043787,
"learning_rate": 4.995313964386129e-06,
"loss": 0.0175,
"step": 10790
},
{
"epoch": 32.02,
"grad_norm": 0.04823027923703194,
"learning_rate": 4.948453608247423e-06,
"loss": 0.0059,
"step": 10800
},
{
"epoch": 32.02,
"grad_norm": 0.006527293939143419,
"learning_rate": 4.901593252108716e-06,
"loss": 0.0001,
"step": 10810
},
{
"epoch": 32.02,
"grad_norm": 0.02050858922302723,
"learning_rate": 4.854732895970009e-06,
"loss": 0.0013,
"step": 10820
},
{
"epoch": 32.02,
"grad_norm": 0.0015726288547739387,
"learning_rate": 4.8078725398313025e-06,
"loss": 0.0001,
"step": 10830
},
{
"epoch": 32.02,
"grad_norm": 0.00158556061796844,
"learning_rate": 4.761012183692596e-06,
"loss": 0.0394,
"step": 10840
},
{
"epoch": 32.02,
"grad_norm": 0.0019515565363690257,
"learning_rate": 4.71415182755389e-06,
"loss": 0.0048,
"step": 10850
},
{
"epoch": 32.03,
"grad_norm": 0.0021331189200282097,
"learning_rate": 4.667291471415184e-06,
"loss": 0.0001,
"step": 10860
},
{
"epoch": 32.03,
"grad_norm": 0.002155827358365059,
"learning_rate": 4.620431115276477e-06,
"loss": 0.0003,
"step": 10870
},
{
"epoch": 32.03,
"grad_norm": 0.0019439981551840901,
"learning_rate": 4.57357075913777e-06,
"loss": 0.0002,
"step": 10880
},
{
"epoch": 32.03,
"grad_norm": 0.0034947495441883802,
"learning_rate": 4.526710402999063e-06,
"loss": 0.0157,
"step": 10890
},
{
"epoch": 32.03,
"eval_accuracy": 0.8022922636103151,
"eval_loss": 1.4391096830368042,
"eval_runtime": 33.8979,
"eval_samples_per_second": 20.591,
"eval_steps_per_second": 1.741,
"step": 10890
},
{
"epoch": 33.0,
"grad_norm": 0.007783001288771629,
"learning_rate": 4.479850046860356e-06,
"loss": 0.0002,
"step": 10900
},
{
"epoch": 33.0,
"grad_norm": 0.004935835022479296,
"learning_rate": 4.43298969072165e-06,
"loss": 0.0065,
"step": 10910
},
{
"epoch": 33.0,
"grad_norm": 0.0015104643534868956,
"learning_rate": 4.3861293345829435e-06,
"loss": 0.0765,
"step": 10920
},
{
"epoch": 33.0,
"grad_norm": 0.0014216667041182518,
"learning_rate": 4.339268978444237e-06,
"loss": 0.0018,
"step": 10930
},
{
"epoch": 33.0,
"grad_norm": 0.022745607420802116,
"learning_rate": 4.29240862230553e-06,
"loss": 0.0081,
"step": 10940
},
{
"epoch": 33.01,
"grad_norm": 0.0021193595603108406,
"learning_rate": 4.245548266166823e-06,
"loss": 0.0003,
"step": 10950
},
{
"epoch": 33.01,
"grad_norm": 0.0011568386107683182,
"learning_rate": 4.198687910028116e-06,
"loss": 0.0001,
"step": 10960
},
{
"epoch": 33.01,
"grad_norm": 0.019418692216277122,
"learning_rate": 4.15182755388941e-06,
"loss": 0.1781,
"step": 10970
},
{
"epoch": 33.01,
"grad_norm": 34.7269401550293,
"learning_rate": 4.104967197750703e-06,
"loss": 0.0067,
"step": 10980
},
{
"epoch": 33.01,
"grad_norm": 0.0017960992408916354,
"learning_rate": 4.0581068416119964e-06,
"loss": 0.0001,
"step": 10990
},
{
"epoch": 33.01,
"grad_norm": 0.01256940234452486,
"learning_rate": 4.01124648547329e-06,
"loss": 0.0003,
"step": 11000
},
{
"epoch": 33.01,
"grad_norm": 0.0063404180109500885,
"learning_rate": 3.964386129334583e-06,
"loss": 0.0001,
"step": 11010
},
{
"epoch": 33.01,
"grad_norm": 0.018559589982032776,
"learning_rate": 3.917525773195877e-06,
"loss": 0.0001,
"step": 11020
},
{
"epoch": 33.01,
"grad_norm": 0.0015321632381528616,
"learning_rate": 3.87066541705717e-06,
"loss": 0.0001,
"step": 11030
},
{
"epoch": 33.01,
"grad_norm": 0.020633699372410774,
"learning_rate": 3.823805060918463e-06,
"loss": 0.0001,
"step": 11040
},
{
"epoch": 33.01,
"grad_norm": 0.0021525041665881872,
"learning_rate": 3.7769447047797563e-06,
"loss": 0.0601,
"step": 11050
},
{
"epoch": 33.01,
"grad_norm": 0.0019328080816194415,
"learning_rate": 3.73008434864105e-06,
"loss": 0.0002,
"step": 11060
},
{
"epoch": 33.02,
"grad_norm": 0.004199670627713203,
"learning_rate": 3.683223992502343e-06,
"loss": 0.0377,
"step": 11070
},
{
"epoch": 33.02,
"grad_norm": 0.0021045091561973095,
"learning_rate": 3.636363636363636e-06,
"loss": 0.0001,
"step": 11080
},
{
"epoch": 33.02,
"grad_norm": 0.0019059345358982682,
"learning_rate": 3.5895032802249297e-06,
"loss": 0.0001,
"step": 11090
},
{
"epoch": 33.02,
"grad_norm": 0.002579369815066457,
"learning_rate": 3.542642924086223e-06,
"loss": 0.0373,
"step": 11100
},
{
"epoch": 33.02,
"grad_norm": 0.0014094141079112887,
"learning_rate": 3.4957825679475165e-06,
"loss": 0.0001,
"step": 11110
},
{
"epoch": 33.02,
"grad_norm": 0.0017881103558465838,
"learning_rate": 3.4489222118088097e-06,
"loss": 0.0093,
"step": 11120
},
{
"epoch": 33.02,
"grad_norm": 0.001685052178800106,
"learning_rate": 3.402061855670103e-06,
"loss": 0.0556,
"step": 11130
},
{
"epoch": 33.02,
"grad_norm": 0.004337169695645571,
"learning_rate": 3.3552014995313964e-06,
"loss": 0.0396,
"step": 11140
},
{
"epoch": 33.02,
"grad_norm": 0.011138683184981346,
"learning_rate": 3.3083411433926896e-06,
"loss": 0.0001,
"step": 11150
},
{
"epoch": 33.02,
"grad_norm": 0.0024086865596473217,
"learning_rate": 3.2614807872539836e-06,
"loss": 0.0001,
"step": 11160
},
{
"epoch": 33.02,
"grad_norm": 0.0029931641183793545,
"learning_rate": 3.2146204311152767e-06,
"loss": 0.0001,
"step": 11170
},
{
"epoch": 33.02,
"grad_norm": 0.42393574118614197,
"learning_rate": 3.1677600749765703e-06,
"loss": 0.0016,
"step": 11180
},
{
"epoch": 33.03,
"grad_norm": 0.0025856320280581713,
"learning_rate": 3.120899718837863e-06,
"loss": 0.0001,
"step": 11190
},
{
"epoch": 33.03,
"grad_norm": 0.0019572244491428137,
"learning_rate": 3.0740393626991566e-06,
"loss": 0.0001,
"step": 11200
},
{
"epoch": 33.03,
"grad_norm": 0.3476734459400177,
"learning_rate": 3.02717900656045e-06,
"loss": 0.0044,
"step": 11210
},
{
"epoch": 33.03,
"grad_norm": 0.0009709845762699842,
"learning_rate": 2.9803186504217434e-06,
"loss": 0.0001,
"step": 11220
},
{
"epoch": 33.03,
"eval_accuracy": 0.8051575931232091,
"eval_loss": 1.383094310760498,
"eval_runtime": 33.8221,
"eval_samples_per_second": 20.637,
"eval_steps_per_second": 1.744,
"step": 11220
},
{
"epoch": 34.0,
"grad_norm": 0.0015264974208548665,
"learning_rate": 2.9334582942830366e-06,
"loss": 0.0001,
"step": 11230
},
{
"epoch": 34.0,
"grad_norm": 0.003399324370548129,
"learning_rate": 2.88659793814433e-06,
"loss": 0.0514,
"step": 11240
},
{
"epoch": 34.0,
"grad_norm": 0.0033612081315368414,
"learning_rate": 2.8397375820056233e-06,
"loss": 0.0001,
"step": 11250
},
{
"epoch": 34.0,
"grad_norm": 0.0017946057487279177,
"learning_rate": 2.792877225866917e-06,
"loss": 0.0001,
"step": 11260
},
{
"epoch": 34.0,
"grad_norm": 0.001860388438217342,
"learning_rate": 2.74601686972821e-06,
"loss": 0.0001,
"step": 11270
},
{
"epoch": 34.01,
"grad_norm": 0.0021725373808294535,
"learning_rate": 2.6991565135895036e-06,
"loss": 0.0001,
"step": 11280
},
{
"epoch": 34.01,
"grad_norm": 0.030490437522530556,
"learning_rate": 2.652296157450797e-06,
"loss": 0.0001,
"step": 11290
},
{
"epoch": 34.01,
"grad_norm": 0.0011160429567098618,
"learning_rate": 2.60543580131209e-06,
"loss": 0.0686,
"step": 11300
},
{
"epoch": 34.01,
"grad_norm": 0.002387237036600709,
"learning_rate": 2.5585754451733835e-06,
"loss": 0.0074,
"step": 11310
},
{
"epoch": 34.01,
"grad_norm": 0.002495302353054285,
"learning_rate": 2.5117150890346767e-06,
"loss": 0.0002,
"step": 11320
},
{
"epoch": 34.01,
"grad_norm": 0.022143444046378136,
"learning_rate": 2.46485473289597e-06,
"loss": 0.0002,
"step": 11330
},
{
"epoch": 34.01,
"grad_norm": 0.002796097891405225,
"learning_rate": 2.4179943767572634e-06,
"loss": 0.0001,
"step": 11340
},
{
"epoch": 34.01,
"grad_norm": 0.0035200004931539297,
"learning_rate": 2.3711340206185566e-06,
"loss": 0.0428,
"step": 11350
},
{
"epoch": 34.01,
"grad_norm": 27.465776443481445,
"learning_rate": 2.32427366447985e-06,
"loss": 0.059,
"step": 11360
},
{
"epoch": 34.01,
"grad_norm": 0.00824726838618517,
"learning_rate": 2.2774133083411434e-06,
"loss": 0.0537,
"step": 11370
},
{
"epoch": 34.01,
"grad_norm": 0.0025804354809224606,
"learning_rate": 2.2305529522024365e-06,
"loss": 0.0001,
"step": 11380
},
{
"epoch": 34.01,
"grad_norm": 0.01662873476743698,
"learning_rate": 2.1836925960637305e-06,
"loss": 0.0068,
"step": 11390
},
{
"epoch": 34.02,
"grad_norm": 0.0012878773268312216,
"learning_rate": 2.1368322399250237e-06,
"loss": 0.0001,
"step": 11400
},
{
"epoch": 34.02,
"grad_norm": 0.0017231220845133066,
"learning_rate": 2.089971883786317e-06,
"loss": 0.0002,
"step": 11410
},
{
"epoch": 34.02,
"grad_norm": 0.007844923995435238,
"learning_rate": 2.0431115276476104e-06,
"loss": 0.0001,
"step": 11420
},
{
"epoch": 34.02,
"grad_norm": 0.001733616110868752,
"learning_rate": 1.9962511715089036e-06,
"loss": 0.0034,
"step": 11430
},
{
"epoch": 34.02,
"grad_norm": 0.002772507956251502,
"learning_rate": 1.9493908153701968e-06,
"loss": 0.0234,
"step": 11440
},
{
"epoch": 34.02,
"grad_norm": 0.001203456544317305,
"learning_rate": 1.9025304592314903e-06,
"loss": 0.0001,
"step": 11450
},
{
"epoch": 34.02,
"grad_norm": 0.004119969438761473,
"learning_rate": 1.8556701030927835e-06,
"loss": 0.0399,
"step": 11460
},
{
"epoch": 34.02,
"grad_norm": 0.0023524747230112553,
"learning_rate": 1.8088097469540769e-06,
"loss": 0.0003,
"step": 11470
},
{
"epoch": 34.02,
"grad_norm": 0.0018690053839236498,
"learning_rate": 1.7619493908153703e-06,
"loss": 0.0001,
"step": 11480
},
{
"epoch": 34.02,
"grad_norm": 0.0028986113611608744,
"learning_rate": 1.7150890346766636e-06,
"loss": 0.0371,
"step": 11490
},
{
"epoch": 34.02,
"grad_norm": 0.0018392838537693024,
"learning_rate": 1.6682286785379568e-06,
"loss": 0.0001,
"step": 11500
},
{
"epoch": 34.02,
"grad_norm": 0.0016958696069195867,
"learning_rate": 1.6213683223992502e-06,
"loss": 0.0001,
"step": 11510
},
{
"epoch": 34.03,
"grad_norm": 0.004104943014681339,
"learning_rate": 1.5745079662605435e-06,
"loss": 0.0001,
"step": 11520
},
{
"epoch": 34.03,
"grad_norm": 0.008021462708711624,
"learning_rate": 1.527647610121837e-06,
"loss": 0.0001,
"step": 11530
},
{
"epoch": 34.03,
"grad_norm": 5.87186861038208,
"learning_rate": 1.4807872539831303e-06,
"loss": 0.0109,
"step": 11540
},
{
"epoch": 34.03,
"grad_norm": 0.0011679594172164798,
"learning_rate": 1.4339268978444237e-06,
"loss": 0.0001,
"step": 11550
},
{
"epoch": 34.03,
"eval_accuracy": 0.8080229226361032,
"eval_loss": 1.3971121311187744,
"eval_runtime": 33.9156,
"eval_samples_per_second": 20.581,
"eval_steps_per_second": 1.74,
"step": 11550
},
{
"epoch": 35.0,
"grad_norm": 0.0017861544620245695,
"learning_rate": 1.387066541705717e-06,
"loss": 0.0001,
"step": 11560
},
{
"epoch": 35.0,
"grad_norm": 0.0019135787151753902,
"learning_rate": 1.3402061855670102e-06,
"loss": 0.0002,
"step": 11570
},
{
"epoch": 35.0,
"grad_norm": 0.04832134768366814,
"learning_rate": 1.2933458294283038e-06,
"loss": 0.0001,
"step": 11580
},
{
"epoch": 35.0,
"grad_norm": 0.0034780928399413824,
"learning_rate": 1.2464854732895972e-06,
"loss": 0.0001,
"step": 11590
},
{
"epoch": 35.0,
"grad_norm": 0.001626980840228498,
"learning_rate": 1.1996251171508905e-06,
"loss": 0.0003,
"step": 11600
},
{
"epoch": 35.01,
"grad_norm": 0.006935800425708294,
"learning_rate": 1.1527647610121837e-06,
"loss": 0.0001,
"step": 11610
},
{
"epoch": 35.01,
"grad_norm": 0.0010628902819007635,
"learning_rate": 1.105904404873477e-06,
"loss": 0.001,
"step": 11620
},
{
"epoch": 35.01,
"grad_norm": 0.0014588043559342623,
"learning_rate": 1.0590440487347704e-06,
"loss": 0.0001,
"step": 11630
},
{
"epoch": 35.01,
"grad_norm": 0.007299583870917559,
"learning_rate": 1.0121836925960638e-06,
"loss": 0.0001,
"step": 11640
},
{
"epoch": 35.01,
"grad_norm": 0.002258384833112359,
"learning_rate": 9.65323336457357e-07,
"loss": 0.0001,
"step": 11650
},
{
"epoch": 35.01,
"grad_norm": 61.81095504760742,
"learning_rate": 9.184629803186506e-07,
"loss": 0.0222,
"step": 11660
},
{
"epoch": 35.01,
"grad_norm": 0.0012000646675005555,
"learning_rate": 8.716026241799438e-07,
"loss": 0.0001,
"step": 11670
},
{
"epoch": 35.01,
"grad_norm": 0.01990874670445919,
"learning_rate": 8.247422680412372e-07,
"loss": 0.0001,
"step": 11680
},
{
"epoch": 35.01,
"grad_norm": 0.01420762948691845,
"learning_rate": 7.778819119025305e-07,
"loss": 0.0001,
"step": 11690
},
{
"epoch": 35.01,
"grad_norm": 0.002269094344228506,
"learning_rate": 7.310215557638238e-07,
"loss": 0.0001,
"step": 11700
},
{
"epoch": 35.01,
"grad_norm": 0.0038073186296969652,
"learning_rate": 6.841611996251172e-07,
"loss": 0.0181,
"step": 11710
},
{
"epoch": 35.01,
"grad_norm": 0.03407540172338486,
"learning_rate": 6.373008434864106e-07,
"loss": 0.029,
"step": 11720
},
{
"epoch": 35.02,
"grad_norm": 0.030750174075365067,
"learning_rate": 5.904404873477039e-07,
"loss": 0.0093,
"step": 11730
},
{
"epoch": 35.02,
"grad_norm": 0.002124561695381999,
"learning_rate": 5.435801312089972e-07,
"loss": 0.0027,
"step": 11740
},
{
"epoch": 35.02,
"grad_norm": 0.0012157908640801907,
"learning_rate": 4.967197750702906e-07,
"loss": 0.0001,
"step": 11750
},
{
"epoch": 35.02,
"grad_norm": 0.002194877015426755,
"learning_rate": 4.498594189315839e-07,
"loss": 0.0001,
"step": 11760
},
{
"epoch": 35.02,
"grad_norm": 0.003094845451414585,
"learning_rate": 4.0299906279287724e-07,
"loss": 0.0001,
"step": 11770
},
{
"epoch": 35.02,
"grad_norm": 0.001540425349958241,
"learning_rate": 3.561387066541706e-07,
"loss": 0.0001,
"step": 11780
},
{
"epoch": 35.02,
"grad_norm": 0.0017908586887642741,
"learning_rate": 3.0927835051546394e-07,
"loss": 0.0001,
"step": 11790
},
{
"epoch": 35.02,
"grad_norm": 0.007806339301168919,
"learning_rate": 2.6241799437675726e-07,
"loss": 0.0399,
"step": 11800
},
{
"epoch": 35.02,
"grad_norm": 0.004247527569532394,
"learning_rate": 2.155576382380506e-07,
"loss": 0.034,
"step": 11810
},
{
"epoch": 35.02,
"grad_norm": 0.0017347713001072407,
"learning_rate": 1.6869728209934398e-07,
"loss": 0.0005,
"step": 11820
},
{
"epoch": 35.02,
"grad_norm": 0.001252649468369782,
"learning_rate": 1.218369259606373e-07,
"loss": 0.0001,
"step": 11830
},
{
"epoch": 35.02,
"grad_norm": 0.0012991786934435368,
"learning_rate": 7.497656982193066e-08,
"loss": 0.0528,
"step": 11840
},
{
"epoch": 35.03,
"grad_norm": 11.709962844848633,
"learning_rate": 2.8116213683223995e-08,
"loss": 0.0041,
"step": 11850
},
{
"epoch": 35.03,
"eval_accuracy": 0.8080229226361032,
"eval_loss": 1.3906208276748657,
"eval_runtime": 33.666,
"eval_samples_per_second": 20.733,
"eval_steps_per_second": 1.753,
"step": 11856
},
{
"epoch": 35.03,
"step": 11856,
"total_flos": 1.770184277095349e+20,
"train_loss": 0.18484977827027105,
"train_runtime": 12835.1405,
"train_samples_per_second": 11.085,
"train_steps_per_second": 0.924
},
{
"epoch": 35.03,
"eval_accuracy": 0.8209169054441261,
"eval_loss": 0.8522207736968994,
"eval_runtime": 35.5618,
"eval_samples_per_second": 19.628,
"eval_steps_per_second": 1.659,
"step": 11856
},
{
"epoch": 35.03,
"eval_accuracy": 0.8209169054441261,
"eval_loss": 0.8522207736968994,
"eval_runtime": 33.138,
"eval_samples_per_second": 21.063,
"eval_steps_per_second": 1.78,
"step": 11856
}
],
"logging_steps": 10,
"max_steps": 11856,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"total_flos": 1.770184277095349e+20,
"train_batch_size": 12,
"trial_name": null,
"trial_params": null
}