{
"best_metric": 0.37258049845695496,
"best_model_checkpoint": "finetuned-electrical-images/checkpoint-1500",
"epoch": 10.0,
"eval_steps": 100,
"global_step": 2150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.046511627906976744,
"grad_norm": 2.7490031719207764,
"learning_rate": 0.00019906976744186048,
"loss": 1.5465,
"step": 10
},
{
"epoch": 0.09302325581395349,
"grad_norm": 3.2069549560546875,
"learning_rate": 0.00019813953488372096,
"loss": 1.2151,
"step": 20
},
{
"epoch": 0.13953488372093023,
"grad_norm": 4.26694393157959,
"learning_rate": 0.0001972093023255814,
"loss": 1.0806,
"step": 30
},
{
"epoch": 0.18604651162790697,
"grad_norm": 1.9304146766662598,
"learning_rate": 0.00019627906976744185,
"loss": 0.9911,
"step": 40
},
{
"epoch": 0.23255813953488372,
"grad_norm": 4.346214294433594,
"learning_rate": 0.00019534883720930232,
"loss": 0.8587,
"step": 50
},
{
"epoch": 0.27906976744186046,
"grad_norm": 3.977471113204956,
"learning_rate": 0.0001944186046511628,
"loss": 0.7717,
"step": 60
},
{
"epoch": 0.32558139534883723,
"grad_norm": 3.1426613330841064,
"learning_rate": 0.00019348837209302326,
"loss": 0.961,
"step": 70
},
{
"epoch": 0.37209302325581395,
"grad_norm": 2.316943407058716,
"learning_rate": 0.00019255813953488374,
"loss": 0.8362,
"step": 80
},
{
"epoch": 0.4186046511627907,
"grad_norm": 2.885920286178589,
"learning_rate": 0.0001916279069767442,
"loss": 0.829,
"step": 90
},
{
"epoch": 0.46511627906976744,
"grad_norm": 1.4288862943649292,
"learning_rate": 0.00019069767441860466,
"loss": 0.7116,
"step": 100
},
{
"epoch": 0.46511627906976744,
"eval_accuracy": 0.7920792079207921,
"eval_loss": 0.6398854851722717,
"eval_runtime": 7.8011,
"eval_samples_per_second": 77.682,
"eval_steps_per_second": 9.742,
"step": 100
},
{
"epoch": 0.5116279069767442,
"grad_norm": 3.5353667736053467,
"learning_rate": 0.00018976744186046513,
"loss": 0.5819,
"step": 110
},
{
"epoch": 0.5581395348837209,
"grad_norm": 3.7112417221069336,
"learning_rate": 0.00018883720930232557,
"loss": 0.5016,
"step": 120
},
{
"epoch": 0.6046511627906976,
"grad_norm": 3.1544606685638428,
"learning_rate": 0.00018790697674418605,
"loss": 0.6231,
"step": 130
},
{
"epoch": 0.6511627906976745,
"grad_norm": 2.670633316040039,
"learning_rate": 0.00018697674418604652,
"loss": 0.6856,
"step": 140
},
{
"epoch": 0.6976744186046512,
"grad_norm": 3.5320849418640137,
"learning_rate": 0.000186046511627907,
"loss": 0.5361,
"step": 150
},
{
"epoch": 0.7441860465116279,
"grad_norm": 5.482109546661377,
"learning_rate": 0.00018511627906976744,
"loss": 0.6735,
"step": 160
},
{
"epoch": 0.7906976744186046,
"grad_norm": 2.8392834663391113,
"learning_rate": 0.0001841860465116279,
"loss": 0.5717,
"step": 170
},
{
"epoch": 0.8372093023255814,
"grad_norm": 1.8788505792617798,
"learning_rate": 0.00018325581395348838,
"loss": 0.6031,
"step": 180
},
{
"epoch": 0.8837209302325582,
"grad_norm": 3.153599977493286,
"learning_rate": 0.00018232558139534886,
"loss": 0.5841,
"step": 190
},
{
"epoch": 0.9302325581395349,
"grad_norm": 1.6330416202545166,
"learning_rate": 0.0001813953488372093,
"loss": 0.6953,
"step": 200
},
{
"epoch": 0.9302325581395349,
"eval_accuracy": 0.8085808580858086,
"eval_loss": 0.5588750839233398,
"eval_runtime": 8.0533,
"eval_samples_per_second": 75.249,
"eval_steps_per_second": 9.437,
"step": 200
},
{
"epoch": 0.9767441860465116,
"grad_norm": 3.0009500980377197,
"learning_rate": 0.00018046511627906977,
"loss": 0.518,
"step": 210
},
{
"epoch": 1.0232558139534884,
"grad_norm": 5.527990341186523,
"learning_rate": 0.00017953488372093025,
"loss": 0.5661,
"step": 220
},
{
"epoch": 1.069767441860465,
"grad_norm": 1.3012688159942627,
"learning_rate": 0.0001786046511627907,
"loss": 0.4236,
"step": 230
},
{
"epoch": 1.1162790697674418,
"grad_norm": 1.5813956260681152,
"learning_rate": 0.00017767441860465117,
"loss": 0.4062,
"step": 240
},
{
"epoch": 1.1627906976744187,
"grad_norm": 6.4304890632629395,
"learning_rate": 0.00017674418604651164,
"loss": 0.4689,
"step": 250
},
{
"epoch": 1.2093023255813953,
"grad_norm": 3.993091583251953,
"learning_rate": 0.0001758139534883721,
"loss": 0.4486,
"step": 260
},
{
"epoch": 1.255813953488372,
"grad_norm": 4.19878625869751,
"learning_rate": 0.00017488372093023258,
"loss": 0.577,
"step": 270
},
{
"epoch": 1.302325581395349,
"grad_norm": 3.3949573040008545,
"learning_rate": 0.00017395348837209303,
"loss": 0.4669,
"step": 280
},
{
"epoch": 1.3488372093023255,
"grad_norm": 3.5557937622070312,
"learning_rate": 0.00017302325581395348,
"loss": 0.4234,
"step": 290
},
{
"epoch": 1.3953488372093024,
"grad_norm": 2.519487142562866,
"learning_rate": 0.00017209302325581395,
"loss": 0.4078,
"step": 300
},
{
"epoch": 1.3953488372093024,
"eval_accuracy": 0.8399339933993399,
"eval_loss": 0.4945888817310333,
"eval_runtime": 8.261,
"eval_samples_per_second": 73.357,
"eval_steps_per_second": 9.2,
"step": 300
},
{
"epoch": 1.441860465116279,
"grad_norm": 3.2524163722991943,
"learning_rate": 0.00017116279069767442,
"loss": 0.4963,
"step": 310
},
{
"epoch": 1.4883720930232558,
"grad_norm": 2.7722671031951904,
"learning_rate": 0.0001702325581395349,
"loss": 0.5716,
"step": 320
},
{
"epoch": 1.5348837209302326,
"grad_norm": 3.2851498126983643,
"learning_rate": 0.00016930232558139537,
"loss": 0.4878,
"step": 330
},
{
"epoch": 1.5813953488372094,
"grad_norm": 1.52852463722229,
"learning_rate": 0.00016837209302325584,
"loss": 0.4834,
"step": 340
},
{
"epoch": 1.627906976744186,
"grad_norm": 6.030341625213623,
"learning_rate": 0.00016744186046511629,
"loss": 0.5827,
"step": 350
},
{
"epoch": 1.6744186046511627,
"grad_norm": 3.0944032669067383,
"learning_rate": 0.00016651162790697673,
"loss": 0.4615,
"step": 360
},
{
"epoch": 1.7209302325581395,
"grad_norm": 3.8217456340789795,
"learning_rate": 0.0001655813953488372,
"loss": 0.4558,
"step": 370
},
{
"epoch": 1.7674418604651163,
"grad_norm": 2.4076743125915527,
"learning_rate": 0.00016465116279069768,
"loss": 0.5732,
"step": 380
},
{
"epoch": 1.8139534883720931,
"grad_norm": 4.107614994049072,
"learning_rate": 0.00016372093023255815,
"loss": 0.6245,
"step": 390
},
{
"epoch": 1.8604651162790697,
"grad_norm": 1.4518159627914429,
"learning_rate": 0.00016279069767441862,
"loss": 0.5852,
"step": 400
},
{
"epoch": 1.8604651162790697,
"eval_accuracy": 0.8399339933993399,
"eval_loss": 0.48718997836112976,
"eval_runtime": 8.1286,
"eval_samples_per_second": 74.552,
"eval_steps_per_second": 9.35,
"step": 400
},
{
"epoch": 1.9069767441860463,
"grad_norm": 2.9517464637756348,
"learning_rate": 0.00016186046511627907,
"loss": 0.5802,
"step": 410
},
{
"epoch": 1.9534883720930232,
"grad_norm": 3.5694568157196045,
"learning_rate": 0.00016093023255813954,
"loss": 0.4936,
"step": 420
},
{
"epoch": 2.0,
"grad_norm": 0.7227370142936707,
"learning_rate": 0.00016,
"loss": 0.4235,
"step": 430
},
{
"epoch": 2.046511627906977,
"grad_norm": 2.4811441898345947,
"learning_rate": 0.00015906976744186046,
"loss": 0.4333,
"step": 440
},
{
"epoch": 2.0930232558139537,
"grad_norm": 2.808461904525757,
"learning_rate": 0.00015813953488372093,
"loss": 0.4659,
"step": 450
},
{
"epoch": 2.13953488372093,
"grad_norm": 2.1043217182159424,
"learning_rate": 0.0001572093023255814,
"loss": 0.3284,
"step": 460
},
{
"epoch": 2.186046511627907,
"grad_norm": 3.650707483291626,
"learning_rate": 0.00015627906976744188,
"loss": 0.3416,
"step": 470
},
{
"epoch": 2.2325581395348837,
"grad_norm": 2.71091628074646,
"learning_rate": 0.00015534883720930232,
"loss": 0.456,
"step": 480
},
{
"epoch": 2.2790697674418605,
"grad_norm": 2.2614614963531494,
"learning_rate": 0.0001544186046511628,
"loss": 0.4062,
"step": 490
},
{
"epoch": 2.3255813953488373,
"grad_norm": 3.2703847885131836,
"learning_rate": 0.00015348837209302327,
"loss": 0.4993,
"step": 500
},
{
"epoch": 2.3255813953488373,
"eval_accuracy": 0.8597359735973598,
"eval_loss": 0.46872881054878235,
"eval_runtime": 8.4417,
"eval_samples_per_second": 71.786,
"eval_steps_per_second": 9.003,
"step": 500
},
{
"epoch": 2.3720930232558137,
"grad_norm": 4.928767204284668,
"learning_rate": 0.00015255813953488374,
"loss": 0.4723,
"step": 510
},
{
"epoch": 2.4186046511627906,
"grad_norm": 4.274898052215576,
"learning_rate": 0.0001516279069767442,
"loss": 0.4886,
"step": 520
},
{
"epoch": 2.4651162790697674,
"grad_norm": 2.146392345428467,
"learning_rate": 0.00015069767441860466,
"loss": 0.3828,
"step": 530
},
{
"epoch": 2.511627906976744,
"grad_norm": 3.7208266258239746,
"learning_rate": 0.0001497674418604651,
"loss": 0.412,
"step": 540
},
{
"epoch": 2.558139534883721,
"grad_norm": 2.139849901199341,
"learning_rate": 0.00014883720930232558,
"loss": 0.3794,
"step": 550
},
{
"epoch": 2.604651162790698,
"grad_norm": 1.760920524597168,
"learning_rate": 0.00014790697674418605,
"loss": 0.3078,
"step": 560
},
{
"epoch": 2.6511627906976747,
"grad_norm": 3.4851582050323486,
"learning_rate": 0.00014697674418604652,
"loss": 0.3498,
"step": 570
},
{
"epoch": 2.697674418604651,
"grad_norm": 1.7796868085861206,
"learning_rate": 0.000146046511627907,
"loss": 0.4433,
"step": 580
},
{
"epoch": 2.744186046511628,
"grad_norm": 2.8916869163513184,
"learning_rate": 0.00014511627906976747,
"loss": 0.3614,
"step": 590
},
{
"epoch": 2.7906976744186047,
"grad_norm": 4.171596527099609,
"learning_rate": 0.00014418604651162791,
"loss": 0.4479,
"step": 600
},
{
"epoch": 2.7906976744186047,
"eval_accuracy": 0.8844884488448845,
"eval_loss": 0.3985598087310791,
"eval_runtime": 8.4641,
"eval_samples_per_second": 71.596,
"eval_steps_per_second": 8.979,
"step": 600
},
{
"epoch": 2.8372093023255816,
"grad_norm": 2.6928250789642334,
"learning_rate": 0.00014325581395348836,
"loss": 0.3827,
"step": 610
},
{
"epoch": 2.883720930232558,
"grad_norm": 2.466237783432007,
"learning_rate": 0.00014232558139534883,
"loss": 0.3145,
"step": 620
},
{
"epoch": 2.9302325581395348,
"grad_norm": 2.5348870754241943,
"learning_rate": 0.0001413953488372093,
"loss": 0.4285,
"step": 630
},
{
"epoch": 2.9767441860465116,
"grad_norm": 4.993005275726318,
"learning_rate": 0.00014046511627906978,
"loss": 0.4147,
"step": 640
},
{
"epoch": 3.0232558139534884,
"grad_norm": 3.5268807411193848,
"learning_rate": 0.00013953488372093025,
"loss": 0.2991,
"step": 650
},
{
"epoch": 3.0697674418604652,
"grad_norm": 0.5349214673042297,
"learning_rate": 0.00013860465116279072,
"loss": 0.2837,
"step": 660
},
{
"epoch": 3.116279069767442,
"grad_norm": 2.4129347801208496,
"learning_rate": 0.00013767441860465117,
"loss": 0.2495,
"step": 670
},
{
"epoch": 3.1627906976744184,
"grad_norm": 2.0734763145446777,
"learning_rate": 0.00013674418604651162,
"loss": 0.3585,
"step": 680
},
{
"epoch": 3.2093023255813953,
"grad_norm": 4.114805221557617,
"learning_rate": 0.0001358139534883721,
"loss": 0.2906,
"step": 690
},
{
"epoch": 3.255813953488372,
"grad_norm": 4.394787788391113,
"learning_rate": 0.00013488372093023256,
"loss": 0.4101,
"step": 700
},
{
"epoch": 3.255813953488372,
"eval_accuracy": 0.8729372937293729,
"eval_loss": 0.4385151267051697,
"eval_runtime": 8.1298,
"eval_samples_per_second": 74.541,
"eval_steps_per_second": 9.348,
"step": 700
},
{
"epoch": 3.302325581395349,
"grad_norm": 2.861743450164795,
"learning_rate": 0.00013395348837209303,
"loss": 0.445,
"step": 710
},
{
"epoch": 3.3488372093023258,
"grad_norm": 4.178903579711914,
"learning_rate": 0.0001330232558139535,
"loss": 0.219,
"step": 720
},
{
"epoch": 3.395348837209302,
"grad_norm": 4.89815616607666,
"learning_rate": 0.00013209302325581395,
"loss": 0.463,
"step": 730
},
{
"epoch": 3.441860465116279,
"grad_norm": 10.707399368286133,
"learning_rate": 0.00013116279069767442,
"loss": 0.5219,
"step": 740
},
{
"epoch": 3.488372093023256,
"grad_norm": 3.0130255222320557,
"learning_rate": 0.0001302325581395349,
"loss": 0.4429,
"step": 750
},
{
"epoch": 3.5348837209302326,
"grad_norm": 5.012279510498047,
"learning_rate": 0.00012930232558139534,
"loss": 0.2735,
"step": 760
},
{
"epoch": 3.5813953488372094,
"grad_norm": 2.2448291778564453,
"learning_rate": 0.00012837209302325582,
"loss": 0.4101,
"step": 770
},
{
"epoch": 3.6279069767441863,
"grad_norm": 3.8598289489746094,
"learning_rate": 0.0001274418604651163,
"loss": 0.298,
"step": 780
},
{
"epoch": 3.6744186046511627,
"grad_norm": 1.7304565906524658,
"learning_rate": 0.00012651162790697676,
"loss": 0.2853,
"step": 790
},
{
"epoch": 3.7209302325581395,
"grad_norm": 3.275845527648926,
"learning_rate": 0.0001255813953488372,
"loss": 0.283,
"step": 800
},
{
"epoch": 3.7209302325581395,
"eval_accuracy": 0.8762376237623762,
"eval_loss": 0.4413387179374695,
"eval_runtime": 8.5192,
"eval_samples_per_second": 71.133,
"eval_steps_per_second": 8.921,
"step": 800
},
{
"epoch": 3.7674418604651163,
"grad_norm": 5.508560657501221,
"learning_rate": 0.00012465116279069768,
"loss": 0.1962,
"step": 810
},
{
"epoch": 3.813953488372093,
"grad_norm": 3.444455862045288,
"learning_rate": 0.00012372093023255815,
"loss": 0.2718,
"step": 820
},
{
"epoch": 3.8604651162790695,
"grad_norm": 10.332758903503418,
"learning_rate": 0.00012279069767441863,
"loss": 0.4886,
"step": 830
},
{
"epoch": 3.9069767441860463,
"grad_norm": 1.1791740655899048,
"learning_rate": 0.00012186046511627907,
"loss": 0.3607,
"step": 840
},
{
"epoch": 3.953488372093023,
"grad_norm": 1.7059251070022583,
"learning_rate": 0.00012093023255813953,
"loss": 0.2679,
"step": 850
},
{
"epoch": 4.0,
"grad_norm": 6.822539806365967,
"learning_rate": 0.00012,
"loss": 0.3047,
"step": 860
},
{
"epoch": 4.046511627906977,
"grad_norm": 3.6658146381378174,
"learning_rate": 0.00011906976744186048,
"loss": 0.2808,
"step": 870
},
{
"epoch": 4.093023255813954,
"grad_norm": 2.8236451148986816,
"learning_rate": 0.00011813953488372094,
"loss": 0.3813,
"step": 880
},
{
"epoch": 4.1395348837209305,
"grad_norm": 6.614228248596191,
"learning_rate": 0.00011720930232558141,
"loss": 0.3335,
"step": 890
},
{
"epoch": 4.186046511627907,
"grad_norm": 4.810386657714844,
"learning_rate": 0.00011627906976744187,
"loss": 0.3959,
"step": 900
},
{
"epoch": 4.186046511627907,
"eval_accuracy": 0.8729372937293729,
"eval_loss": 0.41208329796791077,
"eval_runtime": 8.3929,
"eval_samples_per_second": 72.204,
"eval_steps_per_second": 9.055,
"step": 900
},
{
"epoch": 4.232558139534884,
"grad_norm": 0.597582221031189,
"learning_rate": 0.00011534883720930234,
"loss": 0.2209,
"step": 910
},
{
"epoch": 4.27906976744186,
"grad_norm": 2.4885311126708984,
"learning_rate": 0.00011441860465116279,
"loss": 0.3125,
"step": 920
},
{
"epoch": 4.325581395348837,
"grad_norm": 3.7162835597991943,
"learning_rate": 0.00011348837209302326,
"loss": 0.1821,
"step": 930
},
{
"epoch": 4.372093023255814,
"grad_norm": 4.518478870391846,
"learning_rate": 0.00011255813953488372,
"loss": 0.181,
"step": 940
},
{
"epoch": 4.4186046511627906,
"grad_norm": 1.7456213235855103,
"learning_rate": 0.00011162790697674419,
"loss": 0.2535,
"step": 950
},
{
"epoch": 4.465116279069767,
"grad_norm": 2.0503828525543213,
"learning_rate": 0.00011069767441860466,
"loss": 0.2924,
"step": 960
},
{
"epoch": 4.511627906976744,
"grad_norm": 0.33591389656066895,
"learning_rate": 0.00010976744186046512,
"loss": 0.2453,
"step": 970
},
{
"epoch": 4.558139534883721,
"grad_norm": 2.924938917160034,
"learning_rate": 0.0001088372093023256,
"loss": 0.2443,
"step": 980
},
{
"epoch": 4.604651162790698,
"grad_norm": 4.228425979614258,
"learning_rate": 0.00010790697674418607,
"loss": 0.2446,
"step": 990
},
{
"epoch": 4.651162790697675,
"grad_norm": 0.3494820296764374,
"learning_rate": 0.00010697674418604651,
"loss": 0.318,
"step": 1000
},
{
"epoch": 4.651162790697675,
"eval_accuracy": 0.8696369636963697,
"eval_loss": 0.4397449195384979,
"eval_runtime": 8.4348,
"eval_samples_per_second": 71.845,
"eval_steps_per_second": 9.01,
"step": 1000
},
{
"epoch": 4.6976744186046515,
"grad_norm": 4.108438491821289,
"learning_rate": 0.00010604651162790697,
"loss": 0.1346,
"step": 1010
},
{
"epoch": 4.7441860465116275,
"grad_norm": 2.6504056453704834,
"learning_rate": 0.00010511627906976745,
"loss": 0.3125,
"step": 1020
},
{
"epoch": 4.790697674418604,
"grad_norm": 1.8026741743087769,
"learning_rate": 0.0001041860465116279,
"loss": 0.2085,
"step": 1030
},
{
"epoch": 4.837209302325581,
"grad_norm": 2.016584873199463,
"learning_rate": 0.00010325581395348838,
"loss": 0.3598,
"step": 1040
},
{
"epoch": 4.883720930232558,
"grad_norm": 7.666717529296875,
"learning_rate": 0.00010232558139534885,
"loss": 0.3086,
"step": 1050
},
{
"epoch": 4.930232558139535,
"grad_norm": 0.9514933228492737,
"learning_rate": 0.00010139534883720931,
"loss": 0.2095,
"step": 1060
},
{
"epoch": 4.976744186046512,
"grad_norm": 3.582228183746338,
"learning_rate": 0.00010046511627906978,
"loss": 0.3226,
"step": 1070
},
{
"epoch": 5.023255813953488,
"grad_norm": 1.5236479043960571,
"learning_rate": 9.953488372093024e-05,
"loss": 0.2046,
"step": 1080
},
{
"epoch": 5.069767441860465,
"grad_norm": 4.655299663543701,
"learning_rate": 9.86046511627907e-05,
"loss": 0.1163,
"step": 1090
},
{
"epoch": 5.116279069767442,
"grad_norm": 2.3776028156280518,
"learning_rate": 9.767441860465116e-05,
"loss": 0.2401,
"step": 1100
},
{
"epoch": 5.116279069767442,
"eval_accuracy": 0.8679867986798679,
"eval_loss": 0.4887414872646332,
"eval_runtime": 8.1962,
"eval_samples_per_second": 73.937,
"eval_steps_per_second": 9.273,
"step": 1100
},
{
"epoch": 5.162790697674419,
"grad_norm": 4.612281322479248,
"learning_rate": 9.674418604651163e-05,
"loss": 0.3254,
"step": 1110
},
{
"epoch": 5.209302325581396,
"grad_norm": 1.2196846008300781,
"learning_rate": 9.58139534883721e-05,
"loss": 0.2799,
"step": 1120
},
{
"epoch": 5.2558139534883725,
"grad_norm": 3.100369691848755,
"learning_rate": 9.488372093023256e-05,
"loss": 0.1837,
"step": 1130
},
{
"epoch": 5.3023255813953485,
"grad_norm": 1.2027745246887207,
"learning_rate": 9.395348837209302e-05,
"loss": 0.1463,
"step": 1140
},
{
"epoch": 5.348837209302325,
"grad_norm": 4.4534196853637695,
"learning_rate": 9.30232558139535e-05,
"loss": 0.205,
"step": 1150
},
{
"epoch": 5.395348837209302,
"grad_norm": 1.2410222291946411,
"learning_rate": 9.209302325581396e-05,
"loss": 0.2523,
"step": 1160
},
{
"epoch": 5.441860465116279,
"grad_norm": 7.617187023162842,
"learning_rate": 9.116279069767443e-05,
"loss": 0.2619,
"step": 1170
},
{
"epoch": 5.488372093023256,
"grad_norm": 0.7941145300865173,
"learning_rate": 9.023255813953489e-05,
"loss": 0.3047,
"step": 1180
},
{
"epoch": 5.534883720930233,
"grad_norm": 2.606306552886963,
"learning_rate": 8.930232558139535e-05,
"loss": 0.226,
"step": 1190
},
{
"epoch": 5.5813953488372094,
"grad_norm": 9.533924102783203,
"learning_rate": 8.837209302325582e-05,
"loss": 0.1273,
"step": 1200
},
{
"epoch": 5.5813953488372094,
"eval_accuracy": 0.8663366336633663,
"eval_loss": 0.4223584532737732,
"eval_runtime": 8.4233,
"eval_samples_per_second": 71.943,
"eval_steps_per_second": 9.023,
"step": 1200
},
{
"epoch": 5.627906976744186,
"grad_norm": 1.8951793909072876,
"learning_rate": 8.744186046511629e-05,
"loss": 0.1706,
"step": 1210
},
{
"epoch": 5.674418604651163,
"grad_norm": 1.8302172422409058,
"learning_rate": 8.651162790697674e-05,
"loss": 0.2261,
"step": 1220
},
{
"epoch": 5.720930232558139,
"grad_norm": 1.3308892250061035,
"learning_rate": 8.558139534883721e-05,
"loss": 0.2022,
"step": 1230
},
{
"epoch": 5.767441860465116,
"grad_norm": 0.5113635659217834,
"learning_rate": 8.465116279069768e-05,
"loss": 0.2085,
"step": 1240
},
{
"epoch": 5.813953488372093,
"grad_norm": 5.370401382446289,
"learning_rate": 8.372093023255814e-05,
"loss": 0.2813,
"step": 1250
},
{
"epoch": 5.8604651162790695,
"grad_norm": 1.0463091135025024,
"learning_rate": 8.27906976744186e-05,
"loss": 0.3038,
"step": 1260
},
{
"epoch": 5.906976744186046,
"grad_norm": 2.4071524143218994,
"learning_rate": 8.186046511627907e-05,
"loss": 0.1519,
"step": 1270
},
{
"epoch": 5.953488372093023,
"grad_norm": 6.68901252746582,
"learning_rate": 8.093023255813953e-05,
"loss": 0.1952,
"step": 1280
},
{
"epoch": 6.0,
"grad_norm": 5.0761542320251465,
"learning_rate": 8e-05,
"loss": 0.2148,
"step": 1290
},
{
"epoch": 6.046511627906977,
"grad_norm": 4.299339771270752,
"learning_rate": 7.906976744186047e-05,
"loss": 0.1101,
"step": 1300
},
{
"epoch": 6.046511627906977,
"eval_accuracy": 0.8778877887788779,
"eval_loss": 0.43775683641433716,
"eval_runtime": 8.4309,
"eval_samples_per_second": 71.878,
"eval_steps_per_second": 9.014,
"step": 1300
},
{
"epoch": 6.093023255813954,
"grad_norm": 5.793342590332031,
"learning_rate": 7.813953488372094e-05,
"loss": 0.1908,
"step": 1310
},
{
"epoch": 6.1395348837209305,
"grad_norm": 3.164515256881714,
"learning_rate": 7.72093023255814e-05,
"loss": 0.1572,
"step": 1320
},
{
"epoch": 6.186046511627907,
"grad_norm": 1.032468557357788,
"learning_rate": 7.627906976744187e-05,
"loss": 0.2154,
"step": 1330
},
{
"epoch": 6.232558139534884,
"grad_norm": 3.3498730659484863,
"learning_rate": 7.534883720930233e-05,
"loss": 0.1473,
"step": 1340
},
{
"epoch": 6.27906976744186,
"grad_norm": 1.0370675325393677,
"learning_rate": 7.441860465116279e-05,
"loss": 0.1568,
"step": 1350
},
{
"epoch": 6.325581395348837,
"grad_norm": 1.5336437225341797,
"learning_rate": 7.348837209302326e-05,
"loss": 0.1943,
"step": 1360
},
{
"epoch": 6.372093023255814,
"grad_norm": 7.077569007873535,
"learning_rate": 7.255813953488373e-05,
"loss": 0.242,
"step": 1370
},
{
"epoch": 6.4186046511627906,
"grad_norm": 6.786858558654785,
"learning_rate": 7.162790697674418e-05,
"loss": 0.2174,
"step": 1380
},
{
"epoch": 6.465116279069767,
"grad_norm": 2.3031513690948486,
"learning_rate": 7.069767441860465e-05,
"loss": 0.2683,
"step": 1390
},
{
"epoch": 6.511627906976744,
"grad_norm": 4.499554634094238,
"learning_rate": 6.976744186046513e-05,
"loss": 0.1773,
"step": 1400
},
{
"epoch": 6.511627906976744,
"eval_accuracy": 0.8844884488448845,
"eval_loss": 0.3730297386646271,
"eval_runtime": 8.1564,
"eval_samples_per_second": 74.297,
"eval_steps_per_second": 9.318,
"step": 1400
},
{
"epoch": 6.558139534883721,
"grad_norm": 0.8953260779380798,
"learning_rate": 6.883720930232558e-05,
"loss": 0.1669,
"step": 1410
},
{
"epoch": 6.604651162790698,
"grad_norm": 1.8238012790679932,
"learning_rate": 6.790697674418604e-05,
"loss": 0.2064,
"step": 1420
},
{
"epoch": 6.651162790697675,
"grad_norm": 0.47919437289237976,
"learning_rate": 6.697674418604652e-05,
"loss": 0.1294,
"step": 1430
},
{
"epoch": 6.6976744186046515,
"grad_norm": 1.9631860256195068,
"learning_rate": 6.604651162790698e-05,
"loss": 0.2931,
"step": 1440
},
{
"epoch": 6.7441860465116275,
"grad_norm": 2.078273057937622,
"learning_rate": 6.511627906976745e-05,
"loss": 0.0749,
"step": 1450
},
{
"epoch": 6.790697674418604,
"grad_norm": 8.262022972106934,
"learning_rate": 6.418604651162791e-05,
"loss": 0.1879,
"step": 1460
},
{
"epoch": 6.837209302325581,
"grad_norm": 8.179067611694336,
"learning_rate": 6.325581395348838e-05,
"loss": 0.1836,
"step": 1470
},
{
"epoch": 6.883720930232558,
"grad_norm": 3.35481858253479,
"learning_rate": 6.232558139534884e-05,
"loss": 0.1719,
"step": 1480
},
{
"epoch": 6.930232558139535,
"grad_norm": 5.25211763381958,
"learning_rate": 6.139534883720931e-05,
"loss": 0.1532,
"step": 1490
},
{
"epoch": 6.976744186046512,
"grad_norm": 4.454288959503174,
"learning_rate": 6.0465116279069765e-05,
"loss": 0.2248,
"step": 1500
},
{
"epoch": 6.976744186046512,
"eval_accuracy": 0.8861386138613861,
"eval_loss": 0.37258049845695496,
"eval_runtime": 8.4482,
"eval_samples_per_second": 71.731,
"eval_steps_per_second": 8.996,
"step": 1500
},
{
"epoch": 7.023255813953488,
"grad_norm": 0.325836181640625,
"learning_rate": 5.953488372093024e-05,
"loss": 0.2717,
"step": 1510
},
{
"epoch": 7.069767441860465,
"grad_norm": 0.20686429738998413,
"learning_rate": 5.8604651162790704e-05,
"loss": 0.1134,
"step": 1520
},
{
"epoch": 7.116279069767442,
"grad_norm": 1.2898919582366943,
"learning_rate": 5.767441860465117e-05,
"loss": 0.1802,
"step": 1530
},
{
"epoch": 7.162790697674419,
"grad_norm": 0.5109810829162598,
"learning_rate": 5.674418604651163e-05,
"loss": 0.1167,
"step": 1540
},
{
"epoch": 7.209302325581396,
"grad_norm": 2.8214943408966064,
"learning_rate": 5.5813953488372095e-05,
"loss": 0.0916,
"step": 1550
},
{
"epoch": 7.2558139534883725,
"grad_norm": 4.599625587463379,
"learning_rate": 5.488372093023256e-05,
"loss": 0.1704,
"step": 1560
},
{
"epoch": 7.3023255813953485,
"grad_norm": 3.6765389442443848,
"learning_rate": 5.3953488372093034e-05,
"loss": 0.0917,
"step": 1570
},
{
"epoch": 7.348837209302325,
"grad_norm": 1.5945552587509155,
"learning_rate": 5.3023255813953486e-05,
"loss": 0.0956,
"step": 1580
},
{
"epoch": 7.395348837209302,
"grad_norm": 6.084174156188965,
"learning_rate": 5.209302325581395e-05,
"loss": 0.1411,
"step": 1590
},
{
"epoch": 7.441860465116279,
"grad_norm": 2.9608631134033203,
"learning_rate": 5.1162790697674425e-05,
"loss": 0.0987,
"step": 1600
},
{
"epoch": 7.441860465116279,
"eval_accuracy": 0.8844884488448845,
"eval_loss": 0.43984663486480713,
"eval_runtime": 8.3551,
"eval_samples_per_second": 72.531,
"eval_steps_per_second": 9.096,
"step": 1600
},
{
"epoch": 7.488372093023256,
"grad_norm": 1.1511733531951904,
"learning_rate": 5.023255813953489e-05,
"loss": 0.1976,
"step": 1610
},
{
"epoch": 7.534883720930233,
"grad_norm": 1.5226008892059326,
"learning_rate": 4.930232558139535e-05,
"loss": 0.1001,
"step": 1620
},
{
"epoch": 7.5813953488372094,
"grad_norm": 3.7124974727630615,
"learning_rate": 4.8372093023255816e-05,
"loss": 0.1432,
"step": 1630
},
{
"epoch": 7.627906976744186,
"grad_norm": 1.3083992004394531,
"learning_rate": 4.744186046511628e-05,
"loss": 0.1514,
"step": 1640
},
{
"epoch": 7.674418604651163,
"grad_norm": 1.7809404134750366,
"learning_rate": 4.651162790697675e-05,
"loss": 0.0881,
"step": 1650
},
{
"epoch": 7.720930232558139,
"grad_norm": 0.19210846722126007,
"learning_rate": 4.5581395348837214e-05,
"loss": 0.1179,
"step": 1660
},
{
"epoch": 7.767441860465116,
"grad_norm": 0.44379866123199463,
"learning_rate": 4.465116279069767e-05,
"loss": 0.1302,
"step": 1670
},
{
"epoch": 7.813953488372093,
"grad_norm": 0.10616659373044968,
"learning_rate": 4.3720930232558146e-05,
"loss": 0.1636,
"step": 1680
},
{
"epoch": 7.8604651162790695,
"grad_norm": 2.069657325744629,
"learning_rate": 4.2790697674418605e-05,
"loss": 0.157,
"step": 1690
},
{
"epoch": 7.906976744186046,
"grad_norm": 4.515770435333252,
"learning_rate": 4.186046511627907e-05,
"loss": 0.16,
"step": 1700
},
{
"epoch": 7.906976744186046,
"eval_accuracy": 0.8828382838283828,
"eval_loss": 0.4170949161052704,
"eval_runtime": 8.1214,
"eval_samples_per_second": 74.617,
"eval_steps_per_second": 9.358,
"step": 1700
},
{
"epoch": 7.953488372093023,
"grad_norm": 8.669586181640625,
"learning_rate": 4.093023255813954e-05,
"loss": 0.1765,
"step": 1710
},
{
"epoch": 8.0,
"grad_norm": 5.801586627960205,
"learning_rate": 4e-05,
"loss": 0.1434,
"step": 1720
},
{
"epoch": 8.046511627906977,
"grad_norm": 0.6714385151863098,
"learning_rate": 3.906976744186047e-05,
"loss": 0.0739,
"step": 1730
},
{
"epoch": 8.093023255813954,
"grad_norm": 1.2774052619934082,
"learning_rate": 3.8139534883720935e-05,
"loss": 0.0654,
"step": 1740
},
{
"epoch": 8.13953488372093,
"grad_norm": 0.1617097109556198,
"learning_rate": 3.7209302325581394e-05,
"loss": 0.1071,
"step": 1750
},
{
"epoch": 8.186046511627907,
"grad_norm": 8.03242301940918,
"learning_rate": 3.627906976744187e-05,
"loss": 0.1191,
"step": 1760
},
{
"epoch": 8.232558139534884,
"grad_norm": 0.6172130107879639,
"learning_rate": 3.5348837209302326e-05,
"loss": 0.109,
"step": 1770
},
{
"epoch": 8.279069767441861,
"grad_norm": 0.7166829109191895,
"learning_rate": 3.441860465116279e-05,
"loss": 0.0863,
"step": 1780
},
{
"epoch": 8.325581395348838,
"grad_norm": 0.0343417190015316,
"learning_rate": 3.348837209302326e-05,
"loss": 0.1092,
"step": 1790
},
{
"epoch": 8.372093023255815,
"grad_norm": 0.09605341404676437,
"learning_rate": 3.2558139534883724e-05,
"loss": 0.1224,
"step": 1800
},
{
"epoch": 8.372093023255815,
"eval_accuracy": 0.8877887788778878,
"eval_loss": 0.4335933327674866,
"eval_runtime": 8.4644,
"eval_samples_per_second": 71.594,
"eval_steps_per_second": 8.979,
"step": 1800
},
{
"epoch": 8.418604651162791,
"grad_norm": 0.1179109588265419,
"learning_rate": 3.162790697674419e-05,
"loss": 0.1358,
"step": 1810
},
{
"epoch": 8.465116279069768,
"grad_norm": 0.0823044553399086,
"learning_rate": 3.0697674418604656e-05,
"loss": 0.1565,
"step": 1820
},
{
"epoch": 8.511627906976745,
"grad_norm": 0.6940491795539856,
"learning_rate": 2.976744186046512e-05,
"loss": 0.0398,
"step": 1830
},
{
"epoch": 8.55813953488372,
"grad_norm": 0.12108628451824188,
"learning_rate": 2.8837209302325585e-05,
"loss": 0.0299,
"step": 1840
},
{
"epoch": 8.604651162790697,
"grad_norm": 2.229219913482666,
"learning_rate": 2.7906976744186048e-05,
"loss": 0.1867,
"step": 1850
},
{
"epoch": 8.651162790697674,
"grad_norm": 4.451650619506836,
"learning_rate": 2.6976744186046517e-05,
"loss": 0.1431,
"step": 1860
},
{
"epoch": 8.69767441860465,
"grad_norm": 4.897462844848633,
"learning_rate": 2.6046511627906976e-05,
"loss": 0.1542,
"step": 1870
},
{
"epoch": 8.744186046511627,
"grad_norm": 0.9782124161720276,
"learning_rate": 2.5116279069767445e-05,
"loss": 0.1044,
"step": 1880
},
{
"epoch": 8.790697674418604,
"grad_norm": 2.251777410507202,
"learning_rate": 2.4186046511627908e-05,
"loss": 0.1508,
"step": 1890
},
{
"epoch": 8.837209302325581,
"grad_norm": 4.513321399688721,
"learning_rate": 2.3255813953488374e-05,
"loss": 0.2111,
"step": 1900
},
{
"epoch": 8.837209302325581,
"eval_accuracy": 0.8943894389438944,
"eval_loss": 0.394785612821579,
"eval_runtime": 8.2576,
"eval_samples_per_second": 73.387,
"eval_steps_per_second": 9.204,
"step": 1900
},
{
"epoch": 8.883720930232558,
"grad_norm": 2.1677255630493164,
"learning_rate": 2.2325581395348837e-05,
"loss": 0.1205,
"step": 1910
},
{
"epoch": 8.930232558139535,
"grad_norm": 3.6949923038482666,
"learning_rate": 2.1395348837209303e-05,
"loss": 0.1626,
"step": 1920
},
{
"epoch": 8.976744186046512,
"grad_norm": 4.727644920349121,
"learning_rate": 2.046511627906977e-05,
"loss": 0.1635,
"step": 1930
},
{
"epoch": 9.023255813953488,
"grad_norm": 2.22031569480896,
"learning_rate": 1.9534883720930235e-05,
"loss": 0.1543,
"step": 1940
},
{
"epoch": 9.069767441860465,
"grad_norm": 1.0797746181488037,
"learning_rate": 1.8604651162790697e-05,
"loss": 0.2055,
"step": 1950
},
{
"epoch": 9.116279069767442,
"grad_norm": 0.9525020122528076,
"learning_rate": 1.7674418604651163e-05,
"loss": 0.1099,
"step": 1960
},
{
"epoch": 9.162790697674419,
"grad_norm": 0.37071776390075684,
"learning_rate": 1.674418604651163e-05,
"loss": 0.1583,
"step": 1970
},
{
"epoch": 9.209302325581396,
"grad_norm": 0.05000032112002373,
"learning_rate": 1.5813953488372095e-05,
"loss": 0.0378,
"step": 1980
},
{
"epoch": 9.255813953488373,
"grad_norm": 0.06881304085254669,
"learning_rate": 1.488372093023256e-05,
"loss": 0.1168,
"step": 1990
},
{
"epoch": 9.30232558139535,
"grad_norm": 3.6886019706726074,
"learning_rate": 1.3953488372093024e-05,
"loss": 0.112,
"step": 2000
},
{
"epoch": 9.30232558139535,
"eval_accuracy": 0.8943894389438944,
"eval_loss": 0.4003974199295044,
"eval_runtime": 8.4662,
"eval_samples_per_second": 71.579,
"eval_steps_per_second": 8.977,
"step": 2000
},
{
"epoch": 9.348837209302326,
"grad_norm": 0.3669143617153168,
"learning_rate": 1.3023255813953488e-05,
"loss": 0.0836,
"step": 2010
},
{
"epoch": 9.395348837209303,
"grad_norm": 5.295791149139404,
"learning_rate": 1.2093023255813954e-05,
"loss": 0.0932,
"step": 2020
},
{
"epoch": 9.44186046511628,
"grad_norm": 0.12259897589683533,
"learning_rate": 1.1162790697674418e-05,
"loss": 0.1198,
"step": 2030
},
{
"epoch": 9.488372093023255,
"grad_norm": 0.18641342222690582,
"learning_rate": 1.0232558139534884e-05,
"loss": 0.1342,
"step": 2040
},
{
"epoch": 9.534883720930232,
"grad_norm": 2.212017774581909,
"learning_rate": 9.302325581395349e-06,
"loss": 0.1336,
"step": 2050
},
{
"epoch": 9.581395348837209,
"grad_norm": 0.13683325052261353,
"learning_rate": 8.372093023255815e-06,
"loss": 0.046,
"step": 2060
},
{
"epoch": 9.627906976744185,
"grad_norm": 0.07736368477344513,
"learning_rate": 7.44186046511628e-06,
"loss": 0.0623,
"step": 2070
},
{
"epoch": 9.674418604651162,
"grad_norm": 1.0054152011871338,
"learning_rate": 6.511627906976744e-06,
"loss": 0.069,
"step": 2080
},
{
"epoch": 9.720930232558139,
"grad_norm": 0.7521716356277466,
"learning_rate": 5.581395348837209e-06,
"loss": 0.153,
"step": 2090
},
{
"epoch": 9.767441860465116,
"grad_norm": 2.2922515869140625,
"learning_rate": 4.651162790697674e-06,
"loss": 0.0962,
"step": 2100
},
{
"epoch": 9.767441860465116,
"eval_accuracy": 0.8927392739273927,
"eval_loss": 0.4092388451099396,
"eval_runtime": 8.2945,
"eval_samples_per_second": 73.061,
"eval_steps_per_second": 9.163,
"step": 2100
},
{
"epoch": 9.813953488372093,
"grad_norm": 2.177103042602539,
"learning_rate": 3.72093023255814e-06,
"loss": 0.1215,
"step": 2110
},
{
"epoch": 9.86046511627907,
"grad_norm": 3.3745920658111572,
"learning_rate": 2.7906976744186046e-06,
"loss": 0.129,
"step": 2120
},
{
"epoch": 9.906976744186046,
"grad_norm": 0.3872903883457184,
"learning_rate": 1.86046511627907e-06,
"loss": 0.0951,
"step": 2130
},
{
"epoch": 9.953488372093023,
"grad_norm": 0.13965001702308655,
"learning_rate": 9.30232558139535e-07,
"loss": 0.1625,
"step": 2140
},
{
"epoch": 10.0,
"grad_norm": 0.7976939082145691,
"learning_rate": 0.0,
"loss": 0.1355,
"step": 2150
},
{
"epoch": 10.0,
"step": 2150,
"total_flos": 2.659620429501235e+18,
"train_loss": 0.30670992596204893,
"train_runtime": 1546.6686,
"train_samples_per_second": 22.19,
"train_steps_per_second": 1.39
}
],
"logging_steps": 10,
"max_steps": 2150,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 100,
"total_flos": 2.659620429501235e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}