vit-weldclassifyv3 / trainer_state.json
{
"best_metric": 0.26709362864494324,
"best_model_checkpoint": "vit-weldclassifyv3/checkpoint-1000",
"epoch": 13.0,
"eval_steps": 100,
"global_step": 2028,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0641025641025641,
"grad_norm": 1.1238359212875366,
"learning_rate": 0.0001990138067061144,
"loss": 1.2792,
"step": 10
},
{
"epoch": 0.1282051282051282,
"grad_norm": 1.7609381675720215,
"learning_rate": 0.0001980276134122288,
"loss": 1.1608,
"step": 20
},
{
"epoch": 0.19230769230769232,
"grad_norm": 1.1302402019500732,
"learning_rate": 0.0001970414201183432,
"loss": 1.1137,
"step": 30
},
{
"epoch": 0.2564102564102564,
"grad_norm": 3.0818381309509277,
"learning_rate": 0.0001960552268244576,
"loss": 1.0182,
"step": 40
},
{
"epoch": 0.32051282051282054,
"grad_norm": 1.0849112272262573,
"learning_rate": 0.000195069033530572,
"loss": 1.2566,
"step": 50
},
{
"epoch": 0.38461538461538464,
"grad_norm": 1.0367400646209717,
"learning_rate": 0.0001940828402366864,
"loss": 1.0554,
"step": 60
},
{
"epoch": 0.44871794871794873,
"grad_norm": 2.154937744140625,
"learning_rate": 0.0001930966469428008,
"loss": 1.0788,
"step": 70
},
{
"epoch": 0.5128205128205128,
"grad_norm": 1.1957019567489624,
"learning_rate": 0.0001921104536489152,
"loss": 1.2299,
"step": 80
},
{
"epoch": 0.5769230769230769,
"grad_norm": 1.3072288036346436,
"learning_rate": 0.0001911242603550296,
"loss": 1.0164,
"step": 90
},
{
"epoch": 0.6410256410256411,
"grad_norm": 1.340012550354004,
"learning_rate": 0.00019013806706114398,
"loss": 0.8398,
"step": 100
},
{
"epoch": 0.6410256410256411,
"eval_accuracy": 0.5035971223021583,
"eval_loss": 1.031232476234436,
"eval_runtime": 2.6053,
"eval_samples_per_second": 106.706,
"eval_steps_per_second": 13.434,
"step": 100
},
{
"epoch": 0.7051282051282052,
"grad_norm": 2.5044240951538086,
"learning_rate": 0.00018915187376725837,
"loss": 0.8484,
"step": 110
},
{
"epoch": 0.7692307692307693,
"grad_norm": 2.8144619464874268,
"learning_rate": 0.00018816568047337278,
"loss": 0.8283,
"step": 120
},
{
"epoch": 0.8333333333333334,
"grad_norm": 1.8459696769714355,
"learning_rate": 0.0001871794871794872,
"loss": 0.8201,
"step": 130
},
{
"epoch": 0.8974358974358975,
"grad_norm": 3.3761656284332275,
"learning_rate": 0.0001861932938856016,
"loss": 0.7736,
"step": 140
},
{
"epoch": 0.9615384615384616,
"grad_norm": 2.285553216934204,
"learning_rate": 0.00018520710059171598,
"loss": 0.7505,
"step": 150
},
{
"epoch": 1.0256410256410255,
"grad_norm": 3.3042893409729004,
"learning_rate": 0.0001842209072978304,
"loss": 0.6614,
"step": 160
},
{
"epoch": 1.0897435897435896,
"grad_norm": 3.174131393432617,
"learning_rate": 0.00018323471400394478,
"loss": 0.7393,
"step": 170
},
{
"epoch": 1.1538461538461537,
"grad_norm": 2.8764002323150635,
"learning_rate": 0.00018224852071005917,
"loss": 0.892,
"step": 180
},
{
"epoch": 1.217948717948718,
"grad_norm": 3.123760223388672,
"learning_rate": 0.00018126232741617356,
"loss": 0.7055,
"step": 190
},
{
"epoch": 1.282051282051282,
"grad_norm": 2.54199481010437,
"learning_rate": 0.00018027613412228798,
"loss": 0.5613,
"step": 200
},
{
"epoch": 1.282051282051282,
"eval_accuracy": 0.6618705035971223,
"eval_loss": 0.7067616581916809,
"eval_runtime": 2.5932,
"eval_samples_per_second": 107.202,
"eval_steps_per_second": 13.497,
"step": 200
},
{
"epoch": 1.3461538461538463,
"grad_norm": 4.848214626312256,
"learning_rate": 0.0001792899408284024,
"loss": 0.634,
"step": 210
},
{
"epoch": 1.4102564102564101,
"grad_norm": 2.621889352798462,
"learning_rate": 0.00017830374753451678,
"loss": 0.6666,
"step": 220
},
{
"epoch": 1.4743589743589745,
"grad_norm": 3.7964868545532227,
"learning_rate": 0.00017731755424063117,
"loss": 0.5344,
"step": 230
},
{
"epoch": 1.5384615384615383,
"grad_norm": 3.703036308288574,
"learning_rate": 0.00017633136094674556,
"loss": 0.5636,
"step": 240
},
{
"epoch": 1.6025641025641026,
"grad_norm": 3.081791400909424,
"learning_rate": 0.00017534516765285997,
"loss": 0.4211,
"step": 250
},
{
"epoch": 1.6666666666666665,
"grad_norm": 3.167443037033081,
"learning_rate": 0.00017435897435897436,
"loss": 0.6438,
"step": 260
},
{
"epoch": 1.7307692307692308,
"grad_norm": 6.15064811706543,
"learning_rate": 0.00017337278106508875,
"loss": 0.5872,
"step": 270
},
{
"epoch": 1.7948717948717947,
"grad_norm": 2.0773203372955322,
"learning_rate": 0.00017238658777120317,
"loss": 0.6172,
"step": 280
},
{
"epoch": 1.858974358974359,
"grad_norm": 3.2962491512298584,
"learning_rate": 0.00017140039447731758,
"loss": 0.5252,
"step": 290
},
{
"epoch": 1.9230769230769231,
"grad_norm": 2.557178020477295,
"learning_rate": 0.00017041420118343197,
"loss": 0.4296,
"step": 300
},
{
"epoch": 1.9230769230769231,
"eval_accuracy": 0.8309352517985612,
"eval_loss": 0.4007588028907776,
"eval_runtime": 2.5685,
"eval_samples_per_second": 108.233,
"eval_steps_per_second": 13.626,
"step": 300
},
{
"epoch": 1.9871794871794872,
"grad_norm": 2.061478614807129,
"learning_rate": 0.00016942800788954636,
"loss": 0.4041,
"step": 310
},
{
"epoch": 2.051282051282051,
"grad_norm": 2.703629732131958,
"learning_rate": 0.00016844181459566075,
"loss": 0.3791,
"step": 320
},
{
"epoch": 2.1153846153846154,
"grad_norm": 0.8648492693901062,
"learning_rate": 0.00016745562130177514,
"loss": 0.2943,
"step": 330
},
{
"epoch": 2.1794871794871793,
"grad_norm": 4.140072345733643,
"learning_rate": 0.00016646942800788956,
"loss": 0.2485,
"step": 340
},
{
"epoch": 2.2435897435897436,
"grad_norm": 5.02016019821167,
"learning_rate": 0.00016548323471400394,
"loss": 0.4904,
"step": 350
},
{
"epoch": 2.3076923076923075,
"grad_norm": 1.8560619354248047,
"learning_rate": 0.00016449704142011836,
"loss": 0.3835,
"step": 360
},
{
"epoch": 2.371794871794872,
"grad_norm": 10.23709487915039,
"learning_rate": 0.00016351084812623275,
"loss": 0.3015,
"step": 370
},
{
"epoch": 2.435897435897436,
"grad_norm": 5.944732666015625,
"learning_rate": 0.00016252465483234716,
"loss": 0.3199,
"step": 380
},
{
"epoch": 2.5,
"grad_norm": 2.0510830879211426,
"learning_rate": 0.00016153846153846155,
"loss": 0.2512,
"step": 390
},
{
"epoch": 2.564102564102564,
"grad_norm": 3.4166271686553955,
"learning_rate": 0.00016055226824457594,
"loss": 0.3475,
"step": 400
},
{
"epoch": 2.564102564102564,
"eval_accuracy": 0.8812949640287769,
"eval_loss": 0.33450016379356384,
"eval_runtime": 3.6169,
"eval_samples_per_second": 76.862,
"eval_steps_per_second": 9.677,
"step": 400
},
{
"epoch": 2.628205128205128,
"grad_norm": 6.59075927734375,
"learning_rate": 0.00015956607495069033,
"loss": 0.2296,
"step": 410
},
{
"epoch": 2.6923076923076925,
"grad_norm": 2.345207929611206,
"learning_rate": 0.00015857988165680475,
"loss": 0.2535,
"step": 420
},
{
"epoch": 2.7564102564102564,
"grad_norm": 3.3290200233459473,
"learning_rate": 0.00015759368836291914,
"loss": 0.2902,
"step": 430
},
{
"epoch": 2.8205128205128203,
"grad_norm": 3.723925828933716,
"learning_rate": 0.00015660749506903355,
"loss": 0.2591,
"step": 440
},
{
"epoch": 2.8846153846153846,
"grad_norm": 4.227429389953613,
"learning_rate": 0.00015562130177514794,
"loss": 0.2915,
"step": 450
},
{
"epoch": 2.948717948717949,
"grad_norm": 3.918539047241211,
"learning_rate": 0.00015463510848126233,
"loss": 0.3859,
"step": 460
},
{
"epoch": 3.0128205128205128,
"grad_norm": 2.824794292449951,
"learning_rate": 0.00015364891518737675,
"loss": 0.3425,
"step": 470
},
{
"epoch": 3.076923076923077,
"grad_norm": 0.28933683037757874,
"learning_rate": 0.00015266272189349113,
"loss": 0.1796,
"step": 480
},
{
"epoch": 3.141025641025641,
"grad_norm": 2.101590394973755,
"learning_rate": 0.00015167652859960552,
"loss": 0.1881,
"step": 490
},
{
"epoch": 3.2051282051282053,
"grad_norm": 0.31414705514907837,
"learning_rate": 0.0001506903353057199,
"loss": 0.1183,
"step": 500
},
{
"epoch": 3.2051282051282053,
"eval_accuracy": 0.8489208633093526,
"eval_loss": 0.4293219745159149,
"eval_runtime": 3.4489,
"eval_samples_per_second": 80.605,
"eval_steps_per_second": 10.148,
"step": 500
},
{
"epoch": 3.269230769230769,
"grad_norm": 3.8199613094329834,
"learning_rate": 0.00014970414201183433,
"loss": 0.1736,
"step": 510
},
{
"epoch": 3.3333333333333335,
"grad_norm": 1.631716251373291,
"learning_rate": 0.00014871794871794872,
"loss": 0.1903,
"step": 520
},
{
"epoch": 3.3974358974358974,
"grad_norm": 3.813403844833374,
"learning_rate": 0.00014773175542406313,
"loss": 0.2243,
"step": 530
},
{
"epoch": 3.4615384615384617,
"grad_norm": 6.256704330444336,
"learning_rate": 0.00014674556213017752,
"loss": 0.1941,
"step": 540
},
{
"epoch": 3.5256410256410255,
"grad_norm": 6.570198059082031,
"learning_rate": 0.0001457593688362919,
"loss": 0.1747,
"step": 550
},
{
"epoch": 3.58974358974359,
"grad_norm": 3.7718803882598877,
"learning_rate": 0.00014477317554240633,
"loss": 0.2871,
"step": 560
},
{
"epoch": 3.6538461538461537,
"grad_norm": 3.889697790145874,
"learning_rate": 0.00014378698224852072,
"loss": 0.173,
"step": 570
},
{
"epoch": 3.717948717948718,
"grad_norm": 2.5270767211914062,
"learning_rate": 0.0001428007889546351,
"loss": 0.2472,
"step": 580
},
{
"epoch": 3.782051282051282,
"grad_norm": 0.2746649980545044,
"learning_rate": 0.0001418145956607495,
"loss": 0.1564,
"step": 590
},
{
"epoch": 3.8461538461538463,
"grad_norm": 2.9636824131011963,
"learning_rate": 0.0001408284023668639,
"loss": 0.1531,
"step": 600
},
{
"epoch": 3.8461538461538463,
"eval_accuracy": 0.9136690647482014,
"eval_loss": 0.2748344838619232,
"eval_runtime": 2.583,
"eval_samples_per_second": 107.625,
"eval_steps_per_second": 13.55,
"step": 600
},
{
"epoch": 3.91025641025641,
"grad_norm": 2.8231372833251953,
"learning_rate": 0.00013984220907297832,
"loss": 0.1651,
"step": 610
},
{
"epoch": 3.9743589743589745,
"grad_norm": 0.6206132769584656,
"learning_rate": 0.0001388560157790927,
"loss": 0.1854,
"step": 620
},
{
"epoch": 4.038461538461538,
"grad_norm": 0.720039427280426,
"learning_rate": 0.0001378698224852071,
"loss": 0.2629,
"step": 630
},
{
"epoch": 4.102564102564102,
"grad_norm": 0.28592395782470703,
"learning_rate": 0.00013688362919132152,
"loss": 0.0969,
"step": 640
},
{
"epoch": 4.166666666666667,
"grad_norm": 1.0571306943893433,
"learning_rate": 0.0001358974358974359,
"loss": 0.0816,
"step": 650
},
{
"epoch": 4.230769230769231,
"grad_norm": 5.520449638366699,
"learning_rate": 0.0001349112426035503,
"loss": 0.0997,
"step": 660
},
{
"epoch": 4.294871794871795,
"grad_norm": 10.63196849822998,
"learning_rate": 0.00013392504930966468,
"loss": 0.1369,
"step": 670
},
{
"epoch": 4.358974358974359,
"grad_norm": 2.723325729370117,
"learning_rate": 0.0001329388560157791,
"loss": 0.0631,
"step": 680
},
{
"epoch": 4.423076923076923,
"grad_norm": 0.07080604135990143,
"learning_rate": 0.00013195266272189352,
"loss": 0.1089,
"step": 690
},
{
"epoch": 4.487179487179487,
"grad_norm": 5.3309326171875,
"learning_rate": 0.0001309664694280079,
"loss": 0.1174,
"step": 700
},
{
"epoch": 4.487179487179487,
"eval_accuracy": 0.8812949640287769,
"eval_loss": 0.36486199498176575,
"eval_runtime": 3.5449,
"eval_samples_per_second": 78.422,
"eval_steps_per_second": 9.873,
"step": 700
},
{
"epoch": 4.551282051282051,
"grad_norm": 4.462215900421143,
"learning_rate": 0.0001299802761341223,
"loss": 0.1378,
"step": 710
},
{
"epoch": 4.615384615384615,
"grad_norm": 8.144935607910156,
"learning_rate": 0.00012899408284023668,
"loss": 0.0917,
"step": 720
},
{
"epoch": 4.67948717948718,
"grad_norm": 0.08221765607595444,
"learning_rate": 0.0001280078895463511,
"loss": 0.0562,
"step": 730
},
{
"epoch": 4.743589743589744,
"grad_norm": 8.699402809143066,
"learning_rate": 0.0001270216962524655,
"loss": 0.1787,
"step": 740
},
{
"epoch": 4.8076923076923075,
"grad_norm": 7.376129150390625,
"learning_rate": 0.00012603550295857988,
"loss": 0.064,
"step": 750
},
{
"epoch": 4.871794871794872,
"grad_norm": 0.08259747177362442,
"learning_rate": 0.0001250493096646943,
"loss": 0.0927,
"step": 760
},
{
"epoch": 4.935897435897436,
"grad_norm": 0.08165480941534042,
"learning_rate": 0.00012406311637080868,
"loss": 0.1067,
"step": 770
},
{
"epoch": 5.0,
"grad_norm": 0.19699496030807495,
"learning_rate": 0.0001230769230769231,
"loss": 0.0974,
"step": 780
},
{
"epoch": 5.064102564102564,
"grad_norm": 3.4025802612304688,
"learning_rate": 0.00012209072978303749,
"loss": 0.0977,
"step": 790
},
{
"epoch": 5.128205128205128,
"grad_norm": 2.273251533508301,
"learning_rate": 0.00012110453648915188,
"loss": 0.0498,
"step": 800
},
{
"epoch": 5.128205128205128,
"eval_accuracy": 0.8920863309352518,
"eval_loss": 0.327900767326355,
"eval_runtime": 3.5689,
"eval_samples_per_second": 77.895,
"eval_steps_per_second": 9.807,
"step": 800
},
{
"epoch": 5.1923076923076925,
"grad_norm": 0.624547004699707,
"learning_rate": 0.00012011834319526626,
"loss": 0.0821,
"step": 810
},
{
"epoch": 5.256410256410256,
"grad_norm": 3.7610857486724854,
"learning_rate": 0.00011913214990138068,
"loss": 0.0481,
"step": 820
},
{
"epoch": 5.32051282051282,
"grad_norm": 0.06995019316673279,
"learning_rate": 0.00011814595660749508,
"loss": 0.0117,
"step": 830
},
{
"epoch": 5.384615384615385,
"grad_norm": 0.08295288681983948,
"learning_rate": 0.00011715976331360947,
"loss": 0.0122,
"step": 840
},
{
"epoch": 5.448717948717949,
"grad_norm": 0.0509318970143795,
"learning_rate": 0.00011617357001972386,
"loss": 0.0447,
"step": 850
},
{
"epoch": 5.512820512820513,
"grad_norm": 0.05534667894244194,
"learning_rate": 0.00011518737672583828,
"loss": 0.0786,
"step": 860
},
{
"epoch": 5.576923076923077,
"grad_norm": 0.36779171228408813,
"learning_rate": 0.00011420118343195268,
"loss": 0.0427,
"step": 870
},
{
"epoch": 5.641025641025641,
"grad_norm": 0.0412164181470871,
"learning_rate": 0.00011321499013806707,
"loss": 0.0151,
"step": 880
},
{
"epoch": 5.705128205128205,
"grad_norm": 0.03488855063915253,
"learning_rate": 0.00011222879684418146,
"loss": 0.0331,
"step": 890
},
{
"epoch": 5.769230769230769,
"grad_norm": 5.378381252288818,
"learning_rate": 0.00011124260355029586,
"loss": 0.0817,
"step": 900
},
{
"epoch": 5.769230769230769,
"eval_accuracy": 0.935251798561151,
"eval_loss": 0.27631285786628723,
"eval_runtime": 3.5274,
"eval_samples_per_second": 78.812,
"eval_steps_per_second": 9.922,
"step": 900
},
{
"epoch": 5.833333333333333,
"grad_norm": 0.121369369328022,
"learning_rate": 0.00011025641025641027,
"loss": 0.0164,
"step": 910
},
{
"epoch": 5.897435897435898,
"grad_norm": 0.30134713649749756,
"learning_rate": 0.00010927021696252466,
"loss": 0.0399,
"step": 920
},
{
"epoch": 5.961538461538462,
"grad_norm": 9.133658409118652,
"learning_rate": 0.00010828402366863905,
"loss": 0.0893,
"step": 930
},
{
"epoch": 6.0256410256410255,
"grad_norm": 0.037670884281396866,
"learning_rate": 0.00010729783037475345,
"loss": 0.0285,
"step": 940
},
{
"epoch": 6.089743589743589,
"grad_norm": 4.902493953704834,
"learning_rate": 0.00010631163708086787,
"loss": 0.0796,
"step": 950
},
{
"epoch": 6.153846153846154,
"grad_norm": 6.080508232116699,
"learning_rate": 0.00010532544378698226,
"loss": 0.1406,
"step": 960
},
{
"epoch": 6.217948717948718,
"grad_norm": 3.923866033554077,
"learning_rate": 0.00010433925049309665,
"loss": 0.0914,
"step": 970
},
{
"epoch": 6.282051282051282,
"grad_norm": 0.9966168999671936,
"learning_rate": 0.00010335305719921105,
"loss": 0.0906,
"step": 980
},
{
"epoch": 6.346153846153846,
"grad_norm": 0.2861874997615814,
"learning_rate": 0.00010236686390532544,
"loss": 0.0335,
"step": 990
},
{
"epoch": 6.410256410256411,
"grad_norm": 0.030173303559422493,
"learning_rate": 0.00010138067061143986,
"loss": 0.0075,
"step": 1000
},
{
"epoch": 6.410256410256411,
"eval_accuracy": 0.920863309352518,
"eval_loss": 0.26709362864494324,
"eval_runtime": 2.5656,
"eval_samples_per_second": 108.358,
"eval_steps_per_second": 13.642,
"step": 1000
},
{
"epoch": 6.4743589743589745,
"grad_norm": 0.02864202857017517,
"learning_rate": 0.00010039447731755424,
"loss": 0.0628,
"step": 1010
},
{
"epoch": 6.538461538461538,
"grad_norm": 0.6442215442657471,
"learning_rate": 9.940828402366865e-05,
"loss": 0.0205,
"step": 1020
},
{
"epoch": 6.602564102564102,
"grad_norm": 0.047572050243616104,
"learning_rate": 9.842209072978305e-05,
"loss": 0.1029,
"step": 1030
},
{
"epoch": 6.666666666666667,
"grad_norm": 11.545117378234863,
"learning_rate": 9.743589743589744e-05,
"loss": 0.0454,
"step": 1040
},
{
"epoch": 6.730769230769231,
"grad_norm": 0.08284212648868561,
"learning_rate": 9.644970414201184e-05,
"loss": 0.0059,
"step": 1050
},
{
"epoch": 6.794871794871795,
"grad_norm": 0.19572466611862183,
"learning_rate": 9.546351084812624e-05,
"loss": 0.0907,
"step": 1060
},
{
"epoch": 6.858974358974359,
"grad_norm": 0.041858524084091187,
"learning_rate": 9.447731755424064e-05,
"loss": 0.0562,
"step": 1070
},
{
"epoch": 6.923076923076923,
"grad_norm": 0.045817919075489044,
"learning_rate": 9.349112426035503e-05,
"loss": 0.0262,
"step": 1080
},
{
"epoch": 6.987179487179487,
"grad_norm": 0.14285103976726532,
"learning_rate": 9.250493096646942e-05,
"loss": 0.0185,
"step": 1090
},
{
"epoch": 7.051282051282051,
"grad_norm": 0.027153167873620987,
"learning_rate": 9.151873767258384e-05,
"loss": 0.0265,
"step": 1100
},
{
"epoch": 7.051282051282051,
"eval_accuracy": 0.920863309352518,
"eval_loss": 0.3184675872325897,
"eval_runtime": 2.4994,
"eval_samples_per_second": 111.228,
"eval_steps_per_second": 14.004,
"step": 1100
},
{
"epoch": 7.115384615384615,
"grad_norm": 0.027239225804805756,
"learning_rate": 9.053254437869823e-05,
"loss": 0.0629,
"step": 1110
},
{
"epoch": 7.17948717948718,
"grad_norm": 0.02252044342458248,
"learning_rate": 8.954635108481263e-05,
"loss": 0.0198,
"step": 1120
},
{
"epoch": 7.243589743589744,
"grad_norm": 0.038163017481565475,
"learning_rate": 8.856015779092702e-05,
"loss": 0.0323,
"step": 1130
},
{
"epoch": 7.3076923076923075,
"grad_norm": 0.038108520209789276,
"learning_rate": 8.757396449704143e-05,
"loss": 0.0379,
"step": 1140
},
{
"epoch": 7.371794871794872,
"grad_norm": 0.019251950085163116,
"learning_rate": 8.658777120315582e-05,
"loss": 0.028,
"step": 1150
},
{
"epoch": 7.435897435897436,
"grad_norm": 0.01974482275545597,
"learning_rate": 8.560157790927023e-05,
"loss": 0.0043,
"step": 1160
},
{
"epoch": 7.5,
"grad_norm": 2.0657660961151123,
"learning_rate": 8.461538461538461e-05,
"loss": 0.0069,
"step": 1170
},
{
"epoch": 7.564102564102564,
"grad_norm": 0.025256551802158356,
"learning_rate": 8.362919132149902e-05,
"loss": 0.0045,
"step": 1180
},
{
"epoch": 7.628205128205128,
"grad_norm": 0.018220046535134315,
"learning_rate": 8.264299802761342e-05,
"loss": 0.0038,
"step": 1190
},
{
"epoch": 7.6923076923076925,
"grad_norm": 15.359424591064453,
"learning_rate": 8.165680473372781e-05,
"loss": 0.0457,
"step": 1200
},
{
"epoch": 7.6923076923076925,
"eval_accuracy": 0.9100719424460432,
"eval_loss": 0.37759149074554443,
"eval_runtime": 3.7105,
"eval_samples_per_second": 74.922,
"eval_steps_per_second": 9.433,
"step": 1200
},
{
"epoch": 7.756410256410256,
"grad_norm": 0.017574544996023178,
"learning_rate": 8.067061143984221e-05,
"loss": 0.0038,
"step": 1210
},
{
"epoch": 7.82051282051282,
"grad_norm": 0.01818062551319599,
"learning_rate": 7.968441814595661e-05,
"loss": 0.004,
"step": 1220
},
{
"epoch": 7.884615384615385,
"grad_norm": 0.024309920147061348,
"learning_rate": 7.869822485207101e-05,
"loss": 0.0038,
"step": 1230
},
{
"epoch": 7.948717948717949,
"grad_norm": 0.07784882932901382,
"learning_rate": 7.77120315581854e-05,
"loss": 0.0059,
"step": 1240
},
{
"epoch": 8.012820512820513,
"grad_norm": 0.035918813198804855,
"learning_rate": 7.67258382642998e-05,
"loss": 0.0047,
"step": 1250
},
{
"epoch": 8.076923076923077,
"grad_norm": 0.35960128903388977,
"learning_rate": 7.573964497041421e-05,
"loss": 0.0068,
"step": 1260
},
{
"epoch": 8.14102564102564,
"grad_norm": 0.015659425407648087,
"learning_rate": 7.475345167652861e-05,
"loss": 0.0039,
"step": 1270
},
{
"epoch": 8.205128205128204,
"grad_norm": 0.01647733524441719,
"learning_rate": 7.3767258382643e-05,
"loss": 0.0035,
"step": 1280
},
{
"epoch": 8.26923076923077,
"grad_norm": 0.015693532302975655,
"learning_rate": 7.27810650887574e-05,
"loss": 0.0203,
"step": 1290
},
{
"epoch": 8.333333333333334,
"grad_norm": 0.01847326196730137,
"learning_rate": 7.17948717948718e-05,
"loss": 0.0032,
"step": 1300
},
{
"epoch": 8.333333333333334,
"eval_accuracy": 0.9388489208633094,
"eval_loss": 0.2834855318069458,
"eval_runtime": 3.4846,
"eval_samples_per_second": 79.78,
"eval_steps_per_second": 10.044,
"step": 1300
},
{
"epoch": 8.397435897435898,
"grad_norm": 0.01613900437951088,
"learning_rate": 7.08086785009862e-05,
"loss": 0.0055,
"step": 1310
},
{
"epoch": 8.461538461538462,
"grad_norm": 0.015385860577225685,
"learning_rate": 6.98224852071006e-05,
"loss": 0.0118,
"step": 1320
},
{
"epoch": 8.525641025641026,
"grad_norm": 0.014069377444684505,
"learning_rate": 6.883629191321498e-05,
"loss": 0.0029,
"step": 1330
},
{
"epoch": 8.58974358974359,
"grad_norm": 0.015751365572214127,
"learning_rate": 6.78500986193294e-05,
"loss": 0.0116,
"step": 1340
},
{
"epoch": 8.653846153846153,
"grad_norm": 0.0143636055290699,
"learning_rate": 6.686390532544379e-05,
"loss": 0.0554,
"step": 1350
},
{
"epoch": 8.717948717948717,
"grad_norm": 0.015580832026898861,
"learning_rate": 6.587771203155819e-05,
"loss": 0.0031,
"step": 1360
},
{
"epoch": 8.782051282051283,
"grad_norm": 0.014274963177740574,
"learning_rate": 6.489151873767258e-05,
"loss": 0.0029,
"step": 1370
},
{
"epoch": 8.846153846153847,
"grad_norm": 0.02313074842095375,
"learning_rate": 6.390532544378698e-05,
"loss": 0.0339,
"step": 1380
},
{
"epoch": 8.91025641025641,
"grad_norm": 1.4907852411270142,
"learning_rate": 6.291913214990139e-05,
"loss": 0.0046,
"step": 1390
},
{
"epoch": 8.974358974358974,
"grad_norm": 0.015585100278258324,
"learning_rate": 6.193293885601579e-05,
"loss": 0.0027,
"step": 1400
},
{
"epoch": 8.974358974358974,
"eval_accuracy": 0.8884892086330936,
"eval_loss": 0.5365095734596252,
"eval_runtime": 2.5385,
"eval_samples_per_second": 109.513,
"eval_steps_per_second": 13.788,
"step": 1400
},
{
"epoch": 9.038461538461538,
"grad_norm": 0.014531627297401428,
"learning_rate": 6.094674556213018e-05,
"loss": 0.0253,
"step": 1410
},
{
"epoch": 9.102564102564102,
"grad_norm": 0.012606761418282986,
"learning_rate": 5.996055226824457e-05,
"loss": 0.0031,
"step": 1420
},
{
"epoch": 9.166666666666666,
"grad_norm": 0.012980329804122448,
"learning_rate": 5.897435897435898e-05,
"loss": 0.003,
"step": 1430
},
{
"epoch": 9.23076923076923,
"grad_norm": 0.012560625560581684,
"learning_rate": 5.798816568047337e-05,
"loss": 0.0029,
"step": 1440
},
{
"epoch": 9.294871794871796,
"grad_norm": 0.01267662551254034,
"learning_rate": 5.700197238658778e-05,
"loss": 0.0027,
"step": 1450
},
{
"epoch": 9.35897435897436,
"grad_norm": 0.018838873133063316,
"learning_rate": 5.601577909270217e-05,
"loss": 0.0026,
"step": 1460
},
{
"epoch": 9.423076923076923,
"grad_norm": 0.012279819697141647,
"learning_rate": 5.502958579881658e-05,
"loss": 0.0025,
"step": 1470
},
{
"epoch": 9.487179487179487,
"grad_norm": 0.011527939699590206,
"learning_rate": 5.4043392504930966e-05,
"loss": 0.0023,
"step": 1480
},
{
"epoch": 9.551282051282051,
"grad_norm": 0.01282353326678276,
"learning_rate": 5.3057199211045375e-05,
"loss": 0.0024,
"step": 1490
},
{
"epoch": 9.615384615384615,
"grad_norm": 0.012979278340935707,
"learning_rate": 5.2071005917159764e-05,
"loss": 0.0024,
"step": 1500
},
{
"epoch": 9.615384615384615,
"eval_accuracy": 0.9460431654676259,
"eval_loss": 0.28174683451652527,
"eval_runtime": 2.5662,
"eval_samples_per_second": 108.33,
"eval_steps_per_second": 13.639,
"step": 1500
},
{
"epoch": 9.679487179487179,
"grad_norm": 0.012905549257993698,
"learning_rate": 5.1084812623274167e-05,
"loss": 0.0023,
"step": 1510
},
{
"epoch": 9.743589743589745,
"grad_norm": 0.011163388378918171,
"learning_rate": 5.009861932938856e-05,
"loss": 0.0023,
"step": 1520
},
{
"epoch": 9.807692307692308,
"grad_norm": 0.012818182818591595,
"learning_rate": 4.9112426035502965e-05,
"loss": 0.0023,
"step": 1530
},
{
"epoch": 9.871794871794872,
"grad_norm": 0.012148367241024971,
"learning_rate": 4.812623274161736e-05,
"loss": 0.0023,
"step": 1540
},
{
"epoch": 9.935897435897436,
"grad_norm": 0.012448051013052464,
"learning_rate": 4.714003944773176e-05,
"loss": 0.0021,
"step": 1550
},
{
"epoch": 10.0,
"grad_norm": 0.01428731344640255,
"learning_rate": 4.615384615384616e-05,
"loss": 0.0024,
"step": 1560
},
{
"epoch": 10.064102564102564,
"grad_norm": 0.012622198089957237,
"learning_rate": 4.5167652859960554e-05,
"loss": 0.0021,
"step": 1570
},
{
"epoch": 10.128205128205128,
"grad_norm": 0.011053148657083511,
"learning_rate": 4.418145956607495e-05,
"loss": 0.0022,
"step": 1580
},
{
"epoch": 10.192307692307692,
"grad_norm": 0.010602343827486038,
"learning_rate": 4.319526627218935e-05,
"loss": 0.0022,
"step": 1590
},
{
"epoch": 10.256410256410255,
"grad_norm": 0.012038341723382473,
"learning_rate": 4.220907297830375e-05,
"loss": 0.0021,
"step": 1600
},
{
"epoch": 10.256410256410255,
"eval_accuracy": 0.9460431654676259,
"eval_loss": 0.28900742530822754,
"eval_runtime": 3.7223,
"eval_samples_per_second": 74.686,
"eval_steps_per_second": 9.403,
"step": 1600
},
{
"epoch": 10.320512820512821,
"grad_norm": 0.010555533692240715,
"learning_rate": 4.122287968441815e-05,
"loss": 0.0022,
"step": 1610
},
{
"epoch": 10.384615384615385,
"grad_norm": 0.013419007882475853,
"learning_rate": 4.0236686390532545e-05,
"loss": 0.0023,
"step": 1620
},
{
"epoch": 10.448717948717949,
"grad_norm": 0.009542490355670452,
"learning_rate": 3.925049309664695e-05,
"loss": 0.0021,
"step": 1630
},
{
"epoch": 10.512820512820513,
"grad_norm": 0.01066176313906908,
"learning_rate": 3.826429980276134e-05,
"loss": 0.0021,
"step": 1640
},
{
"epoch": 10.576923076923077,
"grad_norm": 0.009535272605717182,
"learning_rate": 3.7278106508875746e-05,
"loss": 0.002,
"step": 1650
},
{
"epoch": 10.64102564102564,
"grad_norm": 0.010441762395203114,
"learning_rate": 3.629191321499014e-05,
"loss": 0.0021,
"step": 1660
},
{
"epoch": 10.705128205128204,
"grad_norm": 0.009478352032601833,
"learning_rate": 3.5305719921104544e-05,
"loss": 0.002,
"step": 1670
},
{
"epoch": 10.76923076923077,
"grad_norm": 0.009695933200418949,
"learning_rate": 3.431952662721893e-05,
"loss": 0.002,
"step": 1680
},
{
"epoch": 10.833333333333334,
"grad_norm": 0.009181487374007702,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.0019,
"step": 1690
},
{
"epoch": 10.897435897435898,
"grad_norm": 0.010168403387069702,
"learning_rate": 3.234714003944773e-05,
"loss": 0.002,
"step": 1700
},
{
"epoch": 10.897435897435898,
"eval_accuracy": 0.9460431654676259,
"eval_loss": 0.29342445731163025,
"eval_runtime": 2.6399,
"eval_samples_per_second": 105.306,
"eval_steps_per_second": 13.258,
"step": 1700
},
{
"epoch": 10.961538461538462,
"grad_norm": 0.012188075110316277,
"learning_rate": 3.136094674556213e-05,
"loss": 0.0019,
"step": 1710
},
{
"epoch": 11.025641025641026,
"grad_norm": 0.01147390529513359,
"learning_rate": 3.037475345167653e-05,
"loss": 0.002,
"step": 1720
},
{
"epoch": 11.08974358974359,
"grad_norm": 0.00882646068930626,
"learning_rate": 2.9388560157790928e-05,
"loss": 0.0019,
"step": 1730
},
{
"epoch": 11.153846153846153,
"grad_norm": 0.00944901816546917,
"learning_rate": 2.8402366863905327e-05,
"loss": 0.002,
"step": 1740
},
{
"epoch": 11.217948717948717,
"grad_norm": 0.009782733395695686,
"learning_rate": 2.7416173570019726e-05,
"loss": 0.0019,
"step": 1750
},
{
"epoch": 11.282051282051283,
"grad_norm": 0.008780627511441708,
"learning_rate": 2.6429980276134125e-05,
"loss": 0.0019,
"step": 1760
},
{
"epoch": 11.346153846153847,
"grad_norm": 0.011177603155374527,
"learning_rate": 2.5443786982248524e-05,
"loss": 0.0019,
"step": 1770
},
{
"epoch": 11.41025641025641,
"grad_norm": 0.009699178859591484,
"learning_rate": 2.445759368836292e-05,
"loss": 0.0018,
"step": 1780
},
{
"epoch": 11.474358974358974,
"grad_norm": 0.012270525097846985,
"learning_rate": 2.3471400394477318e-05,
"loss": 0.0019,
"step": 1790
},
{
"epoch": 11.538461538461538,
"grad_norm": 0.00872350949794054,
"learning_rate": 2.2485207100591717e-05,
"loss": 0.0019,
"step": 1800
},
{
"epoch": 11.538461538461538,
"eval_accuracy": 0.9460431654676259,
"eval_loss": 0.2975642681121826,
"eval_runtime": 2.6404,
"eval_samples_per_second": 105.285,
"eval_steps_per_second": 13.255,
"step": 1800
},
{
"epoch": 11.602564102564102,
"grad_norm": 0.009663211181759834,
"learning_rate": 2.1499013806706113e-05,
"loss": 0.0019,
"step": 1810
},
{
"epoch": 11.666666666666666,
"grad_norm": 0.00840259250253439,
"learning_rate": 2.0512820512820512e-05,
"loss": 0.0018,
"step": 1820
},
{
"epoch": 11.73076923076923,
"grad_norm": 0.010098517872393131,
"learning_rate": 1.952662721893491e-05,
"loss": 0.0019,
"step": 1830
},
{
"epoch": 11.794871794871796,
"grad_norm": 0.013970685191452503,
"learning_rate": 1.854043392504931e-05,
"loss": 0.0019,
"step": 1840
},
{
"epoch": 11.85897435897436,
"grad_norm": 0.009059489704668522,
"learning_rate": 1.755424063116371e-05,
"loss": 0.0018,
"step": 1850
},
{
"epoch": 11.923076923076923,
"grad_norm": 0.009793447330594063,
"learning_rate": 1.6568047337278108e-05,
"loss": 0.0019,
"step": 1860
},
{
"epoch": 11.987179487179487,
"grad_norm": 0.008815682493150234,
"learning_rate": 1.5581854043392503e-05,
"loss": 0.0019,
"step": 1870
},
{
"epoch": 12.051282051282051,
"grad_norm": 0.011554540134966373,
"learning_rate": 1.4595660749506904e-05,
"loss": 0.0018,
"step": 1880
},
{
"epoch": 12.115384615384615,
"grad_norm": 0.010211811400949955,
"learning_rate": 1.3609467455621303e-05,
"loss": 0.0019,
"step": 1890
},
{
"epoch": 12.179487179487179,
"grad_norm": 0.008440189994871616,
"learning_rate": 1.2623274161735702e-05,
"loss": 0.0018,
"step": 1900
},
{
"epoch": 12.179487179487179,
"eval_accuracy": 0.9460431654676259,
"eval_loss": 0.2996102273464203,
"eval_runtime": 3.4755,
"eval_samples_per_second": 79.989,
"eval_steps_per_second": 10.071,
"step": 1900
},
{
"epoch": 12.243589743589743,
"grad_norm": 0.010010773316025734,
"learning_rate": 1.16370808678501e-05,
"loss": 0.0018,
"step": 1910
},
{
"epoch": 12.307692307692308,
"grad_norm": 0.008343766443431377,
"learning_rate": 1.0650887573964498e-05,
"loss": 0.0017,
"step": 1920
},
{
"epoch": 12.371794871794872,
"grad_norm": 0.009077221155166626,
"learning_rate": 9.664694280078896e-06,
"loss": 0.0018,
"step": 1930
},
{
"epoch": 12.435897435897436,
"grad_norm": 0.008946426212787628,
"learning_rate": 8.678500986193295e-06,
"loss": 0.0018,
"step": 1940
},
{
"epoch": 12.5,
"grad_norm": 0.008481796830892563,
"learning_rate": 7.692307692307694e-06,
"loss": 0.0018,
"step": 1950
},
{
"epoch": 12.564102564102564,
"grad_norm": 0.012229247018694878,
"learning_rate": 6.706114398422091e-06,
"loss": 0.0018,
"step": 1960
},
{
"epoch": 12.628205128205128,
"grad_norm": 0.009792056865990162,
"learning_rate": 5.71992110453649e-06,
"loss": 0.0019,
"step": 1970
},
{
"epoch": 12.692307692307692,
"grad_norm": 0.008133339695632458,
"learning_rate": 4.733727810650888e-06,
"loss": 0.0017,
"step": 1980
},
{
"epoch": 12.756410256410255,
"grad_norm": 0.00821082666516304,
"learning_rate": 3.7475345167652858e-06,
"loss": 0.0018,
"step": 1990
},
{
"epoch": 12.820512820512821,
"grad_norm": 0.008551714941859245,
"learning_rate": 2.7613412228796843e-06,
"loss": 0.0018,
"step": 2000
},
{
"epoch": 12.820512820512821,
"eval_accuracy": 0.9460431654676259,
"eval_loss": 0.3006349802017212,
"eval_runtime": 2.5194,
"eval_samples_per_second": 110.343,
"eval_steps_per_second": 13.892,
"step": 2000
},
{
"epoch": 12.884615384615385,
"grad_norm": 0.008309854194521904,
"learning_rate": 1.775147928994083e-06,
"loss": 0.0019,
"step": 2010
},
{
"epoch": 12.948717948717949,
"grad_norm": 0.008332509547472,
"learning_rate": 7.889546351084814e-07,
"loss": 0.0017,
"step": 2020
},
{
"epoch": 13.0,
"step": 2028,
"total_flos": 2.5134977377120666e+18,
"train_loss": 0.18042536613924515,
"train_runtime": 1249.5195,
"train_samples_per_second": 25.958,
"train_steps_per_second": 1.623
}
],
"logging_steps": 10,
"max_steps": 2028,
"num_input_tokens_seen": 0,
"num_train_epochs": 13,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.5134977377120666e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}