{
"best_metric": 0.9879931389365352,
"best_model_checkpoint": "videomae-base-finetuned-numbers-augmented/checkpoint-2816",
"epoch": 3.25,
"eval_steps": 500,
"global_step": 2816,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0035511363636363635,
"grad_norm": 11.368436813354492,
"learning_rate": 1.7730496453900712e-06,
"loss": 2.4086,
"step": 10
},
{
"epoch": 0.007102272727272727,
"grad_norm": 13.567573547363281,
"learning_rate": 3.5460992907801423e-06,
"loss": 2.4404,
"step": 20
},
{
"epoch": 0.01065340909090909,
"grad_norm": 10.261510848999023,
"learning_rate": 5.319148936170213e-06,
"loss": 2.4437,
"step": 30
},
{
"epoch": 0.014204545454545454,
"grad_norm": 10.864927291870117,
"learning_rate": 7.092198581560285e-06,
"loss": 2.4537,
"step": 40
},
{
"epoch": 0.01775568181818182,
"grad_norm": 13.574213027954102,
"learning_rate": 8.865248226950355e-06,
"loss": 2.3604,
"step": 50
},
{
"epoch": 0.02130681818181818,
"grad_norm": 12.087525367736816,
"learning_rate": 1.0638297872340426e-05,
"loss": 2.4028,
"step": 60
},
{
"epoch": 0.024857954545454544,
"grad_norm": 16.888160705566406,
"learning_rate": 1.2411347517730498e-05,
"loss": 2.3285,
"step": 70
},
{
"epoch": 0.028409090909090908,
"grad_norm": 11.21091365814209,
"learning_rate": 1.418439716312057e-05,
"loss": 2.4292,
"step": 80
},
{
"epoch": 0.03196022727272727,
"grad_norm": 12.686086654663086,
"learning_rate": 1.595744680851064e-05,
"loss": 2.3094,
"step": 90
},
{
"epoch": 0.03551136363636364,
"grad_norm": 13.735329627990723,
"learning_rate": 1.773049645390071e-05,
"loss": 2.2478,
"step": 100
},
{
"epoch": 0.0390625,
"grad_norm": 15.364631652832031,
"learning_rate": 1.950354609929078e-05,
"loss": 2.2576,
"step": 110
},
{
"epoch": 0.04261363636363636,
"grad_norm": 17.786842346191406,
"learning_rate": 2.1276595744680852e-05,
"loss": 2.2132,
"step": 120
},
{
"epoch": 0.04616477272727273,
"grad_norm": 15.442058563232422,
"learning_rate": 2.3049645390070924e-05,
"loss": 2.1968,
"step": 130
},
{
"epoch": 0.04971590909090909,
"grad_norm": 18.52874755859375,
"learning_rate": 2.4822695035460995e-05,
"loss": 1.9613,
"step": 140
},
{
"epoch": 0.053267045454545456,
"grad_norm": 16.437816619873047,
"learning_rate": 2.6595744680851064e-05,
"loss": 2.055,
"step": 150
},
{
"epoch": 0.056818181818181816,
"grad_norm": 16.25989532470703,
"learning_rate": 2.836879432624114e-05,
"loss": 2.2668,
"step": 160
},
{
"epoch": 0.060369318181818184,
"grad_norm": 16.64414405822754,
"learning_rate": 3.0141843971631207e-05,
"loss": 2.0752,
"step": 170
},
{
"epoch": 0.06392045454545454,
"grad_norm": 22.03922462463379,
"learning_rate": 3.191489361702128e-05,
"loss": 1.9947,
"step": 180
},
{
"epoch": 0.06747159090909091,
"grad_norm": 20.077898025512695,
"learning_rate": 3.3687943262411347e-05,
"loss": 1.8888,
"step": 190
},
{
"epoch": 0.07102272727272728,
"grad_norm": 17.019630432128906,
"learning_rate": 3.546099290780142e-05,
"loss": 1.8263,
"step": 200
},
{
"epoch": 0.07457386363636363,
"grad_norm": 22.599655151367188,
"learning_rate": 3.723404255319149e-05,
"loss": 1.6843,
"step": 210
},
{
"epoch": 0.078125,
"grad_norm": 17.443727493286133,
"learning_rate": 3.900709219858156e-05,
"loss": 1.7081,
"step": 220
},
{
"epoch": 0.08167613636363637,
"grad_norm": 12.519112586975098,
"learning_rate": 4.078014184397163e-05,
"loss": 1.4262,
"step": 230
},
{
"epoch": 0.08522727272727272,
"grad_norm": 21.003129959106445,
"learning_rate": 4.2553191489361704e-05,
"loss": 1.6321,
"step": 240
},
{
"epoch": 0.08877840909090909,
"grad_norm": 9.726280212402344,
"learning_rate": 4.432624113475177e-05,
"loss": 1.6712,
"step": 250
},
{
"epoch": 0.09232954545454546,
"grad_norm": 24.422882080078125,
"learning_rate": 4.609929078014185e-05,
"loss": 2.0967,
"step": 260
},
{
"epoch": 0.09588068181818182,
"grad_norm": 11.92298698425293,
"learning_rate": 4.787234042553192e-05,
"loss": 1.7163,
"step": 270
},
{
"epoch": 0.09943181818181818,
"grad_norm": 14.245928764343262,
"learning_rate": 4.964539007092199e-05,
"loss": 1.9799,
"step": 280
},
{
"epoch": 0.10298295454545454,
"grad_norm": 14.423157691955566,
"learning_rate": 4.9842146803472775e-05,
"loss": 1.6602,
"step": 290
},
{
"epoch": 0.10653409090909091,
"grad_norm": 11.99425983428955,
"learning_rate": 4.9644830307813736e-05,
"loss": 1.6844,
"step": 300
},
{
"epoch": 0.11008522727272728,
"grad_norm": 13.40038776397705,
"learning_rate": 4.94475138121547e-05,
"loss": 1.6082,
"step": 310
},
{
"epoch": 0.11363636363636363,
"grad_norm": 9.363014221191406,
"learning_rate": 4.925019731649566e-05,
"loss": 1.7191,
"step": 320
},
{
"epoch": 0.1171875,
"grad_norm": 16.138887405395508,
"learning_rate": 4.905288082083662e-05,
"loss": 1.621,
"step": 330
},
{
"epoch": 0.12073863636363637,
"grad_norm": 13.552618980407715,
"learning_rate": 4.885556432517759e-05,
"loss": 1.4569,
"step": 340
},
{
"epoch": 0.12428977272727272,
"grad_norm": 16.55213737487793,
"learning_rate": 4.865824782951855e-05,
"loss": 1.5886,
"step": 350
},
{
"epoch": 0.1278409090909091,
"grad_norm": 4.656749248504639,
"learning_rate": 4.8460931333859514e-05,
"loss": 1.2231,
"step": 360
},
{
"epoch": 0.13139204545454544,
"grad_norm": 9.768326759338379,
"learning_rate": 4.8263614838200475e-05,
"loss": 1.65,
"step": 370
},
{
"epoch": 0.13494318181818182,
"grad_norm": 20.023557662963867,
"learning_rate": 4.806629834254144e-05,
"loss": 1.4166,
"step": 380
},
{
"epoch": 0.13849431818181818,
"grad_norm": 18.180370330810547,
"learning_rate": 4.78689818468824e-05,
"loss": 1.1376,
"step": 390
},
{
"epoch": 0.14204545454545456,
"grad_norm": 16.621734619140625,
"learning_rate": 4.767166535122337e-05,
"loss": 1.5926,
"step": 400
},
{
"epoch": 0.1455965909090909,
"grad_norm": 17.98001480102539,
"learning_rate": 4.747434885556433e-05,
"loss": 1.3632,
"step": 410
},
{
"epoch": 0.14914772727272727,
"grad_norm": 20.42987060546875,
"learning_rate": 4.727703235990529e-05,
"loss": 1.3245,
"step": 420
},
{
"epoch": 0.15269886363636365,
"grad_norm": 11.056243896484375,
"learning_rate": 4.707971586424625e-05,
"loss": 1.2668,
"step": 430
},
{
"epoch": 0.15625,
"grad_norm": 13.91126537322998,
"learning_rate": 4.6882399368587215e-05,
"loss": 1.0435,
"step": 440
},
{
"epoch": 0.15980113636363635,
"grad_norm": 19.263099670410156,
"learning_rate": 4.6685082872928176e-05,
"loss": 1.3369,
"step": 450
},
{
"epoch": 0.16335227272727273,
"grad_norm": 23.511869430541992,
"learning_rate": 4.6487766377269145e-05,
"loss": 1.1933,
"step": 460
},
{
"epoch": 0.1669034090909091,
"grad_norm": 14.228583335876465,
"learning_rate": 4.629044988161011e-05,
"loss": 1.591,
"step": 470
},
{
"epoch": 0.17045454545454544,
"grad_norm": 19.068967819213867,
"learning_rate": 4.609313338595106e-05,
"loss": 0.9233,
"step": 480
},
{
"epoch": 0.17400568181818182,
"grad_norm": 11.136780738830566,
"learning_rate": 4.589581689029203e-05,
"loss": 1.0833,
"step": 490
},
{
"epoch": 0.17755681818181818,
"grad_norm": 10.600316047668457,
"learning_rate": 4.569850039463299e-05,
"loss": 1.1883,
"step": 500
},
{
"epoch": 0.18110795454545456,
"grad_norm": 14.483977317810059,
"learning_rate": 4.550118389897396e-05,
"loss": 1.2224,
"step": 510
},
{
"epoch": 0.1846590909090909,
"grad_norm": 20.28068733215332,
"learning_rate": 4.530386740331492e-05,
"loss": 0.9271,
"step": 520
},
{
"epoch": 0.18821022727272727,
"grad_norm": 8.706443786621094,
"learning_rate": 4.5106550907655884e-05,
"loss": 0.876,
"step": 530
},
{
"epoch": 0.19176136363636365,
"grad_norm": 24.446735382080078,
"learning_rate": 4.4909234411996846e-05,
"loss": 0.9443,
"step": 540
},
{
"epoch": 0.1953125,
"grad_norm": 18.790678024291992,
"learning_rate": 4.471191791633781e-05,
"loss": 1.5365,
"step": 550
},
{
"epoch": 0.19886363636363635,
"grad_norm": 12.385162353515625,
"learning_rate": 4.451460142067877e-05,
"loss": 1.1072,
"step": 560
},
{
"epoch": 0.20241477272727273,
"grad_norm": 10.875085830688477,
"learning_rate": 4.431728492501974e-05,
"loss": 1.3267,
"step": 570
},
{
"epoch": 0.2059659090909091,
"grad_norm": 23.155399322509766,
"learning_rate": 4.41199684293607e-05,
"loss": 1.0875,
"step": 580
},
{
"epoch": 0.20951704545454544,
"grad_norm": 16.206789016723633,
"learning_rate": 4.392265193370166e-05,
"loss": 1.532,
"step": 590
},
{
"epoch": 0.21306818181818182,
"grad_norm": 10.199226379394531,
"learning_rate": 4.372533543804262e-05,
"loss": 1.3542,
"step": 600
},
{
"epoch": 0.21661931818181818,
"grad_norm": 24.252439498901367,
"learning_rate": 4.3528018942383585e-05,
"loss": 0.9932,
"step": 610
},
{
"epoch": 0.22017045454545456,
"grad_norm": 21.493459701538086,
"learning_rate": 4.333070244672455e-05,
"loss": 0.9927,
"step": 620
},
{
"epoch": 0.2237215909090909,
"grad_norm": 14.551782608032227,
"learning_rate": 4.3133385951065515e-05,
"loss": 0.86,
"step": 630
},
{
"epoch": 0.22727272727272727,
"grad_norm": 36.133609771728516,
"learning_rate": 4.293606945540648e-05,
"loss": 0.7792,
"step": 640
},
{
"epoch": 0.23082386363636365,
"grad_norm": 13.04199504852295,
"learning_rate": 4.273875295974744e-05,
"loss": 0.9251,
"step": 650
},
{
"epoch": 0.234375,
"grad_norm": 13.920279502868652,
"learning_rate": 4.25414364640884e-05,
"loss": 1.0642,
"step": 660
},
{
"epoch": 0.23792613636363635,
"grad_norm": 4.325495719909668,
"learning_rate": 4.234411996842936e-05,
"loss": 0.9275,
"step": 670
},
{
"epoch": 0.24147727272727273,
"grad_norm": 23.056415557861328,
"learning_rate": 4.2146803472770324e-05,
"loss": 1.1907,
"step": 680
},
{
"epoch": 0.2450284090909091,
"grad_norm": 11.812889099121094,
"learning_rate": 4.194948697711129e-05,
"loss": 1.0329,
"step": 690
},
{
"epoch": 0.24857954545454544,
"grad_norm": 11.360099792480469,
"learning_rate": 4.1752170481452254e-05,
"loss": 0.8968,
"step": 700
},
{
"epoch": 0.25,
"eval_accuracy": 0.6878216123499142,
"eval_f1": 0.6885242103977541,
"eval_loss": 0.8689462542533875,
"eval_precision": 0.742302721464857,
"eval_recall": 0.688613815159033,
"eval_runtime": 653.0033,
"eval_samples_per_second": 0.893,
"eval_steps_per_second": 0.224,
"step": 704
},
{
"epoch": 1.0021306818181819,
"grad_norm": 5.516427040100098,
"learning_rate": 4.1554853985793216e-05,
"loss": 0.9278,
"step": 710
},
{
"epoch": 1.0056818181818181,
"grad_norm": 8.10287094116211,
"learning_rate": 4.135753749013418e-05,
"loss": 0.8833,
"step": 720
},
{
"epoch": 1.0092329545454546,
"grad_norm": 21.60080337524414,
"learning_rate": 4.116022099447514e-05,
"loss": 0.6148,
"step": 730
},
{
"epoch": 1.0127840909090908,
"grad_norm": 17.968292236328125,
"learning_rate": 4.09629044988161e-05,
"loss": 0.8788,
"step": 740
},
{
"epoch": 1.0163352272727273,
"grad_norm": 8.538472175598145,
"learning_rate": 4.076558800315707e-05,
"loss": 0.778,
"step": 750
},
{
"epoch": 1.0198863636363635,
"grad_norm": 4.343443870544434,
"learning_rate": 4.056827150749803e-05,
"loss": 0.5165,
"step": 760
},
{
"epoch": 1.0234375,
"grad_norm": 6.1510009765625,
"learning_rate": 4.037095501183899e-05,
"loss": 0.8024,
"step": 770
},
{
"epoch": 1.0269886363636365,
"grad_norm": 23.44464111328125,
"learning_rate": 4.0173638516179955e-05,
"loss": 1.0074,
"step": 780
},
{
"epoch": 1.0305397727272727,
"grad_norm": 16.371910095214844,
"learning_rate": 3.997632202052092e-05,
"loss": 0.9794,
"step": 790
},
{
"epoch": 1.0340909090909092,
"grad_norm": 8.532549858093262,
"learning_rate": 3.977900552486188e-05,
"loss": 0.8435,
"step": 800
},
{
"epoch": 1.0376420454545454,
"grad_norm": 15.13945484161377,
"learning_rate": 3.958168902920285e-05,
"loss": 0.9073,
"step": 810
},
{
"epoch": 1.0411931818181819,
"grad_norm": 12.980558395385742,
"learning_rate": 3.938437253354381e-05,
"loss": 0.8077,
"step": 820
},
{
"epoch": 1.0447443181818181,
"grad_norm": 7.141788005828857,
"learning_rate": 3.9187056037884764e-05,
"loss": 0.9853,
"step": 830
},
{
"epoch": 1.0482954545454546,
"grad_norm": 5.774048328399658,
"learning_rate": 3.898973954222573e-05,
"loss": 0.5617,
"step": 840
},
{
"epoch": 1.0518465909090908,
"grad_norm": 30.009641647338867,
"learning_rate": 3.8792423046566694e-05,
"loss": 0.8913,
"step": 850
},
{
"epoch": 1.0553977272727273,
"grad_norm": 11.07997989654541,
"learning_rate": 3.8595106550907656e-05,
"loss": 0.6674,
"step": 860
},
{
"epoch": 1.0589488636363635,
"grad_norm": 13.393827438354492,
"learning_rate": 3.8397790055248625e-05,
"loss": 0.5832,
"step": 870
},
{
"epoch": 1.0625,
"grad_norm": 10.827837944030762,
"learning_rate": 3.8200473559589587e-05,
"loss": 0.6509,
"step": 880
},
{
"epoch": 1.0660511363636365,
"grad_norm": 27.103227615356445,
"learning_rate": 3.800315706393054e-05,
"loss": 0.7135,
"step": 890
},
{
"epoch": 1.0696022727272727,
"grad_norm": 16.949966430664062,
"learning_rate": 3.780584056827151e-05,
"loss": 1.0985,
"step": 900
},
{
"epoch": 1.0731534090909092,
"grad_norm": 11.158037185668945,
"learning_rate": 3.760852407261247e-05,
"loss": 0.7335,
"step": 910
},
{
"epoch": 1.0767045454545454,
"grad_norm": 26.55095863342285,
"learning_rate": 3.7411207576953434e-05,
"loss": 0.7143,
"step": 920
},
{
"epoch": 1.0802556818181819,
"grad_norm": 7.920712947845459,
"learning_rate": 3.72138910812944e-05,
"loss": 0.8197,
"step": 930
},
{
"epoch": 1.0838068181818181,
"grad_norm": 2.0704779624938965,
"learning_rate": 3.7016574585635364e-05,
"loss": 0.7739,
"step": 940
},
{
"epoch": 1.0873579545454546,
"grad_norm": 29.551998138427734,
"learning_rate": 3.681925808997632e-05,
"loss": 0.9224,
"step": 950
},
{
"epoch": 1.0909090909090908,
"grad_norm": 9.859967231750488,
"learning_rate": 3.662194159431729e-05,
"loss": 0.8151,
"step": 960
},
{
"epoch": 1.0944602272727273,
"grad_norm": 10.744651794433594,
"learning_rate": 3.642462509865825e-05,
"loss": 0.4832,
"step": 970
},
{
"epoch": 1.0980113636363635,
"grad_norm": 3.085749864578247,
"learning_rate": 3.622730860299921e-05,
"loss": 0.5775,
"step": 980
},
{
"epoch": 1.1015625,
"grad_norm": 11.848700523376465,
"learning_rate": 3.602999210734018e-05,
"loss": 0.4641,
"step": 990
},
{
"epoch": 1.1051136363636365,
"grad_norm": 5.603400230407715,
"learning_rate": 3.583267561168114e-05,
"loss": 0.5069,
"step": 1000
},
{
"epoch": 1.1086647727272727,
"grad_norm": 8.615111351013184,
"learning_rate": 3.5635359116022096e-05,
"loss": 0.5031,
"step": 1010
},
{
"epoch": 1.1122159090909092,
"grad_norm": 23.804479598999023,
"learning_rate": 3.5438042620363065e-05,
"loss": 0.7348,
"step": 1020
},
{
"epoch": 1.1157670454545454,
"grad_norm": 1.017351508140564,
"learning_rate": 3.5240726124704027e-05,
"loss": 0.7015,
"step": 1030
},
{
"epoch": 1.1193181818181819,
"grad_norm": 14.092010498046875,
"learning_rate": 3.504340962904499e-05,
"loss": 0.5592,
"step": 1040
},
{
"epoch": 1.1228693181818181,
"grad_norm": 20.209379196166992,
"learning_rate": 3.484609313338596e-05,
"loss": 0.7704,
"step": 1050
},
{
"epoch": 1.1264204545454546,
"grad_norm": 0.6501548290252686,
"learning_rate": 3.464877663772691e-05,
"loss": 0.5026,
"step": 1060
},
{
"epoch": 1.1299715909090908,
"grad_norm": 1.0749989748001099,
"learning_rate": 3.4451460142067874e-05,
"loss": 0.5371,
"step": 1070
},
{
"epoch": 1.1335227272727273,
"grad_norm": 10.37779712677002,
"learning_rate": 3.425414364640884e-05,
"loss": 0.4032,
"step": 1080
},
{
"epoch": 1.1370738636363638,
"grad_norm": 30.61161231994629,
"learning_rate": 3.4056827150749804e-05,
"loss": 0.6612,
"step": 1090
},
{
"epoch": 1.140625,
"grad_norm": 28.0980167388916,
"learning_rate": 3.3859510655090766e-05,
"loss": 0.5713,
"step": 1100
},
{
"epoch": 1.1441761363636362,
"grad_norm": 25.723461151123047,
"learning_rate": 3.3662194159431734e-05,
"loss": 0.6669,
"step": 1110
},
{
"epoch": 1.1477272727272727,
"grad_norm": 7.138387203216553,
"learning_rate": 3.346487766377269e-05,
"loss": 0.5781,
"step": 1120
},
{
"epoch": 1.1512784090909092,
"grad_norm": 27.993236541748047,
"learning_rate": 3.326756116811365e-05,
"loss": 0.6033,
"step": 1130
},
{
"epoch": 1.1548295454545454,
"grad_norm": 3.16493558883667,
"learning_rate": 3.307024467245462e-05,
"loss": 0.787,
"step": 1140
},
{
"epoch": 1.1583806818181819,
"grad_norm": 11.943394660949707,
"learning_rate": 3.287292817679558e-05,
"loss": 0.657,
"step": 1150
},
{
"epoch": 1.1619318181818181,
"grad_norm": 3.7604823112487793,
"learning_rate": 3.267561168113654e-05,
"loss": 0.4096,
"step": 1160
},
{
"epoch": 1.1654829545454546,
"grad_norm": 21.66828727722168,
"learning_rate": 3.247829518547751e-05,
"loss": 0.5554,
"step": 1170
},
{
"epoch": 1.1690340909090908,
"grad_norm": 3.944326162338257,
"learning_rate": 3.2280978689818467e-05,
"loss": 0.8685,
"step": 1180
},
{
"epoch": 1.1725852272727273,
"grad_norm": 4.541408061981201,
"learning_rate": 3.208366219415943e-05,
"loss": 0.651,
"step": 1190
},
{
"epoch": 1.1761363636363638,
"grad_norm": 16.059410095214844,
"learning_rate": 3.18863456985004e-05,
"loss": 0.5313,
"step": 1200
},
{
"epoch": 1.1796875,
"grad_norm": 29.15842056274414,
"learning_rate": 3.168902920284136e-05,
"loss": 0.6873,
"step": 1210
},
{
"epoch": 1.1832386363636362,
"grad_norm": 27.368764877319336,
"learning_rate": 3.149171270718232e-05,
"loss": 0.7953,
"step": 1220
},
{
"epoch": 1.1867897727272727,
"grad_norm": 12.318428039550781,
"learning_rate": 3.129439621152329e-05,
"loss": 0.316,
"step": 1230
},
{
"epoch": 1.1903409090909092,
"grad_norm": 7.381816864013672,
"learning_rate": 3.1097079715864244e-05,
"loss": 0.5909,
"step": 1240
},
{
"epoch": 1.1938920454545454,
"grad_norm": 56.74608612060547,
"learning_rate": 3.0899763220205206e-05,
"loss": 0.5496,
"step": 1250
},
{
"epoch": 1.1974431818181819,
"grad_norm": 5.5123090744018555,
"learning_rate": 3.0702446724546174e-05,
"loss": 0.4415,
"step": 1260
},
{
"epoch": 1.2009943181818181,
"grad_norm": 3.0995726585388184,
"learning_rate": 3.0505130228887136e-05,
"loss": 0.4572,
"step": 1270
},
{
"epoch": 1.2045454545454546,
"grad_norm": 1.3388721942901611,
"learning_rate": 3.03078137332281e-05,
"loss": 0.4261,
"step": 1280
},
{
"epoch": 1.2080965909090908,
"grad_norm": 3.432847738265991,
"learning_rate": 3.0110497237569063e-05,
"loss": 0.5804,
"step": 1290
},
{
"epoch": 1.2116477272727273,
"grad_norm": 5.881500244140625,
"learning_rate": 2.9913180741910025e-05,
"loss": 0.3265,
"step": 1300
},
{
"epoch": 1.2151988636363638,
"grad_norm": 27.702701568603516,
"learning_rate": 2.971586424625099e-05,
"loss": 0.8524,
"step": 1310
},
{
"epoch": 1.21875,
"grad_norm": 3.5202479362487793,
"learning_rate": 2.951854775059195e-05,
"loss": 0.7208,
"step": 1320
},
{
"epoch": 1.2223011363636362,
"grad_norm": 14.859701156616211,
"learning_rate": 2.9321231254932913e-05,
"loss": 0.6345,
"step": 1330
},
{
"epoch": 1.2258522727272727,
"grad_norm": 2.571312427520752,
"learning_rate": 2.912391475927388e-05,
"loss": 0.3549,
"step": 1340
},
{
"epoch": 1.2294034090909092,
"grad_norm": 8.97351360321045,
"learning_rate": 2.892659826361484e-05,
"loss": 0.507,
"step": 1350
},
{
"epoch": 1.2329545454545454,
"grad_norm": 13.665780067443848,
"learning_rate": 2.8729281767955802e-05,
"loss": 0.4561,
"step": 1360
},
{
"epoch": 1.2365056818181819,
"grad_norm": 13.264731407165527,
"learning_rate": 2.8531965272296767e-05,
"loss": 0.539,
"step": 1370
},
{
"epoch": 1.2400568181818181,
"grad_norm": 29.683090209960938,
"learning_rate": 2.833464877663773e-05,
"loss": 0.5715,
"step": 1380
},
{
"epoch": 1.2436079545454546,
"grad_norm": 8.478317260742188,
"learning_rate": 2.813733228097869e-05,
"loss": 0.183,
"step": 1390
},
{
"epoch": 1.2471590909090908,
"grad_norm": 28.569978713989258,
"learning_rate": 2.7940015785319656e-05,
"loss": 0.5002,
"step": 1400
},
{
"epoch": 1.25,
"eval_accuracy": 0.8542024013722127,
"eval_f1": 0.8530975388761288,
"eval_loss": 0.4373819828033447,
"eval_precision": 0.8718298587182326,
"eval_recall": 0.853515201868564,
"eval_runtime": 695.9457,
"eval_samples_per_second": 0.838,
"eval_steps_per_second": 0.21,
"step": 1408
},
{
"epoch": 2.000710227272727,
"grad_norm": 25.39379119873047,
"learning_rate": 2.7742699289660618e-05,
"loss": 0.6607,
"step": 1410
},
{
"epoch": 2.0042613636363638,
"grad_norm": 3.422687530517578,
"learning_rate": 2.754538279400158e-05,
"loss": 0.5038,
"step": 1420
},
{
"epoch": 2.0078125,
"grad_norm": 1.3007397651672363,
"learning_rate": 2.7348066298342545e-05,
"loss": 0.3052,
"step": 1430
},
{
"epoch": 2.0113636363636362,
"grad_norm": 15.327472686767578,
"learning_rate": 2.7150749802683506e-05,
"loss": 0.2933,
"step": 1440
},
{
"epoch": 2.014914772727273,
"grad_norm": 23.62551498413086,
"learning_rate": 2.6953433307024468e-05,
"loss": 0.4165,
"step": 1450
},
{
"epoch": 2.018465909090909,
"grad_norm": 8.549898147583008,
"learning_rate": 2.6756116811365433e-05,
"loss": 0.3321,
"step": 1460
},
{
"epoch": 2.0220170454545454,
"grad_norm": 25.098161697387695,
"learning_rate": 2.6558800315706395e-05,
"loss": 0.4109,
"step": 1470
},
{
"epoch": 2.0255681818181817,
"grad_norm": 9.298599243164062,
"learning_rate": 2.6361483820047357e-05,
"loss": 0.4261,
"step": 1480
},
{
"epoch": 2.0291193181818183,
"grad_norm": 66.2896957397461,
"learning_rate": 2.6164167324388322e-05,
"loss": 0.3508,
"step": 1490
},
{
"epoch": 2.0326704545454546,
"grad_norm": 1.1833637952804565,
"learning_rate": 2.5966850828729284e-05,
"loss": 0.1169,
"step": 1500
},
{
"epoch": 2.036221590909091,
"grad_norm": 32.40926742553711,
"learning_rate": 2.5769534333070246e-05,
"loss": 0.7922,
"step": 1510
},
{
"epoch": 2.039772727272727,
"grad_norm": 31.9284610748291,
"learning_rate": 2.557221783741121e-05,
"loss": 0.3044,
"step": 1520
},
{
"epoch": 2.0433238636363638,
"grad_norm": 0.32966673374176025,
"learning_rate": 2.5374901341752172e-05,
"loss": 0.5598,
"step": 1530
},
{
"epoch": 2.046875,
"grad_norm": 3.2222678661346436,
"learning_rate": 2.5177584846093134e-05,
"loss": 0.5305,
"step": 1540
},
{
"epoch": 2.0504261363636362,
"grad_norm": 5.157206058502197,
"learning_rate": 2.4980268350434096e-05,
"loss": 0.3806,
"step": 1550
},
{
"epoch": 2.053977272727273,
"grad_norm": 29.749786376953125,
"learning_rate": 2.478295185477506e-05,
"loss": 0.6572,
"step": 1560
},
{
"epoch": 2.057528409090909,
"grad_norm": 5.4849534034729,
"learning_rate": 2.4585635359116023e-05,
"loss": 0.2828,
"step": 1570
},
{
"epoch": 2.0610795454545454,
"grad_norm": 1.8682303428649902,
"learning_rate": 2.4388318863456985e-05,
"loss": 0.275,
"step": 1580
},
{
"epoch": 2.0646306818181817,
"grad_norm": 31.073657989501953,
"learning_rate": 2.419100236779795e-05,
"loss": 0.2635,
"step": 1590
},
{
"epoch": 2.0681818181818183,
"grad_norm": 2.873807668685913,
"learning_rate": 2.399368587213891e-05,
"loss": 0.2189,
"step": 1600
},
{
"epoch": 2.0717329545454546,
"grad_norm": 6.81038761138916,
"learning_rate": 2.3796369376479873e-05,
"loss": 0.2335,
"step": 1610
},
{
"epoch": 2.075284090909091,
"grad_norm": 5.240432262420654,
"learning_rate": 2.359905288082084e-05,
"loss": 0.513,
"step": 1620
},
{
"epoch": 2.078835227272727,
"grad_norm": 2.8783762454986572,
"learning_rate": 2.34017363851618e-05,
"loss": 0.2719,
"step": 1630
},
{
"epoch": 2.0823863636363638,
"grad_norm": 4.599855422973633,
"learning_rate": 2.3204419889502762e-05,
"loss": 0.2012,
"step": 1640
},
{
"epoch": 2.0859375,
"grad_norm": 1.6575523614883423,
"learning_rate": 2.3007103393843727e-05,
"loss": 0.1299,
"step": 1650
},
{
"epoch": 2.0894886363636362,
"grad_norm": 0.3831326961517334,
"learning_rate": 2.280978689818469e-05,
"loss": 0.1766,
"step": 1660
},
{
"epoch": 2.093039772727273,
"grad_norm": 1.1560420989990234,
"learning_rate": 2.261247040252565e-05,
"loss": 0.3844,
"step": 1670
},
{
"epoch": 2.096590909090909,
"grad_norm": 1.8366397619247437,
"learning_rate": 2.2415153906866616e-05,
"loss": 0.3113,
"step": 1680
},
{
"epoch": 2.1001420454545454,
"grad_norm": 10.303657531738281,
"learning_rate": 2.2217837411207578e-05,
"loss": 0.1612,
"step": 1690
},
{
"epoch": 2.1036931818181817,
"grad_norm": 35.21874237060547,
"learning_rate": 2.202052091554854e-05,
"loss": 0.454,
"step": 1700
},
{
"epoch": 2.1072443181818183,
"grad_norm": 4.324901580810547,
"learning_rate": 2.1823204419889505e-05,
"loss": 0.5713,
"step": 1710
},
{
"epoch": 2.1107954545454546,
"grad_norm": 4.857918739318848,
"learning_rate": 2.1625887924230466e-05,
"loss": 0.4748,
"step": 1720
},
{
"epoch": 2.114346590909091,
"grad_norm": 0.15146464109420776,
"learning_rate": 2.1428571428571428e-05,
"loss": 0.3261,
"step": 1730
},
{
"epoch": 2.117897727272727,
"grad_norm": 19.752939224243164,
"learning_rate": 2.1231254932912393e-05,
"loss": 0.2406,
"step": 1740
},
{
"epoch": 2.1214488636363638,
"grad_norm": 18.78827476501465,
"learning_rate": 2.1033938437253355e-05,
"loss": 0.4846,
"step": 1750
},
{
"epoch": 2.125,
"grad_norm": 0.27601099014282227,
"learning_rate": 2.0836621941594317e-05,
"loss": 0.2026,
"step": 1760
},
{
"epoch": 2.1285511363636362,
"grad_norm": 0.5743858218193054,
"learning_rate": 2.0639305445935282e-05,
"loss": 0.2579,
"step": 1770
},
{
"epoch": 2.132102272727273,
"grad_norm": 0.10733279585838318,
"learning_rate": 2.0441988950276244e-05,
"loss": 0.2348,
"step": 1780
},
{
"epoch": 2.135653409090909,
"grad_norm": 2.2115983963012695,
"learning_rate": 2.0244672454617206e-05,
"loss": 0.2632,
"step": 1790
},
{
"epoch": 2.1392045454545454,
"grad_norm": 37.26658248901367,
"learning_rate": 2.004735595895817e-05,
"loss": 0.4384,
"step": 1800
},
{
"epoch": 2.1427556818181817,
"grad_norm": 10.85006332397461,
"learning_rate": 1.9850039463299132e-05,
"loss": 0.2217,
"step": 1810
},
{
"epoch": 2.1463068181818183,
"grad_norm": 4.717857360839844,
"learning_rate": 1.9652722967640098e-05,
"loss": 0.2748,
"step": 1820
},
{
"epoch": 2.1498579545454546,
"grad_norm": 2.3380446434020996,
"learning_rate": 1.945540647198106e-05,
"loss": 0.1738,
"step": 1830
},
{
"epoch": 2.153409090909091,
"grad_norm": 1.6607507467269897,
"learning_rate": 1.925808997632202e-05,
"loss": 0.2641,
"step": 1840
},
{
"epoch": 2.156960227272727,
"grad_norm": 12.202836036682129,
"learning_rate": 1.9060773480662986e-05,
"loss": 0.2995,
"step": 1850
},
{
"epoch": 2.1605113636363638,
"grad_norm": 20.316972732543945,
"learning_rate": 1.8863456985003948e-05,
"loss": 0.1573,
"step": 1860
},
{
"epoch": 2.1640625,
"grad_norm": 34.030296325683594,
"learning_rate": 1.866614048934491e-05,
"loss": 0.266,
"step": 1870
},
{
"epoch": 2.1676136363636362,
"grad_norm": 4.90464973449707,
"learning_rate": 1.8468823993685875e-05,
"loss": 0.1064,
"step": 1880
},
{
"epoch": 2.171164772727273,
"grad_norm": 38.46381378173828,
"learning_rate": 1.8271507498026837e-05,
"loss": 0.2679,
"step": 1890
},
{
"epoch": 2.174715909090909,
"grad_norm": 52.19645690917969,
"learning_rate": 1.80741910023678e-05,
"loss": 0.166,
"step": 1900
},
{
"epoch": 2.1782670454545454,
"grad_norm": 15.714051246643066,
"learning_rate": 1.7876874506708764e-05,
"loss": 0.4399,
"step": 1910
},
{
"epoch": 2.1818181818181817,
"grad_norm": 50.66694641113281,
"learning_rate": 1.7679558011049725e-05,
"loss": 0.1816,
"step": 1920
},
{
"epoch": 2.1853693181818183,
"grad_norm": 4.8143205642700195,
"learning_rate": 1.7482241515390687e-05,
"loss": 0.5087,
"step": 1930
},
{
"epoch": 2.1889204545454546,
"grad_norm": 1.34380042552948,
"learning_rate": 1.7284925019731652e-05,
"loss": 0.4257,
"step": 1940
},
{
"epoch": 2.192471590909091,
"grad_norm": 6.62140417098999,
"learning_rate": 1.7087608524072614e-05,
"loss": 0.1767,
"step": 1950
},
{
"epoch": 2.196022727272727,
"grad_norm": 34.63706588745117,
"learning_rate": 1.6890292028413576e-05,
"loss": 0.3301,
"step": 1960
},
{
"epoch": 2.1995738636363638,
"grad_norm": 3.23504638671875,
"learning_rate": 1.669297553275454e-05,
"loss": 0.2174,
"step": 1970
},
{
"epoch": 2.203125,
"grad_norm": 16.90635871887207,
"learning_rate": 1.64956590370955e-05,
"loss": 0.4572,
"step": 1980
},
{
"epoch": 2.2066761363636362,
"grad_norm": 20.676467895507812,
"learning_rate": 1.6298342541436465e-05,
"loss": 0.3199,
"step": 1990
},
{
"epoch": 2.210227272727273,
"grad_norm": 0.31672224402427673,
"learning_rate": 1.610102604577743e-05,
"loss": 0.2,
"step": 2000
},
{
"epoch": 2.213778409090909,
"grad_norm": 8.79170036315918,
"learning_rate": 1.5903709550118388e-05,
"loss": 0.1675,
"step": 2010
},
{
"epoch": 2.2173295454545454,
"grad_norm": 2.0761942863464355,
"learning_rate": 1.5706393054459353e-05,
"loss": 0.2614,
"step": 2020
},
{
"epoch": 2.2208806818181817,
"grad_norm": 0.06360641866922379,
"learning_rate": 1.550907655880032e-05,
"loss": 0.2335,
"step": 2030
},
{
"epoch": 2.2244318181818183,
"grad_norm": 18.36467170715332,
"learning_rate": 1.5311760063141277e-05,
"loss": 0.5066,
"step": 2040
},
{
"epoch": 2.2279829545454546,
"grad_norm": 9.642960548400879,
"learning_rate": 1.5114443567482242e-05,
"loss": 0.2035,
"step": 2050
},
{
"epoch": 2.231534090909091,
"grad_norm": 6.050065040588379,
"learning_rate": 1.4917127071823205e-05,
"loss": 0.1467,
"step": 2060
},
{
"epoch": 2.235085227272727,
"grad_norm": 0.06556593626737595,
"learning_rate": 1.4719810576164167e-05,
"loss": 0.1243,
"step": 2070
},
{
"epoch": 2.2386363636363638,
"grad_norm": 0.29696065187454224,
"learning_rate": 1.452249408050513e-05,
"loss": 0.2465,
"step": 2080
},
{
"epoch": 2.2421875,
"grad_norm": 1.2069284915924072,
"learning_rate": 1.4325177584846094e-05,
"loss": 0.191,
"step": 2090
},
{
"epoch": 2.2457386363636362,
"grad_norm": 2.228632688522339,
"learning_rate": 1.4127861089187056e-05,
"loss": 0.1718,
"step": 2100
},
{
"epoch": 2.249289772727273,
"grad_norm": 1.17184317111969,
"learning_rate": 1.393054459352802e-05,
"loss": 0.3627,
"step": 2110
},
{
"epoch": 2.25,
"eval_accuracy": 0.9622641509433962,
"eval_f1": 0.9617697305162668,
"eval_loss": 0.1109052374958992,
"eval_precision": 0.9647490495721144,
"eval_recall": 0.9614389495077621,
"eval_runtime": 696.0893,
"eval_samples_per_second": 0.838,
"eval_steps_per_second": 0.21,
"step": 2112
},
{
"epoch": 3.002840909090909,
"grad_norm": 0.03763706982135773,
"learning_rate": 1.3733228097868983e-05,
"loss": 0.0171,
"step": 2120
},
{
"epoch": 3.0063920454545454,
"grad_norm": 1.5585871934890747,
"learning_rate": 1.3535911602209945e-05,
"loss": 0.2023,
"step": 2130
},
{
"epoch": 3.0099431818181817,
"grad_norm": 20.643203735351562,
"learning_rate": 1.3338595106550908e-05,
"loss": 0.1212,
"step": 2140
},
{
"epoch": 3.0134943181818183,
"grad_norm": 0.19967862963676453,
"learning_rate": 1.3141278610891871e-05,
"loss": 0.0201,
"step": 2150
},
{
"epoch": 3.0170454545454546,
"grad_norm": 3.6144466400146484,
"learning_rate": 1.2943962115232833e-05,
"loss": 0.1455,
"step": 2160
},
{
"epoch": 3.020596590909091,
"grad_norm": 7.9842610359191895,
"learning_rate": 1.2746645619573797e-05,
"loss": 0.042,
"step": 2170
},
{
"epoch": 3.024147727272727,
"grad_norm": 3.346548080444336,
"learning_rate": 1.254932912391476e-05,
"loss": 0.0365,
"step": 2180
},
{
"epoch": 3.0276988636363638,
"grad_norm": 0.10210000723600388,
"learning_rate": 1.2352012628255722e-05,
"loss": 0.118,
"step": 2190
},
{
"epoch": 3.03125,
"grad_norm": 2.003911018371582,
"learning_rate": 1.2154696132596685e-05,
"loss": 0.0461,
"step": 2200
},
{
"epoch": 3.0348011363636362,
"grad_norm": 0.06448382884263992,
"learning_rate": 1.1957379636937649e-05,
"loss": 0.1276,
"step": 2210
},
{
"epoch": 3.038352272727273,
"grad_norm": 59.72112274169922,
"learning_rate": 1.176006314127861e-05,
"loss": 0.1744,
"step": 2220
},
{
"epoch": 3.041903409090909,
"grad_norm": 26.68014907836914,
"learning_rate": 1.1562746645619574e-05,
"loss": 0.0988,
"step": 2230
},
{
"epoch": 3.0454545454545454,
"grad_norm": 1.2907896041870117,
"learning_rate": 1.1365430149960538e-05,
"loss": 0.0432,
"step": 2240
},
{
"epoch": 3.0490056818181817,
"grad_norm": 2.807907819747925,
"learning_rate": 1.11681136543015e-05,
"loss": 0.1671,
"step": 2250
},
{
"epoch": 3.0525568181818183,
"grad_norm": 0.22834616899490356,
"learning_rate": 1.0970797158642463e-05,
"loss": 0.0643,
"step": 2260
},
{
"epoch": 3.0561079545454546,
"grad_norm": 0.23886850476264954,
"learning_rate": 1.0773480662983426e-05,
"loss": 0.1166,
"step": 2270
},
{
"epoch": 3.059659090909091,
"grad_norm": 2.190918445587158,
"learning_rate": 1.0576164167324388e-05,
"loss": 0.0236,
"step": 2280
},
{
"epoch": 3.063210227272727,
"grad_norm": 1.075537919998169,
"learning_rate": 1.0378847671665353e-05,
"loss": 0.1185,
"step": 2290
},
{
"epoch": 3.0667613636363638,
"grad_norm": 34.861881256103516,
"learning_rate": 1.0181531176006315e-05,
"loss": 0.1225,
"step": 2300
},
{
"epoch": 3.0703125,
"grad_norm": 0.2916108965873718,
"learning_rate": 9.984214680347277e-06,
"loss": 0.0877,
"step": 2310
},
{
"epoch": 3.0738636363636362,
"grad_norm": 24.542476654052734,
"learning_rate": 9.786898184688242e-06,
"loss": 0.1007,
"step": 2320
},
{
"epoch": 3.077414772727273,
"grad_norm": 1.0566986799240112,
"learning_rate": 9.589581689029204e-06,
"loss": 0.0563,
"step": 2330
},
{
"epoch": 3.080965909090909,
"grad_norm": 20.17405128479004,
"learning_rate": 9.392265193370165e-06,
"loss": 0.0374,
"step": 2340
},
{
"epoch": 3.0845170454545454,
"grad_norm": 0.07401897758245468,
"learning_rate": 9.194948697711129e-06,
"loss": 0.0495,
"step": 2350
},
{
"epoch": 3.0880681818181817,
"grad_norm": 4.210412502288818,
"learning_rate": 8.997632202052092e-06,
"loss": 0.1265,
"step": 2360
},
{
"epoch": 3.0916193181818183,
"grad_norm": 27.83361053466797,
"learning_rate": 8.800315706393054e-06,
"loss": 0.2213,
"step": 2370
},
{
"epoch": 3.0951704545454546,
"grad_norm": 0.04297183081507683,
"learning_rate": 8.602999210734018e-06,
"loss": 0.1469,
"step": 2380
},
{
"epoch": 3.098721590909091,
"grad_norm": 0.7070327401161194,
"learning_rate": 8.405682715074981e-06,
"loss": 0.0219,
"step": 2390
},
{
"epoch": 3.102272727272727,
"grad_norm": 0.22129130363464355,
"learning_rate": 8.208366219415943e-06,
"loss": 0.0115,
"step": 2400
},
{
"epoch": 3.1058238636363638,
"grad_norm": 0.8595547080039978,
"learning_rate": 8.011049723756906e-06,
"loss": 0.0124,
"step": 2410
},
{
"epoch": 3.109375,
"grad_norm": 0.13839711248874664,
"learning_rate": 7.81373322809787e-06,
"loss": 0.0865,
"step": 2420
},
{
"epoch": 3.1129261363636362,
"grad_norm": 0.04325387626886368,
"learning_rate": 7.6164167324388314e-06,
"loss": 0.0793,
"step": 2430
},
{
"epoch": 3.116477272727273,
"grad_norm": 3.90751051902771,
"learning_rate": 7.419100236779796e-06,
"loss": 0.1025,
"step": 2440
},
{
"epoch": 3.120028409090909,
"grad_norm": 0.03920091316103935,
"learning_rate": 7.2217837411207575e-06,
"loss": 0.2589,
"step": 2450
},
{
"epoch": 3.1235795454545454,
"grad_norm": 0.2701418995857239,
"learning_rate": 7.02446724546172e-06,
"loss": 0.0066,
"step": 2460
},
{
"epoch": 3.1271306818181817,
"grad_norm": 0.3685694634914398,
"learning_rate": 6.827150749802684e-06,
"loss": 0.0537,
"step": 2470
},
{
"epoch": 3.1306818181818183,
"grad_norm": 0.03167586028575897,
"learning_rate": 6.629834254143646e-06,
"loss": 0.2329,
"step": 2480
},
{
"epoch": 3.1342329545454546,
"grad_norm": 24.94156265258789,
"learning_rate": 6.4325177584846105e-06,
"loss": 0.113,
"step": 2490
},
{
"epoch": 3.137784090909091,
"grad_norm": 2.3878488540649414,
"learning_rate": 6.235201262825572e-06,
"loss": 0.0289,
"step": 2500
},
{
"epoch": 3.141335227272727,
"grad_norm": 0.1500893086194992,
"learning_rate": 6.037884767166536e-06,
"loss": 0.0164,
"step": 2510
},
{
"epoch": 3.1448863636363638,
"grad_norm": 1.0166605710983276,
"learning_rate": 5.840568271507498e-06,
"loss": 0.0665,
"step": 2520
},
{
"epoch": 3.1484375,
"grad_norm": 8.010841369628906,
"learning_rate": 5.643251775848461e-06,
"loss": 0.2883,
"step": 2530
},
{
"epoch": 3.1519886363636362,
"grad_norm": 11.449795722961426,
"learning_rate": 5.445935280189424e-06,
"loss": 0.0892,
"step": 2540
},
{
"epoch": 3.155539772727273,
"grad_norm": 41.279239654541016,
"learning_rate": 5.248618784530387e-06,
"loss": 0.0529,
"step": 2550
},
{
"epoch": 3.159090909090909,
"grad_norm": 0.06051742658019066,
"learning_rate": 5.05130228887135e-06,
"loss": 0.1401,
"step": 2560
},
{
"epoch": 3.1626420454545454,
"grad_norm": 0.08330044150352478,
"learning_rate": 4.853985793212313e-06,
"loss": 0.0411,
"step": 2570
},
{
"epoch": 3.1661931818181817,
"grad_norm": 44.56931686401367,
"learning_rate": 4.656669297553276e-06,
"loss": 0.0657,
"step": 2580
},
{
"epoch": 3.1697443181818183,
"grad_norm": 1.3760157823562622,
"learning_rate": 4.459352801894238e-06,
"loss": 0.1188,
"step": 2590
},
{
"epoch": 3.1732954545454546,
"grad_norm": 0.1809810996055603,
"learning_rate": 4.262036306235202e-06,
"loss": 0.0305,
"step": 2600
},
{
"epoch": 3.176846590909091,
"grad_norm": 1.571562647819519,
"learning_rate": 4.064719810576164e-06,
"loss": 0.0448,
"step": 2610
},
{
"epoch": 3.180397727272727,
"grad_norm": 0.04679562523961067,
"learning_rate": 3.867403314917127e-06,
"loss": 0.0876,
"step": 2620
},
{
"epoch": 3.1839488636363638,
"grad_norm": 0.40420079231262207,
"learning_rate": 3.67008681925809e-06,
"loss": 0.0399,
"step": 2630
},
{
"epoch": 3.1875,
"grad_norm": 0.8966354727745056,
"learning_rate": 3.472770323599053e-06,
"loss": 0.0747,
"step": 2640
},
{
"epoch": 3.1910511363636362,
"grad_norm": 0.499515563249588,
"learning_rate": 3.2754538279400157e-06,
"loss": 0.1019,
"step": 2650
},
{
"epoch": 3.194602272727273,
"grad_norm": 0.09785713255405426,
"learning_rate": 3.0781373322809787e-06,
"loss": 0.032,
"step": 2660
},
{
"epoch": 3.198153409090909,
"grad_norm": 0.037965867668390274,
"learning_rate": 2.8808208366219414e-06,
"loss": 0.0693,
"step": 2670
},
{
"epoch": 3.2017045454545454,
"grad_norm": 0.5521709322929382,
"learning_rate": 2.683504340962905e-06,
"loss": 0.0761,
"step": 2680
},
{
"epoch": 3.2052556818181817,
"grad_norm": 0.09314560890197754,
"learning_rate": 2.4861878453038674e-06,
"loss": 0.1744,
"step": 2690
},
{
"epoch": 3.2088068181818183,
"grad_norm": 12.886544227600098,
"learning_rate": 2.2888713496448305e-06,
"loss": 0.1549,
"step": 2700
},
{
"epoch": 3.2123579545454546,
"grad_norm": 0.18088915944099426,
"learning_rate": 2.091554853985793e-06,
"loss": 0.186,
"step": 2710
},
{
"epoch": 3.215909090909091,
"grad_norm": 0.18304170668125153,
"learning_rate": 1.8942383583267563e-06,
"loss": 0.0137,
"step": 2720
},
{
"epoch": 3.219460227272727,
"grad_norm": 5.272530555725098,
"learning_rate": 1.6969218626677192e-06,
"loss": 0.0369,
"step": 2730
},
{
"epoch": 3.2230113636363638,
"grad_norm": 0.17552484571933746,
"learning_rate": 1.499605367008682e-06,
"loss": 0.0675,
"step": 2740
},
{
"epoch": 3.2265625,
"grad_norm": 0.28992605209350586,
"learning_rate": 1.3022888713496448e-06,
"loss": 0.0744,
"step": 2750
},
{
"epoch": 3.2301136363636362,
"grad_norm": 0.17767734825611115,
"learning_rate": 1.1049723756906078e-06,
"loss": 0.1551,
"step": 2760
},
{
"epoch": 3.233664772727273,
"grad_norm": 0.43689748644828796,
"learning_rate": 9.076558800315707e-07,
"loss": 0.1754,
"step": 2770
},
{
"epoch": 3.237215909090909,
"grad_norm": 0.956597089767456,
"learning_rate": 7.103393843725336e-07,
"loss": 0.0069,
"step": 2780
},
{
"epoch": 3.2407670454545454,
"grad_norm": 25.514036178588867,
"learning_rate": 5.130228887134964e-07,
"loss": 0.0997,
"step": 2790
},
{
"epoch": 3.2443181818181817,
"grad_norm": 3.5862650871276855,
"learning_rate": 3.1570639305445935e-07,
"loss": 0.1085,
"step": 2800
},
{
"epoch": 3.2478693181818183,
"grad_norm": 0.035997312515974045,
"learning_rate": 1.1838989739542227e-07,
"loss": 0.0289,
"step": 2810
},
{
"epoch": 3.25,
"eval_accuracy": 0.9879931389365352,
"eval_f1": 0.9879774481091409,
"eval_loss": 0.037413839250802994,
"eval_precision": 0.9880848996629442,
"eval_recall": 0.9879997453526865,
"eval_runtime": 692.1711,
"eval_samples_per_second": 0.842,
"eval_steps_per_second": 0.211,
"step": 2816
},
{
"epoch": 3.25,
"step": 2816,
"total_flos": 1.4036798084625727e+19,
"train_loss": 0.6578278151044453,
"train_runtime": 18231.9061,
"train_samples_per_second": 0.618,
"train_steps_per_second": 0.154
},
{
"epoch": 3.25,
"eval_accuracy": 0.9558620689655173,
"eval_f1": 0.9562337987051189,
"eval_loss": 0.14939993619918823,
"eval_precision": 0.9567608190198278,
"eval_recall": 0.956465718392097,
"eval_runtime": 911.348,
"eval_samples_per_second": 0.796,
"eval_steps_per_second": 0.2,
"step": 2816
},
{
"epoch": 3.25,
"eval_accuracy": 0.9558620689655173,
"eval_f1": 0.9562337987051189,
"eval_loss": 0.14939993619918823,
"eval_precision": 0.9567608190198278,
"eval_recall": 0.956465718392097,
"eval_runtime": 930.9063,
"eval_samples_per_second": 0.779,
"eval_steps_per_second": 0.196,
"step": 2816
}
],
"logging_steps": 10,
"max_steps": 2816,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"total_flos": 1.4036798084625727e+19,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}