wav2vec2-base-ft-keyword-spotting / trainer_state.json
{
"best_metric": 0.9832303618711385,
"best_model_checkpoint": "wav2vec2-base-ft-keyword-spotting/checkpoint-1995",
"epoch": 4.996869129618034,
"eval_steps": 500,
"global_step": 1995,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.025046963055729492,
"grad_norm": 1.278334379196167,
"learning_rate": 1.5e-06,
"loss": 2.4699,
"step": 10
},
{
"epoch": 0.050093926111458985,
"grad_norm": 1.3655153512954712,
"learning_rate": 3e-06,
"loss": 2.4539,
"step": 20
},
{
"epoch": 0.07514088916718847,
"grad_norm": 1.3241969347000122,
"learning_rate": 4.5e-06,
"loss": 2.4044,
"step": 30
},
{
"epoch": 0.10018785222291797,
"grad_norm": 1.7512290477752686,
"learning_rate": 6e-06,
"loss": 2.3261,
"step": 40
},
{
"epoch": 0.12523481527864747,
"grad_norm": 2.1144192218780518,
"learning_rate": 7.5e-06,
"loss": 2.2156,
"step": 50
},
{
"epoch": 0.15028177833437695,
"grad_norm": 2.416064739227295,
"learning_rate": 9e-06,
"loss": 2.0569,
"step": 60
},
{
"epoch": 0.17532874139010646,
"grad_norm": 2.3332056999206543,
"learning_rate": 1.05e-05,
"loss": 1.8437,
"step": 70
},
{
"epoch": 0.20037570444583594,
"grad_norm": 2.037410259246826,
"learning_rate": 1.2e-05,
"loss": 1.7693,
"step": 80
},
{
"epoch": 0.22542266750156542,
"grad_norm": 2.00655198097229,
"learning_rate": 1.3500000000000001e-05,
"loss": 1.6565,
"step": 90
},
{
"epoch": 0.25046963055729493,
"grad_norm": 1.5333060026168823,
"learning_rate": 1.5e-05,
"loss": 1.6115,
"step": 100
},
{
"epoch": 0.27551659361302444,
"grad_norm": 0.9409968852996826,
"learning_rate": 1.65e-05,
"loss": 1.6253,
"step": 110
},
{
"epoch": 0.3005635566687539,
"grad_norm": 1.4992973804473877,
"learning_rate": 1.8e-05,
"loss": 1.5118,
"step": 120
},
{
"epoch": 0.3256105197244834,
"grad_norm": 0.5201308727264404,
"learning_rate": 1.95e-05,
"loss": 1.503,
"step": 130
},
{
"epoch": 0.3506574827802129,
"grad_norm": 1.1067086458206177,
"learning_rate": 2.1e-05,
"loss": 1.5415,
"step": 140
},
{
"epoch": 0.37570444583594237,
"grad_norm": 1.8773168325424194,
"learning_rate": 2.25e-05,
"loss": 1.4933,
"step": 150
},
{
"epoch": 0.4007514088916719,
"grad_norm": 2.180652618408203,
"learning_rate": 2.4e-05,
"loss": 1.449,
"step": 160
},
{
"epoch": 0.4257983719474014,
"grad_norm": 1.6153920888900757,
"learning_rate": 2.55e-05,
"loss": 1.4281,
"step": 170
},
{
"epoch": 0.45084533500313084,
"grad_norm": 3.9366486072540283,
"learning_rate": 2.7000000000000002e-05,
"loss": 1.3706,
"step": 180
},
{
"epoch": 0.47589229805886035,
"grad_norm": 8.680933952331543,
"learning_rate": 2.8499999999999998e-05,
"loss": 1.3632,
"step": 190
},
{
"epoch": 0.5009392611145899,
"grad_norm": 1.6787041425704956,
"learning_rate": 3e-05,
"loss": 1.3052,
"step": 200
},
{
"epoch": 0.5259862241703194,
"grad_norm": 3.0554661750793457,
"learning_rate": 2.9832869080779945e-05,
"loss": 1.2246,
"step": 210
},
{
"epoch": 0.5510331872260489,
"grad_norm": 2.5610270500183105,
"learning_rate": 2.9665738161559886e-05,
"loss": 1.1641,
"step": 220
},
{
"epoch": 0.5760801502817783,
"grad_norm": 4.368018627166748,
"learning_rate": 2.9498607242339834e-05,
"loss": 1.1112,
"step": 230
},
{
"epoch": 0.6011271133375078,
"grad_norm": 3.1300201416015625,
"learning_rate": 2.933147632311978e-05,
"loss": 1.1034,
"step": 240
},
{
"epoch": 0.6261740763932373,
"grad_norm": 3.441697597503662,
"learning_rate": 2.916434540389972e-05,
"loss": 0.9748,
"step": 250
},
{
"epoch": 0.6512210394489668,
"grad_norm": 2.930171251296997,
"learning_rate": 2.8997214484679665e-05,
"loss": 0.9908,
"step": 260
},
{
"epoch": 0.6762680025046963,
"grad_norm": 4.085419178009033,
"learning_rate": 2.8830083565459613e-05,
"loss": 0.8302,
"step": 270
},
{
"epoch": 0.7013149655604258,
"grad_norm": 3.5097601413726807,
"learning_rate": 2.8662952646239554e-05,
"loss": 0.788,
"step": 280
},
{
"epoch": 0.7263619286161553,
"grad_norm": 4.0369062423706055,
"learning_rate": 2.84958217270195e-05,
"loss": 0.766,
"step": 290
},
{
"epoch": 0.7514088916718847,
"grad_norm": 2.472437858581543,
"learning_rate": 2.8328690807799443e-05,
"loss": 0.742,
"step": 300
},
{
"epoch": 0.7764558547276142,
"grad_norm": 3.6972556114196777,
"learning_rate": 2.8161559888579388e-05,
"loss": 0.7595,
"step": 310
},
{
"epoch": 0.8015028177833438,
"grad_norm": 3.38409161567688,
"learning_rate": 2.7994428969359332e-05,
"loss": 0.6941,
"step": 320
},
{
"epoch": 0.8265497808390733,
"grad_norm": 2.243342399597168,
"learning_rate": 2.7827298050139277e-05,
"loss": 0.6347,
"step": 330
},
{
"epoch": 0.8515967438948028,
"grad_norm": 4.12843132019043,
"learning_rate": 2.7660167130919218e-05,
"loss": 0.6031,
"step": 340
},
{
"epoch": 0.8766437069505323,
"grad_norm": 2.2556419372558594,
"learning_rate": 2.7493036211699166e-05,
"loss": 0.5767,
"step": 350
},
{
"epoch": 0.9016906700062617,
"grad_norm": 3.9663896560668945,
"learning_rate": 2.732590529247911e-05,
"loss": 0.5565,
"step": 360
},
{
"epoch": 0.9267376330619912,
"grad_norm": 3.594534397125244,
"learning_rate": 2.7158774373259055e-05,
"loss": 0.5529,
"step": 370
},
{
"epoch": 0.9517845961177207,
"grad_norm": 2.7648637294769287,
"learning_rate": 2.6991643454038996e-05,
"loss": 0.4842,
"step": 380
},
{
"epoch": 0.9768315591734502,
"grad_norm": 3.66634202003479,
"learning_rate": 2.6824512534818944e-05,
"loss": 0.5065,
"step": 390
},
{
"epoch": 0.9993738259236068,
"eval_accuracy": 0.9713150926743159,
"eval_loss": 0.3426418602466583,
"eval_runtime": 10.6773,
"eval_samples_per_second": 636.677,
"eval_steps_per_second": 19.949,
"step": 399
},
{
"epoch": 1.0018785222291797,
"grad_norm": 2.2331535816192627,
"learning_rate": 2.665738161559889e-05,
"loss": 0.4929,
"step": 400
},
{
"epoch": 1.0269254852849092,
"grad_norm": 2.8309195041656494,
"learning_rate": 2.649025069637883e-05,
"loss": 0.413,
"step": 410
},
{
"epoch": 1.0519724483406387,
"grad_norm": 3.615835428237915,
"learning_rate": 2.6323119777158774e-05,
"loss": 0.4205,
"step": 420
},
{
"epoch": 1.0770194113963683,
"grad_norm": 4.138455867767334,
"learning_rate": 2.6155988857938722e-05,
"loss": 0.3937,
"step": 430
},
{
"epoch": 1.1020663744520978,
"grad_norm": 3.2305445671081543,
"learning_rate": 2.5988857938718663e-05,
"loss": 0.4097,
"step": 440
},
{
"epoch": 1.127113337507827,
"grad_norm": 3.1868467330932617,
"learning_rate": 2.5821727019498608e-05,
"loss": 0.3661,
"step": 450
},
{
"epoch": 1.1521603005635566,
"grad_norm": 1.8689473867416382,
"learning_rate": 2.5654596100278553e-05,
"loss": 0.3327,
"step": 460
},
{
"epoch": 1.177207263619286,
"grad_norm": 1.8282161951065063,
"learning_rate": 2.5487465181058497e-05,
"loss": 0.3211,
"step": 470
},
{
"epoch": 1.2022542266750156,
"grad_norm": 3.445858955383301,
"learning_rate": 2.532033426183844e-05,
"loss": 0.3468,
"step": 480
},
{
"epoch": 1.227301189730745,
"grad_norm": 3.0723907947540283,
"learning_rate": 2.5153203342618386e-05,
"loss": 0.3533,
"step": 490
},
{
"epoch": 1.2523481527864746,
"grad_norm": 4.13520622253418,
"learning_rate": 2.4986072423398327e-05,
"loss": 0.2817,
"step": 500
},
{
"epoch": 1.277395115842204,
"grad_norm": 6.625748634338379,
"learning_rate": 2.4818941504178275e-05,
"loss": 0.3295,
"step": 510
},
{
"epoch": 1.3024420788979336,
"grad_norm": 1.7438695430755615,
"learning_rate": 2.465181058495822e-05,
"loss": 0.2915,
"step": 520
},
{
"epoch": 1.3274890419536631,
"grad_norm": 4.603183746337891,
"learning_rate": 2.448467966573816e-05,
"loss": 0.3447,
"step": 530
},
{
"epoch": 1.3525360050093926,
"grad_norm": 2.3780734539031982,
"learning_rate": 2.4317548746518106e-05,
"loss": 0.2678,
"step": 540
},
{
"epoch": 1.3775829680651221,
"grad_norm": 2.449040174484253,
"learning_rate": 2.415041782729805e-05,
"loss": 0.3029,
"step": 550
},
{
"epoch": 1.4026299311208517,
"grad_norm": 3.899897336959839,
"learning_rate": 2.3983286908077995e-05,
"loss": 0.2609,
"step": 560
},
{
"epoch": 1.4276768941765812,
"grad_norm": 4.0847978591918945,
"learning_rate": 2.381615598885794e-05,
"loss": 0.2979,
"step": 570
},
{
"epoch": 1.4527238572323107,
"grad_norm": 3.0735533237457275,
"learning_rate": 2.3649025069637884e-05,
"loss": 0.2612,
"step": 580
},
{
"epoch": 1.4777708202880402,
"grad_norm": 4.23598051071167,
"learning_rate": 2.3481894150417825e-05,
"loss": 0.2657,
"step": 590
},
{
"epoch": 1.5028177833437697,
"grad_norm": 2.6922895908355713,
"learning_rate": 2.3314763231197773e-05,
"loss": 0.2332,
"step": 600
},
{
"epoch": 1.5278647463994992,
"grad_norm": 2.444784641265869,
"learning_rate": 2.3147632311977718e-05,
"loss": 0.2646,
"step": 610
},
{
"epoch": 1.5529117094552287,
"grad_norm": 3.595010757446289,
"learning_rate": 2.298050139275766e-05,
"loss": 0.2258,
"step": 620
},
{
"epoch": 1.577958672510958,
"grad_norm": 4.990175724029541,
"learning_rate": 2.2813370473537603e-05,
"loss": 0.2477,
"step": 630
},
{
"epoch": 1.6030056355666875,
"grad_norm": 2.4451816082000732,
"learning_rate": 2.264623955431755e-05,
"loss": 0.2268,
"step": 640
},
{
"epoch": 1.628052598622417,
"grad_norm": 5.3694233894348145,
"learning_rate": 2.2479108635097492e-05,
"loss": 0.226,
"step": 650
},
{
"epoch": 1.6530995616781465,
"grad_norm": 4.013739585876465,
"learning_rate": 2.2311977715877437e-05,
"loss": 0.2519,
"step": 660
},
{
"epoch": 1.678146524733876,
"grad_norm": 2.913640022277832,
"learning_rate": 2.214484679665738e-05,
"loss": 0.2723,
"step": 670
},
{
"epoch": 1.7031934877896056,
"grad_norm": 3.3302664756774902,
"learning_rate": 2.1977715877437326e-05,
"loss": 0.2412,
"step": 680
},
{
"epoch": 1.7282404508453348,
"grad_norm": 3.4415056705474854,
"learning_rate": 2.181058495821727e-05,
"loss": 0.2463,
"step": 690
},
{
"epoch": 1.7532874139010644,
"grad_norm": 2.424220085144043,
"learning_rate": 2.1643454038997215e-05,
"loss": 0.198,
"step": 700
},
{
"epoch": 1.7783343769567939,
"grad_norm": 3.404729127883911,
"learning_rate": 2.147632311977716e-05,
"loss": 0.2263,
"step": 710
},
{
"epoch": 1.8033813400125234,
"grad_norm": 2.495832681655884,
"learning_rate": 2.1309192200557104e-05,
"loss": 0.2146,
"step": 720
},
{
"epoch": 1.8284283030682529,
"grad_norm": 3.8898746967315674,
"learning_rate": 2.114206128133705e-05,
"loss": 0.2289,
"step": 730
},
{
"epoch": 1.8534752661239824,
"grad_norm": 2.768136978149414,
"learning_rate": 2.0974930362116993e-05,
"loss": 0.2144,
"step": 740
},
{
"epoch": 1.878522229179712,
"grad_norm": 2.6077802181243896,
"learning_rate": 2.0807799442896935e-05,
"loss": 0.2269,
"step": 750
},
{
"epoch": 1.9035691922354414,
"grad_norm": 2.223156452178955,
"learning_rate": 2.0640668523676883e-05,
"loss": 0.1809,
"step": 760
},
{
"epoch": 1.928616155291171,
"grad_norm": 2.7288057804107666,
"learning_rate": 2.0473537604456827e-05,
"loss": 0.1833,
"step": 770
},
{
"epoch": 1.9536631183469004,
"grad_norm": 2.93996000289917,
"learning_rate": 2.0306406685236768e-05,
"loss": 0.2276,
"step": 780
},
{
"epoch": 1.97871008140263,
"grad_norm": 1.5655947923660278,
"learning_rate": 2.0139275766016713e-05,
"loss": 0.2089,
"step": 790
},
{
"epoch": 1.9987476518472134,
"eval_accuracy": 0.9780817887614004,
"eval_loss": 0.12669295072555542,
"eval_runtime": 10.7359,
"eval_samples_per_second": 633.204,
"eval_steps_per_second": 19.84,
"step": 798
},
{
"epoch": 2.0037570444583594,
"grad_norm": 2.9249160289764404,
"learning_rate": 1.997214484679666e-05,
"loss": 0.201,
"step": 800
},
{
"epoch": 2.028804007514089,
"grad_norm": 2.023646116256714,
"learning_rate": 1.9805013927576602e-05,
"loss": 0.1868,
"step": 810
},
{
"epoch": 2.0538509705698185,
"grad_norm": 1.4999943971633911,
"learning_rate": 1.9637883008356546e-05,
"loss": 0.1885,
"step": 820
},
{
"epoch": 2.078897933625548,
"grad_norm": 3.2979843616485596,
"learning_rate": 1.947075208913649e-05,
"loss": 0.1904,
"step": 830
},
{
"epoch": 2.1039448966812775,
"grad_norm": 1.8831233978271484,
"learning_rate": 1.9303621169916436e-05,
"loss": 0.185,
"step": 840
},
{
"epoch": 2.128991859737007,
"grad_norm": 1.3918356895446777,
"learning_rate": 1.913649025069638e-05,
"loss": 0.1829,
"step": 850
},
{
"epoch": 2.1540388227927365,
"grad_norm": 1.2814128398895264,
"learning_rate": 1.8969359331476325e-05,
"loss": 0.1667,
"step": 860
},
{
"epoch": 2.179085785848466,
"grad_norm": 3.2199411392211914,
"learning_rate": 1.8802228412256266e-05,
"loss": 0.179,
"step": 870
},
{
"epoch": 2.2041327489041955,
"grad_norm": 3.293250560760498,
"learning_rate": 1.863509749303621e-05,
"loss": 0.213,
"step": 880
},
{
"epoch": 2.229179711959925,
"grad_norm": 2.887782335281372,
"learning_rate": 1.846796657381616e-05,
"loss": 0.1872,
"step": 890
},
{
"epoch": 2.254226675015654,
"grad_norm": 3.306884288787842,
"learning_rate": 1.83008356545961e-05,
"loss": 0.1762,
"step": 900
},
{
"epoch": 2.279273638071384,
"grad_norm": 3.3350651264190674,
"learning_rate": 1.8133704735376044e-05,
"loss": 0.2153,
"step": 910
},
{
"epoch": 2.304320601127113,
"grad_norm": 1.8800674676895142,
"learning_rate": 1.796657381615599e-05,
"loss": 0.2069,
"step": 920
},
{
"epoch": 2.329367564182843,
"grad_norm": 3.2760367393493652,
"learning_rate": 1.7799442896935933e-05,
"loss": 0.1756,
"step": 930
},
{
"epoch": 2.354414527238572,
"grad_norm": 4.827606678009033,
"learning_rate": 1.7632311977715878e-05,
"loss": 0.1673,
"step": 940
},
{
"epoch": 2.3794614902943017,
"grad_norm": 4.115577697753906,
"learning_rate": 1.7465181058495822e-05,
"loss": 0.1503,
"step": 950
},
{
"epoch": 2.404508453350031,
"grad_norm": 2.163761854171753,
"learning_rate": 1.7298050139275764e-05,
"loss": 0.2029,
"step": 960
},
{
"epoch": 2.4295554164057607,
"grad_norm": 3.1453959941864014,
"learning_rate": 1.713091922005571e-05,
"loss": 0.1748,
"step": 970
},
{
"epoch": 2.45460237946149,
"grad_norm": 0.9899351596832275,
"learning_rate": 1.6963788300835656e-05,
"loss": 0.1589,
"step": 980
},
{
"epoch": 2.4796493425172197,
"grad_norm": 2.109621524810791,
"learning_rate": 1.6796657381615597e-05,
"loss": 0.1709,
"step": 990
},
{
"epoch": 2.504696305572949,
"grad_norm": 4.260733604431152,
"learning_rate": 1.6629526462395542e-05,
"loss": 0.1776,
"step": 1000
},
{
"epoch": 2.5297432686286787,
"grad_norm": 1.1494958400726318,
"learning_rate": 1.646239554317549e-05,
"loss": 0.1651,
"step": 1010
},
{
"epoch": 2.554790231684408,
"grad_norm": 4.246566295623779,
"learning_rate": 1.6295264623955434e-05,
"loss": 0.1941,
"step": 1020
},
{
"epoch": 2.5798371947401377,
"grad_norm": 2.06291127204895,
"learning_rate": 1.6128133704735375e-05,
"loss": 0.1276,
"step": 1030
},
{
"epoch": 2.6048841577958672,
"grad_norm": 2.7648720741271973,
"learning_rate": 1.596100278551532e-05,
"loss": 0.1725,
"step": 1040
},
{
"epoch": 2.6299311208515967,
"grad_norm": 1.355832815170288,
"learning_rate": 1.5793871866295268e-05,
"loss": 0.1585,
"step": 1050
},
{
"epoch": 2.6549780839073263,
"grad_norm": 3.2558505535125732,
"learning_rate": 1.562674094707521e-05,
"loss": 0.1778,
"step": 1060
},
{
"epoch": 2.6800250469630558,
"grad_norm": 6.209780693054199,
"learning_rate": 1.5459610027855154e-05,
"loss": 0.1789,
"step": 1070
},
{
"epoch": 2.7050720100187853,
"grad_norm": 2.586533784866333,
"learning_rate": 1.5292479108635098e-05,
"loss": 0.1648,
"step": 1080
},
{
"epoch": 2.730118973074515,
"grad_norm": 3.024934768676758,
"learning_rate": 1.5125348189415043e-05,
"loss": 0.1428,
"step": 1090
},
{
"epoch": 2.7551659361302443,
"grad_norm": 3.368821382522583,
"learning_rate": 1.4958217270194987e-05,
"loss": 0.1499,
"step": 1100
},
{
"epoch": 2.780212899185974,
"grad_norm": 2.3457772731781006,
"learning_rate": 1.479108635097493e-05,
"loss": 0.1733,
"step": 1110
},
{
"epoch": 2.8052598622417033,
"grad_norm": 1.2713210582733154,
"learning_rate": 1.4623955431754876e-05,
"loss": 0.1533,
"step": 1120
},
{
"epoch": 2.830306825297433,
"grad_norm": 2.4018795490264893,
"learning_rate": 1.445682451253482e-05,
"loss": 0.1713,
"step": 1130
},
{
"epoch": 2.8553537883531623,
"grad_norm": 2.1352932453155518,
"learning_rate": 1.4289693593314764e-05,
"loss": 0.1723,
"step": 1140
},
{
"epoch": 2.8804007514088914,
"grad_norm": 1.5749989748001099,
"learning_rate": 1.4122562674094708e-05,
"loss": 0.1965,
"step": 1150
},
{
"epoch": 2.9054477144646214,
"grad_norm": 3.0269646644592285,
"learning_rate": 1.3955431754874653e-05,
"loss": 0.1965,
"step": 1160
},
{
"epoch": 2.9304946775203504,
"grad_norm": 2.5006277561187744,
"learning_rate": 1.3788300835654596e-05,
"loss": 0.163,
"step": 1170
},
{
"epoch": 2.9555416405760804,
"grad_norm": 1.5534733533859253,
"learning_rate": 1.362116991643454e-05,
"loss": 0.1569,
"step": 1180
},
{
"epoch": 2.9805886036318094,
"grad_norm": 2.774616241455078,
"learning_rate": 1.3454038997214485e-05,
"loss": 0.1834,
"step": 1190
},
{
"epoch": 2.9981214777708205,
"eval_accuracy": 0.980435422182995,
"eval_loss": 0.08890607953071594,
"eval_runtime": 10.6431,
"eval_samples_per_second": 638.724,
"eval_steps_per_second": 20.013,
"step": 1197
},
{
"epoch": 3.005635566687539,
"grad_norm": 3.6320059299468994,
"learning_rate": 1.3286908077994428e-05,
"loss": 0.1771,
"step": 1200
},
{
"epoch": 3.0306825297432685,
"grad_norm": 1.4830687046051025,
"learning_rate": 1.3119777158774374e-05,
"loss": 0.171,
"step": 1210
},
{
"epoch": 3.055729492798998,
"grad_norm": 1.7326056957244873,
"learning_rate": 1.2952646239554317e-05,
"loss": 0.1576,
"step": 1220
},
{
"epoch": 3.0807764558547275,
"grad_norm": 2.301881790161133,
"learning_rate": 1.2785515320334262e-05,
"loss": 0.1675,
"step": 1230
},
{
"epoch": 3.105823418910457,
"grad_norm": 3.6545441150665283,
"learning_rate": 1.2618384401114206e-05,
"loss": 0.1707,
"step": 1240
},
{
"epoch": 3.1308703819661865,
"grad_norm": 1.9802820682525635,
"learning_rate": 1.245125348189415e-05,
"loss": 0.1744,
"step": 1250
},
{
"epoch": 3.155917345021916,
"grad_norm": 2.116532564163208,
"learning_rate": 1.2284122562674095e-05,
"loss": 0.1592,
"step": 1260
},
{
"epoch": 3.1809643080776455,
"grad_norm": 2.7397706508636475,
"learning_rate": 1.211699164345404e-05,
"loss": 0.1567,
"step": 1270
},
{
"epoch": 3.206011271133375,
"grad_norm": 2.8573620319366455,
"learning_rate": 1.1949860724233983e-05,
"loss": 0.1673,
"step": 1280
},
{
"epoch": 3.2310582341891045,
"grad_norm": 3.157711982727051,
"learning_rate": 1.1782729805013929e-05,
"loss": 0.1345,
"step": 1290
},
{
"epoch": 3.256105197244834,
"grad_norm": 4.402627944946289,
"learning_rate": 1.1615598885793872e-05,
"loss": 0.1689,
"step": 1300
},
{
"epoch": 3.2811521603005636,
"grad_norm": 1.3651645183563232,
"learning_rate": 1.1448467966573816e-05,
"loss": 0.0912,
"step": 1310
},
{
"epoch": 3.306199123356293,
"grad_norm": 3.8698039054870605,
"learning_rate": 1.1281337047353761e-05,
"loss": 0.1594,
"step": 1320
},
{
"epoch": 3.3312460864120226,
"grad_norm": 3.035409450531006,
"learning_rate": 1.1114206128133705e-05,
"loss": 0.1601,
"step": 1330
},
{
"epoch": 3.356293049467752,
"grad_norm": 1.8605338335037231,
"learning_rate": 1.0947075208913648e-05,
"loss": 0.1713,
"step": 1340
},
{
"epoch": 3.3813400125234816,
"grad_norm": 3.120457887649536,
"learning_rate": 1.0779944289693595e-05,
"loss": 0.1542,
"step": 1350
},
{
"epoch": 3.406386975579211,
"grad_norm": 3.9332990646362305,
"learning_rate": 1.0612813370473537e-05,
"loss": 0.1451,
"step": 1360
},
{
"epoch": 3.4314339386349406,
"grad_norm": 3.2169320583343506,
"learning_rate": 1.0445682451253482e-05,
"loss": 0.1417,
"step": 1370
},
{
"epoch": 3.45648090169067,
"grad_norm": 1.290043592453003,
"learning_rate": 1.0278551532033427e-05,
"loss": 0.1672,
"step": 1380
},
{
"epoch": 3.4815278647463996,
"grad_norm": 1.9304981231689453,
"learning_rate": 1.0111420612813371e-05,
"loss": 0.1558,
"step": 1390
},
{
"epoch": 3.506574827802129,
"grad_norm": 1.4201843738555908,
"learning_rate": 9.944289693593314e-06,
"loss": 0.1535,
"step": 1400
},
{
"epoch": 3.5316217908578587,
"grad_norm": 1.5534597635269165,
"learning_rate": 9.77715877437326e-06,
"loss": 0.1275,
"step": 1410
},
{
"epoch": 3.5566687539135877,
"grad_norm": 3.183288097381592,
"learning_rate": 9.610027855153203e-06,
"loss": 0.1588,
"step": 1420
},
{
"epoch": 3.5817157169693177,
"grad_norm": 3.252284288406372,
"learning_rate": 9.44289693593315e-06,
"loss": 0.1266,
"step": 1430
},
{
"epoch": 3.6067626800250467,
"grad_norm": 3.5988216400146484,
"learning_rate": 9.275766016713092e-06,
"loss": 0.1484,
"step": 1440
},
{
"epoch": 3.6318096430807767,
"grad_norm": 0.8129429221153259,
"learning_rate": 9.108635097493037e-06,
"loss": 0.1249,
"step": 1450
},
{
"epoch": 3.6568566061365058,
"grad_norm": 1.1750004291534424,
"learning_rate": 8.941504178272981e-06,
"loss": 0.1552,
"step": 1460
},
{
"epoch": 3.6819035691922357,
"grad_norm": 2.3609495162963867,
"learning_rate": 8.774373259052926e-06,
"loss": 0.1506,
"step": 1470
},
{
"epoch": 3.706950532247965,
"grad_norm": 3.020759344100952,
"learning_rate": 8.607242339832869e-06,
"loss": 0.1492,
"step": 1480
},
{
"epoch": 3.7319974953036943,
"grad_norm": 2.7013983726501465,
"learning_rate": 8.440111420612815e-06,
"loss": 0.1045,
"step": 1490
},
{
"epoch": 3.757044458359424,
"grad_norm": 3.6517138481140137,
"learning_rate": 8.272980501392758e-06,
"loss": 0.1354,
"step": 1500
},
{
"epoch": 3.7820914214151533,
"grad_norm": 1.283496618270874,
"learning_rate": 8.1058495821727e-06,
"loss": 0.1376,
"step": 1510
},
{
"epoch": 3.807138384470883,
"grad_norm": 4.068831443786621,
"learning_rate": 7.938718662952647e-06,
"loss": 0.147,
"step": 1520
},
{
"epoch": 3.8321853475266123,
"grad_norm": 1.8731778860092163,
"learning_rate": 7.77158774373259e-06,
"loss": 0.134,
"step": 1530
},
{
"epoch": 3.857232310582342,
"grad_norm": 2.750255823135376,
"learning_rate": 7.604456824512535e-06,
"loss": 0.1398,
"step": 1540
},
{
"epoch": 3.8822792736380713,
"grad_norm": 2.020031213760376,
"learning_rate": 7.43732590529248e-06,
"loss": 0.1236,
"step": 1550
},
{
"epoch": 3.907326236693801,
"grad_norm": 3.4288525581359863,
"learning_rate": 7.2701949860724235e-06,
"loss": 0.1223,
"step": 1560
},
{
"epoch": 3.9323731997495304,
"grad_norm": 2.974663019180298,
"learning_rate": 7.103064066852368e-06,
"loss": 0.1596,
"step": 1570
},
{
"epoch": 3.95742016280526,
"grad_norm": 2.157737970352173,
"learning_rate": 6.935933147632313e-06,
"loss": 0.1277,
"step": 1580
},
{
"epoch": 3.9824671258609894,
"grad_norm": 2.2225987911224365,
"learning_rate": 6.768802228412256e-06,
"loss": 0.142,
"step": 1590
},
{
"epoch": 4.0,
"eval_accuracy": 0.981318034716093,
"eval_loss": 0.08539879322052002,
"eval_runtime": 10.6548,
"eval_samples_per_second": 638.022,
"eval_steps_per_second": 19.991,
"step": 1597
},
{
"epoch": 4.007514088916719,
"grad_norm": 1.7441836595535278,
"learning_rate": 6.601671309192201e-06,
"loss": 0.1491,
"step": 1600
},
{
"epoch": 4.032561051972448,
"grad_norm": 2.0195930004119873,
"learning_rate": 6.4345403899721455e-06,
"loss": 0.1357,
"step": 1610
},
{
"epoch": 4.057608015028178,
"grad_norm": 2.4423089027404785,
"learning_rate": 6.267409470752089e-06,
"loss": 0.1438,
"step": 1620
},
{
"epoch": 4.082654978083907,
"grad_norm": 2.6411936283111572,
"learning_rate": 6.100278551532034e-06,
"loss": 0.131,
"step": 1630
},
{
"epoch": 4.107701941139637,
"grad_norm": 0.8893925547599792,
"learning_rate": 5.933147632311978e-06,
"loss": 0.1249,
"step": 1640
},
{
"epoch": 4.132748904195366,
"grad_norm": 2.2715697288513184,
"learning_rate": 5.766016713091923e-06,
"loss": 0.1642,
"step": 1650
},
{
"epoch": 4.157795867251096,
"grad_norm": 1.903985619544983,
"learning_rate": 5.598885793871867e-06,
"loss": 0.1418,
"step": 1660
},
{
"epoch": 4.182842830306825,
"grad_norm": 2.41510272026062,
"learning_rate": 5.43175487465181e-06,
"loss": 0.1535,
"step": 1670
},
{
"epoch": 4.207889793362555,
"grad_norm": 3.837315320968628,
"learning_rate": 5.264623955431755e-06,
"loss": 0.1433,
"step": 1680
},
{
"epoch": 4.232936756418284,
"grad_norm": 2.4811267852783203,
"learning_rate": 5.0974930362116986e-06,
"loss": 0.121,
"step": 1690
},
{
"epoch": 4.257983719474014,
"grad_norm": 4.462907791137695,
"learning_rate": 4.930362116991643e-06,
"loss": 0.1267,
"step": 1700
},
{
"epoch": 4.283030682529743,
"grad_norm": 3.019132137298584,
"learning_rate": 4.763231197771588e-06,
"loss": 0.1151,
"step": 1710
},
{
"epoch": 4.308077645585473,
"grad_norm": 3.2697532176971436,
"learning_rate": 4.596100278551532e-06,
"loss": 0.1068,
"step": 1720
},
{
"epoch": 4.333124608641202,
"grad_norm": 3.3036513328552246,
"learning_rate": 4.428969359331476e-06,
"loss": 0.1009,
"step": 1730
},
{
"epoch": 4.358171571696932,
"grad_norm": 2.5975863933563232,
"learning_rate": 4.2618384401114205e-06,
"loss": 0.1168,
"step": 1740
},
{
"epoch": 4.383218534752661,
"grad_norm": 1.6975170373916626,
"learning_rate": 4.094707520891365e-06,
"loss": 0.1549,
"step": 1750
},
{
"epoch": 4.408265497808391,
"grad_norm": 2.671894073486328,
"learning_rate": 3.927576601671309e-06,
"loss": 0.1211,
"step": 1760
},
{
"epoch": 4.43331246086412,
"grad_norm": 4.581605911254883,
"learning_rate": 3.7604456824512533e-06,
"loss": 0.1406,
"step": 1770
},
{
"epoch": 4.45835942391985,
"grad_norm": 3.0454249382019043,
"learning_rate": 3.593314763231198e-06,
"loss": 0.137,
"step": 1780
},
{
"epoch": 4.483406386975579,
"grad_norm": 2.0320184230804443,
"learning_rate": 3.426183844011142e-06,
"loss": 0.1105,
"step": 1790
},
{
"epoch": 4.508453350031308,
"grad_norm": 1.5015639066696167,
"learning_rate": 3.259052924791086e-06,
"loss": 0.1282,
"step": 1800
},
{
"epoch": 4.533500313087038,
"grad_norm": 2.1987569332122803,
"learning_rate": 3.0919220055710307e-06,
"loss": 0.1111,
"step": 1810
},
{
"epoch": 4.558547276142768,
"grad_norm": 1.3748631477355957,
"learning_rate": 2.924791086350975e-06,
"loss": 0.1326,
"step": 1820
},
{
"epoch": 4.583594239198497,
"grad_norm": 1.8067643642425537,
"learning_rate": 2.7576601671309194e-06,
"loss": 0.1112,
"step": 1830
},
{
"epoch": 4.608641202254226,
"grad_norm": 2.4993462562561035,
"learning_rate": 2.5905292479108636e-06,
"loss": 0.1014,
"step": 1840
},
{
"epoch": 4.633688165309956,
"grad_norm": 3.3623006343841553,
"learning_rate": 2.4233983286908077e-06,
"loss": 0.1319,
"step": 1850
},
{
"epoch": 4.658735128365686,
"grad_norm": 2.678271770477295,
"learning_rate": 2.2562674094707523e-06,
"loss": 0.1391,
"step": 1860
},
{
"epoch": 4.683782091421415,
"grad_norm": 1.3857640027999878,
"learning_rate": 2.0891364902506964e-06,
"loss": 0.1149,
"step": 1870
},
{
"epoch": 4.708829054477144,
"grad_norm": 2.552626371383667,
"learning_rate": 1.922005571030641e-06,
"loss": 0.119,
"step": 1880
},
{
"epoch": 4.733876017532874,
"grad_norm": 1.3172489404678345,
"learning_rate": 1.7548746518105849e-06,
"loss": 0.1226,
"step": 1890
},
{
"epoch": 4.758922980588603,
"grad_norm": 4.114231586456299,
"learning_rate": 1.5877437325905292e-06,
"loss": 0.1191,
"step": 1900
},
{
"epoch": 4.783969943644333,
"grad_norm": 3.1994917392730713,
"learning_rate": 1.4206128133704736e-06,
"loss": 0.1064,
"step": 1910
},
{
"epoch": 4.809016906700062,
"grad_norm": 2.785364866256714,
"learning_rate": 1.253481894150418e-06,
"loss": 0.1092,
"step": 1920
},
{
"epoch": 4.834063869755792,
"grad_norm": 1.6309309005737305,
"learning_rate": 1.0863509749303623e-06,
"loss": 0.1355,
"step": 1930
},
{
"epoch": 4.859110832811521,
"grad_norm": 3.174809694290161,
"learning_rate": 9.192200557103064e-07,
"loss": 0.1211,
"step": 1940
},
{
"epoch": 4.884157795867251,
"grad_norm": 2.4154794216156006,
"learning_rate": 7.520891364902508e-07,
"loss": 0.1135,
"step": 1950
},
{
"epoch": 4.90920475892298,
"grad_norm": 2.3144145011901855,
"learning_rate": 5.84958217270195e-07,
"loss": 0.1121,
"step": 1960
},
{
"epoch": 4.93425172197871,
"grad_norm": 2.8474748134613037,
"learning_rate": 4.178272980501393e-07,
"loss": 0.143,
"step": 1970
},
{
"epoch": 4.959298685034439,
"grad_norm": 2.4842255115509033,
"learning_rate": 2.506963788300836e-07,
"loss": 0.116,
"step": 1980
},
{
"epoch": 4.984345648090169,
"grad_norm": 2.9249684810638428,
"learning_rate": 8.356545961002785e-08,
"loss": 0.1472,
"step": 1990
},
{
"epoch": 4.996869129618034,
"eval_accuracy": 0.9832303618711385,
"eval_loss": 0.07819115370512009,
"eval_runtime": 10.6686,
"eval_samples_per_second": 637.194,
"eval_steps_per_second": 19.965,
"step": 1995
},
{
"epoch": 4.996869129618034,
"step": 1995,
"total_flos": 2.3180194781952e+18,
"train_loss": 0.40026830736557045,
"train_runtime": 1086.589,
"train_samples_per_second": 235.112,
"train_steps_per_second": 1.836
}
],
"logging_steps": 10,
"max_steps": 1995,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.3180194781952e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
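
A minimal sketch for reading this state file (assuming it is saved locally as trainer_state.json; json is the Python standard library module): it pulls the per-epoch evaluation records out of log_history and reports the best checkpoint, using only field names that appear in the state above.

import json

# Load the trainer state written out at the end of training.
with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries are the log_history records that carry eval_* keys.
eval_records = [e for e in state["log_history"] if "eval_accuracy" in e]
for rec in eval_records:
    print(f"epoch {rec['epoch']:.3f}  step {rec['step']:>4}  "
          f"eval_loss {rec['eval_loss']:.4f}  eval_accuracy {rec['eval_accuracy']:.4f}")

print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])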