gpt2-xl-lora-multi-512-7-top / trainer_state.json
MHGanainy/gpt2-xl-lora-multi-512-7
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 10920,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009157509157509158,
"grad_norm": 0.10490927845239639,
"learning_rate": 2.289639381797367e-07,
"loss": 2.7059,
"step": 100
},
{
"epoch": 0.018315018315018316,
"grad_norm": 0.22449691593647003,
"learning_rate": 4.5563823697767606e-07,
"loss": 2.7179,
"step": 200
},
{
"epoch": 0.027472527472527472,
"grad_norm": 0.14629626274108887,
"learning_rate": 6.846021751574127e-07,
"loss": 2.7183,
"step": 300
},
{
"epoch": 0.03663003663003663,
"grad_norm": 0.19104591012001038,
"learning_rate": 9.112764739553521e-07,
"loss": 2.715,
"step": 400
},
{
"epoch": 0.045787545787545784,
"grad_norm": 0.2343115508556366,
"learning_rate": 1.1402404121350888e-06,
"loss": 2.6869,
"step": 500
},
{
"epoch": 0.054945054945054944,
"grad_norm": 0.1325504034757614,
"learning_rate": 1.366914710933028e-06,
"loss": 2.6707,
"step": 600
},
{
"epoch": 0.0641025641025641,
"grad_norm": 0.22846511006355286,
"learning_rate": 1.5958786491127649e-06,
"loss": 2.6654,
"step": 700
},
{
"epoch": 0.07326007326007326,
"grad_norm": 0.1422765702009201,
"learning_rate": 1.8248425872925015e-06,
"loss": 2.6269,
"step": 800
},
{
"epoch": 0.08241758241758242,
"grad_norm": 0.33947649598121643,
"learning_rate": 2.0538065254722382e-06,
"loss": 2.5998,
"step": 900
},
{
"epoch": 0.09157509157509157,
"grad_norm": 0.12349778413772583,
"learning_rate": 2.2804808242701776e-06,
"loss": 2.586,
"step": 1000
},
{
"epoch": 0.10073260073260074,
"grad_norm": 0.21951575577259064,
"learning_rate": 2.5094447624499143e-06,
"loss": 2.5917,
"step": 1100
},
{
"epoch": 0.10989010989010989,
"grad_norm": 0.16132010519504547,
"learning_rate": 2.738408700629651e-06,
"loss": 2.5675,
"step": 1200
},
{
"epoch": 0.11904761904761904,
"grad_norm": 0.31237655878067017,
"learning_rate": 2.9673726388093876e-06,
"loss": 2.5556,
"step": 1300
},
{
"epoch": 0.1282051282051282,
"grad_norm": 0.15914414823055267,
"learning_rate": 3.1963365769891247e-06,
"loss": 2.5326,
"step": 1400
},
{
"epoch": 0.13736263736263737,
"grad_norm": 0.1860857754945755,
"learning_rate": 3.425300515168861e-06,
"loss": 2.5353,
"step": 1500
},
{
"epoch": 0.14652014652014653,
"grad_norm": 0.18469680845737457,
"learning_rate": 3.6542644533485977e-06,
"loss": 2.5114,
"step": 1600
},
{
"epoch": 0.15567765567765568,
"grad_norm": 0.3093177080154419,
"learning_rate": 3.883228391528335e-06,
"loss": 2.5025,
"step": 1700
},
{
"epoch": 0.16483516483516483,
"grad_norm": 0.29793912172317505,
"learning_rate": 4.112192329708071e-06,
"loss": 2.4835,
"step": 1800
},
{
"epoch": 0.17399267399267399,
"grad_norm": 0.20354346930980682,
"learning_rate": 4.341156267887808e-06,
"loss": 2.4779,
"step": 1900
},
{
"epoch": 0.18315018315018314,
"grad_norm": 0.20856451988220215,
"learning_rate": 4.5701202060675444e-06,
"loss": 2.4694,
"step": 2000
},
{
"epoch": 0.19230769230769232,
"grad_norm": 0.17660389840602875,
"learning_rate": 4.7990841442472815e-06,
"loss": 2.4717,
"step": 2100
},
{
"epoch": 0.20146520146520147,
"grad_norm": 0.1957291066646576,
"learning_rate": 5.028048082427019e-06,
"loss": 2.4643,
"step": 2200
},
{
"epoch": 0.21062271062271062,
"grad_norm": 0.201618030667305,
"learning_rate": 5.257012020606756e-06,
"loss": 2.4694,
"step": 2300
},
{
"epoch": 0.21978021978021978,
"grad_norm": 0.22514258325099945,
"learning_rate": 5.485975958786491e-06,
"loss": 2.4528,
"step": 2400
},
{
"epoch": 0.22893772893772893,
"grad_norm": 0.2677931785583496,
"learning_rate": 5.714939896966228e-06,
"loss": 2.454,
"step": 2500
},
{
"epoch": 0.23809523809523808,
"grad_norm": 0.2718426287174225,
"learning_rate": 5.943903835145965e-06,
"loss": 2.4632,
"step": 2600
},
{
"epoch": 0.24725274725274726,
"grad_norm": 0.2885560691356659,
"learning_rate": 6.172867773325702e-06,
"loss": 2.4549,
"step": 2700
},
{
"epoch": 0.2564102564102564,
"grad_norm": 0.19180075824260712,
"learning_rate": 6.401831711505439e-06,
"loss": 2.4496,
"step": 2800
},
{
"epoch": 0.26556776556776557,
"grad_norm": 0.22613513469696045,
"learning_rate": 6.630795649685176e-06,
"loss": 2.4528,
"step": 2900
},
{
"epoch": 0.27472527472527475,
"grad_norm": 0.19509850442409515,
"learning_rate": 6.859759587864911e-06,
"loss": 2.4441,
"step": 3000
},
{
"epoch": 0.2838827838827839,
"grad_norm": 0.21075837314128876,
"learning_rate": 7.088723526044648e-06,
"loss": 2.4464,
"step": 3100
},
{
"epoch": 0.29304029304029305,
"grad_norm": 0.2654266357421875,
"learning_rate": 7.3176874642243855e-06,
"loss": 2.4298,
"step": 3200
},
{
"epoch": 0.3021978021978022,
"grad_norm": 0.27135413885116577,
"learning_rate": 7.546651402404122e-06,
"loss": 2.4364,
"step": 3300
},
{
"epoch": 0.31135531135531136,
"grad_norm": 0.27136310935020447,
"learning_rate": 7.773325701202062e-06,
"loss": 2.4287,
"step": 3400
},
{
"epoch": 0.32051282051282054,
"grad_norm": 0.2661312222480774,
"learning_rate": 8.002289639381798e-06,
"loss": 2.4275,
"step": 3500
},
{
"epoch": 0.32967032967032966,
"grad_norm": 0.20097434520721436,
"learning_rate": 8.231253577561534e-06,
"loss": 2.4336,
"step": 3600
},
{
"epoch": 0.33882783882783885,
"grad_norm": 0.3780891001224518,
"learning_rate": 8.46021751574127e-06,
"loss": 2.4372,
"step": 3700
},
{
"epoch": 0.34798534798534797,
"grad_norm": 0.23288694024085999,
"learning_rate": 8.689181453921009e-06,
"loss": 2.4103,
"step": 3800
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.24288786947727203,
"learning_rate": 8.918145392100745e-06,
"loss": 2.4149,
"step": 3900
},
{
"epoch": 0.3663003663003663,
"grad_norm": 0.25948914885520935,
"learning_rate": 9.147109330280481e-06,
"loss": 2.4236,
"step": 4000
},
{
"epoch": 0.37545787545787546,
"grad_norm": 0.26415252685546875,
"learning_rate": 9.37607326846022e-06,
"loss": 2.4355,
"step": 4100
},
{
"epoch": 0.38461538461538464,
"grad_norm": 0.3095110058784485,
"learning_rate": 9.605037206639955e-06,
"loss": 2.4275,
"step": 4200
},
{
"epoch": 0.39377289377289376,
"grad_norm": 0.23992522060871124,
"learning_rate": 9.834001144819692e-06,
"loss": 2.4135,
"step": 4300
},
{
"epoch": 0.40293040293040294,
"grad_norm": 0.31741300225257874,
"learning_rate": 1.0062965082999428e-05,
"loss": 2.4148,
"step": 4400
},
{
"epoch": 0.41208791208791207,
"grad_norm": 0.26608923077583313,
"learning_rate": 1.0291929021179166e-05,
"loss": 2.4186,
"step": 4500
},
{
"epoch": 0.42124542124542125,
"grad_norm": 0.23381298780441284,
"learning_rate": 1.0520892959358902e-05,
"loss": 2.4124,
"step": 4600
},
{
"epoch": 0.43040293040293043,
"grad_norm": 0.42702221870422363,
"learning_rate": 1.0749856897538637e-05,
"loss": 2.412,
"step": 4700
},
{
"epoch": 0.43956043956043955,
"grad_norm": 0.24715246260166168,
"learning_rate": 1.0978820835718376e-05,
"loss": 2.4138,
"step": 4800
},
{
"epoch": 0.44871794871794873,
"grad_norm": 0.42784959077835083,
"learning_rate": 1.1207784773898111e-05,
"loss": 2.3953,
"step": 4900
},
{
"epoch": 0.45787545787545786,
"grad_norm": 0.25161468982696533,
"learning_rate": 1.143674871207785e-05,
"loss": 2.3898,
"step": 5000
},
{
"epoch": 0.46703296703296704,
"grad_norm": 0.2743668258190155,
"learning_rate": 1.1665712650257585e-05,
"loss": 2.4119,
"step": 5100
},
{
"epoch": 0.47619047619047616,
"grad_norm": 0.28708940744400024,
"learning_rate": 1.1894676588437321e-05,
"loss": 2.4039,
"step": 5200
},
{
"epoch": 0.48534798534798534,
"grad_norm": 0.31553077697753906,
"learning_rate": 1.212364052661706e-05,
"loss": 2.3987,
"step": 5300
},
{
"epoch": 0.4945054945054945,
"grad_norm": 0.2647671103477478,
"learning_rate": 1.2352604464796796e-05,
"loss": 2.4103,
"step": 5400
},
{
"epoch": 0.5036630036630036,
"grad_norm": 0.2579342722892761,
"learning_rate": 1.2581568402976532e-05,
"loss": 2.3984,
"step": 5500
},
{
"epoch": 0.5128205128205128,
"grad_norm": 0.365583211183548,
"learning_rate": 1.281053234115627e-05,
"loss": 2.3927,
"step": 5600
},
{
"epoch": 0.521978021978022,
"grad_norm": 0.3024210035800934,
"learning_rate": 1.3039496279336006e-05,
"loss": 2.3956,
"step": 5700
},
{
"epoch": 0.5311355311355311,
"grad_norm": 0.31242358684539795,
"learning_rate": 1.3268460217515742e-05,
"loss": 2.3891,
"step": 5800
},
{
"epoch": 0.5402930402930403,
"grad_norm": 0.31390878558158875,
"learning_rate": 1.349742415569548e-05,
"loss": 2.3865,
"step": 5900
},
{
"epoch": 0.5494505494505495,
"grad_norm": 0.3613923192024231,
"learning_rate": 1.3726388093875217e-05,
"loss": 2.3919,
"step": 6000
},
{
"epoch": 0.5586080586080586,
"grad_norm": 0.30997100472450256,
"learning_rate": 1.3955352032054951e-05,
"loss": 2.3846,
"step": 6100
},
{
"epoch": 0.5677655677655677,
"grad_norm": 0.46809422969818115,
"learning_rate": 1.418431597023469e-05,
"loss": 2.3826,
"step": 6200
},
{
"epoch": 0.5769230769230769,
"grad_norm": 0.2764199674129486,
"learning_rate": 1.4413279908414425e-05,
"loss": 2.3893,
"step": 6300
},
{
"epoch": 0.5860805860805861,
"grad_norm": 0.3119123578071594,
"learning_rate": 1.4642243846594162e-05,
"loss": 2.3756,
"step": 6400
},
{
"epoch": 0.5952380952380952,
"grad_norm": 0.3249548375606537,
"learning_rate": 1.48712077847739e-05,
"loss": 2.3898,
"step": 6500
},
{
"epoch": 0.6043956043956044,
"grad_norm": 0.35118070244789124,
"learning_rate": 1.5100171722953636e-05,
"loss": 2.3845,
"step": 6600
},
{
"epoch": 0.6135531135531136,
"grad_norm": 0.307021826505661,
"learning_rate": 1.532913566113337e-05,
"loss": 2.3841,
"step": 6700
},
{
"epoch": 0.6227106227106227,
"grad_norm": 0.29582327604293823,
"learning_rate": 1.555809959931311e-05,
"loss": 2.3842,
"step": 6800
},
{
"epoch": 0.6318681318681318,
"grad_norm": 0.349324107170105,
"learning_rate": 1.5787063537492846e-05,
"loss": 2.3843,
"step": 6900
},
{
"epoch": 0.6410256410256411,
"grad_norm": 0.31297802925109863,
"learning_rate": 1.6016027475672583e-05,
"loss": 2.3819,
"step": 7000
},
{
"epoch": 0.6501831501831502,
"grad_norm": 0.27480989694595337,
"learning_rate": 1.624499141385232e-05,
"loss": 2.3553,
"step": 7100
},
{
"epoch": 0.6593406593406593,
"grad_norm": 0.33451569080352783,
"learning_rate": 1.6473955352032055e-05,
"loss": 2.3906,
"step": 7200
},
{
"epoch": 0.6684981684981685,
"grad_norm": 0.27567845582962036,
"learning_rate": 1.670291929021179e-05,
"loss": 2.3804,
"step": 7300
},
{
"epoch": 0.6776556776556777,
"grad_norm": 0.4053930938243866,
"learning_rate": 1.693188322839153e-05,
"loss": 2.376,
"step": 7400
},
{
"epoch": 0.6868131868131868,
"grad_norm": 0.30826249718666077,
"learning_rate": 1.7160847166571267e-05,
"loss": 2.3797,
"step": 7500
},
{
"epoch": 0.6959706959706959,
"grad_norm": 0.3094613254070282,
"learning_rate": 1.7389811104751004e-05,
"loss": 2.3781,
"step": 7600
},
{
"epoch": 0.7051282051282052,
"grad_norm": 0.2915087342262268,
"learning_rate": 1.761877504293074e-05,
"loss": 2.3694,
"step": 7700
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.276742547750473,
"learning_rate": 1.7847738981110476e-05,
"loss": 2.3586,
"step": 7800
},
{
"epoch": 0.7234432234432234,
"grad_norm": 0.2940647602081299,
"learning_rate": 1.8076702919290216e-05,
"loss": 2.377,
"step": 7900
},
{
"epoch": 0.7326007326007326,
"grad_norm": 0.3118553161621094,
"learning_rate": 1.8305666857469952e-05,
"loss": 2.363,
"step": 8000
},
{
"epoch": 0.7417582417582418,
"grad_norm": 0.29962947964668274,
"learning_rate": 1.8534630795649685e-05,
"loss": 2.3627,
"step": 8100
},
{
"epoch": 0.7509157509157509,
"grad_norm": 0.38140273094177246,
"learning_rate": 1.8763594733829424e-05,
"loss": 2.3629,
"step": 8200
},
{
"epoch": 0.76007326007326,
"grad_norm": 0.29137617349624634,
"learning_rate": 1.899255867200916e-05,
"loss": 2.3676,
"step": 8300
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.3427438735961914,
"learning_rate": 1.9221522610188897e-05,
"loss": 2.3621,
"step": 8400
},
{
"epoch": 0.7783882783882784,
"grad_norm": 0.30704206228256226,
"learning_rate": 1.9450486548368633e-05,
"loss": 2.3508,
"step": 8500
},
{
"epoch": 0.7875457875457875,
"grad_norm": 0.34004345536231995,
"learning_rate": 1.967945048654837e-05,
"loss": 2.3499,
"step": 8600
},
{
"epoch": 0.7967032967032966,
"grad_norm": 0.39838922023773193,
"learning_rate": 1.9908414424728106e-05,
"loss": 2.3544,
"step": 8700
},
{
"epoch": 0.8058608058608059,
"grad_norm": 0.3006947636604309,
"learning_rate": 1.9962812240794344e-05,
"loss": 2.36,
"step": 8800
},
{
"epoch": 0.815018315018315,
"grad_norm": 0.3303048610687256,
"learning_rate": 1.973655456746745e-05,
"loss": 2.3457,
"step": 8900
},
{
"epoch": 0.8241758241758241,
"grad_norm": 0.31546804308891296,
"learning_rate": 1.9309362692059617e-05,
"loss": 2.3522,
"step": 9000
},
{
"epoch": 0.8333333333333334,
"grad_norm": 0.3123638927936554,
"learning_rate": 1.869005261390877e-05,
"loss": 2.3552,
"step": 9100
},
{
"epoch": 0.8424908424908425,
"grad_norm": 0.36076679825782776,
"learning_rate": 1.789140509396394e-05,
"loss": 2.3434,
"step": 9200
},
{
"epoch": 0.8516483516483516,
"grad_norm": 0.3313111960887909,
"learning_rate": 1.69299018970067e-05,
"loss": 2.3782,
"step": 9300
},
{
"epoch": 0.8608058608058609,
"grad_norm": 0.3116619884967804,
"learning_rate": 1.582538565590479e-05,
"loss": 2.3398,
"step": 9400
},
{
"epoch": 0.86996336996337,
"grad_norm": 0.33244970440864563,
"learning_rate": 1.4600650377311523e-05,
"loss": 2.3443,
"step": 9500
},
{
"epoch": 0.8791208791208791,
"grad_norm": 0.3026200830936432,
"learning_rate": 1.3280971039583906e-05,
"loss": 2.3442,
"step": 9600
},
{
"epoch": 0.8882783882783882,
"grad_norm": 0.35270655155181885,
"learning_rate": 1.1893581990651848e-05,
"loss": 2.337,
"step": 9700
},
{
"epoch": 0.8974358974358975,
"grad_norm": 0.34508877992630005,
"learning_rate": 1.046711491019129e-05,
"loss": 2.3637,
"step": 9800
},
{
"epoch": 0.9065934065934066,
"grad_norm": 0.3241460919380188,
"learning_rate": 9.031007934929237e-06,
"loss": 2.3529,
"step": 9900
},
{
"epoch": 0.9157509157509157,
"grad_norm": 0.3581334054470062,
"learning_rate": 7.6148981410179966e-06,
"loss": 2.3524,
"step": 10000
},
{
"epoch": 0.924908424908425,
"grad_norm": 0.34301674365997314,
"learning_rate": 6.248009920877591e-06,
"loss": 2.3532,
"step": 10100
},
{
"epoch": 0.9340659340659341,
"grad_norm": 0.30783897638320923,
"learning_rate": 4.95855187663155e-06,
"loss": 2.3507,
"step": 10200
},
{
"epoch": 0.9432234432234432,
"grad_norm": 0.40239864587783813,
"learning_rate": 3.773134676503629e-06,
"loss": 2.3488,
"step": 10300
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.3629162907600403,
"learning_rate": 2.7162218879283174e-06,
"loss": 2.3507,
"step": 10400
},
{
"epoch": 0.9615384615384616,
"grad_norm": 0.3233180046081543,
"learning_rate": 1.8096251205843685e-06,
"loss": 2.3369,
"step": 10500
},
{
"epoch": 0.9706959706959707,
"grad_norm": 0.3085317313671112,
"learning_rate": 1.0720538981326557e-06,
"loss": 2.3495,
"step": 10600
},
{
"epoch": 0.9798534798534798,
"grad_norm": 0.332645446062088,
"learning_rate": 5.187295479980136e-07,
"loss": 2.3448,
"step": 10700
},
{
"epoch": 0.989010989010989,
"grad_norm": 0.3299219012260437,
"learning_rate": 1.6107107738836835e-07,
"loss": 2.3427,
"step": 10800
},
{
"epoch": 0.9981684981684982,
"grad_norm": 0.41852566599845886,
"learning_rate": 6.4595181575910496e-09,
"loss": 2.3397,
"step": 10900
},
{
"epoch": 1.0,
"step": 10920,
"total_flos": 1.59124040841796e+18,
"train_loss": 2.428147726093893,
"train_runtime": 2109.7476,
"train_samples_per_second": 82.811,
"train_steps_per_second": 5.176
}
],
"logging_steps": 100,
"max_steps": 10920,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.59124040841796e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
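
For reference, here is a minimal sketch of how a trainer_state.json like the one above can be inspected offline. The local path "trainer_state.json" and the optional matplotlib plotting step are assumptions for illustration, not part of the repository; the key names (log_history, loss, learning_rate, step, train_loss) are taken directly from the file itself.

import json

# Load the trainer state written by the Hugging Face Trainer.
# (Path is an assumption; point it at wherever the file was downloaded.)
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step log entries carry "loss"; the final summary entry carries "train_loss".
logs = [e for e in state["log_history"] if "loss" in e]
summary = [e for e in state["log_history"] if "train_loss" in e]

steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]
lrs = [e["learning_rate"] for e in logs]

print(f"logged points: {len(logs)}")
print(f"first/last loss: {losses[0]:.4f} -> {losses[-1]:.4f}")
if summary:
    print(f"mean training loss: {summary[0]['train_loss']:.4f}")

# Optional: plot the loss and learning-rate curves (matplotlib is an assumption).
try:
    import matplotlib.pyplot as plt

    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
    ax1.plot(steps, losses)
    ax1.set_ylabel("training loss")
    ax2.plot(steps, lrs)
    ax2.set_ylabel("learning rate")
    ax2.set_xlabel("global step")
    fig.savefig("training_curves.png", dpi=150)
except ImportError:
    pass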