zephyr-7b-dpo-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9980806142034548,
"eval_steps": 1000000,
"global_step": 390,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0025591810620601407,
"grad_norm": 709.6319834867974,
"learning_rate": 5.128205128205128e-09,
"logits/chosen": -2.5583817958831787,
"logits/rejected": -2.4487552642822266,
"logps/chosen": -258.1644592285156,
"logps/rejected": -216.25729370117188,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.025591810620601407,
"grad_norm": 670.9725237886468,
"learning_rate": 5.128205128205127e-08,
"logits/chosen": -2.606004238128662,
"logits/rejected": -2.553126573562622,
"logps/chosen": -267.5910339355469,
"logps/rejected": -217.67140197753906,
"loss": 0.6976,
"rewards/accuracies": 0.4513888955116272,
"rewards/chosen": -0.013270225375890732,
"rewards/margins": 0.0032119054812937975,
"rewards/rejected": -0.01648213155567646,
"step": 10
},
{
"epoch": 0.05118362124120281,
"grad_norm": 590.5735450897452,
"learning_rate": 1.0256410256410255e-07,
"logits/chosen": -2.6245453357696533,
"logits/rejected": -2.562206745147705,
"logps/chosen": -261.142333984375,
"logps/rejected": -207.0738067626953,
"loss": 0.6676,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": 0.08092932403087616,
"rewards/margins": 0.09077399969100952,
"rewards/rejected": -0.009844672866165638,
"step": 20
},
{
"epoch": 0.07677543186180422,
"grad_norm": 644.2505273786248,
"learning_rate": 1.5384615384615385e-07,
"logits/chosen": -2.6289420127868652,
"logits/rejected": -2.5566790103912354,
"logps/chosen": -253.9628143310547,
"logps/rejected": -198.890380859375,
"loss": 0.5156,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.5922147035598755,
"rewards/margins": 0.6031575798988342,
"rewards/rejected": -0.010942881926894188,
"step": 30
},
{
"epoch": 0.10236724248240563,
"grad_norm": 428.1589759637874,
"learning_rate": 1.999959945379852e-07,
"logits/chosen": -2.637456178665161,
"logits/rejected": -2.5609130859375,
"logps/chosen": -247.48666381835938,
"logps/rejected": -193.59280395507812,
"loss": 0.3866,
"rewards/accuracies": 0.8062499761581421,
"rewards/chosen": 1.4313427209854126,
"rewards/margins": 1.3846019506454468,
"rewards/rejected": 0.04674070328474045,
"step": 40
},
{
"epoch": 0.12795905310300704,
"grad_norm": 366.0325785868742,
"learning_rate": 1.9951572723309917e-07,
"logits/chosen": -2.684983730316162,
"logits/rejected": -2.618076801300049,
"logps/chosen": -262.3794250488281,
"logps/rejected": -204.0382843017578,
"loss": 0.3488,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 2.9288697242736816,
"rewards/margins": 2.3735203742980957,
"rewards/rejected": 0.5553494095802307,
"step": 50
},
{
"epoch": 0.15355086372360843,
"grad_norm": 375.0458504104559,
"learning_rate": 1.9823877374156647e-07,
"logits/chosen": -2.669403553009033,
"logits/rejected": -2.6018548011779785,
"logps/chosen": -262.41534423828125,
"logps/rejected": -206.5387420654297,
"loss": 0.311,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 3.2061855792999268,
"rewards/margins": 2.446310520172119,
"rewards/rejected": 0.7598745822906494,
"step": 60
},
{
"epoch": 0.17914267434420986,
"grad_norm": 371.9136724850062,
"learning_rate": 1.9617535688178958e-07,
"logits/chosen": -2.6535415649414062,
"logits/rejected": -2.590367078781128,
"logps/chosen": -290.74871826171875,
"logps/rejected": -224.1929168701172,
"loss": 0.3051,
"rewards/accuracies": 0.875,
"rewards/chosen": 3.5809733867645264,
"rewards/margins": 3.0821049213409424,
"rewards/rejected": 0.4988683760166168,
"step": 70
},
{
"epoch": 0.20473448496481125,
"grad_norm": 381.95411975681657,
"learning_rate": 1.9334199560765839e-07,
"logits/chosen": -2.6085634231567383,
"logits/rejected": -2.5423429012298584,
"logps/chosen": -260.9337463378906,
"logps/rejected": -203.12725830078125,
"loss": 0.2569,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 3.3122222423553467,
"rewards/margins": 3.3377063274383545,
"rewards/rejected": -0.025484371930360794,
"step": 80
},
{
"epoch": 0.23032629558541268,
"grad_norm": 331.9831865016541,
"learning_rate": 1.897613727639014e-07,
"logits/chosen": -2.610903739929199,
"logits/rejected": -2.556959867477417,
"logps/chosen": -256.61138916015625,
"logps/rejected": -215.09701538085938,
"loss": 0.2658,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 2.5669326782226562,
"rewards/margins": 2.867710590362549,
"rewards/rejected": -0.3007778227329254,
"step": 90
},
{
"epoch": 0.2559181062060141,
"grad_norm": 634.1345493995642,
"learning_rate": 1.8546215349560202e-07,
"logits/chosen": -2.6384997367858887,
"logits/rejected": -2.563520908355713,
"logps/chosen": -236.5784912109375,
"logps/rejected": -199.6844940185547,
"loss": 0.3117,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 2.7952113151550293,
"rewards/margins": 3.302074909210205,
"rewards/rejected": -0.5068637132644653,
"step": 100
},
{
"epoch": 0.28150991682661547,
"grad_norm": 319.85079714910466,
"learning_rate": 1.8047875576562553e-07,
"logits/chosen": -2.630192518234253,
"logits/rejected": -2.549891710281372,
"logps/chosen": -258.26971435546875,
"logps/rejected": -208.4285430908203,
"loss": 0.282,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 3.702361583709717,
"rewards/margins": 3.704643726348877,
"rewards/rejected": -0.002281466033309698,
"step": 110
},
{
"epoch": 0.30710172744721687,
"grad_norm": 376.18943633320504,
"learning_rate": 1.748510748171101e-07,
"logits/chosen": -2.6130287647247314,
"logits/rejected": -2.543454170227051,
"logps/chosen": -262.194580078125,
"logps/rejected": -206.72488403320312,
"loss": 0.2802,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 3.633894443511963,
"rewards/margins": 3.8131375312805176,
"rewards/rejected": -0.17924347519874573,
"step": 120
},
{
"epoch": 0.3326935380678183,
"grad_norm": 258.08224292776333,
"learning_rate": 1.6862416378687336e-07,
"logits/chosen": -2.587902069091797,
"logits/rejected": -2.5218377113342285,
"logps/chosen": -250.2627410888672,
"logps/rejected": -193.98731994628906,
"loss": 0.2644,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 3.5202510356903076,
"rewards/margins": 3.647292375564575,
"rewards/rejected": -0.12704159319400787,
"step": 130
},
{
"epoch": 0.3582853486884197,
"grad_norm": 312.9816016514414,
"learning_rate": 1.6184787302662547e-07,
"logits/chosen": -2.6076598167419434,
"logits/rejected": -2.539602279663086,
"logps/chosen": -267.7660217285156,
"logps/rejected": -213.330078125,
"loss": 0.3055,
"rewards/accuracies": 0.875,
"rewards/chosen": 3.705143690109253,
"rewards/margins": 3.5916450023651123,
"rewards/rejected": 0.11349865049123764,
"step": 140
},
{
"epoch": 0.3838771593090211,
"grad_norm": 298.63492430036257,
"learning_rate": 1.5457645101945046e-07,
"logits/chosen": -2.588223457336426,
"logits/rejected": -2.521327495574951,
"logps/chosen": -262.39776611328125,
"logps/rejected": -210.2900848388672,
"loss": 0.2675,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 4.096975803375244,
"rewards/margins": 4.044339656829834,
"rewards/rejected": 0.05263558775186539,
"step": 150
},
{
"epoch": 0.4094689699296225,
"grad_norm": 277.3876418755778,
"learning_rate": 1.4686811008647035e-07,
"logits/chosen": -2.5860092639923096,
"logits/rejected": -2.5171661376953125,
"logps/chosen": -241.29501342773438,
"logps/rejected": -192.94955444335938,
"loss": 0.2544,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 3.8214523792266846,
"rewards/margins": 3.785486936569214,
"rewards/rejected": 0.0359656922519207,
"step": 160
},
{
"epoch": 0.4350607805502239,
"grad_norm": 339.61682629601955,
"learning_rate": 1.387845603604855e-07,
"logits/chosen": -2.5826542377471924,
"logits/rejected": -2.51751446723938,
"logps/chosen": -262.99298095703125,
"logps/rejected": -206.5457000732422,
"loss": 0.2334,
"rewards/accuracies": 0.875,
"rewards/chosen": 3.8493824005126953,
"rewards/margins": 3.933476209640503,
"rewards/rejected": -0.08409398794174194,
"step": 170
},
{
"epoch": 0.46065259117082535,
"grad_norm": 293.5783730044579,
"learning_rate": 1.3039051575742468e-07,
"logits/chosen": -2.6330299377441406,
"logits/rejected": -2.547426700592041,
"logps/chosen": -235.4442596435547,
"logps/rejected": -183.63687133789062,
"loss": 0.2871,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 3.294564723968506,
"rewards/margins": 3.4877796173095703,
"rewards/rejected": -0.193215012550354,
"step": 180
},
{
"epoch": 0.48624440179142675,
"grad_norm": 298.0737945063753,
"learning_rate": 1.2175317590061675e-07,
"logits/chosen": -2.5894675254821777,
"logits/rejected": -2.51374888420105,
"logps/chosen": -257.42132568359375,
"logps/rejected": -199.62106323242188,
"loss": 0.239,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 3.699636936187744,
"rewards/margins": 3.845207929611206,
"rewards/rejected": -0.1455712914466858,
"step": 190
},
{
"epoch": 0.5118362124120281,
"grad_norm": 326.3771289852159,
"learning_rate": 1.1294168814540553e-07,
"logits/chosen": -2.625087261199951,
"logits/rejected": -2.5350747108459473,
"logps/chosen": -271.9561462402344,
"logps/rejected": -214.1556854248047,
"loss": 0.2317,
"rewards/accuracies": 0.90625,
"rewards/chosen": 4.283473014831543,
"rewards/margins": 4.552158832550049,
"rewards/rejected": -0.2686860263347626,
"step": 200
},
{
"epoch": 0.5374280230326296,
"grad_norm": 510.1343443372893,
"learning_rate": 1.0402659401094151e-07,
"logits/chosen": -2.602785587310791,
"logits/rejected": -2.5288333892822266,
"logps/chosen": -264.21099853515625,
"logps/rejected": -207.3443145751953,
"loss": 0.3153,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 3.421428680419922,
"rewards/margins": 3.9131996631622314,
"rewards/rejected": -0.4917708933353424,
"step": 210
},
{
"epoch": 0.5630198336532309,
"grad_norm": 362.61099626331173,
"learning_rate": 9.507926445081218e-08,
"logits/chosen": -2.607333183288574,
"logits/rejected": -2.5410573482513428,
"logps/chosen": -255.5386199951172,
"logps/rejected": -210.58261108398438,
"loss": 0.2657,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 3.069124698638916,
"rewards/margins": 3.4770140647888184,
"rewards/rejected": -0.4078896641731262,
"step": 220
},
{
"epoch": 0.5886116442738324,
"grad_norm": 270.86067735708986,
"learning_rate": 8.61713284835267e-08,
"logits/chosen": -2.6318671703338623,
"logits/rejected": -2.5431408882141113,
"logps/chosen": -265.3260803222656,
"logps/rejected": -212.6390380859375,
"loss": 0.249,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 4.038520812988281,
"rewards/margins": 3.9009087085723877,
"rewards/rejected": 0.1376120150089264,
"step": 230
},
{
"epoch": 0.6142034548944337,
"grad_norm": 496.26361322510223,
"learning_rate": 7.73740997570278e-08,
"logits/chosen": -2.6172757148742676,
"logits/rejected": -2.566117763519287,
"logps/chosen": -259.39569091796875,
"logps/rejected": -214.13717651367188,
"loss": 0.2362,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 4.167778968811035,
"rewards/margins": 4.450715065002441,
"rewards/rejected": -0.28293582797050476,
"step": 240
},
{
"epoch": 0.6397952655150352,
"grad_norm": 385.39306248287744,
"learning_rate": 6.875800563794424e-08,
"logits/chosen": -2.6301958560943604,
"logits/rejected": -2.5562362670898438,
"logps/chosen": -264.5022277832031,
"logps/rejected": -217.06472778320312,
"loss": 0.2541,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 3.638233184814453,
"rewards/margins": 3.9798378944396973,
"rewards/rejected": -0.34160494804382324,
"step": 250
},
{
"epoch": 0.6653870761356366,
"grad_norm": 246.0647695237683,
"learning_rate": 6.039202339608431e-08,
"logits/chosen": -2.6222901344299316,
"logits/rejected": -2.5577120780944824,
"logps/chosen": -258.84637451171875,
"logps/rejected": -203.7962188720703,
"loss": 0.2317,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 3.5067996978759766,
"rewards/margins": 3.801957607269287,
"rewards/rejected": -0.295158326625824,
"step": 260
},
{
"epoch": 0.690978886756238,
"grad_norm": 382.6551210350461,
"learning_rate": 5.2343127997869205e-08,
"logits/chosen": -2.602179765701294,
"logits/rejected": -2.5085558891296387,
"logps/chosen": -270.6271057128906,
"logps/rejected": -222.81655883789062,
"loss": 0.2926,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.9784107208251953,
"rewards/margins": 4.063758850097656,
"rewards/rejected": -1.085348129272461,
"step": 270
},
{
"epoch": 0.7165706973768394,
"grad_norm": 291.15530026271324,
"learning_rate": 4.4675755929468636e-08,
"logits/chosen": -2.627225160598755,
"logits/rejected": -2.565744638442993,
"logps/chosen": -266.7916259765625,
"logps/rejected": -214.30709838867188,
"loss": 0.2375,
"rewards/accuracies": 0.90625,
"rewards/chosen": 3.4573512077331543,
"rewards/margins": 4.138546943664551,
"rewards/rejected": -0.6811951398849487,
"step": 280
},
{
"epoch": 0.7421625079974408,
"grad_norm": 302.9770657235692,
"learning_rate": 3.745128934207224e-08,
"logits/chosen": -2.590620279312134,
"logits/rejected": -2.5388922691345215,
"logps/chosen": -265.51971435546875,
"logps/rejected": -212.6427764892578,
"loss": 0.2286,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 3.2131683826446533,
"rewards/margins": 3.975499391555786,
"rewards/rejected": -0.7623313665390015,
"step": 290
},
{
"epoch": 0.7677543186180422,
"grad_norm": 339.1306333767976,
"learning_rate": 3.0727564649040063e-08,
"logits/chosen": -2.6111063957214355,
"logits/rejected": -2.5518836975097656,
"logps/chosen": -271.9001159667969,
"logps/rejected": -216.2710723876953,
"loss": 0.2446,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 3.3372998237609863,
"rewards/margins": 3.9620718955993652,
"rewards/rejected": -0.6247718930244446,
"step": 300
},
{
"epoch": 0.7933461292386437,
"grad_norm": 235.34557136252968,
"learning_rate": 2.4558409508920985e-08,
"logits/chosen": -2.5742857456207275,
"logits/rejected": -2.499851703643799,
"logps/chosen": -275.28167724609375,
"logps/rejected": -211.0914764404297,
"loss": 0.2598,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 3.4961440563201904,
"rewards/margins": 4.247152805328369,
"rewards/rejected": -0.7510083913803101,
"step": 310
},
{
"epoch": 0.818937939859245,
"grad_norm": 368.2414347720992,
"learning_rate": 1.899321190108335e-08,
"logits/chosen": -2.59656023979187,
"logits/rejected": -2.545151472091675,
"logps/chosen": -258.7973327636719,
"logps/rejected": -200.56219482421875,
"loss": 0.2329,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 3.0572078227996826,
"rewards/margins": 3.9228408336639404,
"rewards/rejected": -0.8656327128410339,
"step": 320
},
{
"epoch": 0.8445297504798465,
"grad_norm": 412.2800478150093,
"learning_rate": 1.4076524743778317e-08,
"logits/chosen": -2.6195101737976074,
"logits/rejected": -2.55584716796875,
"logps/chosen": -262.2277526855469,
"logps/rejected": -213.35403442382812,
"loss": 0.231,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 3.7354748249053955,
"rewards/margins": 4.0478105545043945,
"rewards/rejected": -0.31233564019203186,
"step": 330
},
{
"epoch": 0.8701215611004478,
"grad_norm": 291.1936087568274,
"learning_rate": 9.847709219918398e-09,
"logits/chosen": -2.6071531772613525,
"logits/rejected": -2.5460381507873535,
"logps/chosen": -257.75164794921875,
"logps/rejected": -206.60986328125,
"loss": 0.221,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 3.5297904014587402,
"rewards/margins": 4.399306297302246,
"rewards/rejected": -0.8695155382156372,
"step": 340
},
{
"epoch": 0.8957133717210493,
"grad_norm": 301.99507828681374,
"learning_rate": 6.340619665972846e-09,
"logits/chosen": -2.617502212524414,
"logits/rejected": -2.5671114921569824,
"logps/chosen": -267.4635009765625,
"logps/rejected": -222.5786895751953,
"loss": 0.2579,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 3.393681287765503,
"rewards/margins": 3.9400668144226074,
"rewards/rejected": -0.546385645866394,
"step": 350
},
{
"epoch": 0.9213051823416507,
"grad_norm": 348.99526280716225,
"learning_rate": 3.583332546643769e-09,
"logits/chosen": -2.6180646419525146,
"logits/rejected": -2.5445971488952637,
"logps/chosen": -272.35406494140625,
"logps/rejected": -214.8975067138672,
"loss": 0.2825,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 3.431410551071167,
"rewards/margins": 4.0157270431518555,
"rewards/rejected": -0.584316074848175,
"step": 360
},
{
"epoch": 0.946896992962252,
"grad_norm": 251.0460885716357,
"learning_rate": 1.5979216850509847e-09,
"logits/chosen": -2.5794007778167725,
"logits/rejected": -2.5391972064971924,
"logps/chosen": -271.82513427734375,
"logps/rejected": -225.1968231201172,
"loss": 0.2529,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 3.722885847091675,
"rewards/margins": 4.0348358154296875,
"rewards/rejected": -0.3119499981403351,
"step": 370
},
{
"epoch": 0.9724888035828535,
"grad_norm": 201.79043052952954,
"learning_rate": 4.002815478505006e-10,
"logits/chosen": -2.6285715103149414,
"logits/rejected": -2.585494041442871,
"logps/chosen": -261.1199951171875,
"logps/rejected": -226.61105346679688,
"loss": 0.255,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 3.6326937675476074,
"rewards/margins": 4.036477088928223,
"rewards/rejected": -0.40378379821777344,
"step": 380
},
{
"epoch": 0.9980806142034548,
"grad_norm": 458.24518941114127,
"learning_rate": 0.0,
"logits/chosen": -2.641213893890381,
"logits/rejected": -2.575150966644287,
"logps/chosen": -248.17904663085938,
"logps/rejected": -206.1395721435547,
"loss": 0.2365,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 3.4077067375183105,
"rewards/margins": 4.0728960037231445,
"rewards/rejected": -0.6651893854141235,
"step": 390
},
{
"epoch": 0.9980806142034548,
"step": 390,
"total_flos": 0.0,
"train_loss": 0.2947830175742125,
"train_runtime": 6391.2384,
"train_samples_per_second": 7.823,
"train_steps_per_second": 0.061
}
],
"logging_steps": 10,
"max_steps": 390,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
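
Below is a minimal sketch, not part of the original artifact, of how one might inspect this trainer state offline. It assumes the JSON above is saved locally as trainer_state.json and that matplotlib is installed; the file path and the choice of plotted metrics (DPO loss and rewards/margins per logging step) are illustrative assumptions.

```python
# Sketch: load trainer_state.json and plot the logged DPO loss and reward margin
# against the global step. Assumes a local copy of the JSON shown above.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:  # hypothetical local path
    state = json.load(f)

# Keep only per-step log entries; the final summary entry has "train_loss"
# rather than "loss", so it is excluded by this filter.
logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in logs]
loss = [entry["loss"] for entry in logs]
margins = [entry["rewards/margins"] for entry in logs]

fig, (ax_loss, ax_margin) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, loss)
ax_loss.set_xlabel("global step")
ax_loss.set_ylabel("DPO loss")
ax_margin.plot(steps, margins)
ax_margin.set_xlabel("global step")
ax_margin.set_ylabel("rewards/margins")
fig.tight_layout()
plt.show()
```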