zephyr-7b-dpo-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 368,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 1359.2177158077461,
"learning_rate": 2.702702702702703e-10,
"logits/chosen": -1.3332719802856445,
"logits/rejected": -1.246394395828247,
"logps/chosen": -286.9539794921875,
"logps/rejected": -263.3782958984375,
"loss": 0.7007,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.05,
"grad_norm": 1659.9667867776882,
"learning_rate": 2.702702702702703e-09,
"logits/chosen": -1.6176425218582153,
"logits/rejected": -1.3966516256332397,
"logps/chosen": -342.49456787109375,
"logps/rejected": -294.5242614746094,
"loss": 0.7618,
"rewards/accuracies": 0.4340277910232544,
"rewards/chosen": 0.040204986929893494,
"rewards/margins": 0.027825627475976944,
"rewards/rejected": 0.012379361316561699,
"step": 10
},
{
"epoch": 0.11,
"grad_norm": 1365.2835589869765,
"learning_rate": 5.405405405405406e-09,
"logits/chosen": -1.4892847537994385,
"logits/rejected": -1.311715006828308,
"logps/chosen": -314.7313537597656,
"logps/rejected": -279.33746337890625,
"loss": 0.7414,
"rewards/accuracies": 0.5406249761581421,
"rewards/chosen": 0.03467293456196785,
"rewards/margins": 0.06634848564863205,
"rewards/rejected": -0.0316755548119545,
"step": 20
},
{
"epoch": 0.16,
"grad_norm": 1495.215254347967,
"learning_rate": 8.108108108108109e-09,
"logits/chosen": -1.5478875637054443,
"logits/rejected": -1.3803514242172241,
"logps/chosen": -324.8534240722656,
"logps/rejected": -286.27276611328125,
"loss": 0.7533,
"rewards/accuracies": 0.5218750238418579,
"rewards/chosen": 0.05265723541378975,
"rewards/margins": 0.04645369574427605,
"rewards/rejected": 0.006203538738191128,
"step": 30
},
{
"epoch": 0.22,
"grad_norm": 1385.2297985942057,
"learning_rate": 9.997973265157192e-09,
"logits/chosen": -1.5357484817504883,
"logits/rejected": -1.3583762645721436,
"logps/chosen": -325.4037780761719,
"logps/rejected": -285.64508056640625,
"loss": 0.7565,
"rewards/accuracies": 0.4937500059604645,
"rewards/chosen": -0.010429712943732738,
"rewards/margins": -0.014903778210282326,
"rewards/rejected": 0.0044740648008883,
"step": 40
},
{
"epoch": 0.27,
"grad_norm": 1517.4779086138956,
"learning_rate": 9.961988113473708e-09,
"logits/chosen": -1.5413590669631958,
"logits/rejected": -1.3948113918304443,
"logps/chosen": -337.04327392578125,
"logps/rejected": -297.2876892089844,
"loss": 0.7454,
"rewards/accuracies": 0.47187501192092896,
"rewards/chosen": -0.022386690601706505,
"rewards/margins": -0.045912474393844604,
"rewards/rejected": 0.0235257837921381,
"step": 50
},
{
"epoch": 0.33,
"grad_norm": 1317.1513962511742,
"learning_rate": 9.881337335184878e-09,
"logits/chosen": -1.5795971155166626,
"logits/rejected": -1.43094801902771,
"logps/chosen": -319.79681396484375,
"logps/rejected": -285.0555725097656,
"loss": 0.6982,
"rewards/accuracies": 0.606249988079071,
"rewards/chosen": 0.046097733080387115,
"rewards/margins": 0.21919748187065125,
"rewards/rejected": -0.17309975624084473,
"step": 60
},
{
"epoch": 0.38,
"grad_norm": 1446.8726182465537,
"learning_rate": 9.756746912994832e-09,
"logits/chosen": -1.5057274103164673,
"logits/rejected": -1.343697190284729,
"logps/chosen": -312.12432861328125,
"logps/rejected": -275.1330871582031,
"loss": 0.6973,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": -0.016833370551466942,
"rewards/margins": 0.13973814249038696,
"rewards/rejected": -0.15657152235507965,
"step": 70
},
{
"epoch": 0.43,
"grad_norm": 1216.5073312303066,
"learning_rate": 9.589338354885628e-09,
"logits/chosen": -1.5987694263458252,
"logits/rejected": -1.4453301429748535,
"logps/chosen": -323.3362731933594,
"logps/rejected": -288.1485595703125,
"loss": 0.6695,
"rewards/accuracies": 0.565625011920929,
"rewards/chosen": 0.04707593470811844,
"rewards/margins": 0.22710590064525604,
"rewards/rejected": -0.180029958486557,
"step": 80
},
{
"epoch": 0.49,
"grad_norm": 1161.721503342428,
"learning_rate": 9.380618598797472e-09,
"logits/chosen": -1.6032607555389404,
"logits/rejected": -1.4058634042739868,
"logps/chosen": -319.96173095703125,
"logps/rejected": -281.8211364746094,
"loss": 0.6501,
"rewards/accuracies": 0.628125011920929,
"rewards/chosen": 0.11853907257318497,
"rewards/margins": 0.32674410939216614,
"rewards/rejected": -0.20820502936840057,
"step": 90
},
{
"epoch": 0.54,
"grad_norm": 1255.206435160775,
"learning_rate": 9.132466447838596e-09,
"logits/chosen": -1.5419257879257202,
"logits/rejected": -1.3666443824768066,
"logps/chosen": -321.9004821777344,
"logps/rejected": -282.74346923828125,
"loss": 0.6241,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": 0.21292057633399963,
"rewards/margins": 0.4663107991218567,
"rewards/rejected": -0.25339022278785706,
"step": 100
},
{
"epoch": 0.6,
"grad_norm": 1196.6092743226689,
"learning_rate": 8.847115658129039e-09,
"logits/chosen": -1.50592839717865,
"logits/rejected": -1.3777477741241455,
"logps/chosen": -318.1675720214844,
"logps/rejected": -287.3236083984375,
"loss": 0.6068,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": 0.17643409967422485,
"rewards/margins": 0.4380553662776947,
"rewards/rejected": -0.26162129640579224,
"step": 110
},
{
"epoch": 0.65,
"grad_norm": 1069.9581355208816,
"learning_rate": 8.527134831514116e-09,
"logits/chosen": -1.5797988176345825,
"logits/rejected": -1.4248679876327515,
"logps/chosen": -331.4310302734375,
"logps/rejected": -297.9786682128906,
"loss": 0.613,
"rewards/accuracies": 0.6187499761581421,
"rewards/chosen": 0.19524307548999786,
"rewards/margins": 0.3385895788669586,
"rewards/rejected": -0.14334650337696075,
"step": 120
},
{
"epoch": 0.71,
"grad_norm": 1001.0236709718766,
"learning_rate": 8.175404294144481e-09,
"logits/chosen": -1.6208512783050537,
"logits/rejected": -1.4344335794448853,
"logps/chosen": -317.17388916015625,
"logps/rejected": -271.62255859375,
"loss": 0.5838,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": 0.3348919153213501,
"rewards/margins": 0.4819467067718506,
"rewards/rejected": -0.14705480635166168,
"step": 130
},
{
"epoch": 0.76,
"grad_norm": 1056.0149896521477,
"learning_rate": 7.79509016905158e-09,
"logits/chosen": -1.566674828529358,
"logits/rejected": -1.417965292930603,
"logps/chosen": -331.0799255371094,
"logps/rejected": -294.3246154785156,
"loss": 0.5738,
"rewards/accuracies": 0.7281249761581421,
"rewards/chosen": 0.5121167898178101,
"rewards/margins": 0.6085957288742065,
"rewards/rejected": -0.09647894650697708,
"step": 140
},
{
"epoch": 0.82,
"grad_norm": 1085.2712824812697,
"learning_rate": 7.389615876105773e-09,
"logits/chosen": -1.5494105815887451,
"logits/rejected": -1.4207683801651,
"logps/chosen": -314.55694580078125,
"logps/rejected": -291.8951110839844,
"loss": 0.5814,
"rewards/accuracies": 0.6656249761581421,
"rewards/chosen": 0.5086601972579956,
"rewards/margins": 0.5659324526786804,
"rewards/rejected": -0.057272158563137054,
"step": 150
},
{
"epoch": 0.87,
"grad_norm": 1147.2094257934837,
"learning_rate": 6.962631315901861e-09,
"logits/chosen": -1.5222028493881226,
"logits/rejected": -1.406696081161499,
"logps/chosen": -317.96502685546875,
"logps/rejected": -291.0884704589844,
"loss": 0.5667,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": 0.5517348051071167,
"rewards/margins": 0.5213624835014343,
"rewards/rejected": 0.030372310429811478,
"step": 160
},
{
"epoch": 0.92,
"grad_norm": 1097.0692572479243,
"learning_rate": 6.517980014965139e-09,
"logits/chosen": -1.5993129014968872,
"logits/rejected": -1.41109299659729,
"logps/chosen": -331.37066650390625,
"logps/rejected": -289.573486328125,
"loss": 0.5518,
"rewards/accuracies": 0.734375,
"rewards/chosen": 0.6186414957046509,
"rewards/margins": 0.6974190473556519,
"rewards/rejected": -0.0787774994969368,
"step": 170
},
{
"epoch": 0.98,
"grad_norm": 1005.7280013223308,
"learning_rate": 6.059664528022266e-09,
"logits/chosen": -1.6032111644744873,
"logits/rejected": -1.4532817602157593,
"logps/chosen": -315.0304260253906,
"logps/rejected": -276.7928771972656,
"loss": 0.5485,
"rewards/accuracies": 0.746874988079071,
"rewards/chosen": 0.6357426047325134,
"rewards/margins": 0.7282391786575317,
"rewards/rejected": -0.09249657392501831,
"step": 180
},
{
"epoch": 1.03,
"grad_norm": 1081.11663192182,
"learning_rate": 5.591810408770492e-09,
"logits/chosen": -1.5532522201538086,
"logits/rejected": -1.3794432878494263,
"logps/chosen": -315.5338439941406,
"logps/rejected": -278.7769470214844,
"loss": 0.5324,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": 0.6404975056648254,
"rewards/margins": 0.7475987076759338,
"rewards/rejected": -0.10710116475820541,
"step": 190
},
{
"epoch": 1.09,
"grad_norm": 994.8740373609369,
"learning_rate": 5.118629073464423e-09,
"logits/chosen": -1.5689371824264526,
"logits/rejected": -1.3581821918487549,
"logps/chosen": -325.8009033203125,
"logps/rejected": -282.7605895996094,
"loss": 0.5279,
"rewards/accuracies": 0.703125,
"rewards/chosen": 0.8169169425964355,
"rewards/margins": 0.8118550181388855,
"rewards/rejected": 0.005061971955001354,
"step": 200
},
{
"epoch": 1.14,
"grad_norm": 1046.062764450064,
"learning_rate": 4.644379891605983e-09,
"logits/chosen": -1.6105190515518188,
"logits/rejected": -1.4331891536712646,
"logps/chosen": -324.65234375,
"logps/rejected": -291.387451171875,
"loss": 0.5257,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.7297837138175964,
"rewards/margins": 0.761337399482727,
"rewards/rejected": -0.03155365586280823,
"step": 210
},
{
"epoch": 1.2,
"grad_norm": 1062.5909355592705,
"learning_rate": 4.173331844980362e-09,
"logits/chosen": -1.542718529701233,
"logits/rejected": -1.4185158014297485,
"logps/chosen": -323.8753356933594,
"logps/rejected": -293.48626708984375,
"loss": 0.5141,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": 0.7222188711166382,
"rewards/margins": 0.7484906911849976,
"rewards/rejected": -0.026271820068359375,
"step": 220
},
{
"epoch": 1.25,
"grad_norm": 1054.001314243816,
"learning_rate": 3.7097251001664824e-09,
"logits/chosen": -1.5353752374649048,
"logits/rejected": -1.3762162923812866,
"logps/chosen": -323.80047607421875,
"logps/rejected": -287.00726318359375,
"loss": 0.5117,
"rewards/accuracies": 0.715624988079071,
"rewards/chosen": 0.8148177266120911,
"rewards/margins": 0.8335307240486145,
"rewards/rejected": -0.018712949007749557,
"step": 230
},
{
"epoch": 1.3,
"grad_norm": 1057.7644150535455,
"learning_rate": 3.2577328404292057e-09,
"logits/chosen": -1.5480402708053589,
"logits/rejected": -1.4182308912277222,
"logps/chosen": -312.3599548339844,
"logps/rejected": -285.9615478515625,
"loss": 0.5004,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": 0.8805425763130188,
"rewards/margins": 0.8217647671699524,
"rewards/rejected": 0.05877774953842163,
"step": 240
},
{
"epoch": 1.36,
"grad_norm": 1134.7078580204877,
"learning_rate": 2.821423700565763e-09,
"logits/chosen": -1.597772479057312,
"logits/rejected": -1.4192153215408325,
"logps/chosen": -350.49432373046875,
"logps/rejected": -306.66290283203125,
"loss": 0.497,
"rewards/accuracies": 0.8031250238418579,
"rewards/chosen": 1.035740613937378,
"rewards/margins": 1.0768238306045532,
"rewards/rejected": -0.04108327627182007,
"step": 250
},
{
"epoch": 1.41,
"grad_norm": 1065.7113049843235,
"learning_rate": 2.4047251428513483e-09,
"logits/chosen": -1.615321397781372,
"logits/rejected": -1.4609696865081787,
"logps/chosen": -325.0920104980469,
"logps/rejected": -291.08197021484375,
"loss": 0.513,
"rewards/accuracies": 0.7406250238418579,
"rewards/chosen": 0.9730992317199707,
"rewards/margins": 0.9078540802001953,
"rewards/rejected": 0.06524516642093658,
"step": 260
},
{
"epoch": 1.47,
"grad_norm": 864.6386915761193,
"learning_rate": 2.011388103757442e-09,
"logits/chosen": -1.5269956588745117,
"logits/rejected": -1.3828452825546265,
"logps/chosen": -316.21124267578125,
"logps/rejected": -285.77667236328125,
"loss": 0.4948,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": 0.9972602725028992,
"rewards/margins": 0.9109752774238586,
"rewards/rejected": 0.08628497272729874,
"step": 270
},
{
"epoch": 1.52,
"grad_norm": 990.2860964249656,
"learning_rate": 1.644953229677474e-09,
"logits/chosen": -1.601299524307251,
"logits/rejected": -1.4185166358947754,
"logps/chosen": -325.8226623535156,
"logps/rejected": -284.75567626953125,
"loss": 0.5046,
"rewards/accuracies": 0.7906249761581421,
"rewards/chosen": 1.0880569219589233,
"rewards/margins": 1.0006572008132935,
"rewards/rejected": 0.08739979565143585,
"step": 280
},
{
"epoch": 1.58,
"grad_norm": 1035.2441434858022,
"learning_rate": 1.308719005590957e-09,
"logits/chosen": -1.5150429010391235,
"logits/rejected": -1.400657057762146,
"logps/chosen": -318.3056640625,
"logps/rejected": -282.54803466796875,
"loss": 0.5012,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": 0.9477123022079468,
"rewards/margins": 0.9480409622192383,
"rewards/rejected": -0.00032869577989913523,
"step": 290
},
{
"epoch": 1.63,
"grad_norm": 952.917065524399,
"learning_rate": 1.005712063557776e-09,
"logits/chosen": -1.6322643756866455,
"logits/rejected": -1.4532310962677002,
"logps/chosen": -323.9804382324219,
"logps/rejected": -290.63629150390625,
"loss": 0.505,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": 0.903466522693634,
"rewards/margins": 0.8820658922195435,
"rewards/rejected": 0.02140064910054207,
"step": 300
},
{
"epoch": 1.68,
"grad_norm": 946.6373160602382,
"learning_rate": 7.386599383124321e-10,
"logits/chosen": -1.5624831914901733,
"logits/rejected": -1.3783166408538818,
"logps/chosen": -321.67547607421875,
"logps/rejected": -285.8205261230469,
"loss": 0.5001,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": 0.9382842779159546,
"rewards/margins": 0.9216762781143188,
"rewards/rejected": 0.01660792902112007,
"step": 310
},
{
"epoch": 1.74,
"grad_norm": 995.8621536271032,
"learning_rate": 5.099665152003929e-10,
"logits/chosen": -1.5874156951904297,
"logits/rejected": -1.3745605945587158,
"logps/chosen": -333.58453369140625,
"logps/rejected": -289.9964294433594,
"loss": 0.4994,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 1.030099868774414,
"rewards/margins": 1.0678800344467163,
"rewards/rejected": -0.03778017312288284,
"step": 320
},
{
"epoch": 1.79,
"grad_norm": 920.3253000772652,
"learning_rate": 3.216903914633745e-10,
"logits/chosen": -1.5468734502792358,
"logits/rejected": -1.4234826564788818,
"logps/chosen": -325.117919921875,
"logps/rejected": -296.07086181640625,
"loss": 0.5015,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": 0.9095224142074585,
"rewards/margins": 0.8073747754096985,
"rewards/rejected": 0.1021476536989212,
"step": 330
},
{
"epoch": 1.85,
"grad_norm": 988.4564193799931,
"learning_rate": 1.7552634565570324e-10,
"logits/chosen": -1.5551540851593018,
"logits/rejected": -1.3869084119796753,
"logps/chosen": -329.73553466796875,
"logps/rejected": -292.8804931640625,
"loss": 0.4969,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 1.0978221893310547,
"rewards/margins": 1.0354284048080444,
"rewards/rejected": 0.0623936764895916,
"step": 340
},
{
"epoch": 1.9,
"grad_norm": 978.2289689829179,
"learning_rate": 7.279008199590543e-11,
"logits/chosen": -1.5398132801055908,
"logits/rejected": -1.3765273094177246,
"logps/chosen": -326.29425048828125,
"logps/rejected": -292.0558166503906,
"loss": 0.4951,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 1.0201548337936401,
"rewards/margins": 1.052316665649414,
"rewards/rejected": -0.03216180205345154,
"step": 350
},
{
"epoch": 1.96,
"grad_norm": 921.2000894790062,
"learning_rate": 1.4406386978128017e-11,
"logits/chosen": -1.629494309425354,
"logits/rejected": -1.434570074081421,
"logps/chosen": -330.90911865234375,
"logps/rejected": -291.73638916015625,
"loss": 0.4873,
"rewards/accuracies": 0.8125,
"rewards/chosen": 1.1673272848129272,
"rewards/margins": 1.1191251277923584,
"rewards/rejected": 0.048202164471149445,
"step": 360
},
{
"epoch": 2.0,
"step": 368,
"total_flos": 0.0,
"train_loss": 0.577453197668428,
"train_runtime": 9953.5163,
"train_samples_per_second": 9.463,
"train_steps_per_second": 0.037
}
],
"logging_steps": 10,
"max_steps": 368,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}