zephyr-7b-dpo-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9988623435722411,
"eval_steps": 10000000,
"global_step": 439,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 1666.4801111075699,
"learning_rate": 2.2727272727272727e-09,
"logits/chosen": -1.6768856048583984,
"logits/rejected": -1.7259055376052856,
"logps/chosen": -1.2793102264404297,
"logps/rejected": -1.2162058353424072,
"loss": 1.3182,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.02,
"grad_norm": 1931.392535785282,
"learning_rate": 2.2727272727272725e-08,
"logits/chosen": -1.7030127048492432,
"logits/rejected": -1.6681498289108276,
"logps/chosen": -1.2134774923324585,
"logps/rejected": -1.2203598022460938,
"loss": 1.3237,
"rewards/accuracies": 0.3958333432674408,
"rewards/chosen": -0.017192421481013298,
"rewards/margins": -0.05587056651711464,
"rewards/rejected": 0.03867815062403679,
"step": 10
},
{
"epoch": 0.05,
"grad_norm": 1830.5113687291991,
"learning_rate": 4.545454545454545e-08,
"logits/chosen": -1.780092477798462,
"logits/rejected": -1.7350375652313232,
"logps/chosen": -1.1447092294692993,
"logps/rejected": -1.1850192546844482,
"loss": 1.2938,
"rewards/accuracies": 0.512499988079071,
"rewards/chosen": -0.148686021566391,
"rewards/margins": 0.007283961866050959,
"rewards/rejected": -0.1559699922800064,
"step": 20
},
{
"epoch": 0.07,
"grad_norm": 1936.12548502703,
"learning_rate": 6.818181818181817e-08,
"logits/chosen": -1.7440731525421143,
"logits/rejected": -1.674883246421814,
"logps/chosen": -1.1901237964630127,
"logps/rejected": -1.2408424615859985,
"loss": 1.2335,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.7222227454185486,
"rewards/margins": 0.348898708820343,
"rewards/rejected": -1.0711214542388916,
"step": 30
},
{
"epoch": 0.09,
"grad_norm": 1274.5224242026777,
"learning_rate": 9.09090909090909e-08,
"logits/chosen": -1.730063796043396,
"logits/rejected": -1.6635162830352783,
"logps/chosen": -1.2168413400650024,
"logps/rejected": -1.274864912033081,
"loss": 1.2163,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -0.9892686009407043,
"rewards/margins": 0.5700188875198364,
"rewards/rejected": -1.559287428855896,
"step": 40
},
{
"epoch": 0.11,
"grad_norm": 1227.2878182263496,
"learning_rate": 9.994307990108962e-08,
"logits/chosen": -1.6924018859863281,
"logits/rejected": -1.6282413005828857,
"logps/chosen": -1.2177562713623047,
"logps/rejected": -1.2450090646743774,
"loss": 1.1002,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -0.9669270515441895,
"rewards/margins": 0.7160126566886902,
"rewards/rejected": -1.6829397678375244,
"step": 50
},
{
"epoch": 0.14,
"grad_norm": 1544.774039586819,
"learning_rate": 9.959570405988094e-08,
"logits/chosen": -1.721835732460022,
"logits/rejected": -1.6408640146255493,
"logps/chosen": -1.1233644485473633,
"logps/rejected": -1.1840593814849854,
"loss": 1.0709,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -1.4266197681427002,
"rewards/margins": 0.8766496777534485,
"rewards/rejected": -2.303269386291504,
"step": 60
},
{
"epoch": 0.16,
"grad_norm": 1005.439321360097,
"learning_rate": 9.893476820924666e-08,
"logits/chosen": -1.8021934032440186,
"logits/rejected": -1.7167574167251587,
"logps/chosen": -1.230804443359375,
"logps/rejected": -1.2889682054519653,
"loss": 1.0139,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -1.796240210533142,
"rewards/margins": 1.1498826742172241,
"rewards/rejected": -2.946122884750366,
"step": 70
},
{
"epoch": 0.18,
"grad_norm": 1396.6389927727296,
"learning_rate": 9.796445099843647e-08,
"logits/chosen": -1.7873592376708984,
"logits/rejected": -1.6995294094085693,
"logps/chosen": -1.2222901582717896,
"logps/rejected": -1.2922863960266113,
"loss": 1.1257,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -1.5321768522262573,
"rewards/margins": 0.9466292262077332,
"rewards/rejected": -2.4788060188293457,
"step": 80
},
{
"epoch": 0.2,
"grad_norm": 1030.6978947603225,
"learning_rate": 9.669088708527066e-08,
"logits/chosen": -1.734185814857483,
"logits/rejected": -1.6654260158538818,
"logps/chosen": -1.2461029291152954,
"logps/rejected": -1.265190839767456,
"loss": 0.9922,
"rewards/accuracies": 0.75,
"rewards/chosen": -1.0405571460723877,
"rewards/margins": 1.204342246055603,
"rewards/rejected": -2.244899272918701,
"step": 90
},
{
"epoch": 0.23,
"grad_norm": 1586.6455210629026,
"learning_rate": 9.512212835085849e-08,
"logits/chosen": -1.765062689781189,
"logits/rejected": -1.6767990589141846,
"logps/chosen": -1.1784775257110596,
"logps/rejected": -1.233091115951538,
"loss": 1.016,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.0022172927856445,
"rewards/margins": 1.1903674602508545,
"rewards/rejected": -2.192584753036499,
"step": 100
},
{
"epoch": 0.25,
"grad_norm": 1144.8635215765032,
"learning_rate": 9.326809299301306e-08,
"logits/chosen": -1.7722240686416626,
"logits/rejected": -1.6646277904510498,
"logps/chosen": -1.2064539194107056,
"logps/rejected": -1.2708475589752197,
"loss": 0.9709,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -0.5811842679977417,
"rewards/margins": 1.279047966003418,
"rewards/rejected": -1.8602321147918701,
"step": 110
},
{
"epoch": 0.27,
"grad_norm": 1417.2025435553687,
"learning_rate": 9.114050282021158e-08,
"logits/chosen": -1.762213110923767,
"logits/rejected": -1.6987812519073486,
"logps/chosen": -1.1565983295440674,
"logps/rejected": -1.2166643142700195,
"loss": 0.9277,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.9035682678222656,
"rewards/margins": 1.2673728466033936,
"rewards/rejected": -2.1709413528442383,
"step": 120
},
{
"epoch": 0.3,
"grad_norm": 1150.053731245993,
"learning_rate": 8.875280914254802e-08,
"logits/chosen": -1.7586562633514404,
"logits/rejected": -1.6647939682006836,
"logps/chosen": -1.20124089717865,
"logps/rejected": -1.2465784549713135,
"loss": 0.9452,
"rewards/accuracies": 0.768750011920929,
"rewards/chosen": -0.841080367565155,
"rewards/margins": 1.6912786960601807,
"rewards/rejected": -2.5323591232299805,
"step": 130
},
{
"epoch": 0.32,
"grad_norm": 1256.1179361929435,
"learning_rate": 8.612010772821971e-08,
"logits/chosen": -1.7858541011810303,
"logits/rejected": -1.7347157001495361,
"logps/chosen": -1.2216544151306152,
"logps/rejected": -1.2101521492004395,
"loss": 0.9734,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.7775429487228394,
"rewards/margins": 1.340767502784729,
"rewards/rejected": -2.1183104515075684,
"step": 140
},
{
"epoch": 0.34,
"grad_norm": 1137.7394214504618,
"learning_rate": 8.325904336322055e-08,
"logits/chosen": -1.7536169290542603,
"logits/rejected": -1.691907286643982,
"logps/chosen": -1.1751422882080078,
"logps/rejected": -1.2404186725616455,
"loss": 0.9503,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.7777413725852966,
"rewards/margins": 1.8102855682373047,
"rewards/rejected": -2.588027000427246,
"step": 150
},
{
"epoch": 0.36,
"grad_norm": 1056.887388548025,
"learning_rate": 8.01877046176447e-08,
"logits/chosen": -1.6971683502197266,
"logits/rejected": -1.6201127767562866,
"logps/chosen": -1.192384958267212,
"logps/rejected": -1.2586729526519775,
"loss": 0.9572,
"rewards/accuracies": 0.75,
"rewards/chosen": -1.4085007905960083,
"rewards/margins": 1.490207314491272,
"rewards/rejected": -2.898707866668701,
"step": 160
},
{
"epoch": 0.39,
"grad_norm": 991.2349708721418,
"learning_rate": 7.692550948392249e-08,
"logits/chosen": -1.759788155555725,
"logits/rejected": -1.6842018365859985,
"logps/chosen": -1.2190964221954346,
"logps/rejected": -1.244114637374878,
"loss": 0.9699,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.884096622467041,
"rewards/margins": 1.4573404788970947,
"rewards/rejected": -2.3414371013641357,
"step": 170
},
{
"epoch": 0.41,
"grad_norm": 1143.0994123075566,
"learning_rate": 7.349308261002021e-08,
"logits/chosen": -1.7149896621704102,
"logits/rejected": -1.649047613143921,
"logps/chosen": -1.2045981884002686,
"logps/rejected": -1.267183780670166,
"loss": 0.9333,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.7512127161026001,
"rewards/margins": 1.2935186624526978,
"rewards/rejected": -2.044731378555298,
"step": 180
},
{
"epoch": 0.43,
"grad_norm": 1220.476164637326,
"learning_rate": 6.991212490377531e-08,
"logits/chosen": -1.7686491012573242,
"logits/rejected": -1.7093932628631592,
"logps/chosen": -1.2307132482528687,
"logps/rejected": -1.2679650783538818,
"loss": 0.9238,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -0.06102483719587326,
"rewards/margins": 1.5709805488586426,
"rewards/rejected": -1.6320053339004517,
"step": 190
},
{
"epoch": 0.46,
"grad_norm": 1207.0013256647494,
"learning_rate": 6.620527633276978e-08,
"logits/chosen": -1.7065632343292236,
"logits/rejected": -1.6239337921142578,
"logps/chosen": -1.197272777557373,
"logps/rejected": -1.302229642868042,
"loss": 0.912,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -0.28195858001708984,
"rewards/margins": 1.8168141841888428,
"rewards/rejected": -2.0987727642059326,
"step": 200
},
{
"epoch": 0.48,
"grad_norm": 1547.2477497808447,
"learning_rate": 6.239597278716581e-08,
"logits/chosen": -1.776951551437378,
"logits/rejected": -1.708361029624939,
"logps/chosen": -1.24917471408844,
"logps/rejected": -1.246996283531189,
"loss": 0.9177,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -0.38446491956710815,
"rewards/margins": 1.8485082387924194,
"rewards/rejected": -2.232973098754883,
"step": 210
},
{
"epoch": 0.5,
"grad_norm": 986.8221787714062,
"learning_rate": 5.8508297910462456e-08,
"logits/chosen": -1.727354645729065,
"logits/rejected": -1.6401188373565674,
"logps/chosen": -1.1687428951263428,
"logps/rejected": -1.2474868297576904,
"loss": 0.8855,
"rewards/accuracies": 0.8062499761581421,
"rewards/chosen": -0.5227571725845337,
"rewards/margins": 2.097059726715088,
"rewards/rejected": -2.619816780090332,
"step": 220
},
{
"epoch": 0.52,
"grad_norm": 1466.4304397519809,
"learning_rate": 5.456683083494731e-08,
"logits/chosen": -1.7255643606185913,
"logits/rejected": -1.6826807260513306,
"logps/chosen": -1.2014678716659546,
"logps/rejected": -1.2398700714111328,
"loss": 0.9518,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.7659143209457397,
"rewards/margins": 1.4660184383392334,
"rewards/rejected": -2.231933116912842,
"step": 230
},
{
"epoch": 0.55,
"grad_norm": 1385.4891697791213,
"learning_rate": 5.059649078450834e-08,
"logits/chosen": -1.7211616039276123,
"logits/rejected": -1.6692482233047485,
"logps/chosen": -1.1779934167861938,
"logps/rejected": -1.2568769454956055,
"loss": 0.8955,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -0.9639942049980164,
"rewards/margins": 1.6844803094863892,
"rewards/rejected": -2.6484744548797607,
"step": 240
},
{
"epoch": 0.57,
"grad_norm": 1256.1363696264702,
"learning_rate": 4.6622379527277186e-08,
"logits/chosen": -1.7240123748779297,
"logits/rejected": -1.6668732166290283,
"logps/chosen": -1.1512458324432373,
"logps/rejected": -1.2001845836639404,
"loss": 0.8877,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.2357298135757446,
"rewards/margins": 1.5858875513076782,
"rewards/rejected": -2.821617364883423,
"step": 250
},
{
"epoch": 0.59,
"grad_norm": 882.5182339158957,
"learning_rate": 4.26696226741691e-08,
"logits/chosen": -1.736823320388794,
"logits/rejected": -1.657954216003418,
"logps/chosen": -1.244571328163147,
"logps/rejected": -1.2810423374176025,
"loss": 0.8757,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -1.174777626991272,
"rewards/margins": 1.6504262685775757,
"rewards/rejected": -2.8252038955688477,
"step": 260
},
{
"epoch": 0.61,
"grad_norm": 927.5282198039703,
"learning_rate": 3.876321082668098e-08,
"logits/chosen": -1.8001619577407837,
"logits/rejected": -1.7206510305404663,
"logps/chosen": -1.2467237710952759,
"logps/rejected": -1.272040843963623,
"loss": 0.8981,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -0.8233057856559753,
"rewards/margins": 1.6008100509643555,
"rewards/rejected": -2.4241158962249756,
"step": 270
},
{
"epoch": 0.64,
"grad_norm": 1414.5359647496766,
"learning_rate": 3.492784157826244e-08,
"logits/chosen": -1.7251765727996826,
"logits/rejected": -1.6225860118865967,
"logps/chosen": -1.2284091711044312,
"logps/rejected": -1.2839155197143555,
"loss": 0.8929,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -0.5533002018928528,
"rewards/margins": 2.0116381645202637,
"rewards/rejected": -2.5649383068084717,
"step": 280
},
{
"epoch": 0.66,
"grad_norm": 1016.8878069934569,
"learning_rate": 3.118776336817812e-08,
"logits/chosen": -1.777353048324585,
"logits/rejected": -1.704052209854126,
"logps/chosen": -1.2131690979003906,
"logps/rejected": -1.2390577793121338,
"loss": 0.8692,
"rewards/accuracies": 0.8187500238418579,
"rewards/chosen": -0.992035984992981,
"rewards/margins": 1.8672508001327515,
"rewards/rejected": -2.8592867851257324,
"step": 290
},
{
"epoch": 0.68,
"grad_norm": 1172.1973055026576,
"learning_rate": 2.7566622175067443e-08,
"logits/chosen": -1.75467050075531,
"logits/rejected": -1.6821622848510742,
"logps/chosen": -1.2167221307754517,
"logps/rejected": -1.312439203262329,
"loss": 0.9238,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.8138607144355774,
"rewards/margins": 1.7730525732040405,
"rewards/rejected": -2.5869133472442627,
"step": 300
},
{
"epoch": 0.71,
"grad_norm": 1325.0572604746271,
"learning_rate": 2.408731201945432e-08,
"logits/chosen": -1.7551631927490234,
"logits/rejected": -1.7048372030258179,
"logps/chosen": -1.1999702453613281,
"logps/rejected": -1.2347406148910522,
"loss": 0.9048,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.6074885725975037,
"rewards/margins": 1.4615652561187744,
"rewards/rejected": -2.0690536499023438,
"step": 310
},
{
"epoch": 0.73,
"grad_norm": 1307.0038735408154,
"learning_rate": 2.0771830220378112e-08,
"logits/chosen": -1.7086585760116577,
"logits/rejected": -1.6484758853912354,
"logps/chosen": -1.2315673828125,
"logps/rejected": -1.2379032373428345,
"loss": 0.8568,
"rewards/accuracies": 0.768750011920929,
"rewards/chosen": -0.8682917356491089,
"rewards/margins": 1.418613314628601,
"rewards/rejected": -2.28690505027771,
"step": 320
},
{
"epoch": 0.75,
"grad_norm": 1081.044775563757,
"learning_rate": 1.7641138321260257e-08,
"logits/chosen": -1.744683861732483,
"logits/rejected": -1.6652017831802368,
"logps/chosen": -1.1902309656143188,
"logps/rejected": -1.2386529445648193,
"loss": 0.8653,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -0.6376137733459473,
"rewards/margins": 2.0886900424957275,
"rewards/rejected": -2.726303815841675,
"step": 330
},
{
"epoch": 0.77,
"grad_norm": 981.6058112333443,
"learning_rate": 1.4715029564277793e-08,
"logits/chosen": -1.8191375732421875,
"logits/rejected": -1.7604516744613647,
"logps/chosen": -1.2077248096466064,
"logps/rejected": -1.262688398361206,
"loss": 0.9084,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -0.30642908811569214,
"rewards/margins": 1.7900102138519287,
"rewards/rejected": -2.0964391231536865,
"step": 340
},
{
"epoch": 0.8,
"grad_norm": 1453.9865513146688,
"learning_rate": 1.2012003751113343e-08,
"logits/chosen": -1.7971642017364502,
"logits/rejected": -1.7289268970489502,
"logps/chosen": -1.171277642250061,
"logps/rejected": -1.2399911880493164,
"loss": 0.8408,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.9107818603515625,
"rewards/margins": 1.8522905111312866,
"rewards/rejected": -2.7630724906921387,
"step": 350
},
{
"epoch": 0.82,
"grad_norm": 1215.5568858398267,
"learning_rate": 9.549150281252633e-09,
"logits/chosen": -1.7647504806518555,
"logits/rejected": -1.7033681869506836,
"logps/chosen": -1.1781424283981323,
"logps/rejected": -1.246336579322815,
"loss": 0.8691,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.6007339358329773,
"rewards/margins": 1.6463441848754883,
"rewards/rejected": -2.2470781803131104,
"step": 360
},
{
"epoch": 0.84,
"grad_norm": 989.7203519251033,
"learning_rate": 7.3420401072985306e-09,
"logits/chosen": -1.7938206195831299,
"logits/rejected": -1.7368061542510986,
"logps/chosen": -1.2212886810302734,
"logps/rejected": -1.2962526082992554,
"loss": 0.8304,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.6675101518630981,
"rewards/margins": 1.7500286102294922,
"rewards/rejected": -2.41753888130188,
"step": 370
},
{
"epoch": 0.86,
"grad_norm": 1103.1154426715473,
"learning_rate": 5.404627290395369e-09,
"logits/chosen": -1.7590833902359009,
"logits/rejected": -1.6879241466522217,
"logps/chosen": -1.1950905323028564,
"logps/rejected": -1.2529264688491821,
"loss": 0.8354,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.5484554171562195,
"rewards/margins": 1.7839889526367188,
"rewards/rejected": -2.332444429397583,
"step": 380
},
{
"epoch": 0.89,
"grad_norm": 1735.4553729147015,
"learning_rate": 3.74916077816162e-09,
"logits/chosen": -1.7692234516143799,
"logits/rejected": -1.7004655599594116,
"logps/chosen": -1.1599231958389282,
"logps/rejected": -1.1907106637954712,
"loss": 0.8962,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -1.0128358602523804,
"rewards/margins": 1.4658567905426025,
"rewards/rejected": -2.4786927700042725,
"step": 390
},
{
"epoch": 0.91,
"grad_norm": 1092.0703383397774,
"learning_rate": 2.386106962899165e-09,
"logits/chosen": -1.6911523342132568,
"logits/rejected": -1.6125024557113647,
"logps/chosen": -1.233060359954834,
"logps/rejected": -1.3061407804489136,
"loss": 0.8745,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.6639580130577087,
"rewards/margins": 1.6740585565567017,
"rewards/rejected": -2.3380167484283447,
"step": 400
},
{
"epoch": 0.93,
"grad_norm": 1112.8116007670076,
"learning_rate": 1.3240835096913706e-09,
"logits/chosen": -1.7251644134521484,
"logits/rejected": -1.613480567932129,
"logps/chosen": -1.1751197576522827,
"logps/rejected": -1.2636477947235107,
"loss": 0.9325,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -0.35689860582351685,
"rewards/margins": 1.9113714694976807,
"rewards/rejected": -2.2682700157165527,
"step": 410
},
{
"epoch": 0.96,
"grad_norm": 1072.5150365604325,
"learning_rate": 5.698048727497462e-10,
"logits/chosen": -1.7486751079559326,
"logits/rejected": -1.6716489791870117,
"logps/chosen": -1.198484182357788,
"logps/rejected": -1.2844183444976807,
"loss": 0.8652,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -0.7186421155929565,
"rewards/margins": 1.897702932357788,
"rewards/rejected": -2.616345167160034,
"step": 420
},
{
"epoch": 0.98,
"grad_norm": 1007.4274803164712,
"learning_rate": 1.2803984447259387e-10,
"logits/chosen": -1.756765365600586,
"logits/rejected": -1.6918004751205444,
"logps/chosen": -1.189513921737671,
"logps/rejected": -1.2563507556915283,
"loss": 0.8416,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.7174621820449829,
"rewards/margins": 1.9183887243270874,
"rewards/rejected": -2.635850667953491,
"step": 430
},
{
"epoch": 1.0,
"step": 439,
"total_flos": 0.0,
"train_loss": 0.9518976428785737,
"train_runtime": 6868.0311,
"train_samples_per_second": 8.188,
"train_steps_per_second": 0.064
}
],
"logging_steps": 10,
"max_steps": 439,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
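
A minimal sketch (not part of the original file) of one way to inspect this log offline: it loads the JSON above, drops the final summary entry, and plots the DPO loss and reward margins over training steps. The local filename "trainer_state.json" and the use of pandas/matplotlib are assumptions, not anything specified by the file itself.

import json

import pandas as pd
import matplotlib.pyplot as plt

# Assumes the JSON above has been saved locally as "trainer_state.json".
with open("trainer_state.json") as f:
    state = json.load(f)

# The last log_history entry is the end-of-training summary (train_loss,
# train_runtime, ...) and has no per-step "loss", so it becomes NaN here
# and is filtered out.
logs = pd.DataFrame(state["log_history"])
logs = logs[logs["loss"].notna()]

fig, (ax_loss, ax_margin) = plt.subplots(1, 2, figsize=(10, 4))

ax_loss.plot(logs["step"], logs["loss"])
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("loss")

ax_margin.plot(logs["step"], logs["rewards/margins"])
ax_margin.set_xlabel("step")
ax_margin.set_ylabel("rewards/margins")

fig.tight_layout()
plt.show()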