zephyr-7b-dpo-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9980806142034548,
"eval_steps": 1000000,
"global_step": 390,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 747.1989420753074,
"learning_rate": 5.128205128205128e-09,
"logits/chosen": -2.5617921352386475,
"logits/rejected": -2.415619373321533,
"logps/chosen": -258.1644592285156,
"logps/rejected": -191.65736389160156,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.03,
"grad_norm": 772.6555707154646,
"learning_rate": 5.128205128205127e-08,
"logits/chosen": -2.610959053039551,
"logits/rejected": -2.52398681640625,
"logps/chosen": -267.3009948730469,
"logps/rejected": -198.18966674804688,
"loss": 0.7062,
"rewards/accuracies": 0.4444444477558136,
"rewards/chosen": -0.006153374910354614,
"rewards/margins": -0.0059446971863508224,
"rewards/rejected": -0.0002086775202769786,
"step": 10
},
{
"epoch": 0.05,
"grad_norm": 568.2747762208306,
"learning_rate": 1.0256410256410255e-07,
"logits/chosen": -2.626389980316162,
"logits/rejected": -2.5251834392547607,
"logps/chosen": -261.1226501464844,
"logps/rejected": -198.20663452148438,
"loss": 0.6286,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": 0.1013670414686203,
"rewards/margins": 0.16386722028255463,
"rewards/rejected": -0.06250017881393433,
"step": 20
},
{
"epoch": 0.08,
"grad_norm": 494.0619878155869,
"learning_rate": 1.5384615384615385e-07,
"logits/chosen": -2.631648302078247,
"logits/rejected": -2.533083915710449,
"logps/chosen": -253.81259155273438,
"logps/rejected": -194.48611450195312,
"loss": 0.4093,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 0.6733725666999817,
"rewards/margins": 0.9389022588729858,
"rewards/rejected": -0.2655297815799713,
"step": 30
},
{
"epoch": 0.1,
"grad_norm": 479.5871736251968,
"learning_rate": 1.999959945379852e-07,
"logits/chosen": -2.6485562324523926,
"logits/rejected": -2.5283126831054688,
"logps/chosen": -247.01113891601562,
"logps/rejected": -198.84341430664062,
"loss": 0.2427,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 1.6728582382202148,
"rewards/margins": 2.513617753982544,
"rewards/rejected": -0.84075927734375,
"step": 40
},
{
"epoch": 0.13,
"grad_norm": 268.0648126747043,
"learning_rate": 1.9951572723309917e-07,
"logits/chosen": -2.701251983642578,
"logits/rejected": -2.596193790435791,
"logps/chosen": -260.8350830078125,
"logps/rejected": -204.7702178955078,
"loss": 0.2435,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 3.198915481567383,
"rewards/margins": 4.101343631744385,
"rewards/rejected": -0.902428150177002,
"step": 50
},
{
"epoch": 0.15,
"grad_norm": 321.17307426963725,
"learning_rate": 1.9823877374156647e-07,
"logits/chosen": -2.6826422214508057,
"logits/rejected": -2.5699238777160645,
"logps/chosen": -260.90740966796875,
"logps/rejected": -190.84542846679688,
"loss": 0.2062,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 3.89800763130188,
"rewards/margins": 4.676665306091309,
"rewards/rejected": -0.7786582112312317,
"step": 60
},
{
"epoch": 0.18,
"grad_norm": 341.66511985659804,
"learning_rate": 1.9617535688178958e-07,
"logits/chosen": -2.664588451385498,
"logits/rejected": -2.5252487659454346,
"logps/chosen": -289.1197204589844,
"logps/rejected": -219.9818572998047,
"loss": 0.1912,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 4.399308204650879,
"rewards/margins": 5.727428436279297,
"rewards/rejected": -1.3281205892562866,
"step": 70
},
{
"epoch": 0.2,
"grad_norm": 345.50558992425647,
"learning_rate": 1.9334199560765839e-07,
"logits/chosen": -2.612795352935791,
"logits/rejected": -2.4848904609680176,
"logps/chosen": -260.74462890625,
"logps/rejected": -209.5861053466797,
"loss": 0.1745,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 3.798893690109253,
"rewards/margins": 5.829946994781494,
"rewards/rejected": -2.031052827835083,
"step": 80
},
{
"epoch": 0.23,
"grad_norm": 623.1485221220804,
"learning_rate": 1.897613727639014e-07,
"logits/chosen": -2.6385045051574707,
"logits/rejected": -2.5179295539855957,
"logps/chosen": -254.0596160888672,
"logps/rejected": -191.18374633789062,
"loss": 0.1808,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 3.131239414215088,
"rewards/margins": 5.626199722290039,
"rewards/rejected": -2.494959831237793,
"step": 90
},
{
"epoch": 0.26,
"grad_norm": 253.52286816658938,
"learning_rate": 1.8546215349560202e-07,
"logits/chosen": -2.6612112522125244,
"logits/rejected": -2.533451795578003,
"logps/chosen": -235.6942138671875,
"logps/rejected": -196.51657104492188,
"loss": 0.1619,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 3.5456454753875732,
"rewards/margins": 5.960389614105225,
"rewards/rejected": -2.4147439002990723,
"step": 100
},
{
"epoch": 0.28,
"grad_norm": 266.28422663795834,
"learning_rate": 1.8047875576562553e-07,
"logits/chosen": -2.6504344940185547,
"logits/rejected": -2.52449369430542,
"logps/chosen": -254.90847778320312,
"logps/rejected": -191.41806030273438,
"loss": 0.1662,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 4.884920597076416,
"rewards/margins": 6.528225898742676,
"rewards/rejected": -1.6433048248291016,
"step": 110
},
{
"epoch": 0.31,
"grad_norm": 248.5491633368023,
"learning_rate": 1.748510748171101e-07,
"logits/chosen": -2.626223564147949,
"logits/rejected": -2.531078815460205,
"logps/chosen": -258.3885803222656,
"logps/rejected": -210.59378051757812,
"loss": 0.1688,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": 4.675450801849365,
"rewards/margins": 6.811753273010254,
"rewards/rejected": -2.1363017559051514,
"step": 120
},
{
"epoch": 0.33,
"grad_norm": 219.45295559501494,
"learning_rate": 1.6862416378687336e-07,
"logits/chosen": -2.6003150939941406,
"logits/rejected": -2.465550661087036,
"logps/chosen": -249.7939910888672,
"logps/rejected": -186.2041778564453,
"loss": 0.1744,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 3.695958375930786,
"rewards/margins": 6.534309387207031,
"rewards/rejected": -2.838351011276245,
"step": 130
},
{
"epoch": 0.36,
"grad_norm": 335.11070283406536,
"learning_rate": 1.6184787302662547e-07,
"logits/chosen": -2.6305205821990967,
"logits/rejected": -2.519258975982666,
"logps/chosen": -269.74249267578125,
"logps/rejected": -205.24441528320312,
"loss": 0.1847,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 3.7317748069763184,
"rewards/margins": 6.535207271575928,
"rewards/rejected": -2.8034329414367676,
"step": 140
},
{
"epoch": 0.38,
"grad_norm": 237.3062963631657,
"learning_rate": 1.5457645101945046e-07,
"logits/chosen": -2.613662004470825,
"logits/rejected": -2.50919771194458,
"logps/chosen": -262.550537109375,
"logps/rejected": -209.60781860351562,
"loss": 0.1987,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 4.1123762130737305,
"rewards/margins": 7.0581793785095215,
"rewards/rejected": -2.945803165435791,
"step": 150
},
{
"epoch": 0.41,
"grad_norm": 781.538478360743,
"learning_rate": 1.4686811008647035e-07,
"logits/chosen": -2.6011874675750732,
"logits/rejected": -2.4891130924224854,
"logps/chosen": -241.5615692138672,
"logps/rejected": -182.00228881835938,
"loss": 0.1643,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": 4.244563102722168,
"rewards/margins": 6.548022270202637,
"rewards/rejected": -2.3034589290618896,
"step": 160
},
{
"epoch": 0.44,
"grad_norm": 420.43337815116814,
"learning_rate": 1.387845603604855e-07,
"logits/chosen": -2.6014420986175537,
"logits/rejected": -2.4862489700317383,
"logps/chosen": -260.2299499511719,
"logps/rejected": -197.31021118164062,
"loss": 0.1451,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 4.481954574584961,
"rewards/margins": 6.5617804527282715,
"rewards/rejected": -2.0798258781433105,
"step": 170
},
{
"epoch": 0.46,
"grad_norm": 219.52091746145038,
"learning_rate": 1.3039051575742468e-07,
"logits/chosen": -2.6457467079162598,
"logits/rejected": -2.5342042446136475,
"logps/chosen": -234.87063598632812,
"logps/rejected": -187.8373565673828,
"loss": 0.1641,
"rewards/accuracies": 0.9375,
"rewards/chosen": 3.4876370429992676,
"rewards/margins": 5.9574689865112305,
"rewards/rejected": -2.469831943511963,
"step": 180
},
{
"epoch": 0.49,
"grad_norm": 391.30420402591136,
"learning_rate": 1.2175317590061675e-07,
"logits/chosen": -2.598358154296875,
"logits/rejected": -2.5016226768493652,
"logps/chosen": -256.7655334472656,
"logps/rejected": -203.38717651367188,
"loss": 0.1704,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 3.634434223175049,
"rewards/margins": 6.311253547668457,
"rewards/rejected": -2.6768198013305664,
"step": 190
},
{
"epoch": 0.51,
"grad_norm": 573.9951634797832,
"learning_rate": 1.1294168814540553e-07,
"logits/chosen": -2.640943765640259,
"logits/rejected": -2.5037968158721924,
"logps/chosen": -270.6221923828125,
"logps/rejected": -199.61119079589844,
"loss": 0.1514,
"rewards/accuracies": 0.9375,
"rewards/chosen": 4.9661078453063965,
"rewards/margins": 7.715418338775635,
"rewards/rejected": -2.749310255050659,
"step": 200
},
{
"epoch": 0.54,
"grad_norm": 383.73670531019474,
"learning_rate": 1.0402659401094151e-07,
"logits/chosen": -2.6191225051879883,
"logits/rejected": -2.5026001930236816,
"logps/chosen": -262.648681640625,
"logps/rejected": -207.82363891601562,
"loss": 0.1405,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 4.20405912399292,
"rewards/margins": 7.261610507965088,
"rewards/rejected": -3.057551383972168,
"step": 210
},
{
"epoch": 0.56,
"grad_norm": 271.1779091479927,
"learning_rate": 9.507926445081218e-08,
"logits/chosen": -2.618332862854004,
"logits/rejected": -2.5180063247680664,
"logps/chosen": -255.3096160888672,
"logps/rejected": -204.3534698486328,
"loss": 0.1701,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 3.6050515174865723,
"rewards/margins": 7.0129852294921875,
"rewards/rejected": -3.4079346656799316,
"step": 220
},
{
"epoch": 0.59,
"grad_norm": 290.4603808348808,
"learning_rate": 8.61713284835267e-08,
"logits/chosen": -2.646491050720215,
"logits/rejected": -2.51234769821167,
"logps/chosen": -264.74420166015625,
"logps/rejected": -205.0327911376953,
"loss": 0.1425,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 4.327627182006836,
"rewards/margins": 7.188753604888916,
"rewards/rejected": -2.8611254692077637,
"step": 230
},
{
"epoch": 0.61,
"grad_norm": 422.65919601372,
"learning_rate": 7.73740997570278e-08,
"logits/chosen": -2.6252548694610596,
"logits/rejected": -2.5125999450683594,
"logps/chosen": -258.0855712890625,
"logps/rejected": -207.72213745117188,
"loss": 0.1341,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 4.273736953735352,
"rewards/margins": 7.429440498352051,
"rewards/rejected": -3.155702829360962,
"step": 240
},
{
"epoch": 0.64,
"grad_norm": 236.70780195109265,
"learning_rate": 6.875800563794424e-08,
"logits/chosen": -2.637636184692383,
"logits/rejected": -2.522014856338501,
"logps/chosen": -264.11163330078125,
"logps/rejected": -213.34262084960938,
"loss": 0.1561,
"rewards/accuracies": 0.9375,
"rewards/chosen": 3.965393543243408,
"rewards/margins": 7.122384548187256,
"rewards/rejected": -3.1569907665252686,
"step": 250
},
{
"epoch": 0.67,
"grad_norm": 261.4238126511671,
"learning_rate": 6.039202339608431e-08,
"logits/chosen": -2.6351547241210938,
"logits/rejected": -2.5343315601348877,
"logps/chosen": -257.4027099609375,
"logps/rejected": -206.6591033935547,
"loss": 0.1287,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 4.236881732940674,
"rewards/margins": 7.065675258636475,
"rewards/rejected": -2.828793525695801,
"step": 260
},
{
"epoch": 0.69,
"grad_norm": 290.68123700235606,
"learning_rate": 5.2343127997869205e-08,
"logits/chosen": -2.610731840133667,
"logits/rejected": -2.4807095527648926,
"logps/chosen": -270.530517578125,
"logps/rejected": -215.2696533203125,
"loss": 0.2282,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 3.1549553871154785,
"rewards/margins": 7.109915733337402,
"rewards/rejected": -3.9549612998962402,
"step": 270
},
{
"epoch": 0.72,
"grad_norm": 212.4315878069041,
"learning_rate": 4.4675755929468636e-08,
"logits/chosen": -2.6439692974090576,
"logits/rejected": -2.521467447280884,
"logps/chosen": -266.0245056152344,
"logps/rejected": -204.48606872558594,
"loss": 0.139,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": 3.838078737258911,
"rewards/margins": 7.6062164306640625,
"rewards/rejected": -3.7681381702423096,
"step": 280
},
{
"epoch": 0.74,
"grad_norm": 229.50979085481035,
"learning_rate": 3.745128934207224e-08,
"logits/chosen": -2.598167657852173,
"logits/rejected": -2.5218276977539062,
"logps/chosen": -266.5920104980469,
"logps/rejected": -222.931884765625,
"loss": 0.1507,
"rewards/accuracies": 0.9375,
"rewards/chosen": 3.6127426624298096,
"rewards/margins": 6.654023170471191,
"rewards/rejected": -3.0412800312042236,
"step": 290
},
{
"epoch": 0.77,
"grad_norm": 321.80727834811256,
"learning_rate": 3.0727564649040063e-08,
"logits/chosen": -2.6253504753112793,
"logits/rejected": -2.5061511993408203,
"logps/chosen": -270.4463806152344,
"logps/rejected": -213.0215301513672,
"loss": 0.141,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": 3.923215389251709,
"rewards/margins": 7.3881683349609375,
"rewards/rejected": -3.464953660964966,
"step": 300
},
{
"epoch": 0.79,
"grad_norm": 171.80295572629132,
"learning_rate": 2.4558409508920985e-08,
"logits/chosen": -2.5865883827209473,
"logits/rejected": -2.491208553314209,
"logps/chosen": -275.579345703125,
"logps/rejected": -208.02297973632812,
"loss": 0.1401,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 3.985370635986328,
"rewards/margins": 7.237724304199219,
"rewards/rejected": -3.2523536682128906,
"step": 310
},
{
"epoch": 0.82,
"grad_norm": 294.8015156484179,
"learning_rate": 1.899321190108335e-08,
"logits/chosen": -2.6083686351776123,
"logits/rejected": -2.508666515350342,
"logps/chosen": -258.6110534667969,
"logps/rejected": -207.57406616210938,
"loss": 0.1709,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 3.153282642364502,
"rewards/margins": 7.0364227294921875,
"rewards/rejected": -3.8831400871276855,
"step": 320
},
{
"epoch": 0.84,
"grad_norm": 262.1222746234904,
"learning_rate": 1.4076524743778317e-08,
"logits/chosen": -2.6292643547058105,
"logits/rejected": -2.531942129135132,
"logps/chosen": -261.5999450683594,
"logps/rejected": -207.26699829101562,
"loss": 0.1437,
"rewards/accuracies": 0.96875,
"rewards/chosen": 4.273932456970215,
"rewards/margins": 7.467469215393066,
"rewards/rejected": -3.1935369968414307,
"step": 330
},
{
"epoch": 0.87,
"grad_norm": 299.7750791753028,
"learning_rate": 9.847709219918398e-09,
"logits/chosen": -2.6148436069488525,
"logits/rejected": -2.5132031440734863,
"logps/chosen": -256.66473388671875,
"logps/rejected": -194.7910919189453,
"loss": 0.1894,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 4.073337078094482,
"rewards/margins": 7.587210655212402,
"rewards/rejected": -3.51387357711792,
"step": 340
},
{
"epoch": 0.9,
"grad_norm": 346.7439242044206,
"learning_rate": 6.340619665972846e-09,
"logits/chosen": -2.630897045135498,
"logits/rejected": -2.5146021842956543,
"logps/chosen": -265.88916015625,
"logps/rejected": -212.77487182617188,
"loss": 0.1582,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 3.5220444202423096,
"rewards/margins": 7.0233283042907715,
"rewards/rejected": -3.501283645629883,
"step": 350
},
{
"epoch": 0.92,
"grad_norm": 307.153905111391,
"learning_rate": 3.583332546643769e-09,
"logits/chosen": -2.6361472606658936,
"logits/rejected": -2.5298829078674316,
"logps/chosen": -271.5682067871094,
"logps/rejected": -209.4921417236328,
"loss": 0.1547,
"rewards/accuracies": 0.9375,
"rewards/chosen": 3.9090733528137207,
"rewards/margins": 7.22186279296875,
"rewards/rejected": -3.3127894401550293,
"step": 360
},
{
"epoch": 0.95,
"grad_norm": 183.78861249224778,
"learning_rate": 1.5979216850509847e-09,
"logits/chosen": -2.593644618988037,
"logits/rejected": -2.4859519004821777,
"logps/chosen": -270.0194396972656,
"logps/rejected": -212.9616241455078,
"loss": 0.1314,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 3.80389142036438,
"rewards/margins": 6.737584114074707,
"rewards/rejected": -2.933692216873169,
"step": 370
},
{
"epoch": 0.97,
"grad_norm": 201.1703750461263,
"learning_rate": 4.002815478505006e-10,
"logits/chosen": -2.6470093727111816,
"logits/rejected": -2.552563428878784,
"logps/chosen": -261.07574462890625,
"logps/rejected": -216.7428741455078,
"loss": 0.1449,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": 4.029725074768066,
"rewards/margins": 6.822300910949707,
"rewards/rejected": -2.7925755977630615,
"step": 380
},
{
"epoch": 1.0,
"grad_norm": 359.6013698550439,
"learning_rate": 0.0,
"logits/chosen": -2.6531007289886475,
"logits/rejected": -2.540839195251465,
"logps/chosen": -249.09298706054688,
"logps/rejected": -204.24765014648438,
"loss": 0.146,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 3.7912139892578125,
"rewards/margins": 7.412507057189941,
"rewards/rejected": -3.621293306350708,
"step": 390
},
{
"epoch": 1.0,
"step": 390,
"total_flos": 0.0,
"train_loss": 0.19850983711389394,
"train_runtime": 5891.9709,
"train_samples_per_second": 8.486,
"train_steps_per_second": 0.066
}
],
"logging_steps": 10,
"max_steps": 390,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}