zephyr-7b-dpo-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 478,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 2193.325330109938,
"learning_rate": 1.0416666666666666e-08,
"logits/chosen": -1.689455509185791,
"logits/rejected": -1.4794573783874512,
"logps/chosen": -126.21005249023438,
"logps/rejected": -98.13133239746094,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.02,
"grad_norm": 1722.3750513007058,
"learning_rate": 1.0416666666666667e-07,
"logits/chosen": -1.7072248458862305,
"logits/rejected": -1.6097939014434814,
"logps/chosen": -139.610107421875,
"logps/rejected": -91.32671356201172,
"loss": 0.6992,
"rewards/accuracies": 0.4722222089767456,
"rewards/chosen": 0.05638541281223297,
"rewards/margins": 0.07169164717197418,
"rewards/rejected": -0.015306234359741211,
"step": 10
},
{
"epoch": 0.04,
"grad_norm": 506.4974128593864,
"learning_rate": 2.0833333333333333e-07,
"logits/chosen": -1.6386321783065796,
"logits/rejected": -1.6488521099090576,
"logps/chosen": -130.5824737548828,
"logps/rejected": -94.01884460449219,
"loss": 0.3992,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.0460855960845947,
"rewards/margins": 1.4136831760406494,
"rewards/rejected": -0.3675975203514099,
"step": 20
},
{
"epoch": 0.06,
"grad_norm": 471.66591122190783,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": -1.6968982219696045,
"logits/rejected": -1.6314315795898438,
"logps/chosen": -131.55714416503906,
"logps/rejected": -105.11051177978516,
"loss": 0.2288,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 3.3202426433563232,
"rewards/margins": 5.000655174255371,
"rewards/rejected": -1.680413007736206,
"step": 30
},
{
"epoch": 0.08,
"grad_norm": 420.8471101103042,
"learning_rate": 4.1666666666666667e-07,
"logits/chosen": -1.6393680572509766,
"logits/rejected": -1.560689926147461,
"logps/chosen": -142.81134033203125,
"logps/rejected": -108.94673156738281,
"loss": 0.1804,
"rewards/accuracies": 0.9375,
"rewards/chosen": 4.454810619354248,
"rewards/margins": 9.642589569091797,
"rewards/rejected": -5.187777996063232,
"step": 40
},
{
"epoch": 0.1,
"grad_norm": 759.4656267159737,
"learning_rate": 4.999733114418725e-07,
"logits/chosen": -1.6069341897964478,
"logits/rejected": -1.631784439086914,
"logps/chosen": -126.6369400024414,
"logps/rejected": -116.30027770996094,
"loss": 0.1844,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 4.2187395095825195,
"rewards/margins": 12.810827255249023,
"rewards/rejected": -8.59208869934082,
"step": 50
},
{
"epoch": 0.13,
"grad_norm": 361.53910661789246,
"learning_rate": 4.990398100856366e-07,
"logits/chosen": -1.7161242961883545,
"logits/rejected": -1.661266565322876,
"logps/chosen": -141.4559326171875,
"logps/rejected": -125.5120620727539,
"loss": 0.17,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": 4.772133827209473,
"rewards/margins": 16.64890480041504,
"rewards/rejected": -11.876771926879883,
"step": 60
},
{
"epoch": 0.15,
"grad_norm": 382.2685121568309,
"learning_rate": 4.967775735898179e-07,
"logits/chosen": -1.670915961265564,
"logits/rejected": -1.6990423202514648,
"logps/chosen": -134.56948852539062,
"logps/rejected": -126.52008056640625,
"loss": 0.1751,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 5.264647006988525,
"rewards/margins": 19.5419979095459,
"rewards/rejected": -14.277349472045898,
"step": 70
},
{
"epoch": 0.17,
"grad_norm": 337.7204736269515,
"learning_rate": 4.931986719649298e-07,
"logits/chosen": -1.658257246017456,
"logits/rejected": -1.6444251537322998,
"logps/chosen": -128.6245880126953,
"logps/rejected": -119.48954010009766,
"loss": 0.1656,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 5.917626857757568,
"rewards/margins": 18.79892349243164,
"rewards/rejected": -12.88129711151123,
"step": 80
},
{
"epoch": 0.19,
"grad_norm": 658.102190064697,
"learning_rate": 4.883222001996351e-07,
"logits/chosen": -1.6909303665161133,
"logits/rejected": -1.710211157798767,
"logps/chosen": -136.05978393554688,
"logps/rejected": -131.3511199951172,
"loss": 0.1561,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 6.575238227844238,
"rewards/margins": 21.304401397705078,
"rewards/rejected": -14.729164123535156,
"step": 90
},
{
"epoch": 0.21,
"grad_norm": 303.0257944851465,
"learning_rate": 4.821741763807186e-07,
"logits/chosen": -1.739341139793396,
"logits/rejected": -1.722266435623169,
"logps/chosen": -116.35429382324219,
"logps/rejected": -117.0191421508789,
"loss": 0.1474,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": 5.8117475509643555,
"rewards/margins": 18.994068145751953,
"rewards/rejected": -13.182321548461914,
"step": 100
},
{
"epoch": 0.21,
"eval_logits/chosen": -1.783400058746338,
"eval_logits/rejected": -1.7724130153656006,
"eval_logps/chosen": -124.02442932128906,
"eval_logps/rejected": -121.5073013305664,
"eval_loss": 0.13251493871212006,
"eval_rewards/accuracies": 0.9296875,
"eval_rewards/chosen": 6.873911380767822,
"eval_rewards/margins": 21.00718879699707,
"eval_rewards/rejected": -14.133277893066406,
"eval_runtime": 97.5707,
"eval_samples_per_second": 20.498,
"eval_steps_per_second": 0.328,
"step": 100
},
{
"epoch": 0.23,
"grad_norm": 311.0110467993666,
"learning_rate": 4.747874028753375e-07,
"logits/chosen": -1.6540296077728271,
"logits/rejected": -1.7143176794052124,
"logps/chosen": -120.4977035522461,
"logps/rejected": -124.83198547363281,
"loss": 0.1601,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 5.774500370025635,
"rewards/margins": 17.103343963623047,
"rewards/rejected": -11.32884407043457,
"step": 110
},
{
"epoch": 0.25,
"grad_norm": 317.1259568395023,
"learning_rate": 4.662012913161997e-07,
"logits/chosen": -1.6986665725708008,
"logits/rejected": -1.6894630193710327,
"logps/chosen": -118.8506851196289,
"logps/rejected": -125.60331726074219,
"loss": 0.1522,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 6.9937944412231445,
"rewards/margins": 19.12407875061035,
"rewards/rejected": -12.130284309387207,
"step": 120
},
{
"epoch": 0.27,
"grad_norm": 183.0416909345485,
"learning_rate": 4.5646165232345103e-07,
"logits/chosen": -1.6666994094848633,
"logits/rejected": -1.6851320266723633,
"logps/chosen": -124.86369323730469,
"logps/rejected": -121.08885192871094,
"loss": 0.1572,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 7.925422668457031,
"rewards/margins": 21.82646942138672,
"rewards/rejected": -13.901046752929688,
"step": 130
},
{
"epoch": 0.29,
"grad_norm": 323.2169573441021,
"learning_rate": 4.456204510851956e-07,
"logits/chosen": -1.5770366191864014,
"logits/rejected": -1.5510694980621338,
"logps/chosen": -122.3641128540039,
"logps/rejected": -117.8587646484375,
"loss": 0.1593,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 7.602517604827881,
"rewards/margins": 19.99860954284668,
"rewards/rejected": -12.396090507507324,
"step": 140
},
{
"epoch": 0.31,
"grad_norm": 394.76094620235625,
"learning_rate": 4.337355301007335e-07,
"logits/chosen": -1.717551827430725,
"logits/rejected": -1.7436870336532593,
"logps/chosen": -120.7677001953125,
"logps/rejected": -116.0407943725586,
"loss": 0.135,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 7.475555419921875,
"rewards/margins": 20.28847885131836,
"rewards/rejected": -12.812922477722168,
"step": 150
},
{
"epoch": 0.33,
"grad_norm": 696.9618085200943,
"learning_rate": 4.2087030056579986e-07,
"logits/chosen": -1.5951331853866577,
"logits/rejected": -1.5702898502349854,
"logps/chosen": -128.13714599609375,
"logps/rejected": -122.9395751953125,
"loss": 0.1778,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 6.914339542388916,
"rewards/margins": 18.91141700744629,
"rewards/rejected": -11.997075080871582,
"step": 160
},
{
"epoch": 0.36,
"grad_norm": 534.2420908517273,
"learning_rate": 4.070934040463998e-07,
"logits/chosen": -1.7780072689056396,
"logits/rejected": -1.7594010829925537,
"logps/chosen": -123.1135025024414,
"logps/rejected": -123.6323013305664,
"loss": 0.1817,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 8.014005661010742,
"rewards/margins": 21.994348526000977,
"rewards/rejected": -13.980344772338867,
"step": 170
},
{
"epoch": 0.38,
"grad_norm": 515.866307384091,
"learning_rate": 3.9247834624635404e-07,
"logits/chosen": -1.7234785556793213,
"logits/rejected": -1.715887427330017,
"logps/chosen": -126.28702545166016,
"logps/rejected": -118.8428726196289,
"loss": 0.1945,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 8.433076858520508,
"rewards/margins": 22.419658660888672,
"rewards/rejected": -13.986579895019531,
"step": 180
},
{
"epoch": 0.4,
"grad_norm": 442.14688359180957,
"learning_rate": 3.7710310482256523e-07,
"logits/chosen": -1.7688430547714233,
"logits/rejected": -1.7464864253997803,
"logps/chosen": -115.27445983886719,
"logps/rejected": -132.3951416015625,
"loss": 0.1843,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 8.262548446655273,
"rewards/margins": 23.41249656677246,
"rewards/rejected": -15.149948120117188,
"step": 190
},
{
"epoch": 0.42,
"grad_norm": 436.2379767277873,
"learning_rate": 3.610497133404795e-07,
"logits/chosen": -1.7461057901382446,
"logits/rejected": -1.7516930103302002,
"logps/chosen": -121.12789154052734,
"logps/rejected": -116.14985656738281,
"loss": 0.2092,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": 9.004249572753906,
"rewards/margins": 23.075119018554688,
"rewards/rejected": -14.070871353149414,
"step": 200
},
{
"epoch": 0.42,
"eval_logits/chosen": -1.8477407693862915,
"eval_logits/rejected": -1.844199538230896,
"eval_logps/chosen": -120.94989776611328,
"eval_logps/rejected": -123.32437896728516,
"eval_loss": 0.15667258203029633,
"eval_rewards/accuracies": 0.93359375,
"eval_rewards/chosen": 8.411178588867188,
"eval_rewards/margins": 23.453001022338867,
"eval_rewards/rejected": -15.04181957244873,
"eval_runtime": 97.6977,
"eval_samples_per_second": 20.471,
"eval_steps_per_second": 0.328,
"step": 200
},
{
"epoch": 0.44,
"grad_norm": 437.460750971005,
"learning_rate": 3.4440382358952115e-07,
"logits/chosen": -1.6800906658172607,
"logits/rejected": -1.6162738800048828,
"logps/chosen": -119.17988586425781,
"logps/rejected": -112.8397445678711,
"loss": 0.1771,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 6.705443382263184,
"rewards/margins": 18.32439613342285,
"rewards/rejected": -11.618951797485352,
"step": 210
},
{
"epoch": 0.46,
"grad_norm": 298.75015782601395,
"learning_rate": 3.272542485937368e-07,
"logits/chosen": -1.8080447912216187,
"logits/rejected": -1.8503248691558838,
"logps/chosen": -120.43497467041016,
"logps/rejected": -113.6927719116211,
"loss": 0.1853,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 8.020153045654297,
"rewards/margins": 21.10222053527832,
"rewards/rejected": -13.082064628601074,
"step": 220
},
{
"epoch": 0.48,
"grad_norm": 290.6447474042267,
"learning_rate": 3.096924887558854e-07,
"logits/chosen": -1.7483365535736084,
"logits/rejected": -1.6977851390838623,
"logps/chosen": -132.64036560058594,
"logps/rejected": -132.7303009033203,
"loss": 0.2348,
"rewards/accuracies": 0.9375,
"rewards/chosen": 7.534226894378662,
"rewards/margins": 23.992801666259766,
"rewards/rejected": -16.458572387695312,
"step": 230
},
{
"epoch": 0.5,
"grad_norm": 435.0583755248922,
"learning_rate": 2.9181224366319943e-07,
"logits/chosen": -1.7618516683578491,
"logits/rejected": -1.7764402627944946,
"logps/chosen": -119.7364273071289,
"logps/rejected": -124.50996398925781,
"loss": 0.2076,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 6.094353675842285,
"rewards/margins": 19.534603118896484,
"rewards/rejected": -13.440248489379883,
"step": 240
},
{
"epoch": 0.52,
"grad_norm": 506.7749182741835,
"learning_rate": 2.7370891215954565e-07,
"logits/chosen": -1.7425343990325928,
"logits/rejected": -1.7735121250152588,
"logps/chosen": -123.32588195800781,
"logps/rejected": -126.53437805175781,
"loss": 0.2561,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 7.04464054107666,
"rewards/margins": 21.820133209228516,
"rewards/rejected": -14.775495529174805,
"step": 250
},
{
"epoch": 0.54,
"grad_norm": 278.0299175856851,
"learning_rate": 2.55479083351317e-07,
"logits/chosen": -1.7782995700836182,
"logits/rejected": -1.7973874807357788,
"logps/chosen": -127.03205871582031,
"logps/rejected": -114.83714294433594,
"loss": 0.1749,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 8.397477149963379,
"rewards/margins": 23.37790298461914,
"rewards/rejected": -14.980427742004395,
"step": 260
},
{
"epoch": 0.56,
"grad_norm": 372.30589353402263,
"learning_rate": 2.3722002126275822e-07,
"logits/chosen": -1.7808539867401123,
"logits/rejected": -1.719679832458496,
"logps/chosen": -117.4173355102539,
"logps/rejected": -119.30290222167969,
"loss": 0.1772,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 7.320569038391113,
"rewards/margins": 21.35511589050293,
"rewards/rejected": -14.034547805786133,
"step": 270
},
{
"epoch": 0.59,
"grad_norm": 470.22249240997917,
"learning_rate": 2.19029145890313e-07,
"logits/chosen": -1.6393606662750244,
"logits/rejected": -1.7276159524917603,
"logps/chosen": -123.42005920410156,
"logps/rejected": -132.44947814941406,
"loss": 0.2305,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 6.818305969238281,
"rewards/margins": 22.368270874023438,
"rewards/rejected": -15.549966812133789,
"step": 280
},
{
"epoch": 0.61,
"grad_norm": 652.1091329463337,
"learning_rate": 2.0100351342479216e-07,
"logits/chosen": -1.7318487167358398,
"logits/rejected": -1.706648826599121,
"logps/chosen": -114.55335998535156,
"logps/rejected": -119.69996643066406,
"loss": 0.2179,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 6.804747104644775,
"rewards/margins": 22.096506118774414,
"rewards/rejected": -15.29175853729248,
"step": 290
},
{
"epoch": 0.63,
"grad_norm": 426.10148588025714,
"learning_rate": 1.8323929841460178e-07,
"logits/chosen": -1.699927568435669,
"logits/rejected": -1.6480505466461182,
"logps/chosen": -132.07632446289062,
"logps/rejected": -136.4704132080078,
"loss": 0.1925,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 7.1819257736206055,
"rewards/margins": 22.29709243774414,
"rewards/rejected": -15.11516284942627,
"step": 300
},
{
"epoch": 0.63,
"eval_logits/chosen": -1.8054125308990479,
"eval_logits/rejected": -1.804688572883606,
"eval_logps/chosen": -122.28070068359375,
"eval_logps/rejected": -126.64250946044922,
"eval_loss": 0.17152084410190582,
"eval_rewards/accuracies": 0.92578125,
"eval_rewards/chosen": 7.745772361755371,
"eval_rewards/margins": 24.44666290283203,
"eval_rewards/rejected": -16.700889587402344,
"eval_runtime": 97.5667,
"eval_samples_per_second": 20.499,
"eval_steps_per_second": 0.328,
"step": 300
},
{
"epoch": 0.65,
"grad_norm": 499.48262107192954,
"learning_rate": 1.6583128063291573e-07,
"logits/chosen": -1.7035776376724243,
"logits/rejected": -1.73648202419281,
"logps/chosen": -123.8221664428711,
"logps/rejected": -127.0335464477539,
"loss": 0.1993,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 7.757756233215332,
"rewards/margins": 22.004987716674805,
"rewards/rejected": -14.247230529785156,
"step": 310
},
{
"epoch": 0.67,
"grad_norm": 351.5168221076795,
"learning_rate": 1.488723393865766e-07,
"logits/chosen": -1.7380039691925049,
"logits/rejected": -1.7079343795776367,
"logps/chosen": -113.3768081665039,
"logps/rejected": -126.8635482788086,
"loss": 0.1891,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 8.71003532409668,
"rewards/margins": 24.014118194580078,
"rewards/rejected": -15.304081916809082,
"step": 320
},
{
"epoch": 0.69,
"grad_norm": 321.010971408508,
"learning_rate": 1.3245295796480788e-07,
"logits/chosen": -1.664240837097168,
"logits/rejected": -1.7476810216903687,
"logps/chosen": -123.50030517578125,
"logps/rejected": -122.0934066772461,
"loss": 0.1955,
"rewards/accuracies": 0.90625,
"rewards/chosen": 7.341310024261475,
"rewards/margins": 21.513980865478516,
"rewards/rejected": -14.172670364379883,
"step": 330
},
{
"epoch": 0.71,
"grad_norm": 251.55976817411812,
"learning_rate": 1.1666074087171627e-07,
"logits/chosen": -1.6232049465179443,
"logits/rejected": -1.6956592798233032,
"logps/chosen": -117.71810150146484,
"logps/rejected": -113.10723876953125,
"loss": 0.2458,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 6.386099815368652,
"rewards/margins": 19.121448516845703,
"rewards/rejected": -12.73534870147705,
"step": 340
},
{
"epoch": 0.73,
"grad_norm": 784.7175940461042,
"learning_rate": 1.0157994641835734e-07,
"logits/chosen": -1.6620721817016602,
"logits/rejected": -1.6772327423095703,
"logps/chosen": -117.80794525146484,
"logps/rejected": -132.93991088867188,
"loss": 0.1649,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 6.138510704040527,
"rewards/margins": 21.829065322875977,
"rewards/rejected": -15.690553665161133,
"step": 350
},
{
"epoch": 0.75,
"grad_norm": 497.1984887279012,
"learning_rate": 8.729103716819111e-08,
"logits/chosen": -1.6556260585784912,
"logits/rejected": -1.6805181503295898,
"logps/chosen": -124.04801177978516,
"logps/rejected": -128.13685607910156,
"loss": 0.1743,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 7.051167964935303,
"rewards/margins": 21.460834503173828,
"rewards/rejected": -14.40966796875,
"step": 360
},
{
"epoch": 0.77,
"grad_norm": 701.6355853905161,
"learning_rate": 7.387025063449081e-08,
"logits/chosen": -1.7627124786376953,
"logits/rejected": -1.7043575048446655,
"logps/chosen": -113.8396224975586,
"logps/rejected": -117.89778137207031,
"loss": 0.1661,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 6.275058269500732,
"rewards/margins": 18.96691131591797,
"rewards/rejected": -12.691854476928711,
"step": 370
},
{
"epoch": 0.79,
"grad_norm": 384.3232766408438,
"learning_rate": 6.138919252022435e-08,
"logits/chosen": -1.7891247272491455,
"logits/rejected": -1.8081142902374268,
"logps/chosen": -121.1161880493164,
"logps/rejected": -131.83599853515625,
"loss": 0.1823,
"rewards/accuracies": 0.9375,
"rewards/chosen": 7.768773078918457,
"rewards/margins": 24.082279205322266,
"rewards/rejected": -16.31351089477539,
"step": 380
},
{
"epoch": 0.82,
"grad_norm": 419.5923423572011,
"learning_rate": 4.991445467064689e-08,
"logits/chosen": -1.6696172952651978,
"logits/rejected": -1.6762924194335938,
"logps/chosen": -117.07814025878906,
"logps/rejected": -120.15535736083984,
"loss": 0.1486,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 7.70883846282959,
"rewards/margins": 22.634441375732422,
"rewards/rejected": -14.9256010055542,
"step": 390
},
{
"epoch": 0.84,
"grad_norm": 548.5530090451983,
"learning_rate": 3.9507259776993954e-08,
"logits/chosen": -1.6570144891738892,
"logits/rejected": -1.7342147827148438,
"logps/chosen": -119.59893798828125,
"logps/rejected": -120.49696350097656,
"loss": 0.2762,
"rewards/accuracies": 0.90625,
"rewards/chosen": 6.581268310546875,
"rewards/margins": 21.039697647094727,
"rewards/rejected": -14.4584321975708,
"step": 400
},
{
"epoch": 0.84,
"eval_logits/chosen": -1.8335998058319092,
"eval_logits/rejected": -1.8335609436035156,
"eval_logps/chosen": -121.38908386230469,
"eval_logps/rejected": -126.29609680175781,
"eval_loss": 0.16946831345558167,
"eval_rewards/accuracies": 0.9296875,
"eval_rewards/chosen": 8.191582679748535,
"eval_rewards/margins": 24.719266891479492,
"eval_rewards/rejected": -16.527685165405273,
"eval_runtime": 97.6978,
"eval_samples_per_second": 20.471,
"eval_steps_per_second": 0.328,
"step": 400
},
{
"epoch": 0.86,
"grad_norm": 482.27830673225736,
"learning_rate": 3.022313472693447e-08,
"logits/chosen": -1.7594387531280518,
"logits/rejected": -1.7884505987167358,
"logps/chosen": -130.15711975097656,
"logps/rejected": -124.42222595214844,
"loss": 0.2025,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 8.578957557678223,
"rewards/margins": 24.737140655517578,
"rewards/rejected": -16.158184051513672,
"step": 410
},
{
"epoch": 0.88,
"grad_norm": 378.46967638932284,
"learning_rate": 2.2111614344599684e-08,
"logits/chosen": -1.7343389987945557,
"logits/rejected": -1.731032371520996,
"logps/chosen": -115.0967025756836,
"logps/rejected": -119.22444915771484,
"loss": 0.1669,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 6.606478691101074,
"rewards/margins": 20.886428833007812,
"rewards/rejected": -14.279950141906738,
"step": 420
},
{
"epoch": 0.9,
"grad_norm": 500.1474042235916,
"learning_rate": 1.521597710086439e-08,
"logits/chosen": -1.6852420568466187,
"logits/rejected": -1.7072862386703491,
"logps/chosen": -131.11505126953125,
"logps/rejected": -119.86601257324219,
"loss": 0.1861,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 7.49709939956665,
"rewards/margins": 20.68734359741211,
"rewards/rejected": -13.190244674682617,
"step": 430
},
{
"epoch": 0.92,
"grad_norm": 310.90711668513416,
"learning_rate": 9.57301420397924e-09,
"logits/chosen": -1.8146944046020508,
"logits/rejected": -1.8261439800262451,
"logps/chosen": -122.62892150878906,
"logps/rejected": -125.58287048339844,
"loss": 0.1762,
"rewards/accuracies": 0.9375,
"rewards/chosen": 8.172103881835938,
"rewards/margins": 24.26197624206543,
"rewards/rejected": -16.089872360229492,
"step": 440
},
{
"epoch": 0.94,
"grad_norm": 445.19377861997515,
"learning_rate": 5.212833302556258e-09,
"logits/chosen": -1.8277533054351807,
"logits/rejected": -1.7913105487823486,
"logps/chosen": -117.95567321777344,
"logps/rejected": -121.9552001953125,
"loss": 0.2023,
"rewards/accuracies": 0.90625,
"rewards/chosen": 8.675628662109375,
"rewards/margins": 25.467309951782227,
"rewards/rejected": -16.79167938232422,
"step": 450
},
{
"epoch": 0.96,
"grad_norm": 209.63658036103772,
"learning_rate": 2.158697848236607e-09,
"logits/chosen": -1.7152612209320068,
"logits/rejected": -1.7372353076934814,
"logps/chosen": -122.6450424194336,
"logps/rejected": -125.43870544433594,
"loss": 0.1507,
"rewards/accuracies": 0.9375,
"rewards/chosen": 8.354387283325195,
"rewards/margins": 23.259435653686523,
"rewards/rejected": -14.905046463012695,
"step": 460
},
{
"epoch": 0.98,
"grad_norm": 282.1159277085459,
"learning_rate": 4.269029751107489e-10,
"logits/chosen": -1.691461205482483,
"logits/rejected": -1.706369161605835,
"logps/chosen": -116.26470947265625,
"logps/rejected": -135.1427459716797,
"loss": 0.1833,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 6.723459720611572,
"rewards/margins": 22.407346725463867,
"rewards/rejected": -15.683886528015137,
"step": 470
},
{
"epoch": 1.0,
"step": 478,
"total_flos": 0.0,
"train_loss": 0.20260818084413537,
"train_runtime": 7642.1158,
"train_samples_per_second": 8.0,
"train_steps_per_second": 0.063
}
],
"logging_steps": 10,
"max_steps": 478,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}