{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1102,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009074410163339383,
"grad_norm": 142.2236804737182,
"learning_rate": 4.504504504504504e-09,
"logits/chosen": -2.992155075073242,
"logits/rejected": -2.8812735080718994,
"logps/chosen": -496.1777038574219,
"logps/rejected": -286.02813720703125,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.009074410163339383,
"grad_norm": 117.29702644691126,
"learning_rate": 4.504504504504504e-08,
"logits/chosen": -3.0335752964019775,
"logits/rejected": -2.898245334625244,
"logps/chosen": -439.4527587890625,
"logps/rejected": -257.0932312011719,
"loss": 0.6922,
"rewards/accuracies": 0.4027777910232544,
"rewards/chosen": -0.0025693608913570642,
"rewards/margins": -0.003381188027560711,
"rewards/rejected": 0.000811827601864934,
"step": 10
},
{
"epoch": 0.018148820326678767,
"grad_norm": 121.55880301059094,
"learning_rate": 9.009009009009008e-08,
"logits/chosen": -2.9626657962799072,
"logits/rejected": -2.811065435409546,
"logps/chosen": -350.26495361328125,
"logps/rejected": -255.7550811767578,
"loss": 0.6818,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.023313423618674278,
"rewards/margins": 0.02311089262366295,
"rewards/rejected": 0.00020253304683137685,
"step": 20
},
{
"epoch": 0.02722323049001815,
"grad_norm": 100.93699671377806,
"learning_rate": 1.3513513513513515e-07,
"logits/chosen": -2.9238393306732178,
"logits/rejected": -2.7606401443481445,
"logps/chosen": -429.740966796875,
"logps/rejected": -255.2905731201172,
"loss": 0.6275,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": 0.16224327683448792,
"rewards/margins": 0.1733538806438446,
"rewards/rejected": -0.01111060194671154,
"step": 30
},
{
"epoch": 0.036297640653357534,
"grad_norm": 59.986377877914826,
"learning_rate": 1.8018018018018017e-07,
"logits/chosen": -3.002030372619629,
"logits/rejected": -2.842498779296875,
"logps/chosen": -454.6031188964844,
"logps/rejected": -251.68063354492188,
"loss": 0.5225,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.473835289478302,
"rewards/margins": 0.5063358545303345,
"rewards/rejected": -0.03250055015087128,
"step": 40
},
{
"epoch": 0.045372050816696916,
"grad_norm": 50.035828434181596,
"learning_rate": 2.2522522522522522e-07,
"logits/chosen": -2.9417529106140137,
"logits/rejected": -2.826834201812744,
"logps/chosen": -385.17877197265625,
"logps/rejected": -235.72042846679688,
"loss": 0.4419,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": 0.9201729893684387,
"rewards/margins": 1.0462477207183838,
"rewards/rejected": -0.12607479095458984,
"step": 50
},
{
"epoch": 0.0544464609800363,
"grad_norm": 49.18557390967671,
"learning_rate": 2.702702702702703e-07,
"logits/chosen": -2.95581316947937,
"logits/rejected": -2.827500581741333,
"logps/chosen": -356.5251770019531,
"logps/rejected": -240.679931640625,
"loss": 0.3881,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 1.303996205329895,
"rewards/margins": 1.6420990228652954,
"rewards/rejected": -0.33810287714004517,
"step": 60
},
{
"epoch": 0.06352087114337568,
"grad_norm": 39.424363627526176,
"learning_rate": 3.153153153153153e-07,
"logits/chosen": -3.004495620727539,
"logits/rejected": -2.840688467025757,
"logps/chosen": -372.34857177734375,
"logps/rejected": -231.1216278076172,
"loss": 0.3465,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.3726818561553955,
"rewards/margins": 1.8177658319473267,
"rewards/rejected": -0.44508394598960876,
"step": 70
},
{
"epoch": 0.07259528130671507,
"grad_norm": 45.15703470841338,
"learning_rate": 3.6036036036036033e-07,
"logits/chosen": -2.994868516921997,
"logits/rejected": -2.897393226623535,
"logps/chosen": -365.3287353515625,
"logps/rejected": -249.77969360351562,
"loss": 0.3355,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 1.2107493877410889,
"rewards/margins": 1.8611555099487305,
"rewards/rejected": -0.6504060626029968,
"step": 80
},
{
"epoch": 0.08166969147005444,
"grad_norm": 38.281035325477184,
"learning_rate": 4.054054054054054e-07,
"logits/chosen": -3.043982982635498,
"logits/rejected": -2.9395782947540283,
"logps/chosen": -421.8617248535156,
"logps/rejected": -285.7194519042969,
"loss": 0.2828,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.4924074411392212,
"rewards/margins": 2.530264377593994,
"rewards/rejected": -1.0378568172454834,
"step": 90
},
{
"epoch": 0.09074410163339383,
"grad_norm": 31.39224573581566,
"learning_rate": 4.5045045045045043e-07,
"logits/chosen": -3.025484561920166,
"logits/rejected": -2.930591583251953,
"logps/chosen": -398.21221923828125,
"logps/rejected": -283.81158447265625,
"loss": 0.2717,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 1.310978651046753,
"rewards/margins": 2.4949724674224854,
"rewards/rejected": -1.1839935779571533,
"step": 100
},
{
"epoch": 0.0998185117967332,
"grad_norm": 36.97628979551037,
"learning_rate": 4.954954954954955e-07,
"logits/chosen": -3.070251941680908,
"logits/rejected": -2.9774134159088135,
"logps/chosen": -334.50146484375,
"logps/rejected": -254.9846649169922,
"loss": 0.2879,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 1.2126104831695557,
"rewards/margins": 2.4223546981811523,
"rewards/rejected": -1.2097440958023071,
"step": 110
},
{
"epoch": 0.1088929219600726,
"grad_norm": 36.40105021641922,
"learning_rate": 4.99898253844669e-07,
"logits/chosen": -3.031291961669922,
"logits/rejected": -2.954357862472534,
"logps/chosen": -379.811767578125,
"logps/rejected": -316.3371276855469,
"loss": 0.2486,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 1.340606689453125,
"rewards/margins": 3.5223803520202637,
"rewards/rejected": -2.1817739009857178,
"step": 120
},
{
"epoch": 0.11796733212341198,
"grad_norm": 33.76463840674312,
"learning_rate": 4.995466450646198e-07,
"logits/chosen": -3.1293764114379883,
"logits/rejected": -3.0540575981140137,
"logps/chosen": -377.92376708984375,
"logps/rejected": -272.8460388183594,
"loss": 0.2578,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.2404711246490479,
"rewards/margins": 3.5550155639648438,
"rewards/rejected": -2.314544677734375,
"step": 130
},
{
"epoch": 0.12704174228675136,
"grad_norm": 39.029774483087,
"learning_rate": 4.989442707764628e-07,
"logits/chosen": -3.071303129196167,
"logits/rejected": -3.0478246212005615,
"logps/chosen": -376.86480712890625,
"logps/rejected": -292.97735595703125,
"loss": 0.2579,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.3565551042556763,
"rewards/margins": 3.8431458473205566,
"rewards/rejected": -2.4865901470184326,
"step": 140
},
{
"epoch": 0.13611615245009073,
"grad_norm": 43.986124060871525,
"learning_rate": 4.980917362966688e-07,
"logits/chosen": -3.1637825965881348,
"logits/rejected": -3.027613401412964,
"logps/chosen": -415.8460388183594,
"logps/rejected": -311.55364990234375,
"loss": 0.2438,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.2525231838226318,
"rewards/margins": 4.118167400360107,
"rewards/rejected": -2.8656444549560547,
"step": 150
},
{
"epoch": 0.14519056261343014,
"grad_norm": 46.28933752084163,
"learning_rate": 4.969898983237597e-07,
"logits/chosen": -3.1585474014282227,
"logits/rejected": -3.050255298614502,
"logps/chosen": -353.08795166015625,
"logps/rejected": -283.6133117675781,
"loss": 0.2386,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.1768524646759033,
"rewards/margins": 3.947617769241333,
"rewards/rejected": -2.7707653045654297,
"step": 160
},
{
"epoch": 0.1542649727767695,
"grad_norm": 40.8112800014664,
"learning_rate": 4.95639864077426e-07,
"logits/chosen": -3.096083164215088,
"logits/rejected": -3.021690845489502,
"logps/chosen": -412.97119140625,
"logps/rejected": -300.5186462402344,
"loss": 0.2598,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.2744516134262085,
"rewards/margins": 4.562188148498535,
"rewards/rejected": -3.287736177444458,
"step": 170
},
{
"epoch": 0.16333938294010888,
"grad_norm": 39.2976511256155,
"learning_rate": 4.940429901858992e-07,
"logits/chosen": -3.0431342124938965,
"logits/rejected": -2.972602605819702,
"logps/chosen": -365.78326416015625,
"logps/rejected": -281.6066589355469,
"loss": 0.2303,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 1.166215181350708,
"rewards/margins": 4.303629398345947,
"rewards/rejected": -3.1374142169952393,
"step": 180
},
{
"epoch": 0.1724137931034483,
"grad_norm": 34.32156443441858,
"learning_rate": 4.922008813226972e-07,
"logits/chosen": -3.0900955200195312,
"logits/rejected": -2.9535610675811768,
"logps/chosen": -395.902099609375,
"logps/rejected": -305.49871826171875,
"loss": 0.2338,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.2969857454299927,
"rewards/margins": 4.443634986877441,
"rewards/rejected": -3.14664888381958,
"step": 190
},
{
"epoch": 0.18148820326678766,
"grad_norm": 44.14419053367824,
"learning_rate": 4.901153885941126e-07,
"logits/chosen": -3.0704243183135986,
"logits/rejected": -2.931751251220703,
"logps/chosen": -411.55560302734375,
"logps/rejected": -368.4609375,
"loss": 0.2297,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.5352587699890137,
"rewards/margins": 5.012287616729736,
"rewards/rejected": -3.4770290851593018,
"step": 200
},
{
"epoch": 0.19056261343012704,
"grad_norm": 30.76712341757137,
"learning_rate": 4.877886076790663e-07,
"logits/chosen": -2.944895029067993,
"logits/rejected": -2.8378586769104004,
"logps/chosen": -375.783203125,
"logps/rejected": -300.9497375488281,
"loss": 0.2302,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 1.2123253345489502,
"rewards/margins": 4.578190803527832,
"rewards/rejected": -3.3658649921417236,
"step": 210
},
{
"epoch": 0.1996370235934664,
"grad_norm": 37.33134180621657,
"learning_rate": 4.852228767231913e-07,
"logits/chosen": -3.0588059425354004,
"logits/rejected": -2.8839163780212402,
"logps/chosen": -387.7483215332031,
"logps/rejected": -300.7808532714844,
"loss": 0.2294,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.5753682851791382,
"rewards/margins": 5.524721145629883,
"rewards/rejected": -3.949352741241455,
"step": 220
},
{
"epoch": 0.20871143375680581,
"grad_norm": 38.73538764825298,
"learning_rate": 4.824207739892674e-07,
"logits/chosen": -3.059415102005005,
"logits/rejected": -2.958604335784912,
"logps/chosen": -406.916015625,
"logps/rejected": -364.64337158203125,
"loss": 0.212,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.1598107814788818,
"rewards/margins": 4.741861343383789,
"rewards/rejected": -3.5820508003234863,
"step": 230
},
{
"epoch": 0.2177858439201452,
"grad_norm": 32.6395207117509,
"learning_rate": 4.793851152663654e-07,
"logits/chosen": -2.9935600757598877,
"logits/rejected": -2.83933687210083,
"logps/chosen": -371.41241455078125,
"logps/rejected": -300.81817626953125,
"loss": 0.2398,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 0.9606507420539856,
"rewards/margins": 4.657492160797119,
"rewards/rejected": -3.6968414783477783,
"step": 240
},
{
"epoch": 0.22686025408348456,
"grad_norm": 42.815736529820505,
"learning_rate": 4.7611895104030507e-07,
"logits/chosen": -2.9664015769958496,
"logits/rejected": -2.933074474334717,
"logps/chosen": -398.3942565917969,
"logps/rejected": -339.1114501953125,
"loss": 0.2072,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.0510776042938232,
"rewards/margins": 5.130535125732422,
"rewards/rejected": -4.079457759857178,
"step": 250
},
{
"epoch": 0.23593466424682397,
"grad_norm": 48.49290374090693,
"learning_rate": 4.726255634282693e-07,
"logits/chosen": -2.9970316886901855,
"logits/rejected": -2.9537835121154785,
"logps/chosen": -392.30181884765625,
"logps/rejected": -379.0828857421875,
"loss": 0.2264,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 0.7763628959655762,
"rewards/margins": 4.426127910614014,
"rewards/rejected": -3.6497650146484375,
"step": 260
},
{
"epoch": 0.24500907441016334,
"grad_norm": 32.77780026693269,
"learning_rate": 4.689084628806562e-07,
"logits/chosen": -2.927907943725586,
"logits/rejected": -2.8476579189300537,
"logps/chosen": -345.444091796875,
"logps/rejected": -308.51922607421875,
"loss": 0.2017,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 0.9452263116836548,
"rewards/margins": 5.563403129577637,
"rewards/rejected": -4.61817741394043,
"step": 270
},
{
"epoch": 0.2540834845735027,
"grad_norm": 43.514698543958396,
"learning_rate": 4.6497138465348296e-07,
"logits/chosen": -3.086697578430176,
"logits/rejected": -2.8975157737731934,
"logps/chosen": -374.7484436035156,
"logps/rejected": -321.0020446777344,
"loss": 0.2246,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 0.891644299030304,
"rewards/margins": 5.307863712310791,
"rewards/rejected": -4.416219234466553,
"step": 280
},
{
"epoch": 0.2631578947368421,
"grad_norm": 52.94657770019182,
"learning_rate": 4.608182850548852e-07,
"logits/chosen": -3.0034172534942627,
"logits/rejected": -2.9029269218444824,
"logps/chosen": -367.73492431640625,
"logps/rejected": -322.0057373046875,
"loss": 0.1981,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.3871870040893555,
"rewards/margins": 5.972929954528809,
"rewards/rejected": -4.585742950439453,
"step": 290
},
{
"epoch": 0.27223230490018147,
"grad_norm": 177.99829370761782,
"learning_rate": 4.564533374694852e-07,
"logits/chosen": -3.0038394927978516,
"logits/rejected": -2.9105029106140137,
"logps/chosen": -433.2913513183594,
"logps/rejected": -328.7992858886719,
"loss": 0.1692,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.1000345945358276,
"rewards/margins": 5.619580268859863,
"rewards/rejected": -4.519545555114746,
"step": 300
},
{
"epoch": 0.2813067150635209,
"grad_norm": 50.49886074130672,
"learning_rate": 4.518809281646232e-07,
"logits/chosen": -2.985572338104248,
"logits/rejected": -2.889586925506592,
"logps/chosen": -393.39849853515625,
"logps/rejected": -343.38818359375,
"loss": 0.1725,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.2861218452453613,
"rewards/margins": 5.814927577972412,
"rewards/rejected": -4.528805732727051,
"step": 310
},
{
"epoch": 0.29038112522686027,
"grad_norm": 55.02234775232488,
"learning_rate": 4.4710565188266623e-07,
"logits/chosen": -2.9454689025878906,
"logits/rejected": -2.8238115310668945,
"logps/chosen": -402.4577331542969,
"logps/rejected": -325.34228515625,
"loss": 0.1948,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.1505086421966553,
"rewards/margins": 6.387723445892334,
"rewards/rejected": -5.237215042114258,
"step": 320
},
{
"epoch": 0.29945553539019965,
"grad_norm": 39.01256666197835,
"learning_rate": 4.4213230722382343e-07,
"logits/chosen": -2.9265170097351074,
"logits/rejected": -2.8057961463928223,
"logps/chosen": -395.21783447265625,
"logps/rejected": -351.7288513183594,
"loss": 0.1713,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 1.236005187034607,
"rewards/margins": 6.866035461425781,
"rewards/rejected": -5.630031108856201,
"step": 330
},
{
"epoch": 0.308529945553539,
"grad_norm": 47.58493724390636,
"learning_rate": 4.3696589182410805e-07,
"logits/chosen": -2.938333749771118,
"logits/rejected": -2.813967704772949,
"logps/chosen": -355.73797607421875,
"logps/rejected": -356.24774169921875,
"loss": 0.2,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.1646188497543335,
"rewards/margins": 5.591378211975098,
"rewards/rejected": -4.426760196685791,
"step": 340
},
{
"epoch": 0.3176043557168784,
"grad_norm": 39.65265947598852,
"learning_rate": 4.3161159733329143e-07,
"logits/chosen": -2.99360728263855,
"logits/rejected": -2.904939651489258,
"logps/chosen": -418.7228088378906,
"logps/rejected": -416.0142517089844,
"loss": 0.2203,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 0.9050772786140442,
"rewards/margins": 5.665884494781494,
"rewards/rejected": -4.760807514190674,
"step": 350
},
{
"epoch": 0.32667876588021777,
"grad_norm": 43.14961975290785,
"learning_rate": 4.2607480419789587e-07,
"logits/chosen": -2.92106556892395,
"logits/rejected": -2.852785587310791,
"logps/chosen": -333.32525634765625,
"logps/rejected": -300.5793151855469,
"loss": 0.1981,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 0.9021322131156921,
"rewards/margins": 5.309260368347168,
"rewards/rejected": -4.40712833404541,
"step": 360
},
{
"epoch": 0.33575317604355714,
"grad_norm": 27.974944443111728,
"learning_rate": 4.2036107625446783e-07,
"logits/chosen": -2.98514986038208,
"logits/rejected": -2.78173828125,
"logps/chosen": -397.83782958984375,
"logps/rejected": -335.9725646972656,
"loss": 0.1841,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.3740975856781006,
"rewards/margins": 7.041192531585693,
"rewards/rejected": -5.667096138000488,
"step": 370
},
{
"epoch": 0.3448275862068966,
"grad_norm": 35.04567436633488,
"learning_rate": 4.1447615513856635e-07,
"logits/chosen": -2.9535293579101562,
"logits/rejected": -2.8194968700408936,
"logps/chosen": -407.129150390625,
"logps/rejected": -377.0103759765625,
"loss": 0.1889,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.0056817531585693,
"rewards/margins": 5.7427191734313965,
"rewards/rejected": -4.737037181854248,
"step": 380
},
{
"epoch": 0.35390199637023595,
"grad_norm": 45.256080246210836,
"learning_rate": 4.084259545150832e-07,
"logits/chosen": -2.968611240386963,
"logits/rejected": -2.848597288131714,
"logps/chosen": -392.96746826171875,
"logps/rejected": -327.70751953125,
"loss": 0.2033,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.9895534515380859,
"rewards/margins": 6.1571173667907715,
"rewards/rejected": -5.167563438415527,
"step": 390
},
{
"epoch": 0.3629764065335753,
"grad_norm": 41.85296728501026,
"learning_rate": 4.022165541356941e-07,
"logits/chosen": -2.9857497215270996,
"logits/rejected": -2.7626802921295166,
"logps/chosen": -396.25079345703125,
"logps/rejected": -345.28436279296875,
"loss": 0.1963,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.164312481880188,
"rewards/margins": 7.196902275085449,
"rewards/rejected": -6.032589912414551,
"step": 400
},
{
"epoch": 0.3720508166969147,
"grad_norm": 62.753257963550354,
"learning_rate": 3.9585419372941163e-07,
"logits/chosen": -2.9352316856384277,
"logits/rejected": -2.838834762573242,
"logps/chosen": -389.45428466796875,
"logps/rejected": -342.6952209472656,
"loss": 0.2307,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.777059018611908,
"rewards/margins": 6.305887222290039,
"rewards/rejected": -5.5288286209106445,
"step": 410
},
{
"epoch": 0.3811252268602541,
"grad_norm": 48.98248771165015,
"learning_rate": 3.893452667323793e-07,
"logits/chosen": -3.0265731811523438,
"logits/rejected": -2.9187612533569336,
"logps/chosen": -394.258544921875,
"logps/rejected": -386.22991943359375,
"loss": 0.1854,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.1776565313339233,
"rewards/margins": 7.554742336273193,
"rewards/rejected": -6.3770856857299805,
"step": 420
},
{
"epoch": 0.39019963702359345,
"grad_norm": 39.229272199302606,
"learning_rate": 3.826963138632079e-07,
"logits/chosen": -2.914794683456421,
"logits/rejected": -2.7973687648773193,
"logps/chosen": -372.42962646484375,
"logps/rejected": -334.1484375,
"loss": 0.2196,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.8507086634635925,
"rewards/margins": 6.879024505615234,
"rewards/rejected": -6.028316020965576,
"step": 430
},
{
"epoch": 0.3992740471869328,
"grad_norm": 37.98126277398323,
"learning_rate": 3.759140165503101e-07,
"logits/chosen": -3.016681671142578,
"logits/rejected": -2.8523800373077393,
"logps/chosen": -371.7410583496094,
"logps/rejected": -334.4661560058594,
"loss": 0.2039,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.0404958724975586,
"rewards/margins": 7.069964408874512,
"rewards/rejected": -6.029468536376953,
"step": 440
},
{
"epoch": 0.40834845735027225,
"grad_norm": 32.62957193935001,
"learning_rate": 3.6900519021783783e-07,
"logits/chosen": -3.0138421058654785,
"logits/rejected": -2.8914031982421875,
"logps/chosen": -382.3927917480469,
"logps/rejected": -346.4029541015625,
"loss": 0.1882,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.9160375595092773,
"rewards/margins": 6.041922569274902,
"rewards/rejected": -5.125885486602783,
"step": 450
},
{
"epoch": 0.41742286751361163,
"grad_norm": 49.245544047890604,
"learning_rate": 3.619767774369694e-07,
"logits/chosen": -3.032761812210083,
"logits/rejected": -2.8894588947296143,
"logps/chosen": -375.8642578125,
"logps/rejected": -359.11322021484375,
"loss": 0.2369,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 0.7896968126296997,
"rewards/margins": 5.545886039733887,
"rewards/rejected": -4.756189346313477,
"step": 460
},
{
"epoch": 0.426497277676951,
"grad_norm": 40.38378734951698,
"learning_rate": 3.548358409494291e-07,
"logits/chosen": -3.0017242431640625,
"logits/rejected": -2.8712053298950195,
"logps/chosen": -403.6952209472656,
"logps/rejected": -327.45025634765625,
"loss": 0.2113,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 0.7638002634048462,
"rewards/margins": 5.509848594665527,
"rewards/rejected": -4.746048450469971,
"step": 470
},
{
"epoch": 0.4355716878402904,
"grad_norm": 42.20523053538018,
"learning_rate": 3.475895565702479e-07,
"logits/chosen": -3.0346570014953613,
"logits/rejected": -2.8783650398254395,
"logps/chosen": -402.5455322265625,
"logps/rejected": -357.03704833984375,
"loss": 0.1601,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 0.7746015787124634,
"rewards/margins": 6.277700424194336,
"rewards/rejected": -5.503098487854004,
"step": 480
},
{
"epoch": 0.44464609800362975,
"grad_norm": 44.76927870480236,
"learning_rate": 3.402452059769006e-07,
"logits/chosen": -2.947755813598633,
"logits/rejected": -2.7606940269470215,
"logps/chosen": -388.6606140136719,
"logps/rejected": -330.046142578125,
"loss": 0.1948,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.0170588493347168,
"rewards/margins": 6.382786273956299,
"rewards/rejected": -5.365727424621582,
"step": 490
},
{
"epoch": 0.4537205081669691,
"grad_norm": 52.753061991347224,
"learning_rate": 3.3281016939206175e-07,
"logits/chosen": -2.9896976947784424,
"logits/rejected": -2.8140571117401123,
"logps/chosen": -393.71551513671875,
"logps/rejected": -367.58056640625,
"loss": 0.2108,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 0.8480059504508972,
"rewards/margins": 6.418947696685791,
"rewards/rejected": -5.570941925048828,
"step": 500
},
{
"epoch": 0.4627949183303085,
"grad_norm": 33.81897197547977,
"learning_rate": 3.2529191816733575e-07,
"logits/chosen": -2.9537408351898193,
"logits/rejected": -2.8202428817749023,
"logps/chosen": -399.65386962890625,
"logps/rejected": -376.10516357421875,
"loss": 0.1827,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.8464071154594421,
"rewards/margins": 6.456235408782959,
"rewards/rejected": -5.609827995300293,
"step": 510
},
{
"epoch": 0.47186932849364793,
"grad_norm": 54.599453283610345,
"learning_rate": 3.1769800727541315e-07,
"logits/chosen": -2.878075361251831,
"logits/rejected": -2.6963212490081787,
"logps/chosen": -387.65777587890625,
"logps/rejected": -343.40325927734375,
"loss": 0.1787,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 0.7126725912094116,
"rewards/margins": 7.416208744049072,
"rewards/rejected": -6.703535556793213,
"step": 520
},
{
"epoch": 0.4809437386569873,
"grad_norm": 38.57546103390298,
"learning_rate": 3.1003606771819666e-07,
"logits/chosen": -2.9312281608581543,
"logits/rejected": -2.7162327766418457,
"logps/chosen": -394.450927734375,
"logps/rejected": -363.7554931640625,
"loss": 0.189,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 0.8061065673828125,
"rewards/margins": 6.801262855529785,
"rewards/rejected": -5.995156288146973,
"step": 530
},
{
"epoch": 0.4900181488203267,
"grad_norm": 40.112845944693504,
"learning_rate": 3.023137988585276e-07,
"logits/chosen": -2.88523006439209,
"logits/rejected": -2.7553329467773438,
"logps/chosen": -389.08917236328125,
"logps/rejected": -401.0629577636719,
"loss": 0.2119,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 0.9484742879867554,
"rewards/margins": 6.505688667297363,
"rewards/rejected": -5.557214260101318,
"step": 540
},
{
"epoch": 0.49909255898366606,
"grad_norm": 58.41683522614029,
"learning_rate": 2.945389606832165e-07,
"logits/chosen": -2.8954288959503174,
"logits/rejected": -2.743122100830078,
"logps/chosen": -406.09307861328125,
"logps/rejected": -352.92401123046875,
"loss": 0.2069,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.2253668308258057,
"rewards/margins": 8.006866455078125,
"rewards/rejected": -6.78149938583374,
"step": 550
},
{
"epoch": 0.5081669691470054,
"grad_norm": 45.17941049122177,
"learning_rate": 2.8671936600515445e-07,
"logits/chosen": -2.9202160835266113,
"logits/rejected": -2.773153066635132,
"logps/chosen": -373.97967529296875,
"logps/rejected": -369.88433837890625,
"loss": 0.1702,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 0.7372530698776245,
"rewards/margins": 6.256679534912109,
"rewards/rejected": -5.5194268226623535,
"step": 560
},
{
"epoch": 0.5172413793103449,
"grad_norm": 45.0884998444585,
"learning_rate": 2.788628726123399e-07,
"logits/chosen": -2.9033942222595215,
"logits/rejected": -2.814613103866577,
"logps/chosen": -372.53851318359375,
"logps/rejected": -321.98760986328125,
"loss": 0.2037,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.7214611768722534,
"rewards/margins": 6.599020957946777,
"rewards/rejected": -5.877559661865234,
"step": 570
},
{
"epoch": 0.5263157894736842,
"grad_norm": 42.70236030373264,
"learning_rate": 2.7097737537171095e-07,
"logits/chosen": -2.9954402446746826,
"logits/rejected": -2.794814109802246,
"logps/chosen": -385.25457763671875,
"logps/rejected": -384.85858154296875,
"loss": 0.1929,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.578790009021759,
"rewards/margins": 6.581429481506348,
"rewards/rejected": -6.0026397705078125,
"step": 580
},
{
"epoch": 0.5353901996370236,
"grad_norm": 29.334160089625406,
"learning_rate": 2.6307079829571685e-07,
"logits/chosen": -2.958986282348633,
"logits/rejected": -2.819699764251709,
"logps/chosen": -398.150146484375,
"logps/rejected": -386.773193359375,
"loss": 0.1857,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.7929005026817322,
"rewards/margins": 6.71899938583374,
"rewards/rejected": -5.9260993003845215,
"step": 590
},
{
"epoch": 0.5444646098003629,
"grad_norm": 29.399030894562397,
"learning_rate": 2.551510865796032e-07,
"logits/chosen": -2.888155698776245,
"logits/rejected": -2.7868504524230957,
"logps/chosen": -325.9752502441406,
"logps/rejected": -362.0836486816406,
"loss": 0.1832,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.4539141058921814,
"rewards/margins": 7.052548885345459,
"rewards/rejected": -6.598635196685791,
"step": 600
},
{
"epoch": 0.5535390199637024,
"grad_norm": 36.75793480010378,
"learning_rate": 2.472261986174088e-07,
"logits/chosen": -2.8816776275634766,
"logits/rejected": -2.7344906330108643,
"logps/chosen": -430.36065673828125,
"logps/rejected": -403.3946838378906,
"loss": 0.1992,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.22524046897888184,
"rewards/margins": 5.51934814453125,
"rewards/rejected": -5.294107913970947,
"step": 610
},
{
"epoch": 0.5626134301270418,
"grad_norm": 40.826798483602005,
"learning_rate": 2.393040980047015e-07,
"logits/chosen": -2.967729091644287,
"logits/rejected": -2.8321826457977295,
"logps/chosen": -422.4341735839844,
"logps/rejected": -382.2395324707031,
"loss": 0.1743,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 0.7554537057876587,
"rewards/margins": 7.423792839050293,
"rewards/rejected": -6.668339729309082,
"step": 620
},
{
"epoch": 0.5716878402903811,
"grad_norm": 38.907182524465654,
"learning_rate": 2.3139274553608494e-07,
"logits/chosen": -2.9248204231262207,
"logits/rejected": -2.7649989128112793,
"logps/chosen": -406.0899963378906,
"logps/rejected": -357.7886047363281,
"loss": 0.1822,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 0.6926982402801514,
"rewards/margins": 6.806327819824219,
"rewards/rejected": -6.1136298179626465,
"step": 630
},
{
"epoch": 0.5807622504537205,
"grad_norm": 64.2592666433393,
"learning_rate": 2.2350009120552156e-07,
"logits/chosen": -2.9787538051605225,
"logits/rejected": -2.829073905944824,
"logps/chosen": -407.52520751953125,
"logps/rejected": -409.28289794921875,
"loss": 0.2093,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.845790684223175,
"rewards/margins": 7.095252990722656,
"rewards/rejected": -6.249462604522705,
"step": 640
},
{
"epoch": 0.5898366606170599,
"grad_norm": 45.704317168510784,
"learning_rate": 2.1563406621750825e-07,
"logits/chosen": -2.8653993606567383,
"logits/rejected": -2.707611322402954,
"logps/chosen": -368.9764709472656,
"logps/rejected": -356.0212707519531,
"loss": 0.1977,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 0.7670904397964478,
"rewards/margins": 6.501168727874756,
"rewards/rejected": -5.734078407287598,
"step": 650
},
{
"epoch": 0.5989110707803993,
"grad_norm": 25.897772050384255,
"learning_rate": 2.0780257501713346e-07,
"logits/chosen": -2.9106478691101074,
"logits/rejected": -2.778205394744873,
"logps/chosen": -423.90802001953125,
"logps/rejected": -412.2939453125,
"loss": 0.1887,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.707015872001648,
"rewards/margins": 7.062180995941162,
"rewards/rejected": -6.355164527893066,
"step": 660
},
{
"epoch": 0.6079854809437386,
"grad_norm": 34.675374116606974,
"learning_rate": 2.000134873470243e-07,
"logits/chosen": -2.8369908332824707,
"logits/rejected": -2.7423768043518066,
"logps/chosen": -342.940185546875,
"logps/rejected": -344.69903564453125,
"loss": 0.178,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 0.7170498371124268,
"rewards/margins": 6.7858076095581055,
"rewards/rejected": -6.068758964538574,
"step": 670
},
{
"epoch": 0.617059891107078,
"grad_norm": 26.932488865103295,
"learning_rate": 1.922746303391655e-07,
"logits/chosen": -2.8944993019104004,
"logits/rejected": -2.760188102722168,
"logps/chosen": -394.26812744140625,
"logps/rejected": -363.68389892578125,
"loss": 0.1664,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.4090805053710938,
"rewards/margins": 7.532193660736084,
"rewards/rejected": -6.123114585876465,
"step": 680
},
{
"epoch": 0.6261343012704175,
"grad_norm": 37.470507715790056,
"learning_rate": 1.8459378064953754e-07,
"logits/chosen": -2.9657504558563232,
"logits/rejected": -2.8593227863311768,
"logps/chosen": -416.7271423339844,
"logps/rejected": -370.2811584472656,
"loss": 0.2084,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.9790989756584167,
"rewards/margins": 6.995152950286865,
"rewards/rejected": -6.016053676605225,
"step": 690
},
{
"epoch": 0.6352087114337568,
"grad_norm": 37.273869581880994,
"learning_rate": 1.7697865664347694e-07,
"logits/chosen": -2.9320342540740967,
"logits/rejected": -2.7820043563842773,
"logps/chosen": -388.0702819824219,
"logps/rejected": -331.7247009277344,
"loss": 0.183,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 0.8440800905227661,
"rewards/margins": 6.018949031829834,
"rewards/rejected": -5.174870014190674,
"step": 700
},
{
"epoch": 0.6442831215970962,
"grad_norm": 44.63318462189936,
"learning_rate": 1.6943691063961213e-07,
"logits/chosen": -2.9753224849700928,
"logits/rejected": -2.7769880294799805,
"logps/chosen": -434.841796875,
"logps/rejected": -362.3944396972656,
"loss": 0.2011,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.0161527395248413,
"rewards/margins": 7.2209320068359375,
"rewards/rejected": -6.20477819442749,
"step": 710
},
{
"epoch": 0.6533575317604355,
"grad_norm": 44.28088050095069,
"learning_rate": 1.6197612122016846e-07,
"logits/chosen": -2.9147086143493652,
"logits/rejected": -2.8043932914733887,
"logps/chosen": -398.91796875,
"logps/rejected": -389.1512145996094,
"loss": 0.1713,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 0.8414427042007446,
"rewards/margins": 6.729714870452881,
"rewards/rejected": -5.888272285461426,
"step": 720
},
{
"epoch": 0.662431941923775,
"grad_norm": 47.2641364219176,
"learning_rate": 1.5460378561536985e-07,
"logits/chosen": -2.917802333831787,
"logits/rejected": -2.7316904067993164,
"logps/chosen": -369.023193359375,
"logps/rejected": -319.9668273925781,
"loss": 0.1807,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.9150484204292297,
"rewards/margins": 6.587407112121582,
"rewards/rejected": -5.672359466552734,
"step": 730
},
{
"epoch": 0.6715063520871143,
"grad_norm": 30.01994165255659,
"learning_rate": 1.473273121695898e-07,
"logits/chosen": -2.909447193145752,
"logits/rejected": -2.808865547180176,
"logps/chosen": -408.38360595703125,
"logps/rejected": -387.4463806152344,
"loss": 0.185,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.7770580649375916,
"rewards/margins": 6.37229061126709,
"rewards/rejected": -5.5952324867248535,
"step": 740
},
{
"epoch": 0.6805807622504537,
"grad_norm": 47.21435045984216,
"learning_rate": 1.4015401289682214e-07,
"logits/chosen": -2.8479952812194824,
"logits/rejected": -2.741609811782837,
"logps/chosen": -336.0369567871094,
"logps/rejected": -328.47589111328125,
"loss": 0.2359,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.46019020676612854,
"rewards/margins": 5.526650905609131,
"rewards/rejected": -5.066461086273193,
"step": 750
},
{
"epoch": 0.6896551724137931,
"grad_norm": 36.804170758784224,
"learning_rate": 1.3309109613295335e-07,
"logits/chosen": -2.9334492683410645,
"logits/rejected": -2.7972865104675293,
"logps/chosen": -420.43133544921875,
"logps/rejected": -374.5361328125,
"loss": 0.1815,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 0.5759689211845398,
"rewards/margins": 6.218279838562012,
"rewards/rejected": -5.642312049865723,
"step": 760
},
{
"epoch": 0.6987295825771325,
"grad_norm": 35.669878276021684,
"learning_rate": 1.2614565929221848e-07,
"logits/chosen": -2.9297587871551514,
"logits/rejected": -2.762052059173584,
"logps/chosen": -372.5475769042969,
"logps/rejected": -365.61163330078125,
"loss": 0.1943,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.7651561498641968,
"rewards/margins": 6.711656093597412,
"rewards/rejected": -5.946499824523926,
"step": 770
},
{
"epoch": 0.7078039927404719,
"grad_norm": 37.95229234009131,
"learning_rate": 1.1932468173512137e-07,
"logits/chosen": -2.9614641666412354,
"logits/rejected": -2.714155435562134,
"logps/chosen": -418.35504150390625,
"logps/rejected": -348.20721435546875,
"loss": 0.1698,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.0913097858428955,
"rewards/margins": 7.305689811706543,
"rewards/rejected": -6.214380741119385,
"step": 780
},
{
"epoch": 0.7168784029038112,
"grad_norm": 36.57594683086614,
"learning_rate": 1.1263501775498438e-07,
"logits/chosen": -2.938605785369873,
"logits/rejected": -2.8134965896606445,
"logps/chosen": -362.9466552734375,
"logps/rejected": -375.305908203125,
"loss": 0.2012,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.29936400055885315,
"rewards/margins": 5.319377899169922,
"rewards/rejected": -5.020013332366943,
"step": 790
},
{
"epoch": 0.7259528130671506,
"grad_norm": 44.23152439879075,
"learning_rate": 1.0608338969017682e-07,
"logits/chosen": -2.996309757232666,
"logits/rejected": -2.8086256980895996,
"logps/chosen": -454.28607177734375,
"logps/rejected": -414.04010009765625,
"loss": 0.2077,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.2388927936553955,
"rewards/margins": 7.735617160797119,
"rewards/rejected": -6.496724605560303,
"step": 800
},
{
"epoch": 0.73502722323049,
"grad_norm": 49.451677093132325,
"learning_rate": 9.96763811689425e-08,
"logits/chosen": -2.904308795928955,
"logits/rejected": -2.783384323120117,
"logps/chosen": -391.5575256347656,
"logps/rejected": -389.44952392578125,
"loss": 0.2014,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.8881933093070984,
"rewards/margins": 6.98279333114624,
"rewards/rejected": -6.094600677490234,
"step": 810
},
{
"epoch": 0.7441016333938294,
"grad_norm": 52.0962797399699,
"learning_rate": 9.3420430493615e-08,
"logits/chosen": -2.8749382495880127,
"logits/rejected": -2.759850025177002,
"logps/chosen": -366.92822265625,
"logps/rejected": -347.29150390625,
"loss": 0.1903,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.6582810878753662,
"rewards/margins": 6.6857147216796875,
"rewards/rejected": -6.027434349060059,
"step": 820
},
{
"epoch": 0.7531760435571688,
"grad_norm": 33.61793404339224,
"learning_rate": 8.732182417086903e-08,
"logits/chosen": -2.954723834991455,
"logits/rejected": -2.773974895477295,
"logps/chosen": -398.5955810546875,
"logps/rejected": -388.16168212890625,
"loss": 0.1736,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 1.4529194831848145,
"rewards/margins": 8.60301399230957,
"rewards/rejected": -7.150094032287598,
"step": 830
},
{
"epoch": 0.7622504537205081,
"grad_norm": 35.017851342156135,
"learning_rate": 8.138669059450778e-08,
"logits/chosen": -2.9093117713928223,
"logits/rejected": -2.808568000793457,
"logps/chosen": -387.08154296875,
"logps/rejected": -375.1770324707031,
"loss": 0.1741,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 0.7035520076751709,
"rewards/margins": 6.644250392913818,
"rewards/rejected": -5.940698623657227,
"step": 840
},
{
"epoch": 0.7713248638838476,
"grad_norm": 50.95837810281916,
"learning_rate": 7.562099388713702e-08,
"logits/chosen": -2.957639455795288,
"logits/rejected": -2.8320231437683105,
"logps/chosen": -385.71533203125,
"logps/rejected": -370.684326171875,
"loss": 0.1801,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.0452622175216675,
"rewards/margins": 6.53484582901001,
"rewards/rejected": -5.489583492279053,
"step": 850
},
{
"epoch": 0.7803992740471869,
"grad_norm": 38.76152254168693,
"learning_rate": 7.003052790691089e-08,
"logits/chosen": -2.9534757137298584,
"logits/rejected": -2.808516502380371,
"logps/chosen": -386.3631896972656,
"logps/rejected": -357.408203125,
"loss": 0.1747,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.7921295762062073,
"rewards/margins": 6.969007968902588,
"rewards/rejected": -6.176877498626709,
"step": 860
},
{
"epoch": 0.7894736842105263,
"grad_norm": 48.43342560419616,
"learning_rate": 6.462091042537576e-08,
"logits/chosen": -2.9628138542175293,
"logits/rejected": -2.819770336151123,
"logps/chosen": -469.53765869140625,
"logps/rejected": -417.4677734375,
"loss": 0.2053,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.4326789379119873,
"rewards/margins": 8.428723335266113,
"rewards/rejected": -6.996045112609863,
"step": 870
},
{
"epoch": 0.7985480943738656,
"grad_norm": 59.70924121351975,
"learning_rate": 5.9397577482259043e-08,
"logits/chosen": -2.8752875328063965,
"logits/rejected": -2.7745578289031982,
"logps/chosen": -355.57318115234375,
"logps/rejected": -377.0833740234375,
"loss": 0.2041,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.831215500831604,
"rewards/margins": 7.086012363433838,
"rewards/rejected": -6.254797458648682,
"step": 880
},
{
"epoch": 0.8076225045372051,
"grad_norm": 48.84810595799757,
"learning_rate": 5.436577792287841e-08,
"logits/chosen": -2.9051766395568848,
"logits/rejected": -2.7932395935058594,
"logps/chosen": -353.7893981933594,
"logps/rejected": -335.53155517578125,
"loss": 0.1852,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.2077093124389648,
"rewards/margins": 7.8495988845825195,
"rewards/rejected": -6.641890048980713,
"step": 890
},
{
"epoch": 0.8166969147005445,
"grad_norm": 42.88339321895752,
"learning_rate": 4.953056812365958e-08,
"logits/chosen": -2.968008518218994,
"logits/rejected": -2.7981395721435547,
"logps/chosen": -372.144775390625,
"logps/rejected": -380.69683837890625,
"loss": 0.1864,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.842302680015564,
"rewards/margins": 6.652394771575928,
"rewards/rejected": -5.810092449188232,
"step": 900
},
{
"epoch": 0.8257713248638838,
"grad_norm": 28.951235980213713,
"learning_rate": 4.489680691106279e-08,
"logits/chosen": -2.989492654800415,
"logits/rejected": -2.8102896213531494,
"logps/chosen": -456.3057556152344,
"logps/rejected": -383.52679443359375,
"loss": 0.1658,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 0.962626576423645,
"rewards/margins": 6.753287315368652,
"rewards/rejected": -5.790660381317139,
"step": 910
},
{
"epoch": 0.8348457350272233,
"grad_norm": 30.634191739944754,
"learning_rate": 4.046915067902443e-08,
"logits/chosen": -2.9627013206481934,
"logits/rejected": -2.7708044052124023,
"logps/chosen": -386.7125549316406,
"logps/rejected": -367.0064697265625,
"loss": 0.1785,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 1.275061011314392,
"rewards/margins": 8.114995956420898,
"rewards/rejected": -6.839936256408691,
"step": 920
},
{
"epoch": 0.8439201451905626,
"grad_norm": 40.72997706997498,
"learning_rate": 3.625204870981974e-08,
"logits/chosen": -2.981132984161377,
"logits/rejected": -2.8593552112579346,
"logps/chosen": -377.1697998046875,
"logps/rejected": -368.0603942871094,
"loss": 0.1753,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 0.7182249426841736,
"rewards/margins": 6.968735694885254,
"rewards/rejected": -6.250511169433594,
"step": 930
},
{
"epoch": 0.852994555353902,
"grad_norm": 41.94194593926165,
"learning_rate": 3.2249738703049175e-08,
"logits/chosen": -2.9394679069519043,
"logits/rejected": -2.8016715049743652,
"logps/chosen": -411.6636657714844,
"logps/rejected": -401.5919494628906,
"loss": 0.1752,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.9104796648025513,
"rewards/margins": 6.801443576812744,
"rewards/rejected": -5.890963554382324,
"step": 940
},
{
"epoch": 0.8620689655172413,
"grad_norm": 41.16087847448295,
"learning_rate": 2.8466242517240142e-08,
"logits/chosen": -2.865461826324463,
"logits/rejected": -2.7517268657684326,
"logps/chosen": -380.6204833984375,
"logps/rejected": -385.02252197265625,
"loss": 0.1663,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 0.9618560671806335,
"rewards/margins": 7.1874799728393555,
"rewards/rejected": -6.225625038146973,
"step": 950
},
{
"epoch": 0.8711433756805808,
"grad_norm": 42.56619788961049,
"learning_rate": 2.4905362128344652e-08,
"logits/chosen": -2.952007293701172,
"logits/rejected": -2.8182990550994873,
"logps/chosen": -385.042236328125,
"logps/rejected": -372.07244873046875,
"loss": 0.1849,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.5808476209640503,
"rewards/margins": 6.048865795135498,
"rewards/rejected": -5.468017578125,
"step": 960
},
{
"epoch": 0.8802177858439202,
"grad_norm": 42.694992390184375,
"learning_rate": 2.1570675809193554e-08,
"logits/chosen": -2.9329612255096436,
"logits/rejected": -2.7521860599517822,
"logps/chosen": -355.7178649902344,
"logps/rejected": -344.80706787109375,
"loss": 0.165,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.8769713640213013,
"rewards/margins": 7.218625068664551,
"rewards/rejected": -6.341653823852539,
"step": 970
},
{
"epoch": 0.8892921960072595,
"grad_norm": 44.47460215236176,
"learning_rate": 1.846553453374586e-08,
"logits/chosen": -2.974410057067871,
"logits/rejected": -2.8346712589263916,
"logps/chosen": -332.41546630859375,
"logps/rejected": -347.7070007324219,
"loss": 0.1838,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.37545478343963623,
"rewards/margins": 5.861527442932129,
"rewards/rejected": -5.486072540283203,
"step": 980
},
{
"epoch": 0.8983666061705989,
"grad_norm": 40.813616862543405,
"learning_rate": 1.559305860974805e-08,
"logits/chosen": -2.964592456817627,
"logits/rejected": -2.7961647510528564,
"logps/chosen": -376.69952392578125,
"logps/rejected": -346.335205078125,
"loss": 0.1654,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.2973288297653198,
"rewards/margins": 7.887129306793213,
"rewards/rejected": -6.589800834655762,
"step": 990
},
{
"epoch": 0.9074410163339383,
"grad_norm": 49.7394201603181,
"learning_rate": 1.2956134543185449e-08,
"logits/chosen": -2.921257257461548,
"logits/rejected": -2.7407443523406982,
"logps/chosen": -385.1451110839844,
"logps/rejected": -322.24420166015625,
"loss": 0.1997,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.8060976266860962,
"rewards/margins": 6.704709053039551,
"rewards/rejected": -5.898611545562744,
"step": 1000
},
{
"epoch": 0.9165154264972777,
"grad_norm": 37.49009348161237,
"learning_rate": 1.0557412137677884e-08,
"logits/chosen": -2.918593406677246,
"logits/rejected": -2.7678284645080566,
"logps/chosen": -395.64837646484375,
"logps/rejected": -375.7630615234375,
"loss": 0.1726,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.6721734404563904,
"rewards/margins": 6.370687007904053,
"rewards/rejected": -5.698513031005859,
"step": 1010
},
{
"epoch": 0.925589836660617,
"grad_norm": 38.05733801062978,
"learning_rate": 8.399301831733403e-09,
"logits/chosen": -2.9395432472229004,
"logits/rejected": -2.7823617458343506,
"logps/chosen": -366.7617492675781,
"logps/rejected": -380.7712707519531,
"loss": 0.1866,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.8301785588264465,
"rewards/margins": 7.507985591888428,
"rewards/rejected": -6.677806854248047,
"step": 1020
},
{
"epoch": 0.9346642468239564,
"grad_norm": 48.56610093039856,
"learning_rate": 6.483972276536576e-09,
"logits/chosen": -2.9480996131896973,
"logits/rejected": -2.7890021800994873,
"logps/chosen": -426.88836669921875,
"logps/rejected": -351.18865966796875,
"loss": 0.1814,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.70756995677948,
"rewards/margins": 6.491690158843994,
"rewards/rejected": -5.784119606018066,
"step": 1030
},
{
"epoch": 0.9437386569872959,
"grad_norm": 46.68373107587798,
"learning_rate": 4.813348156704866e-09,
"logits/chosen": -2.9594180583953857,
"logits/rejected": -2.7199923992156982,
"logps/chosen": -368.69635009765625,
"logps/rejected": -394.76495361328125,
"loss": 0.1897,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 0.9022048115730286,
"rewards/margins": 7.9423065185546875,
"rewards/rejected": -7.040102481842041,
"step": 1040
},
{
"epoch": 0.9528130671506352,
"grad_norm": 36.58692937622254,
"learning_rate": 3.389108256203338e-09,
"logits/chosen": -2.948655605316162,
"logits/rejected": -2.722470283508301,
"logps/chosen": -396.5683288574219,
"logps/rejected": -346.69183349609375,
"loss": 0.1677,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.7442362308502197,
"rewards/margins": 7.20086145401001,
"rewards/rejected": -6.456625938415527,
"step": 1050
},
{
"epoch": 0.9618874773139746,
"grad_norm": 55.430814413767415,
"learning_rate": 2.2126837713609403e-09,
"logits/chosen": -2.8871541023254395,
"logits/rejected": -2.744736671447754,
"logps/chosen": -364.49560546875,
"logps/rejected": -356.38458251953125,
"loss": 0.1784,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 0.7931915521621704,
"rewards/margins": 6.91254186630249,
"rewards/rejected": -6.119350433349609,
"step": 1060
},
{
"epoch": 0.9709618874773139,
"grad_norm": 65.34227799978058,
"learning_rate": 1.2852568726837987e-09,
"logits/chosen": -2.975020408630371,
"logits/rejected": -2.7820792198181152,
"logps/chosen": -431.84185791015625,
"logps/rejected": -396.8271484375,
"loss": 0.2151,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.1091196537017822,
"rewards/margins": 7.056349754333496,
"rewards/rejected": -5.947229862213135,
"step": 1070
},
{
"epoch": 0.9800362976406534,
"grad_norm": 33.456813936400515,
"learning_rate": 6.077595169105277e-10,
"logits/chosen": -2.886939525604248,
"logits/rejected": -2.7690751552581787,
"logps/chosen": -361.1842346191406,
"logps/rejected": -356.4033508300781,
"loss": 0.1774,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.015371322631836,
"rewards/margins": 7.721086025238037,
"rewards/rejected": -6.705715179443359,
"step": 1080
},
{
"epoch": 0.9891107078039928,
"grad_norm": 40.254145384111084,
"learning_rate": 1.8087251050369344e-10,
"logits/chosen": -2.9307265281677246,
"logits/rejected": -2.742335557937622,
"logps/chosen": -380.31597900390625,
"logps/rejected": -383.219970703125,
"loss": 0.1869,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.3260897397994995,
"rewards/margins": 8.104445457458496,
"rewards/rejected": -6.778355598449707,
"step": 1090
},
{
"epoch": 0.9981851179673321,
"grad_norm": 39.63910669613641,
"learning_rate": 5.024825517951914e-12,
"logits/chosen": -2.9634110927581787,
"logits/rejected": -2.8349342346191406,
"logps/chosen": -384.75299072265625,
"logps/rejected": -384.3755798339844,
"loss": 0.1842,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.5791146755218506,
"rewards/margins": 6.315325736999512,
"rewards/rejected": -5.736211776733398,
"step": 1100
},
{
"epoch": 1.0,
"step": 1102,
"total_flos": 0.0,
"train_loss": 0.22162555615179336,
"train_runtime": 6190.6716,
"train_samples_per_second": 11.391,
"train_steps_per_second": 0.178
}
],
"logging_steps": 10,
"max_steps": 1102,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}