tulu-2-7b-hh-dpo / trainer_state.json
Commit 4aeefdb (verified): "Model save" by NicholasCorrado
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9996020692399522,
"eval_steps": 1000,
"global_step": 628,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0015917230401910067,
"grad_norm": 1.083263489032715,
"learning_rate": 7.936507936507936e-09,
"logits/chosen": -1.870174765586853,
"logits/rejected": -1.8014098405838013,
"logps/chosen": -158.94805908203125,
"logps/rejected": -194.90118408203125,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.01591723040191007,
"grad_norm": 1.7486374056371567,
"learning_rate": 7.936507936507936e-08,
"logits/chosen": -1.8175890445709229,
"logits/rejected": -1.8403409719467163,
"logps/chosen": -188.36404418945312,
"logps/rejected": -179.35687255859375,
"loss": 0.6932,
"rewards/accuracies": 0.3576388955116272,
"rewards/chosen": -0.0004040856729261577,
"rewards/margins": -0.0008076421800069511,
"rewards/rejected": 0.00040355639066547155,
"step": 10
},
{
"epoch": 0.03183446080382014,
"grad_norm": 1.2288534081464715,
"learning_rate": 1.5873015873015872e-07,
"logits/chosen": -1.9721990823745728,
"logits/rejected": -1.980015516281128,
"logps/chosen": -197.92391967773438,
"logps/rejected": -207.22412109375,
"loss": 0.6932,
"rewards/accuracies": 0.5,
"rewards/chosen": 8.275737855001353e-06,
"rewards/margins": -0.0001316081325057894,
"rewards/rejected": 0.00013988386490382254,
"step": 20
},
{
"epoch": 0.0477516912057302,
"grad_norm": 0.9705773937715004,
"learning_rate": 2.3809523809523806e-07,
"logits/chosen": -1.8532848358154297,
"logits/rejected": -1.8753681182861328,
"logps/chosen": -192.031982421875,
"logps/rejected": -195.5606231689453,
"loss": 0.6931,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": 0.0007524025277234614,
"rewards/margins": 0.000668005901388824,
"rewards/rejected": 8.439658995484933e-05,
"step": 30
},
{
"epoch": 0.06366892160764027,
"grad_norm": 1.3879715707434512,
"learning_rate": 3.1746031746031743e-07,
"logits/chosen": -1.9114805459976196,
"logits/rejected": -1.9526256322860718,
"logps/chosen": -198.54586791992188,
"logps/rejected": -190.65158081054688,
"loss": 0.6931,
"rewards/accuracies": 0.515625,
"rewards/chosen": 0.0004263836017344147,
"rewards/margins": 0.00022551305301021785,
"rewards/rejected": 0.0002008705196203664,
"step": 40
},
{
"epoch": 0.07958615200955034,
"grad_norm": 0.9223665834941889,
"learning_rate": 3.968253968253968e-07,
"logits/chosen": -1.9061291217803955,
"logits/rejected": -1.963629126548767,
"logps/chosen": -190.56007385253906,
"logps/rejected": -193.8275146484375,
"loss": 0.693,
"rewards/accuracies": 0.512499988079071,
"rewards/chosen": 0.0014020069502294064,
"rewards/margins": 0.00029031833400949836,
"rewards/rejected": 0.001111688674427569,
"step": 50
},
{
"epoch": 0.0955033824114604,
"grad_norm": 1.6880319723067283,
"learning_rate": 4.761904761904761e-07,
"logits/chosen": -1.9793567657470703,
"logits/rejected": -2.001311779022217,
"logps/chosen": -196.06846618652344,
"logps/rejected": -188.52914428710938,
"loss": 0.6929,
"rewards/accuracies": 0.528124988079071,
"rewards/chosen": 0.00374850956723094,
"rewards/margins": 0.0002347224799450487,
"rewards/rejected": 0.003513787407428026,
"step": 60
},
{
"epoch": 0.11142061281337047,
"grad_norm": 1.2950341728538266,
"learning_rate": 4.998106548810311e-07,
"logits/chosen": -1.9034782648086548,
"logits/rejected": -1.9826198816299438,
"logps/chosen": -196.86940002441406,
"logps/rejected": -177.41644287109375,
"loss": 0.6926,
"rewards/accuracies": 0.5718749761581421,
"rewards/chosen": 0.005933411885052919,
"rewards/margins": 0.001384641625918448,
"rewards/rejected": 0.004548769909888506,
"step": 70
},
{
"epoch": 0.12733784321528055,
"grad_norm": 2.34883059858935,
"learning_rate": 4.988839406031596e-07,
"logits/chosen": -1.9306480884552002,
"logits/rejected": -1.9404945373535156,
"logps/chosen": -176.39120483398438,
"logps/rejected": -193.53347778320312,
"loss": 0.6922,
"rewards/accuracies": 0.550000011920929,
"rewards/chosen": 0.010212738066911697,
"rewards/margins": 0.0014647066127508879,
"rewards/rejected": 0.008748031221330166,
"step": 80
},
{
"epoch": 0.14325507361719061,
"grad_norm": 1.8215899265671025,
"learning_rate": 4.971879403278432e-07,
"logits/chosen": -1.8801124095916748,
"logits/rejected": -1.9323303699493408,
"logps/chosen": -189.4450225830078,
"logps/rejected": -190.84414672851562,
"loss": 0.6922,
"rewards/accuracies": 0.543749988079071,
"rewards/chosen": 0.011725414544343948,
"rewards/margins": 0.0013827414950355887,
"rewards/rejected": 0.01034267246723175,
"step": 90
},
{
"epoch": 0.15917230401910068,
"grad_norm": 1.208023660425821,
"learning_rate": 4.947278962947386e-07,
"logits/chosen": -1.8471660614013672,
"logits/rejected": -1.8932346105575562,
"logps/chosen": -193.34718322753906,
"logps/rejected": -196.87457275390625,
"loss": 0.6915,
"rewards/accuracies": 0.581250011920929,
"rewards/chosen": 0.015522779896855354,
"rewards/margins": 0.002660988597199321,
"rewards/rejected": 0.01286179106682539,
"step": 100
},
{
"epoch": 0.17508953442101075,
"grad_norm": 1.1987469741900727,
"learning_rate": 4.915114123589732e-07,
"logits/chosen": -1.8427928686141968,
"logits/rejected": -1.9131923913955688,
"logps/chosen": -177.29983520507812,
"logps/rejected": -178.62013244628906,
"loss": 0.6911,
"rewards/accuracies": 0.5562499761581421,
"rewards/chosen": 0.019433852285146713,
"rewards/margins": 0.003971750847995281,
"rewards/rejected": 0.015462102368474007,
"step": 110
},
{
"epoch": 0.1910067648229208,
"grad_norm": 1.3513440295821657,
"learning_rate": 4.875484304880629e-07,
"logits/chosen": -1.852002501487732,
"logits/rejected": -1.8977711200714111,
"logps/chosen": -196.73301696777344,
"logps/rejected": -198.54873657226562,
"loss": 0.6905,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.02165614813566208,
"rewards/margins": 0.006273286882787943,
"rewards/rejected": 0.015382861718535423,
"step": 120
},
{
"epoch": 0.20692399522483088,
"grad_norm": 0.919308465194322,
"learning_rate": 4.828512000318616e-07,
"logits/chosen": -1.8836482763290405,
"logits/rejected": -1.9497747421264648,
"logps/chosen": -197.17617797851562,
"logps/rejected": -199.19927978515625,
"loss": 0.6895,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": 0.026965487748384476,
"rewards/margins": 0.008914025500416756,
"rewards/rejected": 0.01805146224796772,
"step": 130
},
{
"epoch": 0.22284122562674094,
"grad_norm": 1.3141295336399639,
"learning_rate": 4.774342398605221e-07,
"logits/chosen": -1.9415794610977173,
"logits/rejected": -1.9936542510986328,
"logps/chosen": -191.3373565673828,
"logps/rejected": -182.86744689941406,
"loss": 0.6894,
"rewards/accuracies": 0.565625011920929,
"rewards/chosen": 0.02900194190442562,
"rewards/margins": 0.007812701165676117,
"rewards/rejected": 0.021189238876104355,
"step": 140
},
{
"epoch": 0.238758456028651,
"grad_norm": 1.0426885097254153,
"learning_rate": 4.713142934875005e-07,
"logits/chosen": -1.9385217428207397,
"logits/rejected": -2.025717258453369,
"logps/chosen": -201.0162811279297,
"logps/rejected": -189.43167114257812,
"loss": 0.6882,
"rewards/accuracies": 0.606249988079071,
"rewards/chosen": 0.030758211389183998,
"rewards/margins": 0.014848137274384499,
"rewards/rejected": 0.01591007225215435,
"step": 150
},
{
"epoch": 0.2546756864305611,
"grad_norm": 0.9993599317482367,
"learning_rate": 4.64510277316316e-07,
"logits/chosen": -1.883013367652893,
"logits/rejected": -1.9087369441986084,
"logps/chosen": -177.09432983398438,
"logps/rejected": -181.0247802734375,
"loss": 0.6875,
"rewards/accuracies": 0.581250011920929,
"rewards/chosen": 0.019768565893173218,
"rewards/margins": 0.01374894194304943,
"rewards/rejected": 0.006019626744091511,
"step": 160
},
{
"epoch": 0.27059291683247116,
"grad_norm": 6.045830822367488,
"learning_rate": 4.570432221710314e-07,
"logits/chosen": -1.8615553379058838,
"logits/rejected": -1.9155362844467163,
"logps/chosen": -191.3380126953125,
"logps/rejected": -196.19644165039062,
"loss": 0.6866,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": 0.01975223794579506,
"rewards/margins": 0.01370079256594181,
"rewards/rejected": 0.006051443982869387,
"step": 170
},
{
"epoch": 0.28651014723438123,
"grad_norm": 3.6863233560878217,
"learning_rate": 4.4893620829118124e-07,
"logits/chosen": -1.8656437397003174,
"logits/rejected": -1.9271419048309326,
"logps/chosen": -197.45355224609375,
"logps/rejected": -187.43008422851562,
"loss": 0.6861,
"rewards/accuracies": 0.621874988079071,
"rewards/chosen": 0.011210992000997066,
"rewards/margins": 0.020882535725831985,
"rewards/rejected": -0.009671543724834919,
"step": 180
},
{
"epoch": 0.3024273776362913,
"grad_norm": 1.0448028144351018,
"learning_rate": 4.40214293992074e-07,
"logits/chosen": -1.9013402462005615,
"logits/rejected": -1.9314658641815186,
"logps/chosen": -189.4186553955078,
"logps/rejected": -181.21237182617188,
"loss": 0.685,
"rewards/accuracies": 0.6031249761581421,
"rewards/chosen": -0.005181929562240839,
"rewards/margins": 0.016439586877822876,
"rewards/rejected": -0.021621517837047577,
"step": 190
},
{
"epoch": 0.31834460803820136,
"grad_norm": 2.39968390976015,
"learning_rate": 4.3090443821097566e-07,
"logits/chosen": -1.7716095447540283,
"logits/rejected": -1.8057435750961304,
"logps/chosen": -191.8214111328125,
"logps/rejected": -202.6089630126953,
"loss": 0.6834,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -0.008436818607151508,
"rewards/margins": 0.02256094664335251,
"rewards/rejected": -0.030997764319181442,
"step": 200
},
{
"epoch": 0.3342618384401114,
"grad_norm": 1.2956793625076484,
"learning_rate": 4.210354171785795e-07,
"logits/chosen": -1.7898409366607666,
"logits/rejected": -1.8095554113388062,
"logps/chosen": -187.91336059570312,
"logps/rejected": -191.72274780273438,
"loss": 0.6822,
"rewards/accuracies": 0.6031249761581421,
"rewards/chosen": -0.006862046662718058,
"rewards/margins": 0.026659756898880005,
"rewards/rejected": -0.033521804958581924,
"step": 210
},
{
"epoch": 0.3501790688420215,
"grad_norm": 2.0041098363033623,
"learning_rate": 4.1063773547332584e-07,
"logits/chosen": -1.7102569341659546,
"logits/rejected": -1.7631139755249023,
"logps/chosen": -192.54238891601562,
"logps/rejected": -196.4750213623047,
"loss": 0.6811,
"rewards/accuracies": 0.515625,
"rewards/chosen": -0.02755718305706978,
"rewards/margins": 0.021966466680169106,
"rewards/rejected": -0.04952365159988403,
"step": 220
},
{
"epoch": 0.36609629924393156,
"grad_norm": 1.9332381756111658,
"learning_rate": 3.997435317334988e-07,
"logits/chosen": -1.7369827032089233,
"logits/rejected": -1.7770426273345947,
"logps/chosen": -191.87298583984375,
"logps/rejected": -193.9591522216797,
"loss": 0.6817,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": -0.043014999479055405,
"rewards/margins": 0.020200857892632484,
"rewards/rejected": -0.06321585923433304,
"step": 230
},
{
"epoch": 0.3820135296458416,
"grad_norm": 1.8903913985972478,
"learning_rate": 3.8838647931853684e-07,
"logits/chosen": -1.7956278324127197,
"logits/rejected": -1.7797701358795166,
"logps/chosen": -191.063720703125,
"logps/rejected": -194.3412322998047,
"loss": 0.6807,
"rewards/accuracies": 0.596875011920929,
"rewards/chosen": -0.04468019679188728,
"rewards/margins": 0.024412112310528755,
"rewards/rejected": -0.06909231096506119,
"step": 240
},
{
"epoch": 0.3979307600477517,
"grad_norm": 2.209986978975659,
"learning_rate": 3.7660168222660824e-07,
"logits/chosen": -1.7686593532562256,
"logits/rejected": -1.784973382949829,
"logps/chosen": -210.4496612548828,
"logps/rejected": -210.5813751220703,
"loss": 0.6764,
"rewards/accuracies": 0.6187499761581421,
"rewards/chosen": -0.06448944658041,
"rewards/margins": 0.04242020100355148,
"rewards/rejected": -0.10690965503454208,
"step": 250
},
{
"epoch": 0.41384799044966175,
"grad_norm": 2.7492197743164577,
"learning_rate": 3.6442556659016475e-07,
"logits/chosen": -1.6692039966583252,
"logits/rejected": -1.7762839794158936,
"logps/chosen": -204.4394073486328,
"logps/rejected": -207.07705688476562,
"loss": 0.6753,
"rewards/accuracies": 0.590624988079071,
"rewards/chosen": -0.07524862885475159,
"rewards/margins": 0.02850642427802086,
"rewards/rejected": -0.10375505685806274,
"step": 260
},
{
"epoch": 0.4297652208515718,
"grad_norm": 2.266511648349345,
"learning_rate": 3.5189576808485404e-07,
"logits/chosen": -1.7136598825454712,
"logits/rejected": -1.710269570350647,
"logps/chosen": -208.0347137451172,
"logps/rejected": -209.95751953125,
"loss": 0.6737,
"rewards/accuracies": 0.578125,
"rewards/chosen": -0.08503381162881851,
"rewards/margins": 0.0269694272428751,
"rewards/rejected": -0.11200324445962906,
"step": 270
},
{
"epoch": 0.4456824512534819,
"grad_norm": 2.5291958520679354,
"learning_rate": 3.390510155998023e-07,
"logits/chosen": -1.628941297531128,
"logits/rejected": -1.6508426666259766,
"logps/chosen": -214.1870574951172,
"logps/rejected": -212.94284057617188,
"loss": 0.6707,
"rewards/accuracies": 0.6468750238418579,
"rewards/chosen": -0.12327191978693008,
"rewards/margins": 0.07120023667812347,
"rewards/rejected": -0.19447214901447296,
"step": 280
},
{
"epoch": 0.46159968165539195,
"grad_norm": 2.574405785518012,
"learning_rate": 3.2593101152883795e-07,
"logits/chosen": -1.6216236352920532,
"logits/rejected": -1.6766021251678467,
"logps/chosen": -216.7034149169922,
"logps/rejected": -219.9063262939453,
"loss": 0.6732,
"rewards/accuracies": 0.621874988079071,
"rewards/chosen": -0.16276732087135315,
"rewards/margins": 0.05662418156862259,
"rewards/rejected": -0.21939150989055634,
"step": 290
},
{
"epoch": 0.477516912057302,
"grad_norm": 2.6453554421569514,
"learning_rate": 3.125763090526674e-07,
"logits/chosen": -1.5658096075057983,
"logits/rejected": -1.609926462173462,
"logps/chosen": -212.8961944580078,
"logps/rejected": -213.0232696533203,
"loss": 0.6673,
"rewards/accuracies": 0.596875011920929,
"rewards/chosen": -0.14756593108177185,
"rewards/margins": 0.06441614031791687,
"rewards/rejected": -0.21198205649852753,
"step": 300
},
{
"epoch": 0.4934341424592121,
"grad_norm": 4.170174585407687,
"learning_rate": 2.9902818679131775e-07,
"logits/chosen": -1.5784204006195068,
"logits/rejected": -1.5990747213363647,
"logps/chosen": -216.91799926757812,
"logps/rejected": -221.1397705078125,
"loss": 0.665,
"rewards/accuracies": 0.590624988079071,
"rewards/chosen": -0.20333552360534668,
"rewards/margins": 0.05764692276716232,
"rewards/rejected": -0.2609824538230896,
"step": 310
},
{
"epoch": 0.5093513728611222,
"grad_norm": 2.5979901565172714,
"learning_rate": 2.8532852121428733e-07,
"logits/chosen": -1.5708476305007935,
"logits/rejected": -1.610568642616272,
"logps/chosen": -212.8919219970703,
"logps/rejected": -209.1988983154297,
"loss": 0.6602,
"rewards/accuracies": 0.6031249761581421,
"rewards/chosen": -0.21002519130706787,
"rewards/margins": 0.07976146787405014,
"rewards/rejected": -0.2897866368293762,
"step": 320
},
{
"epoch": 0.5252686032630323,
"grad_norm": 3.0268583107868676,
"learning_rate": 2.715196572027789e-07,
"logits/chosen": -1.5595386028289795,
"logits/rejected": -1.579420804977417,
"logps/chosen": -208.55126953125,
"logps/rejected": -228.11337280273438,
"loss": 0.6561,
"rewards/accuracies": 0.6343749761581421,
"rewards/chosen": -0.19552794098854065,
"rewards/margins": 0.11572189629077911,
"rewards/rejected": -0.31124982237815857,
"step": 330
},
{
"epoch": 0.5411858336649423,
"grad_norm": 2.608251205411858,
"learning_rate": 2.5764427716409815e-07,
"logits/chosen": -1.5596858263015747,
"logits/rejected": -1.5745502710342407,
"logps/chosen": -222.6412811279297,
"logps/rejected": -227.70651245117188,
"loss": 0.6544,
"rewards/accuracies": 0.640625,
"rewards/chosen": -0.21032941341400146,
"rewards/margins": 0.09801909327507019,
"rewards/rejected": -0.30834850668907166,
"step": 340
},
{
"epoch": 0.5571030640668524,
"grad_norm": 2.3596476657059666,
"learning_rate": 2.4374526910277886e-07,
"logits/chosen": -1.4910805225372314,
"logits/rejected": -1.4995691776275635,
"logps/chosen": -219.46536254882812,
"logps/rejected": -235.89620971679688,
"loss": 0.6613,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.2539146840572357,
"rewards/margins": 0.09661410748958588,
"rewards/rejected": -0.3505287766456604,
"step": 350
},
{
"epoch": 0.5730202944687625,
"grad_norm": 2.721512353744307,
"learning_rate": 2.2986559405621886e-07,
"logits/chosen": -1.5120307207107544,
"logits/rejected": -1.5819119215011597,
"logps/chosen": -236.832763671875,
"logps/rejected": -235.5454559326172,
"loss": 0.6668,
"rewards/accuracies": 0.565625011920929,
"rewards/chosen": -0.3000979721546173,
"rewards/margins": 0.07621388137340546,
"rewards/rejected": -0.37631186842918396,
"step": 360
},
{
"epoch": 0.5889375248706725,
"grad_norm": 3.020330440823815,
"learning_rate": 2.160481533045751e-07,
"logits/chosen": -1.4668768644332886,
"logits/rejected": -1.5363471508026123,
"logps/chosen": -218.35946655273438,
"logps/rejected": -224.10208129882812,
"loss": 0.6492,
"rewards/accuracies": 0.6156250238418579,
"rewards/chosen": -0.2719998359680176,
"rewards/margins": 0.12549051642417908,
"rewards/rejected": -0.39749038219451904,
"step": 370
},
{
"epoch": 0.6048547552725826,
"grad_norm": 2.9446946400840974,
"learning_rate": 2.0233565576536564e-07,
"logits/chosen": -1.485190749168396,
"logits/rejected": -1.5274041891098022,
"logps/chosen": -222.81765747070312,
"logps/rejected": -222.657470703125,
"loss": 0.6538,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.26167741417884827,
"rewards/margins": 0.08558806031942368,
"rewards/rejected": -0.34726545214653015,
"step": 380
},
{
"epoch": 0.6207719856744927,
"grad_norm": 3.1883509652873223,
"learning_rate": 1.887704859826528e-07,
"logits/chosen": -1.4792619943618774,
"logits/rejected": -1.5055263042449951,
"logps/chosen": -221.5879364013672,
"logps/rejected": -239.82571411132812,
"loss": 0.6495,
"rewards/accuracies": 0.6187499761581421,
"rewards/chosen": -0.2849220633506775,
"rewards/margins": 0.13855159282684326,
"rewards/rejected": -0.42347365617752075,
"step": 390
},
{
"epoch": 0.6366892160764027,
"grad_norm": 2.8954609378442657,
"learning_rate": 1.7539457311884675e-07,
"logits/chosen": -1.4238282442092896,
"logits/rejected": -1.423081398010254,
"logps/chosen": -229.7063446044922,
"logps/rejected": -235.14236450195312,
"loss": 0.6522,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -0.2554658353328705,
"rewards/margins": 0.14818701148033142,
"rewards/rejected": -0.4036528468132019,
"step": 400
},
{
"epoch": 0.6526064464783128,
"grad_norm": 3.203997856966496,
"learning_rate": 1.6224926135406693e-07,
"logits/chosen": -1.460460901260376,
"logits/rejected": -1.4777477979660034,
"logps/chosen": -236.0177764892578,
"logps/rejected": -240.2278289794922,
"loss": 0.6525,
"rewards/accuracies": 0.596875011920929,
"rewards/chosen": -0.32773512601852417,
"rewards/margins": 0.08220280706882477,
"rewards/rejected": -0.4099380075931549,
"step": 410
},
{
"epoch": 0.6685236768802229,
"grad_norm": 2.8800206835573094,
"learning_rate": 1.4937518209365108e-07,
"logits/chosen": -1.5116090774536133,
"logits/rejected": -1.5359185934066772,
"logps/chosen": -248.60263061523438,
"logps/rejected": -245.75967407226562,
"loss": 0.6527,
"rewards/accuracies": 0.6187499761581421,
"rewards/chosen": -0.3213188052177429,
"rewards/margins": 0.10633780807256699,
"rewards/rejected": -0.4276565909385681,
"step": 420
},
{
"epoch": 0.6844409072821329,
"grad_norm": 3.29734083048723,
"learning_rate": 1.3681212837880977e-07,
"logits/chosen": -1.4253356456756592,
"logits/rejected": -1.444923758506775,
"logps/chosen": -220.8928985595703,
"logps/rejected": -238.1382293701172,
"loss": 0.6502,
"rewards/accuracies": 0.621874988079071,
"rewards/chosen": -0.3292413353919983,
"rewards/margins": 0.10124459117650986,
"rewards/rejected": -0.43048587441444397,
"step": 430
},
{
"epoch": 0.700358137684043,
"grad_norm": 3.2357462634957908,
"learning_rate": 1.2459893188861613e-07,
"logits/chosen": -1.3886950016021729,
"logits/rejected": -1.4186146259307861,
"logps/chosen": -229.12069702148438,
"logps/rejected": -241.5669708251953,
"loss": 0.65,
"rewards/accuracies": 0.6656249761581421,
"rewards/chosen": -0.31468960642814636,
"rewards/margins": 0.14469651877880096,
"rewards/rejected": -0.4593861699104309,
"step": 440
},
{
"epoch": 0.716275368085953,
"grad_norm": 3.664386493120931,
"learning_rate": 1.1277334291351145e-07,
"logits/chosen": -1.4633034467697144,
"logits/rejected": -1.4953584671020508,
"logps/chosen": -229.84976196289062,
"logps/rejected": -245.46762084960938,
"loss": 0.6451,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -0.35235971212387085,
"rewards/margins": 0.1600349247455597,
"rewards/rejected": -0.5123946666717529,
"step": 450
},
{
"epoch": 0.7321925984878631,
"grad_norm": 3.054029038543547,
"learning_rate": 1.0137191367132078e-07,
"logits/chosen": -1.466225504875183,
"logits/rejected": -1.5026957988739014,
"logps/chosen": -227.568359375,
"logps/rejected": -251.8888397216797,
"loss": 0.6449,
"rewards/accuracies": 0.6468750238418579,
"rewards/chosen": -0.2990425229072571,
"rewards/margins": 0.16379448771476746,
"rewards/rejected": -0.46283698081970215,
"step": 460
},
{
"epoch": 0.7481098288897732,
"grad_norm": 3.604867452453406,
"learning_rate": 9.042988532644249e-08,
"logits/chosen": -1.447538137435913,
"logits/rejected": -1.5032737255096436,
"logps/chosen": -240.51644897460938,
"logps/rejected": -250.5240020751953,
"loss": 0.6527,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": -0.36844930052757263,
"rewards/margins": 0.1114746555685997,
"rewards/rejected": -0.47992390394210815,
"step": 470
},
{
"epoch": 0.7640270592916832,
"grad_norm": 3.100922028441707,
"learning_rate": 7.998107906142839e-08,
"logits/chosen": -1.4335956573486328,
"logits/rejected": -1.468783974647522,
"logps/chosen": -220.85147094726562,
"logps/rejected": -234.76596069335938,
"loss": 0.6462,
"rewards/accuracies": 0.640625,
"rewards/chosen": -0.3198398947715759,
"rewards/margins": 0.13547857105731964,
"rewards/rejected": -0.45531851053237915,
"step": 480
},
{
"epoch": 0.7799442896935933,
"grad_norm": 3.0897060153169447,
"learning_rate": 7.005779153764682e-08,
"logits/chosen": -1.460636854171753,
"logits/rejected": -1.481178641319275,
"logps/chosen": -217.1626434326172,
"logps/rejected": -241.839111328125,
"loss": 0.6465,
"rewards/accuracies": 0.628125011920929,
"rewards/chosen": -0.33777886629104614,
"rewards/margins": 0.1417325884103775,
"rewards/rejected": -0.47951143980026245,
"step": 490
},
{
"epoch": 0.7958615200955034,
"grad_norm": 3.515426969892236,
"learning_rate": 6.069069506815325e-08,
"logits/chosen": -1.423098087310791,
"logits/rejected": -1.4111202955245972,
"logps/chosen": -228.85360717773438,
"logps/rejected": -247.3131561279297,
"loss": 0.6459,
"rewards/accuracies": 0.6468750238418579,
"rewards/chosen": -0.3209570348262787,
"rewards/margins": 0.18556544184684753,
"rewards/rejected": -0.506522536277771,
"step": 500
},
{
"epoch": 0.8117787504974134,
"grad_norm": 3.168259746643005,
"learning_rate": 5.190874281132851e-08,
"logits/chosen": -1.3531607389450073,
"logits/rejected": -1.4315484762191772,
"logps/chosen": -228.17135620117188,
"logps/rejected": -237.56796264648438,
"loss": 0.6446,
"rewards/accuracies": 0.6187499761581421,
"rewards/chosen": -0.3634713888168335,
"rewards/margins": 0.11672137677669525,
"rewards/rejected": -0.48019275069236755,
"step": 510
},
{
"epoch": 0.8276959808993235,
"grad_norm": 3.547783875081257,
"learning_rate": 4.373907927832513e-08,
"logits/chosen": -1.4724462032318115,
"logits/rejected": -1.4711410999298096,
"logps/chosen": -216.9486541748047,
"logps/rejected": -229.16531372070312,
"loss": 0.6497,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": -0.34610506892204285,
"rewards/margins": 0.10790134966373444,
"rewards/rejected": -0.4540063738822937,
"step": 520
},
{
"epoch": 0.8436132113012336,
"grad_norm": 3.0690519001773744,
"learning_rate": 3.620695643093924e-08,
"logits/chosen": -1.436292290687561,
"logits/rejected": -1.4620921611785889,
"logps/chosen": -220.2271728515625,
"logps/rejected": -242.4230499267578,
"loss": 0.6444,
"rewards/accuracies": 0.671875,
"rewards/chosen": -0.2857658565044403,
"rewards/margins": 0.16418084502220154,
"rewards/rejected": -0.44994670152664185,
"step": 530
},
{
"epoch": 0.8595304417031436,
"grad_norm": 3.5137530293056676,
"learning_rate": 2.9335655629243645e-08,
"logits/chosen": -1.4339349269866943,
"logits/rejected": -1.4756128787994385,
"logps/chosen": -225.1971893310547,
"logps/rejected": -240.7228546142578,
"loss": 0.6474,
"rewards/accuracies": 0.628125011920929,
"rewards/chosen": -0.32935741543769836,
"rewards/margins": 0.12228095531463623,
"rewards/rejected": -0.4516383707523346,
"step": 540
},
{
"epoch": 0.8754476721050537,
"grad_norm": 3.303113230393075,
"learning_rate": 2.31464156702382e-08,
"logits/chosen": -1.4354156255722046,
"logits/rejected": -1.4688793420791626,
"logps/chosen": -228.87588500976562,
"logps/rejected": -227.47872924804688,
"loss": 0.6503,
"rewards/accuracies": 0.609375,
"rewards/chosen": -0.31748342514038086,
"rewards/margins": 0.12047611176967621,
"rewards/rejected": -0.4379595220088959,
"step": 550
},
{
"epoch": 0.8913649025069638,
"grad_norm": 3.6187583437530138,
"learning_rate": 1.7658367139945228e-08,
"logits/chosen": -1.484116554260254,
"logits/rejected": -1.4669333696365356,
"logps/chosen": -220.8059844970703,
"logps/rejected": -235.5103759765625,
"loss": 0.6422,
"rewards/accuracies": 0.578125,
"rewards/chosen": -0.34964197874069214,
"rewards/margins": 0.1103706806898117,
"rewards/rejected": -0.46001267433166504,
"step": 560
},
{
"epoch": 0.9072821329088738,
"grad_norm": 2.946862169915585,
"learning_rate": 1.2888473281864597e-08,
"logits/chosen": -1.3717347383499146,
"logits/rejected": -1.4307266473770142,
"logps/chosen": -230.9296112060547,
"logps/rejected": -248.7758026123047,
"loss": 0.6373,
"rewards/accuracies": 0.6468750238418579,
"rewards/chosen": -0.3366944193840027,
"rewards/margins": 0.13169880211353302,
"rewards/rejected": -0.4683932363986969,
"step": 570
},
{
"epoch": 0.9231993633107839,
"grad_norm": 3.986364311332544,
"learning_rate": 8.851477564560061e-09,
"logits/chosen": -1.4669787883758545,
"logits/rejected": -1.5305414199829102,
"logps/chosen": -227.6433563232422,
"logps/rejected": -243.227294921875,
"loss": 0.6434,
"rewards/accuracies": 0.6468750238418579,
"rewards/chosen": -0.32894089818000793,
"rewards/margins": 0.15033873915672302,
"rewards/rejected": -0.47927966713905334,
"step": 580
},
{
"epoch": 0.939116593712694,
"grad_norm": 3.4745032456527394,
"learning_rate": 5.559858110443016e-09,
"logits/chosen": -1.418752908706665,
"logits/rejected": -1.463607668876648,
"logps/chosen": -237.24252319335938,
"logps/rejected": -252.40145874023438,
"loss": 0.6384,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.3376900553703308,
"rewards/margins": 0.15840700268745422,
"rewards/rejected": -0.4960970878601074,
"step": 590
},
{
"epoch": 0.955033824114604,
"grad_norm": 6.105907684391068,
"learning_rate": 3.023789126611137e-09,
"logits/chosen": -1.4318501949310303,
"logits/rejected": -1.4236361980438232,
"logps/chosen": -224.8287811279297,
"logps/rejected": -240.1597442626953,
"loss": 0.6398,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.32297495007514954,
"rewards/margins": 0.1879177987575531,
"rewards/rejected": -0.5108927488327026,
"step": 600
},
{
"epoch": 0.9709510545165141,
"grad_norm": 7.921761427791219,
"learning_rate": 1.2511094569571668e-09,
"logits/chosen": -1.4697223901748657,
"logits/rejected": -1.5091297626495361,
"logps/chosen": -227.2742156982422,
"logps/rejected": -238.57974243164062,
"loss": 0.6414,
"rewards/accuracies": 0.596875011920929,
"rewards/chosen": -0.3365384042263031,
"rewards/margins": 0.1458342969417572,
"rewards/rejected": -0.4823727011680603,
"step": 610
},
{
"epoch": 0.9868682849184242,
"grad_norm": 3.1578814752663407,
"learning_rate": 2.4729835275189016e-10,
"logits/chosen": -1.4319922924041748,
"logits/rejected": -1.4167181253433228,
"logps/chosen": -224.8016357421875,
"logps/rejected": -247.70339965820312,
"loss": 0.6404,
"rewards/accuracies": 0.6343749761581421,
"rewards/chosen": -0.36974093317985535,
"rewards/margins": 0.14402151107788086,
"rewards/rejected": -0.5137624740600586,
"step": 620
},
{
"epoch": 0.9996020692399522,
"step": 628,
"total_flos": 0.0,
"train_loss": 0.6662861807331159,
"train_runtime": 15158.4407,
"train_samples_per_second": 10.608,
"train_steps_per_second": 0.041
}
],
"logging_steps": 10,
"max_steps": 628,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
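The entries in "log_history" above follow the usual Hugging Face Trainer convention: one dict per logging event (every "logging_steps" = 10 optimizer steps here) plus a final summary entry with the run totals. Note that the first logged "loss" of 0.6931 is ln 2, which is what a DPO-style objective of the form -log sigma(beta * margin) yields when the reward margin between chosen and rejected responses is still zero. Below is a minimal sketch of how a file like this could be parsed and plotted; the local file name "trainer_state.json", the filtering logic, and the matplotlib layout are assumptions for illustration, not anything defined by this repository.

# Minimal sketch (assumes this JSON is saved locally as trainer_state.json).
import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step logging entries; the final summary entry has no
# "rewards/margins" key, so this filter drops it.
logs = [e for e in state["log_history"] if "rewards/margins" in e]

steps = [e["step"] for e in logs]
loss = [e["loss"] for e in logs]
margins = [e["rewards/margins"] for e in logs]
accuracies = [e["rewards/accuracies"] for e in logs]

# One panel each for loss, reward margin, and preference accuracy vs. step.
fig, axes = plt.subplots(1, 3, figsize=(12, 3))
axes[0].plot(steps, loss)
axes[0].set_title("loss")
axes[1].plot(steps, margins)
axes[1].set_title("rewards/margins")
axes[2].plot(steps, accuracies)
axes[2].set_title("rewards/accuracies")
for ax in axes:
    ax.set_xlabel("step")
plt.tight_layout()
plt.show()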