DPO-Negative-zephyr-7b-sft-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1274,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0007849293563579278,
"grad_norm": 12.475590481200975,
"learning_rate": 3.90625e-09,
"logits/chosen": 5802.96484375,
"logits/rejected": 2731.5830078125,
"logps/chosen": -222.11163330078125,
"logps/rejected": -100.4254150390625,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.007849293563579277,
"grad_norm": 12.274808184833539,
"learning_rate": 3.9062499999999997e-08,
"logits/chosen": 4856.951171875,
"logits/rejected": 4244.28857421875,
"logps/chosen": -213.7843475341797,
"logps/rejected": -184.21807861328125,
"loss": 0.6932,
"rewards/accuracies": 0.4166666865348816,
"rewards/chosen": -0.030446164309978485,
"rewards/margins": -0.0019169822335243225,
"rewards/rejected": -0.02852918766438961,
"step": 10
},
{
"epoch": 0.015698587127158554,
"grad_norm": 12.2487664431145,
"learning_rate": 7.812499999999999e-08,
"logits/chosen": 5932.51220703125,
"logits/rejected": 4735.83056640625,
"logps/chosen": -229.7317657470703,
"logps/rejected": -202.9927978515625,
"loss": 0.6931,
"rewards/accuracies": 0.491666704416275,
"rewards/chosen": -0.022238370031118393,
"rewards/margins": -0.021966135129332542,
"rewards/rejected": -0.0002722390054259449,
"step": 20
},
{
"epoch": 0.023547880690737835,
"grad_norm": 10.90593260752459,
"learning_rate": 1.1718749999999999e-07,
"logits/chosen": 5929.66748046875,
"logits/rejected": 4987.24462890625,
"logps/chosen": -268.12542724609375,
"logps/rejected": -219.9074249267578,
"loss": 0.693,
"rewards/accuracies": 0.5333333611488342,
"rewards/chosen": 0.01516792643815279,
"rewards/margins": 0.03965631127357483,
"rewards/rejected": -0.024488374590873718,
"step": 30
},
{
"epoch": 0.03139717425431711,
"grad_norm": 11.139383333515463,
"learning_rate": 1.5624999999999999e-07,
"logits/chosen": 5152.3271484375,
"logits/rejected": 4249.921875,
"logps/chosen": -229.8832550048828,
"logps/rejected": -190.4424591064453,
"loss": 0.6924,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": 0.17236828804016113,
"rewards/margins": 0.19836299121379852,
"rewards/rejected": -0.02599470689892769,
"step": 40
},
{
"epoch": 0.03924646781789639,
"grad_norm": 11.645627718018657,
"learning_rate": 1.9531249999999998e-07,
"logits/chosen": 6222.9287109375,
"logits/rejected": 4912.32080078125,
"logps/chosen": -290.1268615722656,
"logps/rejected": -216.86715698242188,
"loss": 0.6917,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 0.30730703473091125,
"rewards/margins": 0.3963390290737152,
"rewards/rejected": -0.08903200179338455,
"step": 50
},
{
"epoch": 0.04709576138147567,
"grad_norm": 11.078657530540328,
"learning_rate": 2.3437499999999998e-07,
"logits/chosen": 5279.97265625,
"logits/rejected": 4425.509765625,
"logps/chosen": -238.16915893554688,
"logps/rejected": -221.2810516357422,
"loss": 0.6909,
"rewards/accuracies": 0.6166666150093079,
"rewards/chosen": 0.1900128722190857,
"rewards/margins": 0.3637149930000305,
"rewards/rejected": -0.17370210587978363,
"step": 60
},
{
"epoch": 0.054945054945054944,
"grad_norm": 10.308416988867366,
"learning_rate": 2.734375e-07,
"logits/chosen": 4956.0888671875,
"logits/rejected": 4729.8525390625,
"logps/chosen": -201.91192626953125,
"logps/rejected": -190.0984649658203,
"loss": 0.6901,
"rewards/accuracies": 0.6416666507720947,
"rewards/chosen": 0.21822793781757355,
"rewards/margins": 0.6749156713485718,
"rewards/rejected": -0.4566877484321594,
"step": 70
},
{
"epoch": 0.06279434850863422,
"grad_norm": 10.353972450706404,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": 5457.76318359375,
"logits/rejected": 5014.908203125,
"logps/chosen": -224.1815643310547,
"logps/rejected": -198.37759399414062,
"loss": 0.6866,
"rewards/accuracies": 0.6333333253860474,
"rewards/chosen": 0.34879016876220703,
"rewards/margins": 1.1451596021652222,
"rewards/rejected": -0.7963694334030151,
"step": 80
},
{
"epoch": 0.0706436420722135,
"grad_norm": 11.353604327534766,
"learning_rate": 3.5156249999999997e-07,
"logits/chosen": 5620.28076171875,
"logits/rejected": 4871.333984375,
"logps/chosen": -254.109619140625,
"logps/rejected": -202.3819580078125,
"loss": 0.6813,
"rewards/accuracies": 0.7333332896232605,
"rewards/chosen": -0.20441606640815735,
"rewards/margins": 2.9601101875305176,
"rewards/rejected": -3.1645264625549316,
"step": 90
},
{
"epoch": 0.07849293563579278,
"grad_norm": 10.863062067723817,
"learning_rate": 3.9062499999999997e-07,
"logits/chosen": 5459.212890625,
"logits/rejected": 5301.59375,
"logps/chosen": -248.0714111328125,
"logps/rejected": -224.43264770507812,
"loss": 0.6785,
"rewards/accuracies": 0.6750000715255737,
"rewards/chosen": -0.9182969927787781,
"rewards/margins": 4.251065254211426,
"rewards/rejected": -5.169361591339111,
"step": 100
},
{
"epoch": 0.08634222919937205,
"grad_norm": 10.382759941135648,
"learning_rate": 4.2968749999999996e-07,
"logits/chosen": 5782.78466796875,
"logits/rejected": 4790.76806640625,
"logps/chosen": -228.216796875,
"logps/rejected": -205.3565673828125,
"loss": 0.6726,
"rewards/accuracies": 0.6166666746139526,
"rewards/chosen": -6.446338653564453,
"rewards/margins": 3.2950546741485596,
"rewards/rejected": -9.741393089294434,
"step": 110
},
{
"epoch": 0.09419152276295134,
"grad_norm": 11.981598045577474,
"learning_rate": 4.6874999999999996e-07,
"logits/chosen": 5616.5302734375,
"logits/rejected": 4723.5234375,
"logps/chosen": -257.08856201171875,
"logps/rejected": -236.7274627685547,
"loss": 0.6647,
"rewards/accuracies": 0.5916666388511658,
"rewards/chosen": -12.205629348754883,
"rewards/margins": 5.487653732299805,
"rewards/rejected": -17.693279266357422,
"step": 120
},
{
"epoch": 0.10204081632653061,
"grad_norm": 10.165251494055534,
"learning_rate": 4.999962424962166e-07,
"logits/chosen": 5558.59130859375,
"logits/rejected": 5161.0791015625,
"logps/chosen": -268.20135498046875,
"logps/rejected": -242.4136505126953,
"loss": 0.6552,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -11.282087326049805,
"rewards/margins": 10.648801803588867,
"rewards/rejected": -21.930889129638672,
"step": 130
},
{
"epoch": 0.10989010989010989,
"grad_norm": 11.574010515701348,
"learning_rate": 4.998647417232375e-07,
"logits/chosen": 5367.11572265625,
"logits/rejected": 4732.9560546875,
"logps/chosen": -254.75390625,
"logps/rejected": -239.10379028320312,
"loss": 0.6475,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": -26.574289321899414,
"rewards/margins": 11.674284934997559,
"rewards/rejected": -38.248573303222656,
"step": 140
},
{
"epoch": 0.11773940345368916,
"grad_norm": 16.539816919613617,
"learning_rate": 4.995454786965036e-07,
"logits/chosen": 5435.21240234375,
"logits/rejected": 4535.978515625,
"logps/chosen": -287.0696105957031,
"logps/rejected": -250.68338012695312,
"loss": 0.6374,
"rewards/accuracies": 0.6333333253860474,
"rewards/chosen": -41.22538375854492,
"rewards/margins": 17.202348709106445,
"rewards/rejected": -58.427734375,
"step": 150
},
{
"epoch": 0.12558869701726844,
"grad_norm": 13.73694661177953,
"learning_rate": 4.990386933279972e-07,
"logits/chosen": 5422.66015625,
"logits/rejected": 4827.36376953125,
"logps/chosen": -286.73846435546875,
"logps/rejected": -287.48944091796875,
"loss": 0.6226,
"rewards/accuracies": 0.6666666865348816,
"rewards/chosen": -46.53741455078125,
"rewards/margins": 23.57110595703125,
"rewards/rejected": -70.1085205078125,
"step": 160
},
{
"epoch": 0.13343799058084774,
"grad_norm": 19.848950507634342,
"learning_rate": 4.983447664444096e-07,
"logits/chosen": 5697.08544921875,
"logits/rejected": 5069.8369140625,
"logps/chosen": -314.49658203125,
"logps/rejected": -299.6322937011719,
"loss": 0.6165,
"rewards/accuracies": 0.6416666507720947,
"rewards/chosen": -58.45717239379883,
"rewards/margins": 21.190235137939453,
"rewards/rejected": -79.64739990234375,
"step": 170
},
{
"epoch": 0.141287284144427,
"grad_norm": 15.58018487574887,
"learning_rate": 4.97464219500968e-07,
"logits/chosen": 5011.5390625,
"logits/rejected": 4356.61669921875,
"logps/chosen": -291.97216796875,
"logps/rejected": -284.8171081542969,
"loss": 0.6071,
"rewards/accuracies": 0.6416667699813843,
"rewards/chosen": -59.84586715698242,
"rewards/margins": 24.90540885925293,
"rewards/rejected": -84.75127410888672,
"step": 180
},
{
"epoch": 0.14913657770800628,
"grad_norm": 29.929410797708396,
"learning_rate": 4.963977141895843e-07,
"logits/chosen": 5085.25927734375,
"logits/rejected": 4315.0166015625,
"logps/chosen": -335.6153869628906,
"logps/rejected": -343.84417724609375,
"loss": 0.5813,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -86.85151672363281,
"rewards/margins": 45.62450408935547,
"rewards/rejected": -132.4760284423828,
"step": 190
},
{
"epoch": 0.15698587127158556,
"grad_norm": 23.010314557802847,
"learning_rate": 4.951460519416227e-07,
"logits/chosen": 4992.20263671875,
"logits/rejected": 4604.447265625,
"logps/chosen": -288.6399230957031,
"logps/rejected": -322.26800537109375,
"loss": 0.5795,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -67.88831329345703,
"rewards/margins": 36.403099060058594,
"rewards/rejected": -104.2914047241211,
"step": 200
},
{
"epoch": 0.16483516483516483,
"grad_norm": 21.226483616035054,
"learning_rate": 4.937101733256606e-07,
"logits/chosen": 4520.96533203125,
"logits/rejected": 4006.594482421875,
"logps/chosen": -252.9499053955078,
"logps/rejected": -269.8682861328125,
"loss": 0.5821,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -59.6712646484375,
"rewards/margins": 33.010738372802734,
"rewards/rejected": -92.6820068359375,
"step": 210
},
{
"epoch": 0.1726844583987441,
"grad_norm": 26.87015939833776,
"learning_rate": 4.920911573406924e-07,
"logits/chosen": 5554.3759765625,
"logits/rejected": 4668.2509765625,
"logps/chosen": -319.15203857421875,
"logps/rejected": -305.9933776855469,
"loss": 0.5645,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -76.67576599121094,
"rewards/margins": 45.06718063354492,
"rewards/rejected": -121.7429428100586,
"step": 220
},
{
"epoch": 0.18053375196232338,
"grad_norm": 17.217923042997022,
"learning_rate": 4.902902206053098e-07,
"logits/chosen": 5001.35107421875,
"logits/rejected": 4525.578125,
"logps/chosen": -378.13311767578125,
"logps/rejected": -380.3160705566406,
"loss": 0.5919,
"rewards/accuracies": 0.6750000715255737,
"rewards/chosen": -146.92613220214844,
"rewards/margins": 31.738452911376953,
"rewards/rejected": -178.6645965576172,
"step": 230
},
{
"epoch": 0.18838304552590268,
"grad_norm": 18.49164154902578,
"learning_rate": 4.883087164434672e-07,
"logits/chosen": 4519.3544921875,
"logits/rejected": 3586.21728515625,
"logps/chosen": -320.5347900390625,
"logps/rejected": -326.6560363769531,
"loss": 0.5792,
"rewards/accuracies": 0.7416667342185974,
"rewards/chosen": -112.61865234375,
"rewards/margins": 41.59739303588867,
"rewards/rejected": -154.21603393554688,
"step": 240
},
{
"epoch": 0.19623233908948196,
"grad_norm": 26.615530089919684,
"learning_rate": 4.861481338675183e-07,
"logits/chosen": 5337.7587890625,
"logits/rejected": 4751.5302734375,
"logps/chosen": -285.52813720703125,
"logps/rejected": -334.30804443359375,
"loss": 0.5758,
"rewards/accuracies": 0.6916667222976685,
"rewards/chosen": -80.38849639892578,
"rewards/margins": 49.028236389160156,
"rewards/rejected": -129.41671752929688,
"step": 250
},
{
"epoch": 0.20408163265306123,
"grad_norm": 26.246394909716958,
"learning_rate": 4.838100964592904e-07,
"logits/chosen": 5445.4697265625,
"logits/rejected": 4349.0927734375,
"logps/chosen": -369.8463439941406,
"logps/rejected": -354.04150390625,
"loss": 0.5862,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -121.27079772949219,
"rewards/margins": 43.82752227783203,
"rewards/rejected": -165.0983428955078,
"step": 260
},
{
"epoch": 0.2119309262166405,
"grad_norm": 22.317945165315084,
"learning_rate": 4.812963611500339e-07,
"logits/chosen": 5298.1767578125,
"logits/rejected": 5102.0869140625,
"logps/chosen": -382.55364990234375,
"logps/rejected": -407.2611389160156,
"loss": 0.5343,
"rewards/accuracies": 0.6999999284744263,
"rewards/chosen": -143.6182098388672,
"rewards/margins": 51.069644927978516,
"rewards/rejected": -194.68785095214844,
"step": 270
},
{
"epoch": 0.21978021978021978,
"grad_norm": 23.940733165507943,
"learning_rate": 4.786088169001671e-07,
"logits/chosen": 4491.1669921875,
"logits/rejected": 3884.12939453125,
"logps/chosen": -326.46990966796875,
"logps/rejected": -377.7791442871094,
"loss": 0.5462,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -125.93644714355469,
"rewards/margins": 63.7982292175293,
"rewards/rejected": -189.73468017578125,
"step": 280
},
{
"epoch": 0.22762951334379905,
"grad_norm": 29.619900329265853,
"learning_rate": 4.7574948327980567e-07,
"logits/chosen": 6284.06396484375,
"logits/rejected": 4566.36865234375,
"logps/chosen": -410.40447998046875,
"logps/rejected": -409.33306884765625,
"loss": 0.5178,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -122.14656066894531,
"rewards/margins": 82.41648864746094,
"rewards/rejected": -204.5630340576172,
"step": 290
},
{
"epoch": 0.23547880690737832,
"grad_norm": 21.413305886609265,
"learning_rate": 4.727205089511466e-07,
"logits/chosen": 4532.3505859375,
"logits/rejected": 4436.92919921875,
"logps/chosen": -362.99530029296875,
"logps/rejected": -413.7596740722656,
"loss": 0.5352,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -159.3097686767578,
"rewards/margins": 63.9193115234375,
"rewards/rejected": -223.2290802001953,
"step": 300
},
{
"epoch": 0.24332810047095763,
"grad_norm": 28.573536621811172,
"learning_rate": 4.6952417005384247e-07,
"logits/chosen": 5070.1220703125,
"logits/rejected": 4487.4990234375,
"logps/chosen": -379.1419982910156,
"logps/rejected": -401.3731384277344,
"loss": 0.5832,
"rewards/accuracies": 0.6833332777023315,
"rewards/chosen": -156.77000427246094,
"rewards/margins": 48.11083221435547,
"rewards/rejected": -204.880859375,
"step": 310
},
{
"epoch": 0.25117739403453687,
"grad_norm": 25.68145197432455,
"learning_rate": 4.661628684945851e-07,
"logits/chosen": 5099.7763671875,
"logits/rejected": 4360.29052734375,
"logps/chosen": -398.98260498046875,
"logps/rejected": -448.6531677246094,
"loss": 0.5388,
"rewards/accuracies": 0.75,
"rewards/chosen": -154.9723358154297,
"rewards/margins": 69.99870300292969,
"rewards/rejected": -224.97103881835938,
"step": 320
},
{
"epoch": 0.25902668759811615,
"grad_norm": 21.770264556528193,
"learning_rate": 4.626391301421782e-07,
"logits/chosen": 4882.7724609375,
"logits/rejected": 4423.73876953125,
"logps/chosen": -393.62530517578125,
"logps/rejected": -388.9612121582031,
"loss": 0.5863,
"rewards/accuracies": 0.6583333015441895,
"rewards/chosen": -157.69467163085938,
"rewards/margins": 32.65513229370117,
"rewards/rejected": -190.3498077392578,
"step": 330
},
{
"epoch": 0.2668759811616955,
"grad_norm": 21.589266550649103,
"learning_rate": 4.5895560292945996e-07,
"logits/chosen": 5193.98876953125,
"logits/rejected": 5274.88134765625,
"logps/chosen": -351.21490478515625,
"logps/rejected": -404.2591552734375,
"loss": 0.5607,
"rewards/accuracies": 0.6916667222976685,
"rewards/chosen": -113.73481750488281,
"rewards/margins": 46.264808654785156,
"rewards/rejected": -159.9995880126953,
"step": 340
},
{
"epoch": 0.27472527472527475,
"grad_norm": 36.464413378574925,
"learning_rate": 4.5511505486349865e-07,
"logits/chosen": 5461.7431640625,
"logits/rejected": 4910.5126953125,
"logps/chosen": -365.9176330566406,
"logps/rejected": -426.64886474609375,
"loss": 0.5272,
"rewards/accuracies": 0.7416667342185974,
"rewards/chosen": -126.54740142822266,
"rewards/margins": 69.62226867675781,
"rewards/rejected": -196.169677734375,
"step": 350
},
{
"epoch": 0.282574568288854,
"grad_norm": 30.867271483802618,
"learning_rate": 4.5112037194555876e-07,
"logits/chosen": 4940.5576171875,
"logits/rejected": 4777.501953125,
"logps/chosen": -362.74676513671875,
"logps/rejected": -459.696044921875,
"loss": 0.5099,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -147.5183563232422,
"rewards/margins": 89.27476501464844,
"rewards/rejected": -236.7931365966797,
"step": 360
},
{
"epoch": 0.2904238618524333,
"grad_norm": 33.42858804556624,
"learning_rate": 4.4697455600239863e-07,
"logits/chosen": 4417.3720703125,
"logits/rejected": 4072.72998046875,
"logps/chosen": -407.4537353515625,
"logps/rejected": -440.5425720214844,
"loss": 0.5758,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -182.4361572265625,
"rewards/margins": 69.95653533935547,
"rewards/rejected": -252.3927001953125,
"step": 370
},
{
"epoch": 0.29827315541601257,
"grad_norm": 30.08975581641362,
"learning_rate": 4.426807224305315e-07,
"logits/chosen": 5314.12353515625,
"logits/rejected": 4283.44580078125,
"logps/chosen": -442.2035217285156,
"logps/rejected": -454.37371826171875,
"loss": 0.5278,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -167.31723022460938,
"rewards/margins": 82.02327728271484,
"rewards/rejected": -249.3405303955078,
"step": 380
},
{
"epoch": 0.30612244897959184,
"grad_norm": 37.07543716683873,
"learning_rate": 4.3824209785514326e-07,
"logits/chosen": 5521.34619140625,
"logits/rejected": 4131.35205078125,
"logps/chosen": -383.25079345703125,
"logps/rejected": -408.64959716796875,
"loss": 0.531,
"rewards/accuracies": 0.8166667222976685,
"rewards/chosen": -122.80107116699219,
"rewards/margins": 87.73664855957031,
"rewards/rejected": -210.53775024414062,
"step": 390
},
{
"epoch": 0.3139717425431711,
"grad_norm": 30.149579075911312,
"learning_rate": 4.3366201770542687e-07,
"logits/chosen": 4727.5537109375,
"logits/rejected": 4581.6103515625,
"logps/chosen": -371.4571838378906,
"logps/rejected": -422.8492126464844,
"loss": 0.5492,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -141.8148193359375,
"rewards/margins": 69.41258239746094,
"rewards/rejected": -211.22738647460938,
"step": 400
},
{
"epoch": 0.3218210361067504,
"grad_norm": 53.112004291984846,
"learning_rate": 4.2894392370815567e-07,
"logits/chosen": 5015.6982421875,
"logits/rejected": 4405.0615234375,
"logps/chosen": -478.71160888671875,
"logps/rejected": -553.7672119140625,
"loss": 0.4601,
"rewards/accuracies": 0.75,
"rewards/chosen": -222.1967315673828,
"rewards/margins": 99.87347412109375,
"rewards/rejected": -322.0701599121094,
"step": 410
},
{
"epoch": 0.32967032967032966,
"grad_norm": 42.71067198975326,
"learning_rate": 4.2409136130137845e-07,
"logits/chosen": 4696.68359375,
"logits/rejected": 4161.30859375,
"logps/chosen": -485.77850341796875,
"logps/rejected": -544.0584106445312,
"loss": 0.5109,
"rewards/accuracies": 0.75,
"rewards/chosen": -240.3953399658203,
"rewards/margins": 101.9373779296875,
"rewards/rejected": -342.33270263671875,
"step": 420
},
{
"epoch": 0.33751962323390894,
"grad_norm": 36.86470558327797,
"learning_rate": 4.1910797697018017e-07,
"logits/chosen": 4535.0703125,
"logits/rejected": 3715.22900390625,
"logps/chosen": -387.56622314453125,
"logps/rejected": -442.45001220703125,
"loss": 0.5041,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -168.50804138183594,
"rewards/margins": 94.3797378540039,
"rewards/rejected": -262.88775634765625,
"step": 430
},
{
"epoch": 0.3453689167974882,
"grad_norm": 41.1927543645631,
"learning_rate": 4.1399751550651084e-07,
"logits/chosen": 4775.3779296875,
"logits/rejected": 4688.51806640625,
"logps/chosen": -415.14837646484375,
"logps/rejected": -473.6229553222656,
"loss": 0.509,
"rewards/accuracies": 0.7499999403953552,
"rewards/chosen": -193.5423126220703,
"rewards/margins": 67.2273178100586,
"rewards/rejected": -260.7696228027344,
"step": 440
},
{
"epoch": 0.3532182103610675,
"grad_norm": 27.935596047973707,
"learning_rate": 4.087638171951401e-07,
"logits/chosen": 5618.0029296875,
"logits/rejected": 3925.375732421875,
"logps/chosen": -436.5665588378906,
"logps/rejected": -468.33807373046875,
"loss": 0.5111,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -180.71682739257812,
"rewards/margins": 106.69264221191406,
"rewards/rejected": -287.409423828125,
"step": 450
},
{
"epoch": 0.36106750392464676,
"grad_norm": 55.088267998695486,
"learning_rate": 4.034108149278543e-07,
"logits/chosen": 5733.8076171875,
"logits/rejected": 4270.50830078125,
"logps/chosen": -596.9160766601562,
"logps/rejected": -619.10205078125,
"loss": 0.489,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -296.2103576660156,
"rewards/margins": 110.0383071899414,
"rewards/rejected": -406.2486877441406,
"step": 460
},
{
"epoch": 0.36891679748822603,
"grad_norm": 59.00967696023003,
"learning_rate": 3.979425312480629e-07,
"logits/chosen": 4872.0048828125,
"logits/rejected": 4213.068359375,
"logps/chosen": -542.8075561523438,
"logps/rejected": -597.90771484375,
"loss": 0.5373,
"rewards/accuracies": 0.7750000953674316,
"rewards/chosen": -288.88983154296875,
"rewards/margins": 84.90777587890625,
"rewards/rejected": -373.7976379394531,
"step": 470
},
{
"epoch": 0.37676609105180536,
"grad_norm": 22.16721464836784,
"learning_rate": 3.923630753280357e-07,
"logits/chosen": 5398.4091796875,
"logits/rejected": 4557.08056640625,
"logps/chosen": -404.0497741699219,
"logps/rejected": -438.47943115234375,
"loss": 0.5094,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -154.57666015625,
"rewards/margins": 87.84249877929688,
"rewards/rejected": -242.41915893554688,
"step": 480
},
{
"epoch": 0.38461538461538464,
"grad_norm": 38.00527910475893,
"learning_rate": 3.866766398810424e-07,
"logits/chosen": 4992.42431640625,
"logits/rejected": 4696.40380859375,
"logps/chosen": -374.29962158203125,
"logps/rejected": -471.99493408203125,
"loss": 0.4872,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -162.0869598388672,
"rewards/margins": 87.14299011230469,
"rewards/rejected": -249.2299346923828,
"step": 490
},
{
"epoch": 0.3924646781789639,
"grad_norm": 32.335247005694626,
"learning_rate": 3.8088749801071496e-07,
"logits/chosen": 5322.54345703125,
"logits/rejected": 4003.192626953125,
"logps/chosen": -513.4617919921875,
"logps/rejected": -576.3197631835938,
"loss": 0.5035,
"rewards/accuracies": 0.76666659116745,
"rewards/chosen": -250.0567626953125,
"rewards/margins": 104.65950775146484,
"rewards/rejected": -354.71624755859375,
"step": 500
},
{
"epoch": 0.4003139717425432,
"grad_norm": 30.478334273643554,
"learning_rate": 3.75e-07,
"logits/chosen": 4294.5712890625,
"logits/rejected": 3678.155517578125,
"logps/chosen": -439.2808532714844,
"logps/rejected": -507.6415100097656,
"loss": 0.5039,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -214.4663848876953,
"rewards/margins": 108.06767272949219,
"rewards/rejected": -322.5340576171875,
"step": 510
},
{
"epoch": 0.40816326530612246,
"grad_norm": 33.03421947702496,
"learning_rate": 3.6901857004211443e-07,
"logits/chosen": 4474.36474609375,
"logits/rejected": 4061.989501953125,
"logps/chosen": -458.46466064453125,
"logps/rejected": -526.6428833007812,
"loss": 0.5403,
"rewards/accuracies": 0.7333332896232605,
"rewards/chosen": -222.7661590576172,
"rewards/margins": 86.2357406616211,
"rewards/rejected": -309.0019226074219,
"step": 520
},
{
"epoch": 0.41601255886970173,
"grad_norm": 32.527399512274805,
"learning_rate": 3.6294770291596076e-07,
"logits/chosen": 5134.86865234375,
"logits/rejected": 4116.083984375,
"logps/chosen": -439.0199279785156,
"logps/rejected": -473.38330078125,
"loss": 0.4737,
"rewards/accuracies": 0.6916666626930237,
"rewards/chosen": -186.16439819335938,
"rewards/margins": 68.76704406738281,
"rewards/rejected": -254.9314422607422,
"step": 530
},
{
"epoch": 0.423861852433281,
"grad_norm": 39.06089225778835,
"learning_rate": 3.5679196060850034e-07,
"logits/chosen": 4892.14208984375,
"logits/rejected": 4289.6962890625,
"logps/chosen": -460.6468200683594,
"logps/rejected": -508.8052673339844,
"loss": 0.5188,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -213.62527465820312,
"rewards/margins": 87.31403350830078,
"rewards/rejected": -300.9393310546875,
"step": 540
},
{
"epoch": 0.4317111459968603,
"grad_norm": 49.118786005060336,
"learning_rate": 3.505559688866229e-07,
"logits/chosen": 4719.75146484375,
"logits/rejected": 4281.369140625,
"logps/chosen": -461.7283630371094,
"logps/rejected": -552.6526489257812,
"loss": 0.4963,
"rewards/accuracies": 0.7666667699813843,
"rewards/chosen": -209.6588897705078,
"rewards/margins": 96.42349243164062,
"rewards/rejected": -306.0823669433594,
"step": 550
},
{
"epoch": 0.43956043956043955,
"grad_norm": 27.349903612204887,
"learning_rate": 3.4424441382108826e-07,
"logits/chosen": 4647.734375,
"logits/rejected": 4307.3291015625,
"logps/chosen": -440.3028259277344,
"logps/rejected": -483.41400146484375,
"loss": 0.5272,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -200.43692016601562,
"rewards/margins": 67.8515625,
"rewards/rejected": -268.2884826660156,
"step": 560
},
{
"epoch": 0.4474097331240188,
"grad_norm": 39.524755315387765,
"learning_rate": 3.378620382651523e-07,
"logits/chosen": 4954.46630859375,
"logits/rejected": 4496.30615234375,
"logps/chosen": -484.87713623046875,
"logps/rejected": -542.4344482421875,
"loss": 0.4735,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -201.98617553710938,
"rewards/margins": 94.5064697265625,
"rewards/rejected": -296.49261474609375,
"step": 570
},
{
"epoch": 0.4552590266875981,
"grad_norm": 62.06889378266735,
"learning_rate": 3.314136382905234e-07,
"logits/chosen": 4907.35009765625,
"logits/rejected": 4291.45947265625,
"logps/chosen": -480.59649658203125,
"logps/rejected": -604.5867309570312,
"loss": 0.5052,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -229.9254150390625,
"rewards/margins": 140.01011657714844,
"rewards/rejected": -369.935546875,
"step": 580
},
{
"epoch": 0.4631083202511774,
"grad_norm": 39.59158963097188,
"learning_rate": 3.249040595833274e-07,
"logits/chosen": 5314.03662109375,
"logits/rejected": 4333.29052734375,
"logps/chosen": -549.6556396484375,
"logps/rejected": -595.24560546875,
"loss": 0.4797,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -276.78326416015625,
"rewards/margins": 119.59981536865234,
"rewards/rejected": -396.3830871582031,
"step": 590
},
{
"epoch": 0.47095761381475665,
"grad_norm": 36.55576011897287,
"learning_rate": 3.1833819380279023e-07,
"logits/chosen": 5052.849609375,
"logits/rejected": 4232.447265625,
"logps/chosen": -459.165283203125,
"logps/rejected": -543.8878173828125,
"loss": 0.4682,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -245.71682739257812,
"rewards/margins": 89.57303619384766,
"rewards/rejected": -335.28985595703125,
"step": 600
},
{
"epoch": 0.478806907378336,
"grad_norm": 30.821613737632966,
"learning_rate": 3.11720974905373e-07,
"logits/chosen": 4877.54296875,
"logits/rejected": 4174.18798828125,
"logps/chosen": -466.88775634765625,
"logps/rejected": -536.7265014648438,
"loss": 0.4739,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -224.50619506835938,
"rewards/margins": 108.45524597167969,
"rewards/rejected": -332.96142578125,
"step": 610
},
{
"epoch": 0.48665620094191525,
"grad_norm": 34.74645392610602,
"learning_rate": 3.0505737543712275e-07,
"logits/chosen": 4051.50390625,
"logits/rejected": 3255.41650390625,
"logps/chosen": -474.4776306152344,
"logps/rejected": -518.7869873046875,
"loss": 0.487,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -259.45001220703125,
"rewards/margins": 83.18482971191406,
"rewards/rejected": -342.63482666015625,
"step": 620
},
{
"epoch": 0.4945054945054945,
"grad_norm": 47.743071517312806,
"learning_rate": 2.9835240279702513e-07,
"logits/chosen": 5330.208984375,
"logits/rejected": 4389.91796875,
"logps/chosen": -520.0737915039062,
"logps/rejected": -579.0745239257812,
"loss": 0.5009,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -236.19070434570312,
"rewards/margins": 129.39373779296875,
"rewards/rejected": -365.58441162109375,
"step": 630
},
{
"epoch": 0.5023547880690737,
"grad_norm": 29.295984687001845,
"learning_rate": 2.9161109547416667e-07,
"logits/chosen": 5068.5205078125,
"logits/rejected": 4261.4248046875,
"logps/chosen": -480.3011779785156,
"logps/rejected": -536.0881958007812,
"loss": 0.4777,
"rewards/accuracies": 0.6666666865348816,
"rewards/chosen": -233.4359588623047,
"rewards/margins": 69.01892852783203,
"rewards/rejected": -302.45489501953125,
"step": 640
},
{
"epoch": 0.5102040816326531,
"grad_norm": 41.563700872658984,
"learning_rate": 2.848385192615339e-07,
"logits/chosen": 4259.57080078125,
"logits/rejected": 3367.3671875,
"logps/chosen": -489.69189453125,
"logps/rejected": -546.6644287109375,
"loss": 0.4827,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -262.749267578125,
"rewards/margins": 100.28528594970703,
"rewards/rejected": -363.0345458984375,
"step": 650
},
{
"epoch": 0.5180533751962323,
"grad_norm": 40.103833207905225,
"learning_rate": 2.780397634492949e-07,
"logits/chosen": 4852.6220703125,
"logits/rejected": 3722.33447265625,
"logps/chosen": -526.5555419921875,
"logps/rejected": -621.1095581054688,
"loss": 0.5056,
"rewards/accuracies": 0.8083332777023315,
"rewards/chosen": -274.5597229003906,
"rewards/margins": 141.62924194335938,
"rewards/rejected": -416.18890380859375,
"step": 660
},
{
"epoch": 0.5259026687598116,
"grad_norm": 29.43088475886643,
"learning_rate": 2.71219937000424e-07,
"logits/chosen": 4830.27197265625,
"logits/rejected": 3893.19189453125,
"logps/chosen": -486.42681884765625,
"logps/rejected": -530.4225463867188,
"loss": 0.5243,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -245.6635284423828,
"rewards/margins": 78.86155700683594,
"rewards/rejected": -324.52508544921875,
"step": 670
},
{
"epoch": 0.533751962323391,
"grad_norm": 31.182801588258933,
"learning_rate": 2.6438416471154273e-07,
"logits/chosen": 4733.90185546875,
"logits/rejected": 3882.770263671875,
"logps/chosen": -514.396484375,
"logps/rejected": -553.091552734375,
"loss": 0.5028,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -267.9259033203125,
"rewards/margins": 90.08674621582031,
"rewards/rejected": -358.01263427734375,
"step": 680
},
{
"epoch": 0.5416012558869702,
"grad_norm": 47.627386195652505,
"learning_rate": 2.5753758336186326e-07,
"logits/chosen": 4698.3837890625,
"logits/rejected": 4201.091796875,
"logps/chosen": -506.1795959472656,
"logps/rejected": -616.4149780273438,
"loss": 0.4414,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -265.3197326660156,
"rewards/margins": 117.90340423583984,
"rewards/rejected": -383.22314453125,
"step": 690
},
{
"epoch": 0.5494505494505495,
"grad_norm": 39.58077310786335,
"learning_rate": 2.5068533785312666e-07,
"logits/chosen": 4304.9482421875,
"logits/rejected": 4061.88427734375,
"logps/chosen": -559.4569702148438,
"logps/rejected": -645.285888671875,
"loss": 0.5265,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -340.834716796875,
"rewards/margins": 101.8834228515625,
"rewards/rejected": -442.7181701660156,
"step": 700
},
{
"epoch": 0.5572998430141287,
"grad_norm": 42.63941422845071,
"learning_rate": 2.4383257734343794e-07,
"logits/chosen": 4288.2998046875,
"logits/rejected": 4244.28662109375,
"logps/chosen": -509.9969177246094,
"logps/rejected": -607.218994140625,
"loss": 0.4635,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -289.3899841308594,
"rewards/margins": 97.62913513183594,
"rewards/rejected": -387.0191345214844,
"step": 710
},
{
"epoch": 0.565149136577708,
"grad_norm": 33.11283438508928,
"learning_rate": 2.3698445137790258e-07,
"logits/chosen": 4733.6513671875,
"logits/rejected": 3980.5078125,
"logps/chosen": -513.1058349609375,
"logps/rejected": -590.878173828125,
"loss": 0.4792,
"rewards/accuracies": 0.7416667342185974,
"rewards/chosen": -263.0565490722656,
"rewards/margins": 112.4889144897461,
"rewards/rejected": -375.54547119140625,
"step": 720
},
{
"epoch": 0.5729984301412873,
"grad_norm": 34.77232311269171,
"learning_rate": 2.3014610601897157e-07,
"logits/chosen": 5122.7705078125,
"logits/rejected": 3787.330078125,
"logps/chosen": -532.5799560546875,
"logps/rejected": -587.8629150390625,
"loss": 0.4759,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -276.46661376953125,
"rewards/margins": 119.9605484008789,
"rewards/rejected": -396.4271545410156,
"step": 730
},
{
"epoch": 0.5808477237048666,
"grad_norm": 36.53178617730355,
"learning_rate": 2.2332267997940513e-07,
"logits/chosen": 4205.9931640625,
"logits/rejected": 3482.835205078125,
"logps/chosen": -500.3135681152344,
"logps/rejected": -572.3468017578125,
"loss": 0.4586,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -281.8234558105469,
"rewards/margins": 113.65240478515625,
"rewards/rejected": -395.47589111328125,
"step": 740
},
{
"epoch": 0.5886970172684458,
"grad_norm": 32.23670986998527,
"learning_rate": 2.1651930076075723e-07,
"logits/chosen": 4532.78759765625,
"logits/rejected": 4020.828125,
"logps/chosen": -500.58502197265625,
"logps/rejected": -567.5943603515625,
"loss": 0.5057,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -294.63348388671875,
"rewards/margins": 91.31996154785156,
"rewards/rejected": -385.9534606933594,
"step": 750
},
{
"epoch": 0.5965463108320251,
"grad_norm": 40.49186660052862,
"learning_rate": 2.0974108080028692e-07,
"logits/chosen": 4855.615234375,
"logits/rejected": 3718.32177734375,
"logps/chosen": -473.718994140625,
"logps/rejected": -524.0787963867188,
"loss": 0.4946,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -242.5243377685547,
"rewards/margins": 91.66629791259766,
"rewards/rejected": -334.19061279296875,
"step": 760
},
{
"epoch": 0.6043956043956044,
"grad_norm": 38.989817764999636,
"learning_rate": 2.0299311362918773e-07,
"logits/chosen": 5004.6923828125,
"logits/rejected": 4233.04345703125,
"logps/chosen": -498.7569274902344,
"logps/rejected": -583.8013916015625,
"loss": 0.5025,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -239.9112548828125,
"rewards/margins": 103.9779281616211,
"rewards/rejected": -343.8891906738281,
"step": 770
},
{
"epoch": 0.6122448979591837,
"grad_norm": 42.44351810817626,
"learning_rate": 1.962804700450265e-07,
"logits/chosen": 4899.0458984375,
"logits/rejected": 4581.07373046875,
"logps/chosen": -488.00018310546875,
"logps/rejected": -598.136474609375,
"loss": 0.4991,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -246.29177856445312,
"rewards/margins": 95.75922393798828,
"rewards/rejected": -342.05096435546875,
"step": 780
},
{
"epoch": 0.6200941915227629,
"grad_norm": 50.94896890068703,
"learning_rate": 1.8960819430126334e-07,
"logits/chosen": 4493.31884765625,
"logits/rejected": 3905.24560546875,
"logps/chosen": -514.9113159179688,
"logps/rejected": -618.4744262695312,
"loss": 0.4683,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -293.01318359375,
"rewards/margins": 122.41926574707031,
"rewards/rejected": -415.43243408203125,
"step": 790
},
{
"epoch": 0.6279434850863422,
"grad_norm": 42.38684087506178,
"learning_rate": 1.8298130031671972e-07,
"logits/chosen": 4511.79833984375,
"logits/rejected": 3870.30029296875,
"logps/chosen": -538.5282592773438,
"logps/rejected": -622.373046875,
"loss": 0.489,
"rewards/accuracies": 0.7583333849906921,
"rewards/chosen": -296.51654052734375,
"rewards/margins": 99.34091186523438,
"rewards/rejected": -395.857421875,
"step": 800
},
{
"epoch": 0.6357927786499215,
"grad_norm": 42.05527039402671,
"learning_rate": 1.7640476790784075e-07,
"logits/chosen": 4179.4951171875,
"logits/rejected": 3657.673095703125,
"logps/chosen": -492.28521728515625,
"logps/rejected": -614.8783569335938,
"loss": 0.4932,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -265.53662109375,
"rewards/margins": 115.07731628417969,
"rewards/rejected": -380.61395263671875,
"step": 810
},
{
"epoch": 0.6436420722135008,
"grad_norm": 34.44512542209413,
"learning_rate": 1.6988353904658492e-07,
"logits/chosen": 4655.6416015625,
"logits/rejected": 3476.08349609375,
"logps/chosen": -484.12078857421875,
"logps/rejected": -511.9400329589844,
"loss": 0.4743,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -234.5116424560547,
"rewards/margins": 96.8333740234375,
"rewards/rejected": -331.3450012207031,
"step": 820
},
{
"epoch": 0.6514913657770801,
"grad_norm": 57.225633692645914,
"learning_rate": 1.634225141467513e-07,
"logits/chosen": 4608.59375,
"logits/rejected": 3967.21630859375,
"logps/chosen": -460.72760009765625,
"logps/rejected": -558.2176513671875,
"loss": 0.4673,
"rewards/accuracies": 0.7583333849906921,
"rewards/chosen": -231.8772735595703,
"rewards/margins": 120.73353576660156,
"rewards/rejected": -352.6108093261719,
"step": 830
},
{
"epoch": 0.6593406593406593,
"grad_norm": 37.195175661942116,
"learning_rate": 1.570265483815364e-07,
"logits/chosen": 5059.12158203125,
"logits/rejected": 4047.733642578125,
"logps/chosen": -494.36944580078125,
"logps/rejected": -580.4661865234375,
"loss": 0.4881,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -243.17831420898438,
"rewards/margins": 110.78678131103516,
"rewards/rejected": -353.965087890625,
"step": 840
},
{
"epoch": 0.6671899529042387,
"grad_norm": 36.039668928381964,
"learning_rate": 1.5070044803508691e-07,
"logits/chosen": 4657.09228515625,
"logits/rejected": 4077.115966796875,
"logps/chosen": -473.2142639160156,
"logps/rejected": -570.8925170898438,
"loss": 0.4397,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -234.0208282470703,
"rewards/margins": 123.80484771728516,
"rewards/rejected": -357.82562255859375,
"step": 850
},
{
"epoch": 0.6750392464678179,
"grad_norm": 41.15640165341099,
"learning_rate": 1.444489668907914e-07,
"logits/chosen": 5019.951171875,
"logits/rejected": 4151.033203125,
"logps/chosen": -512.0299072265625,
"logps/rejected": -555.0409545898438,
"loss": 0.5147,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -240.62014770507812,
"rewards/margins": 98.43951416015625,
"rewards/rejected": -339.0596618652344,
"step": 860
},
{
"epoch": 0.6828885400313972,
"grad_norm": 38.0462720236845,
"learning_rate": 1.3827680265902232e-07,
"logits/chosen": 5007.7041015625,
"logits/rejected": 4021.748046875,
"logps/chosen": -477.19769287109375,
"logps/rejected": -537.0064086914062,
"loss": 0.486,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -222.881591796875,
"rewards/margins": 100.46888732910156,
"rewards/rejected": -323.3504333496094,
"step": 870
},
{
"epoch": 0.6907378335949764,
"grad_norm": 46.01072603362906,
"learning_rate": 1.3218859344701632e-07,
"logits/chosen": 4371.8818359375,
"logits/rejected": 4112.9267578125,
"logps/chosen": -474.7000427246094,
"logps/rejected": -578.34033203125,
"loss": 0.4892,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -244.37979125976562,
"rewards/margins": 88.69207000732422,
"rewards/rejected": -333.07183837890625,
"step": 880
},
{
"epoch": 0.6985871271585558,
"grad_norm": 39.360533303984695,
"learning_rate": 1.2618891427354172e-07,
"logits/chosen": 5183.97265625,
"logits/rejected": 4043.081298828125,
"logps/chosen": -525.6726684570312,
"logps/rejected": -587.99853515625,
"loss": 0.5108,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -245.16909790039062,
"rewards/margins": 119.1616439819336,
"rewards/rejected": -364.3307189941406,
"step": 890
},
{
"epoch": 0.706436420722135,
"grad_norm": 41.9517544228501,
"learning_rate": 1.202822736309758e-07,
"logits/chosen": 4367.7421875,
"logits/rejected": 3901.80078125,
"logps/chosen": -473.6145935058594,
"logps/rejected": -585.4021606445312,
"loss": 0.4798,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -249.7177276611328,
"rewards/margins": 113.61027526855469,
"rewards/rejected": -363.3280334472656,
"step": 900
},
{
"epoch": 0.7142857142857143,
"grad_norm": 41.23111129688047,
"learning_rate": 1.1447311009737299e-07,
"logits/chosen": 4254.6923828125,
"logits/rejected": 3928.880126953125,
"logps/chosen": -522.9127197265625,
"logps/rejected": -628.2672119140625,
"loss": 0.5044,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -296.88519287109375,
"rewards/margins": 113.23392486572266,
"rewards/rejected": -410.119140625,
"step": 910
},
{
"epoch": 0.7221350078492935,
"grad_norm": 48.49255829087865,
"learning_rate": 1.0876578900107053e-07,
"logits/chosen": 4771.11083984375,
"logits/rejected": 3838.133544921875,
"logps/chosen": -539.0571899414062,
"logps/rejected": -593.8516845703125,
"loss": 0.4849,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -286.70806884765625,
"rewards/margins": 98.1644515991211,
"rewards/rejected": -384.8725280761719,
"step": 920
},
{
"epoch": 0.7299843014128728,
"grad_norm": 29.048281553606326,
"learning_rate": 1.0316459914033793e-07,
"logits/chosen": 4733.7548828125,
"logits/rejected": 3398.47607421875,
"logps/chosen": -539.5250244140625,
"logps/rejected": -598.8209838867188,
"loss": 0.4516,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -284.1636657714844,
"rewards/margins": 119.49433898925781,
"rewards/rejected": -403.65802001953125,
"step": 930
},
{
"epoch": 0.7378335949764521,
"grad_norm": 37.687722286449436,
"learning_rate": 9.767374956053584e-08,
"logits/chosen": 4548.51953125,
"logits/rejected": 3858.14794921875,
"logps/chosen": -521.103271484375,
"logps/rejected": -632.01611328125,
"loss": 0.4854,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -285.16241455078125,
"rewards/margins": 131.4991912841797,
"rewards/rejected": -416.66168212890625,
"step": 940
},
{
"epoch": 0.7456828885400314,
"grad_norm": 34.343114145693335,
"learning_rate": 9.229736639120561e-08,
"logits/chosen": 4674.4990234375,
"logits/rejected": 4218.14697265625,
"logps/chosen": -537.5806274414062,
"logps/rejected": -605.527587890625,
"loss": 0.531,
"rewards/accuracies": 0.6916666626930237,
"rewards/chosen": -301.95098876953125,
"rewards/margins": 78.54788970947266,
"rewards/rejected": -380.4989318847656,
"step": 950
},
{
"epoch": 0.7535321821036107,
"grad_norm": 35.435212690367706,
"learning_rate": 8.70394897454659e-08,
"logits/chosen": 4569.05810546875,
"logits/rejected": 3965.15234375,
"logps/chosen": -524.1510009765625,
"logps/rejected": -611.5798950195312,
"loss": 0.4704,
"rewards/accuracies": 0.8083332777023315,
"rewards/chosen": -289.6184387207031,
"rewards/margins": 109.60545349121094,
"rewards/rejected": -399.223876953125,
"step": 960
},
{
"epoch": 0.7613814756671899,
"grad_norm": 36.18475296507249,
"learning_rate": 8.19040706840472e-08,
"logits/chosen": 4704.16748046875,
"logits/rejected": 3813.880859375,
"logps/chosen": -555.306396484375,
"logps/rejected": -637.1813354492188,
"loss": 0.471,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -297.9849548339844,
"rewards/margins": 124.18892669677734,
"rewards/rejected": -422.17388916015625,
"step": 970
},
{
"epoch": 0.7692307692307693,
"grad_norm": 35.56098880497067,
"learning_rate": 7.689496824624525e-08,
"logits/chosen": 4480.2490234375,
"logits/rejected": 3471.422607421875,
"logps/chosen": -550.8575439453125,
"logps/rejected": -640.6934204101562,
"loss": 0.4533,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -305.61871337890625,
"rewards/margins": 134.81448364257812,
"rewards/rejected": -440.4331970214844,
"step": 980
},
{
"epoch": 0.7770800627943485,
"grad_norm": 37.66932352177828,
"learning_rate": 7.201594655002458e-08,
"logits/chosen": 4696.7587890625,
"logits/rejected": 3814.75439453125,
"logps/chosen": -562.9727172851562,
"logps/rejected": -640.4573974609375,
"loss": 0.476,
"rewards/accuracies": 0.73333340883255,
"rewards/chosen": -322.6749267578125,
"rewards/margins": 114.89030456542969,
"rewards/rejected": -437.565185546875,
"step": 990
},
{
"epoch": 0.7849293563579278,
"grad_norm": 44.26634372617539,
"learning_rate": 6.727067196345099e-08,
"logits/chosen": 4435.0849609375,
"logits/rejected": 3616.951171875,
"logps/chosen": -535.949462890625,
"logps/rejected": -591.9482421875,
"loss": 0.4839,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -307.537353515625,
"rewards/margins": 95.3097152709961,
"rewards/rejected": -402.8470764160156,
"step": 1000
},
{
"epoch": 0.792778649921507,
"grad_norm": 39.02096024119352,
"learning_rate": 6.26627103495786e-08,
"logits/chosen": 4594.76123046875,
"logits/rejected": 3711.387939453125,
"logps/chosen": -521.0853271484375,
"logps/rejected": -601.8387451171875,
"loss": 0.5102,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -292.9401550292969,
"rewards/margins": 109.3145523071289,
"rewards/rejected": -402.2546691894531,
"step": 1010
},
{
"epoch": 0.8006279434850864,
"grad_norm": 33.68706386764225,
"learning_rate": 5.8195524386862374e-08,
"logits/chosen": 4717.626953125,
"logits/rejected": 4049.309814453125,
"logps/chosen": -522.1405029296875,
"logps/rejected": -628.5657958984375,
"loss": 0.4749,
"rewards/accuracies": 0.8166667222976685,
"rewards/chosen": -257.61749267578125,
"rewards/margins": 136.46478271484375,
"rewards/rejected": -394.0822448730469,
"step": 1020
},
{
"epoch": 0.8084772370486656,
"grad_norm": 43.15445960678464,
"learning_rate": 5.38724709671092e-08,
"logits/chosen": 5028.97900390625,
"logits/rejected": 4620.5185546875,
"logps/chosen": -521.6478271484375,
"logps/rejected": -627.3370361328125,
"loss": 0.4769,
"rewards/accuracies": 0.73333340883255,
"rewards/chosen": -276.5311584472656,
"rewards/margins": 109.94007873535156,
"rewards/rejected": -386.4712219238281,
"step": 1030
},
{
"epoch": 0.8163265306122449,
"grad_norm": 44.18125140255459,
"learning_rate": 4.969679867292276e-08,
"logits/chosen": 4397.26953125,
"logits/rejected": 3876.498046875,
"logps/chosen": -512.53173828125,
"logps/rejected": -622.7384033203125,
"loss": 0.4752,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -278.93634033203125,
"rewards/margins": 125.53292083740234,
"rewards/rejected": -404.46929931640625,
"step": 1040
},
{
"epoch": 0.8241758241758241,
"grad_norm": 42.83203202326322,
"learning_rate": 4.5671645336537416e-08,
"logits/chosen": 4472.75048828125,
"logits/rejected": 3953.61279296875,
"logps/chosen": -520.4683227539062,
"logps/rejected": -612.5220947265625,
"loss": 0.4865,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -270.6463623046875,
"rewards/margins": 117.76123046875,
"rewards/rejected": -388.4075622558594,
"step": 1050
},
{
"epoch": 0.8320251177394035,
"grad_norm": 68.71503980995867,
"learning_rate": 4.180003568187776e-08,
"logits/chosen": 5573.80078125,
"logits/rejected": 4271.798828125,
"logps/chosen": -555.3674926757812,
"logps/rejected": -595.8564453125,
"loss": 0.5129,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -280.8909606933594,
"rewards/margins": 87.57881164550781,
"rewards/rejected": -368.46978759765625,
"step": 1060
},
{
"epoch": 0.8398744113029827,
"grad_norm": 41.55475665394904,
"learning_rate": 3.8084879051612144e-08,
"logits/chosen": 4646.9365234375,
"logits/rejected": 4112.8759765625,
"logps/chosen": -506.4486389160156,
"logps/rejected": -588.69091796875,
"loss": 0.4904,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -268.349365234375,
"rewards/margins": 120.84466552734375,
"rewards/rejected": -389.19403076171875,
"step": 1070
},
{
"epoch": 0.847723704866562,
"grad_norm": 30.642111450948516,
"learning_rate": 3.452896722091128e-08,
"logits/chosen": 5146.5068359375,
"logits/rejected": 3804.760986328125,
"logps/chosen": -551.9224853515625,
"logps/rejected": -599.4852294921875,
"loss": 0.4562,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -272.5589904785156,
"rewards/margins": 119.67265319824219,
"rewards/rejected": -392.23162841796875,
"step": 1080
},
{
"epoch": 0.8555729984301413,
"grad_norm": 44.62067621146283,
"learning_rate": 3.11349722995527e-08,
"logits/chosen": 5147.46484375,
"logits/rejected": 3730.094482421875,
"logps/chosen": -525.3967895507812,
"logps/rejected": -606.7144775390625,
"loss": 0.4684,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -286.5589294433594,
"rewards/margins": 101.43851470947266,
"rewards/rejected": -387.9974365234375,
"step": 1090
},
{
"epoch": 0.8634222919937206,
"grad_norm": 38.62390814349895,
"learning_rate": 2.7905444723949762e-08,
"logits/chosen": 4961.51123046875,
"logits/rejected": 3957.74951171875,
"logps/chosen": -529.5001831054688,
"logps/rejected": -599.69091796875,
"loss": 0.4862,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -279.7748107910156,
"rewards/margins": 123.9386215209961,
"rewards/rejected": -403.71343994140625,
"step": 1100
},
{
"epoch": 0.8712715855572999,
"grad_norm": 38.372589730130144,
"learning_rate": 2.484281134061142e-08,
"logits/chosen": 5294.82666015625,
"logits/rejected": 4111.9658203125,
"logps/chosen": -554.0993041992188,
"logps/rejected": -626.9337768554688,
"loss": 0.4704,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -278.41546630859375,
"rewards/margins": 119.38763427734375,
"rewards/rejected": -397.8031005859375,
"step": 1110
},
{
"epoch": 0.8791208791208791,
"grad_norm": 57.98312326007393,
"learning_rate": 2.194937358247506e-08,
"logits/chosen": 5172.3076171875,
"logits/rejected": 4062.425048828125,
"logps/chosen": -533.79833984375,
"logps/rejected": -614.7215576171875,
"loss": 0.4686,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -278.099609375,
"rewards/margins": 113.34749603271484,
"rewards/rejected": -391.44708251953125,
"step": 1120
},
{
"epoch": 0.8869701726844584,
"grad_norm": 46.674460372370675,
"learning_rate": 1.9227305739481612e-08,
"logits/chosen": 4703.9501953125,
"logits/rejected": 3532.016357421875,
"logps/chosen": -493.99896240234375,
"logps/rejected": -565.5808715820312,
"loss": 0.4667,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -247.5250244140625,
"rewards/margins": 131.71957397460938,
"rewards/rejected": -379.24462890625,
"step": 1130
},
{
"epoch": 0.8948194662480377,
"grad_norm": 37.832002412663726,
"learning_rate": 1.6678653324693787e-08,
"logits/chosen": 5219.7587890625,
"logits/rejected": 4062.36572265625,
"logps/chosen": -540.58935546875,
"logps/rejected": -617.1656494140625,
"loss": 0.4424,
"rewards/accuracies": 0.716666579246521,
"rewards/chosen": -269.20721435546875,
"rewards/margins": 120.55854797363281,
"rewards/rejected": -389.7657165527344,
"step": 1140
},
{
"epoch": 0.902668759811617,
"grad_norm": 35.67033747718792,
"learning_rate": 1.4305331537183384e-08,
"logits/chosen": 4566.01318359375,
"logits/rejected": 4102.55224609375,
"logps/chosen": -514.7657470703125,
"logps/rejected": -602.22265625,
"loss": 0.4702,
"rewards/accuracies": 0.6833333969116211,
"rewards/chosen": -277.2621765136719,
"rewards/margins": 97.84246826171875,
"rewards/rejected": -375.10467529296875,
"step": 1150
},
{
"epoch": 0.9105180533751962,
"grad_norm": 35.392397404976016,
"learning_rate": 1.2109123822844653e-08,
"logits/chosen": 4693.57568359375,
"logits/rejected": 3592.93798828125,
"logps/chosen": -512.32763671875,
"logps/rejected": -577.1019287109375,
"loss": 0.4816,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -274.27008056640625,
"rewards/margins": 101.40149688720703,
"rewards/rejected": -375.67156982421875,
"step": 1160
},
{
"epoch": 0.9183673469387755,
"grad_norm": 45.95075382829133,
"learning_rate": 1.0091680534213387e-08,
"logits/chosen": 5148.4287109375,
"logits/rejected": 4804.6767578125,
"logps/chosen": -520.1044311523438,
"logps/rejected": -644.5147705078125,
"loss": 0.4686,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -267.404541015625,
"rewards/margins": 124.0258560180664,
"rewards/rejected": -391.430419921875,
"step": 1170
},
{
"epoch": 0.9262166405023547,
"grad_norm": 34.40936489949813,
"learning_rate": 8.254517690300944e-09,
"logits/chosen": 4555.9189453125,
"logits/rejected": 3968.663330078125,
"logps/chosen": -514.3239135742188,
"logps/rejected": -612.931884765625,
"loss": 0.4563,
"rewards/accuracies": 0.7583333849906921,
"rewards/chosen": -264.92047119140625,
"rewards/margins": 124.9105453491211,
"rewards/rejected": -389.8310546875,
"step": 1180
},
{
"epoch": 0.9340659340659341,
"grad_norm": 27.002821538256047,
"learning_rate": 6.599015837372907e-09,
"logits/chosen": 4941.2802734375,
"logits/rejected": 4154.6494140625,
"logps/chosen": -561.2823486328125,
"logps/rejected": -636.2681884765625,
"loss": 0.4865,
"rewards/accuracies": 0.6833333969116211,
"rewards/chosen": -303.1945495605469,
"rewards/margins": 109.92854309082031,
"rewards/rejected": -413.123046875,
"step": 1190
},
{
"epoch": 0.9419152276295133,
"grad_norm": 49.08541664264898,
"learning_rate": 5.126419011529992e-09,
"logits/chosen": 5152.6005859375,
"logits/rejected": 4174.25390625,
"logps/chosen": -535.614013671875,
"logps/rejected": -634.1514892578125,
"loss": 0.4724,
"rewards/accuracies": 0.8166666030883789,
"rewards/chosen": -270.90155029296875,
"rewards/margins": 138.95677185058594,
"rewards/rejected": -409.8583984375,
"step": 1200
},
{
"epoch": 0.9497645211930926,
"grad_norm": 39.9904215355168,
"learning_rate": 3.837833803870177e-09,
"logits/chosen": 4798.62548828125,
"logits/rejected": 4023.02392578125,
"logps/chosen": -528.8109130859375,
"logps/rejected": -625.5532836914062,
"loss": 0.4918,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -279.74578857421875,
"rewards/margins": 123.31895446777344,
"rewards/rejected": -403.06475830078125,
"step": 1210
},
{
"epoch": 0.957613814756672,
"grad_norm": 42.131002325032554,
"learning_rate": 2.734228528934679e-09,
"logits/chosen": 6014.54296875,
"logits/rejected": 4256.4326171875,
"logps/chosen": -600.74169921875,
"logps/rejected": -666.4763793945312,
"loss": 0.4675,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -292.49603271484375,
"rewards/margins": 123.0171890258789,
"rewards/rejected": -415.51324462890625,
"step": 1220
},
{
"epoch": 0.9654631083202512,
"grad_norm": 56.35675392721789,
"learning_rate": 1.8164324970625645e-09,
"logits/chosen": 5335.0078125,
"logits/rejected": 4028.524658203125,
"logps/chosen": -554.1253662109375,
"logps/rejected": -623.9908447265625,
"loss": 0.5101,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -283.2986755371094,
"rewards/margins": 121.4345474243164,
"rewards/rejected": -404.73321533203125,
"step": 1230
},
{
"epoch": 0.9733124018838305,
"grad_norm": 40.63112442071848,
"learning_rate": 1.0851353912008642e-09,
"logits/chosen": 4527.8544921875,
"logits/rejected": 4048.458251953125,
"logps/chosen": -528.7938842773438,
"logps/rejected": -645.0094604492188,
"loss": 0.4688,
"rewards/accuracies": 0.6999999284744263,
"rewards/chosen": -289.4532470703125,
"rewards/margins": 114.35411071777344,
"rewards/rejected": -403.80731201171875,
"step": 1240
},
{
"epoch": 0.9811616954474097,
"grad_norm": 28.15817665696615,
"learning_rate": 5.408867486384471e-10,
"logits/chosen": 4681.501953125,
"logits/rejected": 3783.815185546875,
"logps/chosen": -495.4972229003906,
"logps/rejected": -564.3740844726562,
"loss": 0.4698,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -257.2614440917969,
"rewards/margins": 112.9941635131836,
"rewards/rejected": -370.25555419921875,
"step": 1250
},
{
"epoch": 0.989010989010989,
"grad_norm": 37.47361218014324,
"learning_rate": 1.840955480532924e-10,
"logits/chosen": 4424.9326171875,
"logits/rejected": 4007.66259765625,
"logps/chosen": -502.62652587890625,
"logps/rejected": -603.6646728515625,
"loss": 0.4513,
"rewards/accuracies": 0.8166666030883789,
"rewards/chosen": -263.01336669921875,
"rewards/margins": 118.80323791503906,
"rewards/rejected": -381.8165588378906,
"step": 1260
},
{
"epoch": 0.9968602825745683,
"grad_norm": 38.74483535442522,
"learning_rate": 1.502990218302247e-11,
"logits/chosen": 4574.69970703125,
"logits/rejected": 3588.76318359375,
"logps/chosen": -516.0886840820312,
"logps/rejected": -583.727294921875,
"loss": 0.5012,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -284.0630798339844,
"rewards/margins": 107.25108337402344,
"rewards/rejected": -391.31414794921875,
"step": 1270
},
{
"epoch": 1.0,
"step": 1274,
"total_flos": 0.0,
"train_loss": 0.5240897330421855,
"train_runtime": 13006.3363,
"train_samples_per_second": 4.7,
"train_steps_per_second": 0.098
}
],
"logging_steps": 10,
"max_steps": 1274,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}
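
The JSON above is the raw Trainer state saved at the end of the run. Below is a minimal sketch of how one might load its `log_history` and plot the DPO loss and reward margins against training steps; the file name, its presence in the working directory, and the use of matplotlib are assumptions for illustration, not part of the checkpoint itself.

```python
# Minimal sketch: inspect trainer_state.json and plot loss / rewards-margins curves.
# Assumes the file is saved locally as "trainer_state.json" and matplotlib is installed.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step logging entries; the final summary record
# (train_loss, train_runtime, ...) lacks the per-step keys and is skipped.
logs = [e for e in state["log_history"] if "loss" in e and "rewards/margins" in e]

steps = [e["step"] for e in logs]
loss = [e["loss"] for e in logs]
margins = [e["rewards/margins"] for e in logs]

fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot(steps, loss)
ax1.set_ylabel("DPO loss")
ax2.plot(steps, margins)
ax2.set_ylabel("rewards/margins")
ax2.set_xlabel("step")
plt.tight_layout()
plt.show()
```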