DPO_shift2-zephyr-7b-sft-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1274,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0007849293563579278,
"grad_norm": 6.093131966217363,
"learning_rate": 3.90625e-09,
"logits/chosen": 5914.52099609375,
"logits/rejected": 2785.021484375,
"logps/chosen": -212.45889282226562,
"logps/rejected": -98.59669494628906,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.007849293563579277,
"grad_norm": 6.063672072770918,
"learning_rate": 3.9062499999999997e-08,
"logits/chosen": 4973.8095703125,
"logits/rejected": 4328.318359375,
"logps/chosen": -204.25518798828125,
"logps/rejected": -179.73281860351562,
"loss": 0.6931,
"rewards/accuracies": 0.5092592835426331,
"rewards/chosen": 0.02867966890335083,
"rewards/margins": 0.05586132034659386,
"rewards/rejected": -0.02718164585530758,
"step": 10
},
{
"epoch": 0.015698587127158554,
"grad_norm": 6.188456944981756,
"learning_rate": 7.812499999999999e-08,
"logits/chosen": 6084.14111328125,
"logits/rejected": 4834.31640625,
"logps/chosen": -217.20156860351562,
"logps/rejected": -196.7338104248047,
"loss": 0.6932,
"rewards/accuracies": 0.5416666865348816,
"rewards/chosen": 0.009626108221709728,
"rewards/margins": 0.029883313924074173,
"rewards/rejected": -0.020257214084267616,
"step": 20
},
{
"epoch": 0.023547880690737835,
"grad_norm": 5.474904458795652,
"learning_rate": 1.1718749999999999e-07,
"logits/chosen": 6084.2373046875,
"logits/rejected": 5105.03662109375,
"logps/chosen": -250.5583953857422,
"logps/rejected": -209.34817504882812,
"loss": 0.693,
"rewards/accuracies": 0.5083333253860474,
"rewards/chosen": 0.013943061232566833,
"rewards/margins": 0.0645134299993515,
"rewards/rejected": -0.05057036876678467,
"step": 30
},
{
"epoch": 0.03139717425431711,
"grad_norm": 5.700132865060452,
"learning_rate": 1.5624999999999999e-07,
"logits/chosen": 5311.515625,
"logits/rejected": 4346.8349609375,
"logps/chosen": -211.9991455078125,
"logps/rejected": -181.73727416992188,
"loss": 0.6928,
"rewards/accuracies": 0.6750000715255737,
"rewards/chosen": 0.08009053766727448,
"rewards/margins": 0.1303517371416092,
"rewards/rejected": -0.05026119947433472,
"step": 40
},
{
"epoch": 0.03924646781789639,
"grad_norm": 5.75968341286992,
"learning_rate": 1.9531249999999998e-07,
"logits/chosen": 6424.73291015625,
"logits/rejected": 5042.3193359375,
"logps/chosen": -265.3091735839844,
"logps/rejected": -206.7834014892578,
"loss": 0.6924,
"rewards/accuracies": 0.6750000715255737,
"rewards/chosen": 0.3174227178096771,
"rewards/margins": 0.4012085497379303,
"rewards/rejected": -0.08378583192825317,
"step": 50
},
{
"epoch": 0.04709576138147567,
"grad_norm": 5.546891159199977,
"learning_rate": 2.3437499999999998e-07,
"logits/chosen": 5484.2392578125,
"logits/rejected": 4559.8701171875,
"logps/chosen": -213.73947143554688,
"logps/rejected": -209.15982055664062,
"loss": 0.6918,
"rewards/accuracies": 0.6583333611488342,
"rewards/chosen": 0.26449495553970337,
"rewards/margins": 0.6242072582244873,
"rewards/rejected": -0.3597122132778168,
"step": 60
},
{
"epoch": 0.054945054945054944,
"grad_norm": 5.331755041716918,
"learning_rate": 2.734375e-07,
"logits/chosen": 5194.4169921875,
"logits/rejected": 4918.47998046875,
"logps/chosen": -178.27944946289062,
"logps/rejected": -177.4241485595703,
"loss": 0.6911,
"rewards/accuracies": 0.6666667461395264,
"rewards/chosen": 0.2014751136302948,
"rewards/margins": 0.8939019441604614,
"rewards/rejected": -0.6924268007278442,
"step": 70
},
{
"epoch": 0.06279434850863422,
"grad_norm": 5.584999261034571,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": 5774.71337890625,
"logits/rejected": 5269.7578125,
"logps/chosen": -196.8366241455078,
"logps/rejected": -183.1029815673828,
"loss": 0.6888,
"rewards/accuracies": 0.6583333015441895,
"rewards/chosen": -0.26804548501968384,
"rewards/margins": 1.244419813156128,
"rewards/rejected": -1.5124653577804565,
"step": 80
},
{
"epoch": 0.0706436420722135,
"grad_norm": 6.357864437998398,
"learning_rate": 3.5156249999999997e-07,
"logits/chosen": 6036.94384765625,
"logits/rejected": 5178.0302734375,
"logps/chosen": -220.6177520751953,
"logps/rejected": -191.21290588378906,
"loss": 0.6847,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -1.735548973083496,
"rewards/margins": 5.033526420593262,
"rewards/rejected": -6.769075870513916,
"step": 90
},
{
"epoch": 0.07849293563579278,
"grad_norm": 7.040364327766666,
"learning_rate": 3.9062499999999997e-07,
"logits/chosen": 5949.79296875,
"logits/rejected": 5727.5908203125,
"logps/chosen": -216.8369140625,
"logps/rejected": -212.58670043945312,
"loss": 0.6839,
"rewards/accuracies": 0.6583333611488342,
"rewards/chosen": -6.606234073638916,
"rewards/margins": 6.538748264312744,
"rewards/rejected": -13.144981384277344,
"step": 100
},
{
"epoch": 0.08634222919937205,
"grad_norm": 6.763596378595654,
"learning_rate": 4.2968749999999996e-07,
"logits/chosen": 6423.328125,
"logits/rejected": 5241.2958984375,
"logps/chosen": -198.6194305419922,
"logps/rejected": -202.97906494140625,
"loss": 0.6778,
"rewards/accuracies": 0.6583333611488342,
"rewards/chosen": -15.504582405090332,
"rewards/margins": 8.482271194458008,
"rewards/rejected": -23.986852645874023,
"step": 110
},
{
"epoch": 0.09419152276295134,
"grad_norm": 10.481073772174538,
"learning_rate": 4.6874999999999996e-07,
"logits/chosen": 6349.3115234375,
"logits/rejected": 5272.1162109375,
"logps/chosen": -227.03030395507812,
"logps/rejected": -234.8236846923828,
"loss": 0.672,
"rewards/accuracies": 0.625,
"rewards/chosen": -24.193578720092773,
"rewards/margins": 12.465006828308105,
"rewards/rejected": -36.65858840942383,
"step": 120
},
{
"epoch": 0.10204081632653061,
"grad_norm": 8.025924997034352,
"learning_rate": 4.999962424962166e-07,
"logits/chosen": 6358.8583984375,
"logits/rejected": 5857.8310546875,
"logps/chosen": -239.5376434326172,
"logps/rejected": -246.1665496826172,
"loss": 0.6658,
"rewards/accuracies": 0.6833333373069763,
"rewards/chosen": -29.729150772094727,
"rewards/margins": 18.7928466796875,
"rewards/rejected": -48.521995544433594,
"step": 130
},
{
"epoch": 0.10989010989010989,
"grad_norm": 9.521563540986381,
"learning_rate": 4.998647417232375e-07,
"logits/chosen": 6323.96923828125,
"logits/rejected": 5568.6796875,
"logps/chosen": -233.1209259033203,
"logps/rejected": -242.63882446289062,
"loss": 0.6641,
"rewards/accuracies": 0.6500000357627869,
"rewards/chosen": -47.453453063964844,
"rewards/margins": 17.59344482421875,
"rewards/rejected": -65.04689025878906,
"step": 140
},
{
"epoch": 0.11773940345368916,
"grad_norm": 12.644608831856553,
"learning_rate": 4.995454786965036e-07,
"logits/chosen": 6541.3330078125,
"logits/rejected": 5415.2822265625,
"logps/chosen": -245.84255981445312,
"logps/rejected": -239.1229705810547,
"loss": 0.6607,
"rewards/accuracies": 0.6916667222976685,
"rewards/chosen": -44.15815734863281,
"rewards/margins": 22.66707992553711,
"rewards/rejected": -66.82524108886719,
"step": 150
},
{
"epoch": 0.12558869701726844,
"grad_norm": 8.265424491202015,
"learning_rate": 4.990386933279972e-07,
"logits/chosen": 6377.3125,
"logits/rejected": 5663.359375,
"logps/chosen": -241.92959594726562,
"logps/rejected": -266.5514831542969,
"loss": 0.6574,
"rewards/accuracies": 0.6916667222976685,
"rewards/chosen": -44.40141296386719,
"rewards/margins": 25.092517852783203,
"rewards/rejected": -69.4939193725586,
"step": 160
},
{
"epoch": 0.13343799058084774,
"grad_norm": 9.232273980396416,
"learning_rate": 4.983447664444096e-07,
"logits/chosen": 6607.70947265625,
"logits/rejected": 5855.15625,
"logps/chosen": -261.1861267089844,
"logps/rejected": -268.4610900878906,
"loss": 0.6564,
"rewards/accuracies": 0.6666666865348816,
"rewards/chosen": -51.56547927856445,
"rewards/margins": 20.574033737182617,
"rewards/rejected": -72.13951110839844,
"step": 170
},
{
"epoch": 0.141287284144427,
"grad_norm": 9.866448872437736,
"learning_rate": 4.97464219500968e-07,
"logits/chosen": 5737.3740234375,
"logits/rejected": 4962.4228515625,
"logps/chosen": -247.4786834716797,
"logps/rejected": -266.4071960449219,
"loss": 0.6521,
"rewards/accuracies": 0.6916667222976685,
"rewards/chosen": -58.9509162902832,
"rewards/margins": 29.29502296447754,
"rewards/rejected": -88.24593353271484,
"step": 180
},
{
"epoch": 0.14913657770800628,
"grad_norm": 8.7062689635517,
"learning_rate": 4.963977141895843e-07,
"logits/chosen": 5834.91796875,
"logits/rejected": 4950.6669921875,
"logps/chosen": -265.50799560546875,
"logps/rejected": -303.29736328125,
"loss": 0.6419,
"rewards/accuracies": 0.75,
"rewards/chosen": -61.749046325683594,
"rewards/margins": 50.30883026123047,
"rewards/rejected": -112.05787658691406,
"step": 190
},
{
"epoch": 0.15698587127158556,
"grad_norm": 10.780388842430504,
"learning_rate": 4.951460519416227e-07,
"logits/chosen": 5741.23486328125,
"logits/rejected": 5287.05322265625,
"logps/chosen": -251.54641723632812,
"logps/rejected": -304.95709228515625,
"loss": 0.6438,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -69.00214385986328,
"rewards/margins": 39.283905029296875,
"rewards/rejected": -108.28604888916016,
"step": 200
},
{
"epoch": 0.16483516483516483,
"grad_norm": 17.073665708890324,
"learning_rate": 4.937101733256606e-07,
"logits/chosen": 5240.689453125,
"logits/rejected": 4608.19482421875,
"logps/chosen": -215.47265625,
"logps/rejected": -251.48239135742188,
"loss": 0.6445,
"rewards/accuracies": 0.6833332777023315,
"rewards/chosen": -61.55937957763672,
"rewards/margins": 32.7420539855957,
"rewards/rejected": -94.30143737792969,
"step": 210
},
{
"epoch": 0.1726844583987441,
"grad_norm": 12.221014555466162,
"learning_rate": 4.920911573406924e-07,
"logits/chosen": 6383.455078125,
"logits/rejected": 5361.6767578125,
"logps/chosen": -253.47500610351562,
"logps/rejected": -262.1346435546875,
"loss": 0.6407,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -57.13840866088867,
"rewards/margins": 40.999515533447266,
"rewards/rejected": -98.13792419433594,
"step": 220
},
{
"epoch": 0.18053375196232338,
"grad_norm": 13.329208345468095,
"learning_rate": 4.902902206053098e-07,
"logits/chosen": 5734.9677734375,
"logits/rejected": 5143.1357421875,
"logps/chosen": -279.87896728515625,
"logps/rejected": -315.6455078125,
"loss": 0.6407,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -92.54025268554688,
"rewards/margins": 43.33777618408203,
"rewards/rejected": -135.87803649902344,
"step": 230
},
{
"epoch": 0.18838304552590268,
"grad_norm": 10.653934386649361,
"learning_rate": 4.883087164434672e-07,
"logits/chosen": 5146.69921875,
"logits/rejected": 4056.839111328125,
"logps/chosen": -252.93594360351562,
"logps/rejected": -288.0379943847656,
"loss": 0.6314,
"rewards/accuracies": 0.6916667222976685,
"rewards/chosen": -85.30692291259766,
"rewards/margins": 48.041629791259766,
"rewards/rejected": -133.34857177734375,
"step": 240
},
{
"epoch": 0.19623233908948196,
"grad_norm": 13.800858939449958,
"learning_rate": 4.861481338675183e-07,
"logits/chosen": 6170.0908203125,
"logits/rejected": 5447.45458984375,
"logps/chosen": -256.0116271972656,
"logps/rejected": -326.3557434082031,
"loss": 0.6432,
"rewards/accuracies": 0.6750000715255737,
"rewards/chosen": -88.86774444580078,
"rewards/margins": 52.98130416870117,
"rewards/rejected": -141.8490447998047,
"step": 250
},
{
"epoch": 0.20408163265306123,
"grad_norm": 16.199410438264415,
"learning_rate": 4.838100964592904e-07,
"logits/chosen": 6424.16455078125,
"logits/rejected": 5117.61083984375,
"logps/chosen": -273.86700439453125,
"logps/rejected": -284.8200378417969,
"loss": 0.6465,
"rewards/accuracies": 0.7583333849906921,
"rewards/chosen": -74.1485824584961,
"rewards/margins": 44.447505950927734,
"rewards/rejected": -118.59608459472656,
"step": 260
},
{
"epoch": 0.2119309262166405,
"grad_norm": 11.286813732099095,
"learning_rate": 4.812963611500339e-07,
"logits/chosen": 6215.70947265625,
"logits/rejected": 5984.4306640625,
"logps/chosen": -285.27606201171875,
"logps/rejected": -319.1112365722656,
"loss": 0.6303,
"rewards/accuracies": 0.6916666626930237,
"rewards/chosen": -88.52018737792969,
"rewards/margins": 41.961997985839844,
"rewards/rejected": -130.48216247558594,
"step": 270
},
{
"epoch": 0.21978021978021978,
"grad_norm": 16.531534462677037,
"learning_rate": 4.786088169001671e-07,
"logits/chosen": 5287.22216796875,
"logits/rejected": 4560.978515625,
"logps/chosen": -243.2444305419922,
"logps/rejected": -313.148193359375,
"loss": 0.6282,
"rewards/accuracies": 0.75,
"rewards/chosen": -80.0935287475586,
"rewards/margins": 63.753204345703125,
"rewards/rejected": -143.8467559814453,
"step": 280
},
{
"epoch": 0.22762951334379905,
"grad_norm": 16.90462731667777,
"learning_rate": 4.7574948327980567e-07,
"logits/chosen": 7445.8447265625,
"logits/rejected": 5403.94482421875,
"logps/chosen": -331.681884765625,
"logps/rejected": -354.81011962890625,
"loss": 0.6276,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -94.65342712402344,
"rewards/margins": 77.64093780517578,
"rewards/rejected": -172.29437255859375,
"step": 290
},
{
"epoch": 0.23547880690737832,
"grad_norm": 13.245971629591574,
"learning_rate": 4.727205089511466e-07,
"logits/chosen": 5381.85302734375,
"logits/rejected": 5297.11083984375,
"logps/chosen": -255.672607421875,
"logps/rejected": -314.2789611816406,
"loss": 0.636,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -88.2001724243164,
"rewards/margins": 55.892372131347656,
"rewards/rejected": -144.09254455566406,
"step": 300
},
{
"epoch": 0.24332810047095763,
"grad_norm": 12.146215443307637,
"learning_rate": 4.6952417005384247e-07,
"logits/chosen": 6014.71875,
"logits/rejected": 5335.70556640625,
"logps/chosen": -260.8976745605469,
"logps/rejected": -300.9578857421875,
"loss": 0.6408,
"rewards/accuracies": 0.6833333373069763,
"rewards/chosen": -83.31695556640625,
"rewards/margins": 42.17268753051758,
"rewards/rejected": -125.4896469116211,
"step": 310
},
{
"epoch": 0.25117739403453687,
"grad_norm": 13.90650852576195,
"learning_rate": 4.661628684945851e-07,
"logits/chosen": 5960.9345703125,
"logits/rejected": 5119.1533203125,
"logps/chosen": -308.49700927734375,
"logps/rejected": -376.7956848144531,
"loss": 0.6322,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -107.44000244140625,
"rewards/margins": 67.33781433105469,
"rewards/rejected": -174.77781677246094,
"step": 320
},
{
"epoch": 0.25902668759811615,
"grad_norm": 18.153298332822626,
"learning_rate": 4.626391301421782e-07,
"logits/chosen": 5755.26318359375,
"logits/rejected": 5219.13671875,
"logps/chosen": -295.9870300292969,
"logps/rejected": -321.1059265136719,
"loss": 0.643,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -101.53385925292969,
"rewards/margins": 42.24925994873047,
"rewards/rejected": -143.78311157226562,
"step": 330
},
{
"epoch": 0.2668759811616955,
"grad_norm": 10.626065491733366,
"learning_rate": 4.5895560292945996e-07,
"logits/chosen": 6115.3408203125,
"logits/rejected": 6242.7255859375,
"logps/chosen": -266.7374267578125,
"logps/rejected": -334.5285949707031,
"loss": 0.6505,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -72.87915802001953,
"rewards/margins": 41.2818717956543,
"rewards/rejected": -114.16102600097656,
"step": 340
},
{
"epoch": 0.27472527472527475,
"grad_norm": 19.123518096220113,
"learning_rate": 4.5511505486349865e-07,
"logits/chosen": 6534.66162109375,
"logits/rejected": 5885.20654296875,
"logps/chosen": -251.6634979248047,
"logps/rejected": -319.6004943847656,
"loss": 0.628,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -56.837799072265625,
"rewards/margins": 55.940704345703125,
"rewards/rejected": -112.77848815917969,
"step": 350
},
{
"epoch": 0.282574568288854,
"grad_norm": 14.298240994707886,
"learning_rate": 4.5112037194555876e-07,
"logits/chosen": 6060.9228515625,
"logits/rejected": 5880.5185546875,
"logps/chosen": -276.15887451171875,
"logps/rejected": -374.9804992675781,
"loss": 0.626,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -98.23542785644531,
"rewards/margins": 77.46324920654297,
"rewards/rejected": -175.6986846923828,
"step": 360
},
{
"epoch": 0.2904238618524333,
"grad_norm": 14.585395330021852,
"learning_rate": 4.4697455600239863e-07,
"logits/chosen": 5437.0537109375,
"logits/rejected": 5045.03271484375,
"logps/chosen": -286.1153564453125,
"logps/rejected": -318.96649169921875,
"loss": 0.6403,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -101.69328308105469,
"rewards/margins": 49.202125549316406,
"rewards/rejected": -150.89541625976562,
"step": 370
},
{
"epoch": 0.29827315541601257,
"grad_norm": 12.011840689440003,
"learning_rate": 4.426807224305315e-07,
"logits/chosen": 6565.9169921875,
"logits/rejected": 5335.61669921875,
"logps/chosen": -314.1622009277344,
"logps/rejected": -326.3265380859375,
"loss": 0.6373,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -85.82119750976562,
"rewards/margins": 57.495765686035156,
"rewards/rejected": -143.3169708251953,
"step": 380
},
{
"epoch": 0.30612244897959184,
"grad_norm": 14.603970390337606,
"learning_rate": 4.3824209785514326e-07,
"logits/chosen": 6679.51708984375,
"logits/rejected": 5036.5615234375,
"logps/chosen": -299.2738342285156,
"logps/rejected": -336.33966064453125,
"loss": 0.6334,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -83.78034210205078,
"rewards/margins": 72.7061767578125,
"rewards/rejected": -156.4865264892578,
"step": 390
},
{
"epoch": 0.3139717425431711,
"grad_norm": 12.263570083382138,
"learning_rate": 4.3366201770542687e-07,
"logits/chosen": 5716.26025390625,
"logits/rejected": 5553.00634765625,
"logps/chosen": -288.0743713378906,
"logps/rejected": -344.66259765625,
"loss": 0.6377,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -97.28577423095703,
"rewards/margins": 58.6373176574707,
"rewards/rejected": -155.92308044433594,
"step": 400
},
{
"epoch": 0.3218210361067504,
"grad_norm": 30.10421699468723,
"learning_rate": 4.2894392370815567e-07,
"logits/chosen": 6109.4443359375,
"logits/rejected": 5399.1669921875,
"logps/chosen": -343.89434814453125,
"logps/rejected": -421.060302734375,
"loss": 0.6049,
"rewards/accuracies": 0.7833333015441895,
"rewards/chosen": -131.40748596191406,
"rewards/margins": 81.200439453125,
"rewards/rejected": -212.60791015625,
"step": 410
},
{
"epoch": 0.32967032967032966,
"grad_norm": 23.67201136623266,
"learning_rate": 4.2409136130137845e-07,
"logits/chosen": 5768.19677734375,
"logits/rejected": 5116.79296875,
"logps/chosen": -338.7046203613281,
"logps/rejected": -397.24517822265625,
"loss": 0.6238,
"rewards/accuracies": 0.75,
"rewards/chosen": -134.6746826171875,
"rewards/margins": 81.32905578613281,
"rewards/rejected": -216.00375366210938,
"step": 420
},
{
"epoch": 0.33751962323390894,
"grad_norm": 17.987511578768668,
"learning_rate": 4.1910797697018017e-07,
"logits/chosen": 5607.255859375,
"logits/rejected": 4614.8291015625,
"logps/chosen": -280.25738525390625,
"logps/rejected": -348.2909240722656,
"loss": 0.6312,
"rewards/accuracies": 0.8416666984558105,
"rewards/chosen": -100.51243591308594,
"rewards/margins": 86.54534912109375,
"rewards/rejected": -187.0577850341797,
"step": 430
},
{
"epoch": 0.3453689167974882,
"grad_norm": 16.95800834501449,
"learning_rate": 4.1399751550651084e-07,
"logits/chosen": 5963.57470703125,
"logits/rejected": 5890.51171875,
"logps/chosen": -275.5081787109375,
"logps/rejected": -343.4457702636719,
"loss": 0.6238,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -93.11483764648438,
"rewards/margins": 58.647361755371094,
"rewards/rejected": -151.76220703125,
"step": 440
},
{
"epoch": 0.3532182103610675,
"grad_norm": 11.15998480161967,
"learning_rate": 4.087638171951401e-07,
"logits/chosen": 6931.64697265625,
"logits/rejected": 4930.6923828125,
"logps/chosen": -293.3556213378906,
"logps/rejected": -322.02337646484375,
"loss": 0.6253,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -84.58689880371094,
"rewards/margins": 74.44380950927734,
"rewards/rejected": -159.0307159423828,
"step": 450
},
{
"epoch": 0.36106750392464676,
"grad_norm": 11.8715155801604,
"learning_rate": 4.034108149278543e-07,
"logits/chosen": 7102.58349609375,
"logits/rejected": 5428.9775390625,
"logps/chosen": -346.68634033203125,
"logps/rejected": -355.1321716308594,
"loss": 0.6207,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -96.25406646728516,
"rewards/margins": 68.67012786865234,
"rewards/rejected": -164.9241943359375,
"step": 460
},
{
"epoch": 0.36891679748822603,
"grad_norm": 16.98432474874273,
"learning_rate": 3.979425312480629e-07,
"logits/chosen": 6031.1064453125,
"logits/rejected": 5226.72216796875,
"logps/chosen": -335.190673828125,
"logps/rejected": -399.636962890625,
"loss": 0.6369,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -122.92594146728516,
"rewards/margins": 74.1447525024414,
"rewards/rejected": -197.07069396972656,
"step": 470
},
{
"epoch": 0.37676609105180536,
"grad_norm": 10.201495638028796,
"learning_rate": 3.923630753280357e-07,
"logits/chosen": 6538.5703125,
"logits/rejected": 5552.7998046875,
"logps/chosen": -298.9499816894531,
"logps/rejected": -345.0953369140625,
"loss": 0.6213,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -91.55477142333984,
"rewards/margins": 79.14476776123047,
"rewards/rejected": -170.6995391845703,
"step": 480
},
{
"epoch": 0.38461538461538464,
"grad_norm": 17.170911712736423,
"learning_rate": 3.866766398810424e-07,
"logits/chosen": 6079.12646484375,
"logits/rejected": 5729.90625,
"logps/chosen": -269.0557556152344,
"logps/rejected": -384.9722900390625,
"loss": 0.6034,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -92.447265625,
"rewards/margins": 90.50070190429688,
"rewards/rejected": -182.94796752929688,
"step": 490
},
{
"epoch": 0.3924646781789639,
"grad_norm": 17.90792915865348,
"learning_rate": 3.8088749801071496e-07,
"logits/chosen": 6501.1435546875,
"logits/rejected": 4916.962890625,
"logps/chosen": -370.4066467285156,
"logps/rejected": -442.2255859375,
"loss": 0.6376,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -148.45297241210938,
"rewards/margins": 91.25639343261719,
"rewards/rejected": -239.7093505859375,
"step": 500
},
{
"epoch": 0.4003139717425432,
"grad_norm": 11.093400815043566,
"learning_rate": 3.75e-07,
"logits/chosen": 5320.60302734375,
"logits/rejected": 4625.2607421875,
"logps/chosen": -280.2235107421875,
"logps/rejected": -348.7869567871094,
"loss": 0.613,
"rewards/accuracies": 0.8250001072883606,
"rewards/chosen": -90.74053955078125,
"rewards/margins": 91.68112182617188,
"rewards/rejected": -182.42164611816406,
"step": 510
},
{
"epoch": 0.40816326530612246,
"grad_norm": 16.873928239705048,
"learning_rate": 3.6901857004211443e-07,
"logits/chosen": 5609.70556640625,
"logits/rejected": 5175.36328125,
"logps/chosen": -295.0995178222656,
"logps/rejected": -360.041015625,
"loss": 0.6383,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -96.6335220336914,
"rewards/margins": 67.6194839477539,
"rewards/rejected": -164.2530059814453,
"step": 520
},
{
"epoch": 0.41601255886970173,
"grad_norm": 12.66871333637675,
"learning_rate": 3.6294770291596076e-07,
"logits/chosen": 6355.0244140625,
"logits/rejected": 5158.5986328125,
"logps/chosen": -314.4820251464844,
"logps/rejected": -356.3719177246094,
"loss": 0.6139,
"rewards/accuracies": 0.6999999284744263,
"rewards/chosen": -101.55928802490234,
"rewards/margins": 56.05229949951172,
"rewards/rejected": -157.611572265625,
"step": 530
},
{
"epoch": 0.423861852433281,
"grad_norm": 15.194295742706043,
"learning_rate": 3.5679196060850034e-07,
"logits/chosen": 6004.0322265625,
"logits/rejected": 5311.98583984375,
"logps/chosen": -328.72637939453125,
"logps/rejected": -383.19464111328125,
"loss": 0.6213,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -117.90850830078125,
"rewards/margins": 77.04728698730469,
"rewards/rejected": -194.95579528808594,
"step": 540
},
{
"epoch": 0.4317111459968603,
"grad_norm": 17.3949205074546,
"learning_rate": 3.505559688866229e-07,
"logits/chosen": 5810.20166015625,
"logits/rejected": 5345.8447265625,
"logps/chosen": -338.4986267089844,
"logps/rejected": -431.74285888671875,
"loss": 0.6214,
"rewards/accuracies": 0.73333340883255,
"rewards/chosen": -123.7582015991211,
"rewards/margins": 82.92054748535156,
"rewards/rejected": -206.6787567138672,
"step": 550
},
{
"epoch": 0.43956043956043955,
"grad_norm": 16.745493319885668,
"learning_rate": 3.4424441382108826e-07,
"logits/chosen": 5873.52099609375,
"logits/rejected": 5474.826171875,
"logps/chosen": -316.10479736328125,
"logps/rejected": -369.5398864746094,
"loss": 0.6278,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -112.82951354980469,
"rewards/margins": 63.50117874145508,
"rewards/rejected": -176.33071899414062,
"step": 560
},
{
"epoch": 0.4474097331240188,
"grad_norm": 20.733746374707195,
"learning_rate": 3.378620382651523e-07,
"logits/chosen": 6166.53125,
"logits/rejected": 5672.7578125,
"logps/chosen": -361.2176513671875,
"logps/rejected": -411.9073791503906,
"loss": 0.6119,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -118.25773620605469,
"rewards/margins": 69.87909698486328,
"rewards/rejected": -188.13682556152344,
"step": 570
},
{
"epoch": 0.4552590266875981,
"grad_norm": 39.46262291218352,
"learning_rate": 3.314136382905234e-07,
"logits/chosen": 6161.3798828125,
"logits/rejected": 5444.43408203125,
"logps/chosen": -331.9513854980469,
"logps/rejected": -450.3265686035156,
"loss": 0.623,
"rewards/accuracies": 0.8333333134651184,
"rewards/chosen": -119.53593444824219,
"rewards/margins": 117.47932434082031,
"rewards/rejected": -237.01522827148438,
"step": 580
},
{
"epoch": 0.4631083202511774,
"grad_norm": 16.486304218382795,
"learning_rate": 3.249040595833274e-07,
"logits/chosen": 6643.578125,
"logits/rejected": 5511.3427734375,
"logps/chosen": -372.27203369140625,
"logps/rejected": -422.9007873535156,
"loss": 0.6139,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -140.54617309570312,
"rewards/margins": 103.49578857421875,
"rewards/rejected": -244.04196166992188,
"step": 590
},
{
"epoch": 0.47095761381475665,
"grad_norm": 20.09270587846568,
"learning_rate": 3.1833819380279023e-07,
"logits/chosen": 6314.1455078125,
"logits/rejected": 5327.32275390625,
"logps/chosen": -312.32403564453125,
"logps/rejected": -400.52081298828125,
"loss": 0.6082,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -134.77569580078125,
"rewards/margins": 76.05888366699219,
"rewards/rejected": -210.83456420898438,
"step": 600
},
{
"epoch": 0.478806907378336,
"grad_norm": 13.1370939820266,
"learning_rate": 3.11720974905373e-07,
"logits/chosen": 6039.34130859375,
"logits/rejected": 5201.23388671875,
"logps/chosen": -330.3988952636719,
"logps/rejected": -401.22540283203125,
"loss": 0.6108,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -124.28079986572266,
"rewards/margins": 91.52204895019531,
"rewards/rejected": -215.80282592773438,
"step": 610
},
{
"epoch": 0.48665620094191525,
"grad_norm": 13.32277955977118,
"learning_rate": 3.0505737543712275e-07,
"logits/chosen": 5050.4599609375,
"logits/rejected": 4102.0205078125,
"logps/chosen": -343.1001281738281,
"logps/rejected": -393.78594970703125,
"loss": 0.6182,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -159.34146118164062,
"rewards/margins": 73.34559631347656,
"rewards/rejected": -232.6870574951172,
"step": 620
},
{
"epoch": 0.4945054945054945,
"grad_norm": 20.488144807475322,
"learning_rate": 2.9835240279702513e-07,
"logits/chosen": 6585.72119140625,
"logits/rejected": 5542.74755859375,
"logps/chosen": -375.4349365234375,
"logps/rejected": -429.8561096191406,
"loss": 0.6128,
"rewards/accuracies": 0.8083333969116211,
"rewards/chosen": -132.61892700195312,
"rewards/margins": 102.98915100097656,
"rewards/rejected": -235.6080322265625,
"step": 630
},
{
"epoch": 0.5023547880690737,
"grad_norm": 15.285970338736266,
"learning_rate": 2.9161109547416667e-07,
"logits/chosen": 6281.32177734375,
"logits/rejected": 5335.0029296875,
"logps/chosen": -345.87286376953125,
"logps/rejected": -405.34576416015625,
"loss": 0.6137,
"rewards/accuracies": 0.7000000476837158,
"rewards/chosen": -135.1112823486328,
"rewards/margins": 55.7255859375,
"rewards/rejected": -190.83685302734375,
"step": 640
},
{
"epoch": 0.5102040816326531,
"grad_norm": 13.467647025010642,
"learning_rate": 2.848385192615339e-07,
"logits/chosen": 5367.36474609375,
"logits/rejected": 4323.5107421875,
"logps/chosen": -317.5534362792969,
"logps/rejected": -361.20721435546875,
"loss": 0.6173,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -121.21412658691406,
"rewards/margins": 71.26534271240234,
"rewards/rejected": -192.47947692871094,
"step": 650
},
{
"epoch": 0.5180533751962323,
"grad_norm": 16.80725232592256,
"learning_rate": 2.780397634492949e-07,
"logits/chosen": 6023.45703125,
"logits/rejected": 4714.66015625,
"logps/chosen": -344.6996154785156,
"logps/rejected": -413.51885986328125,
"loss": 0.6221,
"rewards/accuracies": 0.841666579246521,
"rewards/chosen": -127.31312561035156,
"rewards/margins": 98.25833892822266,
"rewards/rejected": -225.57144165039062,
"step": 660
},
{
"epoch": 0.5259026687598116,
"grad_norm": 25.673672433544294,
"learning_rate": 2.71219937000424e-07,
"logits/chosen": 6010.3349609375,
"logits/rejected": 4891.86328125,
"logps/chosen": -337.61956787109375,
"logps/rejected": -385.28363037109375,
"loss": 0.6274,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -130.86009216308594,
"rewards/margins": 65.56916809082031,
"rewards/rejected": -196.42926025390625,
"step": 670
},
{
"epoch": 0.533751962323391,
"grad_norm": 18.590503250558747,
"learning_rate": 2.6438416471154273e-07,
"logits/chosen": 5842.89208984375,
"logits/rejected": 4827.79345703125,
"logps/chosen": -347.49615478515625,
"logps/rejected": -392.47955322265625,
"loss": 0.6184,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -133.83489990234375,
"rewards/margins": 79.41798400878906,
"rewards/rejected": -213.2528839111328,
"step": 680
},
{
"epoch": 0.5416012558869702,
"grad_norm": 17.15200634352273,
"learning_rate": 2.5753758336186326e-07,
"logits/chosen": 5809.44482421875,
"logits/rejected": 5264.39306640625,
"logps/chosen": -337.9293212890625,
"logps/rejected": -433.0084533691406,
"loss": 0.6031,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -128.82289123535156,
"rewards/margins": 87.98890686035156,
"rewards/rejected": -216.811767578125,
"step": 690
},
{
"epoch": 0.5494505494505495,
"grad_norm": 15.797111525049983,
"learning_rate": 2.5068533785312666e-07,
"logits/chosen": 5499.59619140625,
"logits/rejected": 5237.15771484375,
"logps/chosen": -330.9737854003906,
"logps/rejected": -413.58660888671875,
"loss": 0.6328,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -141.71347045898438,
"rewards/margins": 84.89921569824219,
"rewards/rejected": -226.61270141601562,
"step": 700
},
{
"epoch": 0.5572998430141287,
"grad_norm": 15.51787747131488,
"learning_rate": 2.4383257734343794e-07,
"logits/chosen": 5412.13134765625,
"logits/rejected": 5404.90283203125,
"logps/chosen": -334.6061706542969,
"logps/rejected": -422.1012268066406,
"loss": 0.6078,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -140.69480895996094,
"rewards/margins": 78.7857666015625,
"rewards/rejected": -219.48056030273438,
"step": 710
},
{
"epoch": 0.565149136577708,
"grad_norm": 19.301105032278493,
"learning_rate": 2.3698445137790258e-07,
"logits/chosen": 5852.1748046875,
"logits/rejected": 4949.2099609375,
"logps/chosen": -339.65594482421875,
"logps/rejected": -409.58099365234375,
"loss": 0.6125,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -120.65275573730469,
"rewards/margins": 89.16310119628906,
"rewards/rejected": -209.8158416748047,
"step": 720
},
{
"epoch": 0.5729984301412873,
"grad_norm": 17.264432929616067,
"learning_rate": 2.3014610601897157e-07,
"logits/chosen": 6345.07080078125,
"logits/rejected": 4777.46826171875,
"logps/chosen": -357.9107971191406,
"logps/rejected": -393.86749267578125,
"loss": 0.6193,
"rewards/accuracies": 0.7666667699813843,
"rewards/chosen": -132.76486206054688,
"rewards/margins": 86.02892303466797,
"rewards/rejected": -218.79379272460938,
"step": 730
},
{
"epoch": 0.5808477237048666,
"grad_norm": 16.99769270663946,
"learning_rate": 2.2332267997940513e-07,
"logits/chosen": 5260.65673828125,
"logits/rejected": 4413.0966796875,
"logps/chosen": -320.5991516113281,
"logps/rejected": -370.4900817871094,
"loss": 0.6068,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -128.17471313476562,
"rewards/margins": 79.66935729980469,
"rewards/rejected": -207.8440704345703,
"step": 740
},
{
"epoch": 0.5886970172684458,
"grad_norm": 19.890647703266293,
"learning_rate": 2.1651930076075723e-07,
"logits/chosen": 5752.275390625,
"logits/rejected": 5127.4677734375,
"logps/chosen": -327.98681640625,
"logps/rejected": -381.2528381347656,
"loss": 0.6251,
"rewards/accuracies": 0.73333340883255,
"rewards/chosen": -147.56088256835938,
"rewards/margins": 66.59766387939453,
"rewards/rejected": -214.1585693359375,
"step": 750
},
{
"epoch": 0.5965463108320251,
"grad_norm": 14.729863717931694,
"learning_rate": 2.0974108080028692e-07,
"logits/chosen": 6052.0751953125,
"logits/rejected": 4696.7666015625,
"logps/chosen": -337.3818664550781,
"logps/rejected": -385.4659118652344,
"loss": 0.6144,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -134.1309051513672,
"rewards/margins": 76.8044662475586,
"rewards/rejected": -210.9353790283203,
"step": 760
},
{
"epoch": 0.6043956043956044,
"grad_norm": 17.668089788843886,
"learning_rate": 2.0299311362918773e-07,
"logits/chosen": 6281.421875,
"logits/rejected": 5333.64794921875,
"logps/chosen": -369.94805908203125,
"logps/rejected": -441.9542541503906,
"loss": 0.6302,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -140.21438598632812,
"rewards/margins": 77.87971496582031,
"rewards/rejected": -218.09408569335938,
"step": 770
},
{
"epoch": 0.6122448979591837,
"grad_norm": 15.214960693707779,
"learning_rate": 1.962804700450265e-07,
"logits/chosen": 6088.87744140625,
"logits/rejected": 5759.99560546875,
"logps/chosen": -357.53375244140625,
"logps/rejected": -451.697509765625,
"loss": 0.6171,
"rewards/accuracies": 0.75,
"rewards/chosen": -141.428466796875,
"rewards/margins": 72.88134002685547,
"rewards/rejected": -214.30978393554688,
"step": 780
},
{
"epoch": 0.6200941915227629,
"grad_norm": 32.04354354649456,
"learning_rate": 1.8960819430126334e-07,
"logits/chosen": 5662.08056640625,
"logits/rejected": 4959.47998046875,
"logps/chosen": -362.9315490722656,
"logps/rejected": -448.6405334472656,
"loss": 0.6126,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -164.80050659179688,
"rewards/margins": 95.24226379394531,
"rewards/rejected": -260.04278564453125,
"step": 790
},
{
"epoch": 0.6279434850863422,
"grad_norm": 22.34135272884677,
"learning_rate": 1.8298130031671972e-07,
"logits/chosen": 5636.14599609375,
"logits/rejected": 4883.76123046875,
"logps/chosen": -372.5877380371094,
"logps/rejected": -449.95355224609375,
"loss": 0.6251,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -155.86485290527344,
"rewards/margins": 81.3904800415039,
"rewards/rejected": -237.25534057617188,
"step": 800
},
{
"epoch": 0.6357927786499215,
"grad_norm": 22.27973267170805,
"learning_rate": 1.7640476790784075e-07,
"logits/chosen": 5207.5654296875,
"logits/rejected": 4584.2197265625,
"logps/chosen": -348.1822204589844,
"logps/rejected": -463.4768981933594,
"loss": 0.6154,
"rewards/accuracies": 0.75,
"rewards/chosen": -145.72683715820312,
"rewards/margins": 96.45280456542969,
"rewards/rejected": -242.1796417236328,
"step": 810
},
{
"epoch": 0.6436420722135008,
"grad_norm": 21.878335066672264,
"learning_rate": 1.6988353904658492e-07,
"logits/chosen": 5700.16650390625,
"logits/rejected": 4322.6650390625,
"logps/chosen": -365.1958923339844,
"logps/rejected": -391.56353759765625,
"loss": 0.6154,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -143.98133850097656,
"rewards/margins": 78.89381408691406,
"rewards/rejected": -222.8751983642578,
"step": 820
},
{
"epoch": 0.6514913657770801,
"grad_norm": 30.16348408203026,
"learning_rate": 1.634225141467513e-07,
"logits/chosen": 5625.34326171875,
"logits/rejected": 4964.39306640625,
"logps/chosen": -347.97393798828125,
"logps/rejected": -428.678466796875,
"loss": 0.613,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -143.7879180908203,
"rewards/margins": 92.43571472167969,
"rewards/rejected": -236.2236328125,
"step": 830
},
{
"epoch": 0.6593406593406593,
"grad_norm": 12.309600681345202,
"learning_rate": 1.570265483815364e-07,
"logits/chosen": 6213.0498046875,
"logits/rejected": 5020.015625,
"logps/chosen": -358.1226806640625,
"logps/rejected": -418.3009338378906,
"loss": 0.618,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -130.79283142089844,
"rewards/margins": 74.71489715576172,
"rewards/rejected": -205.5077362060547,
"step": 840
},
{
"epoch": 0.6671899529042387,
"grad_norm": 12.13079365289034,
"learning_rate": 1.5070044803508691e-07,
"logits/chosen": 5740.8427734375,
"logits/rejected": 5088.7275390625,
"logps/chosen": -333.6675720214844,
"logps/rejected": -408.93280029296875,
"loss": 0.5962,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -118.1900863647461,
"rewards/margins": 90.28736877441406,
"rewards/rejected": -208.4774627685547,
"step": 850
},
{
"epoch": 0.6750392464678179,
"grad_norm": 17.975318476548516,
"learning_rate": 1.444489668907914e-07,
"logits/chosen": 6176.43212890625,
"logits/rejected": 5197.5263671875,
"logps/chosen": -364.1601867675781,
"logps/rejected": -395.86810302734375,
"loss": 0.6293,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -116.38951110839844,
"rewards/margins": 76.79676818847656,
"rewards/rejected": -193.186279296875,
"step": 860
},
{
"epoch": 0.6828885400313972,
"grad_norm": 18.675632935473597,
"learning_rate": 1.3827680265902232e-07,
"logits/chosen": 6169.16015625,
"logits/rejected": 5053.54638671875,
"logps/chosen": -336.77423095703125,
"logps/rejected": -384.07647705078125,
"loss": 0.6181,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -106.67745208740234,
"rewards/margins": 76.71006774902344,
"rewards/rejected": -183.38754272460938,
"step": 870
},
{
"epoch": 0.6907378335949764,
"grad_norm": 15.556936298105454,
"learning_rate": 1.3218859344701632e-07,
"logits/chosen": 5415.5537109375,
"logits/rejected": 5134.35888671875,
"logps/chosen": -307.6910400390625,
"logps/rejected": -403.2095947265625,
"loss": 0.6154,
"rewards/accuracies": 0.7583333849906921,
"rewards/chosen": -97.1800765991211,
"rewards/margins": 74.44038391113281,
"rewards/rejected": -171.62045288085938,
"step": 880
},
{
"epoch": 0.6985871271585558,
"grad_norm": 14.598418420491413,
"learning_rate": 1.2618891427354172e-07,
"logits/chosen": 6405.8955078125,
"logits/rejected": 5148.23828125,
"logps/chosen": -354.01531982421875,
"logps/rejected": -396.88348388671875,
"loss": 0.6246,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -96.41930389404297,
"rewards/margins": 88.68364715576172,
"rewards/rejected": -185.1029510498047,
"step": 890
},
{
"epoch": 0.706436420722135,
"grad_norm": 17.248604251826578,
"learning_rate": 1.202822736309758e-07,
"logits/chosen": 5427.36181640625,
"logits/rejected": 4946.220703125,
"logps/chosen": -309.7834777832031,
"logps/rejected": -402.7059326171875,
"loss": 0.6169,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -105.43426513671875,
"rewards/margins": 86.841552734375,
"rewards/rejected": -192.27581787109375,
"step": 900
},
{
"epoch": 0.7142857142857143,
"grad_norm": 21.236660119285595,
"learning_rate": 1.1447311009737299e-07,
"logits/chosen": 5334.93408203125,
"logits/rejected": 4981.6474609375,
"logps/chosen": -332.61944580078125,
"logps/rejected": -423.7569274902344,
"loss": 0.6253,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -124.39315032958984,
"rewards/margins": 91.43525695800781,
"rewards/rejected": -215.8284149169922,
"step": 910
},
{
"epoch": 0.7221350078492935,
"grad_norm": 14.521571204964287,
"learning_rate": 1.0876578900107053e-07,
"logits/chosen": 5887.0625,
"logits/rejected": 4832.818359375,
"logps/chosen": -360.1144714355469,
"logps/rejected": -408.10321044921875,
"loss": 0.6099,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -127.6928939819336,
"rewards/margins": 82.26800537109375,
"rewards/rejected": -209.9608917236328,
"step": 920
},
{
"epoch": 0.7299843014128728,
"grad_norm": 12.269584700329853,
"learning_rate": 1.0316459914033793e-07,
"logits/chosen": 5838.90185546875,
"logits/rejected": 4279.92529296875,
"logps/chosen": -368.8111877441406,
"logps/rejected": -407.0862121582031,
"loss": 0.6068,
"rewards/accuracies": 0.7833333015441895,
"rewards/chosen": -132.82225036621094,
"rewards/margins": 88.33971405029297,
"rewards/rejected": -221.16195678710938,
"step": 930
},
{
"epoch": 0.7378335949764521,
"grad_norm": 16.95040924011293,
"learning_rate": 9.767374956053584e-08,
"logits/chosen": 5631.70751953125,
"logits/rejected": 4867.74560546875,
"logps/chosen": -354.5127868652344,
"logps/rejected": -444.9837951660156,
"loss": 0.6195,
"rewards/accuracies": 0.7666667699813843,
"rewards/chosen": -136.21914672851562,
"rewards/margins": 103.7847671508789,
"rewards/rejected": -240.00390625,
"step": 940
},
{
"epoch": 0.7456828885400314,
"grad_norm": 15.455216526066362,
"learning_rate": 9.229736639120561e-08,
"logits/chosen": 5815.58251953125,
"logits/rejected": 5308.6826171875,
"logps/chosen": -353.8769836425781,
"logps/rejected": -416.19110107421875,
"loss": 0.6285,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -136.0758056640625,
"rewards/margins": 65.96086120605469,
"rewards/rejected": -202.03665161132812,
"step": 950
},
{
"epoch": 0.7535321821036107,
"grad_norm": 21.025656579521215,
"learning_rate": 8.70394897454659e-08,
"logits/chosen": 5706.39208984375,
"logits/rejected": 5004.17041015625,
"logps/chosen": -332.70684814453125,
"logps/rejected": -410.22308349609375,
"loss": 0.6127,
"rewards/accuracies": 0.8083332777023315,
"rewards/chosen": -114.27842712402344,
"rewards/margins": 93.46832275390625,
"rewards/rejected": -207.7467498779297,
"step": 960
},
{
"epoch": 0.7613814756671899,
"grad_norm": 22.662244586868777,
"learning_rate": 8.19040706840472e-08,
"logits/chosen": 5792.12451171875,
"logits/rejected": 4783.9365234375,
"logps/chosen": -364.58013916015625,
"logps/rejected": -429.33734130859375,
"loss": 0.6128,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -124.36016845703125,
"rewards/margins": 98.28662872314453,
"rewards/rejected": -222.6468048095703,
"step": 970
},
{
"epoch": 0.7692307692307693,
"grad_norm": 16.294899633616406,
"learning_rate": 7.689496824624525e-08,
"logits/chosen": 5524.9208984375,
"logits/rejected": 4383.34033203125,
"logps/chosen": -343.7428283691406,
"logps/rejected": -411.6790466308594,
"loss": 0.6013,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -114.92622375488281,
"rewards/margins": 106.25920104980469,
"rewards/rejected": -221.18539428710938,
"step": 980
},
{
"epoch": 0.7770800627943485,
"grad_norm": 24.656802253568358,
"learning_rate": 7.201594655002458e-08,
"logits/chosen": 5839.876953125,
"logits/rejected": 4818.5595703125,
"logps/chosen": -359.73309326171875,
"logps/rejected": -420.4481506347656,
"loss": 0.6001,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -133.83816528320312,
"rewards/margins": 92.54021453857422,
"rewards/rejected": -226.3783721923828,
"step": 990
},
{
"epoch": 0.7849293563579278,
"grad_norm": 19.139612377869543,
"learning_rate": 6.727067196345099e-08,
"logits/chosen": 5519.62890625,
"logits/rejected": 4601.50830078125,
"logps/chosen": -347.6911315917969,
"logps/rejected": -392.0986328125,
"loss": 0.6166,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -133.935302734375,
"rewards/margins": 77.81806945800781,
"rewards/rejected": -211.7533416748047,
"step": 1000
},
{
"epoch": 0.792778649921507,
"grad_norm": 18.4502462246621,
"learning_rate": 6.26627103495786e-08,
"logits/chosen": 5698.8525390625,
"logits/rejected": 4684.4365234375,
"logps/chosen": -344.99969482421875,
"logps/rejected": -406.09136962890625,
"loss": 0.626,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -130.29049682617188,
"rewards/margins": 84.36732482910156,
"rewards/rejected": -214.6577911376953,
"step": 1010
},
{
"epoch": 0.8006279434850864,
"grad_norm": 13.030843283044183,
"learning_rate": 5.8195524386862374e-08,
"logits/chosen": 5804.3681640625,
"logits/rejected": 5112.267578125,
"logps/chosen": -367.52532958984375,
"logps/rejected": -445.23822021484375,
"loss": 0.6133,
"rewards/accuracies": 0.8083332777023315,
"rewards/chosen": -119.3467025756836,
"rewards/margins": 99.81084442138672,
"rewards/rejected": -219.15756225585938,
"step": 1020
},
{
"epoch": 0.8084772370486656,
"grad_norm": 15.435517078983278,
"learning_rate": 5.38724709671092e-08,
"logits/chosen": 6208.55615234375,
"logits/rejected": 5785.6513671875,
"logps/chosen": -368.04693603515625,
"logps/rejected": -461.67291259765625,
"loss": 0.6042,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -136.5785675048828,
"rewards/margins": 91.97505950927734,
"rewards/rejected": -228.5536346435547,
"step": 1030
},
{
"epoch": 0.8163265306122449,
"grad_norm": 19.196729996375815,
"learning_rate": 4.969679867292276e-08,
"logits/chosen": 5469.60595703125,
"logits/rejected": 4901.0498046875,
"logps/chosen": -360.51763916015625,
"logps/rejected": -447.8094177246094,
"loss": 0.6096,
"rewards/accuracies": 0.73333340883255,
"rewards/chosen": -140.41592407226562,
"rewards/margins": 98.20952606201172,
"rewards/rejected": -238.62545776367188,
"step": 1040
},
{
"epoch": 0.8241758241758241,
"grad_norm": 15.63885700925835,
"learning_rate": 4.5671645336537416e-08,
"logits/chosen": 5541.3017578125,
"logits/rejected": 4979.87890625,
"logps/chosen": -379.06878662109375,
"logps/rejected": -450.43927001953125,
"loss": 0.6099,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -141.58819580078125,
"rewards/margins": 93.05669403076172,
"rewards/rejected": -234.6448974609375,
"step": 1050
},
{
"epoch": 0.8320251177394035,
"grad_norm": 71.6555736263913,
"learning_rate": 4.180003568187776e-08,
"logits/chosen": 6864.1591796875,
"logits/rejected": 5329.9736328125,
"logps/chosen": -406.0521240234375,
"logps/rejected": -433.7730407714844,
"loss": 0.6267,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -145.0647430419922,
"rewards/margins": 68.66908264160156,
"rewards/rejected": -213.7338409423828,
"step": 1060
},
{
"epoch": 0.8398744113029827,
"grad_norm": 17.29969333705125,
"learning_rate": 3.8084879051612144e-08,
"logits/chosen": 5685.4697265625,
"logits/rejected": 5125.021484375,
"logps/chosen": -358.2772521972656,
"logps/rejected": -418.6520080566406,
"loss": 0.6134,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -133.0342559814453,
"rewards/margins": 92.45338439941406,
"rewards/rejected": -225.48764038085938,
"step": 1070
},
{
"epoch": 0.847723704866562,
"grad_norm": 16.62350096619966,
"learning_rate": 3.452896722091128e-08,
"logits/chosen": 6247.75927734375,
"logits/rejected": 4747.95556640625,
"logps/chosen": -398.6324462890625,
"logps/rejected": -429.2178649902344,
"loss": 0.6032,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -132.06317138671875,
"rewards/margins": 96.17476654052734,
"rewards/rejected": -228.23794555664062,
"step": 1080
},
{
"epoch": 0.8555729984301413,
"grad_norm": 20.56158228380985,
"learning_rate": 3.11349722995527e-08,
"logits/chosen": 6290.5673828125,
"logits/rejected": 4655.23974609375,
"logps/chosen": -363.40936279296875,
"logps/rejected": -426.296630859375,
"loss": 0.6032,
"rewards/accuracies": 0.7333332896232605,
"rewards/chosen": -135.9347381591797,
"rewards/margins": 77.47732543945312,
"rewards/rejected": -213.4120635986328,
"step": 1090
},
{
"epoch": 0.8634222919937206,
"grad_norm": 17.638845220024372,
"learning_rate": 2.7905444723949762e-08,
"logits/chosen": 6115.4248046875,
"logits/rejected": 4982.52685546875,
"logps/chosen": -377.2098388671875,
"logps/rejected": -429.16619873046875,
"loss": 0.6167,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -138.67349243164062,
"rewards/margins": 100.99529266357422,
"rewards/rejected": -239.66879272460938,
"step": 1100
},
{
"epoch": 0.8712715855572999,
"grad_norm": 18.82352427435552,
"learning_rate": 2.484281134061142e-08,
"logits/chosen": 6460.3466796875,
"logits/rejected": 5133.16796875,
"logps/chosen": -407.51690673828125,
"logps/rejected": -459.20721435546875,
"loss": 0.608,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -142.90335083007812,
"rewards/margins": 94.03584289550781,
"rewards/rejected": -236.939208984375,
"step": 1110
},
{
"epoch": 0.8791208791208791,
"grad_norm": 38.731872878926325,
"learning_rate": 2.194937358247506e-08,
"logits/chosen": 6330.7939453125,
"logits/rejected": 5062.67822265625,
"logps/chosen": -387.6415100097656,
"logps/rejected": -456.4393615722656,
"loss": 0.611,
"rewards/accuracies": 0.7500001192092896,
"rewards/chosen": -142.5010223388672,
"rewards/margins": 97.16749572753906,
"rewards/rejected": -239.6685333251953,
"step": 1120
},
{
"epoch": 0.8869701726844584,
"grad_norm": 21.93873798257166,
"learning_rate": 1.9227305739481612e-08,
"logits/chosen": 5757.74462890625,
"logits/rejected": 4450.82373046875,
"logps/chosen": -360.2721862792969,
"logps/rejected": -406.9669189453125,
"loss": 0.6027,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -125.49015045166016,
"rewards/margins": 101.2511978149414,
"rewards/rejected": -226.7413330078125,
"step": 1130
},
{
"epoch": 0.8948194662480377,
"grad_norm": 21.229892802861798,
"learning_rate": 1.6678653324693787e-08,
"logits/chosen": 6381.58935546875,
"logits/rejected": 5084.40478515625,
"logps/chosen": -392.08349609375,
"logps/rejected": -447.29827880859375,
"loss": 0.5978,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -134.46275329589844,
"rewards/margins": 92.02247619628906,
"rewards/rejected": -226.48526000976562,
"step": 1140
},
{
"epoch": 0.902668759811617,
"grad_norm": 11.998405903395946,
"learning_rate": 1.4305331537183384e-08,
"logits/chosen": 5625.76708984375,
"logits/rejected": 5091.146484375,
"logps/chosen": -361.97503662109375,
"logps/rejected": -438.71697998046875,
"loss": 0.6003,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -134.9916229248047,
"rewards/margins": 84.23737335205078,
"rewards/rejected": -219.22900390625,
"step": 1150
},
{
"epoch": 0.9105180533751962,
"grad_norm": 20.001421395632878,
"learning_rate": 1.2109123822844653e-08,
"logits/chosen": 5778.00634765625,
"logits/rejected": 4505.45849609375,
"logps/chosen": -367.82598876953125,
"logps/rejected": -416.082275390625,
"loss": 0.6137,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -139.2692108154297,
"rewards/margins": 80.87498474121094,
"rewards/rejected": -220.14419555664062,
"step": 1160
},
{
"epoch": 0.9183673469387755,
"grad_norm": 18.383575962086507,
"learning_rate": 1.0091680534213387e-08,
"logits/chosen": 6360.2177734375,
"logits/rejected": 6036.2529296875,
"logps/chosen": -372.2303771972656,
"logps/rejected": -475.2777404785156,
"loss": 0.6101,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -129.28201293945312,
"rewards/margins": 99.24874877929688,
"rewards/rejected": -228.53073120117188,
"step": 1170
},
{
"epoch": 0.9262166405023547,
"grad_norm": 26.20378993221473,
"learning_rate": 8.254517690300944e-09,
"logits/chosen": 5551.3759765625,
"logits/rejected": 4948.4052734375,
"logps/chosen": -368.40777587890625,
"logps/rejected": -442.9326171875,
"loss": 0.6049,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -128.46182250976562,
"rewards/margins": 97.64901733398438,
"rewards/rejected": -226.11083984375,
"step": 1180
},
{
"epoch": 0.9340659340659341,
"grad_norm": 12.632107820641844,
"learning_rate": 6.599015837372907e-09,
"logits/chosen": 6045.20654296875,
"logits/rejected": 5189.9599609375,
"logps/chosen": -404.6395568847656,
"logps/rejected": -457.1123046875,
"loss": 0.6143,
"rewards/accuracies": 0.7333332896232605,
"rewards/chosen": -155.75405883789062,
"rewards/margins": 84.20575714111328,
"rewards/rejected": -239.95980834960938,
"step": 1190
},
{
"epoch": 0.9419152276295133,
"grad_norm": 32.82134972014968,
"learning_rate": 5.126419011529992e-09,
"logits/chosen": 6301.2685546875,
"logits/rejected": 5249.4482421875,
"logps/chosen": -383.8853454589844,
"logps/rejected": -460.536865234375,
"loss": 0.6054,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -128.22366333007812,
"rewards/margins": 113.64762878417969,
"rewards/rejected": -241.8712921142578,
"step": 1200
},
{
"epoch": 0.9497645211930926,
"grad_norm": 22.184372964812642,
"learning_rate": 3.837833803870177e-09,
"logits/chosen": 5859.86279296875,
"logits/rejected": 5063.07080078125,
"logps/chosen": -382.39556884765625,
"logps/rejected": -460.39459228515625,
"loss": 0.6185,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -142.2100830078125,
"rewards/margins": 99.98322296142578,
"rewards/rejected": -242.1933135986328,
"step": 1210
},
{
"epoch": 0.957613814756672,
"grad_norm": 24.474760694660887,
"learning_rate": 2.734228528934679e-09,
"logits/chosen": 7345.8125,
"logits/rejected": 5270.705078125,
"logps/chosen": -443.3330993652344,
"logps/rejected": -489.99658203125,
"loss": 0.6063,
"rewards/accuracies": 0.73333340883255,
"rewards/chosen": -146.56466674804688,
"rewards/margins": 98.23472595214844,
"rewards/rejected": -244.7993621826172,
"step": 1220
},
{
"epoch": 0.9654631083202512,
"grad_norm": 23.890847226085217,
"learning_rate": 1.8164324970625645e-09,
"logits/chosen": 6513.2109375,
"logits/rejected": 5010.6240234375,
"logps/chosen": -391.34930419921875,
"logps/rejected": -439.3382263183594,
"loss": 0.6264,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -130.76763916015625,
"rewards/margins": 95.57390594482422,
"rewards/rejected": -226.341552734375,
"step": 1230
},
{
"epoch": 0.9733124018838305,
"grad_norm": 20.592256484362352,
"learning_rate": 1.0851353912008642e-09,
"logits/chosen": 5547.0556640625,
"logits/rejected": 5059.66015625,
"logps/chosen": -377.5496826171875,
"logps/rejected": -469.046630859375,
"loss": 0.6146,
"rewards/accuracies": 0.7333332896232605,
"rewards/chosen": -145.93728637695312,
"rewards/margins": 87.95149230957031,
"rewards/rejected": -233.88876342773438,
"step": 1240
},
{
"epoch": 0.9811616954474097,
"grad_norm": 15.042680729243902,
"learning_rate": 5.408867486384471e-10,
"logits/chosen": 5707.8701171875,
"logits/rejected": 4727.94921875,
"logps/chosen": -356.4093017578125,
"logps/rejected": -407.896728515625,
"loss": 0.6075,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -126.1011734008789,
"rewards/margins": 92.36528778076172,
"rewards/rejected": -218.46646118164062,
"step": 1250
},
{
"epoch": 0.989010989010989,
"grad_norm": 17.914310013144693,
"learning_rate": 1.840955480532924e-10,
"logits/chosen": 5406.544921875,
"logits/rejected": 5018.2861328125,
"logps/chosen": -360.8941955566406,
"logps/rejected": -435.3475646972656,
"loss": 0.6055,
"rewards/accuracies": 0.75,
"rewards/chosen": -128.73361206054688,
"rewards/margins": 90.288818359375,
"rewards/rejected": -219.02243041992188,
"step": 1260
},
{
"epoch": 0.9968602825745683,
"grad_norm": 19.6748421836041,
"learning_rate": 1.502990218302247e-11,
"logits/chosen": 5662.17626953125,
"logits/rejected": 4531.2763671875,
"logps/chosen": -363.6100158691406,
"logps/rejected": -417.36590576171875,
"loss": 0.6148,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -139.1360321044922,
"rewards/margins": 91.9256362915039,
"rewards/rejected": -231.06167602539062,
"step": 1270
},
{
"epoch": 1.0,
"step": 1274,
"total_flos": 0.0,
"train_loss": 0.6281878652527718,
"train_runtime": 14442.3019,
"train_samples_per_second": 4.233,
"train_steps_per_second": 0.088
}
],
"logging_steps": 10,
"max_steps": 1274,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}