IL_BrierAll-7b-sft-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.998691442030882,
"eval_steps": 500,
"global_step": 477,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002093692750588851,
"grad_norm": 1.7795599560645496,
"learning_rate": 1.0416666666666666e-08,
"logits/chosen": 4963.591796875,
"logits/rejected": 4602.0634765625,
"logps/chosen": -222.9984130859375,
"logps/rejected": -158.74850463867188,
"loss": 0.5,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.02093692750588851,
"grad_norm": 1.664277641531783,
"learning_rate": 1.0416666666666667e-07,
"logits/chosen": 6011.06005859375,
"logits/rejected": 5064.162109375,
"logps/chosen": -240.0664825439453,
"logps/rejected": -208.95372009277344,
"loss": 0.5,
"rewards/accuracies": 0.5034722089767456,
"rewards/chosen": 0.00021603053028229624,
"rewards/margins": 0.0005849565495736897,
"rewards/rejected": -0.0003689260338433087,
"step": 10
},
{
"epoch": 0.04187385501177702,
"grad_norm": 1.6870127509120665,
"learning_rate": 2.0833333333333333e-07,
"logits/chosen": 5908.4755859375,
"logits/rejected": 4882.568359375,
"logps/chosen": -231.4028778076172,
"logps/rejected": -196.9695281982422,
"loss": 0.4999,
"rewards/accuracies": 0.5531250238418579,
"rewards/chosen": 0.0007881122874096036,
"rewards/margins": 0.0005757558392360806,
"rewards/rejected": 0.00021235644817352295,
"step": 20
},
{
"epoch": 0.06281078251766553,
"grad_norm": 1.6176895769159747,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": 5731.59716796875,
"logits/rejected": 4970.974609375,
"logps/chosen": -205.80563354492188,
"logps/rejected": -182.66543579101562,
"loss": 0.4996,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.0034130061976611614,
"rewards/margins": 0.003777171252295375,
"rewards/rejected": -0.0003641647635959089,
"step": 30
},
{
"epoch": 0.08374771002355404,
"grad_norm": 1.7199138258926487,
"learning_rate": 4.1666666666666667e-07,
"logits/chosen": 5881.6220703125,
"logits/rejected": 5133.04638671875,
"logps/chosen": -199.26327514648438,
"logps/rejected": -189.4043426513672,
"loss": 0.4984,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.0059957681223750114,
"rewards/margins": 0.01181001029908657,
"rewards/rejected": -0.005814241245388985,
"step": 40
},
{
"epoch": 0.10468463752944256,
"grad_norm": 2.335343455285067,
"learning_rate": 4.999731868769026e-07,
"logits/chosen": 6244.0546875,
"logits/rejected": 5419.8291015625,
"logps/chosen": -200.3682403564453,
"logps/rejected": -195.88189697265625,
"loss": 0.4965,
"rewards/accuracies": 0.640625,
"rewards/chosen": -0.022861136123538017,
"rewards/margins": 0.02738131210207939,
"rewards/rejected": -0.05024244636297226,
"step": 50
},
{
"epoch": 0.12562156503533106,
"grad_norm": 1.985885842678977,
"learning_rate": 4.990353313429303e-07,
"logits/chosen": 6085.1591796875,
"logits/rejected": 5170.4423828125,
"logps/chosen": -202.0069122314453,
"logps/rejected": -189.2211456298828,
"loss": 0.494,
"rewards/accuracies": 0.640625,
"rewards/chosen": -0.06540889292955399,
"rewards/margins": 0.07115975767374039,
"rewards/rejected": -0.13656863570213318,
"step": 60
},
{
"epoch": 0.14655849254121958,
"grad_norm": 2.02720438835768,
"learning_rate": 4.967625656594781e-07,
"logits/chosen": 5949.5009765625,
"logits/rejected": 5459.0986328125,
"logps/chosen": -203.56887817382812,
"logps/rejected": -203.3651885986328,
"loss": 0.493,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -0.06014765053987503,
"rewards/margins": 0.07473154366016388,
"rewards/rejected": -0.1348791867494583,
"step": 70
},
{
"epoch": 0.16749542004710807,
"grad_norm": 2.2298529628194035,
"learning_rate": 4.93167072587771e-07,
"logits/chosen": 6108.1171875,
"logits/rejected": 5237.1611328125,
"logps/chosen": -200.6221160888672,
"logps/rejected": -197.19618225097656,
"loss": 0.4914,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -0.08121322095394135,
"rewards/margins": 0.09713159501552582,
"rewards/rejected": -0.17834481596946716,
"step": 80
},
{
"epoch": 0.1884323475529966,
"grad_norm": 2.2234876584054275,
"learning_rate": 4.882681251368548e-07,
"logits/chosen": 6233.083984375,
"logits/rejected": 5252.95068359375,
"logps/chosen": -201.39984130859375,
"logps/rejected": -200.0003204345703,
"loss": 0.4901,
"rewards/accuracies": 0.6656249761581421,
"rewards/chosen": -0.08355093002319336,
"rewards/margins": 0.12262304872274399,
"rewards/rejected": -0.20617397129535675,
"step": 90
},
{
"epoch": 0.2093692750588851,
"grad_norm": 2.1862542481259792,
"learning_rate": 4.820919832540181e-07,
"logits/chosen": 6330.16259765625,
"logits/rejected": 5832.1982421875,
"logps/chosen": -203.98583984375,
"logps/rejected": -211.58010864257812,
"loss": 0.491,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -0.07759369909763336,
"rewards/margins": 0.12538166344165802,
"rewards/rejected": -0.20297536253929138,
"step": 100
},
{
"epoch": 0.23030620256477363,
"grad_norm": 2.7054972610472565,
"learning_rate": 4.7467175306295647e-07,
"logits/chosen": 6421.41015625,
"logits/rejected": 5193.19287109375,
"logps/chosen": -198.57032775878906,
"logps/rejected": -209.23104858398438,
"loss": 0.4869,
"rewards/accuracies": 0.721875011920929,
"rewards/chosen": -0.0885535329580307,
"rewards/margins": 0.1617024838924408,
"rewards/rejected": -0.2502560019493103,
"step": 110
},
{
"epoch": 0.2512431300706621,
"grad_norm": 2.490243429862705,
"learning_rate": 4.6604720940421207e-07,
"logits/chosen": 6070.7314453125,
"logits/rejected": 5633.78369140625,
"logps/chosen": -196.9094696044922,
"logps/rejected": -213.2709197998047,
"loss": 0.4892,
"rewards/accuracies": 0.6781250238418579,
"rewards/chosen": -0.10950146615505219,
"rewards/margins": 0.1391303390264511,
"rewards/rejected": -0.2486318051815033,
"step": 120
},
{
"epoch": 0.2721800575765506,
"grad_norm": 2.7610246742258715,
"learning_rate": 4.5626458262912735e-07,
"logits/chosen": 6097.5146484375,
"logits/rejected": 5424.9013671875,
"logps/chosen": -197.1998748779297,
"logps/rejected": -216.65188598632812,
"loss": 0.4879,
"rewards/accuracies": 0.715624988079071,
"rewards/chosen": -0.08192776143550873,
"rewards/margins": 0.15857011079788208,
"rewards/rejected": -0.2404978722333908,
"step": 130
},
{
"epoch": 0.29311698508243916,
"grad_norm": 2.5211223485513323,
"learning_rate": 4.453763107901675e-07,
"logits/chosen": 5834.34814453125,
"logits/rejected": 5199.22119140625,
"logps/chosen": -204.2546844482422,
"logps/rejected": -208.1846923828125,
"loss": 0.4883,
"rewards/accuracies": 0.684374988079071,
"rewards/chosen": -0.10303564369678497,
"rewards/margins": 0.19196416437625885,
"rewards/rejected": -0.29499977827072144,
"step": 140
},
{
"epoch": 0.31405391258832765,
"grad_norm": 2.8015563362051052,
"learning_rate": 4.3344075855595097e-07,
"logits/chosen": 6411.4541015625,
"logits/rejected": 5283.06689453125,
"logps/chosen": -216.62496948242188,
"logps/rejected": -208.1221466064453,
"loss": 0.4855,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -0.07201126962900162,
"rewards/margins": 0.21424655616283417,
"rewards/rejected": -0.286257803440094,
"step": 150
},
{
"epoch": 0.33499084009421615,
"grad_norm": 3.380927510289668,
"learning_rate": 4.2052190435769554e-07,
"logits/chosen": 6130.6318359375,
"logits/rejected": 5110.82080078125,
"logps/chosen": -209.1565704345703,
"logps/rejected": -219.35220336914062,
"loss": 0.4824,
"rewards/accuracies": 0.7281249761581421,
"rewards/chosen": -0.10825005918741226,
"rewards/margins": 0.2637324929237366,
"rewards/rejected": -0.37198254466056824,
"step": 160
},
{
"epoch": 0.3559277676001047,
"grad_norm": 3.2112846087091484,
"learning_rate": 4.0668899744407567e-07,
"logits/chosen": 6476.86767578125,
"logits/rejected": 5530.86767578125,
"logps/chosen": -220.4525146484375,
"logps/rejected": -208.67733764648438,
"loss": 0.4833,
"rewards/accuracies": 0.703125,
"rewards/chosen": -0.12977644801139832,
"rewards/margins": 0.23194870352745056,
"rewards/rejected": -0.3617251515388489,
"step": 170
},
{
"epoch": 0.3768646951059932,
"grad_norm": 2.5422470233501713,
"learning_rate": 3.920161866827889e-07,
"logits/chosen": 6021.7919921875,
"logits/rejected": 5178.1259765625,
"logps/chosen": -220.57601928710938,
"logps/rejected": -207.26339721679688,
"loss": 0.4847,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.09060440957546234,
"rewards/margins": 0.20774026215076447,
"rewards/rejected": -0.2983446717262268,
"step": 180
},
{
"epoch": 0.39780162261188173,
"grad_norm": 5.303607389601098,
"learning_rate": 3.765821230985757e-07,
"logits/chosen": 6147.5517578125,
"logits/rejected": 5167.7958984375,
"logps/chosen": -208.33944702148438,
"logps/rejected": -221.4107208251953,
"loss": 0.4836,
"rewards/accuracies": 0.7406250238418579,
"rewards/chosen": -0.1318391114473343,
"rewards/margins": 0.3124375343322754,
"rewards/rejected": -0.4442766308784485,
"step": 190
},
{
"epoch": 0.4187385501177702,
"grad_norm": 2.6432475137123794,
"learning_rate": 3.604695382782159e-07,
"logits/chosen": 5748.025390625,
"logits/rejected": 4992.97021484375,
"logps/chosen": -193.7339630126953,
"logps/rejected": -207.1187286376953,
"loss": 0.4868,
"rewards/accuracies": 0.703125,
"rewards/chosen": -0.08920423686504364,
"rewards/margins": 0.2186780869960785,
"rewards/rejected": -0.30788230895996094,
"step": 200
},
{
"epoch": 0.4396754776236587,
"grad_norm": 3.3944179691537864,
"learning_rate": 3.4376480090239047e-07,
"logits/chosen": 6434.7412109375,
"logits/rejected": 5470.30224609375,
"logps/chosen": -219.40087890625,
"logps/rejected": -221.01614379882812,
"loss": 0.4847,
"rewards/accuracies": 0.746874988079071,
"rewards/chosen": -0.07487013936042786,
"rewards/margins": 0.21102896332740784,
"rewards/rejected": -0.2858991026878357,
"step": 210
},
{
"epoch": 0.46061240512954726,
"grad_norm": 2.9009193632951207,
"learning_rate": 3.265574537815398e-07,
"logits/chosen": 5810.1513671875,
"logits/rejected": 5406.72021484375,
"logps/chosen": -212.43923950195312,
"logps/rejected": -227.1441192626953,
"loss": 0.4829,
"rewards/accuracies": 0.753125011920929,
"rewards/chosen": -0.105693019926548,
"rewards/margins": 0.26153022050857544,
"rewards/rejected": -0.36722320318222046,
"step": 220
},
{
"epoch": 0.48154933263543576,
"grad_norm": 2.6189951178492614,
"learning_rate": 3.0893973387735683e-07,
"logits/chosen": 6018.71484375,
"logits/rejected": 5094.3134765625,
"logps/chosen": -207.9360809326172,
"logps/rejected": -231.08480834960938,
"loss": 0.4805,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.13205979764461517,
"rewards/margins": 0.36629724502563477,
"rewards/rejected": -0.49835705757141113,
"step": 230
},
{
"epoch": 0.5024862601413242,
"grad_norm": 2.9120071931474514,
"learning_rate": 2.910060778827554e-07,
"logits/chosen": 6167.8330078125,
"logits/rejected": 5222.9423828125,
"logps/chosen": -206.72628784179688,
"logps/rejected": -222.31649780273438,
"loss": 0.4816,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.10643552243709564,
"rewards/margins": 0.27724453806877136,
"rewards/rejected": -0.3836800754070282,
"step": 240
},
{
"epoch": 0.5234231876472127,
"grad_norm": 3.640526921626944,
"learning_rate": 2.7285261601056697e-07,
"logits/chosen": 6262.5078125,
"logits/rejected": 5280.09619140625,
"logps/chosen": -224.32699584960938,
"logps/rejected": -236.7519989013672,
"loss": 0.4832,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.11429344117641449,
"rewards/margins": 0.28984737396240234,
"rewards/rejected": -0.4041408598423004,
"step": 250
},
{
"epoch": 0.5443601151531012,
"grad_norm": 3.480502498738401,
"learning_rate": 2.5457665670441937e-07,
"logits/chosen": 6063.48681640625,
"logits/rejected": 5488.103515625,
"logps/chosen": -227.7340850830078,
"logps/rejected": -235.0940399169922,
"loss": 0.4823,
"rewards/accuracies": 0.746874988079071,
"rewards/chosen": -0.11375202983617783,
"rewards/margins": 0.27239111065864563,
"rewards/rejected": -0.38614311814308167,
"step": 260
},
{
"epoch": 0.5652970426589898,
"grad_norm": 3.100193815819693,
"learning_rate": 2.3627616503391812e-07,
"logits/chosen": 6130.5126953125,
"logits/rejected": 5626.0517578125,
"logps/chosen": -217.3065643310547,
"logps/rejected": -236.91329956054688,
"loss": 0.4834,
"rewards/accuracies": 0.753125011920929,
"rewards/chosen": -0.1151232123374939,
"rewards/margins": 0.2929432988166809,
"rewards/rejected": -0.40806645154953003,
"step": 270
},
{
"epoch": 0.5862339701648783,
"grad_norm": 3.6139410491493034,
"learning_rate": 2.1804923757009882e-07,
"logits/chosen": 6155.16357421875,
"logits/rejected": 5117.0361328125,
"logps/chosen": -224.9839630126953,
"logps/rejected": -229.80514526367188,
"loss": 0.4807,
"rewards/accuracies": 0.7718750238418579,
"rewards/chosen": -0.10252990573644638,
"rewards/margins": 0.33688706159591675,
"rewards/rejected": -0.4394169747829437,
"step": 280
},
{
"epoch": 0.6071708976707668,
"grad_norm": 3.0356982001656423,
"learning_rate": 1.9999357655598891e-07,
"logits/chosen": 6409.4736328125,
"logits/rejected": 6006.4580078125,
"logps/chosen": -239.07656860351562,
"logps/rejected": -265.92523193359375,
"loss": 0.4842,
"rewards/accuracies": 0.6968749761581421,
"rewards/chosen": -0.15593001246452332,
"rewards/margins": 0.2926904857158661,
"rewards/rejected": -0.4486205577850342,
"step": 290
},
{
"epoch": 0.6281078251766553,
"grad_norm": 4.271112938322883,
"learning_rate": 1.8220596619089573e-07,
"logits/chosen": 5861.82177734375,
"logits/rejected": 5508.8740234375,
"logps/chosen": -222.1393585205078,
"logps/rejected": -245.2710418701172,
"loss": 0.4845,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.11530385166406631,
"rewards/margins": 0.2702717185020447,
"rewards/rejected": -0.3855755925178528,
"step": 300
},
{
"epoch": 0.6490447526825438,
"grad_norm": 3.0832085833192004,
"learning_rate": 1.647817538357072e-07,
"logits/chosen": 6551.92529296875,
"logits/rejected": 5745.2666015625,
"logps/chosen": -234.02554321289062,
"logps/rejected": -243.51119995117188,
"loss": 0.4815,
"rewards/accuracies": 0.7281249761581421,
"rewards/chosen": -0.10852447897195816,
"rewards/margins": 0.22780513763427734,
"rewards/rejected": -0.3363296091556549,
"step": 310
},
{
"epoch": 0.6699816801884323,
"grad_norm": 5.627304230331988,
"learning_rate": 1.478143389201113e-07,
"logits/chosen": 6057.1337890625,
"logits/rejected": 5255.4736328125,
"logps/chosen": -222.4619903564453,
"logps/rejected": -239.05459594726562,
"loss": 0.4817,
"rewards/accuracies": 0.746874988079071,
"rewards/chosen": -0.09626330435276031,
"rewards/margins": 0.28026360273361206,
"rewards/rejected": -0.37652695178985596,
"step": 320
},
{
"epoch": 0.6909186076943209,
"grad_norm": 4.070544491993295,
"learning_rate": 1.3139467229135998e-07,
"logits/chosen": 5855.13671875,
"logits/rejected": 5428.80224609375,
"logps/chosen": -231.0357208251953,
"logps/rejected": -246.7843780517578,
"loss": 0.484,
"rewards/accuracies": 0.768750011920929,
"rewards/chosen": -0.11221712827682495,
"rewards/margins": 0.30028775334358215,
"rewards/rejected": -0.4125048518180847,
"step": 330
},
{
"epoch": 0.7118555352002094,
"grad_norm": 4.9587556176352905,
"learning_rate": 1.1561076868822755e-07,
"logits/chosen": 5686.6806640625,
"logits/rejected": 4937.55615234375,
"logps/chosen": -216.03945922851562,
"logps/rejected": -238.09805297851562,
"loss": 0.4819,
"rewards/accuracies": 0.7593749761581421,
"rewards/chosen": -0.16062280535697937,
"rewards/margins": 0.34666866064071655,
"rewards/rejected": -0.5072914958000183,
"step": 340
},
{
"epoch": 0.7327924627060979,
"grad_norm": 3.8596857041552393,
"learning_rate": 1.0054723495346482e-07,
"logits/chosen": 6366.03955078125,
"logits/rejected": 5157.458984375,
"logps/chosen": -255.4525604248047,
"logps/rejected": -263.61639404296875,
"loss": 0.4836,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.14537517726421356,
"rewards/margins": 0.3442801833152771,
"rewards/rejected": -0.48965534567832947,
"step": 350
},
{
"epoch": 0.7537293902119864,
"grad_norm": 4.404236860707489,
"learning_rate": 8.628481651367875e-08,
"logits/chosen": 6031.80615234375,
"logits/rejected": 5262.02734375,
"logps/chosen": -231.36318969726562,
"logps/rejected": -245.8846893310547,
"loss": 0.4812,
"rewards/accuracies": 0.765625,
"rewards/chosen": -0.10767906904220581,
"rewards/margins": 0.31250181794166565,
"rewards/rejected": -0.42018088698387146,
"step": 360
},
{
"epoch": 0.7746663177178749,
"grad_norm": 4.286082049703798,
"learning_rate": 7.289996455765748e-08,
"logits/chosen": 5882.796875,
"logits/rejected": 5024.01171875,
"logps/chosen": -250.63809204101562,
"logps/rejected": -253.8474884033203,
"loss": 0.4799,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.12153621017932892,
"rewards/margins": 0.3065151572227478,
"rewards/rejected": -0.4280513823032379,
"step": 370
},
{
"epoch": 0.7956032452237635,
"grad_norm": 4.2771383904693545,
"learning_rate": 6.046442623320145e-08,
"logits/chosen": 6166.33447265625,
"logits/rejected": 5370.65185546875,
"logps/chosen": -244.5850067138672,
"logps/rejected": -238.40518188476562,
"loss": 0.4816,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -0.10372950881719589,
"rewards/margins": 0.2737034261226654,
"rewards/rejected": -0.3774329721927643,
"step": 380
},
{
"epoch": 0.816540172729652,
"grad_norm": 4.7598934741028565,
"learning_rate": 4.904486005914027e-08,
"logits/chosen": 6437.08251953125,
"logits/rejected": 5671.439453125,
"logps/chosen": -249.28125,
"logps/rejected": -271.4849548339844,
"loss": 0.4818,
"rewards/accuracies": 0.753125011920929,
"rewards/chosen": -0.1305597722530365,
"rewards/margins": 0.3592670261859894,
"rewards/rejected": -0.4898267686367035,
"step": 390
},
{
"epoch": 0.8374771002355405,
"grad_norm": 3.0633087764980194,
"learning_rate": 3.8702478614051345e-08,
"logits/chosen": 5975.5859375,
"logits/rejected": 5307.0068359375,
"logps/chosen": -249.6516876220703,
"logps/rejected": -247.6414031982422,
"loss": 0.4821,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.12782196700572968,
"rewards/margins": 0.30399927496910095,
"rewards/rejected": -0.4318212568759918,
"step": 400
},
{
"epoch": 0.8584140277414289,
"grad_norm": 5.414232808843427,
"learning_rate": 2.9492720416985e-08,
"logits/chosen": 5987.8955078125,
"logits/rejected": 5288.86669921875,
"logps/chosen": -239.4297637939453,
"logps/rejected": -252.67214965820312,
"loss": 0.4809,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -0.14551712572574615,
"rewards/margins": 0.2954447865486145,
"rewards/rejected": -0.44096192717552185,
"step": 410
},
{
"epoch": 0.8793509552473174,
"grad_norm": 3.7651155714131734,
"learning_rate": 2.1464952759020856e-08,
"logits/chosen": 6339.23046875,
"logits/rejected": 5529.13037109375,
"logps/chosen": -254.3148193359375,
"logps/rejected": -267.2603759765625,
"loss": 0.481,
"rewards/accuracies": 0.734375,
"rewards/chosen": -0.12421433627605438,
"rewards/margins": 0.3070390820503235,
"rewards/rejected": -0.43125343322753906,
"step": 420
},
{
"epoch": 0.9002878827532059,
"grad_norm": 3.0456608776131104,
"learning_rate": 1.4662207078575684e-08,
"logits/chosen": 6244.36669921875,
"logits/rejected": 5038.8232421875,
"logps/chosen": -249.1537628173828,
"logps/rejected": -258.28509521484375,
"loss": 0.4806,
"rewards/accuracies": 0.765625,
"rewards/chosen": -0.11752188205718994,
"rewards/margins": 0.3456159830093384,
"rewards/rejected": -0.46313780546188354,
"step": 430
},
{
"epoch": 0.9212248102590945,
"grad_norm": 3.7152553962599533,
"learning_rate": 9.12094829893642e-09,
"logits/chosen": 6062.4091796875,
"logits/rejected": 5768.86572265625,
"logps/chosen": -247.6633758544922,
"logps/rejected": -270.07781982421875,
"loss": 0.4831,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.125531867146492,
"rewards/margins": 0.2730921804904938,
"rewards/rejected": -0.39862409234046936,
"step": 440
},
{
"epoch": 0.942161737764983,
"grad_norm": 3.385435845957556,
"learning_rate": 4.8708793644441086e-09,
"logits/chosen": 6208.9853515625,
"logits/rejected": 5350.4443359375,
"logps/chosen": -253.6076202392578,
"logps/rejected": -252.9407196044922,
"loss": 0.4804,
"rewards/accuracies": 0.753125011920929,
"rewards/chosen": -0.10648094117641449,
"rewards/margins": 0.2990611791610718,
"rewards/rejected": -0.40554213523864746,
"step": 450
},
{
"epoch": 0.9630986652708715,
"grad_norm": 3.849103407950397,
"learning_rate": 1.9347820230782295e-09,
"logits/chosen": 6395.10302734375,
"logits/rejected": 5409.55078125,
"logps/chosen": -276.77349853515625,
"logps/rejected": -277.45819091796875,
"loss": 0.4832,
"rewards/accuracies": 0.7093750238418579,
"rewards/chosen": -0.13324801623821259,
"rewards/margins": 0.28933390974998474,
"rewards/rejected": -0.42258191108703613,
"step": 460
},
{
"epoch": 0.98403559277676,
"grad_norm": 3.425749585050412,
"learning_rate": 3.2839470889836627e-10,
"logits/chosen": 5962.32763671875,
"logits/rejected": 5300.6298828125,
"logps/chosen": -245.10165405273438,
"logps/rejected": -248.36233520507812,
"loss": 0.481,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -0.1263255625963211,
"rewards/margins": 0.26447486877441406,
"rewards/rejected": -0.390800416469574,
"step": 470
},
{
"epoch": 0.998691442030882,
"step": 477,
"total_flos": 0.0,
"train_loss": 0.4856958399278813,
"train_runtime": 14922.9586,
"train_samples_per_second": 4.097,
"train_steps_per_second": 0.032
}
],
"logging_steps": 10,
"max_steps": 477,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
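
Note: the per-step metrics recorded in "log_history" above can be extracted and plotted directly. The snippet below is a minimal sketch, assuming the JSON is saved locally as trainer_state.json and that matplotlib is available; the filename and plotting choices are illustrative and are not part of the saved state itself.

import json

import matplotlib.pyplot as plt

# Load the trainer state dumped above (the path is an assumption; adjust as needed).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step logging entries; the final entry is a run summary
# ("train_loss", "train_runtime", ...) and has no per-batch "loss" key.
logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in logs]
loss = [entry["loss"] for entry in logs]
accuracies = [entry["rewards/accuracies"] for entry in logs]

# Plot training loss and preference accuracy against the global step.
fig, (ax_loss, ax_acc) = plt.subplots(2, 1, sharex=True)
ax_loss.plot(steps, loss)
ax_loss.set_ylabel("loss")
ax_acc.plot(steps, accuracies)
ax_acc.set_ylabel("rewards/accuracies")
ax_acc.set_xlabel("step")
plt.tight_layout()
plt.show()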