{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.998691442030882,
"eval_steps": 500,
"global_step": 477,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002093692750588851,
"grad_norm": 6813.720901215927,
"learning_rate": 1.0416666666666666e-08,
"logits/chosen": 5002.53564453125,
"logits/rejected": 4591.72021484375,
"logps/chosen": -265.6396789550781,
"logps/rejected": -206.22401428222656,
"loss": 503.0168,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.02093692750588851,
"grad_norm": 6935.582411893378,
"learning_rate": 1.0416666666666667e-07,
"logits/chosen": 5977.00244140625,
"logits/rejected": 5015.09765625,
"logps/chosen": -292.2739562988281,
"logps/rejected": -259.0675048828125,
"loss": 549.3314,
"rewards/accuracies": 0.4583333432674408,
"rewards/chosen": 0.0028684597928076982,
"rewards/margins": 0.0003068610094487667,
"rewards/rejected": 0.0025615987833589315,
"step": 10
},
{
"epoch": 0.04187385501177702,
"grad_norm": 3127.701327967131,
"learning_rate": 2.0833333333333333e-07,
"logits/chosen": 5864.72802734375,
"logits/rejected": 4844.66650390625,
"logps/chosen": -283.6185302734375,
"logps/rejected": -243.01513671875,
"loss": 523.6729,
"rewards/accuracies": 0.5562499761581421,
"rewards/chosen": 0.06974941492080688,
"rewards/margins": 0.002566719427704811,
"rewards/rejected": 0.06718270480632782,
"step": 20
},
{
"epoch": 0.06281078251766553,
"grad_norm": 2074.6624019206756,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": 5557.00439453125,
"logits/rejected": 4840.8193359375,
"logps/chosen": -256.04144287109375,
"logps/rejected": -223.24319458007812,
"loss": 494.8969,
"rewards/accuracies": 0.5687500238418579,
"rewards/chosen": 0.16196031868457794,
"rewards/margins": 0.006370093673467636,
"rewards/rejected": 0.1555902361869812,
"step": 30
},
{
"epoch": 0.08374771002355404,
"grad_norm": 1576.9313876726794,
"learning_rate": 4.1666666666666667e-07,
"logits/chosen": 5559.41650390625,
"logits/rejected": 4830.68994140625,
"logps/chosen": -251.85604858398438,
"logps/rejected": -236.69235229492188,
"loss": 496.3616,
"rewards/accuracies": 0.518750011920929,
"rewards/chosen": 0.22101028263568878,
"rewards/margins": 0.0033943026792258024,
"rewards/rejected": 0.217616006731987,
"step": 40
},
{
"epoch": 0.10468463752944256,
"grad_norm": 1640.8995064962066,
"learning_rate": 4.999731868769026e-07,
"logits/chosen": 5797.015625,
"logits/rejected": 5035.59326171875,
"logps/chosen": -253.31631469726562,
"logps/rejected": -242.33651733398438,
"loss": 492.3395,
"rewards/accuracies": 0.5218750238418579,
"rewards/chosen": 0.2530141770839691,
"rewards/margins": 0.0011868119472637773,
"rewards/rejected": 0.2518273890018463,
"step": 50
},
{
"epoch": 0.12562156503533106,
"grad_norm": 1480.537194221153,
"learning_rate": 4.990353313429303e-07,
"logits/chosen": 5540.62158203125,
"logits/rejected": 4727.97607421875,
"logps/chosen": -248.41848754882812,
"logps/rejected": -221.98336791992188,
"loss": 466.3484,
"rewards/accuracies": 0.5218750238418579,
"rewards/chosen": 0.30004000663757324,
"rewards/margins": 0.005084425210952759,
"rewards/rejected": 0.29495561122894287,
"step": 60
},
{
"epoch": 0.14655849254121958,
"grad_norm": 1439.2312712606115,
"learning_rate": 4.967625656594781e-07,
"logits/chosen": 5331.10791015625,
"logits/rejected": 4925.798828125,
"logps/chosen": -247.7162628173828,
"logps/rejected": -237.12353515625,
"loss": 465.379,
"rewards/accuracies": 0.518750011920929,
"rewards/chosen": 0.30591654777526855,
"rewards/margins": 0.009119048714637756,
"rewards/rejected": 0.2967974543571472,
"step": 70
},
{
"epoch": 0.16749542004710807,
"grad_norm": 1420.7895768869655,
"learning_rate": 4.93167072587771e-07,
"logits/chosen": 5414.23828125,
"logits/rejected": 4648.93115234375,
"logps/chosen": -242.97555541992188,
"logps/rejected": -222.2198486328125,
"loss": 464.9691,
"rewards/accuracies": 0.5531250238418579,
"rewards/chosen": 0.33111369609832764,
"rewards/margins": 0.022587263956665993,
"rewards/rejected": 0.3085264265537262,
"step": 80
},
{
"epoch": 0.1884323475529966,
"grad_norm": 1456.2100554164706,
"learning_rate": 4.882681251368548e-07,
"logits/chosen": 5535.8984375,
"logits/rejected": 4687.62255859375,
"logps/chosen": -242.0615234375,
"logps/rejected": -220.8953399658203,
"loss": 465.3252,
"rewards/accuracies": 0.5406249761581421,
"rewards/chosen": 0.35646066069602966,
"rewards/margins": 0.015677783638238907,
"rewards/rejected": 0.34078291058540344,
"step": 90
},
{
"epoch": 0.2093692750588851,
"grad_norm": 1536.760185739677,
"learning_rate": 4.820919832540181e-07,
"logits/chosen": 5591.662109375,
"logits/rejected": 5172.16259765625,
"logps/chosen": -243.97738647460938,
"logps/rejected": -236.50595092773438,
"loss": 460.8633,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": 0.3589131832122803,
"rewards/margins": -0.021181438118219376,
"rewards/rejected": 0.38009461760520935,
"step": 100
},
{
"epoch": 0.23030620256477363,
"grad_norm": 1408.5395404483565,
"learning_rate": 4.7467175306295647e-07,
"logits/chosen": 5752.2939453125,
"logits/rejected": 4664.9677734375,
"logps/chosen": -236.39962768554688,
"logps/rejected": -224.271484375,
"loss": 463.9909,
"rewards/accuracies": 0.5562499761581421,
"rewards/chosen": 0.382229745388031,
"rewards/margins": 0.03622853755950928,
"rewards/rejected": 0.34600120782852173,
"step": 110
},
{
"epoch": 0.2512431300706621,
"grad_norm": 1430.8052441244079,
"learning_rate": 4.6604720940421207e-07,
"logits/chosen": 5345.5830078125,
"logits/rejected": 4974.67724609375,
"logps/chosen": -230.8021240234375,
"logps/rejected": -229.9935302734375,
"loss": 472.1473,
"rewards/accuracies": 0.503125011920929,
"rewards/chosen": 0.36376887559890747,
"rewards/margins": -0.005053712520748377,
"rewards/rejected": 0.36882254481315613,
"step": 120
},
{
"epoch": 0.2721800575765506,
"grad_norm": 1613.4321490419964,
"learning_rate": 4.5626458262912735e-07,
"logits/chosen": 5406.8876953125,
"logits/rejected": 4819.064453125,
"logps/chosen": -233.12362670898438,
"logps/rejected": -232.2109375,
"loss": 465.5601,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.35393673181533813,
"rewards/margins": -0.014040246605873108,
"rewards/rejected": 0.36797699332237244,
"step": 130
},
{
"epoch": 0.29311698508243916,
"grad_norm": 1397.7266878069634,
"learning_rate": 4.453763107901675e-07,
"logits/chosen": 5204.806640625,
"logits/rejected": 4634.70947265625,
"logps/chosen": -235.88473510742188,
"logps/rejected": -216.02548217773438,
"loss": 475.0084,
"rewards/accuracies": 0.546875,
"rewards/chosen": 0.36020082235336304,
"rewards/margins": 0.009127211757004261,
"rewards/rejected": 0.35107359290122986,
"step": 140
},
{
"epoch": 0.31405391258832765,
"grad_norm": 1771.9334786366776,
"learning_rate": 4.3344075855595097e-07,
"logits/chosen": 5751.51171875,
"logits/rejected": 4749.1025390625,
"logps/chosen": -253.08169555664062,
"logps/rejected": -215.91110229492188,
"loss": 453.9507,
"rewards/accuracies": 0.5531250238418579,
"rewards/chosen": 0.3603329658508301,
"rewards/margins": 0.004536592401564121,
"rewards/rejected": 0.35579636693000793,
"step": 150
},
{
"epoch": 0.33499084009421615,
"grad_norm": 1436.0408028073434,
"learning_rate": 4.2052190435769554e-07,
"logits/chosen": 5464.77099609375,
"logits/rejected": 4553.0888671875,
"logps/chosen": -241.3671112060547,
"logps/rejected": -218.6112518310547,
"loss": 470.9686,
"rewards/accuracies": 0.550000011920929,
"rewards/chosen": 0.4322701394557953,
"rewards/margins": 0.054599881172180176,
"rewards/rejected": 0.3776702284812927,
"step": 160
},
{
"epoch": 0.3559277676001047,
"grad_norm": 1452.849884308315,
"learning_rate": 4.0668899744407567e-07,
"logits/chosen": 5712.3564453125,
"logits/rejected": 4878.48193359375,
"logps/chosen": -248.1728515625,
"logps/rejected": -206.45004272460938,
"loss": 457.2006,
"rewards/accuracies": 0.550000011920929,
"rewards/chosen": 0.34142884612083435,
"rewards/margins": -0.0005211926763877273,
"rewards/rejected": 0.34195005893707275,
"step": 170
},
{
"epoch": 0.3768646951059932,
"grad_norm": 1345.993393031555,
"learning_rate": 3.920161866827889e-07,
"logits/chosen": 5387.048828125,
"logits/rejected": 4639.03271484375,
"logps/chosen": -250.1925811767578,
"logps/rejected": -211.57275390625,
"loss": 463.2538,
"rewards/accuracies": 0.5218750238418579,
"rewards/chosen": 0.37102702260017395,
"rewards/margins": -0.013053232803940773,
"rewards/rejected": 0.3840802311897278,
"step": 180
},
{
"epoch": 0.39780162261188173,
"grad_norm": 1364.64342210683,
"learning_rate": 3.765821230985757e-07,
"logits/chosen": 5516.2822265625,
"logits/rejected": 4661.30517578125,
"logps/chosen": -231.8093719482422,
"logps/rejected": -209.94967651367188,
"loss": 460.1928,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": 0.37942126393318176,
"rewards/margins": 8.549987978767604e-05,
"rewards/rejected": 0.3793357312679291,
"step": 190
},
{
"epoch": 0.4187385501177702,
"grad_norm": 1501.9460217734675,
"learning_rate": 3.604695382782159e-07,
"logits/chosen": 5149.3212890625,
"logits/rejected": 4487.83251953125,
"logps/chosen": -217.900634765625,
"logps/rejected": -206.22817993164062,
"loss": 462.1984,
"rewards/accuracies": 0.5406249761581421,
"rewards/chosen": 0.3645021319389343,
"rewards/margins": -0.0018228956032544374,
"rewards/rejected": 0.3663250207901001,
"step": 200
},
{
"epoch": 0.4396754776236587,
"grad_norm": 1429.3016977538932,
"learning_rate": 3.4376480090239047e-07,
"logits/chosen": 5780.9111328125,
"logits/rejected": 4931.05810546875,
"logps/chosen": -248.72396850585938,
"logps/rejected": -224.09872436523438,
"loss": 466.6646,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": 0.38043394684791565,
"rewards/margins": 0.004385782405734062,
"rewards/rejected": 0.37604817748069763,
"step": 210
},
{
"epoch": 0.46061240512954726,
"grad_norm": 1443.7077268756489,
"learning_rate": 3.265574537815398e-07,
"logits/chosen": 5190.0830078125,
"logits/rejected": 4833.0302734375,
"logps/chosen": -234.4996795654297,
"logps/rejected": -221.47488403320312,
"loss": 474.1076,
"rewards/accuracies": 0.5062500238418579,
"rewards/chosen": 0.36616024374961853,
"rewards/margins": -0.022525813430547714,
"rewards/rejected": 0.38868606090545654,
"step": 220
},
{
"epoch": 0.48154933263543576,
"grad_norm": 1524.9948933221324,
"learning_rate": 3.0893973387735683e-07,
"logits/chosen": 5389.8935546875,
"logits/rejected": 4572.7626953125,
"logps/chosen": -228.4185028076172,
"logps/rejected": -209.15811157226562,
"loss": 446.1821,
"rewards/accuracies": 0.5531250238418579,
"rewards/chosen": 0.39850276708602905,
"rewards/margins": 0.035766832530498505,
"rewards/rejected": 0.36273595690727234,
"step": 230
},
{
"epoch": 0.5024862601413242,
"grad_norm": 1425.7617156287813,
"learning_rate": 2.910060778827554e-07,
"logits/chosen": 5539.8349609375,
"logits/rejected": 4716.81201171875,
"logps/chosen": -227.1032257080078,
"logps/rejected": -210.68612670898438,
"loss": 466.6096,
"rewards/accuracies": 0.534375011920929,
"rewards/chosen": 0.38473668694496155,
"rewards/margins": -0.006211507134139538,
"rewards/rejected": 0.3909481465816498,
"step": 240
},
{
"epoch": 0.5234231876472127,
"grad_norm": 1407.421320970437,
"learning_rate": 2.7285261601056697e-07,
"logits/chosen": 5670.39453125,
"logits/rejected": 4789.2001953125,
"logps/chosen": -244.33895874023438,
"logps/rejected": -223.86453247070312,
"loss": 459.7925,
"rewards/accuracies": 0.5531250238418579,
"rewards/chosen": 0.4160972535610199,
"rewards/margins": 0.03399023413658142,
"rewards/rejected": 0.3821069598197937,
"step": 250
},
{
"epoch": 0.5443601151531012,
"grad_norm": 1368.2023715670355,
"learning_rate": 2.5457665670441937e-07,
"logits/chosen": 5489.0439453125,
"logits/rejected": 4975.576171875,
"logps/chosen": -245.1668243408203,
"logps/rejected": -221.49661254882812,
"loss": 458.9599,
"rewards/accuracies": 0.5062500238418579,
"rewards/chosen": 0.4090227484703064,
"rewards/margins": 0.013166209682822227,
"rewards/rejected": 0.3958565592765808,
"step": 260
},
{
"epoch": 0.5652970426589898,
"grad_norm": 1496.3498583187165,
"learning_rate": 2.3627616503391812e-07,
"logits/chosen": 5568.6240234375,
"logits/rejected": 5110.2138671875,
"logps/chosen": -233.6811065673828,
"logps/rejected": -220.8893585205078,
"loss": 447.9319,
"rewards/accuracies": 0.5562499761581421,
"rewards/chosen": 0.4093276560306549,
"rewards/margins": 0.014256368391215801,
"rewards/rejected": 0.3950712978839874,
"step": 270
},
{
"epoch": 0.5862339701648783,
"grad_norm": 1386.5146227495984,
"learning_rate": 2.1804923757009882e-07,
"logits/chosen": 5578.41455078125,
"logits/rejected": 4647.2431640625,
"logps/chosen": -241.32284545898438,
"logps/rejected": -207.7650146484375,
"loss": 448.7999,
"rewards/accuracies": 0.534375011920929,
"rewards/chosen": 0.3940281867980957,
"rewards/margins": 0.003837400348857045,
"rewards/rejected": 0.3901907801628113,
"step": 280
},
{
"epoch": 0.6071708976707668,
"grad_norm": 1565.300342790805,
"learning_rate": 1.9999357655598891e-07,
"logits/chosen": 5835.33984375,
"logits/rejected": 5479.173828125,
"logps/chosen": -249.04843139648438,
"logps/rejected": -245.552734375,
"loss": 468.733,
"rewards/accuracies": 0.5062500238418579,
"rewards/chosen": 0.4238561689853668,
"rewards/margins": -0.09983213245868683,
"rewards/rejected": 0.5236883163452148,
"step": 290
},
{
"epoch": 0.6281078251766553,
"grad_norm": 1477.060718071622,
"learning_rate": 1.8220596619089573e-07,
"logits/chosen": 5350.3173828125,
"logits/rejected": 5039.15966796875,
"logps/chosen": -233.1553192138672,
"logps/rejected": -227.7581024169922,
"loss": 463.6427,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.3819466233253479,
"rewards/margins": -0.04966121166944504,
"rewards/rejected": 0.43160781264305115,
"step": 300
},
{
"epoch": 0.6490447526825438,
"grad_norm": 1467.0428814727877,
"learning_rate": 1.647817538357072e-07,
"logits/chosen": 5974.5576171875,
"logits/rejected": 5248.5712890625,
"logps/chosen": -246.29739379882812,
"logps/rejected": -230.0133819580078,
"loss": 451.9249,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.4453219473361969,
"rewards/margins": 0.049888670444488525,
"rewards/rejected": 0.395433247089386,
"step": 310
},
{
"epoch": 0.6699816801884323,
"grad_norm": 1539.7938960746517,
"learning_rate": 1.478143389201113e-07,
"logits/chosen": 5526.9794921875,
"logits/rejected": 4800.4169921875,
"logps/chosen": -232.0591583251953,
"logps/rejected": -218.22909545898438,
"loss": 457.9831,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.3980428874492645,
"rewards/margins": -0.017790189012885094,
"rewards/rejected": 0.41583308577537537,
"step": 320
},
{
"epoch": 0.6909186076943209,
"grad_norm": 1416.4774770653923,
"learning_rate": 1.3139467229135998e-07,
"logits/chosen": 5347.95751953125,
"logits/rejected": 4977.25830078125,
"logps/chosen": -237.83120727539062,
"logps/rejected": -223.06503295898438,
"loss": 456.2857,
"rewards/accuracies": 0.515625,
"rewards/chosen": 0.3895305097103119,
"rewards/margins": -0.023113062605261803,
"rewards/rejected": 0.41264358162879944,
"step": 330
},
{
"epoch": 0.7118555352002094,
"grad_norm": 1354.3420498241383,
"learning_rate": 1.1561076868822755e-07,
"logits/chosen": 5184.0341796875,
"logits/rejected": 4512.36376953125,
"logps/chosen": -215.9802703857422,
"logps/rejected": -202.09982299804688,
"loss": 441.2616,
"rewards/accuracies": 0.5562499761581421,
"rewards/chosen": 0.4158664345741272,
"rewards/margins": 0.02483862265944481,
"rewards/rejected": 0.3910277783870697,
"step": 340
},
{
"epoch": 0.7327924627060979,
"grad_norm": 1399.0816486347255,
"learning_rate": 1.0054723495346482e-07,
"logits/chosen": 5854.7001953125,
"logits/rejected": 4766.1748046875,
"logps/chosen": -257.1776428222656,
"logps/rejected": -228.104736328125,
"loss": 471.4627,
"rewards/accuracies": 0.534375011920929,
"rewards/chosen": 0.4373684823513031,
"rewards/margins": 0.03503155708312988,
"rewards/rejected": 0.402336984872818,
"step": 350
},
{
"epoch": 0.7537293902119864,
"grad_norm": 1352.127266112888,
"learning_rate": 8.628481651367875e-08,
"logits/chosen": 5555.2060546875,
"logits/rejected": 4852.7666015625,
"logps/chosen": -235.2597198486328,
"logps/rejected": -216.91415405273438,
"loss": 443.8227,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 0.42463842034339905,
"rewards/margins": 0.01125773973762989,
"rewards/rejected": 0.4133806824684143,
"step": 360
},
{
"epoch": 0.7746663177178749,
"grad_norm": 1283.6956906144683,
"learning_rate": 7.289996455765748e-08,
"logits/chosen": 5428.78271484375,
"logits/rejected": 4644.25390625,
"logps/chosen": -251.05245971679688,
"logps/rejected": -221.3125,
"loss": 453.7341,
"rewards/accuracies": 0.578125,
"rewards/chosen": 0.3774058520793915,
"rewards/margins": 0.02088645100593567,
"rewards/rejected": 0.3565194010734558,
"step": 370
},
{
"epoch": 0.7956032452237635,
"grad_norm": 1389.3936014370584,
"learning_rate": 6.046442623320145e-08,
"logits/chosen": 5676.46826171875,
"logits/rejected": 4961.1416015625,
"logps/chosen": -245.52609252929688,
"logps/rejected": -210.39529418945312,
"loss": 472.2963,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 0.39175355434417725,
"rewards/margins": -0.012869933620095253,
"rewards/rejected": 0.40462350845336914,
"step": 380
},
{
"epoch": 0.816540172729652,
"grad_norm": 1904.7699931128577,
"learning_rate": 4.904486005914027e-08,
"logits/chosen": 5950.8603515625,
"logits/rejected": 5257.07080078125,
"logps/chosen": -246.93521118164062,
"logps/rejected": -232.27798461914062,
"loss": 459.8493,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.44814223051071167,
"rewards/margins": 0.023155368864536285,
"rewards/rejected": 0.42498689889907837,
"step": 390
},
{
"epoch": 0.8374771002355405,
"grad_norm": 1314.6946141188957,
"learning_rate": 3.8702478614051345e-08,
"logits/chosen": 5522.93212890625,
"logits/rejected": 4914.50146484375,
"logps/chosen": -246.7040557861328,
"logps/rejected": -212.8903045654297,
"loss": 463.632,
"rewards/accuracies": 0.528124988079071,
"rewards/chosen": 0.4420008063316345,
"rewards/margins": 0.01232604868710041,
"rewards/rejected": 0.42967477440834045,
"step": 400
},
{
"epoch": 0.8584140277414289,
"grad_norm": 1386.6765646261215,
"learning_rate": 2.9492720416985e-08,
"logits/chosen": 5542.53125,
"logits/rejected": 4905.96435546875,
"logps/chosen": -233.2512664794922,
"logps/rejected": -216.3258819580078,
"loss": 459.2343,
"rewards/accuracies": 0.550000011920929,
"rewards/chosen": 0.4861605167388916,
"rewards/margins": -0.16168272495269775,
"rewards/rejected": 0.6478432416915894,
"step": 410
},
{
"epoch": 0.8793509552473174,
"grad_norm": 1421.0973915303414,
"learning_rate": 2.1464952759020856e-08,
"logits/chosen": 5875.4296875,
"logits/rejected": 5138.45947265625,
"logps/chosen": -249.4132843017578,
"logps/rejected": -230.8553009033203,
"loss": 457.3266,
"rewards/accuracies": 0.48124998807907104,
"rewards/chosen": 0.42565909028053284,
"rewards/margins": 0.009328785352408886,
"rewards/rejected": 0.4163302779197693,
"step": 420
},
{
"epoch": 0.9002878827532059,
"grad_norm": 2530.194729647411,
"learning_rate": 1.4662207078575684e-08,
"logits/chosen": 5782.8046875,
"logits/rejected": 4674.46826171875,
"logps/chosen": -244.35745239257812,
"logps/rejected": -218.1392364501953,
"loss": 449.309,
"rewards/accuracies": 0.578125,
"rewards/chosen": 0.4324522614479065,
"rewards/margins": 0.03925667330622673,
"rewards/rejected": 0.39319556951522827,
"step": 430
},
{
"epoch": 0.9212248102590945,
"grad_norm": 1331.0653218757734,
"learning_rate": 9.12094829893642e-09,
"logits/chosen": 5607.314453125,
"logits/rejected": 5351.4794921875,
"logps/chosen": -240.36508178710938,
"logps/rejected": -235.7848358154297,
"loss": 447.1441,
"rewards/accuracies": 0.47187501192092896,
"rewards/chosen": 0.3817844092845917,
"rewards/margins": -0.020826738327741623,
"rewards/rejected": 0.4026111960411072,
"step": 440
},
{
"epoch": 0.942161737764983,
"grad_norm": 1400.3246977795131,
"learning_rate": 4.8708793644441086e-09,
"logits/chosen": 5760.21923828125,
"logits/rejected": 4969.435546875,
"logps/chosen": -248.47109985351562,
"logps/rejected": -217.5637969970703,
"loss": 450.0296,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.41864126920700073,
"rewards/margins": -0.020225150510668755,
"rewards/rejected": 0.43886643648147583,
"step": 450
},
{
"epoch": 0.9630986652708715,
"grad_norm": 1459.4316477205139,
"learning_rate": 1.9347820230782295e-09,
"logits/chosen": 5948.6728515625,
"logits/rejected": 5038.36181640625,
"logps/chosen": -268.30438232421875,
"logps/rejected": -239.8975067138672,
"loss": 486.2789,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 0.4469468593597412,
"rewards/margins": -0.011204786598682404,
"rewards/rejected": 0.4581516683101654,
"step": 460
},
{
"epoch": 0.98403559277676,
"grad_norm": 1500.642382585496,
"learning_rate": 3.2839470889836627e-10,
"logits/chosen": 5545.70947265625,
"logits/rejected": 4929.12255859375,
"logps/chosen": -237.06051635742188,
"logps/rejected": -213.99795532226562,
"loss": 457.8427,
"rewards/accuracies": 0.528124988079071,
"rewards/chosen": 0.4377865195274353,
"rewards/margins": -0.0002623729524202645,
"rewards/rejected": 0.4380488991737366,
"step": 470
},
{
"epoch": 0.998691442030882,
"step": 477,
"total_flos": 0.0,
"train_loss": 465.78390522123135,
"train_runtime": 16522.4635,
"train_samples_per_second": 3.7,
"train_steps_per_second": 0.029
}
],
"logging_steps": 10,
"max_steps": 477,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}