{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 478,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0020920502092050207,
"grad_norm": 10.133265544600611,
"learning_rate": 1.0416666666666666e-08,
"logits/chosen": -3.21875,
"logits/rejected": -3.21875,
"logps/chosen": -420.0,
"logps/rejected": -552.0,
"loss": 0.6914,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.02092050209205021,
"grad_norm": 9.184965981316628,
"learning_rate": 1.0416666666666667e-07,
"logits/chosen": -3.265625,
"logits/rejected": -3.28125,
"logps/chosen": -484.0,
"logps/rejected": -516.0,
"loss": 0.6919,
"rewards/accuracies": 0.1111111119389534,
"rewards/chosen": 0.001251220703125,
"rewards/margins": 0.00055694580078125,
"rewards/rejected": 0.00069427490234375,
"step": 10
},
{
"epoch": 0.04184100418410042,
"grad_norm": 10.105256126223187,
"learning_rate": 2.0833333333333333e-07,
"logits/chosen": -3.21875,
"logits/rejected": -3.234375,
"logps/chosen": -520.0,
"logps/rejected": -540.0,
"loss": 0.6902,
"rewards/accuracies": 0.3125,
"rewards/chosen": 0.00799560546875,
"rewards/margins": 0.004119873046875,
"rewards/rejected": 0.003875732421875,
"step": 20
},
{
"epoch": 0.06276150627615062,
"grad_norm": 8.97965579353216,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": -3.28125,
"logits/rejected": -3.265625,
"logps/chosen": -468.0,
"logps/rejected": -516.0,
"loss": 0.6829,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": 0.040771484375,
"rewards/margins": 0.0198974609375,
"rewards/rejected": 0.0208740234375,
"step": 30
},
{
"epoch": 0.08368200836820083,
"grad_norm": 9.227660720825863,
"learning_rate": 4.1666666666666667e-07,
"logits/chosen": -3.25,
"logits/rejected": -3.328125,
"logps/chosen": -506.0,
"logps/rejected": -520.0,
"loss": 0.6627,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": 0.09130859375,
"rewards/margins": 0.059814453125,
"rewards/rejected": 0.03173828125,
"step": 40
},
{
"epoch": 0.10460251046025104,
"grad_norm": 9.819087605132305,
"learning_rate": 4.999733114418725e-07,
"logits/chosen": -3.359375,
"logits/rejected": -3.390625,
"logps/chosen": -498.0,
"logps/rejected": -544.0,
"loss": 0.6301,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": 0.0257568359375,
"rewards/margins": 0.1376953125,
"rewards/rejected": -0.11181640625,
"step": 50
},
{
"epoch": 0.12552301255230125,
"grad_norm": 12.048088131076954,
"learning_rate": 4.990398100856366e-07,
"logits/chosen": -3.53125,
"logits/rejected": -3.484375,
"logps/chosen": -568.0,
"logps/rejected": -604.0,
"loss": 0.5911,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -0.236328125,
"rewards/margins": 0.267578125,
"rewards/rejected": -0.50390625,
"step": 60
},
{
"epoch": 0.14644351464435146,
"grad_norm": 14.528700694610064,
"learning_rate": 4.967775735898179e-07,
"logits/chosen": -3.703125,
"logits/rejected": -3.65625,
"logps/chosen": -576.0,
"logps/rejected": -660.0,
"loss": 0.54,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.6015625,
"rewards/margins": 0.478515625,
"rewards/rejected": -1.078125,
"step": 70
},
{
"epoch": 0.16736401673640167,
"grad_norm": 14.89693527880136,
"learning_rate": 4.931986719649298e-07,
"logits/chosen": -3.78125,
"logits/rejected": -3.734375,
"logps/chosen": -680.0,
"logps/rejected": -792.0,
"loss": 0.468,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -1.4921875,
"rewards/margins": 0.75,
"rewards/rejected": -2.234375,
"step": 80
},
{
"epoch": 0.18828451882845187,
"grad_norm": 24.010753148040607,
"learning_rate": 4.883222001996351e-07,
"logits/chosen": -3.71875,
"logits/rejected": -3.734375,
"logps/chosen": -704.0,
"logps/rejected": -880.0,
"loss": 0.3987,
"rewards/accuracies": 0.84375,
"rewards/chosen": -2.03125,
"rewards/margins": 1.3125,
"rewards/rejected": -3.34375,
"step": 90
},
{
"epoch": 0.20920502092050208,
"grad_norm": 20.988032239641004,
"learning_rate": 4.821741763807186e-07,
"logits/chosen": -3.71875,
"logits/rejected": -3.703125,
"logps/chosen": -732.0,
"logps/rejected": -912.0,
"loss": 0.3407,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -2.46875,
"rewards/margins": 1.5859375,
"rewards/rejected": -4.0625,
"step": 100
},
{
"epoch": 0.20920502092050208,
"eval_logits/chosen": -3.75,
"eval_logits/rejected": -3.75,
"eval_logps/chosen": -724.0,
"eval_logps/rejected": -912.0,
"eval_loss": 0.3028574287891388,
"eval_rewards/accuracies": 0.8828125,
"eval_rewards/chosen": -2.25,
"eval_rewards/margins": 1.75,
"eval_rewards/rejected": -4.0,
"eval_runtime": 89.7294,
"eval_samples_per_second": 22.289,
"eval_steps_per_second": 0.357,
"step": 100
},
{
"epoch": 0.2301255230125523,
"grad_norm": 19.53117460181654,
"learning_rate": 4.747874028753375e-07,
"logits/chosen": -3.703125,
"logits/rejected": -3.65625,
"logps/chosen": -756.0,
"logps/rejected": -1024.0,
"loss": 0.2921,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -2.578125,
"rewards/margins": 2.390625,
"rewards/rejected": -4.96875,
"step": 110
},
{
"epoch": 0.2510460251046025,
"grad_norm": 26.848552029083983,
"learning_rate": 4.662012913161997e-07,
"logits/chosen": -3.765625,
"logits/rejected": -3.75,
"logps/chosen": -692.0,
"logps/rejected": -1040.0,
"loss": 0.2388,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -2.15625,
"rewards/margins": 3.140625,
"rewards/rejected": -5.3125,
"step": 120
},
{
"epoch": 0.2719665271966527,
"grad_norm": 49.46963494787925,
"learning_rate": 4.5646165232345103e-07,
"logits/chosen": -3.75,
"logits/rejected": -3.671875,
"logps/chosen": -820.0,
"logps/rejected": -1376.0,
"loss": 0.2143,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": -3.265625,
"rewards/margins": 5.125,
"rewards/rejected": -8.375,
"step": 130
},
{
"epoch": 0.2928870292887029,
"grad_norm": 27.856656903315532,
"learning_rate": 4.456204510851956e-07,
"logits/chosen": -3.75,
"logits/rejected": -3.625,
"logps/chosen": -852.0,
"logps/rejected": -1416.0,
"loss": 0.196,
"rewards/accuracies": 0.875,
"rewards/chosen": -3.40625,
"rewards/margins": 5.25,
"rewards/rejected": -8.625,
"step": 140
},
{
"epoch": 0.3138075313807531,
"grad_norm": 23.325737015073706,
"learning_rate": 4.337355301007335e-07,
"logits/chosen": -3.796875,
"logits/rejected": -3.71875,
"logps/chosen": -740.0,
"logps/rejected": -1360.0,
"loss": 0.1871,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -2.453125,
"rewards/margins": 5.78125,
"rewards/rejected": -8.25,
"step": 150
},
{
"epoch": 0.33472803347280333,
"grad_norm": 40.59499239982402,
"learning_rate": 4.2087030056579986e-07,
"logits/chosen": -3.765625,
"logits/rejected": -3.65625,
"logps/chosen": -708.0,
"logps/rejected": -1296.0,
"loss": 0.1955,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -2.453125,
"rewards/margins": 5.5,
"rewards/rejected": -7.9375,
"step": 160
},
{
"epoch": 0.35564853556485354,
"grad_norm": 32.891772625367125,
"learning_rate": 4.070934040463998e-07,
"logits/chosen": -3.703125,
"logits/rejected": -3.734375,
"logps/chosen": -716.0,
"logps/rejected": -1320.0,
"loss": 0.2043,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -2.359375,
"rewards/margins": 5.375,
"rewards/rejected": -7.75,
"step": 170
},
{
"epoch": 0.37656903765690375,
"grad_norm": 21.73830270472661,
"learning_rate": 3.9247834624635404e-07,
"logits/chosen": -3.765625,
"logits/rejected": -3.71875,
"logps/chosen": -780.0,
"logps/rejected": -1376.0,
"loss": 0.1924,
"rewards/accuracies": 0.90625,
"rewards/chosen": -2.578125,
"rewards/margins": 5.84375,
"rewards/rejected": -8.4375,
"step": 180
},
{
"epoch": 0.39748953974895396,
"grad_norm": 16.372098376222763,
"learning_rate": 3.7710310482256523e-07,
"logits/chosen": -3.8125,
"logits/rejected": -3.734375,
"logps/chosen": -696.0,
"logps/rejected": -1344.0,
"loss": 0.1693,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -2.234375,
"rewards/margins": 5.96875,
"rewards/rejected": -8.1875,
"step": 190
},
{
"epoch": 0.41841004184100417,
"grad_norm": 41.66546161927783,
"learning_rate": 3.610497133404795e-07,
"logits/chosen": -3.703125,
"logits/rejected": -3.703125,
"logps/chosen": -844.0,
"logps/rejected": -1624.0,
"loss": 0.1574,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -3.328125,
"rewards/margins": 7.625,
"rewards/rejected": -11.0,
"step": 200
},
{
"epoch": 0.41841004184100417,
"eval_logits/chosen": -3.765625,
"eval_logits/rejected": -3.71875,
"eval_logps/chosen": -780.0,
"eval_logps/rejected": -1536.0,
"eval_loss": 0.15842990577220917,
"eval_rewards/accuracies": 0.9296875,
"eval_rewards/chosen": -2.796875,
"eval_rewards/margins": 7.40625,
"eval_rewards/rejected": -10.1875,
"eval_runtime": 90.1471,
"eval_samples_per_second": 22.186,
"eval_steps_per_second": 0.355,
"step": 200
},
{
"epoch": 0.4393305439330544,
"grad_norm": 38.17786401364724,
"learning_rate": 3.4440382358952115e-07,
"logits/chosen": -3.71875,
"logits/rejected": -3.625,
"logps/chosen": -728.0,
"logps/rejected": -1584.0,
"loss": 0.169,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -2.578125,
"rewards/margins": 8.125,
"rewards/rejected": -10.75,
"step": 210
},
{
"epoch": 0.4602510460251046,
"grad_norm": 28.36201506292477,
"learning_rate": 3.272542485937368e-07,
"logits/chosen": -3.734375,
"logits/rejected": -3.703125,
"logps/chosen": -732.0,
"logps/rejected": -1560.0,
"loss": 0.1756,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -2.359375,
"rewards/margins": 8.0,
"rewards/rejected": -10.375,
"step": 220
},
{
"epoch": 0.4811715481171548,
"grad_norm": 37.321244440982746,
"learning_rate": 3.096924887558854e-07,
"logits/chosen": -3.8125,
"logits/rejected": -3.703125,
"logps/chosen": -800.0,
"logps/rejected": -1656.0,
"loss": 0.1563,
"rewards/accuracies": 0.90625,
"rewards/chosen": -2.78125,
"rewards/margins": 7.84375,
"rewards/rejected": -10.625,
"step": 230
},
{
"epoch": 0.502092050209205,
"grad_norm": 29.25849606336902,
"learning_rate": 2.9181224366319943e-07,
"logits/chosen": -3.71875,
"logits/rejected": -3.578125,
"logps/chosen": -716.0,
"logps/rejected": -1440.0,
"loss": 0.1714,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -2.328125,
"rewards/margins": 7.1875,
"rewards/rejected": -9.5625,
"step": 240
},
{
"epoch": 0.5230125523012552,
"grad_norm": 50.281378871196466,
"learning_rate": 2.7370891215954565e-07,
"logits/chosen": -3.734375,
"logits/rejected": -3.65625,
"logps/chosen": -760.0,
"logps/rejected": -1576.0,
"loss": 0.1625,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -2.6875,
"rewards/margins": 7.8125,
"rewards/rejected": -10.5,
"step": 250
},
{
"epoch": 0.5439330543933054,
"grad_norm": 35.703635573374726,
"learning_rate": 2.55479083351317e-07,
"logits/chosen": -3.734375,
"logits/rejected": -3.640625,
"logps/chosen": -800.0,
"logps/rejected": -1600.0,
"loss": 0.1357,
"rewards/accuracies": 0.90625,
"rewards/chosen": -3.078125,
"rewards/margins": 7.625,
"rewards/rejected": -10.6875,
"step": 260
},
{
"epoch": 0.5648535564853556,
"grad_norm": 24.94230756271867,
"learning_rate": 2.3722002126275822e-07,
"logits/chosen": -3.65625,
"logits/rejected": -3.65625,
"logps/chosen": -804.0,
"logps/rejected": -1544.0,
"loss": 0.1701,
"rewards/accuracies": 0.90625,
"rewards/chosen": -3.296875,
"rewards/margins": 7.1875,
"rewards/rejected": -10.5,
"step": 270
},
{
"epoch": 0.5857740585774058,
"grad_norm": 28.862452074602317,
"learning_rate": 2.19029145890313e-07,
"logits/chosen": -3.65625,
"logits/rejected": -3.578125,
"logps/chosen": -832.0,
"logps/rejected": -1520.0,
"loss": 0.1453,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -3.15625,
"rewards/margins": 6.625,
"rewards/rejected": -9.75,
"step": 280
},
{
"epoch": 0.606694560669456,
"grad_norm": 33.07346655704146,
"learning_rate": 2.0100351342479216e-07,
"logits/chosen": -3.59375,
"logits/rejected": -3.5625,
"logps/chosen": -840.0,
"logps/rejected": -1592.0,
"loss": 0.1752,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -3.4375,
"rewards/margins": 7.3125,
"rewards/rejected": -10.75,
"step": 290
},
{
"epoch": 0.6276150627615062,
"grad_norm": 28.81151134715954,
"learning_rate": 1.8323929841460178e-07,
"logits/chosen": -3.625,
"logits/rejected": -3.578125,
"logps/chosen": -720.0,
"logps/rejected": -1408.0,
"loss": 0.1551,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -2.359375,
"rewards/margins": 6.5,
"rewards/rejected": -8.875,
"step": 300
},
{
"epoch": 0.6276150627615062,
"eval_logits/chosen": -3.640625,
"eval_logits/rejected": -3.59375,
"eval_logps/chosen": -744.0,
"eval_logps/rejected": -1472.0,
"eval_loss": 0.13853245973587036,
"eval_rewards/accuracies": 0.93359375,
"eval_rewards/chosen": -2.453125,
"eval_rewards/margins": 7.15625,
"eval_rewards/rejected": -9.625,
"eval_runtime": 90.005,
"eval_samples_per_second": 22.221,
"eval_steps_per_second": 0.356,
"step": 300
},
{
"epoch": 0.6485355648535565,
"grad_norm": 39.90299619186908,
"learning_rate": 1.6583128063291573e-07,
"logits/chosen": -3.59375,
"logits/rejected": -3.53125,
"logps/chosen": -780.0,
"logps/rejected": -1672.0,
"loss": 0.1403,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -2.921875,
"rewards/margins": 8.5,
"rewards/rejected": -11.4375,
"step": 310
},
{
"epoch": 0.6694560669456067,
"grad_norm": 30.707942119385958,
"learning_rate": 1.488723393865766e-07,
"logits/chosen": -3.609375,
"logits/rejected": -3.578125,
"logps/chosen": -864.0,
"logps/rejected": -1776.0,
"loss": 0.1436,
"rewards/accuracies": 0.9375,
"rewards/chosen": -3.296875,
"rewards/margins": 9.0,
"rewards/rejected": -12.3125,
"step": 320
},
{
"epoch": 0.6903765690376569,
"grad_norm": 22.500572393034663,
"learning_rate": 1.3245295796480788e-07,
"logits/chosen": -3.5625,
"logits/rejected": -3.484375,
"logps/chosen": -888.0,
"logps/rejected": -1808.0,
"loss": 0.1576,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -3.828125,
"rewards/margins": 9.0,
"rewards/rejected": -12.8125,
"step": 330
},
{
"epoch": 0.7112970711297071,
"grad_norm": 27.937514221124733,
"learning_rate": 1.1666074087171627e-07,
"logits/chosen": -3.53125,
"logits/rejected": -3.46875,
"logps/chosen": -960.0,
"logps/rejected": -1920.0,
"loss": 0.1881,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -4.5625,
"rewards/margins": 9.3125,
"rewards/rejected": -13.875,
"step": 340
},
{
"epoch": 0.7322175732217573,
"grad_norm": 21.646791307337544,
"learning_rate": 1.0157994641835734e-07,
"logits/chosen": -3.609375,
"logits/rejected": -3.515625,
"logps/chosen": -808.0,
"logps/rejected": -1488.0,
"loss": 0.1677,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -3.421875,
"rewards/margins": 6.4375,
"rewards/rejected": -9.875,
"step": 350
},
{
"epoch": 0.7531380753138075,
"grad_norm": 26.51641238698488,
"learning_rate": 8.729103716819111e-08,
"logits/chosen": -3.515625,
"logits/rejected": -3.4375,
"logps/chosen": -804.0,
"logps/rejected": -1520.0,
"loss": 0.154,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -2.9375,
"rewards/margins": 6.875,
"rewards/rejected": -9.8125,
"step": 360
},
{
"epoch": 0.7740585774058577,
"grad_norm": 32.61897967589747,
"learning_rate": 7.387025063449081e-08,
"logits/chosen": -3.59375,
"logits/rejected": -3.5,
"logps/chosen": -792.0,
"logps/rejected": -1480.0,
"loss": 0.1439,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -3.28125,
"rewards/margins": 6.46875,
"rewards/rejected": -9.75,
"step": 370
},
{
"epoch": 0.7949790794979079,
"grad_norm": 24.073566416356307,
"learning_rate": 6.138919252022435e-08,
"logits/chosen": -3.59375,
"logits/rejected": -3.53125,
"logps/chosen": -852.0,
"logps/rejected": -1696.0,
"loss": 0.1474,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -3.5,
"rewards/margins": 7.875,
"rewards/rejected": -11.375,
"step": 380
},
{
"epoch": 0.8158995815899581,
"grad_norm": 22.024234097844488,
"learning_rate": 4.991445467064689e-08,
"logits/chosen": -3.625,
"logits/rejected": -3.515625,
"logps/chosen": -820.0,
"logps/rejected": -1672.0,
"loss": 0.155,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": -3.421875,
"rewards/margins": 7.90625,
"rewards/rejected": -11.375,
"step": 390
},
{
"epoch": 0.8368200836820083,
"grad_norm": 22.5208730955551,
"learning_rate": 3.9507259776993954e-08,
"logits/chosen": -3.53125,
"logits/rejected": -3.46875,
"logps/chosen": -816.0,
"logps/rejected": -1592.0,
"loss": 0.1511,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -3.421875,
"rewards/margins": 7.3125,
"rewards/rejected": -10.6875,
"step": 400
},
{
"epoch": 0.8368200836820083,
"eval_logits/chosen": -3.5625,
"eval_logits/rejected": -3.5,
"eval_logps/chosen": -836.0,
"eval_logps/rejected": -1592.0,
"eval_loss": 0.13080458343029022,
"eval_rewards/accuracies": 0.94921875,
"eval_rewards/chosen": -3.34375,
"eval_rewards/margins": 7.46875,
"eval_rewards/rejected": -10.8125,
"eval_runtime": 90.1024,
"eval_samples_per_second": 22.197,
"eval_steps_per_second": 0.355,
"step": 400
},
{
"epoch": 0.8577405857740585,
"grad_norm": 33.05531795103649,
"learning_rate": 3.022313472693447e-08,
"logits/chosen": -3.609375,
"logits/rejected": -3.53125,
"logps/chosen": -836.0,
"logps/rejected": -1672.0,
"loss": 0.1302,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -3.5,
"rewards/margins": 7.78125,
"rewards/rejected": -11.25,
"step": 410
},
{
"epoch": 0.8786610878661087,
"grad_norm": 22.39150376227694,
"learning_rate": 2.2111614344599684e-08,
"logits/chosen": -3.515625,
"logits/rejected": -3.4375,
"logps/chosen": -820.0,
"logps/rejected": -1640.0,
"loss": 0.1296,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -3.3125,
"rewards/margins": 7.78125,
"rewards/rejected": -11.0625,
"step": 420
},
{
"epoch": 0.899581589958159,
"grad_norm": 34.532509934361165,
"learning_rate": 1.521597710086439e-08,
"logits/chosen": -3.625,
"logits/rejected": -3.5,
"logps/chosen": -804.0,
"logps/rejected": -1648.0,
"loss": 0.138,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -3.125,
"rewards/margins": 7.875,
"rewards/rejected": -11.0,
"step": 430
},
{
"epoch": 0.9205020920502092,
"grad_norm": 33.11707475610062,
"learning_rate": 9.57301420397924e-09,
"logits/chosen": -3.609375,
"logits/rejected": -3.5625,
"logps/chosen": -844.0,
"logps/rejected": -1632.0,
"loss": 0.1543,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -3.46875,
"rewards/margins": 7.5625,
"rewards/rejected": -11.0625,
"step": 440
},
{
"epoch": 0.9414225941422594,
"grad_norm": 46.292590138732955,
"learning_rate": 5.212833302556258e-09,
"logits/chosen": -3.515625,
"logits/rejected": -3.5,
"logps/chosen": -824.0,
"logps/rejected": -1632.0,
"loss": 0.1439,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -3.53125,
"rewards/margins": 7.75,
"rewards/rejected": -11.25,
"step": 450
},
{
"epoch": 0.9623430962343096,
"grad_norm": 13.872523653141515,
"learning_rate": 2.158697848236607e-09,
"logits/chosen": -3.546875,
"logits/rejected": -3.46875,
"logps/chosen": -864.0,
"logps/rejected": -1792.0,
"loss": 0.1534,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -3.34375,
"rewards/margins": 9.0,
"rewards/rejected": -12.3125,
"step": 460
},
{
"epoch": 0.9832635983263598,
"grad_norm": 40.836340138155315,
"learning_rate": 4.269029751107489e-10,
"logits/chosen": -3.609375,
"logits/rejected": -3.46875,
"logps/chosen": -784.0,
"logps/rejected": -1576.0,
"loss": 0.1607,
"rewards/accuracies": 0.90625,
"rewards/chosen": -3.234375,
"rewards/margins": 7.625,
"rewards/rejected": -10.875,
"step": 470
},
{
"epoch": 1.0,
"step": 478,
"total_flos": 0.0,
"train_loss": 0.251720839215123,
"train_runtime": 7290.7659,
"train_samples_per_second": 8.385,
"train_steps_per_second": 0.066
}
],
"logging_steps": 10,
"max_steps": 478,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}