|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.9915492957746479, |
|
"eval_steps": 30, |
|
"global_step": 88, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.01, |
|
      "grad_norm": null,
|
"learning_rate": 0.0001, |
|
"logits/chosen": -3.188204765319824, |
|
"logits/rejected": -2.849832534790039, |
|
"logps/chosen": -220.16908264160156, |
|
"logps/rejected": -186.17868041992188, |
|
"loss": 0.6931, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
      "grad_norm": null,
|
"learning_rate": 0.0001, |
|
"logits/chosen": -3.417233943939209, |
|
"logits/rejected": -3.385444164276123, |
|
"logps/chosen": -511.65631103515625, |
|
"logps/rejected": -303.28558349609375, |
|
"loss": 0.6931, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
      "grad_norm": null,
|
"learning_rate": 0.0001, |
|
"logits/chosen": -2.9685182571411133, |
|
"logits/rejected": -2.9099667072296143, |
|
"logps/chosen": -131.90631103515625, |
|
"logps/rejected": -127.09970092773438, |
|
"loss": 0.6931, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"grad_norm": 13.127852439880371, |
|
"learning_rate": 9.999645980833454e-05, |
|
"logits/chosen": -3.3438305854797363, |
|
"logits/rejected": -2.7247352600097656, |
|
"logps/chosen": -212.61917114257812, |
|
"logps/rejected": -160.17083740234375, |
|
"loss": 0.6931, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"grad_norm": 9.166139602661133, |
|
"learning_rate": 9.998583973465646e-05, |
|
"logits/chosen": -3.382075786590576, |
|
"logits/rejected": -2.574824094772339, |
|
"logps/chosen": -284.8162841796875, |
|
"logps/rejected": -199.35745239257812, |
|
"loss": 0.5279, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -0.21959751844406128, |
|
"rewards/margins": 0.6385258436203003, |
|
"rewards/rejected": -0.8581234216690063, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"grad_norm": 7.322300910949707, |
|
"learning_rate": 9.99681412828496e-05, |
|
"logits/chosen": -3.271949291229248, |
|
"logits/rejected": -2.513960838317871, |
|
"logps/chosen": -372.6782531738281, |
|
"logps/rejected": -174.07479858398438, |
|
"loss": 0.2913, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.6500003933906555, |
|
"rewards/margins": 2.8001651763916016, |
|
"rewards/rejected": -2.1501646041870117, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"grad_norm": 8.41703987121582, |
|
"learning_rate": 9.99433669591504e-05, |
|
"logits/chosen": -3.0961129665374756, |
|
"logits/rejected": -2.8720593452453613, |
|
"logps/chosen": -316.4073486328125, |
|
"logps/rejected": -167.79397583007812, |
|
"loss": 0.4464, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -1.1310131549835205, |
|
"rewards/margins": 1.2248451709747314, |
|
"rewards/rejected": -2.355858087539673, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"grad_norm": 3.9780449867248535, |
|
"learning_rate": 9.991152027179307e-05, |
|
"logits/chosen": -2.9302220344543457, |
|
"logits/rejected": -2.5785322189331055, |
|
"logps/chosen": -165.38961791992188, |
|
"logps/rejected": -118.81493377685547, |
|
"loss": 0.1908, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.9433582425117493, |
|
"rewards/margins": 3.344132661819458, |
|
"rewards/rejected": -2.4007744789123535, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"grad_norm": 7.300680637359619, |
|
"learning_rate": 9.987260573051269e-05, |
|
"logits/chosen": -3.1814284324645996, |
|
"logits/rejected": -3.0421230792999268, |
|
"logps/chosen": -258.6530456542969, |
|
"logps/rejected": -209.901611328125, |
|
"loss": 0.3466, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -0.742733895778656, |
|
"rewards/margins": 1.9031461477279663, |
|
"rewards/rejected": -2.6458799839019775, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"grad_norm": 7.8778839111328125, |
|
"learning_rate": 9.982662884590662e-05, |
|
"logits/chosen": -2.9834322929382324, |
|
"logits/rejected": -2.546109199523926, |
|
"logps/chosen": -357.8366394042969, |
|
"logps/rejected": -137.680908203125, |
|
"loss": 0.4102, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.5062696933746338, |
|
"rewards/margins": 4.4324493408203125, |
|
"rewards/rejected": -4.938718795776367, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"grad_norm": 11.32342529296875, |
|
"learning_rate": 9.977359612865423e-05, |
|
"logits/chosen": -2.992344379425049, |
|
"logits/rejected": -2.4726979732513428, |
|
"logps/chosen": -248.93341064453125, |
|
"logps/rejected": -167.0366668701172, |
|
"loss": 0.5224, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": 0.211262047290802, |
|
"rewards/margins": 4.505336284637451, |
|
"rewards/rejected": -4.294074058532715, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"grad_norm": 9.8779878616333, |
|
"learning_rate": 9.971351508859488e-05, |
|
"logits/chosen": -3.1347732543945312, |
|
"logits/rejected": -2.930368423461914, |
|
"logps/chosen": -286.450439453125, |
|
"logps/rejected": -202.72720336914062, |
|
"loss": 0.6846, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -1.4156379699707031, |
|
"rewards/margins": 3.582124710083008, |
|
"rewards/rejected": -4.997762680053711, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"grad_norm": 8.25694465637207, |
|
"learning_rate": 9.964639423366442e-05, |
|
"logits/chosen": -3.121607780456543, |
|
"logits/rejected": -2.9769201278686523, |
|
"logps/chosen": -291.4040832519531, |
|
"logps/rejected": -219.06231689453125, |
|
"loss": 0.27, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.20223967730998993, |
|
"rewards/margins": 4.1018805503845215, |
|
"rewards/rejected": -3.8996407985687256, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"grad_norm": 3.865050792694092, |
|
"learning_rate": 9.957224306869053e-05, |
|
"logits/chosen": -3.265730381011963, |
|
"logits/rejected": -2.9264979362487793, |
|
"logps/chosen": -269.4535827636719, |
|
"logps/rejected": -205.49166870117188, |
|
"loss": 0.1639, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.9554134607315063, |
|
"rewards/margins": 5.716032028198242, |
|
"rewards/rejected": -4.760618686676025, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"grad_norm": 3.5297553539276123, |
|
"learning_rate": 9.949107209404665e-05, |
|
"logits/chosen": -2.461251735687256, |
|
"logits/rejected": -2.371189832687378, |
|
"logps/chosen": -259.4444885253906, |
|
"logps/rejected": -134.9102783203125, |
|
"loss": 0.2108, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.03092677891254425, |
|
"rewards/margins": 3.4555163383483887, |
|
"rewards/rejected": -3.4245896339416504, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"grad_norm": 5.433193206787109, |
|
"learning_rate": 9.940289280416508e-05, |
|
"logits/chosen": -3.2052440643310547, |
|
"logits/rejected": -2.8054556846618652, |
|
"logps/chosen": -265.2724304199219, |
|
"logps/rejected": -225.47068786621094, |
|
"loss": 0.1771, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.08068099617958069, |
|
"rewards/margins": 4.355477333068848, |
|
"rewards/rejected": -4.274796485900879, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"grad_norm": 10.783153533935547, |
|
"learning_rate": 9.930771768590933e-05, |
|
"logits/chosen": -3.0025675296783447, |
|
"logits/rejected": -3.019012451171875, |
|
"logps/chosen": -195.4888916015625, |
|
"logps/rejected": -198.30792236328125, |
|
"loss": 0.6906, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -2.109578847885132, |
|
"rewards/margins": 0.9626373052597046, |
|
"rewards/rejected": -3.072216033935547, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"grad_norm": 6.201537132263184, |
|
"learning_rate": 9.92055602168058e-05, |
|
"logits/chosen": -3.2320845127105713, |
|
"logits/rejected": -2.9034526348114014, |
|
"logps/chosen": -226.6787109375, |
|
"logps/rejected": -132.74095153808594, |
|
"loss": 0.2037, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.05915778875350952, |
|
"rewards/margins": 3.9392547607421875, |
|
"rewards/rejected": -3.8800971508026123, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"grad_norm": 11.768068313598633, |
|
"learning_rate": 9.909643486313533e-05, |
|
"logits/chosen": -3.178317070007324, |
|
"logits/rejected": -3.297847270965576, |
|
"logps/chosen": -412.5939636230469, |
|
"logps/rejected": -241.15750122070312, |
|
"loss": 0.5854, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -1.0589046478271484, |
|
"rewards/margins": 5.891679286956787, |
|
"rewards/rejected": -6.9505839347839355, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"grad_norm": 8.77907943725586, |
|
"learning_rate": 9.898035707788463e-05, |
|
"logits/chosen": -3.237691640853882, |
|
"logits/rejected": -3.086668014526367, |
|
"logps/chosen": -311.6419372558594, |
|
"logps/rejected": -238.47996520996094, |
|
"loss": 0.4823, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -0.5970950126647949, |
|
"rewards/margins": 4.7628984451293945, |
|
"rewards/rejected": -5.3599934577941895, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"grad_norm": 3.0071136951446533, |
|
"learning_rate": 9.885734329855798e-05, |
|
"logits/chosen": -3.2764744758605957, |
|
"logits/rejected": -2.930645704269409, |
|
"logps/chosen": -345.3895568847656, |
|
"logps/rejected": -216.34646606445312, |
|
"loss": 0.0629, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 1.1355853080749512, |
|
"rewards/margins": 6.265766620635986, |
|
"rewards/rejected": -5.130181312561035, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"grad_norm": 11.331860542297363, |
|
"learning_rate": 9.872741094484965e-05, |
|
"logits/chosen": -3.3079967498779297, |
|
"logits/rejected": -2.7648227214813232, |
|
"logps/chosen": -351.6337585449219, |
|
"logps/rejected": -270.9600830078125, |
|
"loss": 0.369, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.9473640322685242, |
|
"rewards/margins": 2.920064687728882, |
|
"rewards/rejected": -3.8674285411834717, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"grad_norm": 3.934148073196411, |
|
"learning_rate": 9.859057841617709e-05, |
|
"logits/chosen": -2.9965269565582275, |
|
"logits/rejected": -2.872361183166504, |
|
"logps/chosen": -424.9866943359375, |
|
"logps/rejected": -250.78515625, |
|
"loss": 0.0707, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.7661307454109192, |
|
"rewards/margins": 8.092283248901367, |
|
"rewards/rejected": -7.326152801513672, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"grad_norm": 6.69138765335083, |
|
"learning_rate": 9.844686508907537e-05, |
|
"logits/chosen": -3.3082752227783203, |
|
"logits/rejected": -3.2877731323242188, |
|
"logps/chosen": -293.59478759765625, |
|
"logps/rejected": -179.19845581054688, |
|
"loss": 0.275, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -1.0576655864715576, |
|
"rewards/margins": 2.9845128059387207, |
|
"rewards/rejected": -4.042178153991699, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"grad_norm": 14.109583854675293, |
|
"learning_rate": 9.829629131445342e-05, |
|
"logits/chosen": -2.856363534927368, |
|
"logits/rejected": -2.133051633834839, |
|
"logps/chosen": -203.2696533203125, |
|
"logps/rejected": -100.72183990478516, |
|
"loss": 0.8885, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -1.2501509189605713, |
|
"rewards/margins": 1.4751089811325073, |
|
"rewards/rejected": -2.725259780883789, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"grad_norm": 11.02879810333252, |
|
"learning_rate": 9.81388784147121e-05, |
|
"logits/chosen": -3.1515085697174072, |
|
"logits/rejected": -2.761265754699707, |
|
"logps/chosen": -230.35438537597656, |
|
"logps/rejected": -280.5235290527344, |
|
"loss": 0.3744, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.2650168836116791, |
|
"rewards/margins": 4.330430507659912, |
|
"rewards/rejected": -4.065413475036621, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"grad_norm": 5.773349761962891, |
|
"learning_rate": 9.797464868072488e-05, |
|
"logits/chosen": -3.1555724143981934, |
|
"logits/rejected": -2.7849271297454834, |
|
"logps/chosen": -204.43417358398438, |
|
"logps/rejected": -110.06712341308594, |
|
"loss": 0.2692, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.146636962890625, |
|
"rewards/margins": 2.842060089111328, |
|
"rewards/rejected": -3.988697052001953, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"grad_norm": 5.401435375213623, |
|
"learning_rate": 9.780362536868113e-05, |
|
"logits/chosen": -2.565051317214966, |
|
"logits/rejected": -2.8752522468566895, |
|
"logps/chosen": -158.33001708984375, |
|
"logps/rejected": -139.02786254882812, |
|
"loss": 0.3653, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -0.32712388038635254, |
|
"rewards/margins": 2.7614259719848633, |
|
"rewards/rejected": -3.088550090789795, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"grad_norm": 14.269856452941895, |
|
"learning_rate": 9.762583269679303e-05, |
|
"logits/chosen": -2.4300293922424316, |
|
"logits/rejected": -2.7993407249450684, |
|
"logps/chosen": -233.670166015625, |
|
"logps/rejected": -229.98846435546875, |
|
"loss": 0.82, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -1.4308565855026245, |
|
"rewards/margins": 1.704122543334961, |
|
"rewards/rejected": -3.134979248046875, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"grad_norm": 11.34128189086914, |
|
"learning_rate": 9.744129584186598e-05, |
|
"logits/chosen": -2.8739359378814697, |
|
"logits/rejected": -3.0709590911865234, |
|
"logps/chosen": -187.5973663330078, |
|
"logps/rejected": -171.49639892578125, |
|
"loss": 0.8236, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -1.5466258525848389, |
|
"rewards/margins": 1.1274800300598145, |
|
"rewards/rejected": -2.674105644226074, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"eval_logits/chosen": -3.2232699394226074, |
|
"eval_logits/rejected": -2.919212818145752, |
|
"eval_logps/chosen": -186.79000854492188, |
|
"eval_logps/rejected": -195.58921813964844, |
|
"eval_loss": 0.004889195319265127, |
|
"eval_rewards/accuracies": 1.0, |
|
"eval_rewards/chosen": 1.5010696649551392, |
|
"eval_rewards/margins": 7.495745658874512, |
|
"eval_rewards/rejected": -5.994676113128662, |
|
"eval_runtime": 5.0142, |
|
"eval_samples_per_second": 1.994, |
|
"eval_steps_per_second": 0.997, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"grad_norm": 13.044546127319336, |
|
"learning_rate": 9.725004093573342e-05, |
|
"logits/chosen": -2.972625255584717, |
|
"logits/rejected": -3.1085734367370605, |
|
"logps/chosen": -234.9746856689453, |
|
"logps/rejected": -170.3094024658203, |
|
"loss": 1.0699, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -1.7236368656158447, |
|
"rewards/margins": 2.1611523628234863, |
|
"rewards/rejected": -3.884788990020752, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"grad_norm": 5.612191200256348, |
|
"learning_rate": 9.705209506155634e-05, |
|
"logits/chosen": -2.7585391998291016, |
|
"logits/rejected": -2.7289156913757324, |
|
"logps/chosen": -283.48614501953125, |
|
"logps/rejected": -195.35939025878906, |
|
"loss": 0.1887, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -0.6527029275894165, |
|
"rewards/margins": 3.757754325866699, |
|
"rewards/rejected": -4.410456657409668, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"grad_norm": 5.19436502456665, |
|
"learning_rate": 9.68474862499881e-05, |
|
"logits/chosen": -2.993574857711792, |
|
"logits/rejected": -2.428236246109009, |
|
"logps/chosen": -300.1104431152344, |
|
"logps/rejected": -282.0811462402344, |
|
"loss": 0.2097, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -0.1330656111240387, |
|
"rewards/margins": 3.342900276184082, |
|
"rewards/rejected": -3.475965738296509, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"grad_norm": 7.4402265548706055, |
|
"learning_rate": 9.663624347520505e-05, |
|
"logits/chosen": -3.148404359817505, |
|
"logits/rejected": -3.0182220935821533, |
|
"logps/chosen": -304.60955810546875, |
|
"logps/rejected": -140.74435424804688, |
|
"loss": 0.3689, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.3897354006767273, |
|
"rewards/margins": 3.62821102142334, |
|
"rewards/rejected": -4.017946243286133, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"grad_norm": 6.259589195251465, |
|
"learning_rate": 9.641839665080363e-05, |
|
"logits/chosen": -2.65729022026062, |
|
"logits/rejected": -2.7510809898376465, |
|
"logps/chosen": -330.6309814453125, |
|
"logps/rejected": -256.57421875, |
|
"loss": 0.156, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.08656883984804153, |
|
"rewards/margins": 4.7406206130981445, |
|
"rewards/rejected": -4.654051780700684, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"grad_norm": 6.500273704528809, |
|
"learning_rate": 9.619397662556435e-05, |
|
"logits/chosen": -2.995790719985962, |
|
"logits/rejected": -3.0307507514953613, |
|
"logps/chosen": -308.2181701660156, |
|
"logps/rejected": -172.92543029785156, |
|
"loss": 0.2963, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -0.4932462275028229, |
|
"rewards/margins": 2.582528829574585, |
|
"rewards/rejected": -3.075775146484375, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"grad_norm": 5.707283973693848, |
|
"learning_rate": 9.596301517908328e-05, |
|
"logits/chosen": -3.033268451690674, |
|
"logits/rejected": -3.0536768436431885, |
|
"logps/chosen": -245.74594116210938, |
|
"logps/rejected": -160.86160278320312, |
|
"loss": 0.202, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -0.2917206287384033, |
|
"rewards/margins": 4.046570777893066, |
|
"rewards/rejected": -4.338291645050049, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"grad_norm": 6.139472007751465, |
|
"learning_rate": 9.572554501727198e-05, |
|
"logits/chosen": -3.1614038944244385, |
|
"logits/rejected": -2.7908077239990234, |
|
"logps/chosen": -297.92730712890625, |
|
"logps/rejected": -190.27981567382812, |
|
"loss": 0.1892, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.4447726905345917, |
|
"rewards/margins": 4.278682708740234, |
|
"rewards/rejected": -4.723455429077148, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"grad_norm": 3.791252851486206, |
|
"learning_rate": 9.548159976772592e-05, |
|
"logits/chosen": -3.0960066318511963, |
|
"logits/rejected": -2.7181763648986816, |
|
"logps/chosen": -223.69406127929688, |
|
"logps/rejected": -123.40463256835938, |
|
"loss": 0.1746, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -0.49881893396377563, |
|
"rewards/margins": 3.9385018348693848, |
|
"rewards/rejected": -4.437320709228516, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"grad_norm": 11.180024147033691, |
|
"learning_rate": 9.523121397496269e-05, |
|
"logits/chosen": -3.0495779514312744, |
|
"logits/rejected": -2.918321132659912, |
|
"logps/chosen": -219.83587646484375, |
|
"logps/rejected": -188.59054565429688, |
|
"loss": 0.4327, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.5894317626953125, |
|
"rewards/margins": 2.6391894817352295, |
|
"rewards/rejected": -3.228621244430542, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"grad_norm": 7.4187703132629395, |
|
"learning_rate": 9.497442309553016e-05, |
|
"logits/chosen": -2.807313919067383, |
|
"logits/rejected": -2.6464860439300537, |
|
"logps/chosen": -189.6230010986328, |
|
"logps/rejected": -151.01107788085938, |
|
"loss": 0.693, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -3.1926302909851074, |
|
"rewards/margins": 2.0725247859954834, |
|
"rewards/rejected": -5.26515531539917, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"grad_norm": 8.239777565002441, |
|
"learning_rate": 9.471126349298556e-05, |
|
"logits/chosen": -2.6991612911224365, |
|
"logits/rejected": -2.4860870838165283, |
|
"logps/chosen": -249.05978393554688, |
|
"logps/rejected": -261.3404541015625, |
|
"loss": 0.3519, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -2.2070960998535156, |
|
"rewards/margins": 2.7090442180633545, |
|
"rewards/rejected": -4.916140079498291, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"grad_norm": 12.044631958007812, |
|
"learning_rate": 9.444177243274618e-05, |
|
"logits/chosen": -3.069183111190796, |
|
"logits/rejected": -3.239974021911621, |
|
"logps/chosen": -191.36724853515625, |
|
"logps/rejected": -203.04444885253906, |
|
"loss": 0.8501, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -2.2000980377197266, |
|
"rewards/margins": 3.8068671226501465, |
|
"rewards/rejected": -6.006965160369873, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"grad_norm": 10.961316108703613, |
|
"learning_rate": 9.41659880768122e-05, |
|
"logits/chosen": -3.117356538772583, |
|
"logits/rejected": -3.1276466846466064, |
|
"logps/chosen": -241.28115844726562, |
|
"logps/rejected": -217.63143920898438, |
|
"loss": 0.5688, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -1.914351463317871, |
|
"rewards/margins": 3.1862049102783203, |
|
"rewards/rejected": -5.100556373596191, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"grad_norm": 11.185661315917969, |
|
"learning_rate": 9.388394947836279e-05, |
|
"logits/chosen": -3.105802536010742, |
|
"logits/rejected": -2.782811164855957, |
|
"logps/chosen": -346.5468444824219, |
|
"logps/rejected": -281.4388732910156, |
|
"loss": 0.5225, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -1.3682477474212646, |
|
"rewards/margins": 1.8964588642120361, |
|
"rewards/rejected": -3.264706611633301, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 11.120816230773926, |
|
"learning_rate": 9.359569657622574e-05, |
|
"logits/chosen": -3.119126319885254, |
|
"logits/rejected": -3.2095906734466553, |
|
"logps/chosen": -529.1534423828125, |
|
"logps/rejected": -279.0389404296875, |
|
"loss": 0.3314, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.4428637623786926, |
|
"rewards/margins": 6.021182060241699, |
|
"rewards/rejected": -5.578318119049072, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"grad_norm": 8.270225524902344, |
|
"learning_rate": 9.330127018922194e-05, |
|
"logits/chosen": -2.903927803039551, |
|
"logits/rejected": -2.7225542068481445, |
|
"logps/chosen": -275.26318359375, |
|
"logps/rejected": -169.46505737304688, |
|
"loss": 0.6252, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -1.2819722890853882, |
|
"rewards/margins": 2.14062237739563, |
|
"rewards/rejected": -3.4225947856903076, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"grad_norm": 3.7271859645843506, |
|
"learning_rate": 9.300071201038503e-05, |
|
"logits/chosen": -2.98264741897583, |
|
"logits/rejected": -2.486337184906006, |
|
"logps/chosen": -257.7012023925781, |
|
"logps/rejected": -152.14598083496094, |
|
"loss": 0.1543, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -0.4061692953109741, |
|
"rewards/margins": 4.309848785400391, |
|
"rewards/rejected": -4.716017723083496, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"grad_norm": 5.926673889160156, |
|
"learning_rate": 9.26940646010574e-05, |
|
"logits/chosen": -3.2901933193206787, |
|
"logits/rejected": -2.8755884170532227, |
|
"logps/chosen": -191.0556640625, |
|
"logps/rejected": -104.35969543457031, |
|
"loss": 0.1687, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -0.4329254627227783, |
|
"rewards/margins": 4.122668266296387, |
|
"rewards/rejected": -4.555593490600586, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
      "grad_norm": null,
|
"learning_rate": 9.26940646010574e-05, |
|
"logits/chosen": -3.4009902477264404, |
|
"logits/rejected": -3.2163825035095215, |
|
"logps/chosen": -228.1378936767578, |
|
"logps/rejected": -145.276123046875, |
|
"loss": 0.3099, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.2526423931121826, |
|
"rewards/margins": 4.154683589935303, |
|
"rewards/rejected": -3.90204119682312, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"grad_norm": 6.389315128326416, |
|
"learning_rate": 9.238137138486318e-05, |
|
"logits/chosen": -3.3087496757507324, |
|
"logits/rejected": -3.0156049728393555, |
|
"logps/chosen": -367.84161376953125, |
|
"logps/rejected": -198.76544189453125, |
|
"loss": 0.1843, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.8194153308868408, |
|
"rewards/margins": 4.259294033050537, |
|
"rewards/rejected": -3.439878463745117, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"grad_norm": 2.2384748458862305, |
|
"learning_rate": 9.206267664155907e-05, |
|
"logits/chosen": -3.412761926651001, |
|
"logits/rejected": -3.3606858253479004, |
|
"logps/chosen": -394.595947265625, |
|
"logps/rejected": -234.29969787597656, |
|
"loss": 0.0957, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.6140856742858887, |
|
"rewards/margins": 3.623807191848755, |
|
"rewards/rejected": -3.009721517562866, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"grad_norm": 9.3274507522583, |
|
"learning_rate": 9.173802550076401e-05, |
|
"logits/chosen": -2.9212708473205566, |
|
"logits/rejected": -3.2253193855285645, |
|
"logps/chosen": -309.66741943359375, |
|
"logps/rejected": -194.5745849609375, |
|
"loss": 0.3459, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.11235330998897552, |
|
"rewards/margins": 2.6622633934020996, |
|
"rewards/rejected": -2.549910068511963, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"grad_norm": 9.517582893371582, |
|
"learning_rate": 9.140746393556854e-05, |
|
"logits/chosen": -3.4288511276245117, |
|
"logits/rejected": -3.304213285446167, |
|
"logps/chosen": -376.1202392578125, |
|
"logps/rejected": -233.58489990234375, |
|
"loss": 0.5965, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.555878758430481, |
|
"rewards/margins": 4.904500961303711, |
|
"rewards/rejected": -4.3486223220825195, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"grad_norm": 1.8991745710372925, |
|
"learning_rate": 9.107103875602459e-05, |
|
"logits/chosen": -2.924043655395508, |
|
"logits/rejected": -3.185393810272217, |
|
"logps/chosen": -341.2203063964844, |
|
"logps/rejected": -234.63758850097656, |
|
"loss": 0.0427, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.3599395751953125, |
|
"rewards/margins": 3.722670555114746, |
|
"rewards/rejected": -3.3627307415008545, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"grad_norm": 8.762215614318848, |
|
"learning_rate": 9.072879760251679e-05, |
|
"logits/chosen": -3.2768988609313965, |
|
"logits/rejected": -3.0862388610839844, |
|
"logps/chosen": -224.249755859375, |
|
"logps/rejected": -180.57443237304688, |
|
"loss": 0.3701, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -0.7867403626441956, |
|
"rewards/margins": 2.9776391983032227, |
|
"rewards/rejected": -3.7643797397613525, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 8.915589332580566, |
|
"learning_rate": 9.038078893901634e-05, |
|
"logits/chosen": -3.2617459297180176, |
|
"logits/rejected": -3.202209711074829, |
|
"logps/chosen": -89.90280151367188, |
|
"logps/rejected": -147.65475463867188, |
|
"loss": 0.4777, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.5283554792404175, |
|
"rewards/margins": 2.442681312561035, |
|
"rewards/rejected": -2.971036911010742, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"grad_norm": 11.555606842041016, |
|
"learning_rate": 9.002706204621803e-05, |
|
"logits/chosen": -3.001934766769409, |
|
"logits/rejected": -3.068469285964966, |
|
"logps/chosen": -344.8009948730469, |
|
"logps/rejected": -205.77664184570312, |
|
"loss": 0.3874, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.012376070022583008, |
|
"rewards/margins": 2.2519307136535645, |
|
"rewards/rejected": -2.2395546436309814, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"grad_norm": 14.009621620178223, |
|
"learning_rate": 8.966766701456177e-05, |
|
"logits/chosen": -2.8611111640930176, |
|
"logits/rejected": -3.4576072692871094, |
|
"logps/chosen": -338.19970703125, |
|
"logps/rejected": -260.2259521484375, |
|
"loss": 0.7432, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.9131150841712952, |
|
"rewards/margins": 2.4023218154907227, |
|
"rewards/rejected": -3.315437078475952, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 4.895538330078125, |
|
"learning_rate": 8.930265473713938e-05, |
|
"logits/chosen": -3.3844141960144043, |
|
"logits/rejected": -3.3034396171569824, |
|
"logps/chosen": -369.8033447265625, |
|
"logps/rejected": -244.80897521972656, |
|
"loss": 0.2099, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.4544011354446411, |
|
"rewards/margins": 2.975295305252075, |
|
"rewards/rejected": -2.5208940505981445, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"eval_logits/chosen": -3.299164295196533, |
|
"eval_logits/rejected": -3.039053440093994, |
|
"eval_logps/chosen": -182.6027069091797, |
|
"eval_logps/rejected": -194.1580810546875, |
|
"eval_loss": 0.0008067694725468755, |
|
"eval_rewards/accuracies": 1.0, |
|
"eval_rewards/chosen": 2.757256269454956, |
|
"eval_rewards/margins": 8.322589874267578, |
|
"eval_rewards/rejected": -5.565333366394043, |
|
"eval_runtime": 4.7427, |
|
"eval_samples_per_second": 2.108, |
|
"eval_steps_per_second": 1.054, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"grad_norm": 13.20186710357666, |
|
"learning_rate": 8.893207690248776e-05, |
|
"logits/chosen": -3.109037399291992, |
|
"logits/rejected": -3.326498508453369, |
|
"logps/chosen": -275.9548034667969, |
|
"logps/rejected": -253.3433380126953, |
|
"loss": 0.6256, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -1.315821647644043, |
|
"rewards/margins": 1.796335220336914, |
|
"rewards/rejected": -3.112156867980957, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"grad_norm": 2.376776933670044, |
|
"learning_rate": 8.855598598726939e-05, |
|
"logits/chosen": -3.083179473876953, |
|
"logits/rejected": -3.2115023136138916, |
|
"logps/chosen": -234.42640686035156, |
|
"logps/rejected": -150.53904724121094, |
|
"loss": 0.0802, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.07728518545627594, |
|
"rewards/margins": 4.584873199462891, |
|
"rewards/rejected": -4.507587909698486, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 6.479050636291504, |
|
"learning_rate": 8.817443524884119e-05, |
|
"logits/chosen": -2.5378470420837402, |
|
"logits/rejected": -2.813725471496582, |
|
"logps/chosen": -321.4371337890625, |
|
"logps/rejected": -244.43296813964844, |
|
"loss": 0.2551, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.725128173828125, |
|
"rewards/margins": 6.416964530944824, |
|
"rewards/rejected": -5.691835880279541, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 8.595841407775879, |
|
"learning_rate": 8.778747871771292e-05, |
|
"logits/chosen": -3.3628735542297363, |
|
"logits/rejected": -2.6477882862091064, |
|
"logps/chosen": -491.04852294921875, |
|
"logps/rejected": -248.0843505859375, |
|
"loss": 0.4921, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.3514632284641266, |
|
"rewards/margins": 7.0789079666137695, |
|
"rewards/rejected": -6.727444648742676, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 12.204965591430664, |
|
"learning_rate": 8.739517118989605e-05, |
|
"logits/chosen": -3.0609679222106934, |
|
"logits/rejected": -3.0792436599731445, |
|
"logps/chosen": -297.3428649902344, |
|
"logps/rejected": -190.6544647216797, |
|
"loss": 0.5732, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -1.3494699001312256, |
|
"rewards/margins": 2.159775733947754, |
|
"rewards/rejected": -3.5092456340789795, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 8.17355728149414, |
|
"learning_rate": 8.69975682191442e-05, |
|
"logits/chosen": -3.373033046722412, |
|
"logits/rejected": -3.043926239013672, |
|
"logps/chosen": -578.4942626953125, |
|
"logps/rejected": -265.8677978515625, |
|
"loss": 0.3497, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -0.6642241477966309, |
|
"rewards/margins": 6.849948883056641, |
|
"rewards/rejected": -7.5141730308532715, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 6.397715091705322, |
|
"learning_rate": 8.659472610908627e-05, |
|
"logits/chosen": -2.711515426635742, |
|
"logits/rejected": -2.395803928375244, |
|
"logps/chosen": -203.78604125976562, |
|
"logps/rejected": -129.5730438232422, |
|
"loss": 0.1589, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -1.117239236831665, |
|
"rewards/margins": 4.5233540534973145, |
|
"rewards/rejected": -5.640593528747559, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.30370500683784485, |
|
"learning_rate": 8.618670190525352e-05, |
|
"logits/chosen": -3.0820484161376953, |
|
"logits/rejected": -3.0193471908569336, |
|
"logps/chosen": -378.7370300292969, |
|
"logps/rejected": -222.33255004882812, |
|
"loss": 0.0083, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.245200514793396, |
|
"rewards/margins": 7.6438798904418945, |
|
"rewards/rejected": -7.398679733276367, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 10.54161548614502, |
|
"learning_rate": 8.577355338700132e-05, |
|
"logits/chosen": -3.2389307022094727, |
|
"logits/rejected": -3.0821778774261475, |
|
"logps/chosen": -346.2233581542969, |
|
"logps/rejected": -192.50180053710938, |
|
"loss": 0.7383, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -1.195668339729309, |
|
"rewards/margins": 5.02830696105957, |
|
"rewards/rejected": -6.22397518157959, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 1.390268087387085, |
|
"learning_rate": 8.535533905932738e-05, |
|
"logits/chosen": -3.3620431423187256, |
|
"logits/rejected": -2.399602174758911, |
|
"logps/chosen": -221.27833557128906, |
|
"logps/rejected": -118.21566009521484, |
|
"loss": 0.0245, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.3484119176864624, |
|
"rewards/margins": 8.018238067626953, |
|
"rewards/rejected": -7.669825553894043, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 10.928914070129395, |
|
"learning_rate": 8.493211814458673e-05, |
|
"logits/chosen": -2.9451489448547363, |
|
"logits/rejected": -3.133699893951416, |
|
"logps/chosen": -201.42678833007812, |
|
"logps/rejected": -202.67453002929688, |
|
"loss": 0.3648, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -2.156554698944092, |
|
"rewards/margins": 4.664039611816406, |
|
"rewards/rejected": -6.820594310760498, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.7106189131736755, |
|
"learning_rate": 8.450395057410561e-05, |
|
"logits/chosen": -3.4210917949676514, |
|
"logits/rejected": -2.9528050422668457, |
|
"logps/chosen": -317.3839416503906, |
|
"logps/rejected": -170.9514617919922, |
|
"loss": 0.0172, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.5772140026092529, |
|
"rewards/margins": 6.574632167816162, |
|
"rewards/rejected": -7.151845455169678, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 11.109986305236816, |
|
"learning_rate": 8.407089697969457e-05, |
|
"logits/chosen": -3.1691198348999023, |
|
"logits/rejected": -2.9826912879943848, |
|
"logps/chosen": -377.9816589355469, |
|
"logps/rejected": -288.35528564453125, |
|
"loss": 0.4652, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -0.14444704353809357, |
|
"rewards/margins": 4.671531677246094, |
|
"rewards/rejected": -4.815978527069092, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 16.743736267089844, |
|
"learning_rate": 8.363301868506264e-05, |
|
"logits/chosen": -3.06179141998291, |
|
"logits/rejected": -2.977768898010254, |
|
"logps/chosen": -336.5064392089844, |
|
"logps/rejected": -273.6090393066406, |
|
"loss": 0.9035, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -1.7004997730255127, |
|
"rewards/margins": 3.5526602268218994, |
|
"rewards/rejected": -5.253159999847412, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 4.297181606292725, |
|
"learning_rate": 8.319037769713338e-05, |
|
"logits/chosen": -3.1873130798339844, |
|
"logits/rejected": -3.2429358959198, |
|
"logps/chosen": -295.90167236328125, |
|
"logps/rejected": -252.4221954345703, |
|
"loss": 0.0913, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.35775870084762573, |
|
"rewards/margins": 7.1074934005737305, |
|
"rewards/rejected": -7.46525239944458, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 1.5024635791778564, |
|
"learning_rate": 8.274303669726426e-05, |
|
"logits/chosen": -3.2978920936584473, |
|
"logits/rejected": -3.056913375854492, |
|
"logps/chosen": -209.08226013183594, |
|
"logps/rejected": -216.75137329101562, |
|
"loss": 0.0595, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7533246278762817, |
|
"rewards/margins": 6.2144622802734375, |
|
"rewards/rejected": -6.96778678894043, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 6.813451766967773, |
|
"learning_rate": 8.229105903237044e-05, |
|
"logits/chosen": -3.1069507598876953, |
|
"logits/rejected": -2.643913507461548, |
|
"logps/chosen": -379.9039611816406, |
|
"logps/rejected": -199.5642547607422, |
|
"loss": 0.2955, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -2.039405345916748, |
|
"rewards/margins": 6.617141246795654, |
|
"rewards/rejected": -8.656547546386719, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 12.086407661437988, |
|
"learning_rate": 8.183450870595441e-05, |
|
"logits/chosen": -3.06406307220459, |
|
"logits/rejected": -3.0317187309265137, |
|
"logps/chosen": -369.6795654296875, |
|
"logps/rejected": -303.2950744628906, |
|
"loss": 0.4151, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -1.9069956541061401, |
|
"rewards/margins": 4.3820600509643555, |
|
"rewards/rejected": -6.289055824279785, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 3.3662607669830322, |
|
"learning_rate": 8.13734503690426e-05, |
|
"logits/chosen": -3.289384126663208, |
|
"logits/rejected": -3.1391568183898926, |
|
"logps/chosen": -345.8055114746094, |
|
"logps/rejected": -193.66909790039062, |
|
"loss": 0.1584, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -0.599603533744812, |
|
"rewards/margins": 6.25246524810791, |
|
"rewards/rejected": -6.852068901062012, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 4.315209865570068, |
|
"learning_rate": 8.090794931103026e-05, |
|
"logits/chosen": -3.228783369064331, |
|
"logits/rejected": -3.0682406425476074, |
|
"logps/chosen": -270.1308898925781, |
|
"logps/rejected": -199.65390014648438, |
|
"loss": 0.197, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -1.2517391443252563, |
|
"rewards/margins": 5.22841215133667, |
|
"rewards/rejected": -6.480151176452637, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 4.3422465324401855, |
|
"learning_rate": 8.043807145043604e-05, |
|
"logits/chosen": -3.3883039951324463, |
|
"logits/rejected": -2.8610763549804688, |
|
"logps/chosen": -375.4300231933594, |
|
"logps/rejected": -212.55474853515625, |
|
"loss": 0.0891, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.205045461654663, |
|
"rewards/margins": 7.316597938537598, |
|
"rewards/rejected": -8.521642684936523, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 14.285599708557129, |
|
"learning_rate": 7.996388332556735e-05, |
|
"logits/chosen": -3.384727954864502, |
|
"logits/rejected": -3.3384907245635986, |
|
"logps/chosen": -403.5943603515625, |
|
"logps/rejected": -331.4810791015625, |
|
"loss": 0.6671, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -1.1636757850646973, |
|
"rewards/margins": 2.9779176712036133, |
|
"rewards/rejected": -4.1415934562683105, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.26110827922821045, |
|
"learning_rate": 7.94854520850981e-05, |
|
"logits/chosen": -3.1430768966674805, |
|
"logits/rejected": -3.018350601196289, |
|
"logps/chosen": -502.6695556640625, |
|
"logps/rejected": -263.3882751464844, |
|
"loss": 0.0062, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 1.0688759088516235, |
|
"rewards/margins": 9.059045791625977, |
|
"rewards/rejected": -7.990170001983643, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 8.293683052062988, |
|
"learning_rate": 7.900284547855991e-05, |
|
"logits/chosen": -3.26918363571167, |
|
"logits/rejected": -2.9317312240600586, |
|
"logps/chosen": -287.8534851074219, |
|
"logps/rejected": -225.27447509765625, |
|
"loss": 0.1985, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.16245371103286743, |
|
"rewards/margins": 6.57786750793457, |
|
"rewards/rejected": -6.415413856506348, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 1.314470648765564, |
|
"learning_rate": 7.85161318467482e-05, |
|
"logits/chosen": -3.219449043273926, |
|
"logits/rejected": -2.8287854194641113, |
|
"logps/chosen": -366.5806884765625, |
|
"logps/rejected": -191.7723388671875, |
|
"loss": 0.0161, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.41258037090301514, |
|
"rewards/margins": 6.966887474060059, |
|
"rewards/rejected": -6.554306983947754, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 20.908584594726562, |
|
"learning_rate": 7.80253801120447e-05, |
|
"logits/chosen": -3.160271406173706, |
|
"logits/rejected": -3.0260157585144043, |
|
"logps/chosen": -382.6925048828125, |
|
"logps/rejected": -233.9979248046875, |
|
"loss": 0.7179, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.4525451958179474, |
|
"rewards/margins": 2.8419482707977295, |
|
"rewards/rejected": -3.2944936752319336, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 1.2079492807388306, |
|
"learning_rate": 7.753065976865744e-05, |
|
"logits/chosen": -3.191283702850342, |
|
"logits/rejected": -3.0339455604553223, |
|
"logps/chosen": -285.8114013671875, |
|
"logps/rejected": -169.56741333007812, |
|
"loss": 0.0272, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.33831024169921875, |
|
"rewards/margins": 6.575672149658203, |
|
"rewards/rejected": -6.237361907958984, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.654447615146637, |
|
"learning_rate": 7.703204087277988e-05, |
|
"logits/chosen": -3.2184348106384277, |
|
"logits/rejected": -3.2598187923431396, |
|
"logps/chosen": -163.65548706054688, |
|
"logps/rejected": -143.7698974609375, |
|
"loss": 0.0114, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 2.0235941410064697, |
|
"rewards/margins": 7.350224494934082, |
|
"rewards/rejected": -5.326630592346191, |
|
"step": 88 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 264, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 3, |
|
"save_steps": 1, |
|
"total_flos": 0.0, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|