{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9985693848354793,
  "eval_steps": 100,
  "global_step": 349,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 1483.079147156977,
      "learning_rate": 5.7142857142857136e-09,
      "logits/chosen": -4.490396976470947,
      "logits/rejected": -4.787891387939453,
      "logps/chosen": -300.56573486328125,
      "logps/rejected": -263.39849853515625,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "grad_norm": 1678.9009350663757,
      "learning_rate": 5.714285714285714e-08,
      "logits/chosen": -4.292892932891846,
      "logits/rejected": -4.4704790115356445,
      "logps/chosen": -285.7367858886719,
      "logps/rejected": -241.40879821777344,
      "loss": 0.7232,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.03021715022623539,
      "rewards/margins": 0.018014244735240936,
      "rewards/rejected": 0.012202905490994453,
      "step": 10
    },
    {
      "epoch": 0.06,
      "grad_norm": 1307.5912774162387,
      "learning_rate": 1.1428571428571427e-07,
      "logits/chosen": -4.212913990020752,
      "logits/rejected": -4.504936695098877,
      "logps/chosen": -312.25714111328125,
      "logps/rejected": -247.5605926513672,
      "loss": 0.6348,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": 0.25936827063560486,
      "rewards/margins": 0.15551437437534332,
      "rewards/rejected": 0.10385391861200333,
      "step": 20
    },
    {
      "epoch": 0.09,
      "grad_norm": 877.5830541670848,
      "learning_rate": 1.714285714285714e-07,
      "logits/chosen": -4.158999919891357,
      "logits/rejected": -4.378296852111816,
      "logps/chosen": -320.1895446777344,
      "logps/rejected": -273.6168518066406,
      "loss": 0.4394,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": 1.6762861013412476,
      "rewards/margins": 1.1370527744293213,
      "rewards/rejected": 0.5392333269119263,
      "step": 30
    },
    {
      "epoch": 0.11,
      "grad_norm": 1094.7268353859254,
      "learning_rate": 1.9987489925657699e-07,
      "logits/chosen": -4.327412128448486,
      "logits/rejected": -4.590438365936279,
      "logps/chosen": -272.52813720703125,
      "logps/rejected": -226.4479522705078,
      "loss": 0.3519,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 3.0370898246765137,
      "rewards/margins": 2.1227898597717285,
      "rewards/rejected": 0.9143003225326538,
      "step": 40
    },
    {
      "epoch": 0.14,
      "grad_norm": 1142.545986046605,
      "learning_rate": 1.98875970549573e-07,
      "logits/chosen": -4.374251365661621,
      "logits/rejected": -4.627996921539307,
      "logps/chosen": -264.49615478515625,
      "logps/rejected": -226.27197265625,
      "loss": 0.3689,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": 4.1793413162231445,
      "rewards/margins": 2.736725091934204,
      "rewards/rejected": 1.4426158666610718,
      "step": 50
    },
    {
      "epoch": 0.17,
      "grad_norm": 635.1242404757879,
      "learning_rate": 1.968881042201029e-07,
      "logits/chosen": -4.35978889465332,
      "logits/rejected": -4.594507694244385,
      "logps/chosen": -296.86907958984375,
      "logps/rejected": -251.1016845703125,
      "loss": 0.3246,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": 4.9698944091796875,
      "rewards/margins": 3.8340022563934326,
      "rewards/rejected": 1.1358922719955444,
      "step": 60
    },
    {
      "epoch": 0.2,
      "grad_norm": 871.5895225513464,
      "learning_rate": 1.9393118250841894e-07,
      "logits/chosen": -4.349625587463379,
      "logits/rejected": -4.586968421936035,
      "logps/chosen": -285.0860595703125,
      "logps/rejected": -241.4550018310547,
      "loss": 0.3259,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": 4.21413516998291,
      "rewards/margins": 3.7849984169006348,
      "rewards/rejected": 0.42913690209388733,
      "step": 70
    },
    {
      "epoch": 0.23,
      "grad_norm": 869.2800598030351,
      "learning_rate": 1.900347799523094e-07,
      "logits/chosen": -4.277868747711182,
      "logits/rejected": -4.535857677459717,
      "logps/chosen": -318.2683410644531,
      "logps/rejected": -272.18206787109375,
      "loss": 0.2501,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 4.3253655433654785,
      "rewards/margins": 4.170114040374756,
      "rewards/rejected": 0.15525080263614655,
      "step": 80
    },
    {
      "epoch": 0.26,
      "grad_norm": 616.9171514391553,
      "learning_rate": 1.8523786758850436e-07,
      "logits/chosen": -4.401219844818115,
      "logits/rejected": -4.6477251052856445,
      "logps/chosen": -280.7421569824219,
      "logps/rejected": -236.2926025390625,
      "loss": 0.2859,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": 3.923023223876953,
      "rewards/margins": 4.055254936218262,
      "rewards/rejected": -0.13223108649253845,
      "step": 90
    },
    {
      "epoch": 0.29,
      "grad_norm": 735.4670744748103,
      "learning_rate": 1.795884231721841e-07,
      "logits/chosen": -4.318561553955078,
      "logits/rejected": -4.602439880371094,
      "logps/chosen": -267.45794677734375,
      "logps/rejected": -232.0417938232422,
      "loss": 0.3114,
      "rewards/accuracies": 0.875,
      "rewards/chosen": 4.1319708824157715,
      "rewards/margins": 4.702456951141357,
      "rewards/rejected": -0.5704857110977173,
      "step": 100
    },
    {
      "epoch": 0.29,
      "eval_logits/chosen": -3.226163148880005,
      "eval_logits/rejected": -3.226163148880005,
      "eval_logps/chosen": -161.294677734375,
      "eval_logps/rejected": -161.294677734375,
      "eval_loss": 0.6931471824645996,
      "eval_rewards/accuracies": 0.0,
      "eval_rewards/chosen": -2.8588294982910156,
      "eval_rewards/margins": 0.0,
      "eval_rewards/rejected": -2.8588294982910156,
      "eval_runtime": 1.5114,
      "eval_samples_per_second": 0.662,
      "eval_steps_per_second": 0.662,
      "step": 100
    },
    {
      "epoch": 0.31,
      "grad_norm": 817.8641381018862,
      "learning_rate": 1.7314295131309637e-07,
      "logits/chosen": -4.324714660644531,
      "logits/rejected": -4.614729404449463,
      "logps/chosen": -293.42364501953125,
      "logps/rejected": -245.01620483398438,
      "loss": 0.3079,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": 4.0577192306518555,
      "rewards/margins": 4.518521308898926,
      "rewards/rejected": -0.4608024060726166,
      "step": 110
    },
    {
      "epoch": 0.34,
      "grad_norm": 926.642960527248,
      "learning_rate": 1.6596591832778466e-07,
      "logits/chosen": -4.334780216217041,
      "logits/rejected": -4.567566394805908,
      "logps/chosen": -298.56976318359375,
      "logps/rejected": -266.8869323730469,
      "loss": 0.2848,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": 3.83539080619812,
      "rewards/margins": 4.040234088897705,
      "rewards/rejected": -0.20484237372875214,
      "step": 120
    },
    {
      "epoch": 0.37,
      "grad_norm": 675.9811999128359,
      "learning_rate": 1.5812910746042256e-07,
      "logits/chosen": -4.3064866065979,
      "logits/rejected": -4.553826332092285,
      "logps/chosen": -286.5328063964844,
      "logps/rejected": -242.8630828857422,
      "loss": 0.2782,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": 3.7822985649108887,
      "rewards/margins": 4.371214389801025,
      "rewards/rejected": -0.5889158248901367,
      "step": 130
    },
    {
      "epoch": 0.4,
      "grad_norm": 1048.4484509272795,
      "learning_rate": 1.4971090092120542e-07,
      "logits/chosen": -4.304565906524658,
      "logits/rejected": -4.545400142669678,
      "logps/chosen": -294.17095947265625,
      "logps/rejected": -243.83935546875,
      "loss": 0.269,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 3.8339390754699707,
      "rewards/margins": 4.558647155761719,
      "rewards/rejected": -0.7247086763381958,
      "step": 140
    },
    {
      "epoch": 0.43,
      "grad_norm": 844.0558457976633,
      "learning_rate": 1.4079549592320782e-07,
      "logits/chosen": -4.313709735870361,
      "logits/rejected": -4.621119499206543,
      "logps/chosen": -292.96661376953125,
      "logps/rejected": -248.0418701171875,
      "loss": 0.2727,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": 3.8533847332000732,
      "rewards/margins": 4.262558937072754,
      "rewards/rejected": -0.4091736674308777,
      "step": 150
    },
    {
      "epoch": 0.46,
      "grad_norm": 684.2161612155076,
      "learning_rate": 1.3147206255874882e-07,
      "logits/chosen": -4.248518943786621,
      "logits/rejected": -4.44361686706543,
      "logps/chosen": -309.7388610839844,
      "logps/rejected": -272.90252685546875,
      "loss": 0.2961,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 4.352166652679443,
      "rewards/margins": 4.611546516418457,
      "rewards/rejected": -0.2593800127506256,
      "step": 160
    },
    {
      "epoch": 0.49,
      "grad_norm": 795.319436926025,
      "learning_rate": 1.2183385193801653e-07,
      "logits/chosen": -4.289888381958008,
      "logits/rejected": -4.489100933074951,
      "logps/chosen": -272.3153991699219,
      "logps/rejected": -237.64114379882812,
      "loss": 0.2547,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": 4.135977745056152,
      "rewards/margins": 4.544943809509277,
      "rewards/rejected": -0.40896692872047424,
      "step": 170
    },
    {
      "epoch": 0.52,
      "grad_norm": 917.2913745554154,
      "learning_rate": 1.1197726351017051e-07,
      "logits/chosen": -4.37355375289917,
      "logits/rejected": -4.5346221923828125,
      "logps/chosen": -261.6694641113281,
      "logps/rejected": -230.89599609375,
      "loss": 0.3104,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 3.4763107299804688,
      "rewards/margins": 3.877528429031372,
      "rewards/rejected": -0.4012181758880615,
      "step": 180
    },
    {
      "epoch": 0.54,
      "grad_norm": 880.6071936953809,
      "learning_rate": 1.0200088089538943e-07,
      "logits/chosen": -4.33815860748291,
      "logits/rejected": -4.5543646812438965,
      "logps/chosen": -288.26873779296875,
      "logps/rejected": -253.7147216796875,
      "loss": 0.3129,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": 4.4660868644714355,
      "rewards/margins": 4.566544055938721,
      "rewards/rejected": -0.10045762360095978,
      "step": 190
    },
    {
      "epoch": 0.57,
      "grad_norm": 781.3066688232569,
      "learning_rate": 9.20044858712785e-08,
      "logits/chosen": -4.292983055114746,
      "logits/rejected": -4.566291332244873,
      "logps/chosen": -315.58251953125,
      "logps/rejected": -270.0286560058594,
      "loss": 0.2741,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 4.502341270446777,
      "rewards/margins": 4.4218339920043945,
      "rewards/rejected": 0.08050797879695892,
      "step": 200
    },
    {
      "epoch": 0.57,
      "eval_logits/chosen": -3.2033309936523438,
      "eval_logits/rejected": -3.2033309936523438,
      "eval_logps/chosen": -160.92892456054688,
      "eval_logps/rejected": -160.92892456054688,
      "eval_loss": 0.6931471824645996,
      "eval_rewards/accuracies": 0.0,
      "eval_rewards/chosen": -2.675952911376953,
      "eval_rewards/margins": 0.0,
      "eval_rewards/rejected": -2.675952911376953,
      "eval_runtime": 1.48,
      "eval_samples_per_second": 0.676,
      "eval_steps_per_second": 0.676,
      "step": 200
    },
    {
      "epoch": 0.6,
      "grad_norm": 1021.5696401893487,
      "learning_rate": 8.208806037554645e-08,
      "logits/chosen": -4.327147483825684,
      "logits/rejected": -4.6312479972839355,
      "logps/chosen": -292.85919189453125,
      "logps/rejected": -236.7397918701172,
      "loss": 0.2345,
      "rewards/accuracies": 0.875,
      "rewards/chosen": 4.086556434631348,
      "rewards/margins": 4.589372634887695,
      "rewards/rejected": -0.5028160810470581,
      "step": 210
    },
    {
      "epoch": 0.63,
      "grad_norm": 720.6830161967047,
      "learning_rate": 7.23507865067214e-08,
      "logits/chosen": -4.357525825500488,
      "logits/rejected": -4.7082133293151855,
      "logps/chosen": -298.2110900878906,
      "logps/rejected": -237.28903198242188,
      "loss": 0.241,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 3.84515118598938,
      "rewards/margins": 4.655531406402588,
      "rewards/rejected": -0.8103805780410767,
      "step": 220
    },
    {
      "epoch": 0.66,
      "grad_norm": 784.9312094778309,
      "learning_rate": 6.289005452469778e-08,
      "logits/chosen": -4.388636112213135,
      "logits/rejected": -4.748753070831299,
      "logps/chosen": -288.0061340332031,
      "logps/rejected": -236.6077880859375,
      "loss": 0.268,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 3.5908846855163574,
      "rewards/margins": 4.1128339767456055,
      "rewards/rejected": -0.5219489932060242,
      "step": 230
    },
    {
      "epoch": 0.69,
      "grad_norm": 765.61870702253,
      "learning_rate": 5.38004887728938e-08,
      "logits/chosen": -4.234927177429199,
      "logits/rejected": -4.550938606262207,
      "logps/chosen": -283.8558654785156,
      "logps/rejected": -242.86538696289062,
      "loss": 0.2908,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": 3.562377452850342,
      "rewards/margins": 4.074954032897949,
      "rewards/rejected": -0.5125768780708313,
      "step": 240
    },
    {
      "epoch": 0.72,
      "grad_norm": 787.5249084258048,
      "learning_rate": 4.517300126455066e-08,
      "logits/chosen": -4.290322303771973,
      "logits/rejected": -4.522026538848877,
      "logps/chosen": -292.47442626953125,
      "logps/rejected": -257.0675354003906,
      "loss": 0.2635,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": 3.95862078666687,
      "rewards/margins": 4.515480995178223,
      "rewards/rejected": -0.5568601489067078,
      "step": 250
    },
    {
      "epoch": 0.74,
      "grad_norm": 894.7487355536125,
      "learning_rate": 3.70938823990135e-08,
      "logits/chosen": -4.367636680603027,
      "logits/rejected": -4.524864196777344,
      "logps/chosen": -293.8681640625,
      "logps/rejected": -266.19622802734375,
      "loss": 0.2999,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": 3.311814069747925,
      "rewards/margins": 3.883411407470703,
      "rewards/rejected": -0.5715969800949097,
      "step": 260
    },
    {
      "epoch": 0.77,
      "grad_norm": 582.5138549963486,
      "learning_rate": 2.9643937902467277e-08,
      "logits/chosen": -4.23429012298584,
      "logits/rejected": -4.521827697753906,
      "logps/chosen": -280.5379333496094,
      "logps/rejected": -241.7247314453125,
      "loss": 0.2388,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 3.911522626876831,
      "rewards/margins": 4.728894233703613,
      "rewards/rejected": -0.8173705339431763,
      "step": 270
    },
    {
      "epoch": 0.8,
      "grad_norm": 810.3954826895692,
      "learning_rate": 2.289768062527362e-08,
      "logits/chosen": -4.273584842681885,
      "logits/rejected": -4.6050519943237305,
      "logps/chosen": -283.619384765625,
      "logps/rejected": -233.9824981689453,
      "loss": 0.2895,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": 3.576643466949463,
      "rewards/margins": 3.991179943084717,
      "rewards/rejected": -0.41453617811203003,
      "step": 280
    },
    {
      "epoch": 0.83,
      "grad_norm": 730.6997584190797,
      "learning_rate": 1.6922585279389035e-08,
      "logits/chosen": -4.406073093414307,
      "logits/rejected": -4.6901140213012695,
      "logps/chosen": -294.35699462890625,
      "logps/rejected": -233.74594116210938,
      "loss": 0.2669,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 3.295900344848633,
      "rewards/margins": 3.856853485107422,
      "rewards/rejected": -0.5609533786773682,
      "step": 290
    },
    {
      "epoch": 0.86,
      "grad_norm": 1254.3257141110294,
      "learning_rate": 1.1778413569831725e-08,
      "logits/chosen": -4.323153495788574,
      "logits/rejected": -4.481599807739258,
      "logps/chosen": -291.29742431640625,
      "logps/rejected": -254.1814422607422,
      "loss": 0.2695,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": 3.8485710620880127,
      "rewards/margins": 3.8490569591522217,
      "rewards/rejected": -0.0004864335060119629,
      "step": 300
    },
    {
      "epoch": 0.86,
      "eval_logits/chosen": -3.199521064758301,
      "eval_logits/rejected": -3.199521064758301,
      "eval_logps/chosen": -159.83721923828125,
      "eval_logps/rejected": -159.83721923828125,
      "eval_loss": 0.6931471824645996,
      "eval_rewards/accuracies": 0.0,
      "eval_rewards/chosen": -2.1301021575927734,
      "eval_rewards/margins": 0.0,
      "eval_rewards/rejected": -2.1301021575927734,
      "eval_runtime": 1.4681,
      "eval_samples_per_second": 0.681,
      "eval_steps_per_second": 0.681,
      "step": 300
    },
    {
      "epoch": 0.89,
      "grad_norm": 1302.095010227008,
      "learning_rate": 7.516616470096315e-09,
      "logits/chosen": -4.3807525634765625,
      "logits/rejected": -4.600883483886719,
      "logps/chosen": -299.8600158691406,
      "logps/rejected": -248.4993438720703,
      "loss": 0.2545,
      "rewards/accuracies": 0.875,
      "rewards/chosen": 4.215323448181152,
      "rewards/margins": 4.587640762329102,
      "rewards/rejected": -0.3723169267177582,
      "step": 310
    },
    {
      "epoch": 0.92,
      "grad_norm": 875.4939401121867,
      "learning_rate": 4.179819619838454e-09,
      "logits/chosen": -4.2180280685424805,
      "logits/rejected": -4.570239067077637,
      "logps/chosen": -286.0614013671875,
      "logps/rejected": -234.3032684326172,
      "loss": 0.269,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 3.9898929595947266,
      "rewards/margins": 4.13907527923584,
      "rewards/rejected": -0.1491830050945282,
      "step": 320
    },
    {
      "epoch": 0.94,
      "grad_norm": 1010.1848642033616,
      "learning_rate": 1.8013969917777483e-09,
      "logits/chosen": -4.380679130554199,
      "logits/rejected": -4.608093738555908,
      "logps/chosen": -278.8153381347656,
      "logps/rejected": -235.4047088623047,
      "loss": 0.247,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": 3.914651870727539,
      "rewards/margins": 4.33533239364624,
      "rewards/rejected": -0.42068085074424744,
      "step": 330
    },
    {
      "epoch": 0.97,
      "grad_norm": 702.698454631269,
      "learning_rate": 4.051370919176289e-10,
      "logits/chosen": -4.314477920532227,
      "logits/rejected": -4.502932548522949,
      "logps/chosen": -282.89666748046875,
      "logps/rejected": -246.3631134033203,
      "loss": 0.243,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 3.8934428691864014,
      "rewards/margins": 4.308337688446045,
      "rewards/rejected": -0.41489481925964355,
      "step": 340
    },
    {
      "epoch": 1.0,
      "step": 349,
      "total_flos": 0.0,
      "train_loss": 0.30806411576476,
      "train_runtime": 5299.0292,
      "train_samples_per_second": 8.432,
      "train_steps_per_second": 0.066
    }
  ],
  "logging_steps": 10,
  "max_steps": 349,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}