{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
|
{ |
|
"completion_length": 427.15834655761716, |
|
"epoch": 0.002, |
|
"grad_norm": 1.8633235523265679, |
|
"kl": 0.0001178741455078125, |
|
"learning_rate": 4.0000000000000003e-07, |
|
"loss": 0.0, |
|
"reward": 0.6833333551883698, |
|
"reward_std": 0.4267221510410309, |
|
"rewards/accuracy_reward": 0.1875000052154064, |
|
"rewards/format_reward": 0.49583334624767306, |
|
"step": 5 |
|
}, |
|
{ |
|
"completion_length": 408.5041732788086, |
|
"epoch": 0.004, |
|
"grad_norm": 1.2450032484475768, |
|
"kl": 0.00022439956665039064, |
|
"learning_rate": 8.000000000000001e-07, |
|
"loss": 0.0, |
|
"reward": 0.6250000119209289, |
|
"reward_std": 0.452184134721756, |
|
"rewards/accuracy_reward": 0.15416667014360427, |
|
"rewards/format_reward": 0.4708333447575569, |
|
"step": 10 |
|
}, |
|
{ |
|
"completion_length": 386.3708465576172, |
|
"epoch": 0.006, |
|
"grad_norm": 1.5807375329115385, |
|
"kl": 0.0007476806640625, |
|
"learning_rate": 1.2000000000000002e-06, |
|
"loss": 0.0, |
|
"reward": 0.7041666805744171, |
|
"reward_std": 0.4529884338378906, |
|
"rewards/accuracy_reward": 0.20833333991467953, |
|
"rewards/format_reward": 0.4958333492279053, |
|
"step": 15 |
|
}, |
|
{ |
|
"completion_length": 321.80001068115234, |
|
"epoch": 0.008, |
|
"grad_norm": 2.3656753548221094, |
|
"kl": 0.01384429931640625, |
|
"learning_rate": 1.6000000000000001e-06, |
|
"loss": 0.0006, |
|
"reward": 0.8583333551883697, |
|
"reward_std": 0.42067554742097857, |
|
"rewards/accuracy_reward": 0.1666666731238365, |
|
"rewards/format_reward": 0.6916666835546493, |
|
"step": 20 |
|
}, |
|
{ |
|
"completion_length": 283.2541748046875, |
|
"epoch": 0.01, |
|
"grad_norm": 1.752088104129668, |
|
"kl": 0.03404541015625, |
|
"learning_rate": 2.0000000000000003e-06, |
|
"loss": 0.0014, |
|
"reward": 0.9416666984558105, |
|
"reward_std": 0.2924864612519741, |
|
"rewards/accuracy_reward": 0.07916666977107525, |
|
"rewards/format_reward": 0.8625000238418579, |
|
"step": 25 |
|
}, |
|
{ |
|
"completion_length": 231.33334045410157, |
|
"epoch": 0.012, |
|
"grad_norm": 2.4751987426338737, |
|
"kl": 0.05478515625, |
|
"learning_rate": 2.4000000000000003e-06, |
|
"loss": 0.0022, |
|
"reward": 0.9500000298023223, |
|
"reward_std": 0.23476256504654885, |
|
"rewards/accuracy_reward": 0.05000000074505806, |
|
"rewards/format_reward": 0.900000023841858, |
|
"step": 30 |
|
}, |
|
{ |
|
"completion_length": 227.18750915527343, |
|
"epoch": 0.014, |
|
"grad_norm": 1.0730481146387303, |
|
"kl": 0.041900634765625, |
|
"learning_rate": 2.8000000000000003e-06, |
|
"loss": 0.0017, |
|
"reward": 0.979166692495346, |
|
"reward_std": 0.2765915520489216, |
|
"rewards/accuracy_reward": 0.07916666977107525, |
|
"rewards/format_reward": 0.9000000298023224, |
|
"step": 35 |
|
}, |
|
{ |
|
"completion_length": 236.82500762939452, |
|
"epoch": 0.016, |
|
"grad_norm": 2.287053976433467, |
|
"kl": 0.028759765625, |
|
"learning_rate": 3.2000000000000003e-06, |
|
"loss": 0.0011, |
|
"reward": 1.0333333671092988, |
|
"reward_std": 0.30562743321061137, |
|
"rewards/accuracy_reward": 0.12500000409781933, |
|
"rewards/format_reward": 0.9083333551883698, |
|
"step": 40 |
|
}, |
|
{ |
|
"completion_length": 251.8541717529297, |
|
"epoch": 0.018, |
|
"grad_norm": 2.002581508005556, |
|
"kl": 0.02606201171875, |
|
"learning_rate": 3.6000000000000003e-06, |
|
"loss": 0.001, |
|
"reward": 0.9375000238418579, |
|
"reward_std": 0.33919562846422197, |
|
"rewards/accuracy_reward": 0.10000000260770321, |
|
"rewards/format_reward": 0.8375000178813934, |
|
"step": 45 |
|
}, |
|
{ |
|
"completion_length": 211.71667556762696, |
|
"epoch": 0.02, |
|
"grad_norm": 1.260198604571592, |
|
"kl": 0.025537109375, |
|
"learning_rate": 4.000000000000001e-06, |
|
"loss": 0.001, |
|
"reward": 0.9625000476837158, |
|
"reward_std": 0.29876374155282975, |
|
"rewards/accuracy_reward": 0.08333333544433116, |
|
"rewards/format_reward": 0.8791666924953461, |
|
"step": 50 |
|
}, |
|
{ |
|
"completion_length": 164.14583740234374, |
|
"epoch": 0.022, |
|
"grad_norm": 0.8963074266016013, |
|
"kl": 0.04442138671875, |
|
"learning_rate": 4.4e-06, |
|
"loss": 0.0018, |
|
"reward": 1.066666704416275, |
|
"reward_std": 0.2502697631716728, |
|
"rewards/accuracy_reward": 0.11666666939854622, |
|
"rewards/format_reward": 0.9500000178813934, |
|
"step": 55 |
|
}, |
|
{ |
|
"completion_length": 183.88750686645508, |
|
"epoch": 0.024, |
|
"grad_norm": 1.2329398710686466, |
|
"kl": 0.064013671875, |
|
"learning_rate": 4.800000000000001e-06, |
|
"loss": 0.0026, |
|
"reward": 1.025000023841858, |
|
"reward_std": 0.23044276759028434, |
|
"rewards/accuracy_reward": 0.07916666902601718, |
|
"rewards/format_reward": 0.9458333492279053, |
|
"step": 60 |
|
}, |
|
{ |
|
"completion_length": 248.5875045776367, |
|
"epoch": 0.026, |
|
"grad_norm": 0.8205760814794363, |
|
"kl": 0.03978271484375, |
|
"learning_rate": 5.2e-06, |
|
"loss": 0.0016, |
|
"reward": 1.0583333551883698, |
|
"reward_std": 0.20980931594967842, |
|
"rewards/accuracy_reward": 0.10000000335276127, |
|
"rewards/format_reward": 0.9583333551883697, |
|
"step": 65 |
|
}, |
|
{ |
|
"completion_length": 217.50833740234376, |
|
"epoch": 0.028, |
|
"grad_norm": 1.1658067578625744, |
|
"kl": 0.0590087890625, |
|
"learning_rate": 5.600000000000001e-06, |
|
"loss": 0.0024, |
|
"reward": 1.1916666984558106, |
|
"reward_std": 0.2719326362013817, |
|
"rewards/accuracy_reward": 0.2041666742414236, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 70 |
|
}, |
|
{ |
|
"completion_length": 207.8291748046875, |
|
"epoch": 0.03, |
|
"grad_norm": 1.496731257519424, |
|
"kl": 0.0521484375, |
|
"learning_rate": 6e-06, |
|
"loss": 0.0021, |
|
"reward": 1.0708333730697632, |
|
"reward_std": 0.11254597306251526, |
|
"rewards/accuracy_reward": 0.07916666977107525, |
|
"rewards/format_reward": 0.9916666686534882, |
|
"step": 75 |
|
}, |
|
{ |
|
"completion_length": 199.8000045776367, |
|
"epoch": 0.032, |
|
"grad_norm": 1.8251055882491147, |
|
"kl": 0.065869140625, |
|
"learning_rate": 6.4000000000000006e-06, |
|
"loss": 0.0026, |
|
"reward": 1.0916667103767395, |
|
"reward_std": 0.182654220610857, |
|
"rewards/accuracy_reward": 0.10416666865348816, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 80 |
|
}, |
|
{ |
|
"completion_length": 250.7291702270508, |
|
"epoch": 0.034, |
|
"grad_norm": 1.1675575803274054, |
|
"kl": 0.196044921875, |
|
"learning_rate": 6.800000000000001e-06, |
|
"loss": 0.0078, |
|
"reward": 1.2291666984558105, |
|
"reward_std": 0.2886375203728676, |
|
"rewards/accuracy_reward": 0.23750000447034836, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 85 |
|
}, |
|
{ |
|
"completion_length": 375.57500610351565, |
|
"epoch": 0.036, |
|
"grad_norm": 0.6674123847300312, |
|
"kl": 0.06044921875, |
|
"learning_rate": 7.2000000000000005e-06, |
|
"loss": 0.0024, |
|
"reward": 1.062500035762787, |
|
"reward_std": 0.30103383138775824, |
|
"rewards/accuracy_reward": 0.13750000670552254, |
|
"rewards/format_reward": 0.9250000238418579, |
|
"step": 90 |
|
}, |
|
{ |
|
"completion_length": 417.9250091552734, |
|
"epoch": 0.038, |
|
"grad_norm": 0.9023928645148662, |
|
"kl": 0.180029296875, |
|
"learning_rate": 7.600000000000001e-06, |
|
"loss": 0.0072, |
|
"reward": 1.1375000417232513, |
|
"reward_std": 0.3338733673095703, |
|
"rewards/accuracy_reward": 0.18750000782310963, |
|
"rewards/format_reward": 0.9500000238418579, |
|
"step": 95 |
|
}, |
|
{ |
|
"completion_length": 485.583349609375, |
|
"epoch": 0.04, |
|
"grad_norm": 0.9766684287780194, |
|
"kl": 0.089501953125, |
|
"learning_rate": 8.000000000000001e-06, |
|
"loss": 0.0036, |
|
"reward": 1.1083333611488342, |
|
"reward_std": 0.2985624983906746, |
|
"rewards/accuracy_reward": 0.17500000558793544, |
|
"rewards/format_reward": 0.9333333492279052, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"eval_completion_length": 485.7147545447716, |
|
"eval_kl": 0.0946514423076923, |
|
"eval_loss": 0.0033234155271202326, |
|
"eval_reward": 1.195512863305899, |
|
"eval_reward_std": 0.3841831271465008, |
|
"eval_rewards/accuracy_reward": 0.278846161869856, |
|
"eval_rewards/format_reward": 0.916666681949909, |
|
"eval_runtime": 228.106, |
|
"eval_samples_per_second": 0.434, |
|
"eval_steps_per_second": 0.013, |
|
"step": 100 |
|
}, |
|
{ |
|
"completion_length": 545.5041870117187, |
|
"epoch": 0.042, |
|
"grad_norm": 0.7346390959845385, |
|
"kl": 0.0919921875, |
|
"learning_rate": 8.400000000000001e-06, |
|
"loss": 0.0037, |
|
"reward": 1.0458333671092988, |
|
"reward_std": 0.2975893825292587, |
|
"rewards/accuracy_reward": 0.1375000048428774, |
|
"rewards/format_reward": 0.9083333611488342, |
|
"step": 105 |
|
}, |
|
{ |
|
"completion_length": 442.87501525878906, |
|
"epoch": 0.044, |
|
"grad_norm": 0.9100982084517627, |
|
"kl": 0.10302734375, |
|
"learning_rate": 8.8e-06, |
|
"loss": 0.0041, |
|
"reward": 1.1416666865348817, |
|
"reward_std": 0.3882675111293793, |
|
"rewards/accuracy_reward": 0.2583333428949118, |
|
"rewards/format_reward": 0.8833333551883698, |
|
"step": 110 |
|
}, |
|
{ |
|
"completion_length": 436.5208404541016, |
|
"epoch": 0.046, |
|
"grad_norm": 1.0013758124491203, |
|
"kl": 0.079736328125, |
|
"learning_rate": 9.200000000000002e-06, |
|
"loss": 0.0032, |
|
"reward": 0.9875000238418579, |
|
"reward_std": 0.3188182398676872, |
|
"rewards/accuracy_reward": 0.09166666977107525, |
|
"rewards/format_reward": 0.8958333551883697, |
|
"step": 115 |
|
}, |
|
{ |
|
"completion_length": 382.6458435058594, |
|
"epoch": 0.048, |
|
"grad_norm": 0.8780597651919092, |
|
"kl": 0.0803466796875, |
|
"learning_rate": 9.600000000000001e-06, |
|
"loss": 0.0032, |
|
"reward": 1.0708333730697632, |
|
"reward_std": 0.2953009992837906, |
|
"rewards/accuracy_reward": 0.16250000074505805, |
|
"rewards/format_reward": 0.9083333492279053, |
|
"step": 120 |
|
}, |
|
{ |
|
"completion_length": 390.9791778564453, |
|
"epoch": 0.05, |
|
"grad_norm": 0.9115434520496168, |
|
"kl": 0.095361328125, |
|
"learning_rate": 1e-05, |
|
"loss": 0.0038, |
|
"reward": 1.1208333730697633, |
|
"reward_std": 0.38652298897504805, |
|
"rewards/accuracy_reward": 0.22500000819563865, |
|
"rewards/format_reward": 0.8958333492279053, |
|
"step": 125 |
|
}, |
|
{ |
|
"completion_length": 384.08750915527344, |
|
"epoch": 0.052, |
|
"grad_norm": 1.097771502265398, |
|
"kl": 0.194140625, |
|
"learning_rate": 1.04e-05, |
|
"loss": 0.0078, |
|
"reward": 1.2000000536441804, |
|
"reward_std": 0.3203377678990364, |
|
"rewards/accuracy_reward": 0.24166667386889457, |
|
"rewards/format_reward": 0.9583333492279053, |
|
"step": 130 |
|
}, |
|
{ |
|
"completion_length": 339.4041778564453, |
|
"epoch": 0.054, |
|
"grad_norm": 5.345967756487471, |
|
"kl": 0.180859375, |
|
"learning_rate": 1.0800000000000002e-05, |
|
"loss": 0.0072, |
|
"reward": 1.2583333790302276, |
|
"reward_std": 0.3293462932109833, |
|
"rewards/accuracy_reward": 0.3416666749864817, |
|
"rewards/format_reward": 0.9166666865348816, |
|
"step": 135 |
|
}, |
|
{ |
|
"completion_length": 405.9458465576172, |
|
"epoch": 0.056, |
|
"grad_norm": 110.13539552794225, |
|
"kl": 0.408203125, |
|
"learning_rate": 1.1200000000000001e-05, |
|
"loss": 0.0163, |
|
"reward": 1.075000023841858, |
|
"reward_std": 0.40778897404670716, |
|
"rewards/accuracy_reward": 0.21666667386889457, |
|
"rewards/format_reward": 0.8583333551883697, |
|
"step": 140 |
|
}, |
|
{ |
|
"completion_length": 428.1416717529297, |
|
"epoch": 0.058, |
|
"grad_norm": 127.4037667780564, |
|
"kl": 0.7431640625, |
|
"learning_rate": 1.16e-05, |
|
"loss": 0.0297, |
|
"reward": 1.0958333790302277, |
|
"reward_std": 0.4978458255529404, |
|
"rewards/accuracy_reward": 0.2541666742414236, |
|
"rewards/format_reward": 0.8416666924953461, |
|
"step": 145 |
|
}, |
|
{ |
|
"completion_length": 346.7416748046875, |
|
"epoch": 0.06, |
|
"grad_norm": 1.024737259335268, |
|
"kl": 0.44423828125, |
|
"learning_rate": 1.2e-05, |
|
"loss": 0.0178, |
|
"reward": 1.1333333671092987, |
|
"reward_std": 0.3713921308517456, |
|
"rewards/accuracy_reward": 0.2583333425223827, |
|
"rewards/format_reward": 0.8750000178813935, |
|
"step": 150 |
|
}, |
|
{ |
|
"completion_length": 349.40001373291017, |
|
"epoch": 0.062, |
|
"grad_norm": 1.1085565147403205, |
|
"kl": 0.1763671875, |
|
"learning_rate": 1.2400000000000002e-05, |
|
"loss": 0.0071, |
|
"reward": 1.191666704416275, |
|
"reward_std": 0.4108735501766205, |
|
"rewards/accuracy_reward": 0.27916667312383653, |
|
"rewards/format_reward": 0.9125000238418579, |
|
"step": 155 |
|
}, |
|
{ |
|
"completion_length": 410.98334350585935, |
|
"epoch": 0.064, |
|
"grad_norm": 0.9679449380134207, |
|
"kl": 0.24033203125, |
|
"learning_rate": 1.2800000000000001e-05, |
|
"loss": 0.0096, |
|
"reward": 1.0500000238418579, |
|
"reward_std": 0.44029667377471926, |
|
"rewards/accuracy_reward": 0.18333333656191825, |
|
"rewards/format_reward": 0.8666666865348815, |
|
"step": 160 |
|
}, |
|
{ |
|
"completion_length": 440.6083465576172, |
|
"epoch": 0.066, |
|
"grad_norm": 3.6330210669194543, |
|
"kl": 0.3822265625, |
|
"learning_rate": 1.3200000000000002e-05, |
|
"loss": 0.0153, |
|
"reward": 1.337500023841858, |
|
"reward_std": 0.45285614132881163, |
|
"rewards/accuracy_reward": 0.43333334624767306, |
|
"rewards/format_reward": 0.9041666924953461, |
|
"step": 165 |
|
}, |
|
{ |
|
"completion_length": 371.08750915527344, |
|
"epoch": 0.068, |
|
"grad_norm": 1.4615175546831627, |
|
"kl": 16.89931640625, |
|
"learning_rate": 1.3600000000000002e-05, |
|
"loss": 0.6758, |
|
"reward": 0.9250000417232513, |
|
"reward_std": 0.5015424624085426, |
|
"rewards/accuracy_reward": 0.20000000670552254, |
|
"rewards/format_reward": 0.7250000208616256, |
|
"step": 170 |
|
}, |
|
{ |
|
"completion_length": 339.0166717529297, |
|
"epoch": 0.07, |
|
"grad_norm": 2.9473137749285523, |
|
"kl": 0.35244140625, |
|
"learning_rate": 1.4e-05, |
|
"loss": 0.0141, |
|
"reward": 1.154166692495346, |
|
"reward_std": 0.4933777332305908, |
|
"rewards/accuracy_reward": 0.30416667759418486, |
|
"rewards/format_reward": 0.8500000178813935, |
|
"step": 175 |
|
}, |
|
{ |
|
"completion_length": 992.5208557128906, |
|
"epoch": 0.072, |
|
"grad_norm": 4039.653812849788, |
|
"kl": 619.38203125, |
|
"learning_rate": 1.4400000000000001e-05, |
|
"loss": 24.8631, |
|
"reward": 0.5500000134110451, |
|
"reward_std": 0.49858903884887695, |
|
"rewards/accuracy_reward": 0.22083333805203437, |
|
"rewards/format_reward": 0.32916667461395266, |
|
"step": 180 |
|
}, |
|
{ |
|
"completion_length": 1968.1625366210938, |
|
"epoch": 0.074, |
|
"grad_norm": 19.777563892939803, |
|
"kl": 1548.0078125, |
|
"learning_rate": 1.48e-05, |
|
"loss": 61.9522, |
|
"reward": 0.10416666977107525, |
|
"reward_std": 0.18909453824162484, |
|
"rewards/accuracy_reward": 0.08333333544433116, |
|
"rewards/format_reward": 0.02083333395421505, |
|
"step": 185 |
|
}, |
|
{ |
|
"completion_length": 1960.37919921875, |
|
"epoch": 0.076, |
|
"grad_norm": 1.388860761683851, |
|
"kl": 1.40361328125, |
|
"learning_rate": 1.5200000000000002e-05, |
|
"loss": 0.0562, |
|
"reward": 0.08333333730697631, |
|
"reward_std": 0.14930254891514777, |
|
"rewards/accuracy_reward": 0.05000000074505806, |
|
"rewards/format_reward": 0.03333333432674408, |
|
"step": 190 |
|
}, |
|
{ |
|
"completion_length": 1365.8667114257812, |
|
"epoch": 0.078, |
|
"grad_norm": 3.225794828015121, |
|
"kl": 1.9984375, |
|
"learning_rate": 1.5600000000000003e-05, |
|
"loss": 0.0799, |
|
"reward": 0.21666667200624942, |
|
"reward_std": 0.32874541357159615, |
|
"rewards/accuracy_reward": 0.02500000037252903, |
|
"rewards/format_reward": 0.19166667200624943, |
|
"step": 195 |
|
}, |
|
{ |
|
"completion_length": 360.0791717529297, |
|
"epoch": 0.08, |
|
"grad_norm": 1.8347244902773923, |
|
"kl": 23.698828125, |
|
"learning_rate": 1.6000000000000003e-05, |
|
"loss": 0.9454, |
|
"reward": 0.7083333611488343, |
|
"reward_std": 0.5879518806934356, |
|
"rewards/accuracy_reward": 0.10000000186264515, |
|
"rewards/format_reward": 0.6083333551883697, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"eval_completion_length": 283.7147533710186, |
|
"eval_kl": 4.560396634615385, |
|
"eval_loss": 0.19470153748989105, |
|
"eval_reward": 0.9615384982182429, |
|
"eval_reward_std": 0.4933858124109415, |
|
"eval_rewards/accuracy_reward": 0.17147436222204795, |
|
"eval_rewards/format_reward": 0.7900641147906964, |
|
"eval_runtime": 174.8426, |
|
"eval_samples_per_second": 0.566, |
|
"eval_steps_per_second": 0.017, |
|
"step": 200 |
|
}, |
|
{ |
|
"completion_length": 356.27500610351564, |
|
"epoch": 0.082, |
|
"grad_norm": 1.2924069593835132, |
|
"kl": 417792.38505859376, |
|
"learning_rate": 1.64e-05, |
|
"loss": 16711.4297, |
|
"reward": 0.9083333611488342, |
|
"reward_std": 0.38548276126384734, |
|
"rewards/accuracy_reward": 0.09166666939854622, |
|
"rewards/format_reward": 0.8166666805744172, |
|
"step": 205 |
|
}, |
|
{ |
|
"completion_length": 590.3875183105469, |
|
"epoch": 0.084, |
|
"grad_norm": 3.8553857289399875, |
|
"kl": 440014.5115234375, |
|
"learning_rate": 1.6800000000000002e-05, |
|
"loss": 17588.25, |
|
"reward": 0.9541666984558106, |
|
"reward_std": 0.4752687841653824, |
|
"rewards/accuracy_reward": 0.19166667051613331, |
|
"rewards/format_reward": 0.7625000059604645, |
|
"step": 210 |
|
}, |
|
{ |
|
"completion_length": 562.529183959961, |
|
"epoch": 0.086, |
|
"grad_norm": 1118.0087943573474, |
|
"kl": 25.59375, |
|
"learning_rate": 1.72e-05, |
|
"loss": 1.0267, |
|
"reward": 1.0333333730697631, |
|
"reward_std": 0.5306622147560119, |
|
"rewards/accuracy_reward": 0.2458333384245634, |
|
"rewards/format_reward": 0.7875000238418579, |
|
"step": 215 |
|
}, |
|
{ |
|
"completion_length": 472.8041748046875, |
|
"epoch": 0.088, |
|
"grad_norm": 0.7246088323208753, |
|
"kl": 0.5654296875, |
|
"learning_rate": 1.76e-05, |
|
"loss": 0.0226, |
|
"reward": 1.0250000357627869, |
|
"reward_std": 0.3851146653294563, |
|
"rewards/accuracy_reward": 0.16250000521540642, |
|
"rewards/format_reward": 0.8625000238418579, |
|
"step": 220 |
|
}, |
|
{ |
|
"completion_length": 367.68333892822267, |
|
"epoch": 0.09, |
|
"grad_norm": 0.8354974174200616, |
|
"kl": 0.2619140625, |
|
"learning_rate": 1.8e-05, |
|
"loss": 0.0105, |
|
"reward": 1.212500023841858, |
|
"reward_std": 0.2969694033265114, |
|
"rewards/accuracy_reward": 0.29583334140479567, |
|
"rewards/format_reward": 0.9166666746139527, |
|
"step": 225 |
|
}, |
|
{ |
|
"completion_length": 310.6916702270508, |
|
"epoch": 0.092, |
|
"grad_norm": 189.01184940389123, |
|
"kl": 2.4029296875, |
|
"learning_rate": 1.8400000000000003e-05, |
|
"loss": 0.096, |
|
"reward": 1.1291666984558106, |
|
"reward_std": 0.3020160228013992, |
|
"rewards/accuracy_reward": 0.18750000186264515, |
|
"rewards/format_reward": 0.9416666865348816, |
|
"step": 230 |
|
}, |
|
{ |
|
"completion_length": 369.8333404541016, |
|
"epoch": 0.094, |
|
"grad_norm": 37.154199182097, |
|
"kl": 1.755859375, |
|
"learning_rate": 1.88e-05, |
|
"loss": 0.0702, |
|
"reward": 0.9958333671092987, |
|
"reward_std": 0.33207904547452927, |
|
"rewards/accuracy_reward": 0.15000000186264514, |
|
"rewards/format_reward": 0.8458333551883698, |
|
"step": 235 |
|
}, |
|
{ |
|
"completion_length": 461.2375091552734, |
|
"epoch": 0.096, |
|
"grad_norm": 2.2527967247045093, |
|
"kl": 1.1052734375, |
|
"learning_rate": 1.9200000000000003e-05, |
|
"loss": 0.0442, |
|
"reward": 0.9791667044162751, |
|
"reward_std": 0.46096948683261874, |
|
"rewards/accuracy_reward": 0.20000000447034835, |
|
"rewards/format_reward": 0.7791666865348816, |
|
"step": 240 |
|
}, |
|
{ |
|
"completion_length": 445.3166809082031, |
|
"epoch": 0.098, |
|
"grad_norm": 3.932319563336608, |
|
"kl": 4.178515625, |
|
"learning_rate": 1.9600000000000002e-05, |
|
"loss": 0.1673, |
|
"reward": 1.0208333551883697, |
|
"reward_std": 0.45676523745059966, |
|
"rewards/accuracy_reward": 0.18333333656191825, |
|
"rewards/format_reward": 0.8375000178813934, |
|
"step": 245 |
|
}, |
|
{ |
|
"completion_length": 363.8000091552734, |
|
"epoch": 0.1, |
|
"grad_norm": 2.0338776721959007, |
|
"kl": 1.2009765625, |
|
"learning_rate": 2e-05, |
|
"loss": 0.048, |
|
"reward": 1.0333333671092988, |
|
"reward_std": 0.411553718149662, |
|
"rewards/accuracy_reward": 0.15416667126119138, |
|
"rewards/format_reward": 0.8791666924953461, |
|
"step": 250 |
|
}, |
|
{ |
|
"completion_length": 290.4333435058594, |
|
"epoch": 0.102, |
|
"grad_norm": 1.6678034955896959, |
|
"kl": 0.4701171875, |
|
"learning_rate": 1.9999756307053947e-05, |
|
"loss": 0.0188, |
|
"reward": 1.0000000417232513, |
|
"reward_std": 0.30890622958540914, |
|
"rewards/accuracy_reward": 0.1000000037252903, |
|
"rewards/format_reward": 0.9000000119209289, |
|
"step": 255 |
|
}, |
|
{ |
|
"completion_length": 216.17084197998048, |
|
"epoch": 0.104, |
|
"grad_norm": 8.82960885357952, |
|
"kl": 0.9080078125, |
|
"learning_rate": 1.9999025240093045e-05, |
|
"loss": 0.0363, |
|
"reward": 0.8416666924953461, |
|
"reward_std": 0.47480377554893494, |
|
"rewards/accuracy_reward": 0.14166666977107525, |
|
"rewards/format_reward": 0.7000000089406967, |
|
"step": 260 |
|
}, |
|
{ |
|
"completion_length": 222.05417022705078, |
|
"epoch": 0.106, |
|
"grad_norm": 3.461116528554778, |
|
"kl": 0.9873046875, |
|
"learning_rate": 1.9997806834748455e-05, |
|
"loss": 0.0395, |
|
"reward": 1.0333333730697631, |
|
"reward_std": 0.36838603541255, |
|
"rewards/accuracy_reward": 0.19583333618938922, |
|
"rewards/format_reward": 0.8375000119209289, |
|
"step": 265 |
|
}, |
|
{ |
|
"completion_length": 217.8041732788086, |
|
"epoch": 0.108, |
|
"grad_norm": 1.086426317188572, |
|
"kl": 0.4533203125, |
|
"learning_rate": 1.9996101150403543e-05, |
|
"loss": 0.0181, |
|
"reward": 1.0750000298023223, |
|
"reward_std": 0.2211028292775154, |
|
"rewards/accuracy_reward": 0.12916667051613331, |
|
"rewards/format_reward": 0.9458333551883698, |
|
"step": 270 |
|
}, |
|
{ |
|
"completion_length": 210.55833740234374, |
|
"epoch": 0.11, |
|
"grad_norm": 6.780769531887777, |
|
"kl": 1.0130859375, |
|
"learning_rate": 1.999390827019096e-05, |
|
"loss": 0.0406, |
|
"reward": 1.0833333730697632, |
|
"reward_std": 0.2511788114905357, |
|
"rewards/accuracy_reward": 0.13750000521540642, |
|
"rewards/format_reward": 0.9458333611488342, |
|
"step": 275 |
|
}, |
|
{ |
|
"completion_length": 256.6375076293945, |
|
"epoch": 0.112, |
|
"grad_norm": 1.7774790738197452, |
|
"kl": 0.76171875, |
|
"learning_rate": 1.9991228300988586e-05, |
|
"loss": 0.0305, |
|
"reward": 1.1208333671092987, |
|
"reward_std": 0.3549239993095398, |
|
"rewards/accuracy_reward": 0.19583334177732467, |
|
"rewards/format_reward": 0.9250000238418579, |
|
"step": 280 |
|
}, |
|
{ |
|
"completion_length": 272.54583740234375, |
|
"epoch": 0.114, |
|
"grad_norm": 14.821428692684155, |
|
"kl": 1.717578125, |
|
"learning_rate": 1.9988061373414342e-05, |
|
"loss": 0.0688, |
|
"reward": 0.9875000178813934, |
|
"reward_std": 0.3375275403261185, |
|
"rewards/accuracy_reward": 0.10416666902601719, |
|
"rewards/format_reward": 0.8833333492279053, |
|
"step": 285 |
|
}, |
|
{ |
|
"completion_length": 238.64167175292968, |
|
"epoch": 0.116, |
|
"grad_norm": 53.728519178410956, |
|
"kl": 1.67421875, |
|
"learning_rate": 1.9984407641819812e-05, |
|
"loss": 0.0671, |
|
"reward": 1.0708333611488343, |
|
"reward_std": 0.33341423869132997, |
|
"rewards/accuracy_reward": 0.1750000026077032, |
|
"rewards/format_reward": 0.8958333492279053, |
|
"step": 290 |
|
}, |
|
{ |
|
"completion_length": 242.93334197998047, |
|
"epoch": 0.118, |
|
"grad_norm": 2.576799775213784, |
|
"kl": 1.2841796875, |
|
"learning_rate": 1.9980267284282718e-05, |
|
"loss": 0.0514, |
|
"reward": 1.075000035762787, |
|
"reward_std": 0.39172317981719973, |
|
"rewards/accuracy_reward": 0.18750000596046448, |
|
"rewards/format_reward": 0.8875000178813934, |
|
"step": 295 |
|
}, |
|
{ |
|
"completion_length": 232.62500762939453, |
|
"epoch": 0.12, |
|
"grad_norm": 3.9883395042058307, |
|
"kl": 9.513671875, |
|
"learning_rate": 1.9975640502598243e-05, |
|
"loss": 0.3803, |
|
"reward": 0.9750000417232514, |
|
"reward_std": 0.44072358012199403, |
|
"rewards/accuracy_reward": 0.19166667088866235, |
|
"rewards/format_reward": 0.7833333432674408, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"eval_completion_length": 222.86699030949518, |
|
"eval_kl": 1.2294170673076923, |
|
"eval_loss": 0.0528143048286438, |
|
"eval_reward": 1.0256410607924829, |
|
"eval_reward_std": 0.3615907682822301, |
|
"eval_rewards/accuracy_reward": 0.15224359270471793, |
|
"eval_rewards/format_reward": 0.8733974511806781, |
|
"eval_runtime": 198.2465, |
|
"eval_samples_per_second": 0.499, |
|
"eval_steps_per_second": 0.015, |
|
"step": 300 |
|
}, |
|
{ |
|
"completion_length": 207.7291702270508, |
|
"epoch": 0.122, |
|
"grad_norm": 2.620279026401797, |
|
"kl": 2.27109375, |
|
"learning_rate": 1.9970527522269204e-05, |
|
"loss": 0.0908, |
|
"reward": 1.0500000238418579, |
|
"reward_std": 0.29396146908402443, |
|
"rewards/accuracy_reward": 0.1375000048428774, |
|
"rewards/format_reward": 0.9125000238418579, |
|
"step": 305 |
|
}, |
|
{ |
|
"completion_length": 175.1833381652832, |
|
"epoch": 0.124, |
|
"grad_norm": 0.7567148884073016, |
|
"kl": 0.3833984375, |
|
"learning_rate": 1.9964928592495046e-05, |
|
"loss": 0.0153, |
|
"reward": 1.1708333849906922, |
|
"reward_std": 0.15448497384786605, |
|
"rewards/accuracy_reward": 0.1708333395421505, |
|
"rewards/format_reward": 1.0, |
|
"step": 310 |
|
}, |
|
{ |
|
"completion_length": 187.89583892822264, |
|
"epoch": 0.126, |
|
"grad_norm": 1.1365022325057257, |
|
"kl": 0.334765625, |
|
"learning_rate": 1.9958843986159705e-05, |
|
"loss": 0.0134, |
|
"reward": 1.1583333611488342, |
|
"reward_std": 0.16067556515336037, |
|
"rewards/accuracy_reward": 0.15833333805203437, |
|
"rewards/format_reward": 1.0, |
|
"step": 315 |
|
}, |
|
{ |
|
"completion_length": 158.3625015258789, |
|
"epoch": 0.128, |
|
"grad_norm": 1.0991071204841167, |
|
"kl": 0.416796875, |
|
"learning_rate": 1.9952273999818312e-05, |
|
"loss": 0.0167, |
|
"reward": 1.1583333611488342, |
|
"reward_std": 0.17899299263954163, |
|
"rewards/accuracy_reward": 0.16666667237877847, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 320 |
|
}, |
|
{ |
|
"completion_length": 216.79584121704102, |
|
"epoch": 0.13, |
|
"grad_norm": 0.8275882765202969, |
|
"kl": 0.419921875, |
|
"learning_rate": 1.9945218953682736e-05, |
|
"loss": 0.0168, |
|
"reward": 1.0916666984558105, |
|
"reward_std": 0.15066706389188766, |
|
"rewards/accuracy_reward": 0.10000000186264515, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 325 |
|
}, |
|
{ |
|
"completion_length": 258.4791732788086, |
|
"epoch": 0.132, |
|
"grad_norm": 1.1654941388041056, |
|
"kl": 0.3876953125, |
|
"learning_rate": 1.9937679191605964e-05, |
|
"loss": 0.0155, |
|
"reward": 1.087500023841858, |
|
"reward_std": 0.15936369448900223, |
|
"rewards/accuracy_reward": 0.10000000186264515, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 330 |
|
}, |
|
{ |
|
"completion_length": 275.38751220703125, |
|
"epoch": 0.134, |
|
"grad_norm": 21931.258042501762, |
|
"kl": 145.1587890625, |
|
"learning_rate": 1.992965508106537e-05, |
|
"loss": 5.8162, |
|
"reward": 1.054166704416275, |
|
"reward_std": 0.2149122439324856, |
|
"rewards/accuracy_reward": 0.10416666977107525, |
|
"rewards/format_reward": 0.9500000238418579, |
|
"step": 335 |
|
}, |
|
{ |
|
"completion_length": 1149.975033569336, |
|
"epoch": 0.136, |
|
"grad_norm": 398638.02963389497, |
|
"kl": 9480.2162109375, |
|
"learning_rate": 1.9921147013144782e-05, |
|
"loss": 379.039, |
|
"reward": 0.975000011920929, |
|
"reward_std": 0.21539116650819778, |
|
"rewards/accuracy_reward": 0.04583333432674408, |
|
"rewards/format_reward": 0.9291666924953461, |
|
"step": 340 |
|
}, |
|
{ |
|
"completion_length": 509.2916748046875, |
|
"epoch": 0.138, |
|
"grad_norm": 0.33160914978875666, |
|
"kl": 5.3359375, |
|
"learning_rate": 1.991215540251542e-05, |
|
"loss": 0.2131, |
|
"reward": 0.9791667103767395, |
|
"reward_std": 0.2079385258257389, |
|
"rewards/accuracy_reward": 0.03750000037252903, |
|
"rewards/format_reward": 0.9416666865348816, |
|
"step": 345 |
|
}, |
|
{ |
|
"completion_length": 265.97917022705076, |
|
"epoch": 0.14, |
|
"grad_norm": 1.35804758291205, |
|
"kl": 0.4572265625, |
|
"learning_rate": 1.9902680687415704e-05, |
|
"loss": 0.0183, |
|
"reward": 0.954166692495346, |
|
"reward_std": 0.32328550666570666, |
|
"rewards/accuracy_reward": 0.08750000111758709, |
|
"rewards/format_reward": 0.8666666924953461, |
|
"step": 350 |
|
}, |
|
{ |
|
"completion_length": 264.07083587646486, |
|
"epoch": 0.142, |
|
"grad_norm": 0.4918964435152788, |
|
"kl": 0.4173828125, |
|
"learning_rate": 1.9892723329629885e-05, |
|
"loss": 0.0167, |
|
"reward": 0.9791666984558105, |
|
"reward_std": 0.19311015233397483, |
|
"rewards/accuracy_reward": 0.04166666716337204, |
|
"rewards/format_reward": 0.9375000298023224, |
|
"step": 355 |
|
}, |
|
{ |
|
"completion_length": 252.10834350585938, |
|
"epoch": 0.144, |
|
"grad_norm": 0.5874567666668539, |
|
"kl": 0.4216796875, |
|
"learning_rate": 1.988228381446553e-05, |
|
"loss": 0.0169, |
|
"reward": 1.0708333671092987, |
|
"reward_std": 0.16931553408503533, |
|
"rewards/accuracy_reward": 0.08750000186264514, |
|
"rewards/format_reward": 0.9833333373069764, |
|
"step": 360 |
|
}, |
|
{ |
|
"completion_length": 252.58750915527344, |
|
"epoch": 0.146, |
|
"grad_norm": 0.9063762518758791, |
|
"kl": 0.40546875, |
|
"learning_rate": 1.987136265072988e-05, |
|
"loss": 0.0162, |
|
"reward": 1.0833333730697632, |
|
"reward_std": 0.16870678812265397, |
|
"rewards/accuracy_reward": 0.10833333507180214, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 365 |
|
}, |
|
{ |
|
"completion_length": 188.80833892822267, |
|
"epoch": 0.148, |
|
"grad_norm": 0.787412634085505, |
|
"kl": 0.47890625, |
|
"learning_rate": 1.985996037070505e-05, |
|
"loss": 0.0192, |
|
"reward": 1.0583333730697633, |
|
"reward_std": 0.1203794963657856, |
|
"rewards/accuracy_reward": 0.07083333544433117, |
|
"rewards/format_reward": 0.9875000059604645, |
|
"step": 370 |
|
}, |
|
{ |
|
"completion_length": 261.20001220703125, |
|
"epoch": 0.15, |
|
"grad_norm": 311.4303294674164, |
|
"kl": 9.298828125, |
|
"learning_rate": 1.9848077530122083e-05, |
|
"loss": 0.3741, |
|
"reward": 0.9875000357627869, |
|
"reward_std": 0.2795761555433273, |
|
"rewards/accuracy_reward": 0.07916666828095913, |
|
"rewards/format_reward": 0.9083333551883698, |
|
"step": 375 |
|
}, |
|
{ |
|
"completion_length": 609.112515258789, |
|
"epoch": 0.152, |
|
"grad_norm": 3.6435215764773003, |
|
"kl": 1.7125, |
|
"learning_rate": 1.983571470813386e-05, |
|
"loss": 0.0684, |
|
"reward": 0.7833333611488342, |
|
"reward_std": 0.4051990956068039, |
|
"rewards/accuracy_reward": 0.0416666679084301, |
|
"rewards/format_reward": 0.7416666924953461, |
|
"step": 380 |
|
}, |
|
{ |
|
"completion_length": 396.24168395996094, |
|
"epoch": 0.154, |
|
"grad_norm": 2.8787576101456454, |
|
"kl": 0.9396484375, |
|
"learning_rate": 1.982287250728689e-05, |
|
"loss": 0.0376, |
|
"reward": 0.9500000238418579, |
|
"reward_std": 0.3664922297000885, |
|
"rewards/accuracy_reward": 0.10416666902601719, |
|
"rewards/format_reward": 0.8458333492279053, |
|
"step": 385 |
|
}, |
|
{ |
|
"completion_length": 257.5208404541016, |
|
"epoch": 0.156, |
|
"grad_norm": 0.8769009991094786, |
|
"kl": 0.4197265625, |
|
"learning_rate": 1.9809551553491918e-05, |
|
"loss": 0.0168, |
|
"reward": 1.2250000357627868, |
|
"reward_std": 0.22191281020641326, |
|
"rewards/accuracy_reward": 0.2458333395421505, |
|
"rewards/format_reward": 0.9791666746139527, |
|
"step": 390 |
|
}, |
|
{ |
|
"completion_length": 234.5916748046875, |
|
"epoch": 0.158, |
|
"grad_norm": 0.8642430926189166, |
|
"kl": 0.4025390625, |
|
"learning_rate": 1.979575249599344e-05, |
|
"loss": 0.0161, |
|
"reward": 0.9750000298023224, |
|
"reward_std": 0.3880663111805916, |
|
"rewards/accuracy_reward": 0.1750000085681677, |
|
"rewards/format_reward": 0.8000000178813934, |
|
"step": 395 |
|
}, |
|
{ |
|
"completion_length": 195.85000457763672, |
|
"epoch": 0.16, |
|
"grad_norm": 1.1058693984901085, |
|
"kl": 0.441796875, |
|
"learning_rate": 1.9781476007338058e-05, |
|
"loss": 0.0177, |
|
"reward": 1.079166704416275, |
|
"reward_std": 0.21014036387205123, |
|
"rewards/accuracy_reward": 0.11666666753590108, |
|
"rewards/format_reward": 0.9625000119209289, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"eval_completion_length": 170.86378596379205, |
|
"eval_kl": 0.44906850961538464, |
|
"eval_loss": 0.01773521862924099, |
|
"eval_reward": 1.054487210053664, |
|
"eval_reward_std": 0.11891960610563938, |
|
"eval_rewards/accuracy_reward": 0.06410256615624978, |
|
"eval_rewards/format_reward": 0.9903846245545608, |
|
"eval_runtime": 97.7782, |
|
"eval_samples_per_second": 1.012, |
|
"eval_steps_per_second": 0.031, |
|
"step": 400 |
|
}, |
|
{ |
|
"completion_length": 151.82083740234376, |
|
"epoch": 0.162, |
|
"grad_norm": 0.8251618816845695, |
|
"kl": 0.5263671875, |
|
"learning_rate": 1.9766722783341682e-05, |
|
"loss": 0.0211, |
|
"reward": 1.037500023841858, |
|
"reward_std": 0.08976087272167206, |
|
"rewards/accuracy_reward": 0.04166666753590107, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 405 |
|
}, |
|
{ |
|
"completion_length": 145.5250045776367, |
|
"epoch": 0.164, |
|
"grad_norm": 0.18821333790127082, |
|
"kl": 0.5125, |
|
"learning_rate": 1.9751493543055634e-05, |
|
"loss": 0.0205, |
|
"reward": 1.0458333611488342, |
|
"reward_std": 0.13215193450450896, |
|
"rewards/accuracy_reward": 0.05833333544433117, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 410 |
|
}, |
|
{ |
|
"completion_length": 209.22500610351562, |
|
"epoch": 0.166, |
|
"grad_norm": 0.540401444429271, |
|
"kl": 0.456640625, |
|
"learning_rate": 1.9735789028731603e-05, |
|
"loss": 0.0183, |
|
"reward": 1.0708333849906921, |
|
"reward_std": 0.1970970779657364, |
|
"rewards/accuracy_reward": 0.0958333346992731, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 415 |
|
}, |
|
{ |
|
"completion_length": 235.44583892822266, |
|
"epoch": 0.168, |
|
"grad_norm": 1.4057030696513535, |
|
"kl": 0.6052734375, |
|
"learning_rate": 1.9719610005785466e-05, |
|
"loss": 0.0242, |
|
"reward": 1.0291667044162751, |
|
"reward_std": 0.2490410476922989, |
|
"rewards/accuracy_reward": 0.11250000596046447, |
|
"rewards/format_reward": 0.916666692495346, |
|
"step": 420 |
|
}, |
|
{ |
|
"completion_length": 371.56251220703126, |
|
"epoch": 0.17, |
|
"grad_norm": 1.8260065247232258, |
|
"kl": 1.516015625, |
|
"learning_rate": 1.9702957262759964e-05, |
|
"loss": 0.0607, |
|
"reward": 0.9250000298023224, |
|
"reward_std": 0.4401633307337761, |
|
"rewards/accuracy_reward": 0.12083333618938923, |
|
"rewards/format_reward": 0.8041666924953461, |
|
"step": 425 |
|
}, |
|
{ |
|
"completion_length": 528.0041763305665, |
|
"epoch": 0.172, |
|
"grad_norm": 1.0316236154420109, |
|
"kl": 0.641015625, |
|
"learning_rate": 1.9685831611286312e-05, |
|
"loss": 0.0257, |
|
"reward": 0.8958333611488343, |
|
"reward_std": 0.34284951016306875, |
|
"rewards/accuracy_reward": 0.07083333469927311, |
|
"rewards/format_reward": 0.8250000178813934, |
|
"step": 430 |
|
}, |
|
{ |
|
"completion_length": 241.72084197998046, |
|
"epoch": 0.174, |
|
"grad_norm": 1.3580722638101046, |
|
"kl": 0.476171875, |
|
"learning_rate": 1.9668233886044597e-05, |
|
"loss": 0.0191, |
|
"reward": 1.137500035762787, |
|
"reward_std": 0.26374180018901827, |
|
"rewards/accuracy_reward": 0.1666666716337204, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 435 |
|
}, |
|
{ |
|
"completion_length": 224.58750610351564, |
|
"epoch": 0.176, |
|
"grad_norm": 1.020250043379181, |
|
"kl": 0.5072265625, |
|
"learning_rate": 1.9650164944723116e-05, |
|
"loss": 0.0203, |
|
"reward": 1.0666667103767395, |
|
"reward_std": 0.12149013355374336, |
|
"rewards/accuracy_reward": 0.07083333544433117, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 440 |
|
}, |
|
{ |
|
"completion_length": 229.78334197998046, |
|
"epoch": 0.178, |
|
"grad_norm": 0.5657766777808181, |
|
"kl": 0.5259765625, |
|
"learning_rate": 1.9631625667976584e-05, |
|
"loss": 0.021, |
|
"reward": 1.0666667222976685, |
|
"reward_std": 0.201721428334713, |
|
"rewards/accuracy_reward": 0.09166666865348816, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 445 |
|
}, |
|
{ |
|
"completion_length": 318.9833419799805, |
|
"epoch": 0.18, |
|
"grad_norm": 0.823275318702485, |
|
"kl": 0.48515625, |
|
"learning_rate": 1.961261695938319e-05, |
|
"loss": 0.0194, |
|
"reward": 1.1000000476837157, |
|
"reward_std": 0.22587689757347107, |
|
"rewards/accuracy_reward": 0.12500000409781933, |
|
"rewards/format_reward": 0.9750000059604644, |
|
"step": 450 |
|
}, |
|
{ |
|
"completion_length": 703.1000152587891, |
|
"epoch": 0.182, |
|
"grad_norm": 398.7784621859931, |
|
"kl": 160.9056640625, |
|
"learning_rate": 1.9593139745400575e-05, |
|
"loss": 6.4618, |
|
"reward": 1.1000000298023225, |
|
"reward_std": 0.256098273396492, |
|
"rewards/accuracy_reward": 0.1708333373069763, |
|
"rewards/format_reward": 0.9291666805744171, |
|
"step": 455 |
|
}, |
|
{ |
|
"completion_length": 1592.820848083496, |
|
"epoch": 0.184, |
|
"grad_norm": 0.7205262218883979, |
|
"kl": 3.237109375, |
|
"learning_rate": 1.9573194975320672e-05, |
|
"loss": 0.1296, |
|
"reward": 0.9125000298023224, |
|
"reward_std": 0.3876332946121693, |
|
"rewards/accuracy_reward": 0.07500000186264515, |
|
"rewards/format_reward": 0.8375000178813934, |
|
"step": 460 |
|
}, |
|
{ |
|
"completion_length": 242.61250915527344, |
|
"epoch": 0.186, |
|
"grad_norm": 0.5497982888539013, |
|
"kl": 0.45546875, |
|
"learning_rate": 1.9552783621223437e-05, |
|
"loss": 0.0182, |
|
"reward": 1.0958333611488342, |
|
"reward_std": 0.2071055732667446, |
|
"rewards/accuracy_reward": 0.10833333618938923, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 465 |
|
}, |
|
{ |
|
"completion_length": 240.020841217041, |
|
"epoch": 0.188, |
|
"grad_norm": 0.7431928948219811, |
|
"kl": 0.4419921875, |
|
"learning_rate": 1.9531906677929472e-05, |
|
"loss": 0.0177, |
|
"reward": 1.0583333849906922, |
|
"reward_std": 0.10746954828500747, |
|
"rewards/accuracy_reward": 0.06666666902601719, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 470 |
|
}, |
|
{ |
|
"completion_length": 265.0541763305664, |
|
"epoch": 0.19, |
|
"grad_norm": 0.7124969155812837, |
|
"kl": 0.394921875, |
|
"learning_rate": 1.9510565162951538e-05, |
|
"loss": 0.0158, |
|
"reward": 1.054166716337204, |
|
"reward_std": 0.16784698963165284, |
|
"rewards/accuracy_reward": 0.07916666865348816, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 475 |
|
}, |
|
{ |
|
"completion_length": 324.0500061035156, |
|
"epoch": 0.192, |
|
"grad_norm": 0.6002616441993323, |
|
"kl": 0.4072265625, |
|
"learning_rate": 1.9488760116444966e-05, |
|
"loss": 0.0163, |
|
"reward": 1.100000023841858, |
|
"reward_std": 0.23401278182864188, |
|
"rewards/accuracy_reward": 0.1541666679084301, |
|
"rewards/format_reward": 0.9458333432674408, |
|
"step": 480 |
|
}, |
|
{ |
|
"completion_length": 310.5416778564453, |
|
"epoch": 0.194, |
|
"grad_norm": 0.623552896500724, |
|
"kl": 0.386328125, |
|
"learning_rate": 1.9466492601156964e-05, |
|
"loss": 0.0154, |
|
"reward": 1.0958333730697631, |
|
"reward_std": 0.1777988240122795, |
|
"rewards/accuracy_reward": 0.10833333656191826, |
|
"rewards/format_reward": 0.9875000059604645, |
|
"step": 485 |
|
}, |
|
{ |
|
"completion_length": 269.23750762939454, |
|
"epoch": 0.196, |
|
"grad_norm": 0.18932160471227394, |
|
"kl": 0.45546875, |
|
"learning_rate": 1.944376370237481e-05, |
|
"loss": 0.0182, |
|
"reward": 1.1000000357627868, |
|
"reward_std": 0.12386635020375251, |
|
"rewards/accuracy_reward": 0.10416666865348816, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 490 |
|
}, |
|
{ |
|
"completion_length": 279.84167327880857, |
|
"epoch": 0.198, |
|
"grad_norm": 0.7399782553975255, |
|
"kl": 0.4009765625, |
|
"learning_rate": 1.942057452787297e-05, |
|
"loss": 0.0161, |
|
"reward": 1.1041666865348816, |
|
"reward_std": 0.18267755210399628, |
|
"rewards/accuracy_reward": 0.10833333544433117, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 495 |
|
}, |
|
{ |
|
"completion_length": 417.3166763305664, |
|
"epoch": 0.2, |
|
"grad_norm": 0.7406681135160039, |
|
"kl": 0.437890625, |
|
"learning_rate": 1.9396926207859085e-05, |
|
"loss": 0.0175, |
|
"reward": 1.0125000298023223, |
|
"reward_std": 0.21925573498010636, |
|
"rewards/accuracy_reward": 0.07500000223517418, |
|
"rewards/format_reward": 0.9375000238418579, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"eval_completion_length": 388.1218050443209, |
|
"eval_kl": 0.3545673076923077, |
|
"eval_loss": 0.014274503104388714, |
|
"eval_reward": 1.0881410516225374, |
|
"eval_reward_std": 0.2549985142854544, |
|
"eval_rewards/accuracy_reward": 0.13141025946690485, |
|
"eval_rewards/format_reward": 0.9567307829856873, |
|
"eval_runtime": 229.97, |
|
"eval_samples_per_second": 0.43, |
|
"eval_steps_per_second": 0.013, |
|
"step": 500 |
|
}, |
|
{ |
|
"completion_length": 383.04168090820315, |
|
"epoch": 0.202, |
|
"grad_norm": 1.0637146992798727, |
|
"kl": 0.3994140625, |
|
"learning_rate": 1.937281989491892e-05, |
|
"loss": 0.016, |
|
"reward": 1.054166704416275, |
|
"reward_std": 0.26914927959442136, |
|
"rewards/accuracy_reward": 0.10416666939854621, |
|
"rewards/format_reward": 0.9500000238418579, |
|
"step": 505 |
|
}, |
|
{ |
|
"completion_length": 388.8250076293945, |
|
"epoch": 0.204, |
|
"grad_norm": 0.7997193167799139, |
|
"kl": 0.3890625, |
|
"learning_rate": 1.9348256763960146e-05, |
|
"loss": 0.0156, |
|
"reward": 1.041666704416275, |
|
"reward_std": 0.21228634268045427, |
|
"rewards/accuracy_reward": 0.09583333507180214, |
|
"rewards/format_reward": 0.9458333432674408, |
|
"step": 510 |
|
}, |
|
{ |
|
"completion_length": 307.4666748046875, |
|
"epoch": 0.206, |
|
"grad_norm": 0.8574230086065704, |
|
"kl": 0.436328125, |
|
"learning_rate": 1.9323238012155125e-05, |
|
"loss": 0.0175, |
|
"reward": 1.083333384990692, |
|
"reward_std": 0.25866183936595916, |
|
"rewards/accuracy_reward": 0.12083333656191826, |
|
"rewards/format_reward": 0.962500023841858, |
|
"step": 515 |
|
}, |
|
{ |
|
"completion_length": 367.52500915527344, |
|
"epoch": 0.208, |
|
"grad_norm": 0.6605626143593909, |
|
"kl": 0.404296875, |
|
"learning_rate": 1.9297764858882516e-05, |
|
"loss": 0.0162, |
|
"reward": 1.0375000476837157, |
|
"reward_std": 0.2624797485768795, |
|
"rewards/accuracy_reward": 0.11666667200624943, |
|
"rewards/format_reward": 0.9208333492279053, |
|
"step": 520 |
|
}, |
|
{ |
|
"completion_length": 393.74167785644534, |
|
"epoch": 0.21, |
|
"grad_norm": 0.8191056592078524, |
|
"kl": 0.36240234375, |
|
"learning_rate": 1.9271838545667876e-05, |
|
"loss": 0.0145, |
|
"reward": 1.1041667103767394, |
|
"reward_std": 0.2263012297451496, |
|
"rewards/accuracy_reward": 0.1375000048428774, |
|
"rewards/format_reward": 0.9666666865348816, |
|
"step": 525 |
|
}, |
|
{ |
|
"completion_length": 371.56251220703126, |
|
"epoch": 0.212, |
|
"grad_norm": 0.7002256272476179, |
|
"kl": 0.380859375, |
|
"learning_rate": 1.9245460336123136e-05, |
|
"loss": 0.0152, |
|
"reward": 1.1833333611488341, |
|
"reward_std": 0.2313358962535858, |
|
"rewards/accuracy_reward": 0.20000000335276127, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 530 |
|
}, |
|
{ |
|
"completion_length": 342.72084655761716, |
|
"epoch": 0.214, |
|
"grad_norm": 0.6239304105044072, |
|
"kl": 0.3921875, |
|
"learning_rate": 1.9218631515885007e-05, |
|
"loss": 0.0157, |
|
"reward": 1.1041666984558105, |
|
"reward_std": 0.15337080582976342, |
|
"rewards/accuracy_reward": 0.1208333358168602, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 535 |
|
}, |
|
{ |
|
"completion_length": 361.2583404541016, |
|
"epoch": 0.216, |
|
"grad_norm": 0.6914248861950051, |
|
"kl": 0.33828125, |
|
"learning_rate": 1.9191353392552346e-05, |
|
"loss": 0.0135, |
|
"reward": 1.0375000357627868, |
|
"reward_std": 0.1918194182217121, |
|
"rewards/accuracy_reward": 0.07083333395421505, |
|
"rewards/format_reward": 0.9666666805744171, |
|
"step": 540 |
|
}, |
|
{ |
|
"completion_length": 282.5916748046875, |
|
"epoch": 0.218, |
|
"grad_norm": 0.40052866572019247, |
|
"kl": 0.369921875, |
|
"learning_rate": 1.9163627295622397e-05, |
|
"loss": 0.0148, |
|
"reward": 1.112500047683716, |
|
"reward_std": 0.23641232177615165, |
|
"rewards/accuracy_reward": 0.12916666977107524, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 545 |
|
}, |
|
{ |
|
"completion_length": 264.9500106811523, |
|
"epoch": 0.22, |
|
"grad_norm": 0.6866747301077355, |
|
"kl": 0.3935546875, |
|
"learning_rate": 1.913545457642601e-05, |
|
"loss": 0.0157, |
|
"reward": 1.0791667222976684, |
|
"reward_std": 0.18432048112154006, |
|
"rewards/accuracy_reward": 0.09166666939854622, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 550 |
|
}, |
|
{ |
|
"completion_length": 259.1375076293945, |
|
"epoch": 0.222, |
|
"grad_norm": 0.44464019735481025, |
|
"kl": 0.39765625, |
|
"learning_rate": 1.910683660806177e-05, |
|
"loss": 0.0159, |
|
"reward": 1.025000023841858, |
|
"reward_std": 0.08862337395548821, |
|
"rewards/accuracy_reward": 0.03750000037252903, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 555 |
|
}, |
|
{ |
|
"completion_length": 251.38333740234376, |
|
"epoch": 0.224, |
|
"grad_norm": 0.535685767278896, |
|
"kl": 0.3630859375, |
|
"learning_rate": 1.907777478532909e-05, |
|
"loss": 0.0145, |
|
"reward": 1.0041666984558106, |
|
"reward_std": 0.11558076366782188, |
|
"rewards/accuracy_reward": 0.03333333432674408, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 560 |
|
}, |
|
{ |
|
"completion_length": 176.72083587646483, |
|
"epoch": 0.226, |
|
"grad_norm": 1.253081808635552, |
|
"kl": 0.3888671875, |
|
"learning_rate": 1.9048270524660197e-05, |
|
"loss": 0.0156, |
|
"reward": 1.0708333909511567, |
|
"reward_std": 0.1375594198703766, |
|
"rewards/accuracy_reward": 0.08750000409781933, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 565 |
|
}, |
|
{ |
|
"completion_length": 219.94584045410156, |
|
"epoch": 0.228, |
|
"grad_norm": 0.7666359774040074, |
|
"kl": 0.35390625, |
|
"learning_rate": 1.901832526405114e-05, |
|
"loss": 0.0141, |
|
"reward": 1.0250000298023223, |
|
"reward_std": 0.22138405814766884, |
|
"rewards/accuracy_reward": 0.08333333507180214, |
|
"rewards/format_reward": 0.9416666865348816, |
|
"step": 570 |
|
}, |
|
{ |
|
"completion_length": 325.93750915527346, |
|
"epoch": 0.23, |
|
"grad_norm": 0.6205649765301138, |
|
"kl": 0.36328125, |
|
"learning_rate": 1.8987940462991673e-05, |
|
"loss": 0.0145, |
|
"reward": 0.975000011920929, |
|
"reward_std": 0.19853876382112504, |
|
"rewards/accuracy_reward": 0.03750000074505806, |
|
"rewards/format_reward": 0.9375000238418579, |
|
"step": 575 |
|
}, |
|
{ |
|
"completion_length": 335.4583450317383, |
|
"epoch": 0.232, |
|
"grad_norm": 0.3814987875030443, |
|
"kl": 0.3525390625, |
|
"learning_rate": 1.895711760239413e-05, |
|
"loss": 0.0141, |
|
"reward": 0.9916666924953461, |
|
"reward_std": 0.15066706538200378, |
|
"rewards/accuracy_reward": 0.04166666828095913, |
|
"rewards/format_reward": 0.9500000238418579, |
|
"step": 580 |
|
}, |
|
{ |
|
"completion_length": 207.9416732788086, |
|
"epoch": 0.234, |
|
"grad_norm": 0.5782975897753057, |
|
"kl": 0.3814453125, |
|
"learning_rate": 1.892585818452126e-05, |
|
"loss": 0.0153, |
|
"reward": 1.0375000357627868, |
|
"reward_std": 0.12116261571645737, |
|
"rewards/accuracy_reward": 0.05000000186264515, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 585 |
|
}, |
|
{ |
|
"completion_length": 150.35000305175782, |
|
"epoch": 0.236, |
|
"grad_norm": 0.8439037056084903, |
|
"kl": 0.422265625, |
|
"learning_rate": 1.889416373291298e-05, |
|
"loss": 0.0169, |
|
"reward": 1.100000023841858, |
|
"reward_std": 0.1805025689303875, |
|
"rewards/accuracy_reward": 0.10833333656191826, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 590 |
|
}, |
|
{ |
|
"completion_length": 94.01666870117188, |
|
"epoch": 0.238, |
|
"grad_norm": 0.7394663774383612, |
|
"kl": 0.395703125, |
|
"learning_rate": 1.8862035792312148e-05, |
|
"loss": 0.0158, |
|
"reward": 1.0750000119209289, |
|
"reward_std": 0.16469117775559425, |
|
"rewards/accuracy_reward": 0.09166666716337205, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 595 |
|
}, |
|
{ |
|
"completion_length": 77.09166946411133, |
|
"epoch": 0.24, |
|
"grad_norm": 0.9471876589521598, |
|
"kl": 0.4751953125, |
|
"learning_rate": 1.8829475928589272e-05, |
|
"loss": 0.019, |
|
"reward": 1.0708333611488343, |
|
"reward_std": 0.1359931766986847, |
|
"rewards/accuracy_reward": 0.07500000186264515, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"eval_completion_length": 124.43750528188852, |
|
"eval_kl": 0.4358473557692308, |
|
"eval_loss": 0.017942963168025017, |
|
"eval_reward": 1.0528846566493695, |
|
"eval_reward_std": 0.15011845901608467, |
|
"eval_rewards/accuracy_reward": 0.07692307935884365, |
|
"eval_rewards/format_reward": 0.9759615522164565, |
|
"eval_runtime": 169.086, |
|
"eval_samples_per_second": 0.586, |
|
"eval_steps_per_second": 0.018, |
|
"step": 600 |
|
}, |
|
{ |
|
"completion_length": 100.78750305175781, |
|
"epoch": 0.242, |
|
"grad_norm": 1.1299382774251023, |
|
"kl": 0.448046875, |
|
"learning_rate": 1.879648572866617e-05, |
|
"loss": 0.0179, |
|
"reward": 1.0416666865348816, |
|
"reward_std": 0.07013157680630684, |
|
"rewards/accuracy_reward": 0.04166666753590107, |
|
"rewards/format_reward": 1.0, |
|
"step": 605 |
|
}, |
|
{ |
|
"completion_length": 226.15834045410156, |
|
"epoch": 0.244, |
|
"grad_norm": 0.658103597322785, |
|
"kl": 0.474609375, |
|
"learning_rate": 1.8763066800438638e-05, |
|
"loss": 0.019, |
|
"reward": 0.9958333551883698, |
|
"reward_std": 0.1575162321329117, |
|
"rewards/accuracy_reward": 0.05833333544433117, |
|
"rewards/format_reward": 0.9375000119209289, |
|
"step": 610 |
|
}, |
|
{ |
|
"completion_length": 134.96250534057617, |
|
"epoch": 0.246, |
|
"grad_norm": 0.19951858933164002, |
|
"kl": 0.413671875, |
|
"learning_rate": 1.8729220772698096e-05, |
|
"loss": 0.0165, |
|
"reward": 1.0500000298023224, |
|
"reward_std": 0.11287702694535255, |
|
"rewards/accuracy_reward": 0.06250000186264515, |
|
"rewards/format_reward": 0.9875000059604645, |
|
"step": 615 |
|
}, |
|
{ |
|
"completion_length": 116.44583549499512, |
|
"epoch": 0.248, |
|
"grad_norm": 0.3702693586956776, |
|
"kl": 0.4185546875, |
|
"learning_rate": 1.869494929505219e-05, |
|
"loss": 0.0168, |
|
"reward": 1.0666666984558106, |
|
"reward_std": 0.09455960318446159, |
|
"rewards/accuracy_reward": 0.0750000026077032, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 620 |
|
}, |
|
{ |
|
"completion_length": 200.26667251586915, |
|
"epoch": 0.25, |
|
"grad_norm": 0.2852776482719371, |
|
"kl": 0.3765625, |
|
"learning_rate": 1.866025403784439e-05, |
|
"loss": 0.0151, |
|
"reward": 1.0041666984558106, |
|
"reward_std": 0.11226474940776825, |
|
"rewards/accuracy_reward": 0.03333333395421505, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 625 |
|
}, |
|
{ |
|
"completion_length": 215.9041748046875, |
|
"epoch": 0.252, |
|
"grad_norm": 0.5976353205994976, |
|
"kl": 0.3591796875, |
|
"learning_rate": 1.8625136692072577e-05, |
|
"loss": 0.0144, |
|
"reward": 1.0291666865348816, |
|
"reward_std": 0.1262888640165329, |
|
"rewards/accuracy_reward": 0.05416666865348816, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 630 |
|
}, |
|
{ |
|
"completion_length": 190.21250610351564, |
|
"epoch": 0.254, |
|
"grad_norm": 1.1842051623519905, |
|
"kl": 0.8265625, |
|
"learning_rate": 1.8589598969306646e-05, |
|
"loss": 0.0332, |
|
"reward": 1.0500000238418579, |
|
"reward_std": 0.24186472296714784, |
|
"rewards/accuracy_reward": 0.10000000223517418, |
|
"rewards/format_reward": 0.9500000238418579, |
|
"step": 635 |
|
}, |
|
{ |
|
"completion_length": 147.13750305175782, |
|
"epoch": 0.256, |
|
"grad_norm": 1.0641861499505445, |
|
"kl": 0.3560546875, |
|
"learning_rate": 1.855364260160507e-05, |
|
"loss": 0.0142, |
|
"reward": 0.9791666805744171, |
|
"reward_std": 0.19374003261327744, |
|
"rewards/accuracy_reward": 0.04166666716337204, |
|
"rewards/format_reward": 0.9375000059604645, |
|
"step": 640 |
|
}, |
|
{ |
|
"completion_length": 123.84167022705078, |
|
"epoch": 0.258, |
|
"grad_norm": 0.4937714387665741, |
|
"kl": 0.373828125, |
|
"learning_rate": 1.851726934143048e-05, |
|
"loss": 0.0149, |
|
"reward": 1.108333373069763, |
|
"reward_std": 0.12118594124913215, |
|
"rewards/accuracy_reward": 0.11666667014360428, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 645 |
|
}, |
|
{ |
|
"completion_length": 131.0833366394043, |
|
"epoch": 0.26, |
|
"grad_norm": 1.113245608082879, |
|
"kl": 0.362109375, |
|
"learning_rate": 1.848048096156426e-05, |
|
"loss": 0.0145, |
|
"reward": 1.0375000357627868, |
|
"reward_std": 0.13965441212058066, |
|
"rewards/accuracy_reward": 0.0541666679084301, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 650 |
|
}, |
|
{ |
|
"completion_length": 231.68333740234374, |
|
"epoch": 0.262, |
|
"grad_norm": 0.8526891285926977, |
|
"kl": 0.376953125, |
|
"learning_rate": 1.8443279255020153e-05, |
|
"loss": 0.0151, |
|
"reward": 1.0333333551883697, |
|
"reward_std": 0.1361908882856369, |
|
"rewards/accuracy_reward": 0.05416666753590107, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 655 |
|
}, |
|
{ |
|
"completion_length": 415.9833465576172, |
|
"epoch": 0.264, |
|
"grad_norm": 0.45480441770598673, |
|
"kl": 0.3181640625, |
|
"learning_rate": 1.8405666034956842e-05, |
|
"loss": 0.0127, |
|
"reward": 0.9791666865348816, |
|
"reward_std": 0.23286528065800666, |
|
"rewards/accuracy_reward": 0.0541666679084301, |
|
"rewards/format_reward": 0.9250000178813934, |
|
"step": 660 |
|
}, |
|
{ |
|
"completion_length": 267.25000762939453, |
|
"epoch": 0.266, |
|
"grad_norm": 0.6844922760236942, |
|
"kl": 0.333203125, |
|
"learning_rate": 1.836764313458962e-05, |
|
"loss": 0.0133, |
|
"reward": 1.0416666865348816, |
|
"reward_std": 0.17620573043823243, |
|
"rewards/accuracy_reward": 0.0666666679084301, |
|
"rewards/format_reward": 0.975000011920929, |
|
"step": 665 |
|
}, |
|
{ |
|
"completion_length": 205.49167175292968, |
|
"epoch": 0.268, |
|
"grad_norm": 0.4714832811705634, |
|
"kl": 0.3427734375, |
|
"learning_rate": 1.8329212407100996e-05, |
|
"loss": 0.0137, |
|
"reward": 1.0583333611488341, |
|
"reward_std": 0.09054399207234383, |
|
"rewards/accuracy_reward": 0.06666666753590107, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 670 |
|
}, |
|
{ |
|
"completion_length": 189.8000061035156, |
|
"epoch": 0.27, |
|
"grad_norm": 1.2221084115892056, |
|
"kl": 0.383984375, |
|
"learning_rate": 1.8290375725550417e-05, |
|
"loss": 0.0154, |
|
"reward": 1.0541667103767396, |
|
"reward_std": 0.14698250517249106, |
|
"rewards/accuracy_reward": 0.06666666865348816, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 675 |
|
}, |
|
{ |
|
"completion_length": 159.6666717529297, |
|
"epoch": 0.272, |
|
"grad_norm": 0.4965291293623947, |
|
"kl": 0.3876953125, |
|
"learning_rate": 1.8251134982782952e-05, |
|
"loss": 0.0155, |
|
"reward": 1.0625000238418578, |
|
"reward_std": 0.11656158864498138, |
|
"rewards/accuracy_reward": 0.07083333469927311, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 680 |
|
}, |
|
{ |
|
"completion_length": 151.78333740234376, |
|
"epoch": 0.274, |
|
"grad_norm": 0.7900759935925377, |
|
"kl": 0.4359375, |
|
"learning_rate": 1.821149209133704e-05, |
|
"loss": 0.0174, |
|
"reward": 1.0708333611488343, |
|
"reward_std": 0.14890312701463698, |
|
"rewards/accuracy_reward": 0.09166666865348816, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 685 |
|
}, |
|
{ |
|
"completion_length": 228.40834121704103, |
|
"epoch": 0.276, |
|
"grad_norm": 1.0352924993296302, |
|
"kl": 0.448828125, |
|
"learning_rate": 1.8171448983351284e-05, |
|
"loss": 0.018, |
|
"reward": 1.0291666865348816, |
|
"reward_std": 0.2831733852624893, |
|
"rewards/accuracy_reward": 0.09583333693444729, |
|
"rewards/format_reward": 0.9333333492279052, |
|
"step": 690 |
|
}, |
|
{ |
|
"completion_length": 266.6416732788086, |
|
"epoch": 0.278, |
|
"grad_norm": 0.4014065877432687, |
|
"kl": 0.3525390625, |
|
"learning_rate": 1.8131007610470278e-05, |
|
"loss": 0.0141, |
|
"reward": 1.0291667044162751, |
|
"reward_std": 0.21711408495903015, |
|
"rewards/accuracy_reward": 0.0791666679084301, |
|
"rewards/format_reward": 0.950000011920929, |
|
"step": 695 |
|
}, |
|
{ |
|
"completion_length": 231.82084197998046, |
|
"epoch": 0.28, |
|
"grad_norm": 0.4138240924988841, |
|
"kl": 0.387109375, |
|
"learning_rate": 1.8090169943749477e-05, |
|
"loss": 0.0155, |
|
"reward": 1.0291667103767395, |
|
"reward_std": 0.1521087557077408, |
|
"rewards/accuracy_reward": 0.06666666865348816, |
|
"rewards/format_reward": 0.9625000119209289, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"eval_completion_length": 198.6218003493089, |
|
"eval_kl": 0.3661358173076923, |
|
"eval_loss": 0.014910033904016018, |
|
"eval_reward": 1.049679513160999, |
|
"eval_reward_std": 0.18441820717774904, |
|
"eval_rewards/accuracy_reward": 0.08012820622668816, |
|
"eval_rewards/format_reward": 0.969551301919497, |
|
"eval_runtime": 181.1559, |
|
"eval_samples_per_second": 0.546, |
|
"eval_steps_per_second": 0.017, |
|
"step": 700 |
|
}, |
|
{ |
|
"completion_length": 204.03334274291993, |
|
"epoch": 0.282, |
|
"grad_norm": 0.8323688087389707, |
|
"kl": 0.39140625, |
|
"learning_rate": 1.804893797355914e-05, |
|
"loss": 0.0157, |
|
"reward": 1.0666666984558106, |
|
"reward_std": 0.21953659653663635, |
|
"rewards/accuracy_reward": 0.11250000074505806, |
|
"rewards/format_reward": 0.9541666805744171, |
|
"step": 705 |
|
}, |
|
{ |
|
"completion_length": 150.07083892822266, |
|
"epoch": 0.284, |
|
"grad_norm": 0.8298380461791608, |
|
"kl": 0.3693359375, |
|
"learning_rate": 1.8007313709487334e-05, |
|
"loss": 0.0148, |
|
"reward": 1.0500000178813935, |
|
"reward_std": 0.18621423542499543, |
|
"rewards/accuracy_reward": 0.10416666977107525, |
|
"rewards/format_reward": 0.9458333373069763, |
|
"step": 710 |
|
}, |
|
{ |
|
"completion_length": 121.3583381652832, |
|
"epoch": 0.286, |
|
"grad_norm": 0.7950998845132158, |
|
"kl": 0.447265625, |
|
"learning_rate": 1.7965299180241963e-05, |
|
"loss": 0.0179, |
|
"reward": 1.0833333611488343, |
|
"reward_std": 0.15956140235066413, |
|
"rewards/accuracy_reward": 0.09583333432674408, |
|
"rewards/format_reward": 0.9875000059604645, |
|
"step": 715 |
|
}, |
|
{ |
|
"completion_length": 140.52083740234374, |
|
"epoch": 0.288, |
|
"grad_norm": 0.7795378639075542, |
|
"kl": 0.45703125, |
|
"learning_rate": 1.792289643355191e-05, |
|
"loss": 0.0183, |
|
"reward": 1.108333373069763, |
|
"reward_std": 0.17277553156018258, |
|
"rewards/accuracy_reward": 0.12083333618938923, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 720 |
|
}, |
|
{ |
|
"completion_length": 148.92917022705078, |
|
"epoch": 0.29, |
|
"grad_norm": 1.3272943198870826, |
|
"kl": 0.498828125, |
|
"learning_rate": 1.788010753606722e-05, |
|
"loss": 0.02, |
|
"reward": 1.0708333849906921, |
|
"reward_std": 0.15627224519848823, |
|
"rewards/accuracy_reward": 0.08333333432674409, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 725 |
|
}, |
|
{ |
|
"completion_length": 175.61667098999024, |
|
"epoch": 0.292, |
|
"grad_norm": 0.6424382767772573, |
|
"kl": 0.464453125, |
|
"learning_rate": 1.78369345732584e-05, |
|
"loss": 0.0186, |
|
"reward": 1.041666716337204, |
|
"reward_std": 0.1477656200528145, |
|
"rewards/accuracy_reward": 0.07083333544433117, |
|
"rewards/format_reward": 0.9708333432674408, |
|
"step": 730 |
|
}, |
|
{ |
|
"completion_length": 126.27500381469727, |
|
"epoch": 0.294, |
|
"grad_norm": 0.7948572131207867, |
|
"kl": 0.639453125, |
|
"learning_rate": 1.7793379649314743e-05, |
|
"loss": 0.0256, |
|
"reward": 1.1208333611488341, |
|
"reward_std": 0.18972794637084006, |
|
"rewards/accuracy_reward": 0.13333333618938922, |
|
"rewards/format_reward": 0.9875000059604645, |
|
"step": 735 |
|
}, |
|
{ |
|
"completion_length": 94.61666946411133, |
|
"epoch": 0.296, |
|
"grad_norm": 1.0745927572590321, |
|
"kl": 0.5287109375, |
|
"learning_rate": 1.7749444887041797e-05, |
|
"loss": 0.0212, |
|
"reward": 1.0250000476837158, |
|
"reward_std": 0.1675926238298416, |
|
"rewards/accuracy_reward": 0.06666666753590107, |
|
"rewards/format_reward": 0.9583333432674408, |
|
"step": 740 |
|
}, |
|
{ |
|
"completion_length": 104.27500381469727, |
|
"epoch": 0.298, |
|
"grad_norm": 0.2227604024323154, |
|
"kl": 0.569140625, |
|
"learning_rate": 1.7705132427757895e-05, |
|
"loss": 0.0228, |
|
"reward": 1.0916667103767395, |
|
"reward_std": 0.10345393419265747, |
|
"rewards/accuracy_reward": 0.10000000186264515, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 745 |
|
}, |
|
{ |
|
"completion_length": 81.67500152587891, |
|
"epoch": 0.3, |
|
"grad_norm": 1.7233432115097311, |
|
"kl": 0.6505859375, |
|
"learning_rate": 1.766044443118978e-05, |
|
"loss": 0.026, |
|
"reward": 1.0416666865348816, |
|
"reward_std": 0.0905439905822277, |
|
"rewards/accuracy_reward": 0.04583333432674408, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 750 |
|
}, |
|
{ |
|
"completion_length": 101.40833587646485, |
|
"epoch": 0.302, |
|
"grad_norm": 0.8228291476842216, |
|
"kl": 0.559765625, |
|
"learning_rate": 1.761538307536737e-05, |
|
"loss": 0.0224, |
|
"reward": 1.0875000357627869, |
|
"reward_std": 0.11575513705611229, |
|
"rewards/accuracy_reward": 0.10833333656191826, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 755 |
|
}, |
|
{ |
|
"completion_length": 103.02917098999023, |
|
"epoch": 0.304, |
|
"grad_norm": 1.2801924295313796, |
|
"kl": 0.5697265625, |
|
"learning_rate": 1.7569950556517566e-05, |
|
"loss": 0.0228, |
|
"reward": 1.0833333730697632, |
|
"reward_std": 0.16625742092728615, |
|
"rewards/accuracy_reward": 0.08750000223517418, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 760 |
|
}, |
|
{ |
|
"completion_length": 109.6833366394043, |
|
"epoch": 0.306, |
|
"grad_norm": 1.231261037139189, |
|
"kl": 0.5431640625, |
|
"learning_rate": 1.7524149088957244e-05, |
|
"loss": 0.0217, |
|
"reward": 1.0708333611488343, |
|
"reward_std": 0.09324773102998733, |
|
"rewards/accuracy_reward": 0.0750000026077032, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 765 |
|
}, |
|
{ |
|
"completion_length": 142.60833740234375, |
|
"epoch": 0.308, |
|
"grad_norm": 0.5849963492276671, |
|
"kl": 0.5087890625, |
|
"learning_rate": 1.747798090498532e-05, |
|
"loss": 0.0203, |
|
"reward": 1.062500035762787, |
|
"reward_std": 0.0674278385937214, |
|
"rewards/accuracy_reward": 0.06250000074505806, |
|
"rewards/format_reward": 1.0, |
|
"step": 770 |
|
}, |
|
{ |
|
"completion_length": 161.4041717529297, |
|
"epoch": 0.31, |
|
"grad_norm": 0.9058008345995844, |
|
"kl": 0.57265625, |
|
"learning_rate": 1.7431448254773943e-05, |
|
"loss": 0.0229, |
|
"reward": 1.075000035762787, |
|
"reward_std": 0.14718020260334014, |
|
"rewards/accuracy_reward": 0.09583333730697632, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 775 |
|
}, |
|
{ |
|
"completion_length": 157.4166702270508, |
|
"epoch": 0.312, |
|
"grad_norm": 1.1313027943159109, |
|
"kl": 0.4814453125, |
|
"learning_rate": 1.7384553406258842e-05, |
|
"loss": 0.0193, |
|
"reward": 1.0875000357627869, |
|
"reward_std": 0.11366014108061791, |
|
"rewards/accuracy_reward": 0.08750000260770321, |
|
"rewards/format_reward": 1.0, |
|
"step": 780 |
|
}, |
|
{ |
|
"completion_length": 182.37500457763673, |
|
"epoch": 0.314, |
|
"grad_norm": 0.6238900338412194, |
|
"kl": 0.4681640625, |
|
"learning_rate": 1.7337298645028764e-05, |
|
"loss": 0.0187, |
|
"reward": 1.0333333611488342, |
|
"reward_std": 0.07763404920697212, |
|
"rewards/accuracy_reward": 0.03750000111758709, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 785 |
|
}, |
|
{ |
|
"completion_length": 149.3458381652832, |
|
"epoch": 0.316, |
|
"grad_norm": 0.16296278407454673, |
|
"kl": 0.462109375, |
|
"learning_rate": 1.7289686274214116e-05, |
|
"loss": 0.0185, |
|
"reward": 1.0458333492279053, |
|
"reward_std": 0.07685092613101005, |
|
"rewards/accuracy_reward": 0.05000000149011612, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 790 |
|
}, |
|
{ |
|
"completion_length": 130.19583587646486, |
|
"epoch": 0.318, |
|
"grad_norm": 0.7784532053538376, |
|
"kl": 0.43359375, |
|
"learning_rate": 1.7241718614374678e-05, |
|
"loss": 0.0173, |
|
"reward": 1.083333396911621, |
|
"reward_std": 0.13136881664395333, |
|
"rewards/accuracy_reward": 0.08750000298023224, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 795 |
|
}, |
|
{ |
|
"completion_length": 119.23750228881836, |
|
"epoch": 0.32, |
|
"grad_norm": 0.983387641880522, |
|
"kl": 0.4408203125, |
|
"learning_rate": 1.7193398003386514e-05, |
|
"loss": 0.0176, |
|
"reward": 1.0833333730697632, |
|
"reward_std": 0.14619938507676125, |
|
"rewards/accuracy_reward": 0.08333333507180214, |
|
"rewards/format_reward": 1.0, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"eval_completion_length": 115.19872049184946, |
|
"eval_kl": 0.44921875, |
|
"eval_loss": 0.01777729205787182, |
|
"eval_reward": 1.0897436233667226, |
|
"eval_reward_std": 0.08877197739023429, |
|
"eval_rewards/accuracy_reward": 0.09134615628192058, |
|
"eval_rewards/format_reward": 0.9983974374257601, |
|
"eval_runtime": 85.6402, |
|
"eval_samples_per_second": 1.156, |
|
"eval_steps_per_second": 0.035, |
|
"step": 800 |
|
}, |
|
{ |
|
"completion_length": 113.28750381469726, |
|
"epoch": 0.322, |
|
"grad_norm": 0.781174828098305, |
|
"kl": 0.453125, |
|
"learning_rate": 1.7144726796328034e-05, |
|
"loss": 0.0181, |
|
"reward": 1.145833396911621, |
|
"reward_std": 0.16145868599414825, |
|
"rewards/accuracy_reward": 0.14583334065973758, |
|
"rewards/format_reward": 1.0, |
|
"step": 805 |
|
}, |
|
{ |
|
"completion_length": 175.64167022705078, |
|
"epoch": 0.324, |
|
"grad_norm": 0.5727964458555536, |
|
"kl": 0.471875, |
|
"learning_rate": 1.709570736536521e-05, |
|
"loss": 0.0189, |
|
"reward": 1.1083333611488342, |
|
"reward_std": 0.07763404548168182, |
|
"rewards/accuracy_reward": 0.10833333544433117, |
|
"rewards/format_reward": 1.0, |
|
"step": 810 |
|
}, |
|
{ |
|
"completion_length": 246.63750762939452, |
|
"epoch": 0.326, |
|
"grad_norm": 1.0091346142420787, |
|
"kl": 0.4962890625, |
|
"learning_rate": 1.7046342099635948e-05, |
|
"loss": 0.0198, |
|
"reward": 1.0916666984558105, |
|
"reward_std": 0.16956989765167235, |
|
"rewards/accuracy_reward": 0.10000000223517418, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 815 |
|
}, |
|
{ |
|
"completion_length": 322.5458450317383, |
|
"epoch": 0.328, |
|
"grad_norm": 0.7989919925619189, |
|
"kl": 0.465625, |
|
"learning_rate": 1.6996633405133656e-05, |
|
"loss": 0.0186, |
|
"reward": 1.0750000476837158, |
|
"reward_std": 0.1654976300895214, |
|
"rewards/accuracy_reward": 0.08750000149011612, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 820 |
|
}, |
|
{ |
|
"completion_length": 341.00834503173826, |
|
"epoch": 0.33, |
|
"grad_norm": 0.7043145841881899, |
|
"kl": 0.5212890625, |
|
"learning_rate": 1.6946583704589973e-05, |
|
"loss": 0.0208, |
|
"reward": 1.0333333611488342, |
|
"reward_std": 0.15038584247231485, |
|
"rewards/accuracy_reward": 0.07083333395421505, |
|
"rewards/format_reward": 0.962500023841858, |
|
"step": 825 |
|
}, |
|
{ |
|
"completion_length": 241.1916732788086, |
|
"epoch": 0.332, |
|
"grad_norm": 0.6208263212333841, |
|
"kl": 0.469140625, |
|
"learning_rate": 1.68961954373567e-05, |
|
"loss": 0.0188, |
|
"reward": 1.0541666865348815, |
|
"reward_std": 0.1895946055650711, |
|
"rewards/accuracy_reward": 0.09583333693444729, |
|
"rewards/format_reward": 0.9583333611488343, |
|
"step": 830 |
|
}, |
|
{ |
|
"completion_length": 230.2041763305664, |
|
"epoch": 0.334, |
|
"grad_norm": 1.0022357187387183, |
|
"kl": 0.5720703125, |
|
"learning_rate": 1.684547105928689e-05, |
|
"loss": 0.0229, |
|
"reward": 0.991666704416275, |
|
"reward_std": 0.19037772417068483, |
|
"rewards/accuracy_reward": 0.05833333507180214, |
|
"rewards/format_reward": 0.9333333551883698, |
|
"step": 835 |
|
}, |
|
{ |
|
"completion_length": 264.27084197998045, |
|
"epoch": 0.336, |
|
"grad_norm": 0.9244091504002026, |
|
"kl": 0.5970703125, |
|
"learning_rate": 1.6794413042615168e-05, |
|
"loss": 0.0239, |
|
"reward": 1.0458333671092988, |
|
"reward_std": 0.23153360486030578, |
|
"rewards/accuracy_reward": 0.08750000260770321, |
|
"rewards/format_reward": 0.9583333551883697, |
|
"step": 840 |
|
}, |
|
{ |
|
"completion_length": 158.6208381652832, |
|
"epoch": 0.338, |
|
"grad_norm": 0.6420688589621387, |
|
"kl": 0.4923828125, |
|
"learning_rate": 1.6743023875837233e-05, |
|
"loss": 0.0197, |
|
"reward": 1.045833373069763, |
|
"reward_std": 0.11767575517296791, |
|
"rewards/accuracy_reward": 0.05416666865348816, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 845 |
|
}, |
|
{ |
|
"completion_length": 177.85000610351562, |
|
"epoch": 0.34, |
|
"grad_norm": 0.5413030087841071, |
|
"kl": 0.4517578125, |
|
"learning_rate": 1.6691306063588583e-05, |
|
"loss": 0.018, |
|
"reward": 1.0375000357627868, |
|
"reward_std": 0.09132710695266724, |
|
"rewards/accuracy_reward": 0.05000000149011612, |
|
"rewards/format_reward": 0.9875000059604645, |
|
"step": 850 |
|
}, |
|
{ |
|
"completion_length": 153.7375045776367, |
|
"epoch": 0.342, |
|
"grad_norm": 15.670714549698124, |
|
"kl": 0.607421875, |
|
"learning_rate": 1.6639262126522417e-05, |
|
"loss": 0.0243, |
|
"reward": 1.1208333730697633, |
|
"reward_std": 0.20280873402953148, |
|
"rewards/accuracy_reward": 0.15000000596046448, |
|
"rewards/format_reward": 0.9708333432674408, |
|
"step": 855 |
|
}, |
|
{ |
|
"completion_length": 153.3083351135254, |
|
"epoch": 0.344, |
|
"grad_norm": 0.5724519452372687, |
|
"kl": 0.4134765625, |
|
"learning_rate": 1.6586894601186804e-05, |
|
"loss": 0.0165, |
|
"reward": 1.0666667103767395, |
|
"reward_std": 0.11252264827489852, |
|
"rewards/accuracy_reward": 0.06666666828095913, |
|
"rewards/format_reward": 1.0, |
|
"step": 860 |
|
}, |
|
{ |
|
"completion_length": 185.32083740234376, |
|
"epoch": 0.346, |
|
"grad_norm": 0.4311092487621537, |
|
"kl": 0.4388671875, |
|
"learning_rate": 1.6534206039901057e-05, |
|
"loss": 0.0176, |
|
"reward": 1.0958333730697631, |
|
"reward_std": 0.11017328798770905, |
|
"rewards/accuracy_reward": 0.10000000111758708, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 865 |
|
}, |
|
{ |
|
"completion_length": 158.38750534057618, |
|
"epoch": 0.348, |
|
"grad_norm": 0.5107925238050466, |
|
"kl": 0.4388671875, |
|
"learning_rate": 1.6481199010631312e-05, |
|
"loss": 0.0176, |
|
"reward": 1.0791667103767395, |
|
"reward_std": 0.09132710918784141, |
|
"rewards/accuracy_reward": 0.07916666939854622, |
|
"rewards/format_reward": 1.0, |
|
"step": 870 |
|
}, |
|
{ |
|
"completion_length": 151.10000534057616, |
|
"epoch": 0.35, |
|
"grad_norm": 0.855945600131941, |
|
"kl": 0.462109375, |
|
"learning_rate": 1.6427876096865394e-05, |
|
"loss": 0.0185, |
|
"reward": 1.0916667103767395, |
|
"reward_std": 0.1482943795621395, |
|
"rewards/accuracy_reward": 0.0916666690260172, |
|
"rewards/format_reward": 1.0, |
|
"step": 875 |
|
}, |
|
{ |
|
"completion_length": 133.1916717529297, |
|
"epoch": 0.352, |
|
"grad_norm": 0.7382780134899118, |
|
"kl": 0.4478515625, |
|
"learning_rate": 1.63742398974869e-05, |
|
"loss": 0.0179, |
|
"reward": 1.1208333730697633, |
|
"reward_std": 0.1995551086962223, |
|
"rewards/accuracy_reward": 0.12916667051613331, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 880 |
|
}, |
|
{ |
|
"completion_length": 144.54583816528321, |
|
"epoch": 0.354, |
|
"grad_norm": 0.9737960502017814, |
|
"kl": 0.48125, |
|
"learning_rate": 1.632029302664851e-05, |
|
"loss": 0.0193, |
|
"reward": 1.1041666984558105, |
|
"reward_std": 0.17280239537358283, |
|
"rewards/accuracy_reward": 0.10833333544433117, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 885 |
|
}, |
|
{ |
|
"completion_length": 92.67500228881836, |
|
"epoch": 0.356, |
|
"grad_norm": 0.961078931712405, |
|
"kl": 0.51484375, |
|
"learning_rate": 1.6266038113644605e-05, |
|
"loss": 0.0206, |
|
"reward": 1.0833333611488343, |
|
"reward_std": 0.12578697353601456, |
|
"rewards/accuracy_reward": 0.08750000186264514, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 890 |
|
}, |
|
{ |
|
"completion_length": 89.66666793823242, |
|
"epoch": 0.358, |
|
"grad_norm": 0.7233667735051951, |
|
"kl": 0.580859375, |
|
"learning_rate": 1.6211477802783105e-05, |
|
"loss": 0.0232, |
|
"reward": 1.1083333611488342, |
|
"reward_std": 0.16165639609098434, |
|
"rewards/accuracy_reward": 0.1166666705161333, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 895 |
|
}, |
|
{ |
|
"completion_length": 93.3875015258789, |
|
"epoch": 0.36, |
|
"grad_norm": 0.4545503509815593, |
|
"kl": 0.7234375, |
|
"learning_rate": 1.6156614753256583e-05, |
|
"loss": 0.0289, |
|
"reward": 1.0666667103767395, |
|
"reward_std": 0.1506670705974102, |
|
"rewards/accuracy_reward": 0.07500000111758709, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"eval_completion_length": 109.18750293438251, |
|
"eval_kl": 0.6490384615384616, |
|
"eval_loss": 0.025919783860445023, |
|
"eval_reward": 1.0737179609445424, |
|
"eval_reward_std": 0.12753102469902772, |
|
"eval_rewards/accuracy_reward": 0.08493589882094127, |
|
"eval_rewards/format_reward": 0.9887820619803208, |
|
"eval_runtime": 93.0566, |
|
"eval_samples_per_second": 1.064, |
|
"eval_steps_per_second": 0.032, |
|
"step": 900 |
|
}, |
|
{ |
|
"completion_length": 119.41250228881836, |
|
"epoch": 0.362, |
|
"grad_norm": 0.7762304601434084, |
|
"kl": 0.633984375, |
|
"learning_rate": 1.610145163901268e-05, |
|
"loss": 0.0253, |
|
"reward": 1.0500000417232513, |
|
"reward_std": 0.14235814958810805, |
|
"rewards/accuracy_reward": 0.07083333544433117, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 905 |
|
}, |
|
{ |
|
"completion_length": 98.3083366394043, |
|
"epoch": 0.364, |
|
"grad_norm": 0.1969125008394314, |
|
"kl": 0.5587890625, |
|
"learning_rate": 1.6045991148623752e-05, |
|
"loss": 0.0223, |
|
"reward": 1.0791666984558106, |
|
"reward_std": 0.14157502874732017, |
|
"rewards/accuracy_reward": 0.09583333618938923, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 910 |
|
}, |
|
{ |
|
"completion_length": 92.20833587646484, |
|
"epoch": 0.366, |
|
"grad_norm": 0.5710961469229422, |
|
"kl": 0.6203125, |
|
"learning_rate": 1.599023598515586e-05, |
|
"loss": 0.0248, |
|
"reward": 1.1000000476837157, |
|
"reward_std": 0.11095640808343887, |
|
"rewards/accuracy_reward": 0.1041666705161333, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 915 |
|
}, |
|
{ |
|
"completion_length": 169.32083740234376, |
|
"epoch": 0.368, |
|
"grad_norm": 0.32186205273931673, |
|
"kl": 0.6314453125, |
|
"learning_rate": 1.5934188866037017e-05, |
|
"loss": 0.0253, |
|
"reward": 1.1166667103767396, |
|
"reward_std": 0.20900286808609964, |
|
"rewards/accuracy_reward": 0.13750000223517417, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 920 |
|
}, |
|
{ |
|
"completion_length": 162.40833587646483, |
|
"epoch": 0.37, |
|
"grad_norm": 1.0716265467963713, |
|
"kl": 0.5517578125, |
|
"learning_rate": 1.5877852522924733e-05, |
|
"loss": 0.0221, |
|
"reward": 1.0875000357627869, |
|
"reward_std": 0.15046936124563218, |
|
"rewards/accuracy_reward": 0.09583333730697632, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 925 |
|
}, |
|
{ |
|
"completion_length": 117.15416870117187, |
|
"epoch": 0.372, |
|
"grad_norm": 0.1718307429317029, |
|
"kl": 0.58984375, |
|
"learning_rate": 1.5821229701572897e-05, |
|
"loss": 0.0236, |
|
"reward": 1.0500000238418579, |
|
"reward_std": 0.1483220286667347, |
|
"rewards/accuracy_reward": 0.06250000111758709, |
|
"rewards/format_reward": 0.9875000059604645, |
|
"step": 930 |
|
}, |
|
{ |
|
"completion_length": 101.3708366394043, |
|
"epoch": 0.374, |
|
"grad_norm": 3.113823270584589, |
|
"kl": 0.600390625, |
|
"learning_rate": 1.5764323161697933e-05, |
|
"loss": 0.024, |
|
"reward": 1.075000035762787, |
|
"reward_std": 0.15289186984300612, |
|
"rewards/accuracy_reward": 0.08750000335276127, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 935 |
|
}, |
|
{ |
|
"completion_length": 168.9416717529297, |
|
"epoch": 0.376, |
|
"grad_norm": 0.887035405128863, |
|
"kl": 0.6765625, |
|
"learning_rate": 1.570713567684432e-05, |
|
"loss": 0.0271, |
|
"reward": 1.037500023841858, |
|
"reward_std": 0.16739491894841194, |
|
"rewards/accuracy_reward": 0.06666666753590107, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 940 |
|
}, |
|
{ |
|
"completion_length": 110.50416793823243, |
|
"epoch": 0.378, |
|
"grad_norm": 1.1890810823941778, |
|
"kl": 0.5552734375, |
|
"learning_rate": 1.564967003424938e-05, |
|
"loss": 0.0222, |
|
"reward": 1.1666666984558105, |
|
"reward_std": 0.2173117831349373, |
|
"rewards/accuracy_reward": 0.1833333384245634, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 945 |
|
}, |
|
{ |
|
"completion_length": 166.82083892822266, |
|
"epoch": 0.38, |
|
"grad_norm": 0.9138187216102012, |
|
"kl": 0.530078125, |
|
"learning_rate": 1.5591929034707468e-05, |
|
"loss": 0.0212, |
|
"reward": 1.0625000298023224, |
|
"reward_std": 0.09673458784818649, |
|
"rewards/accuracy_reward": 0.07916666939854622, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 950 |
|
}, |
|
{ |
|
"completion_length": 146.1916702270508, |
|
"epoch": 0.382, |
|
"grad_norm": 1.0622486882329054, |
|
"kl": 0.5103515625, |
|
"learning_rate": 1.553391549243344e-05, |
|
"loss": 0.0204, |
|
"reward": 1.0541666984558105, |
|
"reward_std": 0.137278188765049, |
|
"rewards/accuracy_reward": 0.07083333618938922, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 955 |
|
}, |
|
{ |
|
"completion_length": 160.28750381469726, |
|
"epoch": 0.384, |
|
"grad_norm": 0.752318503568895, |
|
"kl": 0.520703125, |
|
"learning_rate": 1.5475632234925505e-05, |
|
"loss": 0.0208, |
|
"reward": 1.0375000357627868, |
|
"reward_std": 0.14157502725720406, |
|
"rewards/accuracy_reward": 0.06250000223517418, |
|
"rewards/format_reward": 0.9750000238418579, |
|
"step": 960 |
|
}, |
|
{ |
|
"completion_length": 140.64583740234374, |
|
"epoch": 0.386, |
|
"grad_norm": 0.6324158396336607, |
|
"kl": 0.5330078125, |
|
"learning_rate": 1.54170821028274e-05, |
|
"loss": 0.0213, |
|
"reward": 1.0500000238418579, |
|
"reward_std": 0.10286851897835732, |
|
"rewards/accuracy_reward": 0.06250000335276126, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 965 |
|
}, |
|
{ |
|
"completion_length": 154.11667098999024, |
|
"epoch": 0.388, |
|
"grad_norm": 0.6918653815816405, |
|
"kl": 0.50625, |
|
"learning_rate": 1.5358267949789968e-05, |
|
"loss": 0.0203, |
|
"reward": 1.0666667222976685, |
|
"reward_std": 0.16120432689785957, |
|
"rewards/accuracy_reward": 0.07916666902601718, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 970 |
|
}, |
|
{ |
|
"completion_length": 182.7916717529297, |
|
"epoch": 0.39, |
|
"grad_norm": 0.8349715169939224, |
|
"kl": 0.5755859375, |
|
"learning_rate": 1.529919264233205e-05, |
|
"loss": 0.023, |
|
"reward": 1.0500000298023224, |
|
"reward_std": 0.179521743953228, |
|
"rewards/accuracy_reward": 0.07916666902601718, |
|
"rewards/format_reward": 0.9708333432674408, |
|
"step": 975 |
|
}, |
|
{ |
|
"completion_length": 167.6125045776367, |
|
"epoch": 0.392, |
|
"grad_norm": 0.5431435446037667, |
|
"kl": 0.452734375, |
|
"learning_rate": 1.5239859059700794e-05, |
|
"loss": 0.0181, |
|
"reward": 1.0833333611488343, |
|
"reward_std": 0.17568050399422647, |
|
"rewards/accuracy_reward": 0.10000000447034836, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 980 |
|
}, |
|
{ |
|
"completion_length": 179.76250534057618, |
|
"epoch": 0.394, |
|
"grad_norm": 0.8606801988109793, |
|
"kl": 0.439453125, |
|
"learning_rate": 1.5180270093731305e-05, |
|
"loss": 0.0176, |
|
"reward": 1.0916666984558105, |
|
"reward_std": 0.17522490546107292, |
|
"rewards/accuracy_reward": 0.1083333358168602, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 985 |
|
}, |
|
{ |
|
"completion_length": 211.83334045410157, |
|
"epoch": 0.396, |
|
"grad_norm": 0.9351930624726825, |
|
"kl": 0.4248046875, |
|
"learning_rate": 1.5120428648705716e-05, |
|
"loss": 0.017, |
|
"reward": 1.0666666865348815, |
|
"reward_std": 0.20573704093694686, |
|
"rewards/accuracy_reward": 0.09583333544433117, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 990 |
|
}, |
|
{ |
|
"completion_length": 165.30833587646484, |
|
"epoch": 0.398, |
|
"grad_norm": 0.6856844913007303, |
|
"kl": 0.43984375, |
|
"learning_rate": 1.5060337641211637e-05, |
|
"loss": 0.0176, |
|
"reward": 1.1291666865348815, |
|
"reward_std": 0.16761595159769058, |
|
"rewards/accuracy_reward": 0.1416666679084301, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 995 |
|
}, |
|
{ |
|
"completion_length": 150.04167098999022, |
|
"epoch": 0.4, |
|
"grad_norm": 0.9635145258572108, |
|
"kl": 0.4900390625, |
|
"learning_rate": 1.5000000000000002e-05, |
|
"loss": 0.0196, |
|
"reward": 1.079166716337204, |
|
"reward_std": 0.1558768317103386, |
|
"rewards/accuracy_reward": 0.10000000335276127, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"eval_completion_length": 126.44872283935547, |
|
"eval_kl": 0.48978365384615385, |
|
"eval_loss": 0.0195956751704216, |
|
"eval_reward": 1.112179526915917, |
|
"eval_reward_std": 0.15462170598598626, |
|
"eval_rewards/accuracy_reward": 0.12179487499480064, |
|
"eval_rewards/format_reward": 0.9903846245545608, |
|
"eval_runtime": 127.709, |
|
"eval_samples_per_second": 0.775, |
|
"eval_steps_per_second": 0.023, |
|
"step": 1000 |
|
}, |
|
{ |
|
"completion_length": 168.5666702270508, |
|
"epoch": 0.402, |
|
"grad_norm": 1.0895511993093854, |
|
"kl": 0.4529296875, |
|
"learning_rate": 1.493941866584231e-05, |
|
"loss": 0.0181, |
|
"reward": 1.0875000476837158, |
|
"reward_std": 0.2581599496304989, |
|
"rewards/accuracy_reward": 0.12083333618938923, |
|
"rewards/format_reward": 0.9666666865348816, |
|
"step": 1005 |
|
}, |
|
{ |
|
"completion_length": 161.23333663940429, |
|
"epoch": 0.404, |
|
"grad_norm": 0.7389397460996036, |
|
"kl": 0.5572265625, |
|
"learning_rate": 1.4878596591387329e-05, |
|
"loss": 0.0223, |
|
"reward": 1.0583333551883698, |
|
"reward_std": 0.23565217033028601, |
|
"rewards/accuracy_reward": 0.09583333656191825, |
|
"rewards/format_reward": 0.9625000119209289, |
|
"step": 1010 |
|
}, |
|
{ |
|
"completion_length": 221.5958366394043, |
|
"epoch": 0.406, |
|
"grad_norm": 0.928302942173807, |
|
"kl": 0.5322265625, |
|
"learning_rate": 1.4817536741017153e-05, |
|
"loss": 0.0213, |
|
"reward": 1.0625000238418578, |
|
"reward_std": 0.26485596522688865, |
|
"rewards/accuracy_reward": 0.12500000335276126, |
|
"rewards/format_reward": 0.9375000178813935, |
|
"step": 1015 |
|
}, |
|
{ |
|
"completion_length": 152.99167175292968, |
|
"epoch": 0.408, |
|
"grad_norm": 0.9384235272703269, |
|
"kl": 0.541796875, |
|
"learning_rate": 1.4756242090702756e-05, |
|
"loss": 0.0216, |
|
"reward": 1.0958333611488342, |
|
"reward_std": 0.25780556723475456, |
|
"rewards/accuracy_reward": 0.12500000186264515, |
|
"rewards/format_reward": 0.9708333551883698, |
|
"step": 1020 |
|
}, |
|
{ |
|
"completion_length": 146.56250534057617, |
|
"epoch": 0.41, |
|
"grad_norm": 0.30172387560515257, |
|
"kl": 0.519140625, |
|
"learning_rate": 1.469471562785891e-05, |
|
"loss": 0.0208, |
|
"reward": 1.0750000417232513, |
|
"reward_std": 0.10206207036972045, |
|
"rewards/accuracy_reward": 0.08750000260770321, |
|
"rewards/format_reward": 0.9875000059604645, |
|
"step": 1025 |
|
}, |
|
{ |
|
"completion_length": 152.51666946411132, |
|
"epoch": 0.412, |
|
"grad_norm": 0.9294930359078027, |
|
"kl": 1.005859375, |
|
"learning_rate": 1.463296035119862e-05, |
|
"loss": 0.0402, |
|
"reward": 1.0500000357627868, |
|
"reward_std": 0.14619938284158707, |
|
"rewards/accuracy_reward": 0.0666666679084301, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 1030 |
|
}, |
|
{ |
|
"completion_length": 180.95000381469725, |
|
"epoch": 0.414, |
|
"grad_norm": 0.4453899299979498, |
|
"kl": 0.487109375, |
|
"learning_rate": 1.4570979270586944e-05, |
|
"loss": 0.0195, |
|
"reward": 1.075000035762787, |
|
"reward_std": 0.21077675521373748, |
|
"rewards/accuracy_reward": 0.10000000186264515, |
|
"rewards/format_reward": 0.975000011920929, |
|
"step": 1035 |
|
}, |
|
{ |
|
"completion_length": 138.85000457763672, |
|
"epoch": 0.416, |
|
"grad_norm": 0.7666780913080924, |
|
"kl": 0.5107421875, |
|
"learning_rate": 1.4508775406894308e-05, |
|
"loss": 0.0204, |
|
"reward": 1.0916666984558105, |
|
"reward_std": 0.12927382662892342, |
|
"rewards/accuracy_reward": 0.09583333730697632, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1040 |
|
}, |
|
{ |
|
"completion_length": 226.35834045410155, |
|
"epoch": 0.418, |
|
"grad_norm": 0.6438784140914322, |
|
"kl": 0.449609375, |
|
"learning_rate": 1.4446351791849276e-05, |
|
"loss": 0.018, |
|
"reward": 1.0666667103767395, |
|
"reward_std": 0.1238663487136364, |
|
"rewards/accuracy_reward": 0.08333333730697631, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 1045 |
|
}, |
|
{ |
|
"completion_length": 244.09584121704103, |
|
"epoch": 0.42, |
|
"grad_norm": 0.5444784393227498, |
|
"kl": 0.4791015625, |
|
"learning_rate": 1.4383711467890776e-05, |
|
"loss": 0.0192, |
|
"reward": 1.079166704416275, |
|
"reward_std": 0.2215237647294998, |
|
"rewards/accuracy_reward": 0.12916667126119136, |
|
"rewards/format_reward": 0.950000011920929, |
|
"step": 1050 |
|
}, |
|
{ |
|
"completion_length": 177.3416732788086, |
|
"epoch": 0.422, |
|
"grad_norm": 0.9397555923299521, |
|
"kl": 0.5013671875, |
|
"learning_rate": 1.4320857488019826e-05, |
|
"loss": 0.0201, |
|
"reward": 1.0208333730697632, |
|
"reward_std": 0.14698250442743302, |
|
"rewards/accuracy_reward": 0.05416666716337204, |
|
"rewards/format_reward": 0.9666666805744171, |
|
"step": 1055 |
|
}, |
|
{ |
|
"completion_length": 173.94583740234376, |
|
"epoch": 0.424, |
|
"grad_norm": 0.7522880163840423, |
|
"kl": 0.4654296875, |
|
"learning_rate": 1.4257792915650728e-05, |
|
"loss": 0.0186, |
|
"reward": 1.0541667103767396, |
|
"reward_std": 0.12657008767127992, |
|
"rewards/accuracy_reward": 0.07500000298023224, |
|
"rewards/format_reward": 0.9791666746139527, |
|
"step": 1060 |
|
}, |
|
{ |
|
"completion_length": 231.49584045410157, |
|
"epoch": 0.426, |
|
"grad_norm": 0.1859104138975983, |
|
"kl": 0.47578125, |
|
"learning_rate": 1.4194520824461773e-05, |
|
"loss": 0.019, |
|
"reward": 1.0458333492279053, |
|
"reward_std": 0.16251266971230507, |
|
"rewards/accuracy_reward": 0.0750000026077032, |
|
"rewards/format_reward": 0.9708333551883698, |
|
"step": 1065 |
|
}, |
|
{ |
|
"completion_length": 247.60417709350585, |
|
"epoch": 0.428, |
|
"grad_norm": 0.6117317081335039, |
|
"kl": 0.447265625, |
|
"learning_rate": 1.413104429824542e-05, |
|
"loss": 0.0179, |
|
"reward": 1.0250000417232514, |
|
"reward_std": 0.16218162104487419, |
|
"rewards/accuracy_reward": 0.05833333507180214, |
|
"rewards/format_reward": 0.9666666805744171, |
|
"step": 1070 |
|
}, |
|
{ |
|
"completion_length": 171.2833366394043, |
|
"epoch": 0.43, |
|
"grad_norm": 0.9226683920061354, |
|
"kl": 0.4724609375, |
|
"learning_rate": 1.4067366430758004e-05, |
|
"loss": 0.0189, |
|
"reward": 1.0708333730697632, |
|
"reward_std": 0.20978599190711975, |
|
"rewards/accuracy_reward": 0.0916666679084301, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 1075 |
|
}, |
|
{ |
|
"completion_length": 251.2166763305664, |
|
"epoch": 0.432, |
|
"grad_norm": 0.6333366911834787, |
|
"kl": 0.42578125, |
|
"learning_rate": 1.4003490325568953e-05, |
|
"loss": 0.017, |
|
"reward": 1.0416666865348816, |
|
"reward_std": 0.17009865194559098, |
|
"rewards/accuracy_reward": 0.08333333693444729, |
|
"rewards/format_reward": 0.9583333492279053, |
|
"step": 1080 |
|
}, |
|
{ |
|
"completion_length": 269.02083892822264, |
|
"epoch": 0.434, |
|
"grad_norm": 0.8555804437546749, |
|
"kl": 0.4380859375, |
|
"learning_rate": 1.3939419095909513e-05, |
|
"loss": 0.0175, |
|
"reward": 1.0250000476837158, |
|
"reward_std": 0.22021322846412658, |
|
"rewards/accuracy_reward": 0.08750000186264514, |
|
"rewards/format_reward": 0.9375000178813935, |
|
"step": 1085 |
|
}, |
|
{ |
|
"completion_length": 165.95000534057618, |
|
"epoch": 0.436, |
|
"grad_norm": 0.9717530137454518, |
|
"kl": 0.4203125, |
|
"learning_rate": 1.3875155864521031e-05, |
|
"loss": 0.0168, |
|
"reward": 1.1333333730697632, |
|
"reward_std": 0.18290041536092758, |
|
"rewards/accuracy_reward": 0.15416667014360427, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 1090 |
|
}, |
|
{ |
|
"completion_length": 207.62084197998047, |
|
"epoch": 0.438, |
|
"grad_norm": 0.5696677734414208, |
|
"kl": 0.476953125, |
|
"learning_rate": 1.3810703763502744e-05, |
|
"loss": 0.0191, |
|
"reward": 1.0333333611488342, |
|
"reward_std": 0.13760571405291558, |
|
"rewards/accuracy_reward": 0.06250000186264515, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 1095 |
|
}, |
|
{ |
|
"completion_length": 214.5041702270508, |
|
"epoch": 0.44, |
|
"grad_norm": 0.2890840425623631, |
|
"kl": 0.4705078125, |
|
"learning_rate": 1.3746065934159123e-05, |
|
"loss": 0.0188, |
|
"reward": 1.0750000178813934, |
|
"reward_std": 0.26415714919567107, |
|
"rewards/accuracy_reward": 0.12083333656191826, |
|
"rewards/format_reward": 0.9541666805744171, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"eval_completion_length": 180.66506958007812, |
|
"eval_kl": 0.5162259615384616, |
|
"eval_loss": 0.020862072706222534, |
|
"eval_reward": 1.09775644999284, |
|
"eval_reward_std": 0.16826987896974271, |
|
"eval_rewards/accuracy_reward": 0.12500000472825307, |
|
"eval_rewards/format_reward": 0.9727564316529494, |
|
"eval_runtime": 203.1184, |
|
"eval_samples_per_second": 0.487, |
|
"eval_steps_per_second": 0.015, |
|
"step": 1100 |
|
}, |
|
{ |
|
"completion_length": 163.7708396911621, |
|
"epoch": 0.442, |
|
"grad_norm": 0.777201350447929, |
|
"kl": 0.48671875, |
|
"learning_rate": 1.3681245526846782e-05, |
|
"loss": 0.0195, |
|
"reward": 1.0583333551883698, |
|
"reward_std": 0.16357700973749162, |
|
"rewards/accuracy_reward": 0.07916666902601718, |
|
"rewards/format_reward": 0.9791666746139527, |
|
"step": 1105 |
|
}, |
|
{ |
|
"completion_length": 219.01250305175782, |
|
"epoch": 0.444, |
|
"grad_norm": 0.8993155700071122, |
|
"kl": 0.503125, |
|
"learning_rate": 1.3616245700820922e-05, |
|
"loss": 0.0201, |
|
"reward": 1.1000000298023225, |
|
"reward_std": 0.26238192319869996, |
|
"rewards/accuracy_reward": 0.1541666705161333, |
|
"rewards/format_reward": 0.9458333551883698, |
|
"step": 1110 |
|
}, |
|
{ |
|
"completion_length": 134.5833396911621, |
|
"epoch": 0.446, |
|
"grad_norm": 1.3416395741871388, |
|
"kl": 0.648046875, |
|
"learning_rate": 1.3551069624081372e-05, |
|
"loss": 0.0259, |
|
"reward": 1.0333333790302277, |
|
"reward_std": 0.2724094092845917, |
|
"rewards/accuracy_reward": 0.11250000335276127, |
|
"rewards/format_reward": 0.9208333551883697, |
|
"step": 1115 |
|
}, |
|
{ |
|
"completion_length": 143.10417251586915, |
|
"epoch": 0.448, |
|
"grad_norm": 0.6893211262244785, |
|
"kl": 0.4814453125, |
|
"learning_rate": 1.3485720473218153e-05, |
|
"loss": 0.0192, |
|
"reward": 1.041666680574417, |
|
"reward_std": 0.28478238806128503, |
|
"rewards/accuracy_reward": 0.1208333358168602, |
|
"rewards/format_reward": 0.9208333492279053, |
|
"step": 1120 |
|
}, |
|
{ |
|
"completion_length": 178.47500534057616, |
|
"epoch": 0.45, |
|
"grad_norm": 0.750719996343038, |
|
"kl": 0.4640625, |
|
"learning_rate": 1.342020143325669e-05, |
|
"loss": 0.0186, |
|
"reward": 1.062500035762787, |
|
"reward_std": 0.20043605640530587, |
|
"rewards/accuracy_reward": 0.08333333618938923, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 1125 |
|
}, |
|
{ |
|
"completion_length": 194.51250457763672, |
|
"epoch": 0.452, |
|
"grad_norm": 0.9250247396623772, |
|
"kl": 0.5109375, |
|
"learning_rate": 1.3354515697502552e-05, |
|
"loss": 0.0204, |
|
"reward": 1.012500035762787, |
|
"reward_std": 0.11767575666308402, |
|
"rewards/accuracy_reward": 0.03333333432674408, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 1130 |
|
}, |
|
{ |
|
"completion_length": 134.37500457763673, |
|
"epoch": 0.454, |
|
"grad_norm": 0.4184828078418494, |
|
"kl": 0.5029296875, |
|
"learning_rate": 1.3288666467385834e-05, |
|
"loss": 0.0201, |
|
"reward": 1.1083333849906922, |
|
"reward_std": 0.15370185375213624, |
|
"rewards/accuracy_reward": 0.11250000186264515, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1135 |
|
}, |
|
{ |
|
"completion_length": 141.8625045776367, |
|
"epoch": 0.456, |
|
"grad_norm": 1.540000600383148, |
|
"kl": 0.534765625, |
|
"learning_rate": 1.3222656952305113e-05, |
|
"loss": 0.0214, |
|
"reward": 1.104166704416275, |
|
"reward_std": 0.21041805893182755, |
|
"rewards/accuracy_reward": 0.12083333730697632, |
|
"rewards/format_reward": 0.9833333373069764, |
|
"step": 1140 |
|
}, |
|
{ |
|
"completion_length": 103.26250305175782, |
|
"epoch": 0.458, |
|
"grad_norm": 0.9729389527580656, |
|
"kl": 0.4873046875, |
|
"learning_rate": 1.3156490369471026e-05, |
|
"loss": 0.0195, |
|
"reward": 1.1083333969116211, |
|
"reward_std": 0.1348556809127331, |
|
"rewards/accuracy_reward": 0.10833333693444729, |
|
"rewards/format_reward": 1.0, |
|
"step": 1145 |
|
}, |
|
{ |
|
"completion_length": 100.60416984558105, |
|
"epoch": 0.46, |
|
"grad_norm": 0.6226549877758246, |
|
"kl": 0.5244140625, |
|
"learning_rate": 1.3090169943749475e-05, |
|
"loss": 0.021, |
|
"reward": 1.0666666865348815, |
|
"reward_std": 0.11078203096985817, |
|
"rewards/accuracy_reward": 0.07083333507180214, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1150 |
|
}, |
|
{ |
|
"completion_length": 125.1916717529297, |
|
"epoch": 0.462, |
|
"grad_norm": 0.7322743259128145, |
|
"kl": 0.490234375, |
|
"learning_rate": 1.3023698907504447e-05, |
|
"loss": 0.0196, |
|
"reward": 1.0458333551883698, |
|
"reward_std": 0.1147707849740982, |
|
"rewards/accuracy_reward": 0.05833333432674408, |
|
"rewards/format_reward": 0.9875000059604645, |
|
"step": 1155 |
|
}, |
|
{ |
|
"completion_length": 171.20000762939452, |
|
"epoch": 0.464, |
|
"grad_norm": 0.8661955941081932, |
|
"kl": 0.433203125, |
|
"learning_rate": 1.2957080500440469e-05, |
|
"loss": 0.0173, |
|
"reward": 1.1125000417232513, |
|
"reward_std": 0.1469825029373169, |
|
"rewards/accuracy_reward": 0.12083333656191826, |
|
"rewards/format_reward": 0.9916666686534882, |
|
"step": 1160 |
|
}, |
|
{ |
|
"completion_length": 363.72084655761716, |
|
"epoch": 0.466, |
|
"grad_norm": 0.5690418918424673, |
|
"kl": 0.3953125, |
|
"learning_rate": 1.2890317969444716e-05, |
|
"loss": 0.0158, |
|
"reward": 0.9916666865348815, |
|
"reward_std": 0.17782215774059296, |
|
"rewards/accuracy_reward": 0.0416666679084301, |
|
"rewards/format_reward": 0.9500000178813934, |
|
"step": 1165 |
|
}, |
|
{ |
|
"completion_length": 340.24583892822267, |
|
"epoch": 0.468, |
|
"grad_norm": 0.5868426291161072, |
|
"kl": 0.3611328125, |
|
"learning_rate": 1.2823414568428767e-05, |
|
"loss": 0.0144, |
|
"reward": 1.041666704416275, |
|
"reward_std": 0.1920206516981125, |
|
"rewards/accuracy_reward": 0.0708333358168602, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 1170 |
|
}, |
|
{ |
|
"completion_length": 287.6666717529297, |
|
"epoch": 0.47, |
|
"grad_norm": 0.7021811883073036, |
|
"kl": 0.337109375, |
|
"learning_rate": 1.2756373558169992e-05, |
|
"loss": 0.0135, |
|
"reward": 1.045833373069763, |
|
"reward_std": 0.13058570101857186, |
|
"rewards/accuracy_reward": 0.05416666828095913, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 1175 |
|
}, |
|
{ |
|
"completion_length": 186.4791717529297, |
|
"epoch": 0.472, |
|
"grad_norm": 0.6684350780126657, |
|
"kl": 0.3828125, |
|
"learning_rate": 1.2689198206152657e-05, |
|
"loss": 0.0153, |
|
"reward": 1.0708333730697632, |
|
"reward_std": 0.11366014555096626, |
|
"rewards/accuracy_reward": 0.07500000223517418, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1180 |
|
}, |
|
{ |
|
"completion_length": 177.3375045776367, |
|
"epoch": 0.474, |
|
"grad_norm": 0.6147456691558177, |
|
"kl": 0.3947265625, |
|
"learning_rate": 1.2621891786408648e-05, |
|
"loss": 0.0158, |
|
"reward": 1.0958333611488342, |
|
"reward_std": 0.14349564239382745, |
|
"rewards/accuracy_reward": 0.0958333346992731, |
|
"rewards/format_reward": 1.0, |
|
"step": 1185 |
|
}, |
|
{ |
|
"completion_length": 179.8791702270508, |
|
"epoch": 0.476, |
|
"grad_norm": 0.7097100700508777, |
|
"kl": 0.423046875, |
|
"learning_rate": 1.2554457579357906e-05, |
|
"loss": 0.0169, |
|
"reward": 1.0791666984558106, |
|
"reward_std": 0.16390806213021278, |
|
"rewards/accuracy_reward": 0.08333333544433116, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1190 |
|
}, |
|
{ |
|
"completion_length": 175.40417098999023, |
|
"epoch": 0.478, |
|
"grad_norm": 0.3216081776765636, |
|
"kl": 0.401953125, |
|
"learning_rate": 1.2486898871648552e-05, |
|
"loss": 0.0161, |
|
"reward": 1.0625000476837159, |
|
"reward_std": 0.1101732850074768, |
|
"rewards/accuracy_reward": 0.06250000223517418, |
|
"rewards/format_reward": 1.0, |
|
"step": 1195 |
|
}, |
|
{ |
|
"completion_length": 195.07500457763672, |
|
"epoch": 0.48, |
|
"grad_norm": 0.8759106701000279, |
|
"kl": 0.4146484375, |
|
"learning_rate": 1.2419218955996677e-05, |
|
"loss": 0.0166, |
|
"reward": 1.075000035762787, |
|
"reward_std": 0.10886141434311866, |
|
"rewards/accuracy_reward": 0.07916666977107525, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"eval_completion_length": 196.80609365609976, |
|
"eval_kl": 0.4260817307692308, |
|
"eval_loss": 0.017165007069706917, |
|
"eval_reward": 1.115384647479424, |
|
"eval_reward_std": 0.14654561418753403, |
|
"eval_rewards/accuracy_reward": 0.12339744000480725, |
|
"eval_rewards/format_reward": 0.9919871871288006, |
|
"eval_runtime": 120.029, |
|
"eval_samples_per_second": 0.825, |
|
"eval_steps_per_second": 0.025, |
|
"step": 1200 |
|
}, |
|
{ |
|
"completion_length": 206.79583892822265, |
|
"epoch": 0.482, |
|
"grad_norm": 0.5225537669303568, |
|
"kl": 0.396875, |
|
"learning_rate": 1.23514211310259e-05, |
|
"loss": 0.0159, |
|
"reward": 1.075000035762787, |
|
"reward_std": 0.12177135646343232, |
|
"rewards/accuracy_reward": 0.0750000026077032, |
|
"rewards/format_reward": 1.0, |
|
"step": 1205 |
|
}, |
|
{ |
|
"completion_length": 206.87500457763673, |
|
"epoch": 0.484, |
|
"grad_norm": 0.344380593464978, |
|
"kl": 0.4521484375, |
|
"learning_rate": 1.2283508701106559e-05, |
|
"loss": 0.0181, |
|
"reward": 1.125000035762787, |
|
"reward_std": 0.17358550876379014, |
|
"rewards/accuracy_reward": 0.12500000260770322, |
|
"rewards/format_reward": 1.0, |
|
"step": 1210 |
|
}, |
|
{ |
|
"completion_length": 290.5125076293945, |
|
"epoch": 0.486, |
|
"grad_norm": 0.47708923896384847, |
|
"kl": 0.4142578125, |
|
"learning_rate": 1.2215484976194675e-05, |
|
"loss": 0.0166, |
|
"reward": 1.0833333611488343, |
|
"reward_std": 0.14718020632863044, |
|
"rewards/accuracy_reward": 0.09583333618938923, |
|
"rewards/format_reward": 0.9875000059604645, |
|
"step": 1215 |
|
}, |
|
{ |
|
"completion_length": 262.9541748046875, |
|
"epoch": 0.488, |
|
"grad_norm": 0.7337765212074902, |
|
"kl": 0.5099609375, |
|
"learning_rate": 1.2147353271670634e-05, |
|
"loss": 0.0204, |
|
"reward": 1.062500035762787, |
|
"reward_std": 0.2556539088487625, |
|
"rewards/accuracy_reward": 0.10000000260770321, |
|
"rewards/format_reward": 0.962500023841858, |
|
"step": 1220 |
|
}, |
|
{ |
|
"completion_length": 249.69584045410156, |
|
"epoch": 0.49, |
|
"grad_norm": 0.7659087296399365, |
|
"kl": 0.4314453125, |
|
"learning_rate": 1.2079116908177592e-05, |
|
"loss": 0.0173, |
|
"reward": 1.0708333611488343, |
|
"reward_std": 0.1630980782210827, |
|
"rewards/accuracy_reward": 0.08333333618938923, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 1225 |
|
}, |
|
{ |
|
"completion_length": 171.30417022705078, |
|
"epoch": 0.492, |
|
"grad_norm": 0.5881563006486139, |
|
"kl": 0.4369140625, |
|
"learning_rate": 1.2010779211459649e-05, |
|
"loss": 0.0175, |
|
"reward": 1.1333333611488343, |
|
"reward_std": 0.19801353365182878, |
|
"rewards/accuracy_reward": 0.1541666716337204, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 1230 |
|
}, |
|
{ |
|
"completion_length": 215.20000457763672, |
|
"epoch": 0.494, |
|
"grad_norm": 1.0090808424345548, |
|
"kl": 0.4369140625, |
|
"learning_rate": 1.194234351219972e-05, |
|
"loss": 0.0175, |
|
"reward": 1.0833333611488343, |
|
"reward_std": 0.23648979663848876, |
|
"rewards/accuracy_reward": 0.12500000335276126, |
|
"rewards/format_reward": 0.9583333492279053, |
|
"step": 1235 |
|
}, |
|
{ |
|
"completion_length": 186.05417404174804, |
|
"epoch": 0.496, |
|
"grad_norm": 0.79230261520844, |
|
"kl": 0.419140625, |
|
"learning_rate": 1.187381314585725e-05, |
|
"loss": 0.0168, |
|
"reward": 1.137500035762787, |
|
"reward_std": 0.21822471916675568, |
|
"rewards/accuracy_reward": 0.1541666690260172, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 1240 |
|
}, |
|
{ |
|
"completion_length": 170.16250457763672, |
|
"epoch": 0.498, |
|
"grad_norm": 0.6709583305642091, |
|
"kl": 0.455859375, |
|
"learning_rate": 1.1805191452505602e-05, |
|
"loss": 0.0182, |
|
"reward": 1.100000023841858, |
|
"reward_std": 0.1810879833996296, |
|
"rewards/accuracy_reward": 0.1250000014901161, |
|
"rewards/format_reward": 0.975000011920929, |
|
"step": 1245 |
|
}, |
|
{ |
|
"completion_length": 128.26667175292968, |
|
"epoch": 0.5, |
|
"grad_norm": 0.8833573142079981, |
|
"kl": 0.469921875, |
|
"learning_rate": 1.1736481776669307e-05, |
|
"loss": 0.0188, |
|
"reward": 1.1166667222976685, |
|
"reward_std": 0.14968623965978622, |
|
"rewards/accuracy_reward": 0.1166666705161333, |
|
"rewards/format_reward": 1.0, |
|
"step": 1250 |
|
}, |
|
{ |
|
"completion_length": 162.34583892822266, |
|
"epoch": 0.502, |
|
"grad_norm": 0.3294796625934881, |
|
"kl": 0.461328125, |
|
"learning_rate": 1.1667687467161025e-05, |
|
"loss": 0.0185, |
|
"reward": 1.1416667222976684, |
|
"reward_std": 0.20132601708173753, |
|
"rewards/accuracy_reward": 0.1500000063329935, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 1255 |
|
}, |
|
{ |
|
"completion_length": 207.9791732788086, |
|
"epoch": 0.504, |
|
"grad_norm": 0.7088847090687599, |
|
"kl": 0.490234375, |
|
"learning_rate": 1.159881187691835e-05, |
|
"loss": 0.0196, |
|
"reward": 1.087500023841858, |
|
"reward_std": 0.21892432272434234, |
|
"rewards/accuracy_reward": 0.11250000223517417, |
|
"rewards/format_reward": 0.9750000238418579, |
|
"step": 1260 |
|
}, |
|
{ |
|
"completion_length": 264.69584426879885, |
|
"epoch": 0.506, |
|
"grad_norm": 1.0708969943160458, |
|
"kl": 0.467578125, |
|
"learning_rate": 1.1529858362840383e-05, |
|
"loss": 0.0187, |
|
"reward": 1.062500035762787, |
|
"reward_std": 0.22483758330345155, |
|
"rewards/accuracy_reward": 0.10416666828095913, |
|
"rewards/format_reward": 0.9583333492279053, |
|
"step": 1265 |
|
}, |
|
{ |
|
"completion_length": 232.33334197998047, |
|
"epoch": 0.508, |
|
"grad_norm": 0.6585084882308617, |
|
"kl": 0.437109375, |
|
"learning_rate": 1.1460830285624119e-05, |
|
"loss": 0.0175, |
|
"reward": 1.075000023841858, |
|
"reward_std": 0.22963632121682168, |
|
"rewards/accuracy_reward": 0.10833333693444729, |
|
"rewards/format_reward": 0.9666666805744171, |
|
"step": 1270 |
|
}, |
|
{ |
|
"completion_length": 194.81666870117186, |
|
"epoch": 0.51, |
|
"grad_norm": 0.5578883725556453, |
|
"kl": 0.453125, |
|
"learning_rate": 1.1391731009600655e-05, |
|
"loss": 0.0181, |
|
"reward": 1.0458333611488342, |
|
"reward_std": 0.13407255634665488, |
|
"rewards/accuracy_reward": 0.06250000186264515, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 1275 |
|
}, |
|
{ |
|
"completion_length": 140.2375030517578, |
|
"epoch": 0.512, |
|
"grad_norm": 0.9319950337687952, |
|
"kl": 0.51640625, |
|
"learning_rate": 1.1322563902571227e-05, |
|
"loss": 0.0207, |
|
"reward": 1.100000023841858, |
|
"reward_std": 0.11636388301849365, |
|
"rewards/accuracy_reward": 0.10000000074505806, |
|
"rewards/format_reward": 1.0, |
|
"step": 1280 |
|
}, |
|
{ |
|
"completion_length": 117.49583740234375, |
|
"epoch": 0.514, |
|
"grad_norm": 0.6424020462953824, |
|
"kl": 0.5283203125, |
|
"learning_rate": 1.1253332335643043e-05, |
|
"loss": 0.0211, |
|
"reward": 1.0666667222976685, |
|
"reward_std": 0.12386635020375251, |
|
"rewards/accuracy_reward": 0.07083333469927311, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1285 |
|
}, |
|
{ |
|
"completion_length": 160.48750686645508, |
|
"epoch": 0.516, |
|
"grad_norm": 0.09722259646016615, |
|
"kl": 0.529296875, |
|
"learning_rate": 1.1184039683065014e-05, |
|
"loss": 0.0212, |
|
"reward": 1.037500023841858, |
|
"reward_std": 0.05643851235508919, |
|
"rewards/accuracy_reward": 0.0416666679084301, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1290 |
|
}, |
|
{ |
|
"completion_length": 180.3333366394043, |
|
"epoch": 0.518, |
|
"grad_norm": 1.4174701949018742, |
|
"kl": 0.5001953125, |
|
"learning_rate": 1.1114689322063255e-05, |
|
"loss": 0.02, |
|
"reward": 1.0708333730697632, |
|
"reward_std": 0.12866508215665817, |
|
"rewards/accuracy_reward": 0.08333333544433116, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 1295 |
|
}, |
|
{ |
|
"completion_length": 233.85000915527343, |
|
"epoch": 0.52, |
|
"grad_norm": 0.7535966288197256, |
|
"kl": 0.48671875, |
|
"learning_rate": 1.1045284632676535e-05, |
|
"loss": 0.0194, |
|
"reward": 1.066666692495346, |
|
"reward_std": 0.23691422641277313, |
|
"rewards/accuracy_reward": 0.1125000048428774, |
|
"rewards/format_reward": 0.9541666805744171, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"eval_completion_length": 262.862191420335, |
|
"eval_kl": 0.47250600961538464, |
|
"eval_loss": 0.019559284672141075, |
|
"eval_reward": 1.0528846520643969, |
|
"eval_reward_std": 0.21832889031905395, |
|
"eval_rewards/accuracy_reward": 0.10096154218682876, |
|
"eval_rewards/format_reward": 0.951923090677995, |
|
"eval_runtime": 222.8563, |
|
"eval_samples_per_second": 0.444, |
|
"eval_steps_per_second": 0.013, |
|
"step": 1300 |
|
}, |
|
{ |
|
"completion_length": 204.50000762939453, |
|
"epoch": 0.522, |
|
"grad_norm": 0.582966083499623, |
|
"kl": 0.4453125, |
|
"learning_rate": 1.0975828997591496e-05, |
|
"loss": 0.0178, |
|
"reward": 1.0583333730697633, |
|
"reward_std": 0.1721936509013176, |
|
"rewards/accuracy_reward": 0.07916666753590107, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 1305 |
|
}, |
|
{ |
|
"completion_length": 184.75417098999023, |
|
"epoch": 0.524, |
|
"grad_norm": 0.7734702622967983, |
|
"kl": 0.4318359375, |
|
"learning_rate": 1.0906325801977804e-05, |
|
"loss": 0.0173, |
|
"reward": 1.0625000536441802, |
|
"reward_std": 0.2130418173968792, |
|
"rewards/accuracy_reward": 0.0916666690260172, |
|
"rewards/format_reward": 0.9708333432674408, |
|
"step": 1310 |
|
}, |
|
{ |
|
"completion_length": 170.43750457763673, |
|
"epoch": 0.526, |
|
"grad_norm": 0.8051360088361499, |
|
"kl": 0.4501953125, |
|
"learning_rate": 1.083677843332316e-05, |
|
"loss": 0.018, |
|
"reward": 1.0791667103767395, |
|
"reward_std": 0.1652999296784401, |
|
"rewards/accuracy_reward": 0.10000000260770321, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 1315 |
|
}, |
|
{ |
|
"completion_length": 174.08750686645507, |
|
"epoch": 0.528, |
|
"grad_norm": 1.1111528717015793, |
|
"kl": 0.45625, |
|
"learning_rate": 1.0767190281268187e-05, |
|
"loss": 0.0183, |
|
"reward": 1.0208333671092986, |
|
"reward_std": 0.14639708772301674, |
|
"rewards/accuracy_reward": 0.05000000260770321, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 1320 |
|
}, |
|
{ |
|
"completion_length": 200.2291717529297, |
|
"epoch": 0.53, |
|
"grad_norm": 0.8268417258688071, |
|
"kl": 0.4689453125, |
|
"learning_rate": 1.0697564737441254e-05, |
|
"loss": 0.0188, |
|
"reward": 1.1083333611488342, |
|
"reward_std": 0.1567763350903988, |
|
"rewards/accuracy_reward": 0.1333333369344473, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 1325 |
|
}, |
|
{ |
|
"completion_length": 165.32083740234376, |
|
"epoch": 0.532, |
|
"grad_norm": 0.2358374954563172, |
|
"kl": 0.4279296875, |
|
"learning_rate": 1.0627905195293135e-05, |
|
"loss": 0.0171, |
|
"reward": 1.129166704416275, |
|
"reward_std": 0.1762892484664917, |
|
"rewards/accuracy_reward": 0.14583333693444728, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 1330 |
|
}, |
|
{ |
|
"completion_length": 213.89167251586915, |
|
"epoch": 0.534, |
|
"grad_norm": 0.7895004407616136, |
|
"kl": 0.3943359375, |
|
"learning_rate": 1.055821504993164e-05, |
|
"loss": 0.0158, |
|
"reward": 1.025000023841858, |
|
"reward_std": 0.17843054831027985, |
|
"rewards/accuracy_reward": 0.05833333432674408, |
|
"rewards/format_reward": 0.9666666865348816, |
|
"step": 1335 |
|
}, |
|
{ |
|
"completion_length": 142.70000381469725, |
|
"epoch": 0.536, |
|
"grad_norm": 0.8360376478700801, |
|
"kl": 0.432421875, |
|
"learning_rate": 1.0488497697956134e-05, |
|
"loss": 0.0173, |
|
"reward": 1.0916667044162751, |
|
"reward_std": 0.15858057737350464, |
|
"rewards/accuracy_reward": 0.10833333656191826, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 1340 |
|
}, |
|
{ |
|
"completion_length": 133.95417022705078, |
|
"epoch": 0.538, |
|
"grad_norm": 0.7793211768536685, |
|
"kl": 0.434765625, |
|
"learning_rate": 1.0418756537291996e-05, |
|
"loss": 0.0174, |
|
"reward": 1.095833384990692, |
|
"reward_std": 0.17280238792300223, |
|
"rewards/accuracy_reward": 0.11250000521540642, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 1345 |
|
}, |
|
{ |
|
"completion_length": 123.75417022705078, |
|
"epoch": 0.54, |
|
"grad_norm": 0.9625905804344617, |
|
"kl": 0.43828125, |
|
"learning_rate": 1.0348994967025012e-05, |
|
"loss": 0.0175, |
|
"reward": 1.125000035762787, |
|
"reward_std": 0.18591004833579064, |
|
"rewards/accuracy_reward": 0.13333333507180214, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 1350 |
|
}, |
|
{ |
|
"completion_length": 166.07500762939452, |
|
"epoch": 0.542, |
|
"grad_norm": 0.19769259077551565, |
|
"kl": 0.444921875, |
|
"learning_rate": 1.0279216387235691e-05, |
|
"loss": 0.0178, |
|
"reward": 1.029166692495346, |
|
"reward_std": 0.11366014182567596, |
|
"rewards/accuracy_reward": 0.05416666828095913, |
|
"rewards/format_reward": 0.975000011920929, |
|
"step": 1355 |
|
}, |
|
{ |
|
"completion_length": 174.76250457763672, |
|
"epoch": 0.544, |
|
"grad_norm": 0.8083428884974917, |
|
"kl": 0.395703125, |
|
"learning_rate": 1.0209424198833571e-05, |
|
"loss": 0.0158, |
|
"reward": 1.075000023841858, |
|
"reward_std": 0.16870326176285744, |
|
"rewards/accuracy_reward": 0.09583333618938923, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 1360 |
|
}, |
|
{ |
|
"completion_length": 231.99167251586914, |
|
"epoch": 0.546, |
|
"grad_norm": 0.3581588542986591, |
|
"kl": 0.419140625, |
|
"learning_rate": 1.0139621803391454e-05, |
|
"loss": 0.0168, |
|
"reward": 1.1416667103767395, |
|
"reward_std": 0.21144871562719345, |
|
"rewards/accuracy_reward": 0.1791666727513075, |
|
"rewards/format_reward": 0.9625000119209289, |
|
"step": 1365 |
|
}, |
|
{ |
|
"completion_length": 184.47084121704103, |
|
"epoch": 0.548, |
|
"grad_norm": 0.7966076879518822, |
|
"kl": 0.4314453125, |
|
"learning_rate": 1.0069812602979617e-05, |
|
"loss": 0.0173, |
|
"reward": 1.0208333790302277, |
|
"reward_std": 0.1700151339173317, |
|
"rewards/accuracy_reward": 0.05000000149011612, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 1370 |
|
}, |
|
{ |
|
"completion_length": 129.3000045776367, |
|
"epoch": 0.55, |
|
"grad_norm": 0.8350147510340598, |
|
"kl": 0.444921875, |
|
"learning_rate": 1e-05, |
|
"loss": 0.0178, |
|
"reward": 1.1250000238418578, |
|
"reward_std": 0.17009866163134574, |
|
"rewards/accuracy_reward": 0.1291666690260172, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1375 |
|
}, |
|
{ |
|
"completion_length": 128.46250381469727, |
|
"epoch": 0.552, |
|
"grad_norm": 0.40110881221719935, |
|
"kl": 0.391796875, |
|
"learning_rate": 9.930187397020385e-06, |
|
"loss": 0.0157, |
|
"reward": 1.0666666984558106, |
|
"reward_std": 0.09804645925760269, |
|
"rewards/accuracy_reward": 0.06666666828095913, |
|
"rewards/format_reward": 1.0, |
|
"step": 1380 |
|
}, |
|
{ |
|
"completion_length": 148.71250610351564, |
|
"epoch": 0.554, |
|
"grad_norm": 0.8104207439007759, |
|
"kl": 0.4185546875, |
|
"learning_rate": 9.860378196608549e-06, |
|
"loss": 0.0167, |
|
"reward": 1.1000000357627868, |
|
"reward_std": 0.16956989839673042, |
|
"rewards/accuracy_reward": 0.10416666902601719, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1385 |
|
}, |
|
{ |
|
"completion_length": 138.62917251586913, |
|
"epoch": 0.556, |
|
"grad_norm": 0.8861813782686773, |
|
"kl": 0.404296875, |
|
"learning_rate": 9.790575801166432e-06, |
|
"loss": 0.0162, |
|
"reward": 1.0875000476837158, |
|
"reward_std": 0.13599317520856857, |
|
"rewards/accuracy_reward": 0.09583333730697632, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 1390 |
|
}, |
|
{ |
|
"completion_length": 129.62500534057617, |
|
"epoch": 0.558, |
|
"grad_norm": 0.7959816358964777, |
|
"kl": 0.3986328125, |
|
"learning_rate": 9.720783612764314e-06, |
|
"loss": 0.0159, |
|
"reward": 1.1583333730697631, |
|
"reward_std": 0.14700583443045617, |
|
"rewards/accuracy_reward": 0.16250000670552253, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1395 |
|
}, |
|
{ |
|
"completion_length": 140.90833587646483, |
|
"epoch": 0.56, |
|
"grad_norm": 1.3018520628065517, |
|
"kl": 0.44375, |
|
"learning_rate": 9.651005032974994e-06, |
|
"loss": 0.0178, |
|
"reward": 1.0708333611488343, |
|
"reward_std": 0.13808816894888878, |
|
"rewards/accuracy_reward": 0.08750000149011612, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"eval_completion_length": 139.72115854116586, |
|
"eval_kl": 0.42322716346153844, |
|
"eval_loss": 0.018276819959282875, |
|
"eval_reward": 1.118589768042931, |
|
"eval_reward_std": 0.17192490513508135, |
|
"eval_rewards/accuracy_reward": 0.1314102586072225, |
|
"eval_rewards/format_reward": 0.987179499406081, |
|
"eval_runtime": 160.9908, |
|
"eval_samples_per_second": 0.615, |
|
"eval_steps_per_second": 0.019, |
|
"step": 1400 |
|
}, |
|
{ |
|
"completion_length": 120.7958366394043, |
|
"epoch": 0.562, |
|
"grad_norm": 0.7447224368194102, |
|
"kl": 0.4337890625, |
|
"learning_rate": 9.581243462708007e-06, |
|
"loss": 0.0174, |
|
"reward": 1.0791666984558106, |
|
"reward_std": 0.10075020045042038, |
|
"rewards/accuracy_reward": 0.0791666679084301, |
|
"rewards/format_reward": 1.0, |
|
"step": 1405 |
|
}, |
|
{ |
|
"completion_length": 123.17500381469726, |
|
"epoch": 0.564, |
|
"grad_norm": 0.6943691730102636, |
|
"kl": 0.4384765625, |
|
"learning_rate": 9.511502302043867e-06, |
|
"loss": 0.0175, |
|
"reward": 1.0791667103767395, |
|
"reward_std": 0.16896116137504577, |
|
"rewards/accuracy_reward": 0.09166666865348816, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 1410 |
|
}, |
|
{ |
|
"completion_length": 144.02500381469727, |
|
"epoch": 0.566, |
|
"grad_norm": 0.7135215199384104, |
|
"kl": 0.435546875, |
|
"learning_rate": 9.441784950068362e-06, |
|
"loss": 0.0174, |
|
"reward": 1.0666666984558106, |
|
"reward_std": 0.11416204124689103, |
|
"rewards/accuracy_reward": 0.07500000111758709, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 1415 |
|
}, |
|
{ |
|
"completion_length": 157.21250534057617, |
|
"epoch": 0.568, |
|
"grad_norm": 1.0565897923645307, |
|
"kl": 0.4328125, |
|
"learning_rate": 9.372094804706867e-06, |
|
"loss": 0.0173, |
|
"reward": 1.0625000476837159, |
|
"reward_std": 0.13348714262247086, |
|
"rewards/accuracy_reward": 0.0750000026077032, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 1420 |
|
}, |
|
{ |
|
"completion_length": 140.8791717529297, |
|
"epoch": 0.57, |
|
"grad_norm": 1.0299599122671386, |
|
"kl": 0.5, |
|
"learning_rate": 9.302435262558748e-06, |
|
"loss": 0.02, |
|
"reward": 1.1166666924953461, |
|
"reward_std": 0.2273202881217003, |
|
"rewards/accuracy_reward": 0.14166666977107525, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 1425 |
|
}, |
|
{ |
|
"completion_length": 180.52083892822264, |
|
"epoch": 0.572, |
|
"grad_norm": 0.7151881659016156, |
|
"kl": 0.4361328125, |
|
"learning_rate": 9.232809718731815e-06, |
|
"loss": 0.0174, |
|
"reward": 1.091666692495346, |
|
"reward_std": 0.15498905703425409, |
|
"rewards/accuracy_reward": 0.11250000074505806, |
|
"rewards/format_reward": 0.9791666746139527, |
|
"step": 1430 |
|
}, |
|
{ |
|
"completion_length": 192.38750686645508, |
|
"epoch": 0.574, |
|
"grad_norm": 0.629276808623102, |
|
"kl": 0.4154296875, |
|
"learning_rate": 9.163221566676847e-06, |
|
"loss": 0.0166, |
|
"reward": 1.0916667103767395, |
|
"reward_std": 0.16951324194669723, |
|
"rewards/accuracy_reward": 0.1125000048428774, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 1435 |
|
}, |
|
{ |
|
"completion_length": 197.38333587646486, |
|
"epoch": 0.576, |
|
"grad_norm": 0.8364125432332793, |
|
"kl": 0.4513671875, |
|
"learning_rate": 9.093674198022201e-06, |
|
"loss": 0.0181, |
|
"reward": 1.108333373069763, |
|
"reward_std": 0.17300009950995446, |
|
"rewards/accuracy_reward": 0.12083333842456341, |
|
"rewards/format_reward": 0.9875000059604645, |
|
"step": 1440 |
|
}, |
|
{ |
|
"completion_length": 309.7000137329102, |
|
"epoch": 0.578, |
|
"grad_norm": 0.596028075225735, |
|
"kl": 0.4134765625, |
|
"learning_rate": 9.024171002408507e-06, |
|
"loss": 0.0165, |
|
"reward": 1.125000035762787, |
|
"reward_std": 0.18778266608715058, |
|
"rewards/accuracy_reward": 0.1416666701436043, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 1445 |
|
}, |
|
{ |
|
"completion_length": 327.8416778564453, |
|
"epoch": 0.58, |
|
"grad_norm": 0.5434179981843781, |
|
"kl": 0.403515625, |
|
"learning_rate": 8.954715367323468e-06, |
|
"loss": 0.0161, |
|
"reward": 1.0708333730697632, |
|
"reward_std": 0.2297428011894226, |
|
"rewards/accuracy_reward": 0.10833333507180214, |
|
"rewards/format_reward": 0.962500023841858, |
|
"step": 1450 |
|
}, |
|
{ |
|
"completion_length": 294.1958435058594, |
|
"epoch": 0.582, |
|
"grad_norm": 0.5416204560922091, |
|
"kl": 0.355078125, |
|
"learning_rate": 8.885310677936746e-06, |
|
"loss": 0.0142, |
|
"reward": 1.112500047683716, |
|
"reward_std": 0.18459817096590997, |
|
"rewards/accuracy_reward": 0.11666666939854622, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1455 |
|
}, |
|
{ |
|
"completion_length": 270.0791732788086, |
|
"epoch": 0.584, |
|
"grad_norm": 0.6575350557585025, |
|
"kl": 0.38203125, |
|
"learning_rate": 8.815960316934991e-06, |
|
"loss": 0.0153, |
|
"reward": 1.075000035762787, |
|
"reward_std": 0.1396777369081974, |
|
"rewards/accuracy_reward": 0.1000000037252903, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 1460 |
|
}, |
|
{ |
|
"completion_length": 206.42917556762694, |
|
"epoch": 0.586, |
|
"grad_norm": 0.496919053076702, |
|
"kl": 0.3818359375, |
|
"learning_rate": 8.746667664356957e-06, |
|
"loss": 0.0153, |
|
"reward": 1.0708333611488343, |
|
"reward_std": 0.08033778220415115, |
|
"rewards/accuracy_reward": 0.07916666939854622, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 1465 |
|
}, |
|
{ |
|
"completion_length": 216.08333892822264, |
|
"epoch": 0.588, |
|
"grad_norm": 0.44350270218884014, |
|
"kl": 0.3541015625, |
|
"learning_rate": 8.677436097428775e-06, |
|
"loss": 0.0142, |
|
"reward": 1.0500000238418579, |
|
"reward_std": 0.13078340515494347, |
|
"rewards/accuracy_reward": 0.07083333469927311, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 1470 |
|
}, |
|
{ |
|
"completion_length": 235.42917175292968, |
|
"epoch": 0.59, |
|
"grad_norm": 0.31256519639828234, |
|
"kl": 0.369140625, |
|
"learning_rate": 8.60826899039935e-06, |
|
"loss": 0.0148, |
|
"reward": 1.0958333730697631, |
|
"reward_std": 0.1650187000632286, |
|
"rewards/accuracy_reward": 0.11666666977107525, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 1475 |
|
}, |
|
{ |
|
"completion_length": 233.67501068115234, |
|
"epoch": 0.592, |
|
"grad_norm": 0.3462078333529471, |
|
"kl": 0.3658203125, |
|
"learning_rate": 8.539169714375885e-06, |
|
"loss": 0.0146, |
|
"reward": 1.0666667103767395, |
|
"reward_std": 0.1788861334323883, |
|
"rewards/accuracy_reward": 0.09166667126119137, |
|
"rewards/format_reward": 0.9750000238418579, |
|
"step": 1480 |
|
}, |
|
{ |
|
"completion_length": 278.04584045410155, |
|
"epoch": 0.594, |
|
"grad_norm": 0.4375001521877564, |
|
"kl": 0.3640625, |
|
"learning_rate": 8.47014163715962e-06, |
|
"loss": 0.0146, |
|
"reward": 1.062500035762787, |
|
"reward_std": 0.2277390219271183, |
|
"rewards/accuracy_reward": 0.10833333730697632, |
|
"rewards/format_reward": 0.9541666865348816, |
|
"step": 1485 |
|
}, |
|
{ |
|
"completion_length": 227.55834045410157, |
|
"epoch": 0.596, |
|
"grad_norm": 0.4936381128270299, |
|
"kl": 0.384375, |
|
"learning_rate": 8.401188123081653e-06, |
|
"loss": 0.0154, |
|
"reward": 1.1166666746139526, |
|
"reward_std": 0.25464622527360914, |
|
"rewards/accuracy_reward": 0.15000000260770321, |
|
"rewards/format_reward": 0.9666666805744171, |
|
"step": 1490 |
|
}, |
|
{ |
|
"completion_length": 211.10417251586915, |
|
"epoch": 0.598, |
|
"grad_norm": 0.8293632244296539, |
|
"kl": 0.4357421875, |
|
"learning_rate": 8.332312532838978e-06, |
|
"loss": 0.0174, |
|
"reward": 1.0833333611488343, |
|
"reward_std": 0.15178123712539673, |
|
"rewards/accuracy_reward": 0.10000000074505806, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 1495 |
|
}, |
|
{ |
|
"completion_length": 235.98750534057618, |
|
"epoch": 0.6, |
|
"grad_norm": 0.7367990374860012, |
|
"kl": 0.376953125, |
|
"learning_rate": 8.263518223330698e-06, |
|
"loss": 0.0151, |
|
"reward": 1.0416666984558105, |
|
"reward_std": 0.2215484268963337, |
|
"rewards/accuracy_reward": 0.0833333346992731, |
|
"rewards/format_reward": 0.9583333492279053, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"eval_completion_length": 184.29808279184195, |
|
"eval_kl": 0.38716947115384615, |
|
"eval_loss": 0.014998482540249825, |
|
"eval_reward": 1.107371830023252, |
|
"eval_reward_std": 0.20388395740435675, |
|
"eval_rewards/accuracy_reward": 0.13141025932362446, |
|
"eval_rewards/format_reward": 0.9759615522164565, |
|
"eval_runtime": 170.0138, |
|
"eval_samples_per_second": 0.582, |
|
"eval_steps_per_second": 0.018, |
|
"step": 1500 |
|
}, |
|
{ |
|
"completion_length": 209.3416748046875, |
|
"epoch": 0.602, |
|
"grad_norm": 0.8772003587312531, |
|
"kl": 0.355859375, |
|
"learning_rate": 8.194808547494401e-06, |
|
"loss": 0.0142, |
|
"reward": 1.1166667103767396, |
|
"reward_std": 0.24232522323727607, |
|
"rewards/accuracy_reward": 0.1416666701436043, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 1505 |
|
}, |
|
{ |
|
"completion_length": 188.06250610351563, |
|
"epoch": 0.604, |
|
"grad_norm": 0.4103081085850965, |
|
"kl": 0.36171875, |
|
"learning_rate": 8.126186854142752e-06, |
|
"loss": 0.0145, |
|
"reward": 1.1583333730697631, |
|
"reward_std": 0.18080894947052, |
|
"rewards/accuracy_reward": 0.16666667051613332, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 1510 |
|
}, |
|
{ |
|
"completion_length": 230.85834197998048, |
|
"epoch": 0.606, |
|
"grad_norm": 0.18081100841917966, |
|
"kl": 0.355859375, |
|
"learning_rate": 8.057656487800283e-06, |
|
"loss": 0.0142, |
|
"reward": 1.0333333611488342, |
|
"reward_std": 0.13328943625092507, |
|
"rewards/accuracy_reward": 0.05416666828095913, |
|
"rewards/format_reward": 0.9791666746139527, |
|
"step": 1515 |
|
}, |
|
{ |
|
"completion_length": 233.42083892822265, |
|
"epoch": 0.608, |
|
"grad_norm": 0.6317667118077563, |
|
"kl": 0.3671875, |
|
"learning_rate": 7.989220788540356e-06, |
|
"loss": 0.0147, |
|
"reward": 1.1125000238418579, |
|
"reward_std": 0.21407929211854934, |
|
"rewards/accuracy_reward": 0.1375000037252903, |
|
"rewards/format_reward": 0.975000011920929, |
|
"step": 1520 |
|
}, |
|
{ |
|
"completion_length": 337.39584197998045, |
|
"epoch": 0.61, |
|
"grad_norm": 0.6816123949684233, |
|
"kl": 0.33828125, |
|
"learning_rate": 7.92088309182241e-06, |
|
"loss": 0.0135, |
|
"reward": 1.0833333730697632, |
|
"reward_std": 0.240230230987072, |
|
"rewards/accuracy_reward": 0.12916667051613331, |
|
"rewards/format_reward": 0.9541666746139527, |
|
"step": 1525 |
|
}, |
|
{ |
|
"completion_length": 224.86250762939454, |
|
"epoch": 0.612, |
|
"grad_norm": 0.5107336103473655, |
|
"kl": 0.3603515625, |
|
"learning_rate": 7.852646728329368e-06, |
|
"loss": 0.0144, |
|
"reward": 1.0916667103767395, |
|
"reward_std": 0.20342101380228997, |
|
"rewards/accuracy_reward": 0.10833333693444729, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 1530 |
|
}, |
|
{ |
|
"completion_length": 253.57084197998046, |
|
"epoch": 0.614, |
|
"grad_norm": 0.7130506106876767, |
|
"kl": 0.4244140625, |
|
"learning_rate": 7.784515023805328e-06, |
|
"loss": 0.017, |
|
"reward": 1.137500023841858, |
|
"reward_std": 0.19924872741103172, |
|
"rewards/accuracy_reward": 0.14583333767950535, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 1535 |
|
}, |
|
{ |
|
"completion_length": 246.7541748046875, |
|
"epoch": 0.616, |
|
"grad_norm": 0.8202318016415265, |
|
"kl": 0.3501953125, |
|
"learning_rate": 7.716491298893443e-06, |
|
"loss": 0.014, |
|
"reward": 1.108333373069763, |
|
"reward_std": 0.1968993701040745, |
|
"rewards/accuracy_reward": 0.12500000558793545, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 1540 |
|
}, |
|
{ |
|
"completion_length": 223.5791763305664, |
|
"epoch": 0.618, |
|
"grad_norm": 0.5800169184427162, |
|
"kl": 0.3650390625, |
|
"learning_rate": 7.6485788689741e-06, |
|
"loss": 0.0146, |
|
"reward": 1.0625000238418578, |
|
"reward_std": 0.14876977801322938, |
|
"rewards/accuracy_reward": 0.07500000298023224, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 1545 |
|
}, |
|
{ |
|
"completion_length": 228.31250457763673, |
|
"epoch": 0.62, |
|
"grad_norm": 0.7382503773664825, |
|
"kl": 0.4041015625, |
|
"learning_rate": 7.580781044003324e-06, |
|
"loss": 0.0162, |
|
"reward": 1.0916667103767395, |
|
"reward_std": 0.2120341219007969, |
|
"rewards/accuracy_reward": 0.1125000037252903, |
|
"rewards/format_reward": 0.9791666746139527, |
|
"step": 1550 |
|
}, |
|
{ |
|
"completion_length": 240.68334045410157, |
|
"epoch": 0.622, |
|
"grad_norm": 0.5083007731042197, |
|
"kl": 0.4341796875, |
|
"learning_rate": 7.513101128351454e-06, |
|
"loss": 0.0174, |
|
"reward": 1.0833333730697632, |
|
"reward_std": 0.22213384434580802, |
|
"rewards/accuracy_reward": 0.12083333805203438, |
|
"rewards/format_reward": 0.9625000178813934, |
|
"step": 1555 |
|
}, |
|
{ |
|
"completion_length": 246.29584045410155, |
|
"epoch": 0.624, |
|
"grad_norm": 0.7661993634854364, |
|
"kl": 0.3583984375, |
|
"learning_rate": 7.445542420642097e-06, |
|
"loss": 0.0143, |
|
"reward": 1.1000000298023225, |
|
"reward_std": 0.25303551703691485, |
|
"rewards/accuracy_reward": 0.12916667126119136, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 1560 |
|
}, |
|
{ |
|
"completion_length": 194.09584045410156, |
|
"epoch": 0.626, |
|
"grad_norm": 0.5324394879554415, |
|
"kl": 0.3693359375, |
|
"learning_rate": 7.378108213591355e-06, |
|
"loss": 0.0148, |
|
"reward": 1.1000000357627868, |
|
"reward_std": 0.1730000950396061, |
|
"rewards/accuracy_reward": 0.10833333656191826, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 1565 |
|
}, |
|
{ |
|
"completion_length": 228.5166748046875, |
|
"epoch": 0.628, |
|
"grad_norm": 0.6241877369872114, |
|
"kl": 0.36484375, |
|
"learning_rate": 7.310801793847344e-06, |
|
"loss": 0.0146, |
|
"reward": 1.1125000357627868, |
|
"reward_std": 0.2222403332591057, |
|
"rewards/accuracy_reward": 0.13750000558793546, |
|
"rewards/format_reward": 0.9750000238418579, |
|
"step": 1570 |
|
}, |
|
{ |
|
"completion_length": 197.89167175292968, |
|
"epoch": 0.63, |
|
"grad_norm": 0.329436159848971, |
|
"kl": 0.3484375, |
|
"learning_rate": 7.243626441830009e-06, |
|
"loss": 0.0139, |
|
"reward": 1.083333384990692, |
|
"reward_std": 0.12386635392904281, |
|
"rewards/accuracy_reward": 0.08750000223517418, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1575 |
|
}, |
|
{ |
|
"completion_length": 220.6916748046875, |
|
"epoch": 0.632, |
|
"grad_norm": 0.9395190691678801, |
|
"kl": 0.35390625, |
|
"learning_rate": 7.176585431571235e-06, |
|
"loss": 0.0141, |
|
"reward": 1.1083333671092988, |
|
"reward_std": 0.18841607496142387, |
|
"rewards/accuracy_reward": 0.12916667014360428, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 1580 |
|
}, |
|
{ |
|
"completion_length": 232.3625045776367, |
|
"epoch": 0.634, |
|
"grad_norm": 0.788552192585705, |
|
"kl": 0.3609375, |
|
"learning_rate": 7.109682030555283e-06, |
|
"loss": 0.0144, |
|
"reward": 1.0833333432674408, |
|
"reward_std": 0.1897010862827301, |
|
"rewards/accuracy_reward": 0.1041666679084301, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 1585 |
|
}, |
|
{ |
|
"completion_length": 347.0833404541016, |
|
"epoch": 0.636, |
|
"grad_norm": 0.38934477847250354, |
|
"kl": 0.3583984375, |
|
"learning_rate": 7.042919499559538e-06, |
|
"loss": 0.0144, |
|
"reward": 1.004166692495346, |
|
"reward_std": 0.25163830146193505, |
|
"rewards/accuracy_reward": 0.07916666902601718, |
|
"rewards/format_reward": 0.9250000238418579, |
|
"step": 1590 |
|
}, |
|
{ |
|
"completion_length": 289.96250610351564, |
|
"epoch": 0.638, |
|
"grad_norm": 1.312643011859593, |
|
"kl": 0.3607421875, |
|
"learning_rate": 6.976301092495556e-06, |
|
"loss": 0.0144, |
|
"reward": 1.0250000357627869, |
|
"reward_std": 0.21382493525743484, |
|
"rewards/accuracy_reward": 0.08333333544433116, |
|
"rewards/format_reward": 0.9416666865348816, |
|
"step": 1595 |
|
}, |
|
{ |
|
"completion_length": 244.08750610351564, |
|
"epoch": 0.64, |
|
"grad_norm": 0.8343345060342183, |
|
"kl": 0.3611328125, |
|
"learning_rate": 6.909830056250527e-06, |
|
"loss": 0.0144, |
|
"reward": 1.0625000476837159, |
|
"reward_std": 0.20852040648460388, |
|
"rewards/accuracy_reward": 0.09166666939854622, |
|
"rewards/format_reward": 0.9708333551883698, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"eval_completion_length": 226.89583998460037, |
|
"eval_kl": 0.3518629807692308, |
|
"eval_loss": 0.01314304769039154, |
|
"eval_reward": 1.125000041264754, |
|
"eval_reward_std": 0.2221679870898907, |
|
"eval_rewards/accuracy_reward": 0.15224359270471793, |
|
"eval_rewards/format_reward": 0.9727564316529494, |
|
"eval_runtime": 188.0925, |
|
"eval_samples_per_second": 0.526, |
|
"eval_steps_per_second": 0.016, |
|
"step": 1600 |
|
}, |
|
{ |
|
"completion_length": 264.52917327880857, |
|
"epoch": 0.642, |
|
"grad_norm": 0.8851432430573316, |
|
"kl": 0.36953125, |
|
"learning_rate": 6.843509630528977e-06, |
|
"loss": 0.0148, |
|
"reward": 1.025000011920929, |
|
"reward_std": 0.20359539091587067, |
|
"rewards/accuracy_reward": 0.07500000074505805, |
|
"rewards/format_reward": 0.9500000178813934, |
|
"step": 1605 |
|
}, |
|
{ |
|
"completion_length": 219.15416870117187, |
|
"epoch": 0.644, |
|
"grad_norm": 0.8558209077276151, |
|
"kl": 0.3716796875, |
|
"learning_rate": 6.777343047694891e-06, |
|
"loss": 0.0148, |
|
"reward": 1.108333373069763, |
|
"reward_std": 0.17567697837948798, |
|
"rewards/accuracy_reward": 0.1375000011175871, |
|
"rewards/format_reward": 0.9708333551883698, |
|
"step": 1610 |
|
}, |
|
{ |
|
"completion_length": 234.85000610351562, |
|
"epoch": 0.646, |
|
"grad_norm": 0.432451594605514, |
|
"kl": 0.3794921875, |
|
"learning_rate": 6.711333532614168e-06, |
|
"loss": 0.0152, |
|
"reward": 1.0583333730697633, |
|
"reward_std": 0.21975762471556665, |
|
"rewards/accuracy_reward": 0.10833333730697632, |
|
"rewards/format_reward": 0.9500000238418579, |
|
"step": 1615 |
|
}, |
|
{ |
|
"completion_length": 213.1041717529297, |
|
"epoch": 0.648, |
|
"grad_norm": 1.3247688082665616, |
|
"kl": 0.3876953125, |
|
"learning_rate": 6.645484302497452e-06, |
|
"loss": 0.0155, |
|
"reward": 1.0958333730697631, |
|
"reward_std": 0.26502900272607804, |
|
"rewards/accuracy_reward": 0.1375000037252903, |
|
"rewards/format_reward": 0.9583333551883697, |
|
"step": 1620 |
|
}, |
|
{ |
|
"completion_length": 180.1125045776367, |
|
"epoch": 0.65, |
|
"grad_norm": 0.6778703705706532, |
|
"kl": 0.36484375, |
|
"learning_rate": 6.579798566743314e-06, |
|
"loss": 0.0146, |
|
"reward": 1.1000000298023225, |
|
"reward_std": 0.1525876834988594, |
|
"rewards/accuracy_reward": 0.12083333656191826, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 1625 |
|
}, |
|
{ |
|
"completion_length": 185.93334350585937, |
|
"epoch": 0.652, |
|
"grad_norm": 0.6029963592777737, |
|
"kl": 0.3892578125, |
|
"learning_rate": 6.5142795267818505e-06, |
|
"loss": 0.0156, |
|
"reward": 1.145833384990692, |
|
"reward_std": 0.19070877507328987, |
|
"rewards/accuracy_reward": 0.15833333805203437, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 1630 |
|
}, |
|
{ |
|
"completion_length": 230.4250030517578, |
|
"epoch": 0.654, |
|
"grad_norm": 0.5157745546659512, |
|
"kl": 0.3515625, |
|
"learning_rate": 6.448930375918632e-06, |
|
"loss": 0.0141, |
|
"reward": 1.0833333551883697, |
|
"reward_std": 0.18237519189715384, |
|
"rewards/accuracy_reward": 0.11250000335276127, |
|
"rewards/format_reward": 0.9708333432674408, |
|
"step": 1635 |
|
}, |
|
{ |
|
"completion_length": 257.05833740234374, |
|
"epoch": 0.656, |
|
"grad_norm": 0.7811139925774508, |
|
"kl": 0.3455078125, |
|
"learning_rate": 6.383754299179079e-06, |
|
"loss": 0.0138, |
|
"reward": 1.1041667103767394, |
|
"reward_std": 0.24514180719852446, |
|
"rewards/accuracy_reward": 0.1458333358168602, |
|
"rewards/format_reward": 0.9583333492279053, |
|
"step": 1640 |
|
}, |
|
{ |
|
"completion_length": 238.72083892822266, |
|
"epoch": 0.658, |
|
"grad_norm": 0.5624873094851147, |
|
"kl": 0.3234375, |
|
"learning_rate": 6.318754473153221e-06, |
|
"loss": 0.0129, |
|
"reward": 1.1041667103767394, |
|
"reward_std": 0.1908385895192623, |
|
"rewards/accuracy_reward": 0.1166666705161333, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 1645 |
|
}, |
|
{ |
|
"completion_length": 294.3958404541016, |
|
"epoch": 0.66, |
|
"grad_norm": 0.4438652564966346, |
|
"kl": 0.374609375, |
|
"learning_rate": 6.25393406584088e-06, |
|
"loss": 0.015, |
|
"reward": 1.1125000238418579, |
|
"reward_std": 0.18262089043855667, |
|
"rewards/accuracy_reward": 0.1416666701436043, |
|
"rewards/format_reward": 0.9708333551883698, |
|
"step": 1650 |
|
}, |
|
{ |
|
"completion_length": 280.0666732788086, |
|
"epoch": 0.662, |
|
"grad_norm": 0.8501811034862022, |
|
"kl": 0.4396484375, |
|
"learning_rate": 6.18929623649726e-06, |
|
"loss": 0.0176, |
|
"reward": 1.129166728258133, |
|
"reward_std": 0.2677574008703232, |
|
"rewards/accuracy_reward": 0.17083334028720856, |
|
"rewards/format_reward": 0.9583333551883697, |
|
"step": 1655 |
|
}, |
|
{ |
|
"completion_length": 256.05001068115234, |
|
"epoch": 0.664, |
|
"grad_norm": 0.786466356675622, |
|
"kl": 0.3712890625, |
|
"learning_rate": 6.124844135478971e-06, |
|
"loss": 0.0148, |
|
"reward": 1.1083333611488342, |
|
"reward_std": 0.15910932496190072, |
|
"rewards/accuracy_reward": 0.12916667237877846, |
|
"rewards/format_reward": 0.9791666746139527, |
|
"step": 1660 |
|
}, |
|
{ |
|
"completion_length": 269.5541748046875, |
|
"epoch": 0.666, |
|
"grad_norm": 0.6054140410900962, |
|
"kl": 0.4185546875, |
|
"learning_rate": 6.06058090409049e-06, |
|
"loss": 0.0167, |
|
"reward": 1.1041666984558105, |
|
"reward_std": 0.17600802853703498, |
|
"rewards/accuracy_reward": 0.11666667014360428, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 1665 |
|
}, |
|
{ |
|
"completion_length": 195.0500061035156, |
|
"epoch": 0.668, |
|
"grad_norm": 0.09195814274917967, |
|
"kl": 0.35390625, |
|
"learning_rate": 5.996509674431053e-06, |
|
"loss": 0.0141, |
|
"reward": 1.0541666984558105, |
|
"reward_std": 0.08435339778661728, |
|
"rewards/accuracy_reward": 0.07083333544433117, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 1670 |
|
}, |
|
{ |
|
"completion_length": 176.61250228881835, |
|
"epoch": 0.67, |
|
"grad_norm": 0.19096552721075902, |
|
"kl": 0.3958984375, |
|
"learning_rate": 5.932633569242e-06, |
|
"loss": 0.0158, |
|
"reward": 1.0625000476837159, |
|
"reward_std": 0.09324773177504539, |
|
"rewards/accuracy_reward": 0.06666666865348816, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1675 |
|
}, |
|
{ |
|
"completion_length": 221.21250686645507, |
|
"epoch": 0.672, |
|
"grad_norm": 0.5670018996024074, |
|
"kl": 0.3828125, |
|
"learning_rate": 5.868955701754584e-06, |
|
"loss": 0.0153, |
|
"reward": 1.0833333730697632, |
|
"reward_std": 0.19533313140273095, |
|
"rewards/accuracy_reward": 0.10833333730697632, |
|
"rewards/format_reward": 0.9750000238418579, |
|
"step": 1680 |
|
}, |
|
{ |
|
"completion_length": 200.27083740234374, |
|
"epoch": 0.674, |
|
"grad_norm": 0.7884552413539826, |
|
"kl": 0.36875, |
|
"learning_rate": 5.8054791755382286e-06, |
|
"loss": 0.0148, |
|
"reward": 1.0708333730697632, |
|
"reward_std": 0.1910129614174366, |
|
"rewards/accuracy_reward": 0.08750000186264514, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 1685 |
|
}, |
|
{ |
|
"completion_length": 166.4583381652832, |
|
"epoch": 0.676, |
|
"grad_norm": 1.0096233303682502, |
|
"kl": 0.3990234375, |
|
"learning_rate": 5.742207084349274e-06, |
|
"loss": 0.016, |
|
"reward": 1.112500047683716, |
|
"reward_std": 0.20021617263555527, |
|
"rewards/accuracy_reward": 0.12500000484287738, |
|
"rewards/format_reward": 0.9875000059604645, |
|
"step": 1690 |
|
}, |
|
{ |
|
"completion_length": 129.7916702270508, |
|
"epoch": 0.678, |
|
"grad_norm": 0.7510832019121336, |
|
"kl": 0.4009765625, |
|
"learning_rate": 5.679142511980176e-06, |
|
"loss": 0.016, |
|
"reward": 1.1500000476837158, |
|
"reward_std": 0.16661179661750794, |
|
"rewards/accuracy_reward": 0.1541666716337204, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1695 |
|
}, |
|
{ |
|
"completion_length": 149.3833381652832, |
|
"epoch": 0.68, |
|
"grad_norm": 0.2240382817532387, |
|
"kl": 0.3921875, |
|
"learning_rate": 5.616288532109225e-06, |
|
"loss": 0.0157, |
|
"reward": 1.1041666924953462, |
|
"reward_std": 0.17280239164829253, |
|
"rewards/accuracy_reward": 0.12083333656191826, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"eval_completion_length": 175.23558044433594, |
|
"eval_kl": 0.3936298076923077, |
|
"eval_loss": 0.015872277319431305, |
|
"eval_reward": 1.10737184377817, |
|
"eval_reward_std": 0.19909162332232183, |
|
"eval_rewards/accuracy_reward": 0.1378205187905293, |
|
"eval_rewards/format_reward": 0.9695512927495517, |
|
"eval_runtime": 187.6606, |
|
"eval_samples_per_second": 0.528, |
|
"eval_steps_per_second": 0.016, |
|
"step": 1700 |
|
}, |
|
{ |
|
"completion_length": 202.48333892822265, |
|
"epoch": 0.682, |
|
"grad_norm": 1.2902182940960338, |
|
"kl": 0.4435546875, |
|
"learning_rate": 5.553648208150728e-06, |
|
"loss": 0.0177, |
|
"reward": 1.0916666984558105, |
|
"reward_std": 0.27394800409674647, |
|
"rewards/accuracy_reward": 0.13333333767950534, |
|
"rewards/format_reward": 0.9583333551883697, |
|
"step": 1705 |
|
}, |
|
{ |
|
"completion_length": 217.67917251586914, |
|
"epoch": 0.684, |
|
"grad_norm": 0.8042172808984961, |
|
"kl": 0.4064453125, |
|
"learning_rate": 5.491224593105695e-06, |
|
"loss": 0.0163, |
|
"reward": 1.041666704416275, |
|
"reward_std": 0.19876980185508727, |
|
"rewards/accuracy_reward": 0.07916666939854622, |
|
"rewards/format_reward": 0.9625000178813934, |
|
"step": 1710 |
|
}, |
|
{ |
|
"completion_length": 214.5916732788086, |
|
"epoch": 0.686, |
|
"grad_norm": 0.9930554223957376, |
|
"kl": 0.380078125, |
|
"learning_rate": 5.429020729413062e-06, |
|
"loss": 0.0152, |
|
"reward": 1.108333373069763, |
|
"reward_std": 0.26563555002212524, |
|
"rewards/accuracy_reward": 0.14166666939854622, |
|
"rewards/format_reward": 0.9666666865348816, |
|
"step": 1715 |
|
}, |
|
{ |
|
"completion_length": 202.17083816528321, |
|
"epoch": 0.688, |
|
"grad_norm": 0.8686412520549874, |
|
"kl": 0.3771484375, |
|
"learning_rate": 5.367039648801386e-06, |
|
"loss": 0.0151, |
|
"reward": 1.141666716337204, |
|
"reward_std": 0.26376511752605436, |
|
"rewards/accuracy_reward": 0.17500000558793544, |
|
"rewards/format_reward": 0.9666666805744171, |
|
"step": 1720 |
|
}, |
|
{ |
|
"completion_length": 263.8541732788086, |
|
"epoch": 0.69, |
|
"grad_norm": 1.2120904832599089, |
|
"kl": 0.3544921875, |
|
"learning_rate": 5.305284372141095e-06, |
|
"loss": 0.0142, |
|
"reward": 1.066666716337204, |
|
"reward_std": 0.18272737711668013, |
|
"rewards/accuracy_reward": 0.09583333693444729, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 1725 |
|
}, |
|
{ |
|
"completion_length": 318.8791748046875, |
|
"epoch": 0.692, |
|
"grad_norm": 0.7490910563872372, |
|
"kl": 0.3595703125, |
|
"learning_rate": 5.243757909297247e-06, |
|
"loss": 0.0144, |
|
"reward": 1.0375000298023225, |
|
"reward_std": 0.20392291769385337, |
|
"rewards/accuracy_reward": 0.07500000186264515, |
|
"rewards/format_reward": 0.962500023841858, |
|
"step": 1730 |
|
}, |
|
{ |
|
"completion_length": 291.9708450317383, |
|
"epoch": 0.694, |
|
"grad_norm": 0.5173379094623531, |
|
"kl": 0.35703125, |
|
"learning_rate": 5.1824632589828465e-06, |
|
"loss": 0.0143, |
|
"reward": 1.0666666984558106, |
|
"reward_std": 0.12149013429880143, |
|
"rewards/accuracy_reward": 0.07916666865348816, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 1735 |
|
}, |
|
{ |
|
"completion_length": 229.35834197998048, |
|
"epoch": 0.696, |
|
"grad_norm": 1.1205426336140032, |
|
"kl": 0.334765625, |
|
"learning_rate": 5.121403408612672e-06, |
|
"loss": 0.0134, |
|
"reward": 1.1291666984558106, |
|
"reward_std": 0.15256435573101043, |
|
"rewards/accuracy_reward": 0.12916667014360428, |
|
"rewards/format_reward": 1.0, |
|
"step": 1740 |
|
}, |
|
{ |
|
"completion_length": 242.07083740234376, |
|
"epoch": 0.698, |
|
"grad_norm": 0.08229901659914558, |
|
"kl": 0.3361328125, |
|
"learning_rate": 5.060581334157693e-06, |
|
"loss": 0.0134, |
|
"reward": 1.1666666984558105, |
|
"reward_std": 0.13328943997621537, |
|
"rewards/accuracy_reward": 0.16666667237877847, |
|
"rewards/format_reward": 1.0, |
|
"step": 1745 |
|
}, |
|
{ |
|
"completion_length": 243.38333740234376, |
|
"epoch": 0.7, |
|
"grad_norm": 0.4810002013198661, |
|
"kl": 0.33203125, |
|
"learning_rate": 5.000000000000003e-06, |
|
"loss": 0.0133, |
|
"reward": 1.1208333492279052, |
|
"reward_std": 0.09865520298480987, |
|
"rewards/accuracy_reward": 0.12500000223517418, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1750 |
|
}, |
|
{ |
|
"completion_length": 201.52500915527344, |
|
"epoch": 0.702, |
|
"grad_norm": 0.7845067243546726, |
|
"kl": 0.3431640625, |
|
"learning_rate": 4.939662358788364e-06, |
|
"loss": 0.0137, |
|
"reward": 1.1500000357627869, |
|
"reward_std": 0.1625961884856224, |
|
"rewards/accuracy_reward": 0.15000000074505807, |
|
"rewards/format_reward": 1.0, |
|
"step": 1755 |
|
}, |
|
{ |
|
"completion_length": 204.12083892822267, |
|
"epoch": 0.704, |
|
"grad_norm": 0.6792203039767948, |
|
"kl": 0.315625, |
|
"learning_rate": 4.879571351294287e-06, |
|
"loss": 0.0126, |
|
"reward": 1.1416667103767395, |
|
"reward_std": 0.16416243016719817, |
|
"rewards/accuracy_reward": 0.14166667237877845, |
|
"rewards/format_reward": 1.0, |
|
"step": 1760 |
|
}, |
|
{ |
|
"completion_length": 187.62500457763673, |
|
"epoch": 0.706, |
|
"grad_norm": 0.7070939109048883, |
|
"kl": 0.3744140625, |
|
"learning_rate": 4.8197299062687e-06, |
|
"loss": 0.015, |
|
"reward": 1.1416667103767395, |
|
"reward_std": 0.18841607943177224, |
|
"rewards/accuracy_reward": 0.1458333373069763, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 1765 |
|
}, |
|
{ |
|
"completion_length": 218.6666748046875, |
|
"epoch": 0.708, |
|
"grad_norm": 1.1283215566490286, |
|
"kl": 0.3330078125, |
|
"learning_rate": 4.76014094029921e-06, |
|
"loss": 0.0133, |
|
"reward": 1.0958333492279053, |
|
"reward_std": 0.14890312626957894, |
|
"rewards/accuracy_reward": 0.0958333346992731, |
|
"rewards/format_reward": 1.0, |
|
"step": 1770 |
|
}, |
|
{ |
|
"completion_length": 196.70000610351562, |
|
"epoch": 0.71, |
|
"grad_norm": 0.5380849514067082, |
|
"kl": 0.3201171875, |
|
"learning_rate": 4.700807357667953e-06, |
|
"loss": 0.0128, |
|
"reward": 1.1166667103767396, |
|
"reward_std": 0.12386635094881057, |
|
"rewards/accuracy_reward": 0.11666667126119137, |
|
"rewards/format_reward": 1.0, |
|
"step": 1775 |
|
}, |
|
{ |
|
"completion_length": 199.36250762939454, |
|
"epoch": 0.712, |
|
"grad_norm": 0.776557437915111, |
|
"kl": 0.3330078125, |
|
"learning_rate": 4.641732050210032e-06, |
|
"loss": 0.0133, |
|
"reward": 1.145833384990692, |
|
"reward_std": 0.17088177502155305, |
|
"rewards/accuracy_reward": 0.15416667200624942, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 1780 |
|
}, |
|
{ |
|
"completion_length": 199.9291748046875, |
|
"epoch": 0.714, |
|
"grad_norm": 0.7199296200116914, |
|
"kl": 0.344140625, |
|
"learning_rate": 4.582917897172603e-06, |
|
"loss": 0.0138, |
|
"reward": 1.1791666865348815, |
|
"reward_std": 0.18141550272703172, |
|
"rewards/accuracy_reward": 0.18750000223517418, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 1785 |
|
}, |
|
{ |
|
"completion_length": 240.93750610351563, |
|
"epoch": 0.716, |
|
"grad_norm": 0.82997985294821, |
|
"kl": 0.336328125, |
|
"learning_rate": 4.524367765074499e-06, |
|
"loss": 0.0135, |
|
"reward": 1.0583333849906922, |
|
"reward_std": 0.16817803531885148, |
|
"rewards/accuracy_reward": 0.07916666902601718, |
|
"rewards/format_reward": 0.9791666746139527, |
|
"step": 1790 |
|
}, |
|
{ |
|
"completion_length": 257.6500076293945, |
|
"epoch": 0.718, |
|
"grad_norm": 0.6940011512430222, |
|
"kl": 0.3623046875, |
|
"learning_rate": 4.46608450756656e-06, |
|
"loss": 0.0145, |
|
"reward": 1.1416667103767395, |
|
"reward_std": 0.19037772193551064, |
|
"rewards/accuracy_reward": 0.1791666716337204, |
|
"rewards/format_reward": 0.9625000298023224, |
|
"step": 1795 |
|
}, |
|
{ |
|
"completion_length": 346.0041778564453, |
|
"epoch": 0.72, |
|
"grad_norm": 0.4358586566289972, |
|
"kl": 0.315625, |
|
"learning_rate": 4.408070965292534e-06, |
|
"loss": 0.0126, |
|
"reward": 1.0125000298023223, |
|
"reward_std": 0.2788765422999859, |
|
"rewards/accuracy_reward": 0.08750000260770321, |
|
"rewards/format_reward": 0.9250000178813934, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"eval_completion_length": 265.323724599985, |
|
"eval_kl": 0.3454026442307692, |
|
"eval_loss": 0.013118223287165165, |
|
"eval_reward": 1.118589772627904, |
|
"eval_reward_std": 0.25780055958491105, |
|
"eval_rewards/accuracy_reward": 0.16346154361963272, |
|
"eval_rewards/format_reward": 0.9551282341663654, |
|
"eval_runtime": 239.8483, |
|
"eval_samples_per_second": 0.413, |
|
"eval_steps_per_second": 0.013, |
|
"step": 1800 |
|
}, |
|
{ |
|
"completion_length": 263.9666717529297, |
|
"epoch": 0.722, |
|
"grad_norm": 0.884361517453845, |
|
"kl": 0.3580078125, |
|
"learning_rate": 4.350329965750622e-06, |
|
"loss": 0.0143, |
|
"reward": 1.1083333790302277, |
|
"reward_std": 0.23719543740153312, |
|
"rewards/accuracy_reward": 0.14583333767950535, |
|
"rewards/format_reward": 0.9625000178813934, |
|
"step": 1805 |
|
}, |
|
{ |
|
"completion_length": 252.55001068115234, |
|
"epoch": 0.724, |
|
"grad_norm": 0.911989814522389, |
|
"kl": 0.3294921875, |
|
"learning_rate": 4.292864323155684e-06, |
|
"loss": 0.0132, |
|
"reward": 1.1416666865348817, |
|
"reward_std": 0.2740778163075447, |
|
"rewards/accuracy_reward": 0.17916667312383652, |
|
"rewards/format_reward": 0.9625000178813934, |
|
"step": 1810 |
|
}, |
|
{ |
|
"completion_length": 265.45834197998045, |
|
"epoch": 0.726, |
|
"grad_norm": 0.3701309923874372, |
|
"kl": 0.306640625, |
|
"learning_rate": 4.235676838302069e-06, |
|
"loss": 0.0123, |
|
"reward": 1.1125000357627868, |
|
"reward_std": 0.257410154491663, |
|
"rewards/accuracy_reward": 0.1583333384245634, |
|
"rewards/format_reward": 0.9541666865348816, |
|
"step": 1815 |
|
}, |
|
{ |
|
"completion_length": 227.68334045410157, |
|
"epoch": 0.728, |
|
"grad_norm": 0.9371502334838836, |
|
"kl": 0.323046875, |
|
"learning_rate": 4.178770298427107e-06, |
|
"loss": 0.0129, |
|
"reward": 1.137500035762787, |
|
"reward_std": 0.27804360538721085, |
|
"rewards/accuracy_reward": 0.15833333805203437, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 1820 |
|
}, |
|
{ |
|
"completion_length": 245.9291732788086, |
|
"epoch": 0.73, |
|
"grad_norm": 0.5701038034532563, |
|
"kl": 0.29423828125, |
|
"learning_rate": 4.12214747707527e-06, |
|
"loss": 0.0118, |
|
"reward": 1.1208333611488341, |
|
"reward_std": 0.22555280923843385, |
|
"rewards/accuracy_reward": 0.15416667386889457, |
|
"rewards/format_reward": 0.9666666805744171, |
|
"step": 1825 |
|
}, |
|
{ |
|
"completion_length": 233.24167404174804, |
|
"epoch": 0.732, |
|
"grad_norm": 0.2591556634095291, |
|
"kl": 0.3109375, |
|
"learning_rate": 4.065811133962987e-06, |
|
"loss": 0.0124, |
|
"reward": 1.0916666865348816, |
|
"reward_std": 0.1926060661673546, |
|
"rewards/accuracy_reward": 0.1250000014901161, |
|
"rewards/format_reward": 0.9666666805744171, |
|
"step": 1830 |
|
}, |
|
{ |
|
"completion_length": 238.20417327880858, |
|
"epoch": 0.734, |
|
"grad_norm": 0.9770366277050908, |
|
"kl": 0.34375, |
|
"learning_rate": 4.009764014844143e-06, |
|
"loss": 0.0137, |
|
"reward": 1.179166704416275, |
|
"reward_std": 0.2617766156792641, |
|
"rewards/accuracy_reward": 0.21666667088866234, |
|
"rewards/format_reward": 0.9625000119209289, |
|
"step": 1835 |
|
}, |
|
{ |
|
"completion_length": 189.57084045410156, |
|
"epoch": 0.736, |
|
"grad_norm": 0.5416908718652198, |
|
"kl": 0.319921875, |
|
"learning_rate": 3.954008851376252e-06, |
|
"loss": 0.0128, |
|
"reward": 1.1250000238418578, |
|
"reward_std": 0.14619938358664514, |
|
"rewards/accuracy_reward": 0.13750000223517417, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 1840 |
|
}, |
|
{ |
|
"completion_length": 219.25417251586913, |
|
"epoch": 0.738, |
|
"grad_norm": 0.6006299277423031, |
|
"kl": 0.3302734375, |
|
"learning_rate": 3.898548360987325e-06, |
|
"loss": 0.0132, |
|
"reward": 1.1000000476837157, |
|
"reward_std": 0.19881998300552367, |
|
"rewards/accuracy_reward": 0.12500000409781933, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 1845 |
|
}, |
|
{ |
|
"completion_length": 203.9916748046875, |
|
"epoch": 0.74, |
|
"grad_norm": 1.0816648176296837, |
|
"kl": 0.33779296875, |
|
"learning_rate": 3.8433852467434175e-06, |
|
"loss": 0.0135, |
|
"reward": 1.1833333849906922, |
|
"reward_std": 0.25205365046858785, |
|
"rewards/accuracy_reward": 0.21250000596046448, |
|
"rewards/format_reward": 0.9708333432674408, |
|
"step": 1850 |
|
}, |
|
{ |
|
"completion_length": 254.84584045410156, |
|
"epoch": 0.742, |
|
"grad_norm": 0.5651446585039597, |
|
"kl": 0.33984375, |
|
"learning_rate": 3.7885221972168974e-06, |
|
"loss": 0.0136, |
|
"reward": 1.0333333611488342, |
|
"reward_std": 0.202835600823164, |
|
"rewards/accuracy_reward": 0.07500000298023224, |
|
"rewards/format_reward": 0.9583333551883697, |
|
"step": 1855 |
|
}, |
|
{ |
|
"completion_length": 243.50000381469727, |
|
"epoch": 0.744, |
|
"grad_norm": 0.8741874945615253, |
|
"kl": 0.3328125, |
|
"learning_rate": 3.7339618863553983e-06, |
|
"loss": 0.0133, |
|
"reward": 1.1041667103767394, |
|
"reward_std": 0.23992215022444724, |
|
"rewards/accuracy_reward": 0.1416666705161333, |
|
"rewards/format_reward": 0.9625000119209289, |
|
"step": 1860 |
|
}, |
|
{ |
|
"completion_length": 183.93750534057617, |
|
"epoch": 0.746, |
|
"grad_norm": 0.7998884228143229, |
|
"kl": 0.35546875, |
|
"learning_rate": 3.679706973351491e-06, |
|
"loss": 0.0142, |
|
"reward": 1.1166666865348815, |
|
"reward_std": 0.16580181941390038, |
|
"rewards/accuracy_reward": 0.1291666693985462, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 1865 |
|
}, |
|
{ |
|
"completion_length": 197.3291748046875, |
|
"epoch": 0.748, |
|
"grad_norm": 0.7195504466555774, |
|
"kl": 0.3251953125, |
|
"learning_rate": 3.625760102513103e-06, |
|
"loss": 0.013, |
|
"reward": 1.0666667103767395, |
|
"reward_std": 0.18050257191061975, |
|
"rewards/accuracy_reward": 0.09166666865348816, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 1870 |
|
}, |
|
{ |
|
"completion_length": 186.4416717529297, |
|
"epoch": 0.75, |
|
"grad_norm": 0.8437020501088257, |
|
"kl": 0.3259765625, |
|
"learning_rate": 3.5721239031346067e-06, |
|
"loss": 0.013, |
|
"reward": 1.079166716337204, |
|
"reward_std": 0.18379171639680864, |
|
"rewards/accuracy_reward": 0.09583333693444729, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 1875 |
|
}, |
|
{ |
|
"completion_length": 180.53333587646483, |
|
"epoch": 0.752, |
|
"grad_norm": 0.6175522474967708, |
|
"kl": 0.3048828125, |
|
"learning_rate": 3.5188009893686916e-06, |
|
"loss": 0.0122, |
|
"reward": 1.1333333849906921, |
|
"reward_std": 0.18649545460939407, |
|
"rewards/accuracy_reward": 0.14166667237877845, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 1880 |
|
}, |
|
{ |
|
"completion_length": 189.47084045410156, |
|
"epoch": 0.754, |
|
"grad_norm": 0.7541283975676629, |
|
"kl": 0.33828125, |
|
"learning_rate": 3.4657939600989453e-06, |
|
"loss": 0.0135, |
|
"reward": 1.1291666984558106, |
|
"reward_std": 0.22653717398643494, |
|
"rewards/accuracy_reward": 0.14166666977107525, |
|
"rewards/format_reward": 0.9875000059604645, |
|
"step": 1885 |
|
}, |
|
{ |
|
"completion_length": 236.83334197998047, |
|
"epoch": 0.756, |
|
"grad_norm": 0.7192849833359639, |
|
"kl": 0.3146484375, |
|
"learning_rate": 3.4131053988131947e-06, |
|
"loss": 0.0126, |
|
"reward": 1.0875000298023223, |
|
"reward_std": 0.24265274703502654, |
|
"rewards/accuracy_reward": 0.12083333842456341, |
|
"rewards/format_reward": 0.9666666865348816, |
|
"step": 1890 |
|
}, |
|
{ |
|
"completion_length": 231.8416778564453, |
|
"epoch": 0.758, |
|
"grad_norm": 0.43887813755474286, |
|
"kl": 0.8873046875, |
|
"learning_rate": 3.360737873477584e-06, |
|
"loss": 0.0355, |
|
"reward": 1.0625000298023224, |
|
"reward_std": 0.1980368673801422, |
|
"rewards/accuracy_reward": 0.0958333358168602, |
|
"rewards/format_reward": 0.9666666865348816, |
|
"step": 1895 |
|
}, |
|
{ |
|
"completion_length": 273.9916748046875, |
|
"epoch": 0.76, |
|
"grad_norm": 0.4947009731663165, |
|
"kl": 0.334765625, |
|
"learning_rate": 3.308693936411421e-06, |
|
"loss": 0.0134, |
|
"reward": 1.0333333611488342, |
|
"reward_std": 0.2203465722501278, |
|
"rewards/accuracy_reward": 0.08333333507180214, |
|
"rewards/format_reward": 0.950000011920929, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"eval_completion_length": 207.45994215745193, |
|
"eval_kl": 0.3671875, |
|
"eval_loss": 0.013619143515825272, |
|
"eval_reward": 1.1314102869767408, |
|
"eval_reward_std": 0.21630300896672103, |
|
"eval_rewards/accuracy_reward": 0.1538461590042481, |
|
"eval_rewards/format_reward": 0.9775641147906964, |
|
"eval_runtime": 181.6984, |
|
"eval_samples_per_second": 0.545, |
|
"eval_steps_per_second": 0.017, |
|
"step": 1900 |
|
}, |
|
{ |
|
"completion_length": 215.0666732788086, |
|
"epoch": 0.762, |
|
"grad_norm": 0.4462722168761537, |
|
"kl": 0.32421875, |
|
"learning_rate": 3.2569761241627694e-06, |
|
"loss": 0.013, |
|
"reward": 1.1041667103767394, |
|
"reward_std": 0.18810799717903137, |
|
"rewards/accuracy_reward": 0.12916667014360428, |
|
"rewards/format_reward": 0.9750000238418579, |
|
"step": 1905 |
|
}, |
|
{ |
|
"completion_length": 214.52083892822264, |
|
"epoch": 0.764, |
|
"grad_norm": 0.6751355982112365, |
|
"kl": 0.34326171875, |
|
"learning_rate": 3.2055869573848374e-06, |
|
"loss": 0.0137, |
|
"reward": 1.1291666865348815, |
|
"reward_std": 0.2662478253245354, |
|
"rewards/accuracy_reward": 0.15416667126119138, |
|
"rewards/format_reward": 0.9750000238418579, |
|
"step": 1910 |
|
}, |
|
{ |
|
"completion_length": 221.32500381469725, |
|
"epoch": 0.766, |
|
"grad_norm": 0.892078258457626, |
|
"kl": 0.31328125, |
|
"learning_rate": 3.1545289407131128e-06, |
|
"loss": 0.0125, |
|
"reward": 1.200000035762787, |
|
"reward_std": 0.2645213954150677, |
|
"rewards/accuracy_reward": 0.2333333406597376, |
|
"rewards/format_reward": 0.9666666924953461, |
|
"step": 1915 |
|
}, |
|
{ |
|
"completion_length": 210.50000457763673, |
|
"epoch": 0.768, |
|
"grad_norm": 0.623883337401102, |
|
"kl": 0.3212890625, |
|
"learning_rate": 3.103804562643302e-06, |
|
"loss": 0.0129, |
|
"reward": 1.1000000476837157, |
|
"reward_std": 0.18917233720421792, |
|
"rewards/accuracy_reward": 0.12500000298023223, |
|
"rewards/format_reward": 0.9750000238418579, |
|
"step": 1920 |
|
}, |
|
{ |
|
"completion_length": 298.3458419799805, |
|
"epoch": 0.77, |
|
"grad_norm": 1.0767844089766165, |
|
"kl": 0.318359375, |
|
"learning_rate": 3.0534162954100264e-06, |
|
"loss": 0.0127, |
|
"reward": 1.016666704416275, |
|
"reward_std": 0.21427700370550157, |
|
"rewards/accuracy_reward": 0.07083333432674407, |
|
"rewards/format_reward": 0.9458333551883698, |
|
"step": 1925 |
|
}, |
|
{ |
|
"completion_length": 219.76667098999025, |
|
"epoch": 0.772, |
|
"grad_norm": 0.8725082553969676, |
|
"kl": 0.3330078125, |
|
"learning_rate": 3.003366594866345e-06, |
|
"loss": 0.0133, |
|
"reward": 1.062500035762787, |
|
"reward_std": 0.18512693047523499, |
|
"rewards/accuracy_reward": 0.0916666690260172, |
|
"rewards/format_reward": 0.9708333551883698, |
|
"step": 1930 |
|
}, |
|
{ |
|
"completion_length": 240.48750762939454, |
|
"epoch": 0.774, |
|
"grad_norm": 0.6293755791762184, |
|
"kl": 0.3767578125, |
|
"learning_rate": 2.953657900364053e-06, |
|
"loss": 0.0151, |
|
"reward": 1.0541667103767396, |
|
"reward_std": 0.23292546570301056, |
|
"rewards/accuracy_reward": 0.10416666939854621, |
|
"rewards/format_reward": 0.9500000238418579, |
|
"step": 1935 |
|
}, |
|
{ |
|
"completion_length": 174.57917175292968, |
|
"epoch": 0.776, |
|
"grad_norm": 0.6399864262576418, |
|
"kl": 0.36796875, |
|
"learning_rate": 2.9042926346347932e-06, |
|
"loss": 0.0147, |
|
"reward": 1.104166704416275, |
|
"reward_std": 0.13599317967891694, |
|
"rewards/accuracy_reward": 0.12083333805203438, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 1940 |
|
}, |
|
{ |
|
"completion_length": 227.00000381469727, |
|
"epoch": 0.778, |
|
"grad_norm": 0.8129271898918838, |
|
"kl": 0.32607421875, |
|
"learning_rate": 2.855273203671969e-06, |
|
"loss": 0.013, |
|
"reward": 1.0708333492279052, |
|
"reward_std": 0.2076343297958374, |
|
"rewards/accuracy_reward": 0.11250000186264515, |
|
"rewards/format_reward": 0.9583333551883697, |
|
"step": 1945 |
|
}, |
|
{ |
|
"completion_length": 264.7666717529297, |
|
"epoch": 0.78, |
|
"grad_norm": 0.8065918747460696, |
|
"kl": 0.3568359375, |
|
"learning_rate": 2.8066019966134907e-06, |
|
"loss": 0.0143, |
|
"reward": 1.1333333730697632, |
|
"reward_std": 0.3189865186810493, |
|
"rewards/accuracy_reward": 0.1875000037252903, |
|
"rewards/format_reward": 0.9458333671092987, |
|
"step": 1950 |
|
}, |
|
{ |
|
"completion_length": 233.88334197998046, |
|
"epoch": 0.782, |
|
"grad_norm": 1.0111951381404745, |
|
"kl": 0.3408203125, |
|
"learning_rate": 2.7582813856253276e-06, |
|
"loss": 0.0136, |
|
"reward": 1.0375000298023225, |
|
"reward_std": 0.20281226933002472, |
|
"rewards/accuracy_reward": 0.07083333507180214, |
|
"rewards/format_reward": 0.9666666924953461, |
|
"step": 1955 |
|
}, |
|
{ |
|
"completion_length": 168.03333892822266, |
|
"epoch": 0.784, |
|
"grad_norm": 0.8785903776163005, |
|
"kl": 0.534375, |
|
"learning_rate": 2.7103137257858867e-06, |
|
"loss": 0.0213, |
|
"reward": 1.0958333730697631, |
|
"reward_std": 0.2230503059923649, |
|
"rewards/accuracy_reward": 0.10833333730697632, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 1960 |
|
}, |
|
{ |
|
"completion_length": 201.39167327880858, |
|
"epoch": 0.786, |
|
"grad_norm": 0.47972834947637666, |
|
"kl": 0.37578125, |
|
"learning_rate": 2.6627013549712355e-06, |
|
"loss": 0.015, |
|
"reward": 1.0958333671092988, |
|
"reward_std": 0.19642044231295586, |
|
"rewards/accuracy_reward": 0.1291666716337204, |
|
"rewards/format_reward": 0.9666666805744171, |
|
"step": 1965 |
|
}, |
|
{ |
|
"completion_length": 172.42083740234375, |
|
"epoch": 0.788, |
|
"grad_norm": 0.8266813840530551, |
|
"kl": 0.3822265625, |
|
"learning_rate": 2.615446593741161e-06, |
|
"loss": 0.0153, |
|
"reward": 1.1416667103767395, |
|
"reward_std": 0.2167830340564251, |
|
"rewards/accuracy_reward": 0.16250000484287738, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 1970 |
|
}, |
|
{ |
|
"completion_length": 171.02917098999023, |
|
"epoch": 0.79, |
|
"grad_norm": 1.3199547899633295, |
|
"kl": 0.3583984375, |
|
"learning_rate": 2.5685517452260566e-06, |
|
"loss": 0.0143, |
|
"reward": 1.108333373069763, |
|
"reward_std": 0.2030666373670101, |
|
"rewards/accuracy_reward": 0.12500000596046448, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 1975 |
|
}, |
|
{ |
|
"completion_length": 166.7291702270508, |
|
"epoch": 0.792, |
|
"grad_norm": 0.9825519168859947, |
|
"kl": 0.3490234375, |
|
"learning_rate": 2.522019095014683e-06, |
|
"loss": 0.014, |
|
"reward": 1.1291667103767395, |
|
"reward_std": 0.19670166373252868, |
|
"rewards/accuracy_reward": 0.14583333693444728, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 1980 |
|
}, |
|
{ |
|
"completion_length": 197.48750686645508, |
|
"epoch": 0.794, |
|
"grad_norm": 1.0796352348885683, |
|
"kl": 0.333203125, |
|
"learning_rate": 2.4758509110427576e-06, |
|
"loss": 0.0133, |
|
"reward": 1.1166666865348815, |
|
"reward_std": 0.22812673598527908, |
|
"rewards/accuracy_reward": 0.1375000037252903, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 1985 |
|
}, |
|
{ |
|
"completion_length": 179.22500305175782, |
|
"epoch": 0.796, |
|
"grad_norm": 0.966571682281471, |
|
"kl": 0.36171875, |
|
"learning_rate": 2.4300494434824373e-06, |
|
"loss": 0.0145, |
|
"reward": 1.1458333730697632, |
|
"reward_std": 0.16198743879795074, |
|
"rewards/accuracy_reward": 0.15833333879709244, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 1990 |
|
}, |
|
{ |
|
"completion_length": 191.09584197998046, |
|
"epoch": 0.798, |
|
"grad_norm": 0.9230407106995351, |
|
"kl": 0.3421875, |
|
"learning_rate": 2.3846169246326345e-06, |
|
"loss": 0.0137, |
|
"reward": 1.1708333611488342, |
|
"reward_std": 0.24694958627223967, |
|
"rewards/accuracy_reward": 0.17916667088866234, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 1995 |
|
}, |
|
{ |
|
"completion_length": 207.94167251586913, |
|
"epoch": 0.8, |
|
"grad_norm": 0.8543610763790189, |
|
"kl": 0.3279296875, |
|
"learning_rate": 2.339555568810221e-06, |
|
"loss": 0.0131, |
|
"reward": 1.1958333730697632, |
|
"reward_std": 0.2837587997317314, |
|
"rewards/accuracy_reward": 0.22083333916962147, |
|
"rewards/format_reward": 0.9750000238418579, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"eval_completion_length": 193.92628831129807, |
|
"eval_kl": 0.3342848557692308, |
|
"eval_loss": 0.014630368910729885, |
|
"eval_reward": 1.181089772627904, |
|
"eval_reward_std": 0.23239356164748853, |
|
"eval_rewards/accuracy_reward": 0.19871795220443836, |
|
"eval_rewards/format_reward": 0.9823718070983887, |
|
"eval_runtime": 192.2976, |
|
"eval_samples_per_second": 0.515, |
|
"eval_steps_per_second": 0.016, |
|
"step": 2000 |
|
}, |
|
{ |
|
"completion_length": 195.9958396911621, |
|
"epoch": 0.802, |
|
"grad_norm": 0.4271754543182463, |
|
"kl": 0.34921875, |
|
"learning_rate": 2.2948675722421086e-06, |
|
"loss": 0.014, |
|
"reward": 1.200000035762787, |
|
"reward_std": 0.20581314116716384, |
|
"rewards/accuracy_reward": 0.22083333991467952, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 2005 |
|
}, |
|
{ |
|
"completion_length": 174.7375045776367, |
|
"epoch": 0.804, |
|
"grad_norm": 0.9804456879969184, |
|
"kl": 0.3189453125, |
|
"learning_rate": 2.2505551129582047e-06, |
|
"loss": 0.0128, |
|
"reward": 1.2041666984558106, |
|
"reward_std": 0.20763080418109894, |
|
"rewards/accuracy_reward": 0.21666666939854623, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 2010 |
|
}, |
|
{ |
|
"completion_length": 140.03333740234376, |
|
"epoch": 0.806, |
|
"grad_norm": 0.5962293976596295, |
|
"kl": 0.369140625, |
|
"learning_rate": 2.206620350685257e-06, |
|
"loss": 0.0148, |
|
"reward": 1.0958333611488342, |
|
"reward_std": 0.10075019598007202, |
|
"rewards/accuracy_reward": 0.10000000335276127, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 2015 |
|
}, |
|
{ |
|
"completion_length": 204.6291748046875, |
|
"epoch": 0.808, |
|
"grad_norm": 0.4573375021288064, |
|
"kl": 0.3125, |
|
"learning_rate": 2.163065426741603e-06, |
|
"loss": 0.0125, |
|
"reward": 1.0583333849906922, |
|
"reward_std": 0.13136882334947586, |
|
"rewards/accuracy_reward": 0.07916666977107525, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 2020 |
|
}, |
|
{ |
|
"completion_length": 184.0541732788086, |
|
"epoch": 0.81, |
|
"grad_norm": 0.5759896485971067, |
|
"kl": 0.3591796875, |
|
"learning_rate": 2.119892463932781e-06, |
|
"loss": 0.0144, |
|
"reward": 1.0833333730697632, |
|
"reward_std": 0.1874762825667858, |
|
"rewards/accuracy_reward": 0.10416666977107525, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 2025 |
|
}, |
|
{ |
|
"completion_length": 205.35833969116212, |
|
"epoch": 0.812, |
|
"grad_norm": 0.8719265640204763, |
|
"kl": 0.336328125, |
|
"learning_rate": 2.0771035664480944e-06, |
|
"loss": 0.0135, |
|
"reward": 1.079166716337204, |
|
"reward_std": 0.19070877507328987, |
|
"rewards/accuracy_reward": 0.10833333805203438, |
|
"rewards/format_reward": 0.9708333432674408, |
|
"step": 2030 |
|
}, |
|
{ |
|
"completion_length": 181.62083892822267, |
|
"epoch": 0.814, |
|
"grad_norm": 0.7446256204044083, |
|
"kl": 0.321484375, |
|
"learning_rate": 2.0347008197580376e-06, |
|
"loss": 0.0129, |
|
"reward": 1.1708333849906922, |
|
"reward_std": 0.17628924921154976, |
|
"rewards/accuracy_reward": 0.17916667237877845, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 2035 |
|
}, |
|
{ |
|
"completion_length": 193.7916748046875, |
|
"epoch": 0.816, |
|
"grad_norm": 0.9746509464814747, |
|
"kl": 0.3251953125, |
|
"learning_rate": 1.9926862905126663e-06, |
|
"loss": 0.013, |
|
"reward": 1.0833333790302277, |
|
"reward_std": 0.1700986571609974, |
|
"rewards/accuracy_reward": 0.1041666705161333, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 2040 |
|
}, |
|
{ |
|
"completion_length": 173.7375015258789, |
|
"epoch": 0.818, |
|
"grad_norm": 0.8445304947872159, |
|
"kl": 0.367578125, |
|
"learning_rate": 1.95106202644086e-06, |
|
"loss": 0.0147, |
|
"reward": 1.1666667222976685, |
|
"reward_std": 0.16608304530382156, |
|
"rewards/accuracy_reward": 0.17500000707805158, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 2045 |
|
}, |
|
{ |
|
"completion_length": 243.4041732788086, |
|
"epoch": 0.82, |
|
"grad_norm": 0.860558343035885, |
|
"kl": 0.32216796875, |
|
"learning_rate": 1.9098300562505266e-06, |
|
"loss": 0.0129, |
|
"reward": 1.0791666984558106, |
|
"reward_std": 0.20652016922831534, |
|
"rewards/accuracy_reward": 0.1208333358168602, |
|
"rewards/format_reward": 0.9583333551883697, |
|
"step": 2050 |
|
}, |
|
{ |
|
"completion_length": 214.7291748046875, |
|
"epoch": 0.822, |
|
"grad_norm": 0.3936034046066415, |
|
"kl": 0.325, |
|
"learning_rate": 1.8689923895297247e-06, |
|
"loss": 0.013, |
|
"reward": 1.1208333611488341, |
|
"reward_std": 0.21031340062618256, |
|
"rewards/accuracy_reward": 0.13750000223517417, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 2055 |
|
}, |
|
{ |
|
"completion_length": 216.96667251586913, |
|
"epoch": 0.824, |
|
"grad_norm": 0.7044260701460072, |
|
"kl": 0.32158203125, |
|
"learning_rate": 1.8285510166487154e-06, |
|
"loss": 0.0129, |
|
"reward": 1.0708333730697632, |
|
"reward_std": 0.15930703431367874, |
|
"rewards/accuracy_reward": 0.09166667088866234, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 2060 |
|
}, |
|
{ |
|
"completion_length": 263.62917709350586, |
|
"epoch": 0.826, |
|
"grad_norm": 0.5498658754659237, |
|
"kl": 0.3486328125, |
|
"learning_rate": 1.7885079086629598e-06, |
|
"loss": 0.0139, |
|
"reward": 1.012500035762787, |
|
"reward_std": 0.18825939893722535, |
|
"rewards/accuracy_reward": 0.06666666828095913, |
|
"rewards/format_reward": 0.9458333492279053, |
|
"step": 2065 |
|
}, |
|
{ |
|
"completion_length": 214.28333740234376, |
|
"epoch": 0.828, |
|
"grad_norm": 0.6431284953050042, |
|
"kl": 0.3193359375, |
|
"learning_rate": 1.7488650172170496e-06, |
|
"loss": 0.0128, |
|
"reward": 1.100000023841858, |
|
"reward_std": 0.2267348773777485, |
|
"rewards/accuracy_reward": 0.12500000074505807, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 2070 |
|
}, |
|
{ |
|
"completion_length": 269.7416748046875, |
|
"epoch": 0.83, |
|
"grad_norm": 0.7105813170175688, |
|
"kl": 0.3037109375, |
|
"learning_rate": 1.709624274449584e-06, |
|
"loss": 0.0121, |
|
"reward": 1.0833333790302277, |
|
"reward_std": 0.24560517221689224, |
|
"rewards/accuracy_reward": 0.13333333656191826, |
|
"rewards/format_reward": 0.9500000178813934, |
|
"step": 2075 |
|
}, |
|
{ |
|
"completion_length": 230.74583892822267, |
|
"epoch": 0.832, |
|
"grad_norm": 0.4826351659850352, |
|
"kl": 0.33203125, |
|
"learning_rate": 1.6707875928990059e-06, |
|
"loss": 0.0133, |
|
"reward": 1.0958333730697631, |
|
"reward_std": 0.19321480840444566, |
|
"rewards/accuracy_reward": 0.12083333805203438, |
|
"rewards/format_reward": 0.975000011920929, |
|
"step": 2080 |
|
}, |
|
{ |
|
"completion_length": 250.7125045776367, |
|
"epoch": 0.834, |
|
"grad_norm": 0.8155955947722978, |
|
"kl": 0.3384765625, |
|
"learning_rate": 1.6323568654103838e-06, |
|
"loss": 0.0135, |
|
"reward": 1.1000000417232514, |
|
"reward_std": 0.2640728458762169, |
|
"rewards/accuracy_reward": 0.14583333842456342, |
|
"rewards/format_reward": 0.9541666865348816, |
|
"step": 2085 |
|
}, |
|
{ |
|
"completion_length": 234.16250610351562, |
|
"epoch": 0.836, |
|
"grad_norm": 0.6349006116990963, |
|
"kl": 0.334765625, |
|
"learning_rate": 1.5943339650431578e-06, |
|
"loss": 0.0134, |
|
"reward": 1.1000000596046449, |
|
"reward_std": 0.20900287851691246, |
|
"rewards/accuracy_reward": 0.12916666977107524, |
|
"rewards/format_reward": 0.9708333551883698, |
|
"step": 2090 |
|
}, |
|
{ |
|
"completion_length": 280.3166763305664, |
|
"epoch": 0.838, |
|
"grad_norm": 0.5436608157021742, |
|
"kl": 0.31298828125, |
|
"learning_rate": 1.5567207449798517e-06, |
|
"loss": 0.0125, |
|
"reward": 1.104166704416275, |
|
"reward_std": 0.22294211983680726, |
|
"rewards/accuracy_reward": 0.15000000558793544, |
|
"rewards/format_reward": 0.9541666865348816, |
|
"step": 2095 |
|
}, |
|
{ |
|
"completion_length": 251.92500915527344, |
|
"epoch": 0.84, |
|
"grad_norm": 0.9997986753777716, |
|
"kl": 0.3080078125, |
|
"learning_rate": 1.5195190384357405e-06, |
|
"loss": 0.0123, |
|
"reward": 1.0583333611488341, |
|
"reward_std": 0.14829438030719758, |
|
"rewards/accuracy_reward": 0.08750000186264514, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"eval_completion_length": 230.33814885066107, |
|
"eval_kl": 0.32181490384615385, |
|
"eval_loss": 0.012829490937292576, |
|
"eval_reward": 1.126602590084076, |
|
"eval_reward_std": 0.23767680273606226, |
|
"eval_rewards/accuracy_reward": 0.15544871985912323, |
|
"eval_rewards/format_reward": 0.9711538599087641, |
|
"eval_runtime": 195.3635, |
|
"eval_samples_per_second": 0.507, |
|
"eval_steps_per_second": 0.015, |
|
"step": 2100 |
|
}, |
|
{ |
|
"completion_length": 210.4125068664551, |
|
"epoch": 0.842, |
|
"grad_norm": 0.3475757290411574, |
|
"kl": 0.3216796875, |
|
"learning_rate": 1.4827306585695234e-06, |
|
"loss": 0.0129, |
|
"reward": 1.0791666865348817, |
|
"reward_std": 0.16784698367118836, |
|
"rewards/accuracy_reward": 0.10000000223517418, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 2105 |
|
}, |
|
{ |
|
"completion_length": 200.43333892822267, |
|
"epoch": 0.844, |
|
"grad_norm": 0.6385339371882108, |
|
"kl": 0.3228515625, |
|
"learning_rate": 1.446357398394934e-06, |
|
"loss": 0.0129, |
|
"reward": 1.1625000476837157, |
|
"reward_std": 0.1953062780201435, |
|
"rewards/accuracy_reward": 0.1750000063329935, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 2110 |
|
}, |
|
{ |
|
"completion_length": 233.4250045776367, |
|
"epoch": 0.846, |
|
"grad_norm": 3.7096325260067315, |
|
"kl": 0.3455078125, |
|
"learning_rate": 1.4104010306933558e-06, |
|
"loss": 0.0138, |
|
"reward": 1.1166667103767396, |
|
"reward_std": 0.17760113030672073, |
|
"rewards/accuracy_reward": 0.1416666701436043, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 2115 |
|
}, |
|
{ |
|
"completion_length": 224.95417175292968, |
|
"epoch": 0.848, |
|
"grad_norm": 0.5825427147511982, |
|
"kl": 0.3935546875, |
|
"learning_rate": 1.3748633079274254e-06, |
|
"loss": 0.0157, |
|
"reward": 1.2041667103767395, |
|
"reward_std": 0.2667228579521179, |
|
"rewards/accuracy_reward": 0.22500000484287738, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 2120 |
|
}, |
|
{ |
|
"completion_length": 254.05833892822267, |
|
"epoch": 0.85, |
|
"grad_norm": 1.0034192906781798, |
|
"kl": 0.3423828125, |
|
"learning_rate": 1.339745962155613e-06, |
|
"loss": 0.0137, |
|
"reward": 1.1083333611488342, |
|
"reward_std": 0.21382493153214455, |
|
"rewards/accuracy_reward": 0.1416666679084301, |
|
"rewards/format_reward": 0.9666666805744171, |
|
"step": 2125 |
|
}, |
|
{ |
|
"completion_length": 230.9458366394043, |
|
"epoch": 0.852, |
|
"grad_norm": 0.8643081521849603, |
|
"kl": 0.3384765625, |
|
"learning_rate": 1.30505070494781e-06, |
|
"loss": 0.0135, |
|
"reward": 1.141666704416275, |
|
"reward_std": 0.2681451141834259, |
|
"rewards/accuracy_reward": 0.16666667126119136, |
|
"rewards/format_reward": 0.975000011920929, |
|
"step": 2130 |
|
}, |
|
{ |
|
"completion_length": 192.07917175292968, |
|
"epoch": 0.854, |
|
"grad_norm": 0.7868704806845438, |
|
"kl": 0.34873046875, |
|
"learning_rate": 1.2707792273019049e-06, |
|
"loss": 0.0139, |
|
"reward": 1.1791666984558105, |
|
"reward_std": 0.18512692525982857, |
|
"rewards/accuracy_reward": 0.18750000670552253, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 2135 |
|
}, |
|
{ |
|
"completion_length": 251.43750610351563, |
|
"epoch": 0.856, |
|
"grad_norm": 0.7371049494838817, |
|
"kl": 0.349609375, |
|
"learning_rate": 1.2369331995613664e-06, |
|
"loss": 0.014, |
|
"reward": 1.079166728258133, |
|
"reward_std": 0.2230503149330616, |
|
"rewards/accuracy_reward": 0.10416666939854621, |
|
"rewards/format_reward": 0.975000011920929, |
|
"step": 2140 |
|
}, |
|
{ |
|
"completion_length": 199.56250457763673, |
|
"epoch": 0.858, |
|
"grad_norm": 0.8177527730479223, |
|
"kl": 0.3154296875, |
|
"learning_rate": 1.2035142713338366e-06, |
|
"loss": 0.0126, |
|
"reward": 1.1625000357627868, |
|
"reward_std": 0.20501058548688889, |
|
"rewards/accuracy_reward": 0.17083333544433116, |
|
"rewards/format_reward": 0.9916666746139526, |
|
"step": 2145 |
|
}, |
|
{ |
|
"completion_length": 260.4166732788086, |
|
"epoch": 0.86, |
|
"grad_norm": 0.7068503679121204, |
|
"kl": 0.3138671875, |
|
"learning_rate": 1.1705240714107301e-06, |
|
"loss": 0.0126, |
|
"reward": 1.1208333730697633, |
|
"reward_std": 0.25823956951498983, |
|
"rewards/accuracy_reward": 0.14583333544433116, |
|
"rewards/format_reward": 0.975000011920929, |
|
"step": 2150 |
|
}, |
|
{ |
|
"completion_length": 259.20417938232424, |
|
"epoch": 0.862, |
|
"grad_norm": 0.42884092133676044, |
|
"kl": 0.3365234375, |
|
"learning_rate": 1.1379642076878528e-06, |
|
"loss": 0.0135, |
|
"reward": 1.0750000417232513, |
|
"reward_std": 0.20662664845585824, |
|
"rewards/accuracy_reward": 0.10416667088866234, |
|
"rewards/format_reward": 0.9708333551883698, |
|
"step": 2155 |
|
}, |
|
{ |
|
"completion_length": 262.93333892822267, |
|
"epoch": 0.864, |
|
"grad_norm": 0.8152710369638511, |
|
"kl": 0.3275390625, |
|
"learning_rate": 1.1058362670870248e-06, |
|
"loss": 0.0131, |
|
"reward": 1.0583333730697633, |
|
"reward_std": 0.17358550801873207, |
|
"rewards/accuracy_reward": 0.08750000074505807, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 2160 |
|
}, |
|
{ |
|
"completion_length": 247.0250045776367, |
|
"epoch": 0.866, |
|
"grad_norm": 0.688001798775682, |
|
"kl": 0.3095703125, |
|
"learning_rate": 1.0741418154787443e-06, |
|
"loss": 0.0124, |
|
"reward": 1.1208333849906922, |
|
"reward_std": 0.2076909951865673, |
|
"rewards/accuracy_reward": 0.13750000186264516, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 2165 |
|
}, |
|
{ |
|
"completion_length": 288.39583892822264, |
|
"epoch": 0.868, |
|
"grad_norm": 0.5822864693647378, |
|
"kl": 0.30859375, |
|
"learning_rate": 1.042882397605871e-06, |
|
"loss": 0.0124, |
|
"reward": 1.0625000417232513, |
|
"reward_std": 0.18209213241934777, |
|
"rewards/accuracy_reward": 0.09583333618938923, |
|
"rewards/format_reward": 0.9666666865348816, |
|
"step": 2170 |
|
}, |
|
{ |
|
"completion_length": 245.29584045410155, |
|
"epoch": 0.87, |
|
"grad_norm": 0.5072380090682689, |
|
"kl": 0.311328125, |
|
"learning_rate": 1.012059537008332e-06, |
|
"loss": 0.0125, |
|
"reward": 1.0875000476837158, |
|
"reward_std": 0.19070877134799957, |
|
"rewards/accuracy_reward": 0.11250000409781932, |
|
"rewards/format_reward": 0.9750000238418579, |
|
"step": 2175 |
|
}, |
|
{ |
|
"completion_length": 276.4583374023438, |
|
"epoch": 0.872, |
|
"grad_norm": 0.4273306679972882, |
|
"kl": 0.32421875, |
|
"learning_rate": 9.816747359488632e-07, |
|
"loss": 0.013, |
|
"reward": 1.1875000476837159, |
|
"reward_std": 0.23159026056528093, |
|
"rewards/accuracy_reward": 0.22916667200624943, |
|
"rewards/format_reward": 0.9583333432674408, |
|
"step": 2180 |
|
}, |
|
{ |
|
"completion_length": 244.8000045776367, |
|
"epoch": 0.874, |
|
"grad_norm": 0.6004900969175896, |
|
"kl": 0.315625, |
|
"learning_rate": 9.517294753398066e-07, |
|
"loss": 0.0126, |
|
"reward": 1.1333333730697632, |
|
"reward_std": 0.23348402082920075, |
|
"rewards/accuracy_reward": 0.15833333879709244, |
|
"rewards/format_reward": 0.9750000238418579, |
|
"step": 2185 |
|
}, |
|
{ |
|
"completion_length": 277.64584197998045, |
|
"epoch": 0.876, |
|
"grad_norm": 0.49930191441234933, |
|
"kl": 0.31064453125, |
|
"learning_rate": 9.222252146709143e-07, |
|
"loss": 0.0124, |
|
"reward": 1.1375000298023223, |
|
"reward_std": 0.19854526817798615, |
|
"rewards/accuracy_reward": 0.17500000707805158, |
|
"rewards/format_reward": 0.9625000178813934, |
|
"step": 2190 |
|
}, |
|
{ |
|
"completion_length": 259.41250762939455, |
|
"epoch": 0.878, |
|
"grad_norm": 1.2303820073949265, |
|
"kl": 0.3111328125, |
|
"learning_rate": 8.931633919382299e-07, |
|
"loss": 0.0125, |
|
"reward": 1.0958333730697631, |
|
"reward_std": 0.26156365871429443, |
|
"rewards/accuracy_reward": 0.13750000409781932, |
|
"rewards/format_reward": 0.9583333492279053, |
|
"step": 2195 |
|
}, |
|
{ |
|
"completion_length": 248.02083892822264, |
|
"epoch": 0.88, |
|
"grad_norm": 0.8176257324608204, |
|
"kl": 0.318359375, |
|
"learning_rate": 8.645454235739903e-07, |
|
"loss": 0.0127, |
|
"reward": 1.1000000357627868, |
|
"reward_std": 0.23674337863922118, |
|
"rewards/accuracy_reward": 0.13333333730697633, |
|
"rewards/format_reward": 0.9666666805744171, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"eval_completion_length": 236.52725102351263, |
|
"eval_kl": 0.3191105769230769, |
|
"eval_loss": 0.013348034583032131, |
|
"eval_reward": 1.158653892003573, |
|
"eval_reward_std": 0.21606751072865266, |
|
"eval_rewards/accuracy_reward": 0.17628205452974027, |
|
"eval_rewards/format_reward": 0.9823718116833613, |
|
"eval_runtime": 205.161, |
|
"eval_samples_per_second": 0.483, |
|
"eval_steps_per_second": 0.015, |
|
"step": 2200 |
|
}, |
|
{ |
|
"completion_length": 243.39167175292968, |
|
"epoch": 0.882, |
|
"grad_norm": 0.724908011352607, |
|
"kl": 0.2978515625, |
|
"learning_rate": 8.363727043776037e-07, |
|
"loss": 0.0119, |
|
"reward": 1.112500011920929, |
|
"reward_std": 0.17489738911390304, |
|
"rewards/accuracy_reward": 0.12916666753590106, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 2205 |
|
}, |
|
{ |
|
"completion_length": 250.98750762939454, |
|
"epoch": 0.884, |
|
"grad_norm": 0.6527429694726796, |
|
"kl": 0.3375, |
|
"learning_rate": 8.086466074476562e-07, |
|
"loss": 0.0135, |
|
"reward": 1.0916667044162751, |
|
"reward_std": 0.2350437968969345, |
|
"rewards/accuracy_reward": 0.12916667051613331, |
|
"rewards/format_reward": 0.9625000119209289, |
|
"step": 2210 |
|
}, |
|
{ |
|
"completion_length": 203.03750457763672, |
|
"epoch": 0.886, |
|
"grad_norm": 0.8656053781596581, |
|
"kl": 0.3228515625, |
|
"learning_rate": 7.81368484114996e-07, |
|
"loss": 0.0129, |
|
"reward": 1.1625000596046449, |
|
"reward_std": 0.1544849768280983, |
|
"rewards/accuracy_reward": 0.17916667200624942, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 2215 |
|
}, |
|
{ |
|
"completion_length": 253.7791732788086, |
|
"epoch": 0.888, |
|
"grad_norm": 0.7886606594118118, |
|
"kl": 0.3357421875, |
|
"learning_rate": 7.545396638768698e-07, |
|
"loss": 0.0134, |
|
"reward": 1.0958333730697631, |
|
"reward_std": 0.24795775413513182, |
|
"rewards/accuracy_reward": 0.13750000447034835, |
|
"rewards/format_reward": 0.9583333492279053, |
|
"step": 2220 |
|
}, |
|
{ |
|
"completion_length": 224.60834121704102, |
|
"epoch": 0.89, |
|
"grad_norm": 0.7745032909827045, |
|
"kl": 0.328125, |
|
"learning_rate": 7.281614543321269e-07, |
|
"loss": 0.0131, |
|
"reward": 1.158333384990692, |
|
"reward_std": 0.2810515329241753, |
|
"rewards/accuracy_reward": 0.17916667237877845, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 2225 |
|
}, |
|
{ |
|
"completion_length": 179.68333740234374, |
|
"epoch": 0.892, |
|
"grad_norm": 0.3205028122076133, |
|
"kl": 0.31044921875, |
|
"learning_rate": 7.022351411174866e-07, |
|
"loss": 0.0124, |
|
"reward": 1.2250000357627868, |
|
"reward_std": 0.18841607943177224, |
|
"rewards/accuracy_reward": 0.2250000037252903, |
|
"rewards/format_reward": 1.0, |
|
"step": 2230 |
|
}, |
|
{ |
|
"completion_length": 188.59167022705077, |
|
"epoch": 0.894, |
|
"grad_norm": 0.8894666456826428, |
|
"kl": 0.308203125, |
|
"learning_rate": 6.767619878448783e-07, |
|
"loss": 0.0123, |
|
"reward": 1.2125000357627869, |
|
"reward_std": 0.18571233451366426, |
|
"rewards/accuracy_reward": 0.2166666727513075, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 2235 |
|
}, |
|
{ |
|
"completion_length": 228.5041717529297, |
|
"epoch": 0.896, |
|
"grad_norm": 0.5946306291186744, |
|
"kl": 0.30029296875, |
|
"learning_rate": 6.517432360398556e-07, |
|
"loss": 0.012, |
|
"reward": 1.1791666984558105, |
|
"reward_std": 0.2665520176291466, |
|
"rewards/accuracy_reward": 0.20416666939854622, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 2240 |
|
}, |
|
{ |
|
"completion_length": 186.78333740234376, |
|
"epoch": 0.898, |
|
"grad_norm": 0.8312102347233332, |
|
"kl": 0.3220703125, |
|
"learning_rate": 6.271801050810856e-07, |
|
"loss": 0.0129, |
|
"reward": 1.187500035762787, |
|
"reward_std": 0.18710420057177543, |
|
"rewards/accuracy_reward": 0.18750000596046448, |
|
"rewards/format_reward": 1.0, |
|
"step": 2245 |
|
}, |
|
{ |
|
"completion_length": 216.27083892822264, |
|
"epoch": 0.9, |
|
"grad_norm": 1.0188499034367569, |
|
"kl": 0.31484375, |
|
"learning_rate": 6.030737921409169e-07, |
|
"loss": 0.0126, |
|
"reward": 1.1250000238418578, |
|
"reward_std": 0.20551247671246528, |
|
"rewards/accuracy_reward": 0.14583333805203438, |
|
"rewards/format_reward": 0.9791666746139527, |
|
"step": 2250 |
|
}, |
|
{ |
|
"completion_length": 229.68334045410157, |
|
"epoch": 0.902, |
|
"grad_norm": 0.3466462767720492, |
|
"kl": 0.297265625, |
|
"learning_rate": 5.794254721270331e-07, |
|
"loss": 0.0119, |
|
"reward": 1.1000000357627868, |
|
"reward_std": 0.20342101901769638, |
|
"rewards/accuracy_reward": 0.11250000149011612, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 2255 |
|
}, |
|
{ |
|
"completion_length": 225.60000610351562, |
|
"epoch": 0.904, |
|
"grad_norm": 0.5577180539267683, |
|
"kl": 0.3216796875, |
|
"learning_rate": 5.562362976251901e-07, |
|
"loss": 0.0129, |
|
"reward": 1.1208333849906922, |
|
"reward_std": 0.1947810523211956, |
|
"rewards/accuracy_reward": 0.1416666701436043, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 2260 |
|
}, |
|
{ |
|
"completion_length": 253.82501068115235, |
|
"epoch": 0.906, |
|
"grad_norm": 0.8118903219041304, |
|
"kl": 0.31328125, |
|
"learning_rate": 5.335073988430373e-07, |
|
"loss": 0.0125, |
|
"reward": 1.1166667222976685, |
|
"reward_std": 0.20122136250138284, |
|
"rewards/accuracy_reward": 0.13750000298023224, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 2265 |
|
}, |
|
{ |
|
"completion_length": 167.62500457763673, |
|
"epoch": 0.908, |
|
"grad_norm": 0.5609148058927211, |
|
"kl": 0.30732421875, |
|
"learning_rate": 5.112398835550348e-07, |
|
"loss": 0.0123, |
|
"reward": 1.1500000476837158, |
|
"reward_std": 0.18108798265457154, |
|
"rewards/accuracy_reward": 0.15416667200624942, |
|
"rewards/format_reward": 0.9958333373069763, |
|
"step": 2270 |
|
}, |
|
{ |
|
"completion_length": 262.2083404541016, |
|
"epoch": 0.91, |
|
"grad_norm": 0.40905385430671654, |
|
"kl": 0.3474609375, |
|
"learning_rate": 4.894348370484648e-07, |
|
"loss": 0.0139, |
|
"reward": 1.1041666984558105, |
|
"reward_std": 0.2402033679187298, |
|
"rewards/accuracy_reward": 0.15000000409781933, |
|
"rewards/format_reward": 0.9541666805744171, |
|
"step": 2275 |
|
}, |
|
{ |
|
"completion_length": 223.8916763305664, |
|
"epoch": 0.912, |
|
"grad_norm": 0.5281590255695859, |
|
"kl": 0.3130859375, |
|
"learning_rate": 4.6809332207053083e-07, |
|
"loss": 0.0125, |
|
"reward": 1.2041667222976684, |
|
"reward_std": 0.23390628695487975, |
|
"rewards/accuracy_reward": 0.2250000037252903, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 2280 |
|
}, |
|
{ |
|
"completion_length": 250.34584350585936, |
|
"epoch": 0.914, |
|
"grad_norm": 1.0063118812707683, |
|
"kl": 0.3205078125, |
|
"learning_rate": 4.4721637877656377e-07, |
|
"loss": 0.0128, |
|
"reward": 1.0708333611488343, |
|
"reward_std": 0.1668095014989376, |
|
"rewards/accuracy_reward": 0.10000000149011612, |
|
"rewards/format_reward": 0.9708333551883698, |
|
"step": 2285 |
|
}, |
|
{ |
|
"completion_length": 219.55417251586914, |
|
"epoch": 0.916, |
|
"grad_norm": 0.5679221458389377, |
|
"kl": 0.322265625, |
|
"learning_rate": 4.268050246793276e-07, |
|
"loss": 0.0129, |
|
"reward": 1.287500023841858, |
|
"reward_std": 0.2404845893383026, |
|
"rewards/accuracy_reward": 0.3000000104308128, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 2290 |
|
}, |
|
{ |
|
"completion_length": 249.7291748046875, |
|
"epoch": 0.918, |
|
"grad_norm": 0.37307292971981587, |
|
"kl": 0.296875, |
|
"learning_rate": 4.068602545994249e-07, |
|
"loss": 0.0119, |
|
"reward": 1.0250000298023223, |
|
"reward_std": 0.13485567793250083, |
|
"rewards/accuracy_reward": 0.05000000149011612, |
|
"rewards/format_reward": 0.9750000059604644, |
|
"step": 2295 |
|
}, |
|
{ |
|
"completion_length": 275.1625091552734, |
|
"epoch": 0.92, |
|
"grad_norm": 1.4329667808726139, |
|
"kl": 0.314453125, |
|
"learning_rate": 3.8738304061681107e-07, |
|
"loss": 0.0126, |
|
"reward": 1.125000035762787, |
|
"reward_std": 0.22324448227882385, |
|
"rewards/accuracy_reward": 0.16250000521540642, |
|
"rewards/format_reward": 0.9625000178813934, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"eval_completion_length": 234.14583998460037, |
|
"eval_kl": 0.32827524038461536, |
|
"eval_loss": 0.014351727440953255, |
|
"eval_reward": 1.1330128541359534, |
|
"eval_reward_std": 0.2333221613214566, |
|
"eval_rewards/accuracy_reward": 0.16346154433603471, |
|
"eval_rewards/format_reward": 0.9695513065044696, |
|
"eval_runtime": 215.8259, |
|
"eval_samples_per_second": 0.459, |
|
"eval_steps_per_second": 0.014, |
|
"step": 2300 |
|
}, |
|
{ |
|
"completion_length": 304.00000915527346, |
|
"epoch": 0.922, |
|
"grad_norm": 0.8276825063442558, |
|
"kl": 0.2984375, |
|
"learning_rate": 3.68374332023419e-07, |
|
"loss": 0.0119, |
|
"reward": 1.045833373069763, |
|
"reward_std": 0.2611467272043228, |
|
"rewards/accuracy_reward": 0.10416667014360428, |
|
"rewards/format_reward": 0.9416666984558105, |
|
"step": 2305 |
|
}, |
|
{ |
|
"completion_length": 239.92084045410155, |
|
"epoch": 0.924, |
|
"grad_norm": 0.5776581401348798, |
|
"kl": 0.3056640625, |
|
"learning_rate": 3.498350552768859e-07, |
|
"loss": 0.0122, |
|
"reward": 1.1250000476837159, |
|
"reward_std": 0.21731178238987922, |
|
"rewards/accuracy_reward": 0.14166666977107525, |
|
"rewards/format_reward": 0.9833333432674408, |
|
"step": 2310 |
|
}, |
|
{ |
|
"completion_length": 243.52084350585938, |
|
"epoch": 0.926, |
|
"grad_norm": 0.5717808613340961, |
|
"kl": 0.28369140625, |
|
"learning_rate": 3.3176611395540625e-07, |
|
"loss": 0.0114, |
|
"reward": 1.0875000357627869, |
|
"reward_std": 0.18861377909779548, |
|
"rewards/accuracy_reward": 0.1083333358168602, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 2315 |
|
}, |
|
{ |
|
"completion_length": 230.64167327880858, |
|
"epoch": 0.928, |
|
"grad_norm": 0.8434649020063801, |
|
"kl": 0.33193359375, |
|
"learning_rate": 3.1416838871368925e-07, |
|
"loss": 0.0133, |
|
"reward": 1.2041666984558106, |
|
"reward_std": 0.2520026877522469, |
|
"rewards/accuracy_reward": 0.22500000447034835, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 2320 |
|
}, |
|
{ |
|
"completion_length": 271.32084197998046, |
|
"epoch": 0.93, |
|
"grad_norm": 0.6927657151612084, |
|
"kl": 0.3126953125, |
|
"learning_rate": 2.970427372400353e-07, |
|
"loss": 0.0125, |
|
"reward": 1.0708333730697632, |
|
"reward_std": 0.21362721994519235, |
|
"rewards/accuracy_reward": 0.11250000335276127, |
|
"rewards/format_reward": 0.9583333492279053, |
|
"step": 2325 |
|
}, |
|
{ |
|
"completion_length": 287.02084197998045, |
|
"epoch": 0.932, |
|
"grad_norm": 0.5476312804811295, |
|
"kl": 0.31416015625, |
|
"learning_rate": 2.8038999421453827e-07, |
|
"loss": 0.0126, |
|
"reward": 1.1083333551883698, |
|
"reward_std": 0.21203412041068076, |
|
"rewards/accuracy_reward": 0.15416667200624942, |
|
"rewards/format_reward": 0.9541666746139527, |
|
"step": 2330 |
|
}, |
|
{ |
|
"completion_length": 286.46251220703124, |
|
"epoch": 0.934, |
|
"grad_norm": 0.6773822629722608, |
|
"kl": 0.3408203125, |
|
"learning_rate": 2.6421097126839714e-07, |
|
"loss": 0.0137, |
|
"reward": 1.0875000417232514, |
|
"reward_std": 0.22804322466254234, |
|
"rewards/accuracy_reward": 0.12916667126119136, |
|
"rewards/format_reward": 0.9583333551883697, |
|
"step": 2335 |
|
}, |
|
{ |
|
"completion_length": 233.5666748046875, |
|
"epoch": 0.936, |
|
"grad_norm": 0.9631119728257711, |
|
"kl": 0.3609375, |
|
"learning_rate": 2.4850645694436736e-07, |
|
"loss": 0.0144, |
|
"reward": 1.1500000357627869, |
|
"reward_std": 0.23964481428265572, |
|
"rewards/accuracy_reward": 0.17083333805203438, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 2340 |
|
}, |
|
{ |
|
"completion_length": 256.07084197998046, |
|
"epoch": 0.938, |
|
"grad_norm": 0.5283231336286257, |
|
"kl": 0.30322265625, |
|
"learning_rate": 2.332772166583208e-07, |
|
"loss": 0.0121, |
|
"reward": 1.112500047683716, |
|
"reward_std": 0.1783842422068119, |
|
"rewards/accuracy_reward": 0.14166667088866233, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 2345 |
|
}, |
|
{ |
|
"completion_length": 256.93334045410154, |
|
"epoch": 0.94, |
|
"grad_norm": 0.7126466204158886, |
|
"kl": 0.3, |
|
"learning_rate": 2.1852399266194312e-07, |
|
"loss": 0.012, |
|
"reward": 1.0958333611488342, |
|
"reward_std": 0.23403963819146156, |
|
"rewards/accuracy_reward": 0.1208333358168602, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 2350 |
|
}, |
|
{ |
|
"completion_length": 238.1291732788086, |
|
"epoch": 0.942, |
|
"grad_norm": 0.6637433721941498, |
|
"kl": 0.305859375, |
|
"learning_rate": 2.0424750400655947e-07, |
|
"loss": 0.0122, |
|
"reward": 1.1291667103767395, |
|
"reward_std": 0.21683285459876062, |
|
"rewards/accuracy_reward": 0.1541666716337204, |
|
"rewards/format_reward": 0.9750000238418579, |
|
"step": 2355 |
|
}, |
|
{ |
|
"completion_length": 245.8625061035156, |
|
"epoch": 0.944, |
|
"grad_norm": 0.8413001069576047, |
|
"kl": 0.3373046875, |
|
"learning_rate": 1.9044844650808468e-07, |
|
"loss": 0.0135, |
|
"reward": 1.0541666924953461, |
|
"reward_std": 0.18833255916833877, |
|
"rewards/accuracy_reward": 0.08750000223517418, |
|
"rewards/format_reward": 0.9666666865348816, |
|
"step": 2360 |
|
}, |
|
{ |
|
"completion_length": 262.5250045776367, |
|
"epoch": 0.946, |
|
"grad_norm": 0.6316109309990907, |
|
"kl": 0.3193359375, |
|
"learning_rate": 1.7712749271311392e-07, |
|
"loss": 0.0128, |
|
"reward": 1.0833333671092986, |
|
"reward_std": 0.20121916756033897, |
|
"rewards/accuracy_reward": 0.11250000186264515, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 2365 |
|
}, |
|
{ |
|
"completion_length": 243.39167175292968, |
|
"epoch": 0.948, |
|
"grad_norm": 0.6991473147311043, |
|
"kl": 0.3158203125, |
|
"learning_rate": 1.6428529186614195e-07, |
|
"loss": 0.0126, |
|
"reward": 1.2166667103767395, |
|
"reward_std": 0.2617532812058926, |
|
"rewards/accuracy_reward": 0.23750000447034836, |
|
"rewards/format_reward": 0.9791666746139527, |
|
"step": 2370 |
|
}, |
|
{ |
|
"completion_length": 248.2041748046875, |
|
"epoch": 0.95, |
|
"grad_norm": 0.9976654981267077, |
|
"kl": 0.29189453125, |
|
"learning_rate": 1.519224698779198e-07, |
|
"loss": 0.0117, |
|
"reward": 1.129166716337204, |
|
"reward_std": 0.21251304894685746, |
|
"rewards/accuracy_reward": 0.1541666705161333, |
|
"rewards/format_reward": 0.975000011920929, |
|
"step": 2375 |
|
}, |
|
{ |
|
"completion_length": 236.87084197998047, |
|
"epoch": 0.952, |
|
"grad_norm": 1.025244017091307, |
|
"kl": 0.312890625, |
|
"learning_rate": 1.400396292949513e-07, |
|
"loss": 0.0125, |
|
"reward": 1.0833333730697632, |
|
"reward_std": 0.17568050250411033, |
|
"rewards/accuracy_reward": 0.10416666977107525, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 2380 |
|
}, |
|
{ |
|
"completion_length": 239.7541717529297, |
|
"epoch": 0.954, |
|
"grad_norm": 0.5280779053111202, |
|
"kl": 0.29990234375, |
|
"learning_rate": 1.2863734927012094e-07, |
|
"loss": 0.012, |
|
"reward": 1.1083333790302277, |
|
"reward_std": 0.2078887030482292, |
|
"rewards/accuracy_reward": 0.13333333879709244, |
|
"rewards/format_reward": 0.975000011920929, |
|
"step": 2385 |
|
}, |
|
{ |
|
"completion_length": 257.35001068115236, |
|
"epoch": 0.956, |
|
"grad_norm": 0.7506657762258959, |
|
"kl": 0.33359375, |
|
"learning_rate": 1.1771618553447217e-07, |
|
"loss": 0.0133, |
|
"reward": 1.1083333611488342, |
|
"reward_std": 0.27584577947854994, |
|
"rewards/accuracy_reward": 0.15000000260770321, |
|
"rewards/format_reward": 0.9583333492279053, |
|
"step": 2390 |
|
}, |
|
{ |
|
"completion_length": 253.93750457763673, |
|
"epoch": 0.958, |
|
"grad_norm": 0.42898933144776946, |
|
"kl": 0.3265625, |
|
"learning_rate": 1.0727667037011668e-07, |
|
"loss": 0.0131, |
|
"reward": 1.062500035762787, |
|
"reward_std": 0.1832063004374504, |
|
"rewards/accuracy_reward": 0.08750000223517418, |
|
"rewards/format_reward": 0.9750000238418579, |
|
"step": 2395 |
|
}, |
|
{ |
|
"completion_length": 204.62084045410157, |
|
"epoch": 0.96, |
|
"grad_norm": 0.7711065338130425, |
|
"kl": 0.33046875, |
|
"learning_rate": 9.731931258429638e-08, |
|
"loss": 0.0132, |
|
"reward": 1.1500000417232514, |
|
"reward_std": 0.15125247687101365, |
|
"rewards/accuracy_reward": 0.1708333395421505, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"eval_completion_length": 232.98879065880408, |
|
"eval_kl": 0.32271634615384615, |
|
"eval_loss": 0.013007663190364838, |
|
"eval_reward": 1.1506410562075102, |
|
"eval_reward_std": 0.24151590065314218, |
|
"eval_rewards/accuracy_reward": 0.17788461996958807, |
|
"eval_rewards/format_reward": 0.9727564270679767, |
|
"eval_runtime": 214.5988, |
|
"eval_samples_per_second": 0.461, |
|
"eval_steps_per_second": 0.014, |
|
"step": 2400 |
|
}, |
|
{ |
|
"completion_length": 285.9666748046875, |
|
"epoch": 0.962, |
|
"grad_norm": 0.21086359841471036, |
|
"kl": 0.30234375, |
|
"learning_rate": 8.784459748458318e-08, |
|
"loss": 0.0121, |
|
"reward": 1.0583333611488341, |
|
"reward_std": 0.1348556734621525, |
|
"rewards/accuracy_reward": 0.09583333395421505, |
|
"rewards/format_reward": 0.9625000298023224, |
|
"step": 2405 |
|
}, |
|
{ |
|
"completion_length": 255.64167327880858, |
|
"epoch": 0.964, |
|
"grad_norm": 1.1574014329263573, |
|
"kl": 0.3009765625, |
|
"learning_rate": 7.885298685522235e-08, |
|
"loss": 0.012, |
|
"reward": 1.0875000357627869, |
|
"reward_std": 0.23103000968694687, |
|
"rewards/accuracy_reward": 0.12083333767950535, |
|
"rewards/format_reward": 0.9666666924953461, |
|
"step": 2410 |
|
}, |
|
{ |
|
"completion_length": 253.06250915527343, |
|
"epoch": 0.966, |
|
"grad_norm": 0.5696899940020976, |
|
"kl": 0.3279296875, |
|
"learning_rate": 7.034491893463059e-08, |
|
"loss": 0.0131, |
|
"reward": 1.0791667103767395, |
|
"reward_std": 0.21763930991292, |
|
"rewards/accuracy_reward": 0.10416666977107525, |
|
"rewards/format_reward": 0.9750000178813935, |
|
"step": 2415 |
|
}, |
|
{ |
|
"completion_length": 225.98750762939454, |
|
"epoch": 0.968, |
|
"grad_norm": 0.6929231238436696, |
|
"kl": 0.318359375, |
|
"learning_rate": 6.232080839403631e-08, |
|
"loss": 0.0127, |
|
"reward": 1.1458333611488343, |
|
"reward_std": 0.19018001332879067, |
|
"rewards/accuracy_reward": 0.1625000026077032, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 2420 |
|
}, |
|
{ |
|
"completion_length": 219.8541748046875, |
|
"epoch": 0.97, |
|
"grad_norm": 0.6209077338105314, |
|
"kl": 0.3103515625, |
|
"learning_rate": 5.4781046317267103e-08, |
|
"loss": 0.0124, |
|
"reward": 1.1625000357627868, |
|
"reward_std": 0.2506072849035263, |
|
"rewards/accuracy_reward": 0.17916667014360427, |
|
"rewards/format_reward": 0.9833333492279053, |
|
"step": 2425 |
|
}, |
|
{ |
|
"completion_length": 297.3958374023438, |
|
"epoch": 0.972, |
|
"grad_norm": 0.4301111358616339, |
|
"kl": 0.31669921875, |
|
"learning_rate": 4.772600018168816e-08, |
|
"loss": 0.0127, |
|
"reward": 1.016666692495346, |
|
"reward_std": 0.20313626676797866, |
|
"rewards/accuracy_reward": 0.07083333507180214, |
|
"rewards/format_reward": 0.9458333492279053, |
|
"step": 2430 |
|
}, |
|
{ |
|
"completion_length": 209.98750762939454, |
|
"epoch": 0.974, |
|
"grad_norm": 0.39213515299137125, |
|
"kl": 0.306640625, |
|
"learning_rate": 4.115601384029666e-08, |
|
"loss": 0.0123, |
|
"reward": 1.1333333730697632, |
|
"reward_std": 0.18591004163026809, |
|
"rewards/accuracy_reward": 0.14583334028720857, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 2435 |
|
}, |
|
{ |
|
"completion_length": 215.95000457763672, |
|
"epoch": 0.976, |
|
"grad_norm": 1.130278072056706, |
|
"kl": 0.3189453125, |
|
"learning_rate": 3.50714075049563e-08, |
|
"loss": 0.0128, |
|
"reward": 1.2416667222976685, |
|
"reward_std": 0.324382396042347, |
|
"rewards/accuracy_reward": 0.2666666753590107, |
|
"rewards/format_reward": 0.975000011920929, |
|
"step": 2440 |
|
}, |
|
{ |
|
"completion_length": 282.88750915527345, |
|
"epoch": 0.978, |
|
"grad_norm": 0.2692108788435084, |
|
"kl": 0.296875, |
|
"learning_rate": 2.947247773079753e-08, |
|
"loss": 0.0119, |
|
"reward": 1.0833333671092986, |
|
"reward_std": 0.153701850771904, |
|
"rewards/accuracy_reward": 0.11666667014360428, |
|
"rewards/format_reward": 0.9666666805744171, |
|
"step": 2445 |
|
}, |
|
{ |
|
"completion_length": 242.4875045776367, |
|
"epoch": 0.98, |
|
"grad_norm": 0.6295793167108874, |
|
"kl": 0.29150390625, |
|
"learning_rate": 2.4359497401758026e-08, |
|
"loss": 0.0117, |
|
"reward": 1.2041667103767395, |
|
"reward_std": 0.2249172128736973, |
|
"rewards/accuracy_reward": 0.23333334028720856, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 2450 |
|
}, |
|
{ |
|
"completion_length": 300.2125076293945, |
|
"epoch": 0.982, |
|
"grad_norm": 0.765547881156069, |
|
"kl": 0.3287109375, |
|
"learning_rate": 1.973271571728441e-08, |
|
"loss": 0.0131, |
|
"reward": 1.0958333730697631, |
|
"reward_std": 0.30428186282515524, |
|
"rewards/accuracy_reward": 0.1541666716337204, |
|
"rewards/format_reward": 0.9416666924953461, |
|
"step": 2455 |
|
}, |
|
{ |
|
"completion_length": 226.41250762939453, |
|
"epoch": 0.984, |
|
"grad_norm": 0.3857577216711371, |
|
"kl": 0.317578125, |
|
"learning_rate": 1.5592358180189782e-08, |
|
"loss": 0.0127, |
|
"reward": 1.0625000238418578, |
|
"reward_std": 0.12517823055386543, |
|
"rewards/accuracy_reward": 0.07500000149011612, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 2460 |
|
}, |
|
{ |
|
"completion_length": 227.72500610351562, |
|
"epoch": 0.986, |
|
"grad_norm": 0.6046244061961439, |
|
"kl": 0.33046875, |
|
"learning_rate": 1.1938626585660252e-08, |
|
"loss": 0.0132, |
|
"reward": 1.0916667103767395, |
|
"reward_std": 0.19912418201565743, |
|
"rewards/accuracy_reward": 0.11250000260770321, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 2465 |
|
}, |
|
{ |
|
"completion_length": 221.62083892822267, |
|
"epoch": 0.988, |
|
"grad_norm": 0.6204160140005214, |
|
"kl": 0.29775390625, |
|
"learning_rate": 8.771699011416169e-09, |
|
"loss": 0.0119, |
|
"reward": 1.1333333730697632, |
|
"reward_std": 0.22539967224001883, |
|
"rewards/accuracy_reward": 0.15416667237877846, |
|
"rewards/format_reward": 0.9791666805744171, |
|
"step": 2470 |
|
}, |
|
{ |
|
"completion_length": 210.60834045410155, |
|
"epoch": 0.99, |
|
"grad_norm": 1.1403201560013576, |
|
"kl": 0.3005859375, |
|
"learning_rate": 6.091729809042379e-09, |
|
"loss": 0.012, |
|
"reward": 1.1958333730697632, |
|
"reward_std": 0.2867168977856636, |
|
"rewards/accuracy_reward": 0.20833334028720857, |
|
"rewards/format_reward": 0.987500011920929, |
|
"step": 2475 |
|
}, |
|
{ |
|
"completion_length": 230.5500045776367, |
|
"epoch": 0.992, |
|
"grad_norm": 0.9268749218220572, |
|
"kl": 0.32685546875, |
|
"learning_rate": 3.898849596456477e-09, |
|
"loss": 0.0131, |
|
"reward": 1.1000000476837157, |
|
"reward_std": 0.18649545162916184, |
|
"rewards/accuracy_reward": 0.1208333358168602, |
|
"rewards/format_reward": 0.9791666686534881, |
|
"step": 2480 |
|
}, |
|
{ |
|
"completion_length": 235.82917175292968, |
|
"epoch": 0.994, |
|
"grad_norm": 0.49651550175485254, |
|
"kl": 0.28681640625, |
|
"learning_rate": 2.193165251545004e-09, |
|
"loss": 0.0115, |
|
"reward": 1.075000035762787, |
|
"reward_std": 0.17952174171805382, |
|
"rewards/accuracy_reward": 0.0958333346992731, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 2485 |
|
}, |
|
{ |
|
"completion_length": 256.3333404541016, |
|
"epoch": 0.996, |
|
"grad_norm": 0.1905486064300661, |
|
"kl": 0.3017578125, |
|
"learning_rate": 9.74759906957612e-10, |
|
"loss": 0.0121, |
|
"reward": 1.0375000357627868, |
|
"reward_std": 0.16711369305849075, |
|
"rewards/accuracy_reward": 0.06666666939854622, |
|
"rewards/format_reward": 0.9708333432674408, |
|
"step": 2490 |
|
}, |
|
{ |
|
"completion_length": 235.0958396911621, |
|
"epoch": 0.998, |
|
"grad_norm": 0.5058367377833421, |
|
"kl": 0.325, |
|
"learning_rate": 2.436929460525317e-10, |
|
"loss": 0.013, |
|
"reward": 1.1083333492279053, |
|
"reward_std": 0.16178620904684066, |
|
"rewards/accuracy_reward": 0.1375000011175871, |
|
"rewards/format_reward": 0.9708333492279053, |
|
"step": 2495 |
|
}, |
|
{ |
|
"completion_length": 244.67083892822265, |
|
"epoch": 1.0, |
|
"grad_norm": 0.3908725363308062, |
|
"kl": 0.359765625, |
|
"learning_rate": 0.0, |
|
"loss": 0.0144, |
|
"reward": 1.0833333611488343, |
|
"reward_std": 0.17759759426116944, |
|
"rewards/accuracy_reward": 0.10416666977107525, |
|
"rewards/format_reward": 0.9791666865348816, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"eval_completion_length": 204.63622225247897, |
|
"eval_kl": 0.3269230769230769, |
|
"eval_loss": 0.012251908890902996, |
|
"eval_reward": 1.1634615751413198, |
|
"eval_reward_std": 0.17546415844788918, |
|
"eval_rewards/accuracy_reward": 0.1714743645145343, |
|
"eval_rewards/format_reward": 0.9919871871288006, |
|
"eval_runtime": 145.9043, |
|
"eval_samples_per_second": 0.679, |
|
"eval_steps_per_second": 0.021, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"step": 2500, |
|
"total_flos": 0.0, |
|
"train_loss": 69.57985635299504, |
|
"train_runtime": 92793.5148, |
|
"train_samples_per_second": 0.216, |
|
"train_steps_per_second": 0.027 |
|
} |
|
], |
|
"logging_steps": 5, |
|
"max_steps": 2500, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 500, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": false, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 4, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |