{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 10,
  "global_step": 564,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 559.6541828155517,
      "epoch": 0.05333333333333334,
      "grad_norm": 0.13850688934326172,
      "kl": 0.000306999683380127,
      "learning_rate": 1.60427807486631e-05,
      "loss": 0.016,
      "reward": 0.02916666753590107,
      "reward_std": 0.07144345119595527,
      "rewards/accuracy_reward": 0.02916666753590107,
      "rewards/format_reward": 0.0,
      "step": 10
    },
    {
      "completion_length": 563.3125160217285,
      "epoch": 0.10666666666666667,
      "grad_norm": 0.0018512221286073327,
      "kl": 0.00039801597595214846,
      "learning_rate": 3.20855614973262e-05,
      "loss": -0.0104,
      "reward": 0.01666666716337204,
      "reward_std": 0.03332236036658287,
      "rewards/accuracy_reward": 0.01666666716337204,
      "rewards/format_reward": 0.0,
      "step": 20
    },
    {
      "completion_length": 568.7541816711425,
      "epoch": 0.16,
      "grad_norm": 0.08763159811496735,
      "kl": 0.0006775140762329101,
      "learning_rate": 4.8128342245989304e-05,
      "loss": 0.0221,
      "reward": 0.02500000074505806,
      "reward_std": 0.05373477563261986,
      "rewards/accuracy_reward": 0.02500000074505806,
      "rewards/format_reward": 0.0,
      "step": 30
    },
    {
      "completion_length": 544.433352279663,
      "epoch": 0.21333333333333335,
      "grad_norm": 0.07478004693984985,
      "kl": 0.0009945392608642577,
      "learning_rate": 6.41711229946524e-05,
      "loss": -0.007,
      "reward": 0.03750000074505806,
      "reward_std": 0.07493030950427056,
      "rewards/accuracy_reward": 0.03750000074505806,
      "rewards/format_reward": 0.0,
      "step": 40
    },
    {
      "completion_length": 558.0416824340821,
      "epoch": 0.26666666666666666,
      "grad_norm": 0.0056296749971807,
      "kl": 0.0018447399139404296,
      "learning_rate": 8.021390374331551e-05,
      "loss": -0.0183,
      "reward": 0.03333333432674408,
      "reward_std": 0.07414718866348266,
      "rewards/accuracy_reward": 0.03333333432674408,
      "rewards/format_reward": 0.0,
      "step": 50
    },
    {
      "completion_length": 514.7416809082031,
      "epoch": 0.32,
      "grad_norm": 0.04819507524371147,
      "kl": 0.004059267044067383,
      "learning_rate": 9.625668449197861e-05,
      "loss": 0.0222,
      "reward": 0.03750000074505806,
      "reward_std": 0.06742783784866332,
      "rewards/accuracy_reward": 0.03750000074505806,
      "rewards/format_reward": 0.0,
      "step": 60
    },
    {
      "completion_length": 492.67084846496584,
      "epoch": 0.37333333333333335,
      "grad_norm": 0.017955631017684937,
      "kl": 0.00576171875,
      "learning_rate": 0.0001122994652406417,
      "loss": -0.0364,
      "reward": 0.06250000149011611,
      "reward_std": 0.12116261124610901,
      "rewards/accuracy_reward": 0.06250000149011611,
      "rewards/format_reward": 0.0,
      "step": 70
    },
    {
      "completion_length": 547.7750144958496,
      "epoch": 0.4266666666666667,
      "grad_norm": 0.1688847839832306,
      "kl": 0.008740234375,
      "learning_rate": 0.0001283422459893048,
      "loss": 0.0085,
      "reward": 0.06666666865348816,
      "reward_std": 0.12578697204589845,
      "rewards/accuracy_reward": 0.06666666865348816,
      "rewards/format_reward": 0.0,
      "step": 80
    },
    {
      "completion_length": 456.55834770202637,
      "epoch": 0.48,
      "grad_norm": 0.05612310767173767,
      "kl": 0.0114776611328125,
      "learning_rate": 0.0001443850267379679,
      "loss": -0.006,
      "reward": 0.12500000186264515,
      "reward_std": 0.18614107817411424,
      "rewards/accuracy_reward": 0.12500000186264515,
      "rewards/format_reward": 0.0,
      "step": 90
    },
    {
      "completion_length": 521.2875152587891,
      "epoch": 0.5333333333333333,
      "grad_norm": 0.06599655747413635,
      "kl": 0.018022537231445312,
      "learning_rate": 0.00016042780748663101,
      "loss": 0.0321,
      "reward": 0.1625000037252903,
      "reward_std": 0.2668332472443581,
      "rewards/accuracy_reward": 0.1625000037252903,
      "rewards/format_reward": 0.0,
      "step": 100
    },
    {
      "completion_length": 578.708349609375,
      "epoch": 0.5866666666666667,
      "grad_norm": 0.10739815980195999,
      "kl": 0.02028350830078125,
      "learning_rate": 0.0001764705882352941,
      "loss": 0.0315,
      "reward": 0.1541666690260172,
      "reward_std": 0.22565403282642366,
      "rewards/accuracy_reward": 0.1541666690260172,
      "rewards/format_reward": 0.0,
      "step": 110
    },
    {
      "completion_length": 578.1541816711426,
      "epoch": 0.64,
      "grad_norm": 0.059991322457790375,
      "kl": 0.0370635986328125,
      "learning_rate": 0.00019251336898395722,
      "loss": 0.0516,
      "reward": 0.13333333656191826,
      "reward_std": 0.1974847875535488,
      "rewards/accuracy_reward": 0.13333333656191826,
      "rewards/format_reward": 0.0,
      "step": 120
    },
    {
      "completion_length": 530.4750141143799,
      "epoch": 0.6933333333333334,
      "grad_norm": 0.30204519629478455,
      "kl": 0.07847900390625,
      "learning_rate": 0.0002085561497326203,
      "loss": -0.0095,
      "reward": 0.09166666865348816,
      "reward_std": 0.1606755696237087,
      "rewards/accuracy_reward": 0.09166666865348816,
      "rewards/format_reward": 0.0,
      "step": 130
    },
    {
      "completion_length": 473.15001449584963,
      "epoch": 0.7466666666666667,
      "grad_norm": 0.04992169886827469,
      "kl": 0.194708251953125,
      "learning_rate": 0.0002245989304812834,
      "loss": 0.0413,
      "reward": 0.10833333544433117,
      "reward_std": 0.17916736453771592,
      "rewards/accuracy_reward": 0.10833333544433117,
      "rewards/format_reward": 0.0,
      "step": 140
    },
    {
      "completion_length": 538.2416828155517,
      "epoch": 0.8,
      "grad_norm": 0.08135983347892761,
      "kl": 0.5037109375,
      "learning_rate": 0.0002406417112299465,
      "loss": 0.0135,
      "reward": 0.03750000111758709,
      "reward_std": 0.08435339704155922,
      "rewards/accuracy_reward": 0.03750000111758709,
      "rewards/format_reward": 0.0,
      "step": 150
    },
    {
      "completion_length": 475.47501335144045,
      "epoch": 0.8533333333333334,
      "grad_norm": 0.2563917338848114,
      "kl": 0.57646484375,
      "learning_rate": 0.0002566844919786096,
      "loss": -0.0059,
      "reward": 0.07916666865348816,
      "reward_std": 0.16198744028806686,
      "rewards/accuracy_reward": 0.07916666865348816,
      "rewards/format_reward": 0.0,
      "step": 160
    },
    {
      "completion_length": 380.40834312438966,
      "epoch": 0.9066666666666666,
      "grad_norm": 0.1821465641260147,
      "kl": 0.9548828125,
      "learning_rate": 0.0002727272727272727,
      "loss": -0.0106,
      "reward": 0.07083333469927311,
      "reward_std": 0.13215193897485733,
      "rewards/accuracy_reward": 0.07083333469927311,
      "rewards/format_reward": 0.0,
      "step": 170
    },
    {
      "completion_length": 308.63750953674315,
      "epoch": 0.96,
      "grad_norm": 0.09076907485723495,
      "kl": 0.826904296875,
      "learning_rate": 0.0002887700534759358,
      "loss": 0.0082,
      "reward": 0.05833333469927311,
      "reward_std": 0.09595146998763085,
      "rewards/accuracy_reward": 0.05833333469927311,
      "rewards/format_reward": 0.0,
      "step": 180
    },
    {
      "completion_length": 346.1315877814042,
      "epoch": 1.0106666666666666,
      "grad_norm": 0.010662930086255074,
      "kl": 0.3036980879934211,
      "learning_rate": 0.00029999764801714643,
      "loss": 0.011,
      "reward": 0.008771930085985284,
      "reward_std": 0.02148675212734624,
      "rewards/accuracy_reward": 0.008771930085985284,
      "rewards/format_reward": 0.0,
      "step": 190
    },
    {
      "completion_length": 361.1166759490967,
      "epoch": 1.064,
      "grad_norm": 0.0097609031945467,
      "kl": 0.139459228515625,
      "learning_rate": 0.00029995583704046604,
      "loss": -0.0261,
      "reward": 0.01666666716337204,
      "reward_std": 0.04082482978701592,
      "rewards/accuracy_reward": 0.01666666716337204,
      "rewards/format_reward": 0.0,
      "step": 200
    },
    {
      "completion_length": 408.6250129699707,
      "epoch": 1.1173333333333333,
      "grad_norm": 0.06736944615840912,
      "kl": 0.117303466796875,
      "learning_rate": 0.00029986177654701897,
      "loss": -0.0272,
      "reward": 0.05833333507180214,
      "reward_std": 0.12037949487566948,
      "rewards/accuracy_reward": 0.05833333507180214,
      "rewards/format_reward": 0.0,
      "step": 210
    },
    {
      "completion_length": 416.32084465026855,
      "epoch": 1.1706666666666667,
      "grad_norm": 0.05257809907197952,
      "kl": 0.135888671875,
      "learning_rate": 0.0002997154993105566,
      "loss": 0.0094,
      "reward": 0.08750000186264514,
      "reward_std": 0.13755941689014434,
      "rewards/accuracy_reward": 0.08750000186264514,
      "rewards/format_reward": 0.0,
      "step": 220
    },
    {
      "completion_length": 447.2416820526123,
      "epoch": 1.224,
      "grad_norm": 0.015010912902653217,
      "kl": 0.182366943359375,
      "learning_rate": 0.000299517056298851,
      "loss": -0.0199,
      "reward": 0.08750000186264514,
      "reward_std": 0.14506188556551933,
      "rewards/accuracy_reward": 0.08750000186264514,
      "rewards/format_reward": 0.0,
      "step": 230
    },
    {
      "completion_length": 435.77084732055664,
      "epoch": 1.2773333333333334,
      "grad_norm": 0.07046622037887573,
      "kl": 0.4079345703125,
      "learning_rate": 0.00029926651665593604,
      "loss": 0.0026,
      "reward": 0.17500000298023224,
      "reward_std": 0.22626278176903725,
      "rewards/accuracy_reward": 0.17500000298023224,
      "rewards/format_reward": 0.0,
      "step": 240
    },
    {
      "completion_length": 477.8125129699707,
      "epoch": 1.3306666666666667,
      "grad_norm": 0.09637847542762756,
      "kl": 1.467529296875,
      "learning_rate": 0.0002989639676780152,
      "loss": 0.0577,
      "reward": 0.03750000111758709,
      "reward_std": 0.07685092687606812,
      "rewards/accuracy_reward": 0.03750000111758709,
      "rewards/format_reward": 0.0,
      "step": 250
    },
    {
      "completion_length": 524.6041809082031,
      "epoch": 1.384,
      "grad_norm": 0.14201654493808746,
      "kl": 1.02216796875,
      "learning_rate": 0.000298609514783045,
      "loss": 0.0522,
      "reward": 0.10000000298023223,
      "reward_std": 0.17952174469828605,
      "rewards/accuracy_reward": 0.10000000298023223,
      "rewards/format_reward": 0.0,
      "step": 260
    },
    {
      "completion_length": 540.0458435058594,
      "epoch": 1.4373333333333334,
      "grad_norm": 0.12917852401733398,
      "kl": 1.3251953125,
      "learning_rate": 0.0002982032814740035,
      "loss": 0.0423,
      "reward": 0.08750000186264514,
      "reward_std": 0.15046936124563218,
      "rewards/accuracy_reward": 0.08750000186264514,
      "rewards/format_reward": 0.0,
      "step": 270
    },
    {
      "completion_length": 563.3541828155518,
      "epoch": 1.4906666666666666,
      "grad_norm": 0.021925602108240128,
      "kl": 0.750732421875,
      "learning_rate": 0.00029774540929585847,
      "loss": 0.0621,
      "reward": 0.07916666828095913,
      "reward_std": 0.14506188482046128,
      "rewards/accuracy_reward": 0.07916666828095913,
      "rewards/format_reward": 0.0,
      "step": 280
    },
    {
      "completion_length": 510.54584884643555,
      "epoch": 1.544,
      "grad_norm": 0.06929797679185867,
      "kl": 1.295654296875,
      "learning_rate": 0.0002972360577862478,
      "loss": 0.0718,
      "reward": 0.10000000223517418,
      "reward_std": 0.17358551546931267,
      "rewards/accuracy_reward": 0.10000000223517418,
      "rewards/format_reward": 0.0,
      "step": 290
    },
    {
      "completion_length": 531.6000164031982,
      "epoch": 1.5973333333333333,
      "grad_norm": 0.13263341784477234,
      "kl": 1.4533935546875,
      "learning_rate": 0.0002966754044198919,
      "loss": 0.0413,
      "reward": 0.08750000186264514,
      "reward_std": 0.1579718306660652,
      "rewards/accuracy_reward": 0.08750000186264514,
      "rewards/format_reward": 0.0,
      "step": 300
    },
    {
      "completion_length": 531.7875152587891,
      "epoch": 1.6506666666666665,
      "grad_norm": 0.024580156430602074,
      "kl": 1.09212646484375,
      "learning_rate": 0.0002960636445467553,
      "loss": 0.0409,
      "reward": 0.11250000223517417,
      "reward_std": 0.18727857917547225,
      "rewards/accuracy_reward": 0.11250000223517417,
      "rewards/format_reward": 0.0,
      "step": 310
    },
    {
      "completion_length": 516.079179763794,
      "epoch": 1.704,
      "grad_norm": 0.15221922099590302,
      "kl": 1.27918701171875,
      "learning_rate": 0.00029540099132398094,
      "loss": 0.0843,
      "reward": 0.18750000558793545,
      "reward_std": 0.20018852651119232,
      "rewards/accuracy_reward": 0.18750000558793545,
      "rewards/format_reward": 0.0,
      "step": 320
    },
    {
      "completion_length": 500.36251678466795,
      "epoch": 1.7573333333333334,
      "grad_norm": 0.1837584376335144,
      "kl": 2.253369140625,
      "learning_rate": 0.00029468767564161825,
      "loss": 0.0615,
      "reward": 0.11250000223517417,
      "reward_std": 0.17436863631010055,
      "rewards/accuracy_reward": 0.11250000223517417,
      "rewards/format_reward": 0.0,
      "step": 330
    },
    {
      "completion_length": 593.9291820526123,
      "epoch": 1.8106666666666666,
      "grad_norm": 0.09892378002405167,
      "kl": 2.30611572265625,
      "learning_rate": 0.0002939239460421746,
      "loss": 0.0945,
      "reward": 0.1333333358168602,
      "reward_std": 0.17863860875368118,
      "rewards/accuracy_reward": 0.1333333358168602,
      "rewards/format_reward": 0.0,
      "step": 340
    },
    {
      "completion_length": 705.7875190734864,
      "epoch": 1.8639999999999999,
      "grad_norm": 0.14847955107688904,
      "kl": 1.827001953125,
      "learning_rate": 0.0002931100686340142,
      "loss": 0.0556,
      "reward": 0.07916666865348816,
      "reward_std": 0.15448497384786605,
      "rewards/accuracy_reward": 0.07916666865348816,
      "rewards/format_reward": 0.0,
      "step": 350
    },
    {
      "completion_length": 790.8541862487793,
      "epoch": 1.9173333333333333,
      "grad_norm": 0.0878988653421402,
      "kl": 1.0267333984375,
      "learning_rate": 0.00029224632699863766,
      "loss": 0.0278,
      "reward": 0.1458333373069763,
      "reward_std": 0.22444218024611473,
      "rewards/accuracy_reward": 0.1458333373069763,
      "rewards/format_reward": 0.0,
      "step": 360
    },
    {
      "completion_length": 747.2500213623047,
      "epoch": 1.9706666666666668,
      "grad_norm": 0.3394763469696045,
      "kl": 1.31280517578125,
      "learning_rate": 0.00029133302209187267,
      "loss": 0.0296,
      "reward": 0.14166666977107525,
      "reward_std": 0.2158022090792656,
      "rewards/accuracy_reward": 0.14166666977107525,
      "rewards/format_reward": 0.0,
      "step": 370
    },
    {
      "completion_length": 749.2280891820004,
      "epoch": 2.021333333333333,
      "grad_norm": 0.27024850249290466,
      "kl": 1.6449424342105263,
      "learning_rate": 0.00029037047213901135,
      "loss": 0.0596,
      "reward": 0.1008771948124233,
      "reward_std": 0.16995651392560257,
      "rewards/accuracy_reward": 0.1008771948124233,
      "rewards/format_reward": 0.0,
      "step": 380
    },
    {
      "completion_length": 657.8583488464355,
      "epoch": 2.074666666666667,
      "grad_norm": 0.10394235700368881,
      "kl": 1.498828125,
      "learning_rate": 0.0002893590125239299,
      "loss": 0.0246,
      "reward": 0.1625000037252903,
      "reward_std": 0.21117785051465035,
      "rewards/accuracy_reward": 0.1625000037252903,
      "rewards/format_reward": 0.0,
      "step": 390
    },
    {
      "completion_length": 566.1875144958497,
      "epoch": 2.128,
      "grad_norm": 0.5542222261428833,
      "kl": 2.505419921875,
      "learning_rate": 0.0002882989956722303,
      "loss": 0.0695,
      "reward": 0.16666667200624943,
      "reward_std": 0.20882849544286727,
      "rewards/accuracy_reward": 0.16666667200624943,
      "rewards/format_reward": 0.0,
      "step": 400
    },
    {
      "completion_length": 553.8250183105469,
      "epoch": 2.1813333333333333,
      "grad_norm": 0.1271558701992035,
      "kl": 1.573876953125,
      "learning_rate": 0.0002871907909284432,
      "loss": 0.0686,
      "reward": 0.0416666679084301,
      "reward_std": 0.10206207036972045,
      "rewards/accuracy_reward": 0.0416666679084301,
      "rewards/format_reward": 0.0,
      "step": 410
    },
    {
      "completion_length": 642.8250167846679,
      "epoch": 2.2346666666666666,
      "grad_norm": 0.029755637049674988,
      "kl": 0.3994384765625,
      "learning_rate": 0.0002860347844273365,
      "loss": 0.0353,
      "reward": 0.04166666753590107,
      "reward_std": 0.077634047716856,
      "rewards/accuracy_reward": 0.04166666753590107,
      "rewards/format_reward": 0.0,
      "step": 420
    },
    {
      "completion_length": 633.3208526611328,
      "epoch": 2.288,
      "grad_norm": 0.09252170473337173,
      "kl": 0.3323974609375,
      "learning_rate": 0.0002848313789593736,
      "loss": 0.0383,
      "reward": 0.05000000111758709,
      "reward_std": 0.07763404548168182,
      "rewards/accuracy_reward": 0.05000000111758709,
      "rewards/format_reward": 0.0,
      "step": 430
    },
    {
      "completion_length": 628.4416847229004,
      "epoch": 2.3413333333333335,
      "grad_norm": 0.06033443659543991,
      "kl": 0.28509521484375,
      "learning_rate": 0.0002835809938303674,
      "loss": 0.0458,
      "reward": 0.08333333507180214,
      "reward_std": 0.12037949562072754,
      "rewards/accuracy_reward": 0.08333333507180214,
      "rewards/format_reward": 0.0,
      "step": 440
    },
    {
      "completion_length": 523.1208480834961,
      "epoch": 2.3946666666666667,
      "grad_norm": 0.1295376569032669,
      "kl": 0.518896484375,
      "learning_rate": 0.0002822840647153811,
      "loss": 0.0435,
      "reward": 0.06666666828095913,
      "reward_std": 0.12386635094881057,
      "rewards/accuracy_reward": 0.06666666828095913,
      "rewards/format_reward": 0.0,
      "step": 450
    },
    {
      "completion_length": 490.62501640319823,
      "epoch": 2.448,
      "grad_norm": 0.22692793607711792,
      "kl": 0.84404296875,
      "learning_rate": 0.00028094104350692435,
      "loss": 0.0502,
      "reward": 0.10000000260770321,
      "reward_std": 0.12788196206092833,
      "rewards/accuracy_reward": 0.10000000260770321,
      "rewards/format_reward": 0.0,
      "step": 460
    },
    {
      "completion_length": 495.0000144958496,
      "epoch": 2.501333333333333,
      "grad_norm": 0.1922207474708557,
      "kl": 0.9299072265625,
      "learning_rate": 0.00027955239815749913,
      "loss": 0.0628,
      "reward": 0.07083333507180214,
      "reward_std": 0.14157502651214598,
      "rewards/accuracy_reward": 0.07083333507180214,
      "rewards/format_reward": 0.0,
      "step": 470
    },
    {
      "completion_length": 465.61668090820314,
      "epoch": 2.554666666666667,
      "grad_norm": 0.07803911715745926,
      "kl": 1.01708984375,
      "learning_rate": 0.0002781186125165503,
      "loss": 0.0505,
      "reward": 0.06250000111758709,
      "reward_std": 0.09673458859324455,
      "rewards/accuracy_reward": 0.06250000111758709,
      "rewards/format_reward": 0.0,
      "step": 480
    },
    {
      "completion_length": 461.9125179290771,
      "epoch": 2.608,
      "grad_norm": 0.09385239332914352,
      "kl": 0.82843017578125,
      "learning_rate": 0.0002766401861618757,
      "loss": 0.0318,
      "reward": 0.05000000149011612,
      "reward_std": 0.11497201770544052,
      "rewards/accuracy_reward": 0.05000000149011612,
      "rewards/format_reward": 0.0,
      "step": 490
    },
    {
      "completion_length": 425.1750095367432,
      "epoch": 2.6613333333333333,
      "grad_norm": 0.08281384408473969,
      "kl": 0.635888671875,
      "learning_rate": 0.000275117634225558,
      "loss": 0.0299,
      "reward": 0.02500000074505806,
      "reward_std": 0.05373477414250374,
      "rewards/accuracy_reward": 0.02500000074505806,
      "rewards/format_reward": 0.0,
      "step": 500
    },
    {
      "completion_length": 472.46251106262207,
      "epoch": 2.7146666666666666,
      "grad_norm": 0.1934070885181427,
      "kl": 0.387255859375,
      "learning_rate": 0.0002735514872144749,
      "loss": 0.0189,
      "reward": 0.10000000223517418,
      "reward_std": 0.1681780368089676,
      "rewards/accuracy_reward": 0.10000000223517418,
      "rewards/format_reward": 0.0,
      "step": 510
    },
    {
      "completion_length": 452.3625148773193,
      "epoch": 2.768,
      "grad_norm": 0.13693872094154358,
      "kl": 0.7733154296875,
      "learning_rate": 0.0002719422908254538,
      "loss": 0.0353,
      "reward": 0.07916666828095913,
      "reward_std": 0.13755941689014434,
      "rewards/accuracy_reward": 0.07916666828095913,
      "rewards/format_reward": 0.0,
      "step": 520
    },
    {
      "completion_length": 380.079178237915,
      "epoch": 2.8213333333333335,
      "grad_norm": 0.16365598142147064,
      "kl": 1.54091796875,
      "learning_rate": 0.000270290605755133,
      "loss": 0.0271,
      "reward": 0.07083333469927311,
      "reward_std": 0.10964453294873237,
      "rewards/accuracy_reward": 0.07083333469927311,
      "rewards/format_reward": 0.0,
      "step": 530
    },
    {
      "completion_length": 423.37917976379396,
      "epoch": 2.8746666666666667,
      "grad_norm": 0.11597971618175507,
      "kl": 1.30352783203125,
      "learning_rate": 0.0002685970075045964,
      "loss": 0.0417,
      "reward": 0.15833333730697632,
      "reward_std": 0.2233046792447567,
      "rewards/accuracy_reward": 0.15833333730697632,
      "rewards/format_reward": 0.0,
      "step": 540
    },
    {
      "completion_length": 405.8333465576172,
      "epoch": 2.928,
      "grad_norm": 0.06692465394735336,
      "kl": 1.92783203125,
      "learning_rate": 0.00026686208617885055,
      "loss": 0.0901,
      "reward": 0.03333333432674408,
      "reward_std": 0.06664471924304963,
      "rewards/accuracy_reward": 0.03333333432674408,
      "rewards/format_reward": 0.0,
      "step": 550
    },
    {
      "completion_length": 475.11668281555177,
      "epoch": 2.981333333333333,
      "grad_norm": 0.1280931830406189,
      "kl": 0.60787353515625,
      "learning_rate": 0.00026508644628121244,
      "loss": 0.046,
      "reward": 0.12500000335276126,
      "reward_std": 0.20132602676749228,
      "rewards/accuracy_reward": 0.12500000335276126,
      "rewards/format_reward": 0.0,
      "step": 560
    }
  ],
  "logging_steps": 10,
  "max_steps": 1870,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 6,
  "trial_name": null,
  "trial_params": null
}