{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3437902879243661,
  "eval_steps": 50,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008594757198109154,
      "grad_norm": 0.06708361208438873,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.524938583374023,
      "logits/rejected": 14.82593822479248,
      "logps/chosen": -0.31433865427970886,
      "logps/rejected": -0.32406437397003174,
      "loss": 0.9442,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.4715079367160797,
      "rewards/margins": 0.014588532969355583,
      "rewards/rejected": -0.48609647154808044,
      "step": 10
    },
    {
      "epoch": 0.017189514396218308,
      "grad_norm": 0.056814808398485184,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.309213638305664,
      "logits/rejected": 14.978128433227539,
      "logps/chosen": -0.31283506751060486,
      "logps/rejected": -0.3911947011947632,
      "loss": 0.928,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.46925264596939087,
      "rewards/margins": 0.1175394207239151,
      "rewards/rejected": -0.5867919921875,
      "step": 20
    },
    {
      "epoch": 0.02578427159432746,
      "grad_norm": 0.061199307441711426,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.68384075164795,
      "logits/rejected": 15.338122367858887,
      "logps/chosen": -0.3007296621799469,
      "logps/rejected": -0.3204456865787506,
      "loss": 0.9439,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.45109447836875916,
      "rewards/margins": 0.029573997482657433,
      "rewards/rejected": -0.48066848516464233,
      "step": 30
    },
    {
      "epoch": 0.034379028792436615,
      "grad_norm": 0.08423774689435959,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.39265251159668,
      "logits/rejected": 15.059102058410645,
      "logps/chosen": -0.28216058015823364,
      "logps/rejected": -0.33495840430259705,
      "loss": 0.9184,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.42324090003967285,
      "rewards/margins": 0.07919676601886749,
      "rewards/rejected": -0.5024376511573792,
      "step": 40
    },
    {
      "epoch": 0.042973785990545764,
      "grad_norm": 0.06052614375948906,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.383735656738281,
      "logits/rejected": 15.029413223266602,
      "logps/chosen": -0.27970507740974426,
      "logps/rejected": -0.33213528990745544,
      "loss": 0.9317,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.4195576310157776,
      "rewards/margins": 0.07864536345005035,
      "rewards/rejected": -0.49820294976234436,
      "step": 50
    },
    {
      "epoch": 0.042973785990545764,
      "eval_logits/chosen": 14.424538612365723,
      "eval_logits/rejected": 15.006633758544922,
      "eval_logps/chosen": -0.2923925220966339,
      "eval_logps/rejected": -0.3531996011734009,
      "eval_loss": 0.9324354529380798,
      "eval_rewards/accuracies": 0.5052631497383118,
      "eval_rewards/chosen": -0.43858882784843445,
      "eval_rewards/margins": 0.09121060371398926,
      "eval_rewards/rejected": -0.5297994017601013,
      "eval_runtime": 26.3759,
      "eval_samples_per_second": 28.549,
      "eval_steps_per_second": 3.602,
      "step": 50
    },
    {
      "epoch": 0.05156854318865492,
      "grad_norm": 0.06899414211511612,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.888933181762695,
      "logits/rejected": 15.33955192565918,
      "logps/chosen": -0.2886829972267151,
      "logps/rejected": -0.34016504883766174,
      "loss": 0.9323,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.43302449584007263,
      "rewards/margins": 0.07722309231758118,
      "rewards/rejected": -0.5102475881576538,
      "step": 60
    },
    {
      "epoch": 0.060163300386764075,
      "grad_norm": 0.06679105013608932,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 14.721624374389648,
      "logits/rejected": 15.614666938781738,
      "logps/chosen": -0.29435139894485474,
      "logps/rejected": -0.38699784874916077,
      "loss": 0.9172,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.4415270686149597,
      "rewards/margins": 0.13896968960762024,
      "rewards/rejected": -0.5804967880249023,
      "step": 70
    },
    {
      "epoch": 0.06875805758487323,
      "grad_norm": 0.07169903814792633,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 13.848808288574219,
      "logits/rejected": 14.609800338745117,
      "logps/chosen": -0.26156893372535706,
      "logps/rejected": -0.33030644059181213,
      "loss": 0.9245,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.3923533856868744,
      "rewards/margins": 0.10310628265142441,
      "rewards/rejected": -0.495459645986557,
      "step": 80
    },
    {
      "epoch": 0.07735281478298238,
      "grad_norm": 0.06593246012926102,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 14.603567123413086,
      "logits/rejected": 14.994171142578125,
      "logps/chosen": -0.3191321790218353,
      "logps/rejected": -0.3477073311805725,
      "loss": 0.9359,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.4786983132362366,
      "rewards/margins": 0.042862698435783386,
      "rewards/rejected": -0.5215609669685364,
      "step": 90
    },
    {
      "epoch": 0.08594757198109153,
      "grad_norm": 0.0718066617846489,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 13.928094863891602,
      "logits/rejected": 14.792709350585938,
      "logps/chosen": -0.24115696549415588,
      "logps/rejected": -0.3537539839744568,
      "loss": 0.9066,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3617354929447174,
      "rewards/margins": 0.16889554262161255,
      "rewards/rejected": -0.5306310653686523,
      "step": 100
    },
    {
      "epoch": 0.08594757198109153,
      "eval_logits/chosen": 14.40036392211914,
      "eval_logits/rejected": 14.97786808013916,
      "eval_logps/chosen": -0.2777771055698395,
      "eval_logps/rejected": -0.3516874611377716,
      "eval_loss": 0.9236211180686951,
      "eval_rewards/accuracies": 0.5052631497383118,
      "eval_rewards/chosen": -0.4166657328605652,
      "eval_rewards/margins": 0.11086549609899521,
      "eval_rewards/rejected": -0.5275312066078186,
      "eval_runtime": 25.8056,
      "eval_samples_per_second": 29.18,
      "eval_steps_per_second": 3.681,
      "step": 100
    },
    {
      "epoch": 0.09454232917920069,
      "grad_norm": 0.06681054830551147,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 14.76116943359375,
      "logits/rejected": 15.001077651977539,
      "logps/chosen": -0.297056645154953,
      "logps/rejected": -0.3221590518951416,
      "loss": 0.929,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.4455850124359131,
      "rewards/margins": 0.03765357658267021,
      "rewards/rejected": -0.4832385182380676,
      "step": 110
    },
    {
      "epoch": 0.10313708637730984,
      "grad_norm": 0.10024584829807281,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 14.405306816101074,
      "logits/rejected": 15.084524154663086,
      "logps/chosen": -0.2726767361164093,
      "logps/rejected": -0.3543504774570465,
      "loss": 0.9299,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.40901508927345276,
      "rewards/margins": 0.12251058965921402,
      "rewards/rejected": -0.531525731086731,
      "step": 120
    },
    {
      "epoch": 0.11173184357541899,
      "grad_norm": 0.08629737794399261,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 14.073992729187012,
      "logits/rejected": 14.882128715515137,
      "logps/chosen": -0.2827032506465912,
      "logps/rejected": -0.369393527507782,
      "loss": 0.9109,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.42405492067337036,
      "rewards/margins": 0.13003548979759216,
      "rewards/rejected": -0.5540903806686401,
      "step": 130
    },
    {
      "epoch": 0.12032660077352815,
      "grad_norm": 0.07973086833953857,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 13.96656322479248,
      "logits/rejected": 14.639463424682617,
      "logps/chosen": -0.28426361083984375,
      "logps/rejected": -0.3899250030517578,
      "loss": 0.9138,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4263954162597656,
      "rewards/margins": 0.1584920585155487,
      "rewards/rejected": -0.5848874449729919,
      "step": 140
    },
    {
      "epoch": 0.1289213579716373,
      "grad_norm": 0.08767445385456085,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 13.705177307128906,
      "logits/rejected": 14.19865608215332,
      "logps/chosen": -0.26735779643058777,
      "logps/rejected": -0.34726911783218384,
      "loss": 0.9157,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.40103667974472046,
      "rewards/margins": 0.1198669821023941,
      "rewards/rejected": -0.5209037065505981,
      "step": 150
    },
    {
      "epoch": 0.1289213579716373,
      "eval_logits/chosen": 13.20260238647461,
      "eval_logits/rejected": 13.959339141845703,
      "eval_logps/chosen": -0.27623170614242554,
      "eval_logps/rejected": -0.3724917769432068,
      "eval_loss": 0.909102737903595,
      "eval_rewards/accuracies": 0.557894766330719,
      "eval_rewards/chosen": -0.4143475592136383,
      "eval_rewards/margins": 0.14439010620117188,
      "eval_rewards/rejected": -0.5587376356124878,
      "eval_runtime": 25.7839,
      "eval_samples_per_second": 29.204,
      "eval_steps_per_second": 3.684,
      "step": 150
    },
    {
      "epoch": 0.13751611516974646,
      "grad_norm": 0.09749539196491241,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 13.301411628723145,
      "logits/rejected": 14.054819107055664,
      "logps/chosen": -0.2808162569999695,
      "logps/rejected": -0.39500662684440613,
      "loss": 0.9,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.4212244153022766,
      "rewards/margins": 0.17128555476665497,
      "rewards/rejected": -0.592509925365448,
      "step": 160
    },
    {
      "epoch": 0.1461108723678556,
      "grad_norm": 0.14965052902698517,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 12.261284828186035,
      "logits/rejected": 13.0617036819458,
      "logps/chosen": -0.29266461730003357,
      "logps/rejected": -0.4265298843383789,
      "loss": 0.896,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.43899694085121155,
      "rewards/margins": 0.20079784095287323,
      "rewards/rejected": -0.6397948265075684,
      "step": 170
    },
    {
      "epoch": 0.15470562956596476,
      "grad_norm": 0.13044072687625885,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 11.509119033813477,
      "logits/rejected": 12.31033706665039,
      "logps/chosen": -0.27384257316589355,
      "logps/rejected": -0.3920982778072357,
      "loss": 0.8911,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4107638895511627,
      "rewards/margins": 0.17738358676433563,
      "rewards/rejected": -0.5881474018096924,
      "step": 180
    },
    {
      "epoch": 0.1633003867640739,
      "grad_norm": 0.16182811558246613,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 10.68933391571045,
      "logits/rejected": 11.632065773010254,
      "logps/chosen": -0.292975515127182,
      "logps/rejected": -0.42257896065711975,
      "loss": 0.9002,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.4394632875919342,
      "rewards/margins": 0.19440510869026184,
      "rewards/rejected": -0.633868396282196,
      "step": 190
    },
    {
      "epoch": 0.17189514396218306,
      "grad_norm": 0.181160107254982,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 10.593437194824219,
      "logits/rejected": 11.435877799987793,
      "logps/chosen": -0.32495418190956116,
      "logps/rejected": -0.4480825364589691,
      "loss": 0.8773,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.48743128776550293,
      "rewards/margins": 0.18469250202178955,
      "rewards/rejected": -0.6721237897872925,
      "step": 200
    },
    {
      "epoch": 0.17189514396218306,
      "eval_logits/chosen": 9.299257278442383,
      "eval_logits/rejected": 10.055145263671875,
      "eval_logps/chosen": -0.31059205532073975,
      "eval_logps/rejected": -0.47102925181388855,
      "eval_loss": 0.8721462488174438,
      "eval_rewards/accuracies": 0.6105263233184814,
      "eval_rewards/chosen": -0.4658880829811096,
      "eval_rewards/margins": 0.24065588414669037,
      "eval_rewards/rejected": -0.7065439224243164,
      "eval_runtime": 25.78,
      "eval_samples_per_second": 29.209,
      "eval_steps_per_second": 3.685,
      "step": 200
    },
    {
      "epoch": 0.18048990116029223,
      "grad_norm": 0.24912959337234497,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 8.803088188171387,
      "logits/rejected": 9.326388359069824,
      "logps/chosen": -0.3249451816082001,
      "logps/rejected": -0.44993042945861816,
      "loss": 0.8484,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4874177575111389,
      "rewards/margins": 0.18747788667678833,
      "rewards/rejected": -0.6748956441879272,
      "step": 210
    },
    {
      "epoch": 0.18908465835840138,
      "grad_norm": 0.319579541683197,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 6.257112979888916,
      "logits/rejected": 7.168400764465332,
      "logps/chosen": -0.335318386554718,
      "logps/rejected": -0.5439311265945435,
      "loss": 0.8499,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.5029775500297546,
      "rewards/margins": 0.31291908025741577,
      "rewards/rejected": -0.8158966302871704,
      "step": 220
    },
    {
      "epoch": 0.19767941555651053,
      "grad_norm": 0.31494757533073425,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 5.725883960723877,
      "logits/rejected": 5.9254865646362305,
      "logps/chosen": -0.3735908567905426,
      "logps/rejected": -0.5729750394821167,
      "loss": 0.826,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.5603862404823303,
      "rewards/margins": 0.2990763187408447,
      "rewards/rejected": -0.859462559223175,
      "step": 230
    },
    {
      "epoch": 0.20627417275461968,
      "grad_norm": 0.46439653635025024,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 5.059751033782959,
      "logits/rejected": 5.128623008728027,
      "logps/chosen": -0.4083784222602844,
      "logps/rejected": -0.6792675852775574,
      "loss": 0.7992,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.612567663192749,
      "rewards/margins": 0.40633392333984375,
      "rewards/rejected": -1.0189014673233032,
      "step": 240
    },
    {
      "epoch": 0.21486892995272883,
      "grad_norm": 0.42406076192855835,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 4.128974437713623,
      "logits/rejected": 4.141166687011719,
      "logps/chosen": -0.4256651997566223,
      "logps/rejected": -0.7279168367385864,
      "loss": 0.7848,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.6384977102279663,
      "rewards/margins": 0.4533773958683014,
      "rewards/rejected": -1.0918750762939453,
      "step": 250
    },
    {
      "epoch": 0.21486892995272883,
      "eval_logits/chosen": 3.800307512283325,
      "eval_logits/rejected": 3.1472771167755127,
      "eval_logps/chosen": -0.4563433527946472,
      "eval_logps/rejected": -0.8247694373130798,
      "eval_loss": 0.7728626728057861,
      "eval_rewards/accuracies": 0.6526315808296204,
      "eval_rewards/chosen": -0.6845150589942932,
      "eval_rewards/margins": 0.5526391267776489,
      "eval_rewards/rejected": -1.237154245376587,
      "eval_runtime": 25.7836,
      "eval_samples_per_second": 29.205,
      "eval_steps_per_second": 3.685,
      "step": 250
    },
    {
      "epoch": 0.22346368715083798,
      "grad_norm": 0.4071955680847168,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 3.169527530670166,
      "logits/rejected": 2.603461503982544,
      "logps/chosen": -0.5029922723770142,
      "logps/rejected": -0.9469219446182251,
      "loss": 0.7273,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.7544883489608765,
      "rewards/margins": 0.6658946871757507,
      "rewards/rejected": -1.4203828573226929,
      "step": 260
    },
    {
      "epoch": 0.23205844434894715,
      "grad_norm": 0.6253886222839355,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 3.8718018531799316,
      "logits/rejected": 2.569753646850586,
      "logps/chosen": -0.4955294132232666,
      "logps/rejected": -0.8811863660812378,
      "loss": 0.7483,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.7432941198348999,
      "rewards/margins": 0.5784854888916016,
      "rewards/rejected": -1.321779489517212,
      "step": 270
    },
    {
      "epoch": 0.2406532015470563,
      "grad_norm": 0.5592113733291626,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 3.4818286895751953,
      "logits/rejected": 2.428328275680542,
      "logps/chosen": -0.5700691342353821,
      "logps/rejected": -1.010145664215088,
      "loss": 0.7165,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.8551036715507507,
      "rewards/margins": 0.6601148843765259,
      "rewards/rejected": -1.5152184963226318,
      "step": 280
    },
    {
      "epoch": 0.24924795874516545,
      "grad_norm": 0.8438608050346375,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 2.5937914848327637,
      "logits/rejected": 1.8570162057876587,
      "logps/chosen": -0.592321515083313,
      "logps/rejected": -1.1775600910186768,
      "loss": 0.6685,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.8884822130203247,
      "rewards/margins": 0.8778578042984009,
      "rewards/rejected": -1.7663400173187256,
      "step": 290
    },
    {
      "epoch": 0.2578427159432746,
      "grad_norm": 2.9559757709503174,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 3.2419090270996094,
      "logits/rejected": 1.9082870483398438,
      "logps/chosen": -0.6832663416862488,
      "logps/rejected": -1.5631868839263916,
      "loss": 0.6009,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.0248994827270508,
      "rewards/margins": 1.3198809623718262,
      "rewards/rejected": -2.344780445098877,
      "step": 300
    },
    {
      "epoch": 0.2578427159432746,
      "eval_logits/chosen": 2.5470504760742188,
      "eval_logits/rejected": 1.492888331413269,
      "eval_logps/chosen": -0.7285813689231873,
      "eval_logps/rejected": -1.8318607807159424,
      "eval_loss": 0.5855891704559326,
      "eval_rewards/accuracies": 0.7052631378173828,
      "eval_rewards/chosen": -1.092872142791748,
      "eval_rewards/margins": 1.6549187898635864,
      "eval_rewards/rejected": -2.747790813446045,
      "eval_runtime": 25.8105,
      "eval_samples_per_second": 29.174,
      "eval_steps_per_second": 3.681,
      "step": 300
    },
    {
      "epoch": 0.2664374731413838,
      "grad_norm": 1.4503060579299927,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": 1.6672757863998413,
      "logits/rejected": 0.7888604402542114,
      "logps/chosen": -0.769140899181366,
      "logps/rejected": -2.0822532176971436,
      "loss": 0.512,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.1537113189697266,
      "rewards/margins": 1.9696683883666992,
      "rewards/rejected": -3.123379945755005,
      "step": 310
    },
    {
      "epoch": 0.2750322303394929,
      "grad_norm": 0.36741188168525696,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": 2.6584715843200684,
      "logits/rejected": 1.835911750793457,
      "logps/chosen": -0.8400143384933472,
      "logps/rejected": -1.9262489080429077,
      "loss": 0.5405,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.2600215673446655,
      "rewards/margins": 1.6293519735336304,
      "rewards/rejected": -2.889373302459717,
      "step": 320
    },
    {
      "epoch": 0.28362698753760207,
      "grad_norm": 0.6233783960342407,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": 3.2203617095947266,
      "logits/rejected": 2.3215420246124268,
      "logps/chosen": -0.7985933423042297,
      "logps/rejected": -2.4170174598693848,
      "loss": 0.5335,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.197890043258667,
      "rewards/margins": 2.427635669708252,
      "rewards/rejected": -3.625525712966919,
      "step": 330
    },
    {
      "epoch": 0.2922217447357112,
      "grad_norm": 1.0881849527359009,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": 2.229017734527588,
      "logits/rejected": 1.2251309156417847,
      "logps/chosen": -0.8058193325996399,
      "logps/rejected": -2.810622215270996,
      "loss": 0.4903,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.2087291479110718,
      "rewards/margins": 3.007204294204712,
      "rewards/rejected": -4.215933799743652,
      "step": 340
    },
    {
      "epoch": 0.30081650193382037,
      "grad_norm": 4.168415069580078,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": 2.4198296070098877,
      "logits/rejected": 1.5391919612884521,
      "logps/chosen": -1.010558843612671,
      "logps/rejected": -2.2362923622131348,
      "loss": 0.5249,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -1.515838384628296,
      "rewards/margins": 1.8385999202728271,
      "rewards/rejected": -3.3544387817382812,
      "step": 350
    },
    {
      "epoch": 0.30081650193382037,
      "eval_logits/chosen": 2.996535539627075,
      "eval_logits/rejected": 2.064058303833008,
      "eval_logps/chosen": -0.8687878847122192,
      "eval_logps/rejected": -2.9790267944335938,
      "eval_loss": 0.5171241760253906,
      "eval_rewards/accuracies": 0.7263157963752747,
      "eval_rewards/chosen": -1.3031818866729736,
      "eval_rewards/margins": 3.165358781814575,
      "eval_rewards/rejected": -4.468540668487549,
      "eval_runtime": 25.8152,
      "eval_samples_per_second": 29.169,
      "eval_steps_per_second": 3.68,
      "step": 350
    },
    {
      "epoch": 0.3094112591319295,
      "grad_norm": 0.5646592378616333,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": 3.050445556640625,
      "logits/rejected": 2.0960793495178223,
      "logps/chosen": -0.7702202796936035,
      "logps/rejected": -2.5967533588409424,
      "loss": 0.5067,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.1553303003311157,
      "rewards/margins": 2.739799737930298,
      "rewards/rejected": -3.895130157470703,
      "step": 360
    },
    {
      "epoch": 0.31800601633003867,
      "grad_norm": 0.5547713041305542,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": 2.7148895263671875,
      "logits/rejected": 1.9958852529525757,
      "logps/chosen": -0.9548311233520508,
      "logps/rejected": -3.1348252296447754,
      "loss": 0.4726,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -1.4322465658187866,
      "rewards/margins": 3.269991397857666,
      "rewards/rejected": -4.702237606048584,
      "step": 370
    },
    {
      "epoch": 0.3266007735281478,
      "grad_norm": 3.4396660327911377,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": 2.586766004562378,
      "logits/rejected": 2.070089340209961,
      "logps/chosen": -0.9903923273086548,
      "logps/rejected": -3.0135743618011475,
      "loss": 0.4801,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.485588550567627,
      "rewards/margins": 3.034773349761963,
      "rewards/rejected": -4.52036190032959,
      "step": 380
    },
    {
      "epoch": 0.33519553072625696,
      "grad_norm": 0.9405317306518555,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": 4.370789527893066,
      "logits/rejected": 3.165931224822998,
      "logps/chosen": -0.7785463929176331,
      "logps/rejected": -2.456723928451538,
      "loss": 0.4585,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -1.1678194999694824,
      "rewards/margins": 2.5172665119171143,
      "rewards/rejected": -3.6850857734680176,
      "step": 390
    },
    {
      "epoch": 0.3437902879243661,
      "grad_norm": 0.7120731472969055,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": 3.3425400257110596,
      "logits/rejected": 2.6448545455932617,
      "logps/chosen": -0.9174768328666687,
      "logps/rejected": -3.047037124633789,
      "loss": 0.4771,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.3762153387069702,
      "rewards/margins": 3.194340229034424,
      "rewards/rejected": -4.570555686950684,
      "step": 400
    },
    {
      "epoch": 0.3437902879243661,
      "eval_logits/chosen": 3.548964262008667,
      "eval_logits/rejected": 2.7726428508758545,
      "eval_logps/chosen": -1.0053316354751587,
      "eval_logps/rejected": -3.487654447555542,
      "eval_loss": 0.47841358184814453,
      "eval_rewards/accuracies": 0.7368420958518982,
      "eval_rewards/chosen": -1.5079973936080933,
      "eval_rewards/margins": 3.723484992980957,
      "eval_rewards/rejected": -5.231482028961182,
      "eval_runtime": 25.8148,
      "eval_samples_per_second": 29.169,
      "eval_steps_per_second": 3.68,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.121232912782459e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}