{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 2.992, |
|
"eval_steps": 500, |
|
"global_step": 561, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.02666666666666667, |
|
"grad_norm": 18.322052001953125, |
|
"learning_rate": 8.771929824561403e-08, |
|
"logits/chosen": -0.8889046907424927, |
|
"logits/rejected": -0.8736551403999329, |
|
"logps/chosen": -81.54634094238281, |
|
"logps/rejected": -75.81903839111328, |
|
"loss": 0.6912, |
|
"rewards/accuracies": 0.3499999940395355, |
|
"rewards/chosen": 0.0006732392939738929, |
|
"rewards/margins": 0.005079272203147411, |
|
"rewards/rejected": -0.004406032618135214, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.05333333333333334, |
|
"grad_norm": 19.94486427307129, |
|
"learning_rate": 1.7543859649122805e-07, |
|
"logits/chosen": -0.9008264541625977, |
|
"logits/rejected": -0.8997122645378113, |
|
"logps/chosen": -56.543792724609375, |
|
"logps/rejected": -70.56910705566406, |
|
"loss": 0.6741, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.040357910096645355, |
|
"rewards/margins": 0.049730073660612106, |
|
"rewards/rejected": -0.009372168220579624, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"grad_norm": 16.07955551147461, |
|
"learning_rate": 2.631578947368421e-07, |
|
"logits/chosen": -0.9306875467300415, |
|
"logits/rejected": -0.9334227442741394, |
|
"logps/chosen": -70.51209259033203, |
|
"logps/rejected": -84.35716247558594, |
|
"loss": 0.6422, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.06031479313969612, |
|
"rewards/margins": 0.09791518747806549, |
|
"rewards/rejected": -0.03760039433836937, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.10666666666666667, |
|
"grad_norm": 16.454225540161133, |
|
"learning_rate": 3.508771929824561e-07, |
|
"logits/chosen": -0.8976303339004517, |
|
"logits/rejected": -0.8699060678482056, |
|
"logps/chosen": -72.55310821533203, |
|
"logps/rejected": -87.03462219238281, |
|
"loss": 0.6069, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.05961589142680168, |
|
"rewards/margins": 0.28127092123031616, |
|
"rewards/rejected": -0.2216549664735794, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.13333333333333333, |
|
"grad_norm": 14.617012977600098, |
|
"learning_rate": 4.3859649122807013e-07, |
|
"logits/chosen": -0.9105884432792664, |
|
"logits/rejected": -0.9121261835098267, |
|
"logps/chosen": -81.52780151367188, |
|
"logps/rejected": -91.52813720703125, |
|
"loss": 0.6019, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.12029141187667847, |
|
"rewards/margins": 0.3524962365627289, |
|
"rewards/rejected": -0.2322048395872116, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"grad_norm": 13.667928695678711, |
|
"learning_rate": 5.263157894736842e-07, |
|
"logits/chosen": -0.8984580039978027, |
|
"logits/rejected": -0.9042614102363586, |
|
"logps/chosen": -74.46806335449219, |
|
"logps/rejected": -76.2645034790039, |
|
"loss": 0.5363, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": 0.09522397816181183, |
|
"rewards/margins": 0.6430191993713379, |
|
"rewards/rejected": -0.547795295715332, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.18666666666666668, |
|
"grad_norm": 16.28107452392578, |
|
"learning_rate": 6.140350877192982e-07, |
|
"logits/chosen": -0.8978121876716614, |
|
"logits/rejected": -0.8999565243721008, |
|
"logps/chosen": -82.0078353881836, |
|
"logps/rejected": -80.48029327392578, |
|
"loss": 0.5021, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": 0.27644580602645874, |
|
"rewards/margins": 0.4465056359767914, |
|
"rewards/rejected": -0.17005985975265503, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.21333333333333335, |
|
"grad_norm": 16.186918258666992, |
|
"learning_rate": 7.017543859649122e-07, |
|
"logits/chosen": -0.8643107414245605, |
|
"logits/rejected": -0.8585313558578491, |
|
"logps/chosen": -103.23567962646484, |
|
"logps/rejected": -90.96733856201172, |
|
"loss": 0.4986, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.10163059085607529, |
|
"rewards/margins": 0.47456789016723633, |
|
"rewards/rejected": -0.5761984586715698, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"grad_norm": 14.479534149169922, |
|
"learning_rate": 7.894736842105263e-07, |
|
"logits/chosen": -0.9103773236274719, |
|
"logits/rejected": -0.9051542282104492, |
|
"logps/chosen": -67.92506408691406, |
|
"logps/rejected": -77.37336730957031, |
|
"loss": 0.4875, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.29286879301071167, |
|
"rewards/margins": 0.8011099100112915, |
|
"rewards/rejected": -0.5082410573959351, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.26666666666666666, |
|
"grad_norm": 12.070276260375977, |
|
"learning_rate": 8.771929824561403e-07, |
|
"logits/chosen": -0.8517910242080688, |
|
"logits/rejected": -0.8563734889030457, |
|
"logps/chosen": -92.3311767578125, |
|
"logps/rejected": -102.72621154785156, |
|
"loss": 0.4479, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.06940027326345444, |
|
"rewards/margins": 1.23981511592865, |
|
"rewards/rejected": -1.3092153072357178, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.29333333333333333, |
|
"grad_norm": 15.573699951171875, |
|
"learning_rate": 9.649122807017545e-07, |
|
"logits/chosen": -0.9419733285903931, |
|
"logits/rejected": -0.9363008737564087, |
|
"logps/chosen": -76.72134399414062, |
|
"logps/rejected": -82.520751953125, |
|
"loss": 0.527, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.09638428688049316, |
|
"rewards/margins": 0.900995135307312, |
|
"rewards/rejected": -0.8046110272407532, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"grad_norm": 13.533390998840332, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9142201542854309, |
|
"logits/rejected": -0.9173645973205566, |
|
"logps/chosen": -79.86390686035156, |
|
"logps/rejected": -71.9579086303711, |
|
"loss": 0.5225, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.173385351896286, |
|
"rewards/margins": 1.1095057725906372, |
|
"rewards/rejected": -0.9361203908920288, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.3466666666666667, |
|
"grad_norm": 12.57950210571289, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.920464038848877, |
|
"logits/rejected": -0.9172080159187317, |
|
"logps/chosen": -80.74758911132812, |
|
"logps/rejected": -76.41374969482422, |
|
"loss": 0.5248, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.2707827687263489, |
|
"rewards/margins": 0.8655017614364624, |
|
"rewards/rejected": -0.5947191119194031, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.37333333333333335, |
|
"grad_norm": 17.20355224609375, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.8898583650588989, |
|
"logits/rejected": -0.8803698420524597, |
|
"logps/chosen": -80.18648529052734, |
|
"logps/rejected": -109.63679504394531, |
|
"loss": 0.3912, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.030266547575592995, |
|
"rewards/margins": 1.6781041622161865, |
|
"rewards/rejected": -1.7083708047866821, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"grad_norm": 14.076622009277344, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9825113415718079, |
|
"logits/rejected": -0.9686470031738281, |
|
"logps/chosen": -72.63887786865234, |
|
"logps/rejected": -98.7930908203125, |
|
"loss": 0.4875, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 0.3350667357444763, |
|
"rewards/margins": 1.8856172561645508, |
|
"rewards/rejected": -1.5505504608154297, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.4266666666666667, |
|
"grad_norm": 13.67685317993164, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9678022265434265, |
|
"logits/rejected": -0.9671600461006165, |
|
"logps/chosen": -47.99736785888672, |
|
"logps/rejected": -77.59354400634766, |
|
"loss": 0.4304, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.5903635621070862, |
|
"rewards/margins": 1.1374304294586182, |
|
"rewards/rejected": -0.547066867351532, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.4533333333333333, |
|
"grad_norm": 12.650314331054688, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9299849271774292, |
|
"logits/rejected": -0.9215339422225952, |
|
"logps/chosen": -84.56604766845703, |
|
"logps/rejected": -96.4300308227539, |
|
"loss": 0.3804, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": 0.6257888078689575, |
|
"rewards/margins": 1.4266914129257202, |
|
"rewards/rejected": -0.8009026646614075, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"grad_norm": 12.806818962097168, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9175389409065247, |
|
"logits/rejected": -0.9051604270935059, |
|
"logps/chosen": -96.42292022705078, |
|
"logps/rejected": -106.83795166015625, |
|
"loss": 0.4908, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.018072601407766342, |
|
"rewards/margins": 1.4671040773391724, |
|
"rewards/rejected": -1.4851768016815186, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.5066666666666667, |
|
"grad_norm": 15.225826263427734, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9506580233573914, |
|
"logits/rejected": -0.9402367472648621, |
|
"logps/chosen": -83.20651245117188, |
|
"logps/rejected": -106.87049865722656, |
|
"loss": 0.4715, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.14173248410224915, |
|
"rewards/margins": 1.5761946439743042, |
|
"rewards/rejected": -1.434462308883667, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.5333333333333333, |
|
"grad_norm": 17.279712677001953, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.8913367390632629, |
|
"logits/rejected": -0.8918878436088562, |
|
"logps/chosen": -46.1912956237793, |
|
"logps/rejected": -58.76585006713867, |
|
"loss": 0.4605, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.3738502860069275, |
|
"rewards/margins": 1.1125816106796265, |
|
"rewards/rejected": -0.7387313842773438, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 15.407678604125977, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9308265447616577, |
|
"logits/rejected": -0.9138677716255188, |
|
"logps/chosen": -89.54922485351562, |
|
"logps/rejected": -90.88710021972656, |
|
"loss": 0.4469, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.10435737669467926, |
|
"rewards/margins": 1.630505919456482, |
|
"rewards/rejected": -1.5261485576629639, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.5866666666666667, |
|
"grad_norm": 14.938887596130371, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.965082049369812, |
|
"logits/rejected": -0.9715268015861511, |
|
"logps/chosen": -42.88313674926758, |
|
"logps/rejected": -61.1027717590332, |
|
"loss": 0.4947, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": 0.0917281061410904, |
|
"rewards/margins": 0.6923992037773132, |
|
"rewards/rejected": -0.6006711721420288, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.6133333333333333, |
|
"grad_norm": 15.150486946105957, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.0348211526870728, |
|
"logits/rejected": -1.0350416898727417, |
|
"logps/chosen": -58.58684158325195, |
|
"logps/rejected": -69.80925750732422, |
|
"loss": 0.4301, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.741683304309845, |
|
"rewards/margins": 1.3792873620986938, |
|
"rewards/rejected": -0.6376041173934937, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 15.18857192993164, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9605352282524109, |
|
"logits/rejected": -0.9632300138473511, |
|
"logps/chosen": -66.8796615600586, |
|
"logps/rejected": -71.91226959228516, |
|
"loss": 0.4272, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.334277480840683, |
|
"rewards/margins": 1.3439124822616577, |
|
"rewards/rejected": -1.0096348524093628, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.6666666666666666, |
|
"grad_norm": 17.82939338684082, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9578378796577454, |
|
"logits/rejected": -0.9434881210327148, |
|
"logps/chosen": -91.96797943115234, |
|
"logps/rejected": -118.94937896728516, |
|
"loss": 0.469, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 0.11757056415081024, |
|
"rewards/margins": 2.0075435638427734, |
|
"rewards/rejected": -1.8899730443954468, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.6933333333333334, |
|
"grad_norm": 14.026658058166504, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9356282353401184, |
|
"logits/rejected": -0.9223386645317078, |
|
"logps/chosen": -84.6489486694336, |
|
"logps/rejected": -97.97027587890625, |
|
"loss": 0.4071, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.1822044849395752, |
|
"rewards/margins": 1.0983351469039917, |
|
"rewards/rejected": -1.2805397510528564, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 16.48579978942871, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9593449831008911, |
|
"logits/rejected": -0.9539397358894348, |
|
"logps/chosen": -87.32046508789062, |
|
"logps/rejected": -104.6070327758789, |
|
"loss": 0.4735, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": 0.11158242076635361, |
|
"rewards/margins": 1.4765504598617554, |
|
"rewards/rejected": -1.3649680614471436, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.7466666666666667, |
|
"grad_norm": 15.402965545654297, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.8937503695487976, |
|
"logits/rejected": -0.8833414912223816, |
|
"logps/chosen": -56.55735397338867, |
|
"logps/rejected": -64.20155334472656, |
|
"loss": 0.444, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.04381413012742996, |
|
"rewards/margins": 1.1586054563522339, |
|
"rewards/rejected": -1.202419638633728, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.7733333333333333, |
|
"grad_norm": 13.747047424316406, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9597466588020325, |
|
"logits/rejected": -0.9614435434341431, |
|
"logps/chosen": -63.605621337890625, |
|
"logps/rejected": -67.96479797363281, |
|
"loss": 0.3972, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.4212762713432312, |
|
"rewards/margins": 0.9387065768241882, |
|
"rewards/rejected": -0.5174302458763123, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 12.082706451416016, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.8831542730331421, |
|
"logits/rejected": -0.8782299757003784, |
|
"logps/chosen": -89.6635971069336, |
|
"logps/rejected": -102.15290832519531, |
|
"loss": 0.4195, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.23645827174186707, |
|
"rewards/margins": 1.130458116531372, |
|
"rewards/rejected": -1.3669164180755615, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.8266666666666667, |
|
"grad_norm": 11.634166717529297, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9502588510513306, |
|
"logits/rejected": -0.9416471719741821, |
|
"logps/chosen": -68.0828857421875, |
|
"logps/rejected": -100.38713073730469, |
|
"loss": 0.3315, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 0.19413085281848907, |
|
"rewards/margins": 2.068560838699341, |
|
"rewards/rejected": -1.8744302988052368, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.8533333333333334, |
|
"grad_norm": 15.321958541870117, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.0011286735534668, |
|
"logits/rejected": -1.0069797039031982, |
|
"logps/chosen": -58.62431716918945, |
|
"logps/rejected": -81.33189392089844, |
|
"loss": 0.4751, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": 0.06445435434579849, |
|
"rewards/margins": 1.0574184656143188, |
|
"rewards/rejected": -0.9929640889167786, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 13.829776763916016, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9930332899093628, |
|
"logits/rejected": -0.9925543069839478, |
|
"logps/chosen": -65.17711639404297, |
|
"logps/rejected": -79.74466705322266, |
|
"loss": 0.4647, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 0.07586468756198883, |
|
"rewards/margins": 0.9637776613235474, |
|
"rewards/rejected": -0.8879130482673645, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.9066666666666666, |
|
"grad_norm": 15.529192924499512, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9089356660842896, |
|
"logits/rejected": -0.9082063436508179, |
|
"logps/chosen": -73.97811126708984, |
|
"logps/rejected": -95.45903778076172, |
|
"loss": 0.4392, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.33137935400009155, |
|
"rewards/margins": 1.3767789602279663, |
|
"rewards/rejected": -1.7081581354141235, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.9333333333333333, |
|
"grad_norm": 16.684486389160156, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.97789067029953, |
|
"logits/rejected": -0.9672166705131531, |
|
"logps/chosen": -68.05913543701172, |
|
"logps/rejected": -76.677490234375, |
|
"loss": 0.3687, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.15937092900276184, |
|
"rewards/margins": 1.4713166952133179, |
|
"rewards/rejected": -1.3119456768035889, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 10.531698226928711, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9809746742248535, |
|
"logits/rejected": -0.963492214679718, |
|
"logps/chosen": -82.75611877441406, |
|
"logps/rejected": -115.19696044921875, |
|
"loss": 0.3942, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.1854589432477951, |
|
"rewards/margins": 2.5827653408050537, |
|
"rewards/rejected": -2.397306442260742, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.9866666666666667, |
|
"grad_norm": 13.4787015914917, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.96388179063797, |
|
"logits/rejected": -0.9607783555984497, |
|
"logps/chosen": -73.0042953491211, |
|
"logps/rejected": -70.76305389404297, |
|
"loss": 0.3791, |
|
"rewards/accuracies": 0.550000011920929, |
|
"rewards/chosen": 0.26272085309028625, |
|
"rewards/margins": 1.4707610607147217, |
|
"rewards/rejected": -1.2080401182174683, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 1.0133333333333334, |
|
"grad_norm": 10.95159912109375, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.952096164226532, |
|
"logits/rejected": -0.9481242895126343, |
|
"logps/chosen": -65.6482162475586, |
|
"logps/rejected": -94.6727523803711, |
|
"loss": 0.3602, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.6763356924057007, |
|
"rewards/margins": 1.9549248218536377, |
|
"rewards/rejected": -1.2785890102386475, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"grad_norm": 4.499619960784912, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9216955900192261, |
|
"logits/rejected": -0.9177412986755371, |
|
"logps/chosen": -57.536109924316406, |
|
"logps/rejected": -89.177734375, |
|
"loss": 0.1967, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.7885057330131531, |
|
"rewards/margins": 1.7107608318328857, |
|
"rewards/rejected": -0.9222550392150879, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 1.0666666666666667, |
|
"grad_norm": 7.151574611663818, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9569188952445984, |
|
"logits/rejected": -0.9455480575561523, |
|
"logps/chosen": -53.17787551879883, |
|
"logps/rejected": -78.4444351196289, |
|
"loss": 0.2083, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.8187260627746582, |
|
"rewards/margins": 2.6014370918273926, |
|
"rewards/rejected": -1.7827112674713135, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.0933333333333333, |
|
"grad_norm": 7.790274620056152, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9871414303779602, |
|
"logits/rejected": -0.9741198420524597, |
|
"logps/chosen": -88.66045379638672, |
|
"logps/rejected": -130.34814453125, |
|
"loss": 0.1628, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.14811792969703674, |
|
"rewards/margins": 3.1860337257385254, |
|
"rewards/rejected": -3.0379159450531006, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"grad_norm": 5.487607955932617, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.034203052520752, |
|
"logits/rejected": -1.020320177078247, |
|
"logps/chosen": -71.07328796386719, |
|
"logps/rejected": -107.5151138305664, |
|
"loss": 0.1747, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.050754476338624954, |
|
"rewards/margins": 3.4751205444335938, |
|
"rewards/rejected": -3.4243664741516113, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 1.1466666666666667, |
|
"grad_norm": 4.493232727050781, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9926775693893433, |
|
"logits/rejected": -0.9901949167251587, |
|
"logps/chosen": -85.27278900146484, |
|
"logps/rejected": -123.84268951416016, |
|
"loss": 0.1556, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.030883288010954857, |
|
"rewards/margins": 3.6463749408721924, |
|
"rewards/rejected": -3.677258253097534, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 1.1733333333333333, |
|
"grad_norm": 8.204482078552246, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.012113332748413, |
|
"logits/rejected": -1.012290596961975, |
|
"logps/chosen": -85.76399993896484, |
|
"logps/rejected": -119.85150146484375, |
|
"loss": 0.1645, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.43645215034484863, |
|
"rewards/margins": 3.1804490089416504, |
|
"rewards/rejected": -3.616900682449341, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"grad_norm": 8.91741943359375, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9512453079223633, |
|
"logits/rejected": -0.9447379112243652, |
|
"logps/chosen": -99.74169921875, |
|
"logps/rejected": -125.4989013671875, |
|
"loss": 0.2412, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -1.6342785358428955, |
|
"rewards/margins": 3.1997177600860596, |
|
"rewards/rejected": -4.833995819091797, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 1.2266666666666666, |
|
"grad_norm": 8.655539512634277, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9638906717300415, |
|
"logits/rejected": -0.9550336599349976, |
|
"logps/chosen": -90.74691009521484, |
|
"logps/rejected": -129.67747497558594, |
|
"loss": 0.1931, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.6765635013580322, |
|
"rewards/margins": 3.924309492111206, |
|
"rewards/rejected": -4.600872993469238, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 1.2533333333333334, |
|
"grad_norm": 4.054169654846191, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.030977487564087, |
|
"logits/rejected": -1.0393321514129639, |
|
"logps/chosen": -54.60747528076172, |
|
"logps/rejected": -86.23637390136719, |
|
"loss": 0.2012, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.5984514951705933, |
|
"rewards/margins": 3.5396370887756348, |
|
"rewards/rejected": -2.941185712814331, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"grad_norm": 6.45543909072876, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.000688910484314, |
|
"logits/rejected": -0.9959505200386047, |
|
"logps/chosen": -51.59619140625, |
|
"logps/rejected": -78.5389633178711, |
|
"loss": 0.1828, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.38332298398017883, |
|
"rewards/margins": 2.686915874481201, |
|
"rewards/rejected": -2.3035929203033447, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 1.3066666666666666, |
|
"grad_norm": 7.938249111175537, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9679718017578125, |
|
"logits/rejected": -0.9519965052604675, |
|
"logps/chosen": -91.22569274902344, |
|
"logps/rejected": -120.7100601196289, |
|
"loss": 0.1685, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.6173192262649536, |
|
"rewards/margins": 3.579179286956787, |
|
"rewards/rejected": -4.196498870849609, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 1.3333333333333333, |
|
"grad_norm": 5.844974040985107, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9622223973274231, |
|
"logits/rejected": -0.9677648544311523, |
|
"logps/chosen": -92.24385070800781, |
|
"logps/rejected": -129.18057250976562, |
|
"loss": 0.1531, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.8045636415481567, |
|
"rewards/margins": 2.904844284057617, |
|
"rewards/rejected": -3.7094082832336426, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 1.3599999999999999, |
|
"grad_norm": 8.192586898803711, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9564617276191711, |
|
"logits/rejected": -0.953482985496521, |
|
"logps/chosen": -92.21900939941406, |
|
"logps/rejected": -115.93299865722656, |
|
"loss": 0.1831, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.9339162707328796, |
|
"rewards/margins": 3.1692094802856445, |
|
"rewards/rejected": -4.103126049041748, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 1.3866666666666667, |
|
"grad_norm": 11.612703323364258, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9484894871711731, |
|
"logits/rejected": -0.9465099573135376, |
|
"logps/chosen": -88.50215148925781, |
|
"logps/rejected": -122.58497619628906, |
|
"loss": 0.2373, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8389456868171692, |
|
"rewards/margins": 4.004569053649902, |
|
"rewards/rejected": -4.843514442443848, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 1.4133333333333333, |
|
"grad_norm": 3.950273036956787, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.0333240032196045, |
|
"logits/rejected": -1.026308298110962, |
|
"logps/chosen": -81.88277435302734, |
|
"logps/rejected": -115.6543960571289, |
|
"loss": 0.1738, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.251314640045166, |
|
"rewards/margins": 3.181288242340088, |
|
"rewards/rejected": -3.432603120803833, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"grad_norm": 9.259121894836426, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9529396891593933, |
|
"logits/rejected": -0.9476019740104675, |
|
"logps/chosen": -93.74283599853516, |
|
"logps/rejected": -117.82435607910156, |
|
"loss": 0.1717, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.17003877460956573, |
|
"rewards/margins": 3.552433490753174, |
|
"rewards/rejected": -3.7224719524383545, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 1.4666666666666668, |
|
"grad_norm": 17.85709571838379, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.0109868049621582, |
|
"logits/rejected": -1.0117450952529907, |
|
"logps/chosen": -93.20274353027344, |
|
"logps/rejected": -124.9032211303711, |
|
"loss": 0.2349, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.15422244369983673, |
|
"rewards/margins": 3.6027817726135254, |
|
"rewards/rejected": -3.757004499435425, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 1.4933333333333334, |
|
"grad_norm": 12.522117614746094, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.0297268629074097, |
|
"logits/rejected": -1.0234237909317017, |
|
"logps/chosen": -50.927391052246094, |
|
"logps/rejected": -76.98394775390625, |
|
"loss": 0.2888, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.37279123067855835, |
|
"rewards/margins": 2.9437782764434814, |
|
"rewards/rejected": -2.5709872245788574, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"grad_norm": 18.62285804748535, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.97401362657547, |
|
"logits/rejected": -0.9802001714706421, |
|
"logps/chosen": -63.81083297729492, |
|
"logps/rejected": -86.34494018554688, |
|
"loss": 0.2402, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -0.40913981199264526, |
|
"rewards/margins": 2.3060526847839355, |
|
"rewards/rejected": -2.7151923179626465, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 1.5466666666666666, |
|
"grad_norm": 14.160591125488281, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.0169212818145752, |
|
"logits/rejected": -1.0096107721328735, |
|
"logps/chosen": -66.8814926147461, |
|
"logps/rejected": -97.15755462646484, |
|
"loss": 0.1656, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.21899671852588654, |
|
"rewards/margins": 3.427858352661133, |
|
"rewards/rejected": -3.2088615894317627, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 1.5733333333333333, |
|
"grad_norm": 5.416359901428223, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9776760935783386, |
|
"logits/rejected": -0.9770166277885437, |
|
"logps/chosen": -67.19379425048828, |
|
"logps/rejected": -101.93242645263672, |
|
"loss": 0.2106, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.027063405141234398, |
|
"rewards/margins": 3.1595966815948486, |
|
"rewards/rejected": -3.1866602897644043, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"grad_norm": 7.018192291259766, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.023803949356079, |
|
"logits/rejected": -1.0103000402450562, |
|
"logps/chosen": -63.242576599121094, |
|
"logps/rejected": -96.89302062988281, |
|
"loss": 0.1886, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": 0.034027088433504105, |
|
"rewards/margins": 3.0590438842773438, |
|
"rewards/rejected": -3.0250167846679688, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 1.6266666666666667, |
|
"grad_norm": 7.443521499633789, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.0116432905197144, |
|
"logits/rejected": -1.023240327835083, |
|
"logps/chosen": -84.08271026611328, |
|
"logps/rejected": -159.3538360595703, |
|
"loss": 0.1687, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -0.561992347240448, |
|
"rewards/margins": 4.012616157531738, |
|
"rewards/rejected": -4.57460880279541, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 1.6533333333333333, |
|
"grad_norm": 18.155902862548828, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.025331974029541, |
|
"logits/rejected": -1.021927833557129, |
|
"logps/chosen": -73.14906311035156, |
|
"logps/rejected": -121.94499206542969, |
|
"loss": 0.1807, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.2425353080034256, |
|
"rewards/margins": 3.504972457885742, |
|
"rewards/rejected": -3.7475082874298096, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 1.6800000000000002, |
|
"grad_norm": 11.773829460144043, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9748228192329407, |
|
"logits/rejected": -0.9767504930496216, |
|
"logps/chosen": -88.07221984863281, |
|
"logps/rejected": -147.24069213867188, |
|
"loss": 0.1454, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.8688488006591797, |
|
"rewards/margins": 4.263859748840332, |
|
"rewards/rejected": -5.1327080726623535, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 1.7066666666666666, |
|
"grad_norm": 11.361552238464355, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9369309544563293, |
|
"logits/rejected": -0.9380790591239929, |
|
"logps/chosen": -96.55718231201172, |
|
"logps/rejected": -119.29121398925781, |
|
"loss": 0.2044, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.5562452077865601, |
|
"rewards/margins": 3.295253038406372, |
|
"rewards/rejected": -3.8514983654022217, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 1.7333333333333334, |
|
"grad_norm": 10.781094551086426, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.99322509765625, |
|
"logits/rejected": -0.9963001012802124, |
|
"logps/chosen": -94.03239440917969, |
|
"logps/rejected": -129.18307495117188, |
|
"loss": 0.228, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.0290601253509521, |
|
"rewards/margins": 3.1235461235046387, |
|
"rewards/rejected": -4.152606010437012, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"grad_norm": 13.973287582397461, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9588006138801575, |
|
"logits/rejected": -0.9579130411148071, |
|
"logps/chosen": -93.3379135131836, |
|
"logps/rejected": -145.30789184570312, |
|
"loss": 0.1606, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.4644792675971985, |
|
"rewards/margins": 4.343870639801025, |
|
"rewards/rejected": -4.808349609375, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 1.7866666666666666, |
|
"grad_norm": 5.7010931968688965, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9664214253425598, |
|
"logits/rejected": -0.9622098207473755, |
|
"logps/chosen": -77.8204574584961, |
|
"logps/rejected": -139.20196533203125, |
|
"loss": 0.1036, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.5566782355308533, |
|
"rewards/margins": 4.256915092468262, |
|
"rewards/rejected": -4.81359338760376, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 1.8133333333333335, |
|
"grad_norm": 15.839357376098633, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9637653231620789, |
|
"logits/rejected": -0.9616791009902954, |
|
"logps/chosen": -97.48753356933594, |
|
"logps/rejected": -127.541259765625, |
|
"loss": 0.1682, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.562965452671051, |
|
"rewards/margins": 3.636152744293213, |
|
"rewards/rejected": -4.199118614196777, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 1.8399999999999999, |
|
"grad_norm": 7.101598739624023, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.0025413036346436, |
|
"logits/rejected": -1.0012590885162354, |
|
"logps/chosen": -79.85330963134766, |
|
"logps/rejected": -98.39595794677734, |
|
"loss": 0.1832, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.4088008403778076, |
|
"rewards/margins": 3.5112171173095703, |
|
"rewards/rejected": -3.102416515350342, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 1.8666666666666667, |
|
"grad_norm": 12.390422821044922, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.0001270771026611, |
|
"logits/rejected": -1.0041121244430542, |
|
"logps/chosen": -47.137535095214844, |
|
"logps/rejected": -87.8708267211914, |
|
"loss": 0.1838, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.07667568325996399, |
|
"rewards/margins": 3.5933804512023926, |
|
"rewards/rejected": -3.670056104660034, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 1.8933333333333333, |
|
"grad_norm": 6.624851226806641, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.0326136350631714, |
|
"logits/rejected": -1.0377240180969238, |
|
"logps/chosen": -78.13581848144531, |
|
"logps/rejected": -121.1558837890625, |
|
"loss": 0.1175, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.22754593193531036, |
|
"rewards/margins": 3.7420687675476074, |
|
"rewards/rejected": -3.9696147441864014, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"grad_norm": 9.035065650939941, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.959603488445282, |
|
"logits/rejected": -0.9709904789924622, |
|
"logps/chosen": -76.04480743408203, |
|
"logps/rejected": -128.50241088867188, |
|
"loss": 0.1593, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.2988315522670746, |
|
"rewards/margins": 3.922844648361206, |
|
"rewards/rejected": -4.221675395965576, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 1.9466666666666668, |
|
"grad_norm": 5.306190013885498, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9443675875663757, |
|
"logits/rejected": -0.9363471865653992, |
|
"logps/chosen": -99.31537628173828, |
|
"logps/rejected": -136.64498901367188, |
|
"loss": 0.1425, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -1.223646879196167, |
|
"rewards/margins": 3.9859280586242676, |
|
"rewards/rejected": -5.209575653076172, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 1.9733333333333334, |
|
"grad_norm": 12.788728713989258, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9819198846817017, |
|
"logits/rejected": -0.9810832142829895, |
|
"logps/chosen": -79.12177276611328, |
|
"logps/rejected": -109.90110778808594, |
|
"loss": 0.209, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.3604089915752411, |
|
"rewards/margins": 3.0035877227783203, |
|
"rewards/rejected": -3.3639965057373047, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"grad_norm": 13.587727546691895, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9736833572387695, |
|
"logits/rejected": -0.9709518551826477, |
|
"logps/chosen": -111.94571685791016, |
|
"logps/rejected": -123.80615234375, |
|
"loss": 0.1791, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.4601728916168213, |
|
"rewards/margins": 3.195746660232544, |
|
"rewards/rejected": -4.655919551849365, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 2.026666666666667, |
|
"grad_norm": 2.3324151039123535, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9637060165405273, |
|
"logits/rejected": -0.9625332951545715, |
|
"logps/chosen": -64.67164611816406, |
|
"logps/rejected": -107.06028747558594, |
|
"loss": 0.0579, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.13231408596038818, |
|
"rewards/margins": 4.237712383270264, |
|
"rewards/rejected": -4.370026588439941, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 2.0533333333333332, |
|
"grad_norm": 2.0872855186462402, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9391244649887085, |
|
"logits/rejected": -0.9333323240280151, |
|
"logps/chosen": -53.344078063964844, |
|
"logps/rejected": -118.3141098022461, |
|
"loss": 0.0571, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.09113900363445282, |
|
"rewards/margins": 4.982771873474121, |
|
"rewards/rejected": -5.073911190032959, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"grad_norm": 3.842839479446411, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.0063703060150146, |
|
"logits/rejected": -1.0035226345062256, |
|
"logps/chosen": -85.92240905761719, |
|
"logps/rejected": -147.6622314453125, |
|
"loss": 0.0788, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7325506210327148, |
|
"rewards/margins": 5.368863105773926, |
|
"rewards/rejected": -6.101413249969482, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 2.1066666666666665, |
|
"grad_norm": 2.944322347640991, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9857255816459656, |
|
"logits/rejected": -0.9889955520629883, |
|
"logps/chosen": -86.60405731201172, |
|
"logps/rejected": -146.21925354003906, |
|
"loss": 0.0604, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.44693589210510254, |
|
"rewards/margins": 5.308945655822754, |
|
"rewards/rejected": -5.755881309509277, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 2.1333333333333333, |
|
"grad_norm": 1.7155674695968628, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.994691014289856, |
|
"logits/rejected": -0.9987820386886597, |
|
"logps/chosen": -74.14541625976562, |
|
"logps/rejected": -137.02389526367188, |
|
"loss": 0.0644, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.06674417108297348, |
|
"rewards/margins": 6.20205020904541, |
|
"rewards/rejected": -6.268795013427734, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"grad_norm": 1.7242580652236938, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9776423573493958, |
|
"logits/rejected": -0.9695422053337097, |
|
"logps/chosen": -79.72328186035156, |
|
"logps/rejected": -142.98239135742188, |
|
"loss": 0.0439, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.8770833015441895, |
|
"rewards/margins": 6.5918288230896, |
|
"rewards/rejected": -7.4689130783081055, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 2.1866666666666665, |
|
"grad_norm": 2.5285656452178955, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.943740963935852, |
|
"logits/rejected": -0.9352688789367676, |
|
"logps/chosen": -92.97003936767578, |
|
"logps/rejected": -145.69248962402344, |
|
"loss": 0.0871, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.433349370956421, |
|
"rewards/margins": 5.817093372344971, |
|
"rewards/rejected": -7.250443458557129, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 2.2133333333333334, |
|
"grad_norm": 6.4492597579956055, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9524329304695129, |
|
"logits/rejected": -0.9578531384468079, |
|
"logps/chosen": -79.1036148071289, |
|
"logps/rejected": -126.85892486572266, |
|
"loss": 0.0692, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -1.3942753076553345, |
|
"rewards/margins": 5.110877990722656, |
|
"rewards/rejected": -6.505153656005859, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"grad_norm": 3.9530835151672363, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9325224757194519, |
|
"logits/rejected": -0.9292308688163757, |
|
"logps/chosen": -92.8379135131836, |
|
"logps/rejected": -124.98873138427734, |
|
"loss": 0.0744, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.4230420589447021, |
|
"rewards/margins": 4.427516937255859, |
|
"rewards/rejected": -5.850558757781982, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 2.2666666666666666, |
|
"grad_norm": 3.244112491607666, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9953921437263489, |
|
"logits/rejected": -0.9901760220527649, |
|
"logps/chosen": -90.95811462402344, |
|
"logps/rejected": -153.2058563232422, |
|
"loss": 0.0505, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.1016261577606201, |
|
"rewards/margins": 5.2358317375183105, |
|
"rewards/rejected": -6.33745813369751, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 2.2933333333333334, |
|
"grad_norm": 3.9486782550811768, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9572321176528931, |
|
"logits/rejected": -0.963973343372345, |
|
"logps/chosen": -107.50956726074219, |
|
"logps/rejected": -146.10031127929688, |
|
"loss": 0.0639, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.6813853979110718, |
|
"rewards/margins": 5.3139801025390625, |
|
"rewards/rejected": -5.995365619659424, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"grad_norm": 3.333291530609131, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9470894932746887, |
|
"logits/rejected": -0.9312503933906555, |
|
"logps/chosen": -92.68052673339844, |
|
"logps/rejected": -140.1178436279297, |
|
"loss": 0.0572, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.656975507736206, |
|
"rewards/margins": 5.908856391906738, |
|
"rewards/rejected": -6.565831661224365, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 2.3466666666666667, |
|
"grad_norm": 9.420110702514648, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.905168354511261, |
|
"logits/rejected": -0.905125617980957, |
|
"logps/chosen": -82.85682678222656, |
|
"logps/rejected": -134.2998046875, |
|
"loss": 0.1159, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -1.2471545934677124, |
|
"rewards/margins": 5.145517826080322, |
|
"rewards/rejected": -6.392672538757324, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 2.3733333333333335, |
|
"grad_norm": 2.3797969818115234, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9869009256362915, |
|
"logits/rejected": -0.9733754396438599, |
|
"logps/chosen": -124.99552917480469, |
|
"logps/rejected": -176.72329711914062, |
|
"loss": 0.0451, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -2.0458180904388428, |
|
"rewards/margins": 6.536404609680176, |
|
"rewards/rejected": -8.582223892211914, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"grad_norm": 6.350354194641113, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9659305810928345, |
|
"logits/rejected": -0.9653702974319458, |
|
"logps/chosen": -71.59037017822266, |
|
"logps/rejected": -125.0820083618164, |
|
"loss": 0.0761, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.7029816508293152, |
|
"rewards/margins": 5.654323577880859, |
|
"rewards/rejected": -6.35730504989624, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 2.4266666666666667, |
|
"grad_norm": 3.5830423831939697, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.963516891002655, |
|
"logits/rejected": -0.9658119082450867, |
|
"logps/chosen": -102.05826568603516, |
|
"logps/rejected": -157.795654296875, |
|
"loss": 0.0456, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.9112430810928345, |
|
"rewards/margins": 5.601228713989258, |
|
"rewards/rejected": -6.512471675872803, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 2.453333333333333, |
|
"grad_norm": 1.1380105018615723, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9547368884086609, |
|
"logits/rejected": -0.9605048298835754, |
|
"logps/chosen": -108.07603454589844, |
|
"logps/rejected": -171.28262329101562, |
|
"loss": 0.0595, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.361283779144287, |
|
"rewards/margins": 6.42099142074585, |
|
"rewards/rejected": -7.782275199890137, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"grad_norm": 1.1063487529754639, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.914827823638916, |
|
"logits/rejected": -0.9137645959854126, |
|
"logps/chosen": -85.6763687133789, |
|
"logps/rejected": -160.23040771484375, |
|
"loss": 0.0519, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -1.18534255027771, |
|
"rewards/margins": 6.746701240539551, |
|
"rewards/rejected": -7.932042598724365, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 2.506666666666667, |
|
"grad_norm": 1.4177899360656738, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.941451370716095, |
|
"logits/rejected": -0.9365523457527161, |
|
"logps/chosen": -77.06974029541016, |
|
"logps/rejected": -124.00151062011719, |
|
"loss": 0.055, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -0.9638670086860657, |
|
"rewards/margins": 4.646106719970703, |
|
"rewards/rejected": -5.609973907470703, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 2.533333333333333, |
|
"grad_norm": 3.090573787689209, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9199577569961548, |
|
"logits/rejected": -0.9237100481987, |
|
"logps/chosen": -86.69384765625, |
|
"logps/rejected": -112.1557846069336, |
|
"loss": 0.0502, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.1065205335617065, |
|
"rewards/margins": 4.314031600952148, |
|
"rewards/rejected": -5.4205522537231445, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"grad_norm": 7.529189109802246, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9499224424362183, |
|
"logits/rejected": -0.9461296200752258, |
|
"logps/chosen": -98.30276489257812, |
|
"logps/rejected": -169.53909301757812, |
|
"loss": 0.0692, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.8648740649223328, |
|
"rewards/margins": 7.417494773864746, |
|
"rewards/rejected": -8.282369613647461, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 2.586666666666667, |
|
"grad_norm": 4.119349479675293, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9638736844062805, |
|
"logits/rejected": -0.9807848930358887, |
|
"logps/chosen": -82.62672424316406, |
|
"logps/rejected": -177.9151611328125, |
|
"loss": 0.0633, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.7524802684783936, |
|
"rewards/margins": 7.268258094787598, |
|
"rewards/rejected": -9.02073860168457, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 2.6133333333333333, |
|
"grad_norm": 3.560368299484253, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9251823425292969, |
|
"logits/rejected": -0.9247922897338867, |
|
"logps/chosen": -87.5182876586914, |
|
"logps/rejected": -169.91879272460938, |
|
"loss": 0.1587, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.2214720249176025, |
|
"rewards/margins": 6.715635776519775, |
|
"rewards/rejected": -7.937108039855957, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"grad_norm": 1.6422832012176514, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9349163174629211, |
|
"logits/rejected": -0.9354310035705566, |
|
"logps/chosen": -70.24932861328125, |
|
"logps/rejected": -146.46348571777344, |
|
"loss": 0.0691, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.6841042637825012, |
|
"rewards/margins": 6.8183794021606445, |
|
"rewards/rejected": -7.502483367919922, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 2.6666666666666665, |
|
"grad_norm": 2.153717041015625, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9372892379760742, |
|
"logits/rejected": -0.9442557096481323, |
|
"logps/chosen": -74.71461486816406, |
|
"logps/rejected": -149.56219482421875, |
|
"loss": 0.0355, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.513080894947052, |
|
"rewards/margins": 6.21486759185791, |
|
"rewards/rejected": -6.727948188781738, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 2.6933333333333334, |
|
"grad_norm": 2.593008279800415, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9294622540473938, |
|
"logits/rejected": -0.9304571151733398, |
|
"logps/chosen": -90.94770050048828, |
|
"logps/rejected": -147.8937225341797, |
|
"loss": 0.0517, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.548829197883606, |
|
"rewards/margins": 6.368669033050537, |
|
"rewards/rejected": -7.917498588562012, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 2.7199999999999998, |
|
"grad_norm": 12.588436126708984, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9092562794685364, |
|
"logits/rejected": -0.9060165286064148, |
|
"logps/chosen": -83.18638610839844, |
|
"logps/rejected": -130.56942749023438, |
|
"loss": 0.0565, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.80876886844635, |
|
"rewards/margins": 5.5646467208862305, |
|
"rewards/rejected": -7.373415946960449, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 2.7466666666666666, |
|
"grad_norm": 3.6610798835754395, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -1.00751531124115, |
|
"logits/rejected": -1.0096744298934937, |
|
"logps/chosen": -91.13447570800781, |
|
"logps/rejected": -148.82164001464844, |
|
"loss": 0.0285, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.2526090145111084, |
|
"rewards/margins": 6.453673362731934, |
|
"rewards/rejected": -7.706282138824463, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 2.7733333333333334, |
|
"grad_norm": 1.0663306713104248, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9299656748771667, |
|
"logits/rejected": -0.9350908994674683, |
|
"logps/chosen": -94.07073211669922, |
|
"logps/rejected": -164.70330810546875, |
|
"loss": 0.0384, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.654295563697815, |
|
"rewards/margins": 6.949212074279785, |
|
"rewards/rejected": -8.603507995605469, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"grad_norm": 6.753617286682129, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9451917409896851, |
|
"logits/rejected": -0.9450856447219849, |
|
"logps/chosen": -101.37158203125, |
|
"logps/rejected": -149.12391662597656, |
|
"loss": 0.0611, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -2.0039877891540527, |
|
"rewards/margins": 6.462770938873291, |
|
"rewards/rejected": -8.46675968170166, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 2.8266666666666667, |
|
"grad_norm": 9.86711597442627, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9280322194099426, |
|
"logits/rejected": -0.9299230575561523, |
|
"logps/chosen": -94.41612243652344, |
|
"logps/rejected": -168.96092224121094, |
|
"loss": 0.0731, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.973374366760254, |
|
"rewards/margins": 6.930324554443359, |
|
"rewards/rejected": -8.90369987487793, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 2.8533333333333335, |
|
"grad_norm": 10.411066055297852, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9307094812393188, |
|
"logits/rejected": -0.9296162724494934, |
|
"logps/chosen": -87.57542419433594, |
|
"logps/rejected": -134.86257934570312, |
|
"loss": 0.0633, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.4023369550704956, |
|
"rewards/margins": 5.565145969390869, |
|
"rewards/rejected": -6.967482566833496, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"grad_norm": 1.9066485166549683, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9522913098335266, |
|
"logits/rejected": -0.9543063044548035, |
|
"logps/chosen": -90.59437561035156, |
|
"logps/rejected": -152.72274780273438, |
|
"loss": 0.1226, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -2.1479926109313965, |
|
"rewards/margins": 6.265242576599121, |
|
"rewards/rejected": -8.41323471069336, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 2.9066666666666667, |
|
"grad_norm": 1.3979381322860718, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9681941866874695, |
|
"logits/rejected": -0.9737881422042847, |
|
"logps/chosen": -101.29634094238281, |
|
"logps/rejected": -180.4349822998047, |
|
"loss": 0.0586, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -2.112703323364258, |
|
"rewards/margins": 6.271733283996582, |
|
"rewards/rejected": -8.38443660736084, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 2.9333333333333336, |
|
"grad_norm": 7.896867275238037, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9267778396606445, |
|
"logits/rejected": -0.9226524233818054, |
|
"logps/chosen": -86.86418914794922, |
|
"logps/rejected": -140.9725341796875, |
|
"loss": 0.0609, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.6871792078018188, |
|
"rewards/margins": 5.857264995574951, |
|
"rewards/rejected": -7.544443607330322, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"grad_norm": 2.1880979537963867, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9265161752700806, |
|
"logits/rejected": -0.9284146428108215, |
|
"logps/chosen": -86.45133972167969, |
|
"logps/rejected": -158.9849853515625, |
|
"loss": 0.0429, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.7444225549697876, |
|
"rewards/margins": 7.387899875640869, |
|
"rewards/rejected": -9.132322311401367, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 2.986666666666667, |
|
"grad_norm": 0.5332115888595581, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -0.9316242933273315, |
|
"logits/rejected": -0.9294928312301636, |
|
"logps/chosen": -89.85722351074219, |
|
"logps/rejected": -164.3010711669922, |
|
"loss": 0.0779, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.4647209644317627, |
|
"rewards/margins": 7.412230491638184, |
|
"rewards/rejected": -8.876951217651367, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 2.992, |
|
"step": 561, |
|
"total_flos": 0.0, |
|
"train_loss": 0.24264521944982376, |
|
"train_runtime": 1521.7244, |
|
"train_samples_per_second": 11.827, |
|
"train_steps_per_second": 0.369 |
|
} |
|
], |
|
"logging_steps": 5, |
|
"max_steps": 561, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 3, |
|
"save_steps": 500, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": false, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |