{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9994242947610823,
  "eval_steps": 500,
  "global_step": 434,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 41.760020423030454,
      "learning_rate": 1.1363636363636363e-08,
      "logits/chosen": -4.852883815765381,
      "logits/rejected": -5.038845539093018,
      "logps/chosen": -266.87384033203125,
      "logps/rejected": -341.158935546875,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.02,
      "grad_norm": 35.2404459189821,
      "learning_rate": 1.1363636363636363e-07,
      "logits/chosen": -4.649970054626465,
      "logits/rejected": -4.861534118652344,
      "logps/chosen": -329.62591552734375,
      "logps/rejected": -358.3674621582031,
      "loss": 0.6926,
      "rewards/accuracies": 0.4236111044883728,
      "rewards/chosen": -0.003552212379872799,
      "rewards/margins": 0.00014615118561778218,
      "rewards/rejected": -0.0036983639001846313,
      "step": 10
    },
    {
      "epoch": 0.05,
      "grad_norm": 38.80276500420471,
      "learning_rate": 2.2727272727272726e-07,
      "logits/chosen": -4.702324867248535,
      "logits/rejected": -4.93120813369751,
      "logps/chosen": -323.15277099609375,
      "logps/rejected": -350.1238708496094,
      "loss": 0.6758,
      "rewards/accuracies": 0.609375,
      "rewards/chosen": -0.09294315427541733,
      "rewards/margins": 0.03752884641289711,
      "rewards/rejected": -0.13047200441360474,
      "step": 20
    },
    {
      "epoch": 0.07,
      "grad_norm": 44.150425429765605,
      "learning_rate": 3.4090909090909085e-07,
      "logits/chosen": -4.943687915802002,
      "logits/rejected": -5.059121608734131,
      "logps/chosen": -358.85186767578125,
      "logps/rejected": -428.28997802734375,
      "loss": 0.6356,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -0.5626996159553528,
      "rewards/margins": 0.20829734206199646,
      "rewards/rejected": -0.7709969282150269,
      "step": 30
    },
    {
      "epoch": 0.09,
      "grad_norm": 40.317859636548626,
      "learning_rate": 4.545454545454545e-07,
      "logits/chosen": -4.930764198303223,
      "logits/rejected": -5.059735298156738,
      "logps/chosen": -406.4983825683594,
      "logps/rejected": -480.08984375,
      "loss": 0.6181,
      "rewards/accuracies": 0.640625,
      "rewards/chosen": -0.804176926612854,
      "rewards/margins": 0.33279144763946533,
      "rewards/rejected": -1.1369682550430298,
      "step": 40
    },
    {
      "epoch": 0.12,
      "grad_norm": 41.480329345920765,
      "learning_rate": 4.997080567080816e-07,
      "logits/chosen": -5.149637222290039,
      "logits/rejected": -5.36204719543457,
      "logps/chosen": -328.46356201171875,
      "logps/rejected": -432.31854248046875,
      "loss": 0.5989,
      "rewards/accuracies": 0.7093750238418579,
      "rewards/chosen": -0.586052417755127,
      "rewards/margins": 0.4240691661834717,
      "rewards/rejected": -1.0101215839385986,
      "step": 50
    },
    {
      "epoch": 0.14,
      "grad_norm": 45.66439552100321,
      "learning_rate": 4.979264274553905e-07,
      "logits/chosen": -5.387632846832275,
      "logits/rejected": -5.600171089172363,
      "logps/chosen": -370.166259765625,
      "logps/rejected": -468.5503845214844,
      "loss": 0.5899,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.6436477899551392,
      "rewards/margins": 0.5441282987594604,
      "rewards/rejected": -1.1877760887145996,
      "step": 60
    },
    {
      "epoch": 0.16,
      "grad_norm": 58.36091247583923,
      "learning_rate": 4.945369001834514e-07,
      "logits/chosen": -5.68746280670166,
      "logits/rejected": -5.95988655090332,
      "logps/chosen": -392.7463684082031,
      "logps/rejected": -469.6172790527344,
      "loss": 0.582,
      "rewards/accuracies": 0.6656249761581421,
      "rewards/chosen": -0.8184071779251099,
      "rewards/margins": 0.44322291016578674,
      "rewards/rejected": -1.2616300582885742,
      "step": 70
    },
    {
      "epoch": 0.18,
      "grad_norm": 44.87515888678066,
      "learning_rate": 4.895614572772916e-07,
      "logits/chosen": -5.767647743225098,
      "logits/rejected": -6.016678810119629,
      "logps/chosen": -372.30438232421875,
      "logps/rejected": -485.02850341796875,
      "loss": 0.5658,
      "rewards/accuracies": 0.6656249761581421,
      "rewards/chosen": -0.6735275983810425,
      "rewards/margins": 0.523722767829895,
      "rewards/rejected": -1.1972506046295166,
      "step": 80
    },
    {
      "epoch": 0.21,
      "grad_norm": 52.56006964617041,
      "learning_rate": 4.830323663933919e-07,
      "logits/chosen": -5.834498405456543,
      "logits/rejected": -6.01763916015625,
      "logps/chosen": -381.7452392578125,
      "logps/rejected": -476.9891662597656,
      "loss": 0.5659,
      "rewards/accuracies": 0.6656249761581421,
      "rewards/chosen": -0.829800009727478,
      "rewards/margins": 0.5006512403488159,
      "rewards/rejected": -1.3304513692855835,
      "step": 90
    },
    {
      "epoch": 0.23,
      "grad_norm": 55.69644328341761,
      "learning_rate": 4.74991971191553e-07,
      "logits/chosen": -5.832143306732178,
      "logits/rejected": -6.132119655609131,
      "logps/chosen": -373.9161682128906,
      "logps/rejected": -479.5479431152344,
      "loss": 0.5632,
      "rewards/accuracies": 0.6781250238418579,
      "rewards/chosen": -0.693705677986145,
      "rewards/margins": 0.6272264719009399,
      "rewards/rejected": -1.320932149887085,
      "step": 100
    },
    {
      "epoch": 0.25,
      "grad_norm": 55.95067399887189,
      "learning_rate": 4.6549241672001225e-07,
      "logits/chosen": -6.0772786140441895,
      "logits/rejected": -6.3634233474731445,
      "logps/chosen": -431.4512634277344,
      "logps/rejected": -542.3984985351562,
      "loss": 0.5495,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -1.0600697994232178,
      "rewards/margins": 0.6067067384719849,
      "rewards/rejected": -1.6667766571044922,
      "step": 110
    },
    {
      "epoch": 0.28,
      "grad_norm": 48.05186252180738,
      "learning_rate": 4.5459531123479673e-07,
      "logits/chosen": -6.061857223510742,
      "logits/rejected": -6.299743175506592,
      "logps/chosen": -365.68341064453125,
      "logps/rejected": -485.478271484375,
      "loss": 0.5609,
      "rewards/accuracies": 0.6968749761581421,
      "rewards/chosen": -0.7879248261451721,
      "rewards/margins": 0.5600650906562805,
      "rewards/rejected": -1.347989797592163,
      "step": 120
    },
    {
      "epoch": 0.3,
      "grad_norm": 56.849323822551575,
      "learning_rate": 4.4237132664654147e-07,
      "logits/chosen": -6.179199695587158,
      "logits/rejected": -6.447500705718994,
      "logps/chosen": -373.60626220703125,
      "logps/rejected": -507.93450927734375,
      "loss": 0.552,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.806027889251709,
      "rewards/margins": 0.6611197590827942,
      "rewards/rejected": -1.467147707939148,
      "step": 130
    },
    {
      "epoch": 0.32,
      "grad_norm": 51.72316001946749,
      "learning_rate": 4.2889974018603024e-07,
      "logits/chosen": -6.394374847412109,
      "logits/rejected": -6.6608686447143555,
      "logps/chosen": -405.6766662597656,
      "logps/rejected": -509.36065673828125,
      "loss": 0.5535,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.810355544090271,
      "rewards/margins": 0.6747696399688721,
      "rewards/rejected": -1.4851253032684326,
      "step": 140
    },
    {
      "epoch": 0.35,
      "grad_norm": 56.109386147317295,
      "learning_rate": 4.142679202609327e-07,
      "logits/chosen": -6.469355583190918,
      "logits/rejected": -6.71613073348999,
      "logps/chosen": -389.1719665527344,
      "logps/rejected": -522.1373291015625,
      "loss": 0.5577,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.880142867565155,
      "rewards/margins": 0.8348616361618042,
      "rewards/rejected": -1.715004563331604,
      "step": 150
    },
    {
      "epoch": 0.37,
      "grad_norm": 52.97530937607907,
      "learning_rate": 3.9857075983815435e-07,
      "logits/chosen": -6.588133811950684,
      "logits/rejected": -6.864637851715088,
      "logps/chosen": -383.6964416503906,
      "logps/rejected": -510.33111572265625,
      "loss": 0.5416,
      "rewards/accuracies": 0.746874988079071,
      "rewards/chosen": -0.7866679430007935,
      "rewards/margins": 0.8415088653564453,
      "rewards/rejected": -1.6281766891479492,
      "step": 160
    },
    {
      "epoch": 0.39,
      "grad_norm": 55.77171290205848,
      "learning_rate": 3.8191006102653317e-07,
      "logits/chosen": -6.785226345062256,
      "logits/rejected": -7.160543918609619,
      "logps/chosen": -397.39068603515625,
      "logps/rejected": -506.77398681640625,
      "loss": 0.5291,
      "rewards/accuracies": 0.703125,
      "rewards/chosen": -0.8594799041748047,
      "rewards/margins": 0.8123966455459595,
      "rewards/rejected": -1.6718765497207642,
      "step": 170
    },
    {
      "epoch": 0.41,
      "grad_norm": 54.05797462315199,
      "learning_rate": 3.6439387485109883e-07,
      "logits/chosen": -6.769280433654785,
      "logits/rejected": -7.096776008605957,
      "logps/chosen": -387.23101806640625,
      "logps/rejected": -501.2037048339844,
      "loss": 0.5379,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.8674057126045227,
      "rewards/margins": 0.7561392784118652,
      "rewards/rejected": -1.6235449314117432,
      "step": 180
    },
    {
      "epoch": 0.44,
      "grad_norm": 56.88992267517494,
      "learning_rate": 3.4613580050071274e-07,
      "logits/chosen": -6.776444435119629,
      "logits/rejected": -6.957192897796631,
      "logps/chosen": -407.02349853515625,
      "logps/rejected": -540.9013671875,
      "loss": 0.5546,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.035926103591919,
      "rewards/margins": 0.7458062171936035,
      "rewards/rejected": -1.7817323207855225,
      "step": 190
    },
    {
      "epoch": 0.46,
      "grad_norm": 48.953131469141674,
      "learning_rate": 3.272542485937368e-07,
      "logits/chosen": -6.675101280212402,
      "logits/rejected": -6.999788761138916,
      "logps/chosen": -379.14483642578125,
      "logps/rejected": -494.6246032714844,
      "loss": 0.532,
      "rewards/accuracies": 0.703125,
      "rewards/chosen": -0.6772204637527466,
      "rewards/margins": 0.6743000745773315,
      "rewards/rejected": -1.3515205383300781,
      "step": 200
    },
    {
      "epoch": 0.48,
      "grad_norm": 59.16183169308755,
      "learning_rate": 3.078716732397358e-07,
      "logits/chosen": -6.790053367614746,
      "logits/rejected": -7.169698238372803,
      "logps/chosen": -404.49749755859375,
      "logps/rejected": -520.8421020507812,
      "loss": 0.5446,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.8419032096862793,
      "rewards/margins": 0.7671428918838501,
      "rewards/rejected": -1.609046220779419,
      "step": 210
    },
    {
      "epoch": 0.51,
      "grad_norm": 51.81706264683318,
      "learning_rate": 2.881137778775863e-07,
      "logits/chosen": -6.8976874351501465,
      "logits/rejected": -7.1951165199279785,
      "logps/chosen": -386.2132873535156,
      "logps/rejected": -497.3343811035156,
      "loss": 0.5343,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.8116914629936218,
      "rewards/margins": 0.7524455785751343,
      "rewards/rejected": -1.5641369819641113,
      "step": 220
    },
    {
      "epoch": 0.53,
      "grad_norm": 55.97935905060418,
      "learning_rate": 2.681087000404406e-07,
      "logits/chosen": -6.740978240966797,
      "logits/rejected": -7.123350620269775,
      "logps/chosen": -392.87066650390625,
      "logps/rejected": -503.4215393066406,
      "loss": 0.5453,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.8021535873413086,
      "rewards/margins": 0.7195227742195129,
      "rewards/rejected": -1.5216763019561768,
      "step": 230
    },
    {
      "epoch": 0.55,
      "grad_norm": 49.37528181856528,
      "learning_rate": 2.4798618033465255e-07,
      "logits/chosen": -6.945050239562988,
      "logits/rejected": -7.23672342300415,
      "logps/chosen": -388.6627502441406,
      "logps/rejected": -537.4052124023438,
      "loss": 0.5321,
      "rewards/accuracies": 0.7406250238418579,
      "rewards/chosen": -0.9606342315673828,
      "rewards/margins": 0.8123178482055664,
      "rewards/rejected": -1.7729520797729492,
      "step": 240
    },
    {
      "epoch": 0.58,
      "grad_norm": 54.105423705553996,
      "learning_rate": 2.278767210221604e-07,
      "logits/chosen": -6.868166923522949,
      "logits/rejected": -7.291008949279785,
      "logps/chosen": -392.64984130859375,
      "logps/rejected": -484.0719299316406,
      "loss": 0.5417,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.779350996017456,
      "rewards/margins": 0.814343273639679,
      "rewards/rejected": -1.5936942100524902,
      "step": 250
    },
    {
      "epoch": 0.6,
      "grad_norm": 56.43268807738197,
      "learning_rate": 2.0791073966324034e-07,
      "logits/chosen": -7.0641374588012695,
      "logits/rejected": -7.506702423095703,
      "logps/chosen": -392.3079528808594,
      "logps/rejected": -523.6732788085938,
      "loss": 0.5417,
      "rewards/accuracies": 0.7281249761581421,
      "rewards/chosen": -0.9223122596740723,
      "rewards/margins": 0.6978656053543091,
      "rewards/rejected": -1.6201778650283813,
      "step": 260
    },
    {
      "epoch": 0.62,
      "grad_norm": 57.791839351610484,
      "learning_rate": 1.8821772330858257e-07,
      "logits/chosen": -7.262925624847412,
      "logits/rejected": -7.752607822418213,
      "logps/chosen": -377.09088134765625,
      "logps/rejected": -489.00811767578125,
      "loss": 0.5303,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.7971266508102417,
      "rewards/margins": 0.7223898768424988,
      "rewards/rejected": -1.5195165872573853,
      "step": 270
    },
    {
      "epoch": 0.64,
      "grad_norm": 52.90664589103769,
      "learning_rate": 1.6892538872607933e-07,
      "logits/chosen": -7.113929748535156,
      "logits/rejected": -7.584033012390137,
      "logps/chosen": -380.8609619140625,
      "logps/rejected": -483.22265625,
      "loss": 0.5418,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.7374104261398315,
      "rewards/margins": 0.6971267461776733,
      "rewards/rejected": -1.4345371723175049,
      "step": 280
    },
    {
      "epoch": 0.67,
      "grad_norm": 52.54626658594162,
      "learning_rate": 1.5015885410857614e-07,
      "logits/chosen": -7.044780731201172,
      "logits/rejected": -7.396302700042725,
      "logps/chosen": -402.6374206542969,
      "logps/rejected": -516.0399169921875,
      "loss": 0.5395,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.8658789396286011,
      "rewards/margins": 0.7155196666717529,
      "rewards/rejected": -1.581398606300354,
      "step": 290
    },
    {
      "epoch": 0.69,
      "grad_norm": 49.517270927729605,
      "learning_rate": 1.320398276343795e-07,
      "logits/chosen": -7.218907356262207,
      "logits/rejected": -7.622152805328369,
      "logps/chosen": -381.68310546875,
      "logps/rejected": -497.5284118652344,
      "loss": 0.5304,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.8602964282035828,
      "rewards/margins": 0.7747164368629456,
      "rewards/rejected": -1.6350128650665283,
      "step": 300
    },
    {
      "epoch": 0.71,
      "grad_norm": 56.86193381024086,
      "learning_rate": 1.1468581814301717e-07,
      "logits/chosen": -7.034183502197266,
      "logits/rejected": -7.478652000427246,
      "logps/chosen": -426.84246826171875,
      "logps/rejected": -530.8802490234375,
      "loss": 0.5331,
      "rewards/accuracies": 0.715624988079071,
      "rewards/chosen": -0.9773043394088745,
      "rewards/margins": 0.6900089979171753,
      "rewards/rejected": -1.6673132181167603,
      "step": 310
    },
    {
      "epoch": 0.74,
      "grad_norm": 54.684846687132904,
      "learning_rate": 9.82093730453222e-08,
      "logits/chosen": -7.0423383712768555,
      "logits/rejected": -7.299139499664307,
      "logps/chosen": -368.8961181640625,
      "logps/rejected": -539.9181518554688,
      "loss": 0.5349,
      "rewards/accuracies": 0.7281249761581421,
      "rewards/chosen": -0.8125068545341492,
      "rewards/margins": 0.9283336400985718,
      "rewards/rejected": -1.7408406734466553,
      "step": 320
    },
    {
      "epoch": 0.76,
      "grad_norm": 50.829923121131,
      "learning_rate": 8.271734841028552e-08,
      "logits/chosen": -7.086381435394287,
      "logits/rejected": -7.435737609863281,
      "logps/chosen": -373.8357849121094,
      "logps/rejected": -515.7593994140625,
      "loss": 0.5378,
      "rewards/accuracies": 0.7281249761581421,
      "rewards/chosen": -0.8114317655563354,
      "rewards/margins": 0.869731068611145,
      "rewards/rejected": -1.6811625957489014,
      "step": 330
    },
    {
      "epoch": 0.78,
      "grad_norm": 55.18994010345068,
      "learning_rate": 6.831021596244424e-08,
      "logits/chosen": -7.145020484924316,
      "logits/rejected": -7.365900993347168,
      "logps/chosen": -399.44464111328125,
      "logps/rejected": -547.0018920898438,
      "loss": 0.5091,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.8901287317276001,
      "rewards/margins": 0.9701870083808899,
      "rewards/rejected": -1.8603156805038452,
      "step": 340
    },
    {
      "epoch": 0.81,
      "grad_norm": 54.117959716474104,
      "learning_rate": 5.508141148419443e-08,
      "logits/chosen": -7.050479888916016,
      "logits/rejected": -7.3631181716918945,
      "logps/chosen": -395.1244201660156,
      "logps/rejected": -546.9639282226562,
      "loss": 0.5287,
      "rewards/accuracies": 0.746874988079071,
      "rewards/chosen": -0.9601947069168091,
      "rewards/margins": 0.8646580576896667,
      "rewards/rejected": -1.8248529434204102,
      "step": 350
    },
    {
      "epoch": 0.83,
      "grad_norm": 59.05434076734903,
      "learning_rate": 4.311672884888756e-08,
      "logits/chosen": -7.259505271911621,
      "logits/rejected": -7.578073024749756,
      "logps/chosen": -371.8418884277344,
      "logps/rejected": -483.1830139160156,
      "loss": 0.5215,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.8195570707321167,
      "rewards/margins": 0.8197137117385864,
      "rewards/rejected": -1.6392707824707031,
      "step": 360
    },
    {
      "epoch": 0.85,
      "grad_norm": 54.176289282529886,
      "learning_rate": 3.249376361464021e-08,
      "logits/chosen": -7.098161220550537,
      "logits/rejected": -7.438362121582031,
      "logps/chosen": -408.6588439941406,
      "logps/rejected": -542.7982788085938,
      "loss": 0.5208,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.9817103147506714,
      "rewards/margins": 0.7650116682052612,
      "rewards/rejected": -1.746721863746643,
      "step": 370
    },
    {
      "epoch": 0.88,
      "grad_norm": 60.447603354663286,
      "learning_rate": 2.3281409787363648e-08,
      "logits/chosen": -7.142265319824219,
      "logits/rejected": -7.481961727142334,
      "logps/chosen": -387.88934326171875,
      "logps/rejected": -509.31011962890625,
      "loss": 0.5432,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.8285897374153137,
      "rewards/margins": 0.6989187598228455,
      "rewards/rejected": -1.5275084972381592,
      "step": 380
    },
    {
      "epoch": 0.9,
      "grad_norm": 53.49129578625785,
      "learning_rate": 1.553941301669892e-08,
      "logits/chosen": -7.165548801422119,
      "logits/rejected": -7.568087577819824,
      "logps/chosen": -370.10491943359375,
      "logps/rejected": -490.369873046875,
      "loss": 0.5315,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.814159095287323,
      "rewards/margins": 0.7283869981765747,
      "rewards/rejected": -1.542546033859253,
      "step": 390
    },
    {
      "epoch": 0.92,
      "grad_norm": 52.37813865787704,
      "learning_rate": 9.31798312255233e-09,
      "logits/chosen": -7.114771842956543,
      "logits/rejected": -7.5348968505859375,
      "logps/chosen": -390.9321594238281,
      "logps/rejected": -521.2406005859375,
      "loss": 0.5151,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.8433186411857605,
      "rewards/margins": 0.7988412976264954,
      "rewards/rejected": -1.6421600580215454,
      "step": 400
    },
    {
      "epoch": 0.94,
      "grad_norm": 53.77720647192081,
      "learning_rate": 4.657468465146641e-09,
      "logits/chosen": -7.192900657653809,
      "logits/rejected": -7.582442283630371,
      "logps/chosen": -385.35809326171875,
      "logps/rejected": -510.85919189453125,
      "loss": 0.5351,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.8629342913627625,
      "rewards/margins": 0.8567607998847961,
      "rewards/rejected": -1.7196948528289795,
      "step": 410
    },
    {
      "epoch": 0.97,
      "grad_norm": 59.87565888131157,
      "learning_rate": 1.5880942704217526e-09,
      "logits/chosen": -6.9229278564453125,
      "logits/rejected": -7.265672206878662,
      "logps/chosen": -432.6807556152344,
      "logps/rejected": -540.7777099609375,
      "loss": 0.5192,
      "rewards/accuracies": 0.715624988079071,
      "rewards/chosen": -0.9274924993515015,
      "rewards/margins": 0.6948927640914917,
      "rewards/rejected": -1.6223852634429932,
      "step": 420
    },
    {
      "epoch": 0.99,
      "grad_norm": 54.84275580772891,
      "learning_rate": 1.297666078462767e-10,
      "logits/chosen": -7.139874458312988,
      "logits/rejected": -7.5435380935668945,
      "logps/chosen": -396.08514404296875,
      "logps/rejected": -513.01806640625,
      "loss": 0.5269,
      "rewards/accuracies": 0.7593749761581421,
      "rewards/chosen": -0.8496484756469727,
      "rewards/margins": 0.8382279276847839,
      "rewards/rejected": -1.6878764629364014,
      "step": 430
    },
    {
      "epoch": 1.0,
      "step": 434,
      "total_flos": 0.0,
      "train_loss": 0.5533875935638006,
      "train_runtime": 12702.5109,
      "train_samples_per_second": 8.749,
      "train_steps_per_second": 0.034
    }
  ],
  "logging_steps": 10,
  "max_steps": 434,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}