{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 637,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006279434850863423,
      "grad_norm": 10.468690716679266,
      "learning_rate": 3.125e-08,
      "logits/chosen": -2.8471853733062744,
      "logits/rejected": -2.8164334297180176,
      "logps/chosen": -315.592529296875,
      "logps/rejected": -230.47390747070312,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06279434850863422,
      "grad_norm": 9.878509369466363,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.8302619457244873,
      "logits/rejected": -2.8141226768493652,
      "logps/chosen": -265.6414794921875,
      "logps/rejected": -265.56695556640625,
      "loss": 0.6932,
      "rewards/accuracies": 0.4548611044883728,
      "rewards/chosen": 0.0005584080354310572,
      "rewards/margins": 0.0001245423045475036,
      "rewards/rejected": 0.00043386564357206225,
      "step": 10
    },
    {
      "epoch": 0.12558869701726844,
      "grad_norm": 10.278326132890863,
      "learning_rate": 4.990353313429303e-07,
      "logits/chosen": -2.8090524673461914,
      "logits/rejected": -2.788933277130127,
      "logps/chosen": -253.39077758789062,
      "logps/rejected": -246.2702178955078,
      "loss": 0.6882,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.019792597740888596,
      "rewards/margins": 0.011014475487172604,
      "rewards/rejected": 0.00877812597900629,
      "step": 20
    },
    {
      "epoch": 0.18838304552590268,
      "grad_norm": 11.048616082787978,
      "learning_rate": 4.882681251368548e-07,
      "logits/chosen": -2.763706684112549,
      "logits/rejected": -2.75482439994812,
      "logps/chosen": -270.31256103515625,
      "logps/rejected": -236.15945434570312,
      "loss": 0.6709,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.038934122771024704,
      "rewards/margins": 0.04564380645751953,
      "rewards/rejected": -0.006709681358188391,
      "step": 30
    },
    {
      "epoch": 0.25117739403453687,
      "grad_norm": 11.185166495621557,
      "learning_rate": 4.6604720940421207e-07,
      "logits/chosen": -2.7219185829162598,
      "logits/rejected": -2.701842784881592,
      "logps/chosen": -254.3495330810547,
      "logps/rejected": -254.6168212890625,
      "loss": 0.6605,
      "rewards/accuracies": 0.6781250238418579,
      "rewards/chosen": -0.03600288927555084,
      "rewards/margins": 0.07350684702396393,
      "rewards/rejected": -0.10950972884893417,
      "step": 40
    },
    {
      "epoch": 0.3139717425431711,
      "grad_norm": 13.063361771402542,
      "learning_rate": 4.3344075855595097e-07,
      "logits/chosen": -2.6782164573669434,
      "logits/rejected": -2.6529879570007324,
      "logps/chosen": -274.06939697265625,
      "logps/rejected": -282.3935546875,
      "loss": 0.634,
      "rewards/accuracies": 0.721875011920929,
      "rewards/chosen": -0.07310701161623001,
      "rewards/margins": 0.18823260068893433,
      "rewards/rejected": -0.26133960485458374,
      "step": 50
    },
    {
      "epoch": 0.37676609105180536,
      "grad_norm": 15.023213763564414,
      "learning_rate": 3.920161866827889e-07,
      "logits/chosen": -2.7063775062561035,
      "logits/rejected": -2.677776336669922,
      "logps/chosen": -301.80023193359375,
      "logps/rejected": -276.10565185546875,
      "loss": 0.6256,
      "rewards/accuracies": 0.6968749761581421,
      "rewards/chosen": -0.1930098533630371,
      "rewards/margins": 0.21952715516090393,
      "rewards/rejected": -0.41253700852394104,
      "step": 60
    },
    {
      "epoch": 0.43956043956043955,
      "grad_norm": 15.201166100431667,
      "learning_rate": 3.4376480090239047e-07,
      "logits/chosen": -2.7157435417175293,
      "logits/rejected": -2.6909706592559814,
      "logps/chosen": -312.8190612792969,
      "logps/rejected": -312.00616455078125,
      "loss": 0.6094,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": -0.30874696373939514,
      "rewards/margins": 0.24815471470355988,
      "rewards/rejected": -0.556901752948761,
      "step": 70
    },
    {
      "epoch": 0.5023547880690737,
      "grad_norm": 22.235789809346436,
      "learning_rate": 2.910060778827554e-07,
      "logits/chosen": -2.711451292037964,
      "logits/rejected": -2.6855673789978027,
      "logps/chosen": -325.4831237792969,
      "logps/rejected": -324.86187744140625,
      "loss": 0.5897,
      "rewards/accuracies": 0.703125,
      "rewards/chosen": -0.31690555810928345,
      "rewards/margins": 0.35460132360458374,
      "rewards/rejected": -0.6715068817138672,
      "step": 80
    },
    {
      "epoch": 0.565149136577708,
      "grad_norm": 18.534325691706606,
      "learning_rate": 2.3627616503391812e-07,
      "logits/chosen": -2.72774338722229,
      "logits/rejected": -2.7118916511535645,
      "logps/chosen": -298.873779296875,
      "logps/rejected": -330.8174133300781,
      "loss": 0.5713,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.43646472692489624,
      "rewards/margins": 0.34521809220314026,
      "rewards/rejected": -0.7816828489303589,
      "step": 90
    },
    {
      "epoch": 0.6279434850863422,
      "grad_norm": 19.673309131968132,
      "learning_rate": 1.8220596619089573e-07,
      "logits/chosen": -2.6677684783935547,
      "logits/rejected": -2.6389312744140625,
      "logps/chosen": -322.52081298828125,
      "logps/rejected": -328.7558288574219,
      "loss": 0.5875,
      "rewards/accuracies": 0.703125,
      "rewards/chosen": -0.37618938088417053,
      "rewards/margins": 0.3784511685371399,
      "rewards/rejected": -0.7546405792236328,
      "step": 100
    },
    {
      "epoch": 0.1726844583987441,
      "grad_norm": 31.95579631076908,
      "learning_rate": 4.920911573406924e-07,
      "logits/chosen": -2.737246513366699,
      "logits/rejected": -2.694401979446411,
      "logps/chosen": -324.00128173828125,
      "logps/rejected": -355.02178955078125,
      "loss": 0.4353,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -0.3587312698364258,
      "rewards/margins": 0.7477051615715027,
      "rewards/rejected": -1.1064363718032837,
      "step": 110
    },
    {
      "epoch": 0.18838304552590268,
      "grad_norm": 35.71890678681308,
      "learning_rate": 4.883087164434672e-07,
      "logits/chosen": -2.626753568649292,
      "logits/rejected": -2.6289310455322266,
      "logps/chosen": -319.3797302246094,
      "logps/rejected": -375.67864990234375,
      "loss": 0.4172,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.4024926722049713,
      "rewards/margins": 0.8855185508728027,
      "rewards/rejected": -1.2880111932754517,
      "step": 120
    },
    {
      "epoch": 0.20408163265306123,
      "grad_norm": 44.90656496644609,
      "learning_rate": 4.838100964592904e-07,
      "logits/chosen": -2.5899486541748047,
      "logits/rejected": -2.5745112895965576,
      "logps/chosen": -272.218994140625,
      "logps/rejected": -347.6167297363281,
      "loss": 0.4832,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.4248787760734558,
      "rewards/margins": 0.8124195337295532,
      "rewards/rejected": -1.2372982501983643,
      "step": 130
    },
    {
      "epoch": 0.21978021978021978,
      "grad_norm": 76.16900405992267,
      "learning_rate": 4.786088169001671e-07,
      "logits/chosen": -2.5720114707946777,
      "logits/rejected": -2.5637192726135254,
      "logps/chosen": -289.96759033203125,
      "logps/rejected": -435.85308837890625,
      "loss": 0.3981,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.3887278437614441,
      "rewards/margins": 0.9565574526786804,
      "rewards/rejected": -1.3452852964401245,
      "step": 140
    },
    {
      "epoch": 0.23547880690737832,
      "grad_norm": 70.20692587613685,
      "learning_rate": 4.727205089511466e-07,
      "logits/chosen": -2.5425446033477783,
      "logits/rejected": -2.525315523147583,
      "logps/chosen": -313.8908996582031,
      "logps/rejected": -377.4201354980469,
      "loss": 0.4486,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.7373483180999756,
      "rewards/margins": 0.8309048414230347,
      "rewards/rejected": -1.5682532787322998,
      "step": 150
    },
    {
      "epoch": 0.25117739403453687,
      "grad_norm": 49.06765782836144,
      "learning_rate": 4.661628684945851e-07,
      "logits/chosen": -2.573646068572998,
      "logits/rejected": -2.5104339122772217,
      "logps/chosen": -370.089599609375,
      "logps/rejected": -390.0255126953125,
      "loss": 0.4023,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.8807455897331238,
      "rewards/margins": 0.7309385538101196,
      "rewards/rejected": -1.6116840839385986,
      "step": 160
    },
    {
      "epoch": 0.2668759811616955,
      "grad_norm": 54.18143326196735,
      "learning_rate": 4.5895560292945996e-07,
      "logits/chosen": -2.5094046592712402,
      "logits/rejected": -2.519862413406372,
      "logps/chosen": -328.95098876953125,
      "logps/rejected": -409.6293029785156,
      "loss": 0.4229,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -0.8252763748168945,
      "rewards/margins": 0.8684083223342896,
      "rewards/rejected": -1.6936845779418945,
      "step": 170
    },
    {
      "epoch": 0.282574568288854,
      "grad_norm": 47.86539813882727,
      "learning_rate": 4.5112037194555876e-07,
      "logits/chosen": -2.4061732292175293,
      "logits/rejected": -2.336141586303711,
      "logps/chosen": -340.967529296875,
      "logps/rejected": -416.18438720703125,
      "loss": 0.4129,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.9469585418701172,
      "rewards/margins": 1.003477692604065,
      "rewards/rejected": -1.9504363536834717,
      "step": 180
    },
    {
      "epoch": 0.29827315541601257,
      "grad_norm": 47.40203923720411,
      "learning_rate": 4.426807224305315e-07,
      "logits/chosen": -2.434738874435425,
      "logits/rejected": -2.4182467460632324,
      "logps/chosen": -390.0809631347656,
      "logps/rejected": -541.5980224609375,
      "loss": 0.39,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.907140851020813,
      "rewards/margins": 1.477752447128296,
      "rewards/rejected": -2.3848931789398193,
      "step": 190
    },
    {
      "epoch": 0.3139717425431711,
      "grad_norm": 59.480022233916614,
      "learning_rate": 4.3366201770542687e-07,
      "logits/chosen": -2.3925621509552,
      "logits/rejected": -2.3594958782196045,
      "logps/chosen": -399.2648010253906,
      "logps/rejected": -485.82061767578125,
      "loss": 0.3722,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -1.242919683456421,
      "rewards/margins": 1.0100038051605225,
      "rewards/rejected": -2.2529234886169434,
      "step": 200
    },
    {
      "epoch": 0.32967032967032966,
      "grad_norm": 54.533637965345,
      "learning_rate": 4.2409136130137845e-07,
      "logits/chosen": -2.391516923904419,
      "logits/rejected": -2.3367998600006104,
      "logps/chosen": -410.27752685546875,
      "logps/rejected": -488.3935546875,
      "loss": 0.3723,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -1.2089805603027344,
      "rewards/margins": 1.3162455558776855,
      "rewards/rejected": -2.52522611618042,
      "step": 210
    },
    {
      "epoch": 0.3453689167974882,
      "grad_norm": 134.3137263377747,
      "learning_rate": 4.1399751550651084e-07,
      "logits/chosen": -2.2474474906921387,
      "logits/rejected": -2.273486852645874,
      "logps/chosen": -422.23419189453125,
      "logps/rejected": -493.7196350097656,
      "loss": 0.4143,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -1.3034000396728516,
      "rewards/margins": 1.1734570264816284,
      "rewards/rejected": -2.4768571853637695,
      "step": 220
    },
    {
      "epoch": 0.36106750392464676,
      "grad_norm": 51.06322946002842,
      "learning_rate": 4.034108149278543e-07,
      "logits/chosen": -2.246279716491699,
      "logits/rejected": -2.218812942504883,
      "logps/chosen": -396.13433837890625,
      "logps/rejected": -494.9988708496094,
      "loss": 0.3583,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -1.4342329502105713,
      "rewards/margins": 1.3676201105117798,
      "rewards/rejected": -2.8018529415130615,
      "step": 230
    },
    {
      "epoch": 0.37676609105180536,
      "grad_norm": 53.106943652386406,
      "learning_rate": 3.923630753280357e-07,
      "logits/chosen": -2.296950578689575,
      "logits/rejected": -2.193643808364868,
      "logps/chosen": -446.6080017089844,
      "logps/rejected": -504.3912658691406,
      "loss": 0.439,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -1.5059573650360107,
      "rewards/margins": 1.111060380935669,
      "rewards/rejected": -2.617017984390259,
      "step": 240
    },
    {
      "epoch": 0.3924646781789639,
      "grad_norm": 31.823002355070333,
      "learning_rate": 3.8088749801071496e-07,
      "logits/chosen": -2.179251194000244,
      "logits/rejected": -2.139425277709961,
      "logps/chosen": -373.7418518066406,
      "logps/rejected": -501.843505859375,
      "loss": 0.369,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -1.2382627725601196,
      "rewards/margins": 1.2532708644866943,
      "rewards/rejected": -2.4915337562561035,
      "step": 250
    },
    {
      "epoch": 0.40816326530612246,
      "grad_norm": 57.958160333752495,
      "learning_rate": 3.6901857004211443e-07,
      "logits/chosen": -2.2685585021972656,
      "logits/rejected": -2.1836700439453125,
      "logps/chosen": -438.3352966308594,
      "logps/rejected": -474.6566467285156,
      "loss": 0.3729,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -1.387054443359375,
      "rewards/margins": 1.218056559562683,
      "rewards/rejected": -2.6051113605499268,
      "step": 260
    },
    {
      "epoch": 0.423861852433281,
      "grad_norm": 78.46346160026032,
      "learning_rate": 3.5679196060850034e-07,
      "logits/chosen": -2.194472312927246,
      "logits/rejected": -2.1565122604370117,
      "logps/chosen": -470.94024658203125,
      "logps/rejected": -557.4560546875,
      "loss": 0.3988,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -1.681726098060608,
      "rewards/margins": 1.0995349884033203,
      "rewards/rejected": -2.7812609672546387,
      "step": 270
    },
    {
      "epoch": 0.43956043956043955,
      "grad_norm": 73.42817989092451,
      "learning_rate": 3.4424441382108826e-07,
      "logits/chosen": -2.0928378105163574,
      "logits/rejected": -2.080376148223877,
      "logps/chosen": -437.2099609375,
      "logps/rejected": -575.6405029296875,
      "loss": 0.3881,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.617456078529358,
      "rewards/margins": 1.3479641675949097,
      "rewards/rejected": -2.9654202461242676,
      "step": 280
    },
    {
      "epoch": 0.4552590266875981,
      "grad_norm": 62.6558696654114,
      "learning_rate": 3.314136382905234e-07,
      "logits/chosen": -2.0131213665008545,
      "logits/rejected": -1.9851818084716797,
      "logps/chosen": -512.6057739257812,
      "logps/rejected": -646.832275390625,
      "loss": 0.4216,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -2.3018593788146973,
      "rewards/margins": 1.346274971961975,
      "rewards/rejected": -3.648134708404541,
      "step": 290
    },
    {
      "epoch": 0.47095761381475665,
      "grad_norm": 78.12598055564801,
      "learning_rate": 3.1833819380279023e-07,
      "logits/chosen": -2.0949795246124268,
      "logits/rejected": -1.9874067306518555,
      "logps/chosen": -488.44384765625,
      "logps/rejected": -571.2259521484375,
      "loss": 0.386,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -2.06516695022583,
      "rewards/margins": 1.3252384662628174,
      "rewards/rejected": -3.3904056549072266,
      "step": 300
    },
    {
      "epoch": 0.48665620094191525,
      "grad_norm": 48.25836081307725,
      "learning_rate": 3.0505737543712275e-07,
      "logits/chosen": -2.1759631633758545,
      "logits/rejected": -2.095853805541992,
      "logps/chosen": -437.9419860839844,
      "logps/rejected": -518.4359130859375,
      "loss": 0.3903,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -1.5794050693511963,
      "rewards/margins": 1.1850736141204834,
      "rewards/rejected": -2.7644786834716797,
      "step": 310
    },
    {
      "epoch": 0.5023547880690737,
      "grad_norm": 60.78980701157502,
      "learning_rate": 2.9161109547416667e-07,
      "logits/chosen": -2.1254727840423584,
      "logits/rejected": -2.0983071327209473,
      "logps/chosen": -487.24383544921875,
      "logps/rejected": -561.2527465820312,
      "loss": 0.3585,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -1.5642211437225342,
      "rewards/margins": 1.3017809391021729,
      "rewards/rejected": -2.866001844406128,
      "step": 320
    },
    {
      "epoch": 0.5180533751962323,
      "grad_norm": 59.58938057948085,
      "learning_rate": 2.780397634492949e-07,
      "logits/chosen": -2.1095099449157715,
      "logits/rejected": -2.0623815059661865,
      "logps/chosen": -420.601318359375,
      "logps/rejected": -561.5743408203125,
      "loss": 0.4054,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -1.7670698165893555,
      "rewards/margins": 1.3623578548431396,
      "rewards/rejected": -3.129427671432495,
      "step": 330
    },
    {
      "epoch": 0.533751962323391,
      "grad_norm": 59.60421047453409,
      "learning_rate": 2.6438416471154273e-07,
      "logits/chosen": -2.105985641479492,
      "logits/rejected": -2.1100611686706543,
      "logps/chosen": -455.4019470214844,
      "logps/rejected": -597.48583984375,
      "loss": 0.3559,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -1.826162338256836,
      "rewards/margins": 1.1689088344573975,
      "rewards/rejected": -2.9950711727142334,
      "step": 340
    },
    {
      "epoch": 0.5494505494505495,
      "grad_norm": 64.80810897640455,
      "learning_rate": 2.5068533785312666e-07,
      "logits/chosen": -2.1501102447509766,
      "logits/rejected": -2.098646640777588,
      "logps/chosen": -398.44293212890625,
      "logps/rejected": -512.3727416992188,
      "loss": 0.3817,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -1.6598011255264282,
      "rewards/margins": 1.3402740955352783,
      "rewards/rejected": -3.000075101852417,
      "step": 350
    },
    {
      "epoch": 0.565149136577708,
      "grad_norm": 66.30622337743506,
      "learning_rate": 2.3698445137790258e-07,
      "logits/chosen": -2.183788776397705,
      "logits/rejected": -2.1399548053741455,
      "logps/chosen": -450.28790283203125,
      "logps/rejected": -570.9381103515625,
      "loss": 0.3441,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -1.7852160930633545,
      "rewards/margins": 1.4079563617706299,
      "rewards/rejected": -3.1931726932525635,
      "step": 360
    },
    {
      "epoch": 0.5808477237048666,
      "grad_norm": 74.385047879014,
      "learning_rate": 2.2332267997940513e-07,
      "logits/chosen": -1.9558976888656616,
      "logits/rejected": -1.8889505863189697,
      "logps/chosen": -472.29046630859375,
      "logps/rejected": -564.1910400390625,
      "loss": 0.4144,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -2.128002405166626,
      "rewards/margins": 1.2228187322616577,
      "rewards/rejected": -3.3508212566375732,
      "step": 370
    },
    {
      "epoch": 0.5965463108320251,
      "grad_norm": 98.38326705187197,
      "learning_rate": 2.0974108080028692e-07,
      "logits/chosen": -2.1048827171325684,
      "logits/rejected": -2.014101266860962,
      "logps/chosen": -472.6429138183594,
      "logps/rejected": -552.962646484375,
      "loss": 0.362,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -1.7709888219833374,
      "rewards/margins": 1.5231949090957642,
      "rewards/rejected": -3.2941837310791016,
      "step": 380
    },
    {
      "epoch": 0.6122448979591837,
      "grad_norm": 28.798007236746653,
      "learning_rate": 1.962804700450265e-07,
      "logits/chosen": -2.229259490966797,
      "logits/rejected": -2.136399269104004,
      "logps/chosen": -404.88970947265625,
      "logps/rejected": -605.4417724609375,
      "loss": 0.2913,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -1.31179940700531,
      "rewards/margins": 2.022050142288208,
      "rewards/rejected": -3.3338496685028076,
      "step": 390
    },
    {
      "epoch": 0.6279434850863422,
      "grad_norm": 47.93461485983235,
      "learning_rate": 1.8298130031671972e-07,
      "logits/chosen": -2.066481351852417,
      "logits/rejected": -2.0052218437194824,
      "logps/chosen": -461.60711669921875,
      "logps/rejected": -593.0902099609375,
      "loss": 0.3182,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -1.50743567943573,
      "rewards/margins": 1.5388964414596558,
      "rewards/rejected": -3.0463321208953857,
      "step": 400
    },
    {
      "epoch": 0.6436420722135008,
      "grad_norm": 81.64181086704542,
      "learning_rate": 1.6988353904658492e-07,
      "logits/chosen": -2.079695224761963,
      "logits/rejected": -2.0234522819519043,
      "logps/chosen": -435.9869079589844,
      "logps/rejected": -548.7467041015625,
      "loss": 0.5391,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.8887078762054443,
      "rewards/margins": 1.0673691034317017,
      "rewards/rejected": -2.9560768604278564,
      "step": 410
    },
    {
      "epoch": 0.6593406593406593,
      "grad_norm": 99.80504939653643,
      "learning_rate": 1.570265483815364e-07,
      "logits/chosen": -2.146688222885132,
      "logits/rejected": -2.1056787967681885,
      "logps/chosen": -441.99456787109375,
      "logps/rejected": -486.8045959472656,
      "loss": 0.6064,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.862978219985962,
      "rewards/margins": 0.5048335790634155,
      "rewards/rejected": -2.367811679840088,
      "step": 420
    },
    {
      "epoch": 0.6750392464678179,
      "grad_norm": 85.01436657258617,
      "learning_rate": 1.444489668907914e-07,
      "logits/chosen": -2.0182952880859375,
      "logits/rejected": -2.0226969718933105,
      "logps/chosen": -346.7263488769531,
      "logps/rejected": -505.09619140625,
      "loss": 0.5198,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -1.6009681224822998,
      "rewards/margins": 1.235584020614624,
      "rewards/rejected": -2.836552143096924,
      "step": 430
    },
    {
      "epoch": 0.6907378335949764,
      "grad_norm": 75.62069078299841,
      "learning_rate": 1.3218859344701632e-07,
      "logits/chosen": -2.2084121704101562,
      "logits/rejected": -2.1915016174316406,
      "logps/chosen": -412.80224609375,
      "logps/rejected": -515.23095703125,
      "loss": 0.5238,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -1.4952690601348877,
      "rewards/margins": 1.0581085681915283,
      "rewards/rejected": -2.553377628326416,
      "step": 440
    },
    {
      "epoch": 0.706436420722135,
      "grad_norm": 150.50609049436147,
      "learning_rate": 1.202822736309758e-07,
      "logits/chosen": -2.1147706508636475,
      "logits/rejected": -2.0880684852600098,
      "logps/chosen": -442.6726989746094,
      "logps/rejected": -475.3531799316406,
      "loss": 0.5647,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.6886441707611084,
      "rewards/margins": 0.8048279881477356,
      "rewards/rejected": -2.4934723377227783,
      "step": 450
    },
    {
      "epoch": 0.7221350078492935,
      "grad_norm": 76.56164006248804,
      "learning_rate": 1.0876578900107053e-07,
      "logits/chosen": -2.2221930027008057,
      "logits/rejected": -2.1485908031463623,
      "logps/chosen": -464.043701171875,
      "logps/rejected": -508.6534118652344,
      "loss": 0.522,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.763662338256836,
      "rewards/margins": 0.7875553965568542,
      "rewards/rejected": -2.551217555999756,
      "step": 460
    },
    {
      "epoch": 0.7378335949764521,
      "grad_norm": 74.89323927902005,
      "learning_rate": 9.767374956053584e-08,
      "logits/chosen": -2.205277919769287,
      "logits/rejected": -2.18218994140625,
      "logps/chosen": -412.97686767578125,
      "logps/rejected": -543.1309204101562,
      "loss": 0.5596,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.553432822227478,
      "rewards/margins": 0.9423764944076538,
      "rewards/rejected": -2.495809316635132,
      "step": 470
    },
    {
      "epoch": 0.7535321821036107,
      "grad_norm": 87.37555939932336,
      "learning_rate": 8.70394897454659e-08,
      "logits/chosen": -2.2252628803253174,
      "logits/rejected": -2.1848928928375244,
      "logps/chosen": -382.27752685546875,
      "logps/rejected": -457.525390625,
      "loss": 0.547,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -1.6248447895050049,
      "rewards/margins": 0.4326212406158447,
      "rewards/rejected": -2.0574657917022705,
      "step": 480
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 93.79826007492892,
      "learning_rate": 7.689496824624525e-08,
      "logits/chosen": -2.2677321434020996,
      "logits/rejected": -2.212900161743164,
      "logps/chosen": -403.1687316894531,
      "logps/rejected": -456.62615966796875,
      "loss": 0.5459,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.4313546419143677,
      "rewards/margins": 0.6928239464759827,
      "rewards/rejected": -2.124178647994995,
      "step": 490
    },
    {
      "epoch": 0.7849293563579278,
      "grad_norm": 56.75850068630019,
      "learning_rate": 6.727067196345099e-08,
      "logits/chosen": -2.3276655673980713,
      "logits/rejected": -2.2189202308654785,
      "logps/chosen": -438.74267578125,
      "logps/rejected": -478.62298583984375,
      "loss": 0.5453,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.3515713214874268,
      "rewards/margins": 1.0120915174484253,
      "rewards/rejected": -2.3636624813079834,
      "step": 500
    },
    {
      "epoch": 0.8006279434850864,
      "grad_norm": 97.05649014340479,
      "learning_rate": 5.8195524386862374e-08,
      "logits/chosen": -2.311316967010498,
      "logits/rejected": -2.242011547088623,
      "logps/chosen": -362.28399658203125,
      "logps/rejected": -465.5743103027344,
      "loss": 0.4852,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -1.409887433052063,
      "rewards/margins": 0.9168035387992859,
      "rewards/rejected": -2.326690912246704,
      "step": 510
    },
    {
      "epoch": 0.8163265306122449,
      "grad_norm": 127.5887173764836,
      "learning_rate": 4.969679867292276e-08,
      "logits/chosen": -2.2007040977478027,
      "logits/rejected": -2.138988971710205,
      "logps/chosen": -413.65447998046875,
      "logps/rejected": -489.22540283203125,
      "loss": 0.4985,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.3157858848571777,
      "rewards/margins": 1.0105135440826416,
      "rewards/rejected": -2.3262994289398193,
      "step": 520
    },
    {
      "epoch": 0.8320251177394035,
      "grad_norm": 92.31568060854485,
      "learning_rate": 4.180003568187776e-08,
      "logits/chosen": -2.2751996517181396,
      "logits/rejected": -2.2204182147979736,
      "logps/chosen": -437.9576110839844,
      "logps/rejected": -478.0186462402344,
      "loss": 0.5193,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.3832905292510986,
      "rewards/margins": 0.656178891658783,
      "rewards/rejected": -2.0394692420959473,
      "step": 530
    },
    {
      "epoch": 0.847723704866562,
      "grad_norm": 93.73800900430595,
      "learning_rate": 3.452896722091128e-08,
      "logits/chosen": -2.2862913608551025,
      "logits/rejected": -2.1965749263763428,
      "logps/chosen": -418.68218994140625,
      "logps/rejected": -488.07208251953125,
      "loss": 0.549,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.4643903970718384,
      "rewards/margins": 0.8227276802062988,
      "rewards/rejected": -2.2871181964874268,
      "step": 540
    },
    {
      "epoch": 0.8634222919937206,
      "grad_norm": 74.68277239397396,
      "learning_rate": 2.7905444723949762e-08,
      "logits/chosen": -2.2326064109802246,
      "logits/rejected": -2.1876773834228516,
      "logps/chosen": -434.93377685546875,
      "logps/rejected": -486.3379821777344,
      "loss": 0.5464,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.5688042640686035,
      "rewards/margins": 0.6943384408950806,
      "rewards/rejected": -2.2631428241729736,
      "step": 550
    },
    {
      "epoch": 0.8791208791208791,
      "grad_norm": 98.95496114421825,
      "learning_rate": 2.194937358247506e-08,
      "logits/chosen": -2.314251661300659,
      "logits/rejected": -2.282222032546997,
      "logps/chosen": -444.4891662597656,
      "logps/rejected": -482.2521057128906,
      "loss": 0.5058,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -1.4890687465667725,
      "rewards/margins": 0.7982088327407837,
      "rewards/rejected": -2.2872776985168457,
      "step": 560
    },
    {
      "epoch": 0.8948194662480377,
      "grad_norm": 93.93456697380334,
      "learning_rate": 1.6678653324693787e-08,
      "logits/chosen": -2.271334171295166,
      "logits/rejected": -2.243417263031006,
      "logps/chosen": -379.41131591796875,
      "logps/rejected": -474.8644104003906,
      "loss": 0.5554,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.2463254928588867,
      "rewards/margins": 1.0109448432922363,
      "rewards/rejected": -2.257270336151123,
      "step": 570
    },
    {
      "epoch": 0.9105180533751962,
      "grad_norm": 63.908735732483215,
      "learning_rate": 1.2109123822844653e-08,
      "logits/chosen": -2.2195041179656982,
      "logits/rejected": -2.152104139328003,
      "logps/chosen": -424.376220703125,
      "logps/rejected": -493.041748046875,
      "loss": 0.5407,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.484989047050476,
      "rewards/margins": 0.8357736468315125,
      "rewards/rejected": -2.3207626342773438,
      "step": 580
    },
    {
      "epoch": 0.9262166405023547,
      "grad_norm": 77.29245256601979,
      "learning_rate": 8.254517690300944e-09,
      "logits/chosen": -2.1921186447143555,
      "logits/rejected": -2.160266876220703,
      "logps/chosen": -358.38665771484375,
      "logps/rejected": -419.24053955078125,
      "loss": 0.5065,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.4891427755355835,
      "rewards/margins": 0.6564360857009888,
      "rewards/rejected": -2.1455788612365723,
      "step": 590
    },
    {
      "epoch": 0.9419152276295133,
      "grad_norm": 85.49976100967837,
      "learning_rate": 5.126419011529992e-09,
      "logits/chosen": -2.247999668121338,
      "logits/rejected": -2.1932740211486816,
      "logps/chosen": -437.494384765625,
      "logps/rejected": -485.1595764160156,
      "loss": 0.4877,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.302431583404541,
      "rewards/margins": 0.9456756711006165,
      "rewards/rejected": -2.2481071949005127,
      "step": 600
    },
    {
      "epoch": 0.957613814756672,
      "grad_norm": 101.21792438469406,
      "learning_rate": 2.734228528934679e-09,
      "logits/chosen": -2.12449312210083,
      "logits/rejected": -2.1045031547546387,
      "logps/chosen": -403.0607604980469,
      "logps/rejected": -488.42059326171875,
      "loss": 0.5817,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.3698210716247559,
      "rewards/margins": 0.6692644953727722,
      "rewards/rejected": -2.0390853881835938,
      "step": 610
    },
    {
      "epoch": 0.9733124018838305,
      "grad_norm": 62.863474044060304,
      "learning_rate": 1.0851353912008642e-09,
      "logits/chosen": -2.2791266441345215,
      "logits/rejected": -2.18548583984375,
      "logps/chosen": -420.2642517089844,
      "logps/rejected": -431.43804931640625,
      "loss": 0.5458,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.357425332069397,
      "rewards/margins": 0.5371707677841187,
      "rewards/rejected": -1.8945963382720947,
      "step": 620
    },
    {
      "epoch": 0.989010989010989,
      "grad_norm": 82.02664725646991,
      "learning_rate": 1.840955480532924e-10,
      "logits/chosen": -2.251556873321533,
      "logits/rejected": -2.2051682472229004,
      "logps/chosen": -372.6395568847656,
      "logps/rejected": -425.1973571777344,
      "loss": 0.5177,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.1917184591293335,
      "rewards/margins": 0.899652361869812,
      "rewards/rejected": -2.0913710594177246,
      "step": 630
    },
    {
      "epoch": 1.0,
      "step": 637,
      "total_flos": 0.0,
      "train_loss": 0.030315694479590496,
      "train_runtime": 321.4302,
      "train_samples_per_second": 47.547,
      "train_steps_per_second": 1.982
    }
  ],
  "logging_steps": 10,
  "max_steps": 637,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}