|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 3.9936, |
|
"eval_steps": 100, |
|
"global_step": 1248, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4e-08, |
|
"logits/chosen": 0.76749187707901, |
|
"logits/rejected": 1.0001295804977417, |
|
"logps/chosen": -205.27383422851562, |
|
"logps/rejected": -130.56936645507812, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 4.0000000000000003e-07, |
|
"logits/chosen": 0.7141001224517822, |
|
"logits/rejected": 0.7724499106407166, |
|
"logps/chosen": -190.74786376953125, |
|
"logps/rejected": -138.00537109375, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.4027777910232544, |
|
"rewards/chosen": 0.0002450596366543323, |
|
"rewards/margins": 0.0004758307186421007, |
|
"rewards/rejected": -0.0002307710237801075, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 8.000000000000001e-07, |
|
"logits/chosen": 0.601749062538147, |
|
"logits/rejected": 0.879185676574707, |
|
"logps/chosen": -173.01181030273438, |
|
"logps/rejected": -116.30070495605469, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.4749999940395355, |
|
"rewards/chosen": 4.058447757415706e-06, |
|
"rewards/margins": -0.00013263085565995425, |
|
"rewards/rejected": 0.00013668931205756962, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 1.2000000000000002e-06, |
|
"logits/chosen": 0.6547126173973083, |
|
"logits/rejected": 0.833116352558136, |
|
"logps/chosen": -187.99520874023438, |
|
"logps/rejected": -127.53055572509766, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.45625001192092896, |
|
"rewards/chosen": 2.28220596909523e-06, |
|
"rewards/margins": 0.00018625140364747494, |
|
"rewards/rejected": -0.00018396916857454926, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.6000000000000001e-06, |
|
"logits/chosen": 0.5628782510757446, |
|
"logits/rejected": 0.745602548122406, |
|
"logps/chosen": -183.92758178710938, |
|
"logps/rejected": -134.64688110351562, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.45625001192092896, |
|
"rewards/chosen": -0.0005207593785598874, |
|
"rewards/margins": -0.0005336635513231158, |
|
"rewards/rejected": 1.290418458665954e-05, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.0000000000000003e-06, |
|
"logits/chosen": 0.6242247819900513, |
|
"logits/rejected": 0.8314679265022278, |
|
"logps/chosen": -171.67556762695312, |
|
"logps/rejected": -120.4903335571289, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.5, |
|
"rewards/chosen": 0.00020891983876936138, |
|
"rewards/margins": 0.0005679951282218099, |
|
"rewards/rejected": -0.00035907537676393986, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 2.4000000000000003e-06, |
|
"logits/chosen": 0.7160875797271729, |
|
"logits/rejected": 0.8556329607963562, |
|
"logps/chosen": -185.2340087890625, |
|
"logps/rejected": -124.66600036621094, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.4312500059604645, |
|
"rewards/chosen": 0.0006271885358728468, |
|
"rewards/margins": 0.0005722854984924197, |
|
"rewards/rejected": 5.4903095588088036e-05, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 2.8000000000000003e-06, |
|
"logits/chosen": 0.6916844844818115, |
|
"logits/rejected": 0.8196538090705872, |
|
"logps/chosen": -161.62498474121094, |
|
"logps/rejected": -107.52471923828125, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.48750001192092896, |
|
"rewards/chosen": 0.0007440428016707301, |
|
"rewards/margins": 0.0006342666456475854, |
|
"rewards/rejected": 0.00010977611964335665, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 3.2000000000000003e-06, |
|
"logits/chosen": 0.6701158285140991, |
|
"logits/rejected": 0.8293789029121399, |
|
"logps/chosen": -177.7081298828125, |
|
"logps/rejected": -122.1727294921875, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.512499988079071, |
|
"rewards/chosen": 0.0014677448198199272, |
|
"rewards/margins": 0.000520342611707747, |
|
"rewards/rejected": 0.0009474022081121802, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 3.6000000000000003e-06, |
|
"logits/chosen": 0.6228800415992737, |
|
"logits/rejected": 0.7212022542953491, |
|
"logps/chosen": -183.03811645507812, |
|
"logps/rejected": -139.0586700439453, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.53125, |
|
"rewards/chosen": 0.0022542846854776144, |
|
"rewards/margins": 0.0009365282021462917, |
|
"rewards/rejected": 0.0013177564833313227, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 4.000000000000001e-06, |
|
"logits/chosen": 0.6344588994979858, |
|
"logits/rejected": 0.8314794301986694, |
|
"logps/chosen": -163.23043823242188, |
|
"logps/rejected": -121.80052185058594, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.5375000238418579, |
|
"rewards/chosen": 0.002225330099463463, |
|
"rewards/margins": 0.0011866830755025148, |
|
"rewards/rejected": 0.0010386471403762698, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"eval_logits/chosen": 0.8262824416160583, |
|
"eval_logits/rejected": 0.8840107321739197, |
|
"eval_logps/chosen": -256.7341003417969, |
|
"eval_logps/rejected": -233.68739318847656, |
|
"eval_loss": 0.00015139963943511248, |
|
"eval_rewards/accuracies": 0.5199999809265137, |
|
"eval_rewards/chosen": -0.00119913334492594, |
|
"eval_rewards/margins": 0.0002922326675616205, |
|
"eval_rewards/rejected": -0.0014913661871105433, |
|
"eval_runtime": 415.0818, |
|
"eval_samples_per_second": 4.818, |
|
"eval_steps_per_second": 1.205, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 4.4e-06, |
|
"logits/chosen": 0.6864518523216248, |
|
"logits/rejected": 0.9017802476882935, |
|
"logps/chosen": -174.11480712890625, |
|
"logps/rejected": -118.93388366699219, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.5375000238418579, |
|
"rewards/chosen": 0.00283862859942019, |
|
"rewards/margins": 0.0018309386214241385, |
|
"rewards/rejected": 0.001007690210826695, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 4.800000000000001e-06, |
|
"logits/chosen": 0.5682826638221741, |
|
"logits/rejected": 0.6713820695877075, |
|
"logps/chosen": -189.4928436279297, |
|
"logps/rejected": -135.20498657226562, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.5249999761581421, |
|
"rewards/chosen": 0.0033208150416612625, |
|
"rewards/margins": 0.001955473329871893, |
|
"rewards/rejected": 0.0013653415953740478, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 4.999755441268144e-06, |
|
"logits/chosen": 0.7162402868270874, |
|
"logits/rejected": 0.8464711904525757, |
|
"logps/chosen": -175.83523559570312, |
|
"logps/rejected": -126.2065200805664, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.5375000238418579, |
|
"rewards/chosen": 0.0035425268579274416, |
|
"rewards/margins": 0.0022287473548203707, |
|
"rewards/rejected": 0.0013137792702764273, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 4.997799258487003e-06, |
|
"logits/chosen": 0.6711681485176086, |
|
"logits/rejected": 0.8102658987045288, |
|
"logps/chosen": -182.16258239746094, |
|
"logps/rejected": -128.60398864746094, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.5375000238418579, |
|
"rewards/chosen": 0.0029964938294142485, |
|
"rewards/margins": 0.002129464875906706, |
|
"rewards/rejected": 0.0008670290117152035, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 4.993888423734898e-06, |
|
"logits/chosen": 0.6097667217254639, |
|
"logits/rejected": 0.7144483327865601, |
|
"logps/chosen": -190.95156860351562, |
|
"logps/rejected": -141.19229125976562, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": 0.0037058107554912567, |
|
"rewards/margins": 0.002363733481615782, |
|
"rewards/rejected": 0.001342077157460153, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 4.988025997434253e-06, |
|
"logits/chosen": 0.6315719485282898, |
|
"logits/rejected": 0.7987428307533264, |
|
"logps/chosen": -181.68386840820312, |
|
"logps/rejected": -128.19888305664062, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.5562499761581421, |
|
"rewards/chosen": 0.002868856769055128, |
|
"rewards/margins": 0.0025774172972887754, |
|
"rewards/rejected": 0.00029143941355869174, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 4.980216567224801e-06, |
|
"logits/chosen": 0.6035071611404419, |
|
"logits/rejected": 0.8083668947219849, |
|
"logps/chosen": -181.84107971191406, |
|
"logps/rejected": -126.97474670410156, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": 0.0037610617000609636, |
|
"rewards/margins": 0.0028130013961344957, |
|
"rewards/rejected": 0.0009480599546805024, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 4.970466244373527e-06, |
|
"logits/chosen": 0.6453949213027954, |
|
"logits/rejected": 0.797042727470398, |
|
"logps/chosen": -155.79519653320312, |
|
"logps/rejected": -113.65773010253906, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": 0.0029109427705407143, |
|
"rewards/margins": 0.0023111128248274326, |
|
"rewards/rejected": 0.0005998298292979598, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 4.958782658992307e-06, |
|
"logits/chosen": 0.6763242483139038, |
|
"logits/rejected": 0.8502823710441589, |
|
"logps/chosen": -159.13275146484375, |
|
"logps/rejected": -120.99656677246094, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.4749999940395355, |
|
"rewards/chosen": 0.0026291562244296074, |
|
"rewards/margins": 0.0016078405315056443, |
|
"rewards/rejected": 0.001021315692923963, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 4.945174954066957e-06, |
|
"logits/chosen": 0.7041156888008118, |
|
"logits/rejected": 0.7947753667831421, |
|
"logps/chosen": -177.0174560546875, |
|
"logps/rejected": -127.36385345458984, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": 0.003798459889367223, |
|
"rewards/margins": 0.0031563788652420044, |
|
"rewards/rejected": 0.0006420810823328793, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"eval_logits/chosen": 0.8200604319572449, |
|
"eval_logits/rejected": 0.8777603507041931, |
|
"eval_logps/chosen": -256.8277893066406, |
|
"eval_logps/rejected": -233.76914978027344, |
|
"eval_loss": 0.000158556635142304, |
|
"eval_rewards/accuracies": 0.5005000233650208, |
|
"eval_rewards/chosen": -0.00213597621768713, |
|
"eval_rewards/margins": 0.00017285306239500642, |
|
"eval_rewards/rejected": -0.0023088292218744755, |
|
"eval_runtime": 412.468, |
|
"eval_samples_per_second": 4.849, |
|
"eval_steps_per_second": 1.212, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 4.929653778302397e-06, |
|
"logits/chosen": 0.6099511981010437, |
|
"logits/rejected": 0.811872661113739, |
|
"logps/chosen": -192.26446533203125, |
|
"logps/rejected": -131.25962829589844, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.0029747250955551863, |
|
"rewards/margins": 0.0029428431298583746, |
|
"rewards/rejected": 3.1882118491921574e-05, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 4.912231277789509e-06, |
|
"logits/chosen": 0.6272088289260864, |
|
"logits/rejected": 0.767717719078064, |
|
"logps/chosen": -193.66490173339844, |
|
"logps/rejected": -143.585205078125, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": 0.003646609140560031, |
|
"rewards/margins": 0.0021339866798371077, |
|
"rewards/rejected": 0.0015126224607229233, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 4.892921086500219e-06, |
|
"logits/chosen": 0.6231340169906616, |
|
"logits/rejected": 0.7231715321540833, |
|
"logps/chosen": -160.1138458251953, |
|
"logps/rejected": -114.69986724853516, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": 0.003955821972340345, |
|
"rewards/margins": 0.003026761580258608, |
|
"rewards/rejected": 0.0009290605084970593, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 4.871738315618236e-06, |
|
"logits/chosen": 0.7013573050498962, |
|
"logits/rejected": 0.8051062822341919, |
|
"logps/chosen": -186.76821899414062, |
|
"logps/rejected": -135.32489013671875, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": 0.003390461904928088, |
|
"rewards/margins": 0.002614812459796667, |
|
"rewards/rejected": 0.0007756495615467429, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 4.848699541713801e-06, |
|
"logits/chosen": 0.6483660340309143, |
|
"logits/rejected": 0.8247146606445312, |
|
"logps/chosen": -159.59156799316406, |
|
"logps/rejected": -118.78582763671875, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.5375000238418579, |
|
"rewards/chosen": 0.0031985179521143436, |
|
"rewards/margins": 0.0021302015520632267, |
|
"rewards/rejected": 0.0010683165164664388, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 4.823822793771696e-06, |
|
"logits/chosen": 0.625135064125061, |
|
"logits/rejected": 0.7819581627845764, |
|
"logps/chosen": -169.98837280273438, |
|
"logps/rejected": -119.9397964477539, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": 0.00324636441655457, |
|
"rewards/margins": 0.0028731045313179493, |
|
"rewards/rejected": 0.0003732596233021468, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 4.797127539082669e-06, |
|
"logits/chosen": 0.649788498878479, |
|
"logits/rejected": 0.8432229161262512, |
|
"logps/chosen": -191.59213256835938, |
|
"logps/rejected": -134.23428344726562, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": 0.0034480884205549955, |
|
"rewards/margins": 0.002935568569228053, |
|
"rewards/rejected": 0.0005125202005729079, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 4.7686346680093135e-06, |
|
"logits/chosen": 0.6038953065872192, |
|
"logits/rejected": 0.8061636686325073, |
|
"logps/chosen": -188.86422729492188, |
|
"logps/rejected": -125.95369720458984, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": 0.004464815836399794, |
|
"rewards/margins": 0.004056466277688742, |
|
"rewards/rejected": 0.0004083492676727474, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 4.738366477638319e-06, |
|
"logits/chosen": 0.5706946849822998, |
|
"logits/rejected": 0.7165879011154175, |
|
"logps/chosen": -172.26846313476562, |
|
"logps/rejected": -113.09400939941406, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": 0.0033409663010388613, |
|
"rewards/margins": 0.0031195811461657286, |
|
"rewards/rejected": 0.0002213849511463195, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 4.7063466543318965e-06, |
|
"logits/chosen": 0.6467469334602356, |
|
"logits/rejected": 0.8215667605400085, |
|
"logps/chosen": -171.28201293945312, |
|
"logps/rejected": -121.31685638427734, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": 0.00259224371984601, |
|
"rewards/margins": 0.0022495179437100887, |
|
"rewards/rejected": 0.0003427262417972088, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"eval_logits/chosen": 0.8206124901771545, |
|
"eval_logits/rejected": 0.8782824277877808, |
|
"eval_logps/chosen": -256.82720947265625, |
|
"eval_logps/rejected": -233.77804565429688, |
|
"eval_loss": 0.00015650840941816568, |
|
"eval_rewards/accuracies": 0.4984999895095825, |
|
"eval_rewards/chosen": -0.002130264649167657, |
|
"eval_rewards/margins": 0.00026750334654934704, |
|
"eval_rewards/rejected": -0.0023977679666131735, |
|
"eval_runtime": 412.0828, |
|
"eval_samples_per_second": 4.853, |
|
"eval_steps_per_second": 1.213, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 4.672600255192022e-06, |
|
"logits/chosen": 0.6745213270187378, |
|
"logits/rejected": 0.7582186460494995, |
|
"logps/chosen": -177.1463623046875, |
|
"logps/rejected": -121.45220947265625, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": 0.003095152322202921, |
|
"rewards/margins": 0.002747387159615755, |
|
"rewards/rejected": 0.00034776475513353944, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 4.6371536884520115e-06, |
|
"logits/chosen": 0.6856507062911987, |
|
"logits/rejected": 0.8083009719848633, |
|
"logps/chosen": -175.43466186523438, |
|
"logps/rejected": -113.60682678222656, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.5625, |
|
"rewards/chosen": 0.0029540827963501215, |
|
"rewards/margins": 0.0019870868418365717, |
|
"rewards/rejected": 0.0009669959545135498, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 4.600034692810764e-06, |
|
"logits/chosen": 0.6629844307899475, |
|
"logits/rejected": 0.831767737865448, |
|
"logps/chosen": -181.02310180664062, |
|
"logps/rejected": -119.25450134277344, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": 0.0041047511622309685, |
|
"rewards/margins": 0.003629709128290415, |
|
"rewards/rejected": 0.0004750423540826887, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 4.561272315725852e-06, |
|
"logits/chosen": 0.6184499263763428, |
|
"logits/rejected": 0.7539141178131104, |
|
"logps/chosen": -186.89682006835938, |
|
"logps/rejected": -139.5793914794922, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": 0.0036196750588715076, |
|
"rewards/margins": 0.0029566895682364702, |
|
"rewards/rejected": 0.0006629853160120547, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 4.520896890682449e-06, |
|
"logits/chosen": 0.6621273159980774, |
|
"logits/rejected": 0.7836776971817017, |
|
"logps/chosen": -179.44253540039062, |
|
"logps/rejected": -126.19744873046875, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": 0.003894126508384943, |
|
"rewards/margins": 0.003107634838670492, |
|
"rewards/rejected": 0.0007864916697144508, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 4.478940013455864e-06, |
|
"logits/chosen": 0.6130011677742004, |
|
"logits/rejected": 0.7723643183708191, |
|
"logps/chosen": -179.69866943359375, |
|
"logps/rejected": -119.17742919921875, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.0030810669995844364, |
|
"rewards/margins": 0.0031474686693400145, |
|
"rewards/rejected": -6.640238279942423e-05, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 4.435434517386281e-06, |
|
"logits/chosen": 0.6930996179580688, |
|
"logits/rejected": 0.8360698819160461, |
|
"logps/chosen": -189.090087890625, |
|
"logps/rejected": -135.1619873046875, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": 0.003456123173236847, |
|
"rewards/margins": 0.0029000742360949516, |
|
"rewards/rejected": 0.0005560485878959298, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 4.39041444768504e-06, |
|
"logits/chosen": 0.6620668172836304, |
|
"logits/rejected": 0.853411853313446, |
|
"logps/chosen": -177.2210235595703, |
|
"logps/rejected": -124.18900299072266, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": 0.00355343846604228, |
|
"rewards/margins": 0.003507003653794527, |
|
"rewards/rejected": 4.643518332159147e-05, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 4.343915034792569e-06, |
|
"logits/chosen": 0.629216730594635, |
|
"logits/rejected": 0.7686316967010498, |
|
"logps/chosen": -184.9043426513672, |
|
"logps/rejected": -130.17727661132812, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.003557362360879779, |
|
"rewards/margins": 0.003719041822478175, |
|
"rewards/rejected": -0.00016167931607924402, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 4.295972666808811e-06, |
|
"logits/chosen": 0.6529077291488647, |
|
"logits/rejected": 0.8112252354621887, |
|
"logps/chosen": -172.85665893554688, |
|
"logps/rejected": -127.3191146850586, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6312500238418579, |
|
"rewards/chosen": 0.0037923946511000395, |
|
"rewards/margins": 0.003340603783726692, |
|
"rewards/rejected": 0.00045179054723121226, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"eval_logits/chosen": 0.8192203044891357, |
|
"eval_logits/rejected": 0.8769342303276062, |
|
"eval_logps/chosen": -256.87567138671875, |
|
"eval_logps/rejected": -233.8277130126953, |
|
"eval_loss": 0.00015775366046000272, |
|
"eval_rewards/accuracies": 0.5195000171661377, |
|
"eval_rewards/chosen": -0.002614969853311777, |
|
"eval_rewards/margins": 0.00027956385747529566, |
|
"eval_rewards/rejected": -0.002894533798098564, |
|
"eval_runtime": 412.2856, |
|
"eval_samples_per_second": 4.851, |
|
"eval_steps_per_second": 1.213, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 4.246624861017732e-06, |
|
"logits/chosen": 0.625112771987915, |
|
"logits/rejected": 0.7611836791038513, |
|
"logps/chosen": -178.12124633789062, |
|
"logps/rejected": -135.6422882080078, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.004022772889584303, |
|
"rewards/margins": 0.003282977268099785, |
|
"rewards/rejected": 0.0007397954468615353, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 4.195910234528186e-06, |
|
"logits/chosen": 0.6460477709770203, |
|
"logits/rejected": 0.7641812562942505, |
|
"logps/chosen": -168.17930603027344, |
|
"logps/rejected": -124.3600082397461, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": 0.0026535852812230587, |
|
"rewards/margins": 0.0026994948275387287, |
|
"rewards/rejected": -4.590977187035605e-05, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 4.143868474054098e-06, |
|
"logits/chosen": 0.5987478494644165, |
|
"logits/rejected": 0.7457458972930908, |
|
"logps/chosen": -189.61422729492188, |
|
"logps/rejected": -123.73170471191406, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": 0.003896042238920927, |
|
"rewards/margins": 0.00355791044421494, |
|
"rewards/rejected": 0.00033813173649832606, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 4.0905403048576545e-06, |
|
"logits/chosen": 0.5944562554359436, |
|
"logits/rejected": 0.7794451713562012, |
|
"logps/chosen": -173.83494567871094, |
|
"logps/rejected": -125.55583190917969, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.581250011920929, |
|
"rewards/chosen": 0.003923391457647085, |
|
"rewards/margins": 0.003630859311670065, |
|
"rewards/rejected": 0.00029253208776935935, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 4.035967458879751e-06, |
|
"logits/chosen": 0.5830662846565247, |
|
"logits/rejected": 0.7792474031448364, |
|
"logps/chosen": -174.3615264892578, |
|
"logps/rejected": -130.67514038085938, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": 0.0029520168900489807, |
|
"rewards/margins": 0.0029378398321568966, |
|
"rewards/rejected": 1.4177237972035073e-05, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 3.980192642082682e-06, |
|
"logits/chosen": 0.6810758709907532, |
|
"logits/rejected": 0.7834008932113647, |
|
"logps/chosen": -168.80197143554688, |
|
"logps/rejected": -118.42387390136719, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.581250011920929, |
|
"rewards/chosen": 0.0025485253427177668, |
|
"rewards/margins": 0.002175524365156889, |
|
"rewards/rejected": 0.00037300080293789506, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 3.923259501030604e-06, |
|
"logits/chosen": 0.6081482768058777, |
|
"logits/rejected": 0.8008432388305664, |
|
"logps/chosen": -158.7695770263672, |
|
"logps/rejected": -117.162109375, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.5687500238418579, |
|
"rewards/chosen": 0.003074315609410405, |
|
"rewards/margins": 0.0024462412111461163, |
|
"rewards/rejected": 0.0006280745146796107, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 3.865212588733927e-06, |
|
"logits/chosen": 0.5983234643936157, |
|
"logits/rejected": 0.8347161412239075, |
|
"logps/chosen": -177.07232666015625, |
|
"logps/rejected": -113.17292785644531, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.5687500238418579, |
|
"rewards/chosen": 0.004156365990638733, |
|
"rewards/margins": 0.0032986297737807035, |
|
"rewards/rejected": 0.0008577358676120639, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 3.8060973297843773e-06, |
|
"logits/chosen": 0.6949520111083984, |
|
"logits/rejected": 0.8520463109016418, |
|
"logps/chosen": -180.8011016845703, |
|
"logps/rejected": -135.38951110839844, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": 0.0031148726120591164, |
|
"rewards/margins": 0.0025527984835207462, |
|
"rewards/rejected": 0.000562074186746031, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 3.7459599848079965e-06, |
|
"logits/chosen": 0.6497796773910522, |
|
"logits/rejected": 0.8080886006355286, |
|
"logps/chosen": -187.10287475585938, |
|
"logps/rejected": -133.8563995361328, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": 0.004290526732802391, |
|
"rewards/margins": 0.0023678760044276714, |
|
"rewards/rejected": 0.0019226508447900414, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"eval_logits/chosen": 0.8150748610496521, |
|
"eval_logits/rejected": 0.8728834986686707, |
|
"eval_logps/chosen": -256.8869323730469, |
|
"eval_logps/rejected": -233.8388214111328, |
|
"eval_loss": 0.0001666269963607192, |
|
"eval_rewards/accuracies": 0.5170000195503235, |
|
"eval_rewards/chosen": -0.0027276412583887577, |
|
"eval_rewards/margins": 0.00027794469497166574, |
|
"eval_rewards/rejected": -0.0030055860988795757, |
|
"eval_runtime": 414.2028, |
|
"eval_samples_per_second": 4.829, |
|
"eval_steps_per_second": 1.207, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 3.684847614263898e-06, |
|
"logits/chosen": 0.649914562702179, |
|
"logits/rejected": 0.8345224261283875, |
|
"logps/chosen": -170.88528442382812, |
|
"logps/rejected": -113.5182876586914, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6312500238418579, |
|
"rewards/chosen": 0.004132578149437904, |
|
"rewards/margins": 0.004135853610932827, |
|
"rewards/rejected": -3.27564202962094e-06, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 3.622808041617133e-06, |
|
"logits/chosen": 0.6696175336837769, |
|
"logits/rejected": 0.8282491564750671, |
|
"logps/chosen": -165.21356201171875, |
|
"logps/rejected": -120.80168151855469, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6187499761581421, |
|
"rewards/chosen": 0.003804347710683942, |
|
"rewards/margins": 0.002934789750725031, |
|
"rewards/rejected": 0.0008695581927895546, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 3.559889815914441e-06, |
|
"logits/chosen": 0.6223723888397217, |
|
"logits/rejected": 0.8060038685798645, |
|
"logps/chosen": -173.92562866210938, |
|
"logps/rejected": -115.7896728515625, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.0046675438061356544, |
|
"rewards/margins": 0.0034386657644063234, |
|
"rewards/rejected": 0.001228878041729331, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 3.496142173792219e-06, |
|
"logits/chosen": 0.5860522389411926, |
|
"logits/rejected": 0.7538793683052063, |
|
"logps/chosen": -179.20030212402344, |
|
"logps/rejected": -126.5418930053711, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": 0.0036121346056461334, |
|
"rewards/margins": 0.003292589681223035, |
|
"rewards/rejected": 0.00031954512814991176, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 3.4316150009464023e-06, |
|
"logits/chosen": 0.6498968601226807, |
|
"logits/rejected": 0.8035451173782349, |
|
"logps/chosen": -192.52578735351562, |
|
"logps/rejected": -139.58251953125, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": 0.003925256431102753, |
|
"rewards/margins": 0.0028843428008258343, |
|
"rewards/rejected": 0.001040913863107562, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 3.366358793094433e-06, |
|
"logits/chosen": 0.5884669423103333, |
|
"logits/rejected": 0.8008158802986145, |
|
"logps/chosen": -179.25753784179688, |
|
"logps/rejected": -120.347412109375, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": 0.003893566783517599, |
|
"rewards/margins": 0.0027398644015192986, |
|
"rewards/rejected": 0.0011537026148289442, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 3.3004246164598535e-06, |
|
"logits/chosen": 0.6389291286468506, |
|
"logits/rejected": 0.8485361933708191, |
|
"logps/chosen": -190.9259490966797, |
|
"logps/rejected": -140.17202758789062, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.004239690490067005, |
|
"rewards/margins": 0.003860587952658534, |
|
"rewards/rejected": 0.0003791024792008102, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 3.233864067810446e-06, |
|
"logits/chosen": 0.6482547521591187, |
|
"logits/rejected": 0.8119586110115051, |
|
"logps/chosen": -183.17164611816406, |
|
"logps/rejected": -119.36767578125, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.004655472934246063, |
|
"rewards/margins": 0.0037689208984375, |
|
"rewards/rejected": 0.0008865518611855805, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 3.1667292340812077e-06, |
|
"logits/chosen": 0.6013578176498413, |
|
"logits/rejected": 0.7613108158111572, |
|
"logps/chosen": -181.44009399414062, |
|
"logps/rejected": -140.5045166015625, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": 0.004445691592991352, |
|
"rewards/margins": 0.003334993962198496, |
|
"rewards/rejected": 0.0011106978636234999, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 3.099072651613728e-06, |
|
"logits/chosen": 0.7029744386672974, |
|
"logits/rejected": 0.8172439336776733, |
|
"logps/chosen": -165.12625122070312, |
|
"logps/rejected": -119.59954833984375, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": 0.0032034076284617186, |
|
"rewards/margins": 0.0023894021287560463, |
|
"rewards/rejected": 0.0008140054414980114, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"eval_logits/chosen": 0.817969799041748, |
|
"eval_logits/rejected": 0.8756601810455322, |
|
"eval_logps/chosen": -256.885986328125, |
|
"eval_logps/rejected": -233.84141540527344, |
|
"eval_loss": 0.00016333417443092912, |
|
"eval_rewards/accuracies": 0.5070000290870667, |
|
"eval_rewards/chosen": -0.0027182498015463352, |
|
"eval_rewards/margins": 0.0003131589328404516, |
|
"eval_rewards/rejected": -0.003031408879905939, |
|
"eval_runtime": 412.1696, |
|
"eval_samples_per_second": 4.852, |
|
"eval_steps_per_second": 1.213, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 3.0309472650438982e-06, |
|
"logits/chosen": 0.7164211869239807, |
|
"logits/rejected": 0.8135250806808472, |
|
"logps/chosen": -167.83938598632812, |
|
"logps/rejected": -123.3685531616211, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": 0.004080263432115316, |
|
"rewards/margins": 0.002866895869374275, |
|
"rewards/rejected": 0.0012133677955716848, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 2.9624063858701006e-06, |
|
"logits/chosen": 0.6123205423355103, |
|
"logits/rejected": 0.8067408800125122, |
|
"logps/chosen": -174.879638671875, |
|
"logps/rejected": -119.88896179199219, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": 0.0032961233519017696, |
|
"rewards/margins": 0.0031299355905503035, |
|
"rewards/rejected": 0.00016618790687061846, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 2.8935036507343185e-06, |
|
"logits/chosen": 0.6171947717666626, |
|
"logits/rejected": 0.7731830477714539, |
|
"logps/chosen": -173.52308654785156, |
|
"logps/rejected": -122.30467224121094, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6187499761581421, |
|
"rewards/chosen": 0.004019217099994421, |
|
"rewards/margins": 0.003546707332134247, |
|
"rewards/rejected": 0.0004725100880023092, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 2.8242929794487926e-06, |
|
"logits/chosen": 0.6720027923583984, |
|
"logits/rejected": 0.8209166526794434, |
|
"logps/chosen": -167.04946899414062, |
|
"logps/rejected": -120.01603698730469, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": 0.004417883697897196, |
|
"rewards/margins": 0.00379347731359303, |
|
"rewards/rejected": 0.000624406267888844, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 2.7548285328010984e-06, |
|
"logits/chosen": 0.5649908781051636, |
|
"logits/rejected": 0.7522573471069336, |
|
"logps/chosen": -167.37994384765625, |
|
"logps/rejected": -118.6939697265625, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.581250011920929, |
|
"rewards/chosen": 0.003759379032999277, |
|
"rewards/margins": 0.0030631672125309706, |
|
"rewards/rejected": 0.0006962117040529847, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 2.6851646701706306e-06, |
|
"logits/chosen": 0.7023177742958069, |
|
"logits/rejected": 0.8027107119560242, |
|
"logps/chosen": -177.06430053710938, |
|
"logps/rejected": -129.97714233398438, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": 0.003546078223735094, |
|
"rewards/margins": 0.0027953279204666615, |
|
"rewards/rejected": 0.0007507502450607717, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 2.6153559069897007e-06, |
|
"logits/chosen": 0.6293431520462036, |
|
"logits/rejected": 0.7750530242919922, |
|
"logps/chosen": -168.621337890625, |
|
"logps/rejected": -122.5462875366211, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": 0.003033037530258298, |
|
"rewards/margins": 0.0028599021025002003, |
|
"rewards/rejected": 0.00017313548596575856, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 2.5454568720824937e-06, |
|
"logits/chosen": 0.5758141279220581, |
|
"logits/rejected": 0.7897475957870483, |
|
"logps/chosen": -174.54208374023438, |
|
"logps/rejected": -118.59149169921875, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": 0.003853294998407364, |
|
"rewards/margins": 0.003840038087219, |
|
"rewards/rejected": 1.3256654710858129e-05, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 2.4755222649153014e-06, |
|
"logits/chosen": 0.6393508911132812, |
|
"logits/rejected": 0.8246806263923645, |
|
"logps/chosen": -184.72274780273438, |
|
"logps/rejected": -126.2255859375, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6312500238418579, |
|
"rewards/chosen": 0.0034170313738286495, |
|
"rewards/margins": 0.0032350593246519566, |
|
"rewards/rejected": 0.00018197241297457367, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 2.4056068127914803e-06, |
|
"logits/chosen": 0.7041198015213013, |
|
"logits/rejected": 0.8579221963882446, |
|
"logps/chosen": -165.37600708007812, |
|
"logps/rejected": -113.6267318725586, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6312500238418579, |
|
"rewards/chosen": 0.0038474693428725004, |
|
"rewards/margins": 0.004361593164503574, |
|
"rewards/rejected": -0.0005141238798387349, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"eval_logits/chosen": 0.8142222166061401, |
|
"eval_logits/rejected": 0.8718712329864502, |
|
"eval_logps/chosen": -256.9122619628906, |
|
"eval_logps/rejected": -233.85919189453125, |
|
"eval_loss": 0.00016846887592691928, |
|
"eval_rewards/accuracies": 0.5065000057220459, |
|
"eval_rewards/chosen": -0.0029809277039021254, |
|
"eval_rewards/margins": 0.00022821790480520576, |
|
"eval_rewards/rejected": -0.003209145972505212, |
|
"eval_runtime": 412.3465, |
|
"eval_samples_per_second": 4.85, |
|
"eval_steps_per_second": 1.213, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 2.3357652280246125e-06, |
|
"logits/chosen": 0.6320663690567017, |
|
"logits/rejected": 0.785031259059906, |
|
"logps/chosen": -189.28390502929688, |
|
"logps/rejected": -144.56167602539062, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.0025972402654588223, |
|
"rewards/margins": 0.0028369042556732893, |
|
"rewards/rejected": -0.00023966425214894116, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 2.2660521651234036e-06, |
|
"logits/chosen": 0.6551303267478943, |
|
"logits/rejected": 0.8406831622123718, |
|
"logps/chosen": -186.629638671875, |
|
"logps/rejected": -126.31327819824219, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": 0.0038953598123043776, |
|
"rewards/margins": 0.003374651074409485, |
|
"rewards/rejected": 0.0005207090289331973, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 2.1965221780218173e-06, |
|
"logits/chosen": 0.6312896013259888, |
|
"logits/rejected": 0.7552987337112427, |
|
"logps/chosen": -173.728271484375, |
|
"logps/rejected": -124.27177429199219, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": 0.004026113077998161, |
|
"rewards/margins": 0.0039476132951676846, |
|
"rewards/rejected": 7.849968096707016e-05, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 2.1272296773879107e-06, |
|
"logits/chosen": 0.6133307814598083, |
|
"logits/rejected": 0.8152937889099121, |
|
"logps/chosen": -178.77232360839844, |
|
"logps/rejected": -125.87672424316406, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6312500238418579, |
|
"rewards/chosen": 0.0026045762933790684, |
|
"rewards/margins": 0.0032864962704479694, |
|
"rewards/rejected": -0.0006819200934842229, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 2.058228888044788e-06, |
|
"logits/chosen": 0.6092817187309265, |
|
"logits/rejected": 0.7013009190559387, |
|
"logps/chosen": -165.13404846191406, |
|
"logps/rejected": -122.30549621582031, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6187499761581421, |
|
"rewards/chosen": 0.003001943463459611, |
|
"rewards/margins": 0.0025921487249433994, |
|
"rewards/rejected": 0.0004097948840353638, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 1.989573806536978e-06, |
|
"logits/chosen": 0.6568277478218079, |
|
"logits/rejected": 0.8574051856994629, |
|
"logps/chosen": -179.80409240722656, |
|
"logps/rejected": -120.96827697753906, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": 0.004101686645299196, |
|
"rewards/margins": 0.00385199673473835, |
|
"rewards/rejected": 0.0002496893866918981, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 1.921318158875459e-06, |
|
"logits/chosen": 0.6586846113204956, |
|
"logits/rejected": 0.8662702441215515, |
|
"logps/chosen": -174.4876708984375, |
|
"logps/rejected": -128.13922119140625, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": 0.004603301174938679, |
|
"rewards/margins": 0.0032628674525767565, |
|
"rewards/rejected": 0.0013404333731159568, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 1.8535153584943915e-06, |
|
"logits/chosen": 0.6371630430221558, |
|
"logits/rejected": 0.7737770080566406, |
|
"logps/chosen": -170.006591796875, |
|
"logps/rejected": -126.65934753417969, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": 0.004735985770821571, |
|
"rewards/margins": 0.003097447333857417, |
|
"rewards/rejected": 0.001638538553379476, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 1.7862184644524422e-06, |
|
"logits/chosen": 0.6403388977050781, |
|
"logits/rejected": 0.7904574275016785, |
|
"logps/chosen": -178.15911865234375, |
|
"logps/rejected": -133.64309692382812, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.00445596594363451, |
|
"rewards/margins": 0.004045891109853983, |
|
"rewards/rejected": 0.0004100751248188317, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 1.7194801399114471e-06, |
|
"logits/chosen": 0.6325648427009583, |
|
"logits/rejected": 0.747418999671936, |
|
"logps/chosen": -174.4492950439453, |
|
"logps/rejected": -125.17801666259766, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": 0.004687703680247068, |
|
"rewards/margins": 0.004329483024775982, |
|
"rewards/rejected": 0.00035822103382088244, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"eval_logits/chosen": 0.8135467767715454, |
|
"eval_logits/rejected": 0.8712592124938965, |
|
"eval_logps/chosen": -256.8898010253906, |
|
"eval_logps/rejected": -233.84222412109375, |
|
"eval_loss": 0.00016861619951669127, |
|
"eval_rewards/accuracies": 0.5189999938011169, |
|
"eval_rewards/chosen": -0.002756227506324649, |
|
"eval_rewards/margins": 0.00028340137214399874, |
|
"eval_rewards/rejected": -0.003039628965780139, |
|
"eval_runtime": 412.1883, |
|
"eval_samples_per_second": 4.852, |
|
"eval_steps_per_second": 1.213, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 1.6533526109248632e-06, |
|
"logits/chosen": 0.6175631284713745, |
|
"logits/rejected": 0.74301677942276, |
|
"logps/chosen": -179.27880859375, |
|
"logps/rejected": -123.3892822265625, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": 0.003877174574881792, |
|
"rewards/margins": 0.003652264829725027, |
|
"rewards/rejected": 0.00022490958508569747, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 1.5878876255682951e-06, |
|
"logits/chosen": 0.6676111817359924, |
|
"logits/rejected": 0.7865282893180847, |
|
"logps/chosen": -189.4076385498047, |
|
"logps/rejected": -137.06271362304688, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": 0.004411728121340275, |
|
"rewards/margins": 0.0037712506018579006, |
|
"rewards/rejected": 0.0006404774612747133, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 1.5231364134440485e-06, |
|
"logits/chosen": 0.6061269044876099, |
|
"logits/rejected": 0.7513775825500488, |
|
"logps/chosen": -190.88760375976562, |
|
"logps/rejected": -134.599365234375, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.004417582880705595, |
|
"rewards/margins": 0.0038115177303552628, |
|
"rewards/rejected": 0.000606064626481384, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 1.4591496455914292e-06, |
|
"logits/chosen": 0.6278064846992493, |
|
"logits/rejected": 0.7927815914154053, |
|
"logps/chosen": -173.77554321289062, |
|
"logps/rejected": -128.66485595703125, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": 0.003960388712584972, |
|
"rewards/margins": 0.0037871100939810276, |
|
"rewards/rejected": 0.0001732785312924534, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 1.395977394834132e-06, |
|
"logits/chosen": 0.6175497174263, |
|
"logits/rejected": 0.8719260096549988, |
|
"logps/chosen": -172.75567626953125, |
|
"logps/rejected": -118.21751403808594, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": 0.004103314597159624, |
|
"rewards/margins": 0.003662864211946726, |
|
"rewards/rejected": 0.0004404502979014069, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 1.3336690965957733e-06, |
|
"logits/chosen": 0.6757524013519287, |
|
"logits/rejected": 0.8124428987503052, |
|
"logps/chosen": -192.186279296875, |
|
"logps/rejected": -144.07810974121094, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": 0.004593437071889639, |
|
"rewards/margins": 0.004751748405396938, |
|
"rewards/rejected": -0.0001583110133651644, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 1.2722735102142192e-06, |
|
"logits/chosen": 0.5904192328453064, |
|
"logits/rejected": 0.8412498235702515, |
|
"logps/chosen": -178.94638061523438, |
|
"logps/rejected": -116.67149353027344, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": 0.0042373063042759895, |
|
"rewards/margins": 0.004430609289556742, |
|
"rewards/rejected": -0.00019330321811139584, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 1.2118386807849733e-06, |
|
"logits/chosen": 0.6753177642822266, |
|
"logits/rejected": 0.8218367695808411, |
|
"logps/chosen": -169.46713256835938, |
|
"logps/rejected": -124.23944091796875, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.0036155576817691326, |
|
"rewards/margins": 0.0030525142792612314, |
|
"rewards/rejected": 0.0005630434607155621, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 1.1524119015635116e-06, |
|
"logits/chosen": 0.5247241258621216, |
|
"logits/rejected": 0.7601326704025269, |
|
"logps/chosen": -187.26348876953125, |
|
"logps/rejected": -123.6650390625, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.004491317551583052, |
|
"rewards/margins": 0.003613727632910013, |
|
"rewards/rejected": 0.0008775900350883603, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 1.0940396769559584e-06, |
|
"logits/chosen": 0.6696980595588684, |
|
"logits/rejected": 0.7714194059371948, |
|
"logps/chosen": -191.7372283935547, |
|
"logps/rejected": -128.82192993164062, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.004666435532271862, |
|
"rewards/margins": 0.0038504847325384617, |
|
"rewards/rejected": 0.0008159511489793658, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"eval_logits/chosen": 0.8135749101638794, |
|
"eval_logits/rejected": 0.8714309930801392, |
|
"eval_logps/chosen": -256.91107177734375, |
|
"eval_logps/rejected": -233.8529052734375, |
|
"eval_loss": 0.00016910216072574258, |
|
"eval_rewards/accuracies": 0.5015000104904175, |
|
"eval_rewards/chosen": -0.0029687141068279743, |
|
"eval_rewards/margins": 0.00017772088176570833, |
|
"eval_rewards/rejected": -0.0031464346684515476, |
|
"eval_runtime": 412.7326, |
|
"eval_samples_per_second": 4.846, |
|
"eval_steps_per_second": 1.211, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 1.036767686127079e-06, |
|
"logits/chosen": 0.6320682168006897, |
|
"logits/rejected": 0.7783384323120117, |
|
"logps/chosen": -189.1212158203125, |
|
"logps/rejected": -130.10711669921875, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": 0.004713307600468397, |
|
"rewards/margins": 0.003414090257138014, |
|
"rewards/rejected": 0.0012992171104997396, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 9.806407472540644e-07, |
|
"logits/chosen": 0.6682049036026001, |
|
"logits/rejected": 0.8254071474075317, |
|
"logps/chosen": -184.09628295898438, |
|
"logps/rejected": -123.4487075805664, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": 0.004300036001950502, |
|
"rewards/margins": 0.003941989503800869, |
|
"rewards/rejected": 0.00035804632352665067, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 9.257027824540823e-07, |
|
"logits/chosen": 0.6992205381393433, |
|
"logits/rejected": 0.8208533525466919, |
|
"logps/chosen": -164.10159301757812, |
|
"logps/rejected": -110.07139587402344, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": 0.003934869542717934, |
|
"rewards/margins": 0.003884183941408992, |
|
"rewards/rejected": 5.068551399745047e-05, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 8.719967834130385e-07, |
|
"logits/chosen": 0.6116827726364136, |
|
"logits/rejected": 0.8160429000854492, |
|
"logps/chosen": -169.47280883789062, |
|
"logps/rejected": -115.8711166381836, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": 0.005224294029176235, |
|
"rewards/margins": 0.004378092475235462, |
|
"rewards/rejected": 0.0008462019031867385, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 8.195647777424479e-07, |
|
"logits/chosen": 0.5635887384414673, |
|
"logits/rejected": 0.7940059900283813, |
|
"logps/chosen": -177.22665405273438, |
|
"logps/rejected": -120.8065414428711, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.003887483151629567, |
|
"rewards/margins": 0.0035337016452103853, |
|
"rewards/rejected": 0.00035378162283450365, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 7.684477960907422e-07, |
|
"logits/chosen": 0.5837413668632507, |
|
"logits/rejected": 0.7604612112045288, |
|
"logps/chosen": -175.0729217529297, |
|
"logps/rejected": -118.60954284667969, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": 0.004512741696089506, |
|
"rewards/margins": 0.0036391555331647396, |
|
"rewards/rejected": 0.0008735861629247665, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 7.186858400347455e-07, |
|
"logits/chosen": 0.6265005469322205, |
|
"logits/rejected": 0.7885332703590393, |
|
"logps/chosen": -182.97769165039062, |
|
"logps/rejected": -138.80723571777344, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": 0.004079051315784454, |
|
"rewards/margins": 0.004020996857434511, |
|
"rewards/rejected": 5.8054854889633134e-05, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 6.703178507764618e-07, |
|
"logits/chosen": 0.6634309887886047, |
|
"logits/rejected": 0.7914069890975952, |
|
"logps/chosen": -167.03756713867188, |
|
"logps/rejected": -124.2927474975586, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.005084961652755737, |
|
"rewards/margins": 0.0035723126493394375, |
|
"rewards/rejected": 0.0015126490034162998, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 6.233816786696414e-07, |
|
"logits/chosen": 0.6042094230651855, |
|
"logits/rejected": 0.7901323437690735, |
|
"logps/chosen": -195.58741760253906, |
|
"logps/rejected": -132.481689453125, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": 0.005596310831606388, |
|
"rewards/margins": 0.0045918067917227745, |
|
"rewards/rejected": 0.001004504389129579, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 5.77914053600005e-07, |
|
"logits/chosen": 0.5928013920783997, |
|
"logits/rejected": 0.8330589532852173, |
|
"logps/chosen": -176.38113403320312, |
|
"logps/rejected": -123.68299865722656, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": 0.0048389798030257225, |
|
"rewards/margins": 0.00368002662435174, |
|
"rewards/rejected": 0.001158953527919948, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"eval_logits/chosen": 0.8156088590621948, |
|
"eval_logits/rejected": 0.8733118176460266, |
|
"eval_logps/chosen": -256.9035949707031, |
|
"eval_logps/rejected": -233.8666229248047, |
|
"eval_loss": 0.00016867661906871945, |
|
"eval_rewards/accuracies": 0.5180000066757202, |
|
"eval_rewards/chosen": -0.002894038800150156, |
|
"eval_rewards/margins": 0.0003897584683727473, |
|
"eval_rewards/rejected": -0.0032837972976267338, |
|
"eval_runtime": 412.3586, |
|
"eval_samples_per_second": 4.85, |
|
"eval_steps_per_second": 1.213, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 5.339505562422851e-07, |
|
"logits/chosen": 0.6899782419204712, |
|
"logits/rejected": 0.8187691569328308, |
|
"logps/chosen": -177.58436584472656, |
|
"logps/rejected": -121.7637939453125, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.6187499761581421, |
|
"rewards/chosen": 0.0037464499473571777, |
|
"rewards/margins": 0.0031559974886476994, |
|
"rewards/rejected": 0.0005904529243707657, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 4.915255902165734e-07, |
|
"logits/chosen": 0.6754826903343201, |
|
"logits/rejected": 0.8096501231193542, |
|
"logps/chosen": -173.49542236328125, |
|
"logps/rejected": -120.12110900878906, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": 0.004369380883872509, |
|
"rewards/margins": 0.0036476694513112307, |
|
"rewards/rejected": 0.000721711665391922, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 4.506723551657879e-07, |
|
"logits/chosen": 0.5708236694335938, |
|
"logits/rejected": 0.7320288419723511, |
|
"logps/chosen": -179.21517944335938, |
|
"logps/rejected": -131.3833465576172, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": 0.004431615583598614, |
|
"rewards/margins": 0.003471215022727847, |
|
"rewards/rejected": 0.0009604001534171402, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 4.11422820775299e-07, |
|
"logits/chosen": 0.6452796459197998, |
|
"logits/rejected": 0.8163026571273804, |
|
"logps/chosen": -205.68319702148438, |
|
"logps/rejected": -129.33828735351562, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": 0.004141085781157017, |
|
"rewards/margins": 0.0046521080657839775, |
|
"rewards/rejected": -0.0005110226338729262, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 3.7380770175506397e-07, |
|
"logits/chosen": 0.5789592862129211, |
|
"logits/rejected": 0.7904280424118042, |
|
"logps/chosen": -180.31417846679688, |
|
"logps/rejected": -129.52687072753906, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.004802372306585312, |
|
"rewards/margins": 0.004047960974276066, |
|
"rewards/rejected": 0.0007544115069322288, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 3.3785643380384063e-07, |
|
"logits/chosen": 0.6884064674377441, |
|
"logits/rejected": 0.8517038226127625, |
|
"logps/chosen": -177.23248291015625, |
|
"logps/rejected": -117.1332778930664, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": 0.0034295388031750917, |
|
"rewards/margins": 0.0038840598426759243, |
|
"rewards/rejected": -0.0004545215633697808, |
|
"step": 1060 |
|
}, |
|
    {
      "epoch": 3.42,
      "learning_rate": 3.0359715057429186e-07,
      "logits/chosen": 0.5767031908035278,
      "logits/rejected": 0.6466313004493713,
      "logps/chosen": -172.95346069335938,
      "logps/rejected": -127.76350402832031,
      "loss": 0.0001,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.0043509481474757195,
      "rewards/margins": 0.0033387758303433657,
      "rewards/rejected": 0.0010121725499629974,
      "step": 1070
    },
    {
      "epoch": 3.46,
      "learning_rate": 2.710566616570048e-07,
      "logits/chosen": 0.661910891532898,
      "logits/rejected": 0.8002559542655945,
      "logps/chosen": -177.2826690673828,
      "logps/rejected": -131.19056701660156,
      "loss": 0.0001,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": 0.0037956517189741135,
      "rewards/margins": 0.003371240571141243,
      "rewards/rejected": 0.0004244106821715832,
      "step": 1080
    },
    {
      "epoch": 3.49,
      "learning_rate": 2.40260431600654e-07,
      "logits/chosen": 0.6113812923431396,
      "logits/rejected": 0.8205118179321289,
      "logps/chosen": -174.49224853515625,
      "logps/rejected": -135.5587158203125,
      "loss": 0.0001,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.004168152809143066,
      "rewards/margins": 0.0029846313409507275,
      "rewards/rejected": 0.0011835219338536263,
      "step": 1090
    },
    {
      "epoch": 3.52,
      "learning_rate": 2.1123255998472952e-07,
      "logits/chosen": 0.6319399476051331,
      "logits/rejected": 0.7784051895141602,
      "logps/chosen": -160.6739501953125,
      "logps/rejected": -114.9722671508789,
      "loss": 0.0001,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 0.004197821486741304,
      "rewards/margins": 0.0031375852413475513,
      "rewards/rejected": 0.0010602364782243967,
      "step": 1100
    },
    {
      "epoch": 3.52,
      "eval_logits/chosen": 0.8145170211791992,
      "eval_logits/rejected": 0.8724321126937866,
      "eval_logps/chosen": -256.90802001953125,
      "eval_logps/rejected": -233.87786865234375,
      "eval_loss": 0.00016770198999438435,
      "eval_rewards/accuracies": 0.5264999866485596,
      "eval_rewards/chosen": -0.00293838232755661,
      "eval_rewards/margins": 0.00045752059668302536,
      "eval_rewards/rejected": -0.003395902691408992,
      "eval_runtime": 412.1486,
      "eval_samples_per_second": 4.853,
      "eval_steps_per_second": 1.213,
      "step": 1100
    },
    {
      "epoch": 3.55,
      "learning_rate": 1.8399576256041525e-07,
      "logits/chosen": 0.6978103518486023,
      "logits/rejected": 0.7642599940299988,
      "logps/chosen": -188.5946502685547,
      "logps/rejected": -142.72915649414062,
      "loss": 0.0001,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": 0.004109769128262997,
      "rewards/margins": 0.003021980170160532,
      "rewards/rejected": 0.0010877888416871428,
      "step": 1110
    },
    {
      "epoch": 3.58,
      "learning_rate": 1.58571353474391e-07,
      "logits/chosen": 0.6541138887405396,
      "logits/rejected": 0.7682735323905945,
      "logps/chosen": -157.97927856445312,
      "logps/rejected": -120.40803527832031,
      "loss": 0.0001,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.0030703709926456213,
      "rewards/margins": 0.0024266496766358614,
      "rewards/rejected": 0.0006437213160097599,
      "step": 1120
    },
    {
      "epoch": 3.62,
      "learning_rate": 1.3497922858944857e-07,
      "logits/chosen": 0.7424976229667664,
      "logits/rejected": 0.8078197240829468,
      "logps/chosen": -174.2366943359375,
      "logps/rejected": -131.4811553955078,
      "loss": 0.0001,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 0.004558051936328411,
      "rewards/margins": 0.003959262277930975,
      "rewards/rejected": 0.0005987894837744534,
      "step": 1130
    },
    {
      "epoch": 3.65,
      "learning_rate": 1.1323784991499471e-07,
      "logits/chosen": 0.6016920804977417,
      "logits/rejected": 0.7492619752883911,
      "logps/chosen": -172.1629638671875,
      "logps/rejected": -116.34178161621094,
      "loss": 0.0001,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": 0.004433467984199524,
      "rewards/margins": 0.004034010227769613,
      "rewards/rejected": 0.0003994574653916061,
      "step": 1140
    },
    {
      "epoch": 3.68,
      "learning_rate": 9.336423115961002e-08,
      "logits/chosen": 0.6536157727241516,
      "logits/rejected": 0.8383282423019409,
      "logps/chosen": -179.27108764648438,
      "logps/rejected": -121.70698547363281,
      "loss": 0.0001,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.00442055519670248,
      "rewards/margins": 0.003978613764047623,
      "rewards/rejected": 0.00044194143265485764,
      "step": 1150
    },
    {
      "epoch": 3.71,
      "learning_rate": 7.537392441697793e-08,
      "logits/chosen": 0.6289481520652771,
      "logits/rejected": 0.8159587979316711,
      "logps/chosen": -178.74453735351562,
      "logps/rejected": -126.57807922363281,
      "loss": 0.0001,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.004435301758348942,
      "rewards/margins": 0.0039503672160208225,
      "rewards/rejected": 0.0004849349206779152,
      "step": 1160
    },
    {
      "epoch": 3.74,
      "learning_rate": 5.928100799559938e-08,
      "logits/chosen": 0.6689172387123108,
      "logits/rejected": 0.7863209843635559,
      "logps/chosen": -178.4945068359375,
      "logps/rejected": -124.7654037475586,
      "loss": 0.0001,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": 0.005153838545084,
      "rewards/margins": 0.003751266747713089,
      "rewards/rejected": 0.001402571564540267,
      "step": 1170
    },
    {
      "epoch": 3.78,
      "learning_rate": 4.5098075401815435e-08,
      "logits/chosen": 0.6192042827606201,
      "logits/rejected": 0.76641446352005,
      "logps/chosen": -171.137939453125,
      "logps/rejected": -125.4515151977539,
      "loss": 0.0001,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.004533619619905949,
      "rewards/margins": 0.003654058324173093,
      "rewards/rejected": 0.000879561179317534,
      "step": 1180
    },
    {
      "epoch": 3.81,
      "learning_rate": 3.283622548476445e-08,
      "logits/chosen": 0.6808220148086548,
      "logits/rejected": 0.8677403330802917,
      "logps/chosen": -178.3741455078125,
      "logps/rejected": -125.1767349243164,
      "loss": 0.0001,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.005535735748708248,
      "rewards/margins": 0.004433406982570887,
      "rewards/rejected": 0.0011023286497220397,
      "step": 1190
    },
    {
      "epoch": 3.84,
      "learning_rate": 2.250505375098161e-08,
      "logits/chosen": 0.669333815574646,
      "logits/rejected": 0.808275043964386,
      "logps/chosen": -176.30514526367188,
      "logps/rejected": -125.28890228271484,
      "loss": 0.0001,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.004538280423730612,
      "rewards/margins": 0.0036300034262239933,
      "rewards/rejected": 0.0009082768228836358,
      "step": 1200
    },
    {
      "epoch": 3.84,
      "eval_logits/chosen": 0.8126665353775024,
      "eval_logits/rejected": 0.8705229759216309,
      "eval_logps/chosen": -256.92266845703125,
      "eval_logps/rejected": -233.87326049804688,
      "eval_loss": 0.00017112624482251704,
      "eval_rewards/accuracies": 0.5044999718666077,
      "eval_rewards/chosen": -0.0030848486348986626,
      "eval_rewards/margins": 0.0002651172399055213,
      "eval_rewards/rejected": -0.0033499656710773706,
      "eval_runtime": 412.2112,
      "eval_samples_per_second": 4.852,
      "eval_steps_per_second": 1.213,
      "step": 1200
    },
    {
      "epoch": 3.87,
      "learning_rate": 1.4112644855438228e-08,
      "logits/chosen": 0.6185505390167236,
      "logits/rejected": 0.786270260810852,
      "logps/chosen": -172.150634765625,
      "logps/rejected": -123.99739074707031,
      "loss": 0.0001,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 0.005183863919228315,
      "rewards/margins": 0.004181054420769215,
      "rewards/rejected": 0.0010028090327978134,
      "step": 1210
    },
    {
      "epoch": 3.9,
      "learning_rate": 7.665566274897007e-09,
      "logits/chosen": 0.6319261789321899,
      "logits/rejected": 0.8173401951789856,
      "logps/chosen": -197.80917358398438,
      "logps/rejected": -135.27621459960938,
      "loss": 0.0001,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.005665967706590891,
      "rewards/margins": 0.005035373382270336,
      "rewards/rejected": 0.0006305938586592674,
      "step": 1220
    },
    {
      "epoch": 3.94,
      "learning_rate": 3.1688631685364292e-09,
      "logits/chosen": 0.5452470183372498,
      "logits/rejected": 0.7646620869636536,
      "logps/chosen": -184.90304565429688,
      "logps/rejected": -128.32806396484375,
      "loss": 0.0001,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": 0.0048486096784472466,
      "rewards/margins": 0.004199582617729902,
      "rewards/rejected": 0.0006490271771326661,
      "step": 1230
    },
    {
      "epoch": 3.97,
      "learning_rate": 6.260544298619664e-10,
      "logits/chosen": 0.6011223793029785,
      "logits/rejected": 0.7510023713111877,
      "logps/chosen": -178.15272521972656,
      "logps/rejected": -124.17759704589844,
      "loss": 0.0001,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.0051615191623568535,
      "rewards/margins": 0.003729583229869604,
      "rewards/rejected": 0.0014319362817332149,
      "step": 1240
    },
    {
      "epoch": 3.99,
      "step": 1248,
      "total_flos": 0.0,
      "train_loss": 9.35627439654733e-05,
      "train_runtime": 14053.8323,
      "train_samples_per_second": 1.423,
      "train_steps_per_second": 0.089
    }
  ],
  "logging_steps": 10,
  "max_steps": 1248,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}