|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.9996020692399522, |
|
"eval_steps": 100, |
|
"global_step": 1256, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.007958615200955034, |
|
"grad_norm": 4.841538053414044, |
|
"learning_rate": 3.968253968253968e-08, |
|
"logits/chosen": -2.470052719116211, |
|
"logits/rejected": -2.4655823707580566, |
|
"logps/chosen": -157.4224090576172, |
|
"logps/rejected": -164.1490936279297, |
|
"loss": 0.6932, |
|
"rewards/accuracies": 0.375, |
|
"rewards/chosen": -0.0001321556919720024, |
|
"rewards/margins": -0.0003976511361543089, |
|
"rewards/rejected": 0.00026549544418230653, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.01591723040191007, |
|
"grad_norm": 5.024112543339822, |
|
"learning_rate": 7.936507936507936e-08, |
|
"logits/chosen": -2.4524307250976562, |
|
"logits/rejected": -2.4474711418151855, |
|
"logps/chosen": -142.13648986816406, |
|
"logps/rejected": -142.22164916992188, |
|
"loss": 0.6932, |
|
"rewards/accuracies": 0.4625000059604645, |
|
"rewards/chosen": -0.0001045634489855729, |
|
"rewards/margins": -0.0003915712586604059, |
|
"rewards/rejected": 0.0002870078315027058, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.0238758456028651, |
|
"grad_norm": 4.743494095766565, |
|
"learning_rate": 1.1904761904761903e-07, |
|
"logits/chosen": -2.4943318367004395, |
|
"logits/rejected": -2.48530912399292, |
|
"logps/chosen": -154.43202209472656, |
|
"logps/rejected": -149.37612915039062, |
|
"loss": 0.6929, |
|
"rewards/accuracies": 0.5249999761581421, |
|
"rewards/chosen": 0.0018067583441734314, |
|
"rewards/margins": 0.0005435317871160805, |
|
"rewards/rejected": 0.0012632266152650118, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.03183446080382014, |
|
"grad_norm": 4.832210059469465, |
|
"learning_rate": 1.5873015873015872e-07, |
|
"logits/chosen": -2.5287983417510986, |
|
"logits/rejected": -2.510993242263794, |
|
"logps/chosen": -135.90139770507812, |
|
"logps/rejected": -132.0488739013672, |
|
"loss": 0.6927, |
|
"rewards/accuracies": 0.53125, |
|
"rewards/chosen": 0.004627659916877747, |
|
"rewards/margins": 0.000702070421539247, |
|
"rewards/rejected": 0.003925589844584465, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.03979307600477517, |
|
"grad_norm": 4.526944976438542, |
|
"learning_rate": 1.984126984126984e-07, |
|
"logits/chosen": -2.525243043899536, |
|
"logits/rejected": -2.4904685020446777, |
|
"logps/chosen": -157.05628967285156, |
|
"logps/rejected": -141.57211303710938, |
|
"loss": 0.6918, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": 0.013502730056643486, |
|
"rewards/margins": 0.005045468918979168, |
|
"rewards/rejected": 0.008457262068986893, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.0477516912057302, |
|
"grad_norm": 4.47136990235777, |
|
"learning_rate": 2.3809523809523806e-07, |
|
"logits/chosen": -2.4738245010375977, |
|
"logits/rejected": -2.4801831245422363, |
|
"logps/chosen": -150.5889892578125, |
|
"logps/rejected": -159.7234344482422, |
|
"loss": 0.6903, |
|
"rewards/accuracies": 0.5562499761581421, |
|
"rewards/chosen": 0.01993567869067192, |
|
"rewards/margins": 0.004683250095695257, |
|
"rewards/rejected": 0.0152524309232831, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.055710306406685235, |
|
"grad_norm": 4.904005180990896, |
|
"learning_rate": 2.7777777777777776e-07, |
|
"logits/chosen": -2.5422394275665283, |
|
"logits/rejected": -2.529735565185547, |
|
"logps/chosen": -165.7467803955078, |
|
"logps/rejected": -162.11795043945312, |
|
"loss": 0.688, |
|
"rewards/accuracies": 0.5687500238418579, |
|
"rewards/chosen": 0.013221414759755135, |
|
"rewards/margins": 0.010048055090010166, |
|
"rewards/rejected": 0.003173359902575612, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.06366892160764027, |
|
"grad_norm": 5.205118982895456, |
|
"learning_rate": 3.1746031746031743e-07, |
|
"logits/chosen": -2.5232584476470947, |
|
"logits/rejected": -2.5178704261779785, |
|
"logps/chosen": -147.32882690429688, |
|
"logps/rejected": -160.47280883789062, |
|
"loss": 0.6843, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": -0.013839629478752613, |
|
"rewards/margins": 0.023469623178243637, |
|
"rewards/rejected": -0.037309251725673676, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.07162753680859531, |
|
"grad_norm": 6.494638432518059, |
|
"learning_rate": 3.5714285714285716e-07, |
|
"logits/chosen": -2.510326385498047, |
|
"logits/rejected": -2.4887425899505615, |
|
"logps/chosen": -148.05128479003906, |
|
"logps/rejected": -149.84288024902344, |
|
"loss": 0.6798, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": -0.1187344640493393, |
|
"rewards/margins": 0.04123911261558533, |
|
"rewards/rejected": -0.15997359156608582, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.07958615200955034, |
|
"grad_norm": 5.5529916484322746, |
|
"learning_rate": 3.968253968253968e-07, |
|
"logits/chosen": -2.4746994972229004, |
|
"logits/rejected": -2.4473915100097656, |
|
"logps/chosen": -173.7371368408203, |
|
"logps/rejected": -159.02810668945312, |
|
"loss": 0.6755, |
|
"rewards/accuracies": 0.550000011920929, |
|
"rewards/chosen": -0.13897764682769775, |
|
"rewards/margins": 0.035591237246990204, |
|
"rewards/rejected": -0.17456887662410736, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.07958615200955034, |
|
"eval_logits/chosen": -2.4733359813690186, |
|
"eval_logits/rejected": -2.457453489303589, |
|
"eval_logps/chosen": -158.66261291503906, |
|
"eval_logps/rejected": -169.2390899658203, |
|
"eval_loss": 0.6757674217224121, |
|
"eval_rewards/accuracies": 0.58302241563797, |
|
"eval_rewards/chosen": -0.11758849024772644, |
|
"eval_rewards/margins": 0.044215064495801926, |
|
"eval_rewards/rejected": -0.16180357336997986, |
|
"eval_runtime": 347.737, |
|
"eval_samples_per_second": 24.593, |
|
"eval_steps_per_second": 0.385, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.08754476721050537, |
|
"grad_norm": 6.715544747804576, |
|
"learning_rate": 4.365079365079365e-07, |
|
"logits/chosen": -2.4566893577575684, |
|
"logits/rejected": -2.416964530944824, |
|
"logps/chosen": -170.65357971191406, |
|
"logps/rejected": -163.5463104248047, |
|
"loss": 0.6704, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": -0.16277848184108734, |
|
"rewards/margins": 0.05988674238324165, |
|
"rewards/rejected": -0.2226652204990387, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.0955033824114604, |
|
"grad_norm": 9.596141007780728, |
|
"learning_rate": 4.761904761904761e-07, |
|
"logits/chosen": -2.4483439922332764, |
|
"logits/rejected": -2.454890012741089, |
|
"logps/chosen": -172.45748901367188, |
|
"logps/rejected": -192.52012634277344, |
|
"loss": 0.6658, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.3439694941043854, |
|
"rewards/margins": 0.06312272697687149, |
|
"rewards/rejected": -0.40709224343299866, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.10346199761241544, |
|
"grad_norm": 13.202078476460924, |
|
"learning_rate": 4.999845414634076e-07, |
|
"logits/chosen": -2.4176621437072754, |
|
"logits/rejected": -2.3901853561401367, |
|
"logps/chosen": -193.67239379882812, |
|
"logps/rejected": -195.47042846679688, |
|
"loss": 0.6499, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": -0.41546326875686646, |
|
"rewards/margins": 0.09810779988765717, |
|
"rewards/rejected": -0.51357102394104, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.11142061281337047, |
|
"grad_norm": 22.684431556876046, |
|
"learning_rate": 4.998106548810311e-07, |
|
"logits/chosen": -1.8343102931976318, |
|
"logits/rejected": -1.7827365398406982, |
|
"logps/chosen": -209.33108520507812, |
|
"logps/rejected": -233.1872100830078, |
|
"loss": 0.6356, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -0.6506957411766052, |
|
"rewards/margins": 0.23305030167102814, |
|
"rewards/rejected": -0.8837459683418274, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.1193792280143255, |
|
"grad_norm": 21.199901451025738, |
|
"learning_rate": 4.994436933879359e-07, |
|
"logits/chosen": -1.7879050970077515, |
|
"logits/rejected": -1.6920697689056396, |
|
"logps/chosen": -249.83236694335938, |
|
"logps/rejected": -267.5176696777344, |
|
"loss": 0.6217, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.9289053678512573, |
|
"rewards/margins": 0.2332083284854889, |
|
"rewards/rejected": -1.162113904953003, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.12733784321528055, |
|
"grad_norm": 14.830944311799147, |
|
"learning_rate": 4.988839406031596e-07, |
|
"logits/chosen": -1.9281498193740845, |
|
"logits/rejected": -1.8930234909057617, |
|
"logps/chosen": -225.1186981201172, |
|
"logps/rejected": -262.7356262207031, |
|
"loss": 0.6212, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -0.8514399528503418, |
|
"rewards/margins": 0.2691754698753357, |
|
"rewards/rejected": -1.1206153631210327, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.13529645841623558, |
|
"grad_norm": 20.510239439728636, |
|
"learning_rate": 4.981318291512395e-07, |
|
"logits/chosen": -1.838880181312561, |
|
"logits/rejected": -1.7559387683868408, |
|
"logps/chosen": -188.37014770507812, |
|
"logps/rejected": -228.2301025390625, |
|
"loss": 0.6211, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.48021888732910156, |
|
"rewards/margins": 0.42019492387771606, |
|
"rewards/rejected": -0.9004138112068176, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.14325507361719061, |
|
"grad_norm": 25.59372376753845, |
|
"learning_rate": 4.971879403278432e-07, |
|
"logits/chosen": -1.7563384771347046, |
|
"logits/rejected": -1.6859989166259766, |
|
"logps/chosen": -209.96487426757812, |
|
"logps/rejected": -244.59176635742188, |
|
"loss": 0.6016, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.6993848085403442, |
|
"rewards/margins": 0.3423052430152893, |
|
"rewards/rejected": -1.0416901111602783, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.15121368881814565, |
|
"grad_norm": 19.61741462433235, |
|
"learning_rate": 4.960530036504941e-07, |
|
"logits/chosen": -1.0844591856002808, |
|
"logits/rejected": -0.939785361289978, |
|
"logps/chosen": -214.14883422851562, |
|
"logps/rejected": -269.7398376464844, |
|
"loss": 0.5844, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.7484572529792786, |
|
"rewards/margins": 0.5090824365615845, |
|
"rewards/rejected": -1.2575398683547974, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.15917230401910068, |
|
"grad_norm": 20.322889470392276, |
|
"learning_rate": 4.947278962947386e-07, |
|
"logits/chosen": -0.841314435005188, |
|
"logits/rejected": -0.6785773038864136, |
|
"logps/chosen": -217.5854949951172, |
|
"logps/rejected": -252.13394165039062, |
|
"loss": 0.5965, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -0.734225332736969, |
|
"rewards/margins": 0.3655502200126648, |
|
"rewards/rejected": -1.0997755527496338, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.15917230401910068, |
|
"eval_logits/chosen": -0.5108200907707214, |
|
"eval_logits/rejected": -0.29105332493782043, |
|
"eval_logps/chosen": -253.34841918945312, |
|
"eval_logps/rejected": -302.3987731933594, |
|
"eval_loss": 0.5933550000190735, |
|
"eval_rewards/accuracies": 0.6772388219833374, |
|
"eval_rewards/chosen": -1.0644464492797852, |
|
"eval_rewards/margins": 0.42895349860191345, |
|
"eval_rewards/rejected": -1.4933998584747314, |
|
"eval_runtime": 347.7398, |
|
"eval_samples_per_second": 24.593, |
|
"eval_steps_per_second": 0.385, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.1671309192200557, |
|
"grad_norm": 22.814312588520362, |
|
"learning_rate": 4.932136424161899e-07, |
|
"logits/chosen": -0.14724041521549225, |
|
"logits/rejected": 0.11416204273700714, |
|
"logps/chosen": -296.7062072753906, |
|
"logps/rejected": -309.0001525878906, |
|
"loss": 0.5991, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -1.3078434467315674, |
|
"rewards/margins": 0.296301931142807, |
|
"rewards/rejected": -1.6041454076766968, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.17508953442101075, |
|
"grad_norm": 21.458184320123785, |
|
"learning_rate": 4.915114123589732e-07, |
|
"logits/chosen": -0.4067531228065491, |
|
"logits/rejected": 0.039240967482328415, |
|
"logps/chosen": -275.824462890625, |
|
"logps/rejected": -349.72113037109375, |
|
"loss": 0.5687, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -1.1693265438079834, |
|
"rewards/margins": 0.5683996081352234, |
|
"rewards/rejected": -1.7377259731292725, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.18304814962196578, |
|
"grad_norm": 18.192679070915965, |
|
"learning_rate": 4.896225217511849e-07, |
|
"logits/chosen": -0.3498181998729706, |
|
"logits/rejected": -0.03378605097532272, |
|
"logps/chosen": -270.6407775878906, |
|
"logps/rejected": -321.39971923828125, |
|
"loss": 0.5735, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -1.18222177028656, |
|
"rewards/margins": 0.45714884996414185, |
|
"rewards/rejected": -1.6393705606460571, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.1910067648229208, |
|
"grad_norm": 17.971534261191277, |
|
"learning_rate": 4.875484304880629e-07, |
|
"logits/chosen": -0.42381399869918823, |
|
"logits/rejected": -0.07996497303247452, |
|
"logps/chosen": -282.5169982910156, |
|
"logps/rejected": -310.2297668457031, |
|
"loss": 0.5686, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -1.2509108781814575, |
|
"rewards/margins": 0.36313191056251526, |
|
"rewards/rejected": -1.6140426397323608, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.19896538002387584, |
|
"grad_norm": 19.570671154398198, |
|
"learning_rate": 4.852907416036558e-07, |
|
"logits/chosen": -1.4505198001861572, |
|
"logits/rejected": -0.9843381643295288, |
|
"logps/chosen": -237.9661407470703, |
|
"logps/rejected": -284.70269775390625, |
|
"loss": 0.5926, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.8647058606147766, |
|
"rewards/margins": 0.535582423210144, |
|
"rewards/rejected": -1.4002882242202759, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.20692399522483088, |
|
"grad_norm": 16.95113035500878, |
|
"learning_rate": 4.828512000318616e-07, |
|
"logits/chosen": -0.7392469644546509, |
|
"logits/rejected": -0.28931230306625366, |
|
"logps/chosen": -262.4371643066406, |
|
"logps/rejected": -301.09613037109375, |
|
"loss": 0.5726, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -1.2168900966644287, |
|
"rewards/margins": 0.40174025297164917, |
|
"rewards/rejected": -1.6186301708221436, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.2148826104257859, |
|
"grad_norm": 16.523246341979362, |
|
"learning_rate": 4.802316912577946e-07, |
|
"logits/chosen": -0.058869220316410065, |
|
"logits/rejected": 0.5011920928955078, |
|
"logps/chosen": -253.3494110107422, |
|
"logps/rejected": -311.0137939453125, |
|
"loss": 0.5791, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -1.2564125061035156, |
|
"rewards/margins": 0.604422926902771, |
|
"rewards/rejected": -1.8608356714248657, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.22284122562674094, |
|
"grad_norm": 19.225450591324332, |
|
"learning_rate": 4.774342398605221e-07, |
|
"logits/chosen": 0.02062203921377659, |
|
"logits/rejected": 0.6116986274719238, |
|
"logps/chosen": -289.97467041015625, |
|
"logps/rejected": -309.21142578125, |
|
"loss": 0.5711, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -1.2227064371109009, |
|
"rewards/margins": 0.4611144959926605, |
|
"rewards/rejected": -1.6838209629058838, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.23079984082769597, |
|
"grad_norm": 18.086581218295944, |
|
"learning_rate": 4.744610079482978e-07, |
|
"logits/chosen": -0.2072794884443283, |
|
"logits/rejected": 0.367055743932724, |
|
"logps/chosen": -283.09442138671875, |
|
"logps/rejected": -332.0167541503906, |
|
"loss": 0.5907, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -1.336042046546936, |
|
"rewards/margins": 0.5237594246864319, |
|
"rewards/rejected": -1.8598015308380127, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.238758456028651, |
|
"grad_norm": 21.35763469044062, |
|
"learning_rate": 4.713142934875005e-07, |
|
"logits/chosen": -0.6442250609397888, |
|
"logits/rejected": -0.12395715713500977, |
|
"logps/chosen": -272.5120544433594, |
|
"logps/rejected": -335.85205078125, |
|
"loss": 0.5621, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -1.311974287033081, |
|
"rewards/margins": 0.5121132731437683, |
|
"rewards/rejected": -1.8240875005722046, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.238758456028651, |
|
"eval_logits/chosen": -0.1607174426317215, |
|
"eval_logits/rejected": 0.31636956334114075, |
|
"eval_logps/chosen": -280.8055114746094, |
|
"eval_logps/rejected": -342.06884765625, |
|
"eval_loss": 0.5711521506309509, |
|
"eval_rewards/accuracies": 0.6875, |
|
"eval_rewards/chosen": -1.3390170335769653, |
|
"eval_rewards/margins": 0.5510838627815247, |
|
"eval_rewards/rejected": -1.8901009559631348, |
|
"eval_runtime": 347.894, |
|
"eval_samples_per_second": 24.582, |
|
"eval_steps_per_second": 0.385, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.24671707122960604, |
|
"grad_norm": 18.925595409949292, |
|
"learning_rate": 4.679965285265706e-07, |
|
"logits/chosen": 0.1555846780538559, |
|
"logits/rejected": 0.7421888709068298, |
|
"logps/chosen": -270.05670166015625, |
|
"logps/rejected": -335.81304931640625, |
|
"loss": 0.5604, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -1.152674674987793, |
|
"rewards/margins": 0.6260913610458374, |
|
"rewards/rejected": -1.7787659168243408, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.2546756864305611, |
|
"grad_norm": 20.721991698227423, |
|
"learning_rate": 4.64510277316316e-07, |
|
"logits/chosen": 0.32529157400131226, |
|
"logits/rejected": 0.7045286893844604, |
|
"logps/chosen": -249.66665649414062, |
|
"logps/rejected": -322.38623046875, |
|
"loss": 0.5451, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -1.1683197021484375, |
|
"rewards/margins": 0.6144580841064453, |
|
"rewards/rejected": -1.7827777862548828, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.26263430163151613, |
|
"grad_norm": 14.642325287307886, |
|
"learning_rate": 4.6085823432804137e-07, |
|
"logits/chosen": 0.47873038053512573, |
|
"logits/rejected": 0.7926498055458069, |
|
"logps/chosen": -267.3265075683594, |
|
"logps/rejected": -309.6785888671875, |
|
"loss": 0.5785, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -1.2106026411056519, |
|
"rewards/margins": 0.4037841260433197, |
|
"rewards/rejected": -1.614386796951294, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.27059291683247116, |
|
"grad_norm": 21.85817503072542, |
|
"learning_rate": 4.570432221710314e-07, |
|
"logits/chosen": -0.6919598579406738, |
|
"logits/rejected": -0.20407061278820038, |
|
"logps/chosen": -269.3352355957031, |
|
"logps/rejected": -297.7936096191406, |
|
"loss": 0.566, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": -1.2421492338180542, |
|
"rewards/margins": 0.45925775170326233, |
|
"rewards/rejected": -1.7014070749282837, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.2785515320334262, |
|
"grad_norm": 21.890637546837947, |
|
"learning_rate": 4.5306818941099866e-07, |
|
"logits/chosen": 0.06561018526554108, |
|
"logits/rejected": 0.5712476968765259, |
|
"logps/chosen": -322.6842041015625, |
|
"logps/rejected": -386.76849365234375, |
|
"loss": 0.5751, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -1.7097322940826416, |
|
"rewards/margins": 0.580051064491272, |
|
"rewards/rejected": -2.289783477783203, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.28651014723438123, |
|
"grad_norm": 21.606918978741653, |
|
"learning_rate": 4.4893620829118124e-07, |
|
"logits/chosen": 0.25220245122909546, |
|
"logits/rejected": 0.6177417039871216, |
|
"logps/chosen": -311.06549072265625, |
|
"logps/rejected": -332.5730285644531, |
|
"loss": 0.5817, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -1.5046530961990356, |
|
"rewards/margins": 0.4226300120353699, |
|
"rewards/rejected": -1.9272829294204712, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.29446876243533626, |
|
"grad_norm": 18.712312384246502, |
|
"learning_rate": 4.4465047235785185e-07, |
|
"logits/chosen": -0.7520114183425903, |
|
"logits/rejected": -0.39875468611717224, |
|
"logps/chosen": -257.7146911621094, |
|
"logps/rejected": -293.6401062011719, |
|
"loss": 0.5653, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -1.1130893230438232, |
|
"rewards/margins": 0.4012773633003235, |
|
"rewards/rejected": -1.514366626739502, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.3024273776362913, |
|
"grad_norm": 22.820775200718288, |
|
"learning_rate": 4.40214293992074e-07, |
|
"logits/chosen": -0.7524155378341675, |
|
"logits/rejected": -0.3933519721031189, |
|
"logps/chosen": -270.42950439453125, |
|
"logps/rejected": -315.5451354980469, |
|
"loss": 0.5603, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -1.2562357187271118, |
|
"rewards/margins": 0.46937066316604614, |
|
"rewards/rejected": -1.7256062030792236, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.3103859928372463, |
|
"grad_norm": 22.618138742949483, |
|
"learning_rate": 4.3563110184961234e-07, |
|
"logits/chosen": -0.30805596709251404, |
|
"logits/rejected": 0.08593909442424774, |
|
"logps/chosen": -304.3953552246094, |
|
"logps/rejected": -344.43048095703125, |
|
"loss": 0.5511, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -1.488152027130127, |
|
"rewards/margins": 0.6020984649658203, |
|
"rewards/rejected": -2.0902504920959473, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.31834460803820136, |
|
"grad_norm": 24.046968912259764, |
|
"learning_rate": 4.3090443821097566e-07, |
|
"logits/chosen": -0.4176566004753113, |
|
"logits/rejected": -0.04139077663421631, |
|
"logps/chosen": -271.0086669921875, |
|
"logps/rejected": -326.3518981933594, |
|
"loss": 0.551, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -1.2549865245819092, |
|
"rewards/margins": 0.5567766427993774, |
|
"rewards/rejected": -1.8117631673812866, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.31834460803820136, |
|
"eval_logits/chosen": -0.6012183427810669, |
|
"eval_logits/rejected": -0.20857903361320496, |
|
"eval_logps/chosen": -268.00634765625, |
|
"eval_logps/rejected": -335.35748291015625, |
|
"eval_loss": 0.5651077628135681, |
|
"eval_rewards/accuracies": 0.7192164063453674, |
|
"eval_rewards/chosen": -1.2110257148742676, |
|
"eval_rewards/margins": 0.6119614243507385, |
|
"eval_rewards/rejected": -1.8229870796203613, |
|
"eval_runtime": 348.003, |
|
"eval_samples_per_second": 24.574, |
|
"eval_steps_per_second": 0.385, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.3263032232391564, |
|
"grad_norm": 22.684798823358562, |
|
"learning_rate": 4.2603795624364195e-07, |
|
"logits/chosen": -0.4925737977027893, |
|
"logits/rejected": -0.12107300758361816, |
|
"logps/chosen": -286.2200927734375, |
|
"logps/rejected": -347.74822998046875, |
|
"loss": 0.5528, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -1.3317722082138062, |
|
"rewards/margins": 0.5646699666976929, |
|
"rewards/rejected": -1.8964424133300781, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.3342618384401114, |
|
"grad_norm": 21.49498601600208, |
|
"learning_rate": 4.210354171785795e-07, |
|
"logits/chosen": -0.2925480008125305, |
|
"logits/rejected": 0.1620359718799591, |
|
"logps/chosen": -324.54119873046875, |
|
"logps/rejected": -375.17254638671875, |
|
"loss": 0.5617, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -1.7158241271972656, |
|
"rewards/margins": 0.5555315017700195, |
|
"rewards/rejected": -2.2713558673858643, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.34222045364106646, |
|
"grad_norm": 21.031716510195604, |
|
"learning_rate": 4.15900687403248e-07, |
|
"logits/chosen": -0.6163953542709351, |
|
"logits/rejected": -0.1466153860092163, |
|
"logps/chosen": -305.3257751464844, |
|
"logps/rejected": -376.08660888671875, |
|
"loss": 0.5559, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -1.4343578815460205, |
|
"rewards/margins": 0.6992334127426147, |
|
"rewards/rejected": -2.1335911750793457, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.3501790688420215, |
|
"grad_norm": 20.623388608927577, |
|
"learning_rate": 4.1063773547332584e-07, |
|
"logits/chosen": -0.6795397996902466, |
|
"logits/rejected": -0.08995871245861053, |
|
"logps/chosen": -294.76861572265625, |
|
"logps/rejected": -366.74078369140625, |
|
"loss": 0.5548, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -1.4508978128433228, |
|
"rewards/margins": 0.8005591630935669, |
|
"rewards/rejected": -2.2514569759368896, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.3581376840429765, |
|
"grad_norm": 18.86752878832109, |
|
"learning_rate": 4.0525062904547276e-07, |
|
"logits/chosen": -1.0369296073913574, |
|
"logits/rejected": -0.7145065665245056, |
|
"logps/chosen": -260.9083557128906, |
|
"logps/rejected": -321.2560729980469, |
|
"loss": 0.5713, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -1.2059574127197266, |
|
"rewards/margins": 0.5262748003005981, |
|
"rewards/rejected": -1.7322320938110352, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.36609629924393156, |
|
"grad_norm": 16.972689064266323, |
|
"learning_rate": 3.997435317334988e-07, |
|
"logits/chosen": -1.0485179424285889, |
|
"logits/rejected": -0.5938493609428406, |
|
"logps/chosen": -259.70257568359375, |
|
"logps/rejected": -303.14208984375, |
|
"loss": 0.5614, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -1.2105125188827515, |
|
"rewards/margins": 0.5468652248382568, |
|
"rewards/rejected": -1.7573776245117188, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.3740549144448866, |
|
"grad_norm": 20.430854113440475, |
|
"learning_rate": 3.941206998903701e-07, |
|
"logits/chosen": -0.8004724383354187, |
|
"logits/rejected": -0.3017769455909729, |
|
"logps/chosen": -305.0013122558594, |
|
"logps/rejected": -351.1456604003906, |
|
"loss": 0.5549, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -1.5550200939178467, |
|
"rewards/margins": 0.6512759327888489, |
|
"rewards/rejected": -2.206295967102051, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.3820135296458416, |
|
"grad_norm": 23.092485323541265, |
|
"learning_rate": 3.8838647931853684e-07, |
|
"logits/chosen": -0.3017476201057434, |
|
"logits/rejected": 0.17767734825611115, |
|
"logps/chosen": -338.9472961425781, |
|
"logps/rejected": -402.6326904296875, |
|
"loss": 0.5403, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -1.9183552265167236, |
|
"rewards/margins": 0.6131450533866882, |
|
"rewards/rejected": -2.5315003395080566, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.38997214484679665, |
|
"grad_norm": 18.08015650723665, |
|
"learning_rate": 3.825453019111281e-07, |
|
"logits/chosen": -0.07430411875247955, |
|
"logits/rejected": 0.602587878704071, |
|
"logps/chosen": -348.27655029296875, |
|
"logps/rejected": -385.1519775390625, |
|
"loss": 0.5459, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -1.762590765953064, |
|
"rewards/margins": 0.6176367402076721, |
|
"rewards/rejected": -2.3802273273468018, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.3979307600477517, |
|
"grad_norm": 20.002403877751092, |
|
"learning_rate": 3.7660168222660824e-07, |
|
"logits/chosen": 0.1473921835422516, |
|
"logits/rejected": 0.9172118902206421, |
|
"logps/chosen": -354.8985290527344, |
|
"logps/rejected": -379.05523681640625, |
|
"loss": 0.5696, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -1.8115737438201904, |
|
"rewards/margins": 0.544532060623169, |
|
"rewards/rejected": -2.3561058044433594, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.3979307600477517, |
|
"eval_logits/chosen": 0.08715073019266129, |
|
"eval_logits/rejected": 0.5150542855262756, |
|
"eval_logps/chosen": -329.2126770019531, |
|
"eval_logps/rejected": -392.61614990234375, |
|
"eval_loss": 0.5572181344032288, |
|
"eval_rewards/accuracies": 0.7229477763175964, |
|
"eval_rewards/chosen": -1.8230891227722168, |
|
"eval_rewards/margins": 0.5724846124649048, |
|
"eval_rewards/rejected": -2.395573854446411, |
|
"eval_runtime": 348.5057, |
|
"eval_samples_per_second": 24.539, |
|
"eval_steps_per_second": 0.384, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.4058893752487067, |
|
"grad_norm": 18.39580411753885, |
|
"learning_rate": 3.705602139995416e-07, |
|
"logits/chosen": 0.08027039468288422, |
|
"logits/rejected": 0.7471383810043335, |
|
"logps/chosen": -305.5868835449219, |
|
"logps/rejected": -390.7528076171875, |
|
"loss": 0.5476, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -1.5938637256622314, |
|
"rewards/margins": 0.8495914340019226, |
|
"rewards/rejected": -2.443455219268799, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.41384799044966175, |
|
"grad_norm": 21.538483001362934, |
|
"learning_rate": 3.6442556659016475e-07, |
|
"logits/chosen": 0.034043505787849426, |
|
"logits/rejected": 0.929337203502655, |
|
"logps/chosen": -279.78143310546875, |
|
"logps/rejected": -357.68695068359375, |
|
"loss": 0.554, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -1.2777533531188965, |
|
"rewards/margins": 0.8431286811828613, |
|
"rewards/rejected": -2.120882034301758, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.4218066056506168, |
|
"grad_norm": 17.59460512186992, |
|
"learning_rate": 3.582024813755076e-07, |
|
"logits/chosen": -0.20782026648521423, |
|
"logits/rejected": 0.35743075609207153, |
|
"logps/chosen": -288.68463134765625, |
|
"logps/rejected": -331.3399963378906, |
|
"loss": 0.5513, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -1.2878282070159912, |
|
"rewards/margins": 0.4769444465637207, |
|
"rewards/rejected": -1.7647724151611328, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.4297652208515718, |
|
"grad_norm": 20.90074242546295, |
|
"learning_rate": 3.5189576808485404e-07, |
|
"logits/chosen": -0.1114623099565506, |
|
"logits/rejected": 0.424373060464859, |
|
"logps/chosen": -293.0398864746094, |
|
"logps/rejected": -363.51739501953125, |
|
"loss": 0.5404, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -1.5382354259490967, |
|
"rewards/margins": 0.6857717633247375, |
|
"rewards/rejected": -2.2240071296691895, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.43772383605252685, |
|
"grad_norm": 23.171714324291212, |
|
"learning_rate": 3.4551030108237433e-07, |
|
"logits/chosen": 0.3947924077510834, |
|
"logits/rejected": 0.8871338963508606, |
|
"logps/chosen": -294.8044738769531, |
|
"logps/rejected": -357.0321350097656, |
|
"loss": 0.5583, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -1.5001775026321411, |
|
"rewards/margins": 0.5571330189704895, |
|
"rewards/rejected": -2.0573105812072754, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.4456824512534819, |
|
"grad_norm": 16.844516375770297, |
|
"learning_rate": 3.390510155998023e-07, |
|
"logits/chosen": 0.7585184574127197, |
|
"logits/rejected": 1.5741997957229614, |
|
"logps/chosen": -327.8985595703125, |
|
"logps/rejected": -400.3335266113281, |
|
"loss": 0.5335, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -1.7798864841461182, |
|
"rewards/margins": 0.7171236872673035, |
|
"rewards/rejected": -2.4970102310180664, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.4536410664544369, |
|
"grad_norm": 19.847258394334578, |
|
"learning_rate": 3.325229039220684e-07, |
|
"logits/chosen": 0.8790721893310547, |
|
"logits/rejected": 1.7523505687713623, |
|
"logps/chosen": -359.2749938964844, |
|
"logps/rejected": -424.80499267578125, |
|
"loss": 0.5515, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -1.9252793788909912, |
|
"rewards/margins": 0.7141390442848206, |
|
"rewards/rejected": -2.639418363571167, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.46159968165539195, |
|
"grad_norm": 16.499266637052294, |
|
"learning_rate": 3.2593101152883795e-07, |
|
"logits/chosen": 0.8924285173416138, |
|
"logits/rejected": 1.5235947370529175, |
|
"logps/chosen": -365.3081970214844, |
|
"logps/rejected": -430.99444580078125, |
|
"loss": 0.5584, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -2.1520824432373047, |
|
"rewards/margins": 0.5842655897140503, |
|
"rewards/rejected": -2.7363476753234863, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.469558296856347, |
|
"grad_norm": 19.289158217917635, |
|
"learning_rate": 3.192804331949349e-07, |
|
"logits/chosen": 0.44026702642440796, |
|
"logits/rejected": 1.019954800605774, |
|
"logps/chosen": -345.7406005859375, |
|
"logps/rejected": -414.36175537109375, |
|
"loss": 0.5328, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -2.0610880851745605, |
|
"rewards/margins": 0.6739950180053711, |
|
"rewards/rejected": -2.7350831031799316, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.477516912057302, |
|
"grad_norm": 20.92118650846254, |
|
"learning_rate": 3.125763090526674e-07, |
|
"logits/chosen": 0.970676064491272, |
|
"logits/rejected": 2.074838399887085, |
|
"logps/chosen": -345.6512145996094, |
|
"logps/rejected": -411.70477294921875, |
|
"loss": 0.5504, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -1.9593521356582642, |
|
"rewards/margins": 0.8702648878097534, |
|
"rewards/rejected": -2.8296167850494385, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.477516912057302, |
|
"eval_logits/chosen": 0.4866677522659302, |
|
"eval_logits/rejected": 1.2370327711105347, |
|
"eval_logps/chosen": -339.2297668457031, |
|
"eval_logps/rejected": -423.96630859375, |
|
"eval_loss": 0.5507791638374329, |
|
"eval_rewards/accuracies": 0.7201492786407471, |
|
"eval_rewards/chosen": -1.9232597351074219, |
|
"eval_rewards/margins": 0.7858158946037292, |
|
"eval_rewards/rejected": -2.709075689315796, |
|
"eval_runtime": 349.0994, |
|
"eval_samples_per_second": 24.497, |
|
"eval_steps_per_second": 0.384, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.48547552725825704, |
|
"grad_norm": 18.912227877255763, |
|
"learning_rate": 3.0582382061909623e-07, |
|
"logits/chosen": 0.08690143376588821, |
|
"logits/rejected": 0.8724335432052612, |
|
"logps/chosen": -364.74578857421875, |
|
"logps/rejected": -458.05487060546875, |
|
"loss": 0.5407, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -1.894468903541565, |
|
"rewards/margins": 0.8513941764831543, |
|
"rewards/rejected": -2.745863437652588, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.4934341424592121, |
|
"grad_norm": 15.345100918286033, |
|
"learning_rate": 2.9902818679131775e-07, |
|
"logits/chosen": -0.0536082498729229, |
|
"logits/rejected": 1.048099398612976, |
|
"logps/chosen": -342.81097412109375, |
|
"logps/rejected": -402.736328125, |
|
"loss": 0.5478, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -1.7372751235961914, |
|
"rewards/margins": 0.8304754495620728, |
|
"rewards/rejected": -2.5677504539489746, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.5013927576601671, |
|
"grad_norm": 19.242517379429756, |
|
"learning_rate": 2.921946598128571e-07, |
|
"logits/chosen": -0.14560095965862274, |
|
"logits/rejected": 0.7982407808303833, |
|
"logps/chosen": -312.3756408691406, |
|
"logps/rejected": -383.5072021484375, |
|
"loss": 0.5432, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -1.5843050479888916, |
|
"rewards/margins": 0.8270316123962402, |
|
"rewards/rejected": -2.411336898803711, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.5093513728611222, |
|
"grad_norm": 19.263253541697434, |
|
"learning_rate": 2.8532852121428733e-07, |
|
"logits/chosen": 0.22138485312461853, |
|
"logits/rejected": 0.9763603210449219, |
|
"logps/chosen": -308.0843811035156, |
|
"logps/rejected": -368.4732971191406, |
|
"loss": 0.5352, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -1.6622591018676758, |
|
"rewards/margins": 0.7096768617630005, |
|
"rewards/rejected": -2.3719358444213867, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.5173099880620772, |
|
"grad_norm": 22.205857969775185, |
|
"learning_rate": 2.7843507773121414e-07, |
|
"logits/chosen": 0.6237181425094604, |
|
"logits/rejected": 1.5619014501571655, |
|
"logps/chosen": -342.9239807128906, |
|
"logps/rejected": -444.139892578125, |
|
"loss": 0.526, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -1.9262256622314453, |
|
"rewards/margins": 0.9577581286430359, |
|
"rewards/rejected": -2.883983850479126, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.5252686032630323, |
|
"grad_norm": 18.128676048285264, |
|
"learning_rate": 2.715196572027789e-07, |
|
"logits/chosen": 0.98185795545578, |
|
"logits/rejected": 1.66350519657135, |
|
"logps/chosen": -382.2640075683594, |
|
"logps/rejected": -468.0584411621094, |
|
"loss": 0.5367, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -2.389634370803833, |
|
"rewards/margins": 0.7552004456520081, |
|
"rewards/rejected": -3.1448347568511963, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.5332272184639872, |
|
"grad_norm": 16.001394211106067, |
|
"learning_rate": 2.645876044538521e-07, |
|
"logits/chosen": 1.2729809284210205, |
|
"logits/rejected": 1.5976616144180298, |
|
"logps/chosen": -350.9837646484375, |
|
"logps/rejected": -434.87060546875, |
|
"loss": 0.5386, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -2.2160532474517822, |
|
"rewards/margins": 0.5502675771713257, |
|
"rewards/rejected": -2.7663207054138184, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.5411858336649423, |
|
"grad_norm": 18.674809360169434, |
|
"learning_rate": 2.5764427716409815e-07, |
|
"logits/chosen": 1.510772705078125, |
|
"logits/rejected": 2.1603095531463623, |
|
"logps/chosen": -406.2839660644531, |
|
"logps/rejected": -456.6971740722656, |
|
"loss": 0.5519, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -2.386213779449463, |
|
"rewards/margins": 0.6464362740516663, |
|
"rewards/rejected": -3.0326499938964844, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.5491444488658973, |
|
"grad_norm": 19.685756061152915, |
|
"learning_rate": 2.5069504172710494e-07, |
|
"logits/chosen": 1.9052760601043701, |
|
"logits/rejected": 2.854236364364624, |
|
"logps/chosen": -398.56829833984375, |
|
"logps/rejected": -475.7207946777344, |
|
"loss": 0.5318, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -2.607776165008545, |
|
"rewards/margins": 0.8156100511550903, |
|
"rewards/rejected": -3.423386335372925, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.5571030640668524, |
|
"grad_norm": 21.722190178307944, |
|
"learning_rate": 2.4374526910277886e-07, |
|
"logits/chosen": 1.4309998750686646, |
|
"logits/rejected": 2.0307841300964355, |
|
"logps/chosen": -382.191162109375, |
|
"logps/rejected": -473.32183837890625, |
|
"loss": 0.5387, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -2.5259125232696533, |
|
"rewards/margins": 0.7485069632530212, |
|
"rewards/rejected": -3.2744197845458984, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.5571030640668524, |
|
"eval_logits/chosen": 1.1806707382202148, |
|
"eval_logits/rejected": 1.9106937646865845, |
|
"eval_logps/chosen": -380.19281005859375, |
|
"eval_logps/rejected": -464.579833984375, |
|
"eval_loss": 0.5417102575302124, |
|
"eval_rewards/accuracies": 0.7210820913314819, |
|
"eval_rewards/chosen": -2.332890510559082, |
|
"eval_rewards/margins": 0.7823202013969421, |
|
"eval_rewards/rejected": -3.115210771560669, |
|
"eval_runtime": 349.0863, |
|
"eval_samples_per_second": 24.498, |
|
"eval_steps_per_second": 0.384, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.5650616792678074, |
|
"grad_norm": 16.03437069863273, |
|
"learning_rate": 2.368003306662104e-07, |
|
"logits/chosen": 0.908370316028595, |
|
"logits/rejected": 1.6032642126083374, |
|
"logps/chosen": -369.47900390625, |
|
"logps/rejected": -431.5857849121094, |
|
"loss": 0.5607, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -2.1660048961639404, |
|
"rewards/margins": 0.6613842248916626, |
|
"rewards/rejected": -2.8273892402648926, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.5730202944687625, |
|
"grad_norm": 17.46281711779211, |
|
"learning_rate": 2.2986559405621886e-07, |
|
"logits/chosen": 0.8651970028877258, |
|
"logits/rejected": 1.6542387008666992, |
|
"logps/chosen": -364.164794921875, |
|
"logps/rejected": -433.8982849121094, |
|
"loss": 0.5354, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -2.096378803253174, |
|
"rewards/margins": 0.7120481133460999, |
|
"rewards/rejected": -2.808427095413208, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.5809789096697174, |
|
"grad_norm": 19.94099113488109, |
|
"learning_rate": 2.2294641902678443e-07, |
|
"logits/chosen": 0.88880854845047, |
|
"logits/rejected": 1.6225669384002686, |
|
"logps/chosen": -333.8936462402344, |
|
"logps/rejected": -411.53857421875, |
|
"loss": 0.5348, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -1.8313400745391846, |
|
"rewards/margins": 0.8025676608085632, |
|
"rewards/rejected": -2.6339077949523926, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.5889375248706725, |
|
"grad_norm": 19.161711121288285, |
|
"learning_rate": 2.160481533045751e-07, |
|
"logits/chosen": 1.7531951665878296, |
|
"logits/rejected": 2.6644296646118164, |
|
"logps/chosen": -379.2308654785156, |
|
"logps/rejected": -482.81903076171875, |
|
"loss": 0.554, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -2.293461322784424, |
|
"rewards/margins": 1.045929193496704, |
|
"rewards/rejected": -3.339390516281128, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.5968961400716275, |
|
"grad_norm": 19.15869602934714, |
|
"learning_rate": 2.0917612845576882e-07, |
|
"logits/chosen": 1.944665551185608, |
|
"logits/rejected": 2.7940452098846436, |
|
"logps/chosen": -385.5401916503906, |
|
"logps/rejected": -463.55401611328125, |
|
"loss": 0.5252, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -2.5116262435913086, |
|
"rewards/margins": 0.8533546328544617, |
|
"rewards/rejected": -3.364980697631836, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.6048547552725826, |
|
"grad_norm": 21.49823487598931, |
|
"learning_rate": 2.0233565576536564e-07, |
|
"logits/chosen": 1.6610772609710693, |
|
"logits/rejected": 2.386462688446045, |
|
"logps/chosen": -380.05462646484375, |
|
"logps/rejected": -462.010498046875, |
|
"loss": 0.5255, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": -2.2347652912139893, |
|
"rewards/margins": 0.7181671261787415, |
|
"rewards/rejected": -2.952932119369507, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.6128133704735376, |
|
"grad_norm": 20.93099391686062, |
|
"learning_rate": 1.9553202213217537e-07, |
|
"logits/chosen": 2.1078057289123535, |
|
"logits/rejected": 3.0261335372924805, |
|
"logps/chosen": -390.66082763671875, |
|
"logps/rejected": -465.5203552246094, |
|
"loss": 0.541, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -2.3615031242370605, |
|
"rewards/margins": 0.9020525217056274, |
|
"rewards/rejected": -3.2635560035705566, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.6207719856744927, |
|
"grad_norm": 16.749614654207814, |
|
"learning_rate": 1.887704859826528e-07, |
|
"logits/chosen": 2.5508079528808594, |
|
"logits/rejected": 3.456491470336914, |
|
"logps/chosen": -404.16729736328125, |
|
"logps/rejected": -483.2388610839844, |
|
"loss": 0.5457, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -2.6340322494506836, |
|
"rewards/margins": 0.8538228869438171, |
|
"rewards/rejected": -3.4878547191619873, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.6287306008754476, |
|
"grad_norm": 16.5271927987375, |
|
"learning_rate": 1.8205627320673836e-07, |
|
"logits/chosen": 2.747197389602661, |
|
"logits/rejected": 3.1584315299987793, |
|
"logps/chosen": -409.50946044921875, |
|
"logps/rejected": -479.3316345214844, |
|
"loss": 0.5402, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": -2.6576485633850098, |
|
"rewards/margins": 0.5818369388580322, |
|
"rewards/rejected": -3.239485502243042, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.6366892160764027, |
|
"grad_norm": 20.529555438059905, |
|
"learning_rate": 1.7539457311884675e-07, |
|
"logits/chosen": 2.4543442726135254, |
|
"logits/rejected": 3.273699998855591, |
|
"logps/chosen": -408.31427001953125, |
|
"logps/rejected": -496.12091064453125, |
|
"loss": 0.5119, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -2.5986037254333496, |
|
"rewards/margins": 0.82551509141922, |
|
"rewards/rejected": -3.424118757247925, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.6366892160764027, |
|
"eval_logits/chosen": 2.7208683490753174, |
|
"eval_logits/rejected": 3.444331645965576, |
|
"eval_logps/chosen": -414.11798095703125, |
|
"eval_logps/rejected": -503.328125, |
|
"eval_loss": 0.541582465171814, |
|
"eval_rewards/accuracies": 0.7276119589805603, |
|
"eval_rewards/chosen": -2.6721420288085938, |
|
"eval_rewards/margins": 0.83055180311203, |
|
"eval_rewards/rejected": -3.5026936531066895, |
|
"eval_runtime": 348.3456, |
|
"eval_samples_per_second": 24.55, |
|
"eval_steps_per_second": 0.385, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.6446478312773577, |
|
"grad_norm": 19.46716360912331, |
|
"learning_rate": 1.687905344471226e-07, |
|
"logits/chosen": 2.899752616882324, |
|
"logits/rejected": 3.6358859539031982, |
|
"logps/chosen": -426.015380859375, |
|
"logps/rejected": -523.51708984375, |
|
"loss": 0.5394, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -2.757784605026245, |
|
"rewards/margins": 0.8762688636779785, |
|
"rewards/rejected": -3.6340534687042236, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.6526064464783128, |
|
"grad_norm": 20.097758366480125, |
|
"learning_rate": 1.6224926135406693e-07, |
|
"logits/chosen": 2.4270927906036377, |
|
"logits/rejected": 3.3316092491149902, |
|
"logps/chosen": -420.87542724609375, |
|
"logps/rejected": -481.60076904296875, |
|
"loss": 0.5303, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -2.7377285957336426, |
|
"rewards/margins": 0.7255538702011108, |
|
"rewards/rejected": -3.463282823562622, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 0.6605650616792678, |
|
"grad_norm": 17.290268929609024, |
|
"learning_rate": 1.557758094916053e-07, |
|
"logits/chosen": 2.7220635414123535, |
|
"logits/rejected": 3.3889517784118652, |
|
"logps/chosen": -438.2432556152344, |
|
"logps/rejected": -506.6184997558594, |
|
"loss": 0.5311, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -2.7044851779937744, |
|
"rewards/margins": 0.7628629803657532, |
|
"rewards/rejected": -3.467348575592041, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 0.6685236768802229, |
|
"grad_norm": 20.475546044973836, |
|
"learning_rate": 1.4937518209365108e-07, |
|
"logits/chosen": 2.531759738922119, |
|
"logits/rejected": 3.477173328399658, |
|
"logps/chosen": -438.1893615722656, |
|
"logps/rejected": -512.6656494140625, |
|
"loss": 0.5221, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -2.8120474815368652, |
|
"rewards/margins": 0.850610613822937, |
|
"rewards/rejected": -3.66265869140625, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 0.6764822920811778, |
|
"grad_norm": 19.71518623080275, |
|
"learning_rate": 1.4305232610918045e-07, |
|
"logits/chosen": 2.5035440921783447, |
|
"logits/rejected": 3.8153274059295654, |
|
"logps/chosen": -413.65533447265625, |
|
"logps/rejected": -504.1393127441406, |
|
"loss": 0.5189, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -2.5576460361480713, |
|
"rewards/margins": 1.1708472967147827, |
|
"rewards/rejected": -3.7284939289093018, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.6844409072821329, |
|
"grad_norm": 17.595133264616365, |
|
"learning_rate": 1.3681212837880977e-07, |
|
"logits/chosen": 2.362572431564331, |
|
"logits/rejected": 3.3001837730407715, |
|
"logps/chosen": -384.64825439453125, |
|
"logps/rejected": -475.32489013671875, |
|
"loss": 0.5186, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -2.380725145339966, |
|
"rewards/margins": 0.9119716882705688, |
|
"rewards/rejected": -3.292696714401245, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 0.6923995224830879, |
|
"grad_norm": 18.53603009333697, |
|
"learning_rate": 1.3065941185782977e-07, |
|
"logits/chosen": 2.170623302459717, |
|
"logits/rejected": 2.6820473670959473, |
|
"logps/chosen": -410.9842224121094, |
|
"logps/rejected": -517.3212890625, |
|
"loss": 0.5136, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -2.604910135269165, |
|
"rewards/margins": 0.8896166682243347, |
|
"rewards/rejected": -3.4945271015167236, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 0.700358137684043, |
|
"grad_norm": 27.040507130579233, |
|
"learning_rate": 1.2459893188861613e-07, |
|
"logits/chosen": 2.036362648010254, |
|
"logits/rejected": 3.0169339179992676, |
|
"logps/chosen": -395.36248779296875, |
|
"logps/rejected": -469.5846252441406, |
|
"loss": 0.5127, |
|
"rewards/accuracies": 0.78125, |
|
"rewards/chosen": -2.4672751426696777, |
|
"rewards/margins": 0.9281255006790161, |
|
"rewards/rejected": -3.3954005241394043, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 0.708316752884998, |
|
"grad_norm": 19.58653971077579, |
|
"learning_rate": 1.1863537252529548e-07, |
|
"logits/chosen": 1.9658253192901611, |
|
"logits/rejected": 3.1701443195343018, |
|
"logps/chosen": -395.36279296875, |
|
"logps/rejected": -498.6559143066406, |
|
"loss": 0.5361, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -2.475236177444458, |
|
"rewards/margins": 1.1605395078659058, |
|
"rewards/rejected": -3.635775327682495, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 0.716275368085953, |
|
"grad_norm": 16.08727968386679, |
|
"learning_rate": 1.1277334291351145e-07, |
|
"logits/chosen": 1.9292398691177368, |
|
"logits/rejected": 2.558868885040283, |
|
"logps/chosen": -445.3673400878906, |
|
"logps/rejected": -508.82794189453125, |
|
"loss": 0.564, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -2.6232690811157227, |
|
"rewards/margins": 0.7433099150657654, |
|
"rewards/rejected": -3.3665783405303955, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.716275368085953, |
|
"eval_logits/chosen": 1.9952205419540405, |
|
"eval_logits/rejected": 2.5529251098632812, |
|
"eval_logps/chosen": -410.51849365234375, |
|
"eval_logps/rejected": -489.1202087402344, |
|
"eval_loss": 0.5384515523910522, |
|
"eval_rewards/accuracies": 0.7182835936546326, |
|
"eval_rewards/chosen": -2.6361470222473145, |
|
"eval_rewards/margins": 0.7244672775268555, |
|
"eval_rewards/rejected": -3.36061429977417, |
|
"eval_runtime": 348.9854, |
|
"eval_samples_per_second": 24.505, |
|
"eval_steps_per_second": 0.384, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.724233983286908, |
|
"grad_norm": 19.608360282510876, |
|
"learning_rate": 1.0701737372808431e-07, |
|
"logits/chosen": 1.9433863162994385, |
|
"logits/rejected": 2.197230100631714, |
|
"logps/chosen": -402.63958740234375, |
|
"logps/rejected": -492.9768981933594, |
|
"loss": 0.5417, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -2.6037509441375732, |
|
"rewards/margins": 0.6724608540534973, |
|
"rewards/rejected": -3.2762115001678467, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 0.7321925984878631, |
|
"grad_norm": 21.302962778555738, |
|
"learning_rate": 1.0137191367132078e-07, |
|
"logits/chosen": 2.313117504119873, |
|
"logits/rejected": 2.6423773765563965, |
|
"logps/chosen": -418.1343688964844, |
|
"logps/rejected": -490.7264709472656, |
|
"loss": 0.542, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -2.770127534866333, |
|
"rewards/margins": 0.5889343619346619, |
|
"rewards/rejected": -3.3590621948242188, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 0.7401512136888182, |
|
"grad_norm": 20.543232804563647, |
|
"learning_rate": 9.584132603467827e-08, |
|
"logits/chosen": 2.2165169715881348, |
|
"logits/rejected": 2.9923043251037598, |
|
"logps/chosen": -415.32177734375, |
|
"logps/rejected": -475.14544677734375, |
|
"loss": 0.5193, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -2.3847451210021973, |
|
"rewards/margins": 0.7635815739631653, |
|
"rewards/rejected": -3.148326873779297, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 0.7481098288897732, |
|
"grad_norm": 16.82352536953462, |
|
"learning_rate": 9.042988532644249e-08, |
|
"logits/chosen": 2.554182529449463, |
|
"logits/rejected": 3.0109364986419678, |
|
"logps/chosen": -417.294921875, |
|
"logps/rejected": -497.1542053222656, |
|
"loss": 0.5421, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -2.6419878005981445, |
|
"rewards/margins": 0.7793722748756409, |
|
"rewards/rejected": -3.4213595390319824, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 0.7560684440907283, |
|
"grad_norm": 16.52509872707129, |
|
"learning_rate": 8.514177396802428e-08, |
|
"logits/chosen": 2.2360918521881104, |
|
"logits/rejected": 2.8688242435455322, |
|
"logps/chosen": -388.20880126953125, |
|
"logps/rejected": -494.08721923828125, |
|
"loss": 0.5432, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -2.520381450653076, |
|
"rewards/margins": 0.9274767637252808, |
|
"rewards/rejected": -3.4478580951690674, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 0.7640270592916832, |
|
"grad_norm": 18.591898542275075, |
|
"learning_rate": 7.998107906142839e-08, |
|
"logits/chosen": 2.534701108932495, |
|
"logits/rejected": 3.3956542015075684, |
|
"logps/chosen": -409.61944580078125, |
|
"logps/rejected": -477.8722229003906, |
|
"loss": 0.5346, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": -2.4543957710266113, |
|
"rewards/margins": 0.8880963325500488, |
|
"rewards/rejected": -3.3424923419952393, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 0.7719856744926383, |
|
"grad_norm": 23.074813574206292, |
|
"learning_rate": 7.495178923039396e-08, |
|
"logits/chosen": 2.1817586421966553, |
|
"logits/rejected": 3.049762487411499, |
|
"logps/chosen": -394.3160705566406, |
|
"logps/rejected": -475.4697265625, |
|
"loss": 0.5496, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -2.4694297313690186, |
|
"rewards/margins": 0.882437527179718, |
|
"rewards/rejected": -3.35186767578125, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 0.7799442896935933, |
|
"grad_norm": 18.820352227282687, |
|
"learning_rate": 7.005779153764682e-08, |
|
"logits/chosen": 2.0304465293884277, |
|
"logits/rejected": 2.5523359775543213, |
|
"logps/chosen": -387.8653564453125, |
|
"logps/rejected": -465.1128845214844, |
|
"loss": 0.5422, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -2.4239563941955566, |
|
"rewards/margins": 0.675017237663269, |
|
"rewards/rejected": -3.098973512649536, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 0.7879029048945484, |
|
"grad_norm": 18.25070063670074, |
|
"learning_rate": 6.530286848064698e-08, |
|
"logits/chosen": 1.7723140716552734, |
|
"logits/rejected": 1.9407542943954468, |
|
"logps/chosen": -413.15692138671875, |
|
"logps/rejected": -463.80865478515625, |
|
"loss": 0.5479, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -2.591344118118286, |
|
"rewards/margins": 0.4910058379173279, |
|
"rewards/rejected": -3.082350254058838, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 0.7958615200955034, |
|
"grad_norm": 17.31216331182086, |
|
"learning_rate": 6.069069506815325e-08, |
|
"logits/chosen": 2.043708324432373, |
|
"logits/rejected": 2.6901423931121826, |
|
"logps/chosen": -399.66064453125, |
|
"logps/rejected": -487.8421936035156, |
|
"loss": 0.5201, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -2.45231556892395, |
|
"rewards/margins": 0.9817484617233276, |
|
"rewards/rejected": -3.4340641498565674, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.7958615200955034, |
|
"eval_logits/chosen": 1.8888087272644043, |
|
"eval_logits/rejected": 2.4305639266967773, |
|
"eval_logps/chosen": -397.115966796875, |
|
"eval_logps/rejected": -481.51214599609375, |
|
"eval_loss": 0.5347379446029663, |
|
"eval_rewards/accuracies": 0.7229477763175964, |
|
"eval_rewards/chosen": -2.502121925354004, |
|
"eval_rewards/margins": 0.7824118137359619, |
|
"eval_rewards/rejected": -3.2845335006713867, |
|
"eval_runtime": 350.501, |
|
"eval_samples_per_second": 24.399, |
|
"eval_steps_per_second": 0.382, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.8038201352964585, |
|
"grad_norm": 17.72839329874758, |
|
"learning_rate": 5.6224835979863714e-08, |
|
"logits/chosen": 1.8582655191421509, |
|
"logits/rejected": 2.2548227310180664, |
|
"logps/chosen": -385.1344909667969, |
|
"logps/rejected": -468.8387145996094, |
|
"loss": 0.522, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -2.356790065765381, |
|
"rewards/margins": 0.7311912775039673, |
|
"rewards/rejected": -3.0879814624786377, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 0.8117787504974134, |
|
"grad_norm": 18.254082210124515, |
|
"learning_rate": 5.190874281132851e-08, |
|
"logits/chosen": 2.157611131668091, |
|
"logits/rejected": 2.920135974884033, |
|
"logps/chosen": -406.5130310058594, |
|
"logps/rejected": -482.342529296875, |
|
"loss": 0.5486, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -2.3830180168151855, |
|
"rewards/margins": 0.8080500364303589, |
|
"rewards/rejected": -3.191068172454834, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 0.8197373656983685, |
|
"grad_norm": 24.462207528511584, |
|
"learning_rate": 4.774575140626316e-08, |
|
"logits/chosen": 2.092142105102539, |
|
"logits/rejected": 2.9269495010375977, |
|
"logps/chosen": -421.19830322265625, |
|
"logps/rejected": -489.29571533203125, |
|
"loss": 0.5479, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -2.6434485912323, |
|
"rewards/margins": 0.8873342275619507, |
|
"rewards/rejected": -3.530783176422119, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 0.8276959808993235, |
|
"grad_norm": 18.98119668610804, |
|
"learning_rate": 4.373907927832513e-08, |
|
"logits/chosen": 2.023267984390259, |
|
"logits/rejected": 2.8832485675811768, |
|
"logps/chosen": -404.16546630859375, |
|
"logps/rejected": -519.4654541015625, |
|
"loss": 0.5224, |
|
"rewards/accuracies": 0.78125, |
|
"rewards/chosen": -2.4847495555877686, |
|
"rewards/margins": 1.1609361171722412, |
|
"rewards/rejected": -3.6456856727600098, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 0.8356545961002786, |
|
"grad_norm": 17.461112275411644, |
|
"learning_rate": 3.9891823124345665e-08, |
|
"logits/chosen": 2.068453311920166, |
|
"logits/rejected": 2.5450072288513184, |
|
"logps/chosen": -420.19927978515625, |
|
"logps/rejected": -475.58404541015625, |
|
"loss": 0.5501, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -2.6072211265563965, |
|
"rewards/margins": 0.6359682083129883, |
|
"rewards/rejected": -3.2431893348693848, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 0.8436132113012336, |
|
"grad_norm": 16.543474663899303, |
|
"learning_rate": 3.620695643093924e-08, |
|
"logits/chosen": 1.6659740209579468, |
|
"logits/rejected": 2.1419837474823, |
|
"logps/chosen": -398.4236145019531, |
|
"logps/rejected": -484.2503967285156, |
|
"loss": 0.5452, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -2.5702877044677734, |
|
"rewards/margins": 0.7829211950302124, |
|
"rewards/rejected": -3.353208541870117, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 0.8515718265021887, |
|
"grad_norm": 17.254761854068718, |
|
"learning_rate": 3.268732717634032e-08, |
|
"logits/chosen": 1.6836740970611572, |
|
"logits/rejected": 2.284329652786255, |
|
"logps/chosen": -411.85009765625, |
|
"logps/rejected": -475.13922119140625, |
|
"loss": 0.5274, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -2.4504170417785645, |
|
"rewards/margins": 0.7453802824020386, |
|
"rewards/rejected": -3.1957974433898926, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 0.8595304417031436, |
|
"grad_norm": 17.38092861982639, |
|
"learning_rate": 2.9335655629243645e-08, |
|
"logits/chosen": 1.6976267099380493, |
|
"logits/rejected": 2.579646110534668, |
|
"logps/chosen": -400.24896240234375, |
|
"logps/rejected": -494.01953125, |
|
"loss": 0.546, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -2.477663516998291, |
|
"rewards/margins": 0.8947075009346008, |
|
"rewards/rejected": -3.372371196746826, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 0.8674890569040987, |
|
"grad_norm": 19.735012113626887, |
|
"learning_rate": 2.6154532246349476e-08, |
|
"logits/chosen": 1.7606983184814453, |
|
"logits/rejected": 2.5042643547058105, |
|
"logps/chosen": -420.964599609375, |
|
"logps/rejected": -511.8990173339844, |
|
"loss": 0.5358, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -2.4465584754943848, |
|
"rewards/margins": 0.9180288314819336, |
|
"rewards/rejected": -3.3645873069763184, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 0.8754476721050537, |
|
"grad_norm": 20.59566154349169, |
|
"learning_rate": 2.31464156702382e-08, |
|
"logits/chosen": 1.714812994003296, |
|
"logits/rejected": 2.1487162113189697, |
|
"logps/chosen": -376.896728515625, |
|
"logps/rejected": -463.0181579589844, |
|
"loss": 0.5341, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -2.430546522140503, |
|
"rewards/margins": 0.7609037160873413, |
|
"rewards/rejected": -3.191450357437134, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.8754476721050537, |
|
"eval_logits/chosen": 1.8146584033966064, |
|
"eval_logits/rejected": 2.3851335048675537, |
|
"eval_logps/chosen": -395.8829650878906, |
|
"eval_logps/rejected": -481.46636962890625, |
|
"eval_loss": 0.5346034169197083, |
|
"eval_rewards/accuracies": 0.72947758436203, |
|
"eval_rewards/chosen": -2.4897918701171875, |
|
"eval_rewards/margins": 0.7942842245101929, |
|
"eval_rewards/rejected": -3.2840757369995117, |
|
"eval_runtime": 349.0502, |
|
"eval_samples_per_second": 24.501, |
|
"eval_steps_per_second": 0.384, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.8834062873060088, |
|
"grad_norm": 24.480879815940032, |
|
"learning_rate": 2.031363082912252e-08, |
|
"logits/chosen": 1.3329254388809204, |
|
"logits/rejected": 1.8607990741729736, |
|
"logps/chosen": -393.1271057128906, |
|
"logps/rejected": -474.62030029296875, |
|
"loss": 0.5303, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -2.4428350925445557, |
|
"rewards/margins": 0.7839537858963013, |
|
"rewards/rejected": -3.2267889976501465, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 0.8913649025069638, |
|
"grad_norm": 22.23107791348212, |
|
"learning_rate": 1.7658367139945228e-08, |
|
"logits/chosen": 1.9081084728240967, |
|
"logits/rejected": 2.736818790435791, |
|
"logps/chosen": -383.86090087890625, |
|
"logps/rejected": -476.2447814941406, |
|
"loss": 0.5523, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -2.3657093048095703, |
|
"rewards/margins": 0.9522883296012878, |
|
"rewards/rejected": -3.317997455596924, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 0.8993235177079189, |
|
"grad_norm": 18.52217546312308, |
|
"learning_rate": 1.5182676816211632e-08, |
|
"logits/chosen": 1.5083836317062378, |
|
"logits/rejected": 2.2149221897125244, |
|
"logps/chosen": -386.4073791503906, |
|
"logps/rejected": -472.29425048828125, |
|
"loss": 0.5071, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -2.3090853691101074, |
|
"rewards/margins": 0.8999401926994324, |
|
"rewards/rejected": -3.2090256214141846, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 0.9072821329088738, |
|
"grad_norm": 18.15234080791347, |
|
"learning_rate": 1.2888473281864597e-08, |
|
"logits/chosen": 2.142833709716797, |
|
"logits/rejected": 2.8091158866882324, |
|
"logps/chosen": -391.89910888671875, |
|
"logps/rejected": -496.69757080078125, |
|
"loss": 0.5088, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -2.436957836151123, |
|
"rewards/margins": 0.9512897729873657, |
|
"rewards/rejected": -3.3882477283477783, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 0.9152407481098289, |
|
"grad_norm": 40.801838672570874, |
|
"learning_rate": 1.0777529692427679e-08, |
|
"logits/chosen": 1.646925687789917, |
|
"logits/rejected": 2.3574109077453613, |
|
"logps/chosen": -405.37457275390625, |
|
"logps/rejected": -485.71990966796875, |
|
"loss": 0.5398, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -2.4802286624908447, |
|
"rewards/margins": 0.8511859774589539, |
|
"rewards/rejected": -3.3314144611358643, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 0.9231993633107839, |
|
"grad_norm": 18.707936215451618, |
|
"learning_rate": 8.851477564560061e-09, |
|
"logits/chosen": 1.6634693145751953, |
|
"logits/rejected": 2.3943982124328613, |
|
"logps/chosen": -370.2140197753906, |
|
"logps/rejected": -464.6942443847656, |
|
"loss": 0.5257, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -2.3133978843688965, |
|
"rewards/margins": 0.9280908703804016, |
|
"rewards/rejected": -3.241488218307495, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 0.931157978511739, |
|
"grad_norm": 20.648180333245318, |
|
"learning_rate": 7.111805515081531e-09, |
|
"logits/chosen": 2.0850751399993896, |
|
"logits/rejected": 2.890317440032959, |
|
"logps/chosen": -375.6150207519531, |
|
"logps/rejected": -469.1720275878906, |
|
"loss": 0.5335, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -2.287146806716919, |
|
"rewards/margins": 0.9208979606628418, |
|
"rewards/rejected": -3.2080445289611816, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 0.939116593712694, |
|
"grad_norm": 18.214755371193483, |
|
"learning_rate": 5.559858110443016e-09, |
|
"logits/chosen": 1.984487533569336, |
|
"logits/rejected": 2.5453860759735107, |
|
"logps/chosen": -384.75177001953125, |
|
"logps/rejected": -463.731689453125, |
|
"loss": 0.5315, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -2.390475034713745, |
|
"rewards/margins": 0.7526242136955261, |
|
"rewards/rejected": -3.143099308013916, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 0.947075208913649, |
|
"grad_norm": 19.352322603158484, |
|
"learning_rate": 4.196834827531276e-09, |
|
"logits/chosen": 1.8608672618865967, |
|
"logits/rejected": 2.2748332023620605, |
|
"logps/chosen": -398.26397705078125, |
|
"logps/rejected": -469.12542724609375, |
|
"loss": 0.5555, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -2.4501779079437256, |
|
"rewards/margins": 0.6367470026016235, |
|
"rewards/rejected": -3.0869250297546387, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 0.955033824114604, |
|
"grad_norm": 22.98572661304592, |
|
"learning_rate": 3.023789126611137e-09, |
|
"logits/chosen": 1.9012830257415771, |
|
"logits/rejected": 2.2817015647888184, |
|
"logps/chosen": -402.8839416503906, |
|
"logps/rejected": -466.7554626464844, |
|
"loss": 0.5394, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -2.605943202972412, |
|
"rewards/margins": 0.6437206864356995, |
|
"rewards/rejected": -3.249663829803467, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.955033824114604, |
|
"eval_logits/chosen": 1.891152024269104, |
|
"eval_logits/rejected": 2.4846954345703125, |
|
"eval_logps/chosen": -397.9764404296875, |
|
"eval_logps/rejected": -485.8160705566406, |
|
"eval_loss": 0.5341334342956543, |
|
"eval_rewards/accuracies": 0.72947758436203, |
|
"eval_rewards/chosen": -2.5107264518737793, |
|
"eval_rewards/margins": 0.8168463706970215, |
|
"eval_rewards/rejected": -3.327572822570801, |
|
"eval_runtime": 349.0972, |
|
"eval_samples_per_second": 24.497, |
|
"eval_steps_per_second": 0.384, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.9629924393155591, |
|
"grad_norm": 20.459199823597086, |
|
"learning_rate": 2.041627637121929e-09, |
|
"logits/chosen": 2.0498170852661133, |
|
"logits/rejected": 2.8115336894989014, |
|
"logps/chosen": -374.2912292480469, |
|
"logps/rejected": -481.9027404785156, |
|
"loss": 0.5073, |
|
"rewards/accuracies": 0.793749988079071, |
|
"rewards/chosen": -2.3598573207855225, |
|
"rewards/margins": 1.0655521154403687, |
|
"rewards/rejected": -3.4254093170166016, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 0.9709510545165141, |
|
"grad_norm": 27.51193921402863, |
|
"learning_rate": 1.2511094569571668e-09, |
|
"logits/chosen": 1.997765302658081, |
|
"logits/rejected": 2.4746177196502686, |
|
"logps/chosen": -394.349365234375, |
|
"logps/rejected": -496.71588134765625, |
|
"loss": 0.5213, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -2.551574468612671, |
|
"rewards/margins": 0.9096673727035522, |
|
"rewards/rejected": -3.4612419605255127, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 0.9789096697174692, |
|
"grad_norm": 21.766618365029238, |
|
"learning_rate": 6.528455657691112e-10, |
|
"logits/chosen": 1.7345082759857178, |
|
"logits/rejected": 2.2918901443481445, |
|
"logps/chosen": -404.74481201171875, |
|
"logps/rejected": -502.8517150878906, |
|
"loss": 0.535, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -2.441373825073242, |
|
"rewards/margins": 0.877045750617981, |
|
"rewards/rejected": -3.3184192180633545, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 0.9868682849184242, |
|
"grad_norm": 23.710802357522613, |
|
"learning_rate": 2.4729835275189016e-10, |
|
"logits/chosen": 1.854836106300354, |
|
"logits/rejected": 2.660703420639038, |
|
"logps/chosen": -402.3847961425781, |
|
"logps/rejected": -474.1854553222656, |
|
"loss": 0.5461, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -2.477180004119873, |
|
"rewards/margins": 0.7909337282180786, |
|
"rewards/rejected": -3.268113613128662, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 0.9948269001193792, |
|
"grad_norm": 20.55632802983822, |
|
"learning_rate": 3.478125926756337e-11, |
|
"logits/chosen": 2.092129945755005, |
|
"logits/rejected": 2.7219738960266113, |
|
"logps/chosen": -392.1388854980469, |
|
"logps/rejected": -490.11273193359375, |
|
"loss": 0.5228, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -2.4716639518737793, |
|
"rewards/margins": 0.9531086683273315, |
|
"rewards/rejected": -3.4247727394104004, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 0.9996020692399522, |
|
"step": 1256, |
|
"total_flos": 0.0, |
|
"train_loss": 0.5626794682566527, |
|
"train_runtime": 30362.0274, |
|
"train_samples_per_second": 5.296, |
|
"train_steps_per_second": 0.041 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 1256, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 100, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|