{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.0, "eval_steps": 100, "global_step": 478, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0, "learning_rate": 1.0416666666666666e-08, "logits/chosen": -2.8099329471588135, "logits/rejected": -2.7572641372680664, "logps/chosen": -241.48843383789062, "logps/rejected": -197.4517822265625, "loss": 0.7347, "rewards/accuracies": 0.0, "rewards/chosen": 0.0, "rewards/margins": 0.0, "rewards/rejected": 0.0, "step": 1 }, { "epoch": 0.02, "learning_rate": 1.0416666666666667e-07, "logits/chosen": -2.832111358642578, "logits/rejected": -2.8087170124053955, "logps/chosen": -292.73345947265625, "logps/rejected": -278.5730895996094, "loss": 0.731, "rewards/accuracies": 0.4236111044883728, "rewards/chosen": -0.00042624352499842644, "rewards/margins": -0.0005570763605646789, "rewards/rejected": 0.000130832806462422, "step": 10 }, { "epoch": 0.04, "learning_rate": 2.0833333333333333e-07, "logits/chosen": -2.8119773864746094, "logits/rejected": -2.783511161804199, "logps/chosen": -290.25933837890625, "logps/rejected": -290.83258056640625, "loss": 0.7347, "rewards/accuracies": 0.6000000238418579, "rewards/chosen": 0.001403255038894713, "rewards/margins": 0.002630328293889761, "rewards/rejected": -0.0012270736042410135, "step": 20 }, { "epoch": 0.06, "learning_rate": 3.1249999999999997e-07, "logits/chosen": -2.7597923278808594, "logits/rejected": -2.728569984436035, "logps/chosen": -246.2042694091797, "logps/rejected": -226.95529174804688, "loss": 0.728, "rewards/accuracies": 0.65625, "rewards/chosen": 0.0015784974675625563, "rewards/margins": 0.008332479745149612, "rewards/rejected": -0.0067539820447564125, "step": 30 }, { "epoch": 0.08, "learning_rate": 4.1666666666666667e-07, "logits/chosen": -2.799250364303589, "logits/rejected": -2.7705955505371094, "logps/chosen": -298.7322082519531, "logps/rejected": -264.1480407714844, "loss": 0.7197, 
"rewards/accuracies": 0.731249988079071, "rewards/chosen": 0.007894225418567657, "rewards/margins": 0.03674033656716347, "rewards/rejected": -0.02884610928595066, "step": 40 }, { "epoch": 0.1, "learning_rate": 4.999733114418725e-07, "logits/chosen": -2.7239151000976562, "logits/rejected": -2.7024989128112793, "logps/chosen": -277.66741943359375, "logps/rejected": -271.1574401855469, "loss": 0.7069, "rewards/accuracies": 0.668749988079071, "rewards/chosen": -0.002369915135204792, "rewards/margins": 0.07223416864871979, "rewards/rejected": -0.0746040791273117, "step": 50 }, { "epoch": 0.13, "learning_rate": 4.990398100856366e-07, "logits/chosen": -2.7488715648651123, "logits/rejected": -2.7262396812438965, "logps/chosen": -261.4748229980469, "logps/rejected": -247.807861328125, "loss": 0.6795, "rewards/accuracies": 0.6499999761581421, "rewards/chosen": -0.023985665291547775, "rewards/margins": 0.1279429942369461, "rewards/rejected": -0.1519286334514618, "step": 60 }, { "epoch": 0.15, "learning_rate": 4.967775735898179e-07, "logits/chosen": -2.7364790439605713, "logits/rejected": -2.70658802986145, "logps/chosen": -287.2948913574219, "logps/rejected": -270.7928466796875, "loss": 0.7083, "rewards/accuracies": 0.668749988079071, "rewards/chosen": -0.18872329592704773, "rewards/margins": 0.17036835849285126, "rewards/rejected": -0.3590916693210602, "step": 70 }, { "epoch": 0.17, "learning_rate": 4.931986719649298e-07, "logits/chosen": -2.6997952461242676, "logits/rejected": -2.697025775909424, "logps/chosen": -298.27984619140625, "logps/rejected": -323.8697814941406, "loss": 0.6995, "rewards/accuracies": 0.6875, "rewards/chosen": -0.27548032999038696, "rewards/margins": 0.28797003626823425, "rewards/rejected": -0.5634504556655884, "step": 80 }, { "epoch": 0.19, "learning_rate": 4.883222001996351e-07, "logits/chosen": -2.7339961528778076, "logits/rejected": -2.7010178565979004, "logps/chosen": -296.7826232910156, "logps/rejected": -312.85284423828125, "loss": 0.8219, 
"rewards/accuracies": 0.6937500238418579, "rewards/chosen": -0.41818127036094666, "rewards/margins": 0.36340323090553284, "rewards/rejected": -0.7815844416618347, "step": 90 }, { "epoch": 0.21, "learning_rate": 4.821741763807186e-07, "logits/chosen": -2.715860605239868, "logits/rejected": -2.686281204223633, "logps/chosen": -333.1566467285156, "logps/rejected": -342.143310546875, "loss": 2.2447, "rewards/accuracies": 0.6625000238418579, "rewards/chosen": -0.4371761679649353, "rewards/margins": 0.3056567311286926, "rewards/rejected": -0.7428328990936279, "step": 100 }, { "epoch": 0.21, "eval_logits/chosen": -2.671812057495117, "eval_logits/rejected": -2.6597108840942383, "eval_logps/chosen": -285.50408935546875, "eval_logps/rejected": -325.74481201171875, "eval_loss": 12.741142272949219, "eval_rewards/accuracies": 0.75, "eval_rewards/chosen": -0.28464433550834656, "eval_rewards/margins": 0.39927127957344055, "eval_rewards/rejected": -0.6839155554771423, "eval_runtime": 53.1281, "eval_samples_per_second": 37.645, "eval_steps_per_second": 0.602, "step": 100 }, { "epoch": 0.23, "learning_rate": 4.747874028753375e-07, "logits/chosen": -2.5364506244659424, "logits/rejected": -2.518993377685547, "logps/chosen": -321.1919860839844, "logps/rejected": -313.2504577636719, "loss": 2.392, "rewards/accuracies": 0.731249988079071, "rewards/chosen": -0.5109449028968811, "rewards/margins": 0.4034649729728699, "rewards/rejected": -0.9144099354743958, "step": 110 }, { "epoch": 0.25, "learning_rate": 4.662012913161997e-07, "logits/chosen": -2.513144016265869, "logits/rejected": -2.483302593231201, "logps/chosen": -317.71197509765625, "logps/rejected": -354.447265625, "loss": 0.7845, "rewards/accuracies": 0.7437499761581421, "rewards/chosen": -0.4886089861392975, "rewards/margins": 0.42735376954078674, "rewards/rejected": -0.915962815284729, "step": 120 }, { "epoch": 0.27, "learning_rate": 4.5646165232345103e-07, "logits/chosen": -2.38696026802063, "logits/rejected": 
-2.377903461456299, "logps/chosen": -329.28619384765625, "logps/rejected": -339.8948669433594, "loss": 0.9022, "rewards/accuracies": 0.6625000238418579, "rewards/chosen": -0.5479137897491455, "rewards/margins": 0.34450799226760864, "rewards/rejected": -0.8924217224121094, "step": 130 }, { "epoch": 0.29, "learning_rate": 4.456204510851956e-07, "logits/chosen": -2.3478336334228516, "logits/rejected": -2.315876007080078, "logps/chosen": -339.808837890625, "logps/rejected": -381.429443359375, "loss": 29.8037, "rewards/accuracies": 0.71875, "rewards/chosen": -0.5917433500289917, "rewards/margins": 0.5149160623550415, "rewards/rejected": -1.1066595315933228, "step": 140 }, { "epoch": 0.31, "learning_rate": 4.337355301007335e-07, "logits/chosen": -2.4338667392730713, "logits/rejected": -2.4544992446899414, "logps/chosen": -269.7679443359375, "logps/rejected": -317.58233642578125, "loss": 1.0584, "rewards/accuracies": 0.699999988079071, "rewards/chosen": -0.4308902621269226, "rewards/margins": 0.38207167387008667, "rewards/rejected": -0.8129619359970093, "step": 150 }, { "epoch": 0.33, "learning_rate": 4.2087030056579986e-07, "logits/chosen": -2.514054298400879, "logits/rejected": -2.484346866607666, "logps/chosen": -303.3541564941406, "logps/rejected": -321.21856689453125, "loss": 0.8359, "rewards/accuracies": 0.675000011920929, "rewards/chosen": -0.47382301092147827, "rewards/margins": 0.2795892357826233, "rewards/rejected": -0.7534123659133911, "step": 160 }, { "epoch": 0.36, "learning_rate": 4.070934040463998e-07, "logits/chosen": -2.496929883956909, "logits/rejected": -2.4770359992980957, "logps/chosen": -329.54736328125, "logps/rejected": -343.8206787109375, "loss": 4.1491, "rewards/accuracies": 0.7562500238418579, "rewards/chosen": -0.47380566596984863, "rewards/margins": 0.3627183437347412, "rewards/rejected": -0.8365238904953003, "step": 170 }, { "epoch": 0.38, "learning_rate": 3.9247834624635404e-07, "logits/chosen": -2.5219836235046387, "logits/rejected": 
-2.5218372344970703, "logps/chosen": -325.4960632324219, "logps/rejected": -352.91546630859375, "loss": 4.4761, "rewards/accuracies": 0.7250000238418579, "rewards/chosen": -0.5631815195083618, "rewards/margins": 0.41213661432266235, "rewards/rejected": -0.9753181338310242, "step": 180 }, { "epoch": 0.4, "learning_rate": 3.7710310482256523e-07, "logits/chosen": -2.543841600418091, "logits/rejected": -2.4971985816955566, "logps/chosen": -335.67486572265625, "logps/rejected": -353.8238830566406, "loss": 10897.8156, "rewards/accuracies": 0.699999988079071, "rewards/chosen": -0.44279035925865173, "rewards/margins": 0.4546957015991211, "rewards/rejected": -0.8974860906600952, "step": 190 }, { "epoch": 0.42, "learning_rate": 3.610497133404795e-07, "logits/chosen": -2.428988456726074, "logits/rejected": -2.4355721473693848, "logps/chosen": -354.25653076171875, "logps/rejected": -367.38916015625, "loss": 4.720340943901239e+22, "rewards/accuracies": 0.6937500238418579, "rewards/chosen": -0.5808899998664856, "rewards/margins": 0.4498439431190491, "rewards/rejected": -1.0307339429855347, "step": 200 }, { "epoch": 0.42, "eval_logits/chosen": -2.2813971042633057, "eval_logits/rejected": -2.236406087875366, "eval_logps/chosen": -400.4986877441406, "eval_logps/rejected": -450.00384521484375, "eval_loss": null, "eval_rewards/accuracies": 0.69921875, "eval_rewards/chosen": -1.434590458869934, "eval_rewards/margins": 0.49191567301750183, "eval_rewards/rejected": -1.9265060424804688, "eval_runtime": 53.0955, "eval_samples_per_second": 37.668, "eval_steps_per_second": 0.603, "step": 200 }, { "epoch": 0.44, "learning_rate": 3.4440382358952115e-07, "logits/chosen": -2.0168819427490234, "logits/rejected": -1.9473979473114014, "logps/chosen": -461.4383850097656, "logps/rejected": -495.66925048828125, "loss": 4.475516531174102e+21, "rewards/accuracies": 0.7124999761581421, "rewards/chosen": -1.8840515613555908, "rewards/margins": 0.5532835721969604, "rewards/rejected": 
-2.4373350143432617, "step": 210 }, { "epoch": 0.46, "learning_rate": 3.272542485937368e-07, "logits/chosen": null, "logits/rejected": null, "logps/chosen": null, "logps/rejected": null, "loss": 40054194176.0, "rewards/accuracies": 0.11249999701976776, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 220 }, { "epoch": 0.48, "learning_rate": 3.096924887558854e-07, "logits/chosen": null, "logits/rejected": null, "logps/chosen": null, "logps/rejected": null, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 230 }, { "epoch": 0.5, "learning_rate": 2.9181224366319943e-07, "logits/chosen": null, "logits/rejected": null, "logps/chosen": null, "logps/rejected": null, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 240 }, { "epoch": 0.52, "learning_rate": 2.7370891215954565e-07, "logits/chosen": null, "logits/rejected": null, "logps/chosen": null, "logps/rejected": null, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 250 }, { "epoch": 0.54, "learning_rate": 2.55479083351317e-07, "logits/chosen": null, "logits/rejected": null, "logps/chosen": null, "logps/rejected": null, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 260 }, { "epoch": 0.56, "learning_rate": 2.3722002126275822e-07, "logits/chosen": null, "logits/rejected": null, "logps/chosen": null, "logps/rejected": null, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 270 }, { "epoch": 0.59, "learning_rate": 2.19029145890313e-07, "logits/chosen": null, "logits/rejected": null, "logps/chosen": null, "logps/rejected": null, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 280 }, { "epoch": 0.61, 
"learning_rate": 2.0100351342479216e-07, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 290 }, { "epoch": 0.63, "learning_rate": 1.8323929841460178e-07, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 300 }, { "epoch": 0.63, "eval_logits/chosen": NaN, "eval_logits/rejected": NaN, "eval_logps/chosen": NaN, "eval_logps/rejected": NaN, "eval_loss": NaN, "eval_rewards/accuracies": 0.0, "eval_rewards/chosen": NaN, "eval_rewards/margins": NaN, "eval_rewards/rejected": NaN, "eval_runtime": 51.7823, "eval_samples_per_second": 38.623, "eval_steps_per_second": 0.618, "step": 300 }, { "epoch": 0.65, "learning_rate": 1.6583128063291573e-07, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 310 }, { "epoch": 0.67, "learning_rate": 1.488723393865766e-07, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 320 }, { "epoch": 0.69, "learning_rate": 1.3245295796480788e-07, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 330 }, { "epoch": 0.71, "learning_rate": 1.1666074087171627e-07, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 340 
}, { "epoch": 0.73, "learning_rate": 1.0157994641835734e-07, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 350 }, { "epoch": 0.75, "learning_rate": 8.729103716819111e-08, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 360 }, { "epoch": 0.77, "learning_rate": 7.387025063449081e-08, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 370 }, { "epoch": 0.79, "learning_rate": 6.138919252022435e-08, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 380 }, { "epoch": 0.82, "learning_rate": 4.991445467064689e-08, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 390 }, { "epoch": 0.84, "learning_rate": 3.9507259776993954e-08, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 400 }, { "epoch": 0.84, "eval_logits/chosen": NaN, "eval_logits/rejected": NaN, "eval_logps/chosen": NaN, "eval_logps/rejected": NaN, "eval_loss": NaN, "eval_rewards/accuracies": 0.0, "eval_rewards/chosen": NaN, "eval_rewards/margins": NaN, "eval_rewards/rejected": NaN, "eval_runtime": 51.801, "eval_samples_per_second": 38.609, "eval_steps_per_second": 
0.618, "step": 400 }, { "epoch": 0.86, "learning_rate": 3.022313472693447e-08, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 410 }, { "epoch": 0.88, "learning_rate": 2.2111614344599684e-08, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 420 }, { "epoch": 0.9, "learning_rate": 1.521597710086439e-08, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 430 }, { "epoch": 0.92, "learning_rate": 9.57301420397924e-09, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 440 }, { "epoch": 0.94, "learning_rate": 5.212833302556258e-09, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 450 }, { "epoch": 0.96, "learning_rate": 2.158697848236607e-09, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 460 }, { "epoch": 0.98, "learning_rate": 4.269029751107489e-10, "logits/chosen": NaN, "logits/rejected": NaN, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0, "rewards/accuracies": 0.0, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 470 }, { "epoch": 1.0, "step": 478, "total_flos": 0.0, 
"train_loss": 1.0811490788750324e+21, "train_runtime": 4273.3161, "train_samples_per_second": 14.306, "train_steps_per_second": 0.112 } ], "logging_steps": 10, "max_steps": 478, "num_train_epochs": 1, "save_steps": 100, "total_flos": 0.0, "trial_name": null, "trial_params": null }