{ "best_metric": 0.0047710007056593895, "best_model_checkpoint": "./Zephyr/28-03-24-Weni-WeniGPT-QA-Zephyr-7B-4.0.1-KTO_WeniGPT Experiment using KTO trainer with no collator, Zephyr model and no system prompt.-2_max_steps-786_batch_32_2024-03-28_ppid_9/checkpoint-200", "epoch": 2.268431001890359, "eval_steps": 50, "global_step": 300, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.15, "grad_norm": 1.94673752784729, "kl": 0.3060356676578522, "learning_rate": 0.0001666666666666667, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 1.0194, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 20 }, { "epoch": 0.3, "grad_norm": 0.799897313117981, "kl": 0.08012839406728745, "learning_rate": 0.00019580052493438322, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.204, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 40 }, { "epoch": 0.38, "eval_kl": 0.0, "eval_logps/chosen": -120.33751678466797, "eval_logps/rejected": -405.6329345703125, "eval_loss": 0.02746938355267048, "eval_rewards/chosen": 5.632839202880859, "eval_rewards/margins": 25.979337692260742, "eval_rewards/rejected": -20.346500396728516, "eval_runtime": 215.3361, "eval_samples_per_second": 2.322, "eval_steps_per_second": 0.58, "step": 50 }, { "epoch": 0.45, "grad_norm": 0.1745920479297638, "kl": 0.20441873371601105, "learning_rate": 0.0001905511811023622, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0962, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 60 }, { "epoch": 0.6, "grad_norm": 1.8645330667495728, "kl": 0.0, "learning_rate": 0.00018556430446194227, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0881, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 80 }, { "epoch": 0.76, "grad_norm": 0.13575485348701477, "kl": 0.0, "learning_rate": 0.00018031496062992125, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.073, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 100 }, { "epoch": 0.76, "eval_kl": 0.0, "eval_logps/chosen": -118.76765441894531, "eval_logps/rejected": -394.83203125, "eval_loss": 0.014343788847327232, "eval_rewards/chosen": 5.789826393127441, "eval_rewards/margins": 25.05623435974121, "eval_rewards/rejected": -19.266408920288086, "eval_runtime": 215.2593, "eval_samples_per_second": 2.323, "eval_steps_per_second": 0.581, "step": 100 }, { "epoch": 0.91, "grad_norm": 0.04445016011595726, "kl": 0.25088077783584595, "learning_rate": 0.0001750656167979003, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0312, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 120 }, { "epoch": 1.06, "grad_norm": 0.08600552380084991, "kl": 0.0, "learning_rate": 0.00016981627296587927, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0553, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 140 }, { "epoch": 1.13, "eval_kl": 0.0, "eval_logps/chosen": -118.54534149169922, "eval_logps/rejected": -501.9826354980469, "eval_loss": 0.022491294890642166, "eval_rewards/chosen": 5.812057018280029, "eval_rewards/margins": 35.79352569580078, "eval_rewards/rejected": -29.981468200683594, "eval_runtime": 215.2877, "eval_samples_per_second": 2.322, "eval_steps_per_second": 0.581, "step": 150 }, { "epoch": 1.21, "grad_norm": 1.7538838386535645, "kl": 0.08890169113874435, "learning_rate": 0.00016456692913385828, "logps/chosen": NaN, 
"logps/rejected": NaN, "loss": 0.0322, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 160 }, { "epoch": 1.36, "grad_norm": 0.1595699042081833, "kl": 0.0, "learning_rate": 0.00015931758530183726, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0108, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 180 }, { "epoch": 1.51, "grad_norm": 1.2758983373641968, "kl": 0.011286890134215355, "learning_rate": 0.0001540682414698163, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0232, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 200 }, { "epoch": 1.51, "eval_kl": 0.0, "eval_logps/chosen": -112.15116882324219, "eval_logps/rejected": -478.0784912109375, "eval_loss": 0.0047710007056593895, "eval_rewards/chosen": 6.451474189758301, "eval_rewards/margins": 34.04253005981445, "eval_rewards/rejected": -27.591054916381836, "eval_runtime": 215.2869, "eval_samples_per_second": 2.322, "eval_steps_per_second": 0.581, "step": 200 }, { "epoch": 1.66, "grad_norm": 0.04578682407736778, "kl": 0.08650239557027817, "learning_rate": 0.00014881889763779528, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0095, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 220 }, { "epoch": 1.81, "grad_norm": 4.036252498626709, "kl": 0.0, "learning_rate": 0.0001435695538057743, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0519, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 240 }, { "epoch": 1.89, "eval_kl": 0.0, "eval_logps/chosen": -111.8521728515625, "eval_logps/rejected": -507.0782470703125, "eval_loss": 0.008064903318881989, "eval_rewards/chosen": 6.481375217437744, "eval_rewards/margins": 36.97240447998047, "eval_rewards/rejected": -30.491031646728516, "eval_runtime": 215.2836, "eval_samples_per_second": 2.323, "eval_steps_per_second": 0.581, "step": 250 }, { "epoch": 1.97, "grad_norm": 0.027836302295327187, "kl": 0.10306696593761444, "learning_rate": 0.00013832020997375327, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0103, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 260 }, { "epoch": 2.12, "grad_norm": 0.5005787014961243, "kl": 0.0, "learning_rate": 0.0001330708661417323, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0185, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 280 }, { "epoch": 2.27, "grad_norm": 0.010965784080326557, "kl": 0.008135443553328514, "learning_rate": 0.0001278215223097113, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.0095, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 300 }, { "epoch": 2.27, "eval_kl": 0.0, "eval_logps/chosen": -112.58515930175781, "eval_logps/rejected": -541.0062866210938, "eval_loss": 0.0153852179646492, "eval_rewards/chosen": 6.408076286315918, "eval_rewards/margins": 40.29190444946289, "eval_rewards/rejected": -33.88383102416992, "eval_runtime": 215.4103, "eval_samples_per_second": 2.321, "eval_steps_per_second": 0.58, "step": 300 } ], "logging_steps": 20, "max_steps": 786, "num_input_tokens_seen": 0, "num_train_epochs": 6, "save_steps": 100, "total_flos": 0.0, "train_batch_size": 4, "trial_name": null, "trial_params": null }