|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 5e-08,
      "logits/chosen": -2.593817710876465,
      "logits/rejected": -2.562267780303955,
      "logps/chosen": -224.3258819580078,
      "logps/pi_response": -158.2955322265625,
      "logps/ref_response": -158.2955322265625,
      "logps/rejected": -466.1955871582031,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.17,
      "learning_rate": 2.9580343711206163e-07,
      "logits/chosen": -2.6071786880493164,
      "logits/rejected": -2.561267137527466,
      "logps/chosen": -257.0948791503906,
      "logps/pi_response": -139.04913330078125,
      "logps/ref_response": -134.45907592773438,
      "logps/rejected": -405.8514709472656,
      "loss": 0.674,
      "rewards/accuracies": 0.5798611044883728,
      "rewards/chosen": -0.08496704697608948,
      "rewards/margins": 0.058161403983831406,
      "rewards/rejected": -0.14312845468521118,
      "step": 10
    },
    {
      "epoch": 0.33,
      "learning_rate": 2.512474502277316e-07,
      "logits/chosen": -2.5841031074523926,
      "logits/rejected": -2.4879150390625,
      "logps/chosen": -301.86749267578125,
      "logps/pi_response": -155.46690368652344,
      "logps/ref_response": -146.185791015625,
      "logps/rejected": -503.9505920410156,
      "loss": 0.5738,
      "rewards/accuracies": 0.746874988079071,
      "rewards/chosen": -0.4705522954463959,
      "rewards/margins": 0.5787664651870728,
      "rewards/rejected": -1.049318790435791,
      "step": 20
    },
    {
      "epoch": 0.5,
      "learning_rate": 1.721469846003722e-07,
      "logits/chosen": -2.410111904144287,
      "logits/rejected": -2.3357787132263184,
      "logps/chosen": -362.79449462890625,
      "logps/pi_response": -168.09576416015625,
      "logps/ref_response": -146.45614624023438,
      "logps/rejected": -546.7628784179688,
      "loss": 0.52,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.852766215801239,
      "rewards/margins": 0.7539817094802856,
      "rewards/rejected": -1.6067478656768799,
      "step": 30
    },
    {
      "epoch": 0.67,
      "learning_rate": 8.549021965852197e-08,
      "logits/chosen": -2.417564630508423,
      "logits/rejected": -2.3170862197875977,
      "logps/chosen": -331.92822265625,
      "logps/pi_response": -158.364013671875,
      "logps/ref_response": -145.10653686523438,
      "logps/rejected": -582.767822265625,
      "loss": 0.5142,
      "rewards/accuracies": 0.746874988079071,
      "rewards/chosen": -0.6605563759803772,
      "rewards/margins": 0.9509684443473816,
      "rewards/rejected": -1.6115245819091797,
      "step": 40
    },
    {
      "epoch": 0.84,
      "learning_rate": 2.0843458234896666e-08,
      "logits/chosen": -2.4105136394500732,
      "logits/rejected": -2.3534531593322754,
      "logps/chosen": -345.56024169921875,
      "logps/pi_response": -149.22549438476562,
      "logps/ref_response": -138.38841247558594,
      "logps/rejected": -550.0933227539062,
      "loss": 0.5322,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.6944135427474976,
      "rewards/margins": 0.7579292058944702,
      "rewards/rejected": -1.4523428678512573,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.5597076092736196,
      "train_runtime": 3500.8727,
      "train_samples_per_second": 4.365,
      "train_steps_per_second": 0.017
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|