{
  "best_metric": 0.5667449831962585,
  "best_model_checkpoint": "./Mistral/13-03-24-Weni-ZeroShot-3.4.6-Mistral-7b-DPO-1.0.0_ZeroShot DPO Training a improved dataset and best hyperparameters found so far-2_max_steps-288_batch_32_2024-03-13_ppid_9/checkpoint-100",
  "epoch": 2.0408163265306123,
  "eval_steps": 100,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.41,
      "grad_norm": 4.933152675628662,
      "learning_rate": 1.3793103448275862e-06,
      "logits/chosen": -1.3310586214065552,
      "logits/rejected": -1.2998794317245483,
      "logps/chosen": -17.44550895690918,
      "logps/rejected": -15.287511825561523,
      "loss": 0.6887,
      "rewards/accuracies": 0.6078125238418579,
      "rewards/chosen": 0.012908421456813812,
      "rewards/margins": 0.00912781897932291,
      "rewards/rejected": 0.0037806027103215456,
      "step": 20
    },
    {
      "epoch": 0.82,
      "grad_norm": 1.840120553970337,
      "learning_rate": 1.915057915057915e-06,
      "logits/chosen": -1.342775583267212,
      "logits/rejected": -1.3136231899261475,
      "logps/chosen": -16.57306480407715,
      "logps/rejected": -15.076986312866211,
      "loss": 0.6579,
      "rewards/accuracies": 0.7734375,
      "rewards/chosen": 0.10680261999368668,
      "rewards/margins": 0.08031970262527466,
      "rewards/rejected": 0.02648291550576687,
      "step": 40
    },
    {
      "epoch": 1.22,
      "grad_norm": 2.2041518688201904,
      "learning_rate": 1.7606177606177606e-06,
      "logits/chosen": -1.3282166719436646,
      "logits/rejected": -1.300879716873169,
      "logps/chosen": -15.91553783416748,
      "logps/rejected": -15.11401653289795,
      "loss": 0.6314,
      "rewards/accuracies": 0.785937488079071,
      "rewards/chosen": 0.16994965076446533,
      "rewards/margins": 0.14337585866451263,
      "rewards/rejected": 0.026573771610856056,
      "step": 60
    },
    {
      "epoch": 1.63,
      "grad_norm": 1.4858603477478027,
      "learning_rate": 1.606177606177606e-06,
      "logits/chosen": -1.3553317785263062,
      "logits/rejected": -1.3338539600372314,
      "logps/chosen": -15.328280448913574,
      "logps/rejected": -15.158895492553711,
      "loss": 0.6066,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": 0.2215496301651001,
      "rewards/margins": 0.20472590625286102,
      "rewards/rejected": 0.016823694109916687,
      "step": 80
    },
    {
      "epoch": 2.04,
      "grad_norm": 1.4387667179107666,
      "learning_rate": 1.4517374517374517e-06,
      "logits/chosen": -1.3066260814666748,
      "logits/rejected": -1.282898187637329,
      "logps/chosen": -14.973272323608398,
      "logps/rejected": -15.511428833007812,
      "loss": 0.5697,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": 0.2805514931678772,
      "rewards/margins": 0.2973305881023407,
      "rewards/rejected": -0.016779109835624695,
      "step": 100
    },
    {
      "epoch": 2.04,
      "eval_logits/chosen": -1.3297994136810303,
      "eval_logits/rejected": -1.3079336881637573,
      "eval_logps/chosen": -14.785962104797363,
      "eval_logps/rejected": -15.702004432678223,
      "eval_loss": 0.5667449831962585,
      "eval_rewards/accuracies": 0.7651515603065491,
      "eval_rewards/chosen": 0.30174848437309265,
      "eval_rewards/margins": 0.31671077013015747,
      "eval_rewards/rejected": -0.014962326735258102,
      "eval_runtime": 81.5286,
      "eval_samples_per_second": 2.134,
      "eval_steps_per_second": 0.27,
      "step": 100
    }
  ],
  "logging_steps": 20,
  "max_steps": 288,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}