VPO-Zephyr-7B-0.0005-iter-1 / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 5.776347249629427,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.832916021347046,
      "logits/rejected": -2.8954272270202637,
      "logps/chosen": -112.55461120605469,
      "logps/pi_response": -112.97522735595703,
      "logps/ref_response": -112.97522735595703,
      "logps/rejected": -126.53972625732422,
      "loss": 0.6925,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 5.912336712519558,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.7919647693634033,
      "logits/rejected": -2.7902114391326904,
      "logps/chosen": -150.03506469726562,
      "logps/pi_response": -152.3460693359375,
      "logps/ref_response": -152.45172119140625,
      "logps/rejected": -154.693115234375,
      "loss": 0.6924,
      "rewards/accuracies": 0.4305555522441864,
      "rewards/chosen": 0.0003976296284236014,
      "rewards/margins": -0.0001906168181449175,
      "rewards/rejected": 0.000588246330153197,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 6.02225171380915,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.822767972946167,
      "logits/rejected": -2.808192729949951,
      "logps/chosen": -147.93670654296875,
      "logps/pi_response": -148.9482421875,
      "logps/ref_response": -147.01174926757812,
      "logps/rejected": -150.41445922851562,
      "loss": 0.6928,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.022129487246274948,
      "rewards/margins": -0.0039032220374792814,
      "rewards/rejected": -0.0182262621819973,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 6.134054284556473,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.8247873783111572,
      "logits/rejected": -2.8113598823547363,
      "logps/chosen": -166.9477081298828,
      "logps/pi_response": -164.89968872070312,
      "logps/ref_response": -149.63009643554688,
      "logps/rejected": -166.7788848876953,
      "loss": 0.692,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.15240411460399628,
      "rewards/margins": -0.0001383073686156422,
      "rewards/rejected": -0.15226581692695618,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 6.3666889370862085,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.7281992435455322,
      "logits/rejected": -2.721857786178589,
      "logps/chosen": -158.48544311523438,
      "logps/pi_response": -156.80209350585938,
      "logps/ref_response": -143.953857421875,
      "logps/rejected": -156.04202270507812,
      "loss": 0.6908,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": -0.12262190878391266,
      "rewards/margins": 0.013718548230826855,
      "rewards/rejected": -0.1363404393196106,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 6.157460299989396,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.74588942527771,
      "logits/rejected": -2.745689868927002,
      "logps/chosen": -152.3079833984375,
      "logps/pi_response": -150.0457000732422,
      "logps/ref_response": -140.59339904785156,
      "logps/rejected": -147.81971740722656,
      "loss": 0.6908,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": -0.09621705859899521,
      "rewards/margins": 0.0019109401619061828,
      "rewards/rejected": -0.09812799841165543,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.6910505557464341,
      "train_runtime": 1298.8389,
      "train_samples_per_second": 11.767,
      "train_steps_per_second": 0.045
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
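
The `log_history` array above holds one entry per logging event (every `logging_steps` = 10 training steps, plus the first step), and a final end-of-run summary entry with `train_loss`, `train_runtime`, and throughput figures. The sketch below is a minimal example, not part of this repository, of reading the file and printing the preference-reward metrics per logged step; the local file path is an assumption (the file could equally be fetched with `huggingface_hub.hf_hub_download`).

```python
import json

# Minimal sketch: assumes trainer_state.json has been downloaded to the
# working directory; adjust the path as needed.
with open("trainer_state.json") as f:
    state = json.load(f)

print(f"global_step={state['global_step']}  epoch={state['epoch']:.4f}")

# Intermediate entries carry per-step training metrics; the last entry is
# the run summary (it has 'train_loss' instead of 'loss'), so skip it here.
for entry in state["log_history"]:
    if "loss" in entry:
        print(
            f"step {entry['step']:>3}  "
            f"loss={entry['loss']:.4f}  "
            f"reward_margin={entry['rewards/margins']:+.5f}  "
            f"accuracy={entry['rewards/accuracies']:.3f}"
        )
```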