{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 11.462343454171908,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.824117660522461,
      "logits/rejected": -2.7380800247192383,
      "logps/chosen": -202.6835479736328,
      "logps/pi_response": -110.02621459960938,
      "logps/ref_response": -110.02621459960938,
      "logps/rejected": -213.89146423339844,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 11.923945925166176,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.747312307357788,
      "logits/rejected": -2.7030231952667236,
      "logps/chosen": -212.33128356933594,
      "logps/pi_response": -115.43215942382812,
      "logps/ref_response": -114.7027587890625,
      "logps/rejected": -262.1092529296875,
      "loss": 0.6856,
      "rewards/accuracies": 0.5381944179534912,
      "rewards/chosen": -0.033359646797180176,
      "rewards/margins": 0.023605287075042725,
      "rewards/rejected": -0.0569649264216423,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 26.32112815002961,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.7702105045318604,
      "logits/rejected": -2.7281694412231445,
      "logps/chosen": -251.9454803466797,
      "logps/pi_response": -113.5649642944336,
      "logps/ref_response": -116.27818298339844,
      "logps/rejected": -307.8755798339844,
      "loss": 0.6305,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.23709583282470703,
      "rewards/margins": 0.2704735994338989,
      "rewards/rejected": -0.507569432258606,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 16.774434593568316,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.753960132598877,
      "logits/rejected": -2.7110753059387207,
      "logps/chosen": -285.9163513183594,
      "logps/pi_response": -142.59048461914062,
      "logps/ref_response": -119.65989685058594,
      "logps/rejected": -355.4615173339844,
      "loss": 0.5856,
      "rewards/accuracies": 0.690625011920929,
      "rewards/chosen": -0.5269377827644348,
      "rewards/margins": 0.4690930247306824,
      "rewards/rejected": -0.9960308074951172,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 16.80898412201918,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.6672911643981934,
      "logits/rejected": -2.6349196434020996,
      "logps/chosen": -276.1371154785156,
      "logps/pi_response": -147.93870544433594,
      "logps/ref_response": -113.15153503417969,
      "logps/rejected": -365.1174011230469,
      "loss": 0.5512,
      "rewards/accuracies": 0.684374988079071,
      "rewards/chosen": -0.5750434994697571,
      "rewards/margins": 0.5344008803367615,
      "rewards/rejected": -1.1094443798065186,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 20.73271112896151,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.63154673576355,
      "logits/rejected": -2.579827308654785,
      "logps/chosen": -283.89990234375,
      "logps/pi_response": -160.52090454101562,
      "logps/ref_response": -112.18473815917969,
      "logps/rejected": -355.16339111328125,
      "loss": 0.5406,
      "rewards/accuracies": 0.715624988079071,
      "rewards/chosen": -0.6666939854621887,
      "rewards/margins": 0.5634760856628418,
      "rewards/rejected": -1.2301701307296753,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.5881464764223261,
      "train_runtime": 2654.8514,
      "train_samples_per_second": 5.757,
      "train_steps_per_second": 0.022
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}