{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.99581589958159,
"eval_steps": 500,
"global_step": 119,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008368200836820083,
"grad_norm": 13.7675517674306,
"learning_rate": 4.166666666666666e-08,
"logits/chosen": -2.6905035972595215,
"logits/rejected": -2.6002821922302246,
"logps/chosen": -238.50418090820312,
"logps/pi_response": -60.972923278808594,
"logps/ref_response": -60.972923278808594,
"logps/rejected": -122.8811264038086,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.08368200836820083,
"grad_norm": 11.556330314168122,
"learning_rate": 4.1666666666666667e-07,
"logits/chosen": -2.7245476245880127,
"logits/rejected": -2.6954076290130615,
"logps/chosen": -223.26068115234375,
"logps/pi_response": -68.41806030273438,
"logps/ref_response": -68.24886322021484,
"logps/rejected": -108.38255310058594,
"loss": 0.6909,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.00402547512203455,
"rewards/margins": 0.00467924727126956,
"rewards/rejected": -0.0006537723238579929,
"step": 10
},
{
"epoch": 0.16736401673640167,
"grad_norm": 8.0051562247015,
"learning_rate": 4.931352528237397e-07,
"logits/chosen": -2.7007386684417725,
"logits/rejected": -2.6637234687805176,
"logps/chosen": -239.10507202148438,
"logps/pi_response": -75.34381866455078,
"logps/ref_response": -65.86756896972656,
"logps/rejected": -120.88731384277344,
"loss": 0.6538,
"rewards/accuracies": 0.768750011920929,
"rewards/chosen": 0.02172889932990074,
"rewards/margins": 0.08931762725114822,
"rewards/rejected": -0.06758873164653778,
"step": 20
},
{
"epoch": 0.2510460251046025,
"grad_norm": 6.2838771200093735,
"learning_rate": 4.658920803689553e-07,
"logits/chosen": -2.5834615230560303,
"logits/rejected": -2.541572093963623,
"logps/chosen": -234.6986541748047,
"logps/pi_response": -112.6206283569336,
"logps/ref_response": -70.35834503173828,
"logps/rejected": -146.62071228027344,
"loss": 0.6128,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -0.1661997139453888,
"rewards/margins": 0.1909169852733612,
"rewards/rejected": -0.35711669921875,
"step": 30
},
{
"epoch": 0.33472803347280333,
"grad_norm": 8.1669760332498,
"learning_rate": 4.201712553872657e-07,
"logits/chosen": -2.544977903366089,
"logits/rejected": -2.512420415878296,
"logps/chosen": -266.4893798828125,
"logps/pi_response": -148.94216918945312,
"logps/ref_response": -70.82736206054688,
"logps/rejected": -183.97122192382812,
"loss": 0.5908,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.31368952989578247,
"rewards/margins": 0.3473220467567444,
"rewards/rejected": -0.6610115170478821,
"step": 40
},
{
"epoch": 0.41841004184100417,
"grad_norm": 10.22318295333062,
"learning_rate": 3.598859066780754e-07,
"logits/chosen": -2.5687315464019775,
"logits/rejected": -2.520003318786621,
"logps/chosen": -306.9870300292969,
"logps/pi_response": -178.8148956298828,
"logps/ref_response": -73.63240814208984,
"logps/rejected": -191.5988006591797,
"loss": 0.5573,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.37883269786834717,
"rewards/margins": 0.5206230282783508,
"rewards/rejected": -0.899455726146698,
"step": 50
},
{
"epoch": 0.502092050209205,
"grad_norm": 12.014191391493059,
"learning_rate": 2.9019570347986706e-07,
"logits/chosen": -2.542023181915283,
"logits/rejected": -2.512960195541382,
"logps/chosen": -290.0708923339844,
"logps/pi_response": -188.24777221679688,
"logps/ref_response": -76.79859924316406,
"logps/rejected": -204.185791015625,
"loss": 0.5334,
"rewards/accuracies": 0.8125,
"rewards/chosen": -0.3407396078109741,
"rewards/margins": 0.7002220153808594,
"rewards/rejected": -1.0409616231918335,
"step": 60
},
{
"epoch": 0.5857740585774058,
"grad_norm": 11.306193009663613,
"learning_rate": 2.1706525253979534e-07,
"logits/chosen": -2.555372476577759,
"logits/rejected": -2.517289400100708,
"logps/chosen": -280.76043701171875,
"logps/pi_response": -182.2036895751953,
"logps/ref_response": -66.60467529296875,
"logps/rejected": -207.8173828125,
"loss": 0.549,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -0.4452129900455475,
"rewards/margins": 0.5477712750434875,
"rewards/rejected": -0.9929842948913574,
"step": 70
},
{
"epoch": 0.6694560669456067,
"grad_norm": 17.500798301996966,
"learning_rate": 1.4675360263490295e-07,
"logits/chosen": -2.516525983810425,
"logits/rejected": -2.505002021789551,
"logps/chosen": -231.9325714111328,
"logps/pi_response": -168.99024963378906,
"logps/ref_response": -66.72526550292969,
"logps/rejected": -189.8736114501953,
"loss": 0.5258,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.42112892866134644,
"rewards/margins": 0.49120765924453735,
"rewards/rejected": -0.912336528301239,
"step": 80
},
{
"epoch": 0.7531380753138075,
"grad_norm": 16.641411155096407,
"learning_rate": 8.527854855097224e-08,
"logits/chosen": -2.596357822418213,
"logits/rejected": -2.5533790588378906,
"logps/chosen": -280.3979187011719,
"logps/pi_response": -187.88308715820312,
"logps/ref_response": -65.7259750366211,
"logps/rejected": -200.82089233398438,
"loss": 0.5428,
"rewards/accuracies": 0.8187500238418579,
"rewards/chosen": -0.4766550064086914,
"rewards/margins": 0.6347955465316772,
"rewards/rejected": -1.1114505529403687,
"step": 90
},
{
"epoch": 0.8368200836820083,
"grad_norm": 12.985065267871795,
"learning_rate": 3.790158337517127e-08,
"logits/chosen": -2.542086124420166,
"logits/rejected": -2.512556552886963,
"logps/chosen": -261.39459228515625,
"logps/pi_response": -190.4388427734375,
"logps/ref_response": -67.59146881103516,
"logps/rejected": -229.26296997070312,
"loss": 0.5035,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.5009726285934448,
"rewards/margins": 0.5954475998878479,
"rewards/rejected": -1.096420168876648,
"step": 100
},
{
"epoch": 0.9205020920502092,
"grad_norm": 20.329401448237313,
"learning_rate": 8.677580722139671e-09,
"logits/chosen": -2.6092028617858887,
"logits/rejected": -2.5456957817077637,
"logps/chosen": -294.7940979003906,
"logps/pi_response": -201.2043914794922,
"logps/ref_response": -79.07398223876953,
"logps/rejected": -223.47451782226562,
"loss": 0.5098,
"rewards/accuracies": 0.793749988079071,
"rewards/chosen": -0.3964020609855652,
"rewards/margins": 0.7144461870193481,
"rewards/rejected": -1.110848307609558,
"step": 110
},
{
"epoch": 0.99581589958159,
"step": 119,
"total_flos": 0.0,
"train_loss": 0.5697346855612362,
"train_runtime": 2586.0024,
"train_samples_per_second": 5.91,
"train_steps_per_second": 0.046
}
],
"logging_steps": 10,
"max_steps": 119,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
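
A minimal sketch for inspecting the log above, assuming it is saved as a Hugging Face Transformers trainer_state.json (the filename and path are assumptions, not stated in the file itself). It loads the JSON and prints the metrics recorded every logging_steps (10) optimizer steps, plus the run-level totals from the final entry:

import json

# Assumed path; point this at wherever the state file actually lives.
with open("trainer_state.json") as f:
    state = json.load(f)

# Intermediate log_history entries hold per-interval training metrics;
# the last entry holds run-level totals (train_loss, runtime, throughput).
for entry in state["log_history"]:
    if "loss" in entry:
        print(
            f"step {entry['step']:>3} | "
            f"loss {entry['loss']:.4f} | "
            f"reward margin {entry['rewards/margins']:.4f} | "
            f"accuracy {entry['rewards/accuracies']:.3f}"
        )
    else:
        print(
            f"final: train_loss {entry['train_loss']:.4f} "
            f"over {entry['step']} steps "
            f"({entry['train_runtime']:.1f}s)"
        )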