{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 117.7337967450897,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": 0.5924187898635864,
      "logits/rejected": 2.4197378158569336,
      "logps/chosen": -481.50433349609375,
      "logps/pi_response": -57.85610580444336,
      "logps/ref_response": -57.85610580444336,
      "logps/rejected": -407.123291015625,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 114.88052445634541,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": 1.0167477130889893,
      "logits/rejected": 1.715598464012146,
      "logps/chosen": -461.42584228515625,
      "logps/pi_response": -612.001708984375,
      "logps/ref_response": -43.13614273071289,
      "logps/rejected": -751.348388671875,
      "loss": 0.5847,
      "rewards/accuracies": 0.6041666865348816,
      "rewards/chosen": 0.18106825649738312,
      "rewards/margins": 5.178502082824707,
      "rewards/rejected": -4.997433185577393,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 74.42064867560319,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": 0.9024032354354858,
      "logits/rejected": 1.7998549938201904,
      "logps/chosen": -459.34100341796875,
      "logps/pi_response": -839.3756713867188,
      "logps/ref_response": -48.8006591796875,
      "logps/rejected": -996.8543090820312,
      "loss": 0.3541,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": 0.46666306257247925,
      "rewards/margins": 7.785676002502441,
      "rewards/rejected": -7.319011688232422,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 130.4626377347778,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": 2.6123948097229004,
      "logits/rejected": 3.322319507598877,
      "logps/chosen": -594.0442504882812,
      "logps/pi_response": -878.4356689453125,
      "logps/ref_response": -42.91078567504883,
      "logps/rejected": -1136.06591796875,
      "loss": 0.2937,
      "rewards/accuracies": 0.890625,
      "rewards/chosen": -1.1390485763549805,
      "rewards/margins": 7.548802852630615,
      "rewards/rejected": -8.68785285949707,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 53.877472153382854,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": 2.7364916801452637,
      "logits/rejected": 3.1835875511169434,
      "logps/chosen": -542.8189697265625,
      "logps/pi_response": -965.1339721679688,
      "logps/ref_response": -44.70442581176758,
      "logps/rejected": -1189.79150390625,
      "loss": 0.2283,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": -0.6533902883529663,
      "rewards/margins": 8.359137535095215,
      "rewards/rejected": -9.012528419494629,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 56.17404888873974,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": 2.6901650428771973,
      "logits/rejected": 3.5459206104278564,
      "logps/chosen": -530.8473510742188,
      "logps/pi_response": -472.94268798828125,
      "logps/ref_response": -48.817832946777344,
      "logps/rejected": -734.7946166992188,
      "loss": 0.2278,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.4413676857948303,
      "rewards/margins": 4.092815399169922,
      "rewards/rejected": -4.534182548522949,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.3167330996464875,
      "train_runtime": 2832.9137,
      "train_samples_per_second": 5.395,
      "train_steps_per_second": 0.021
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}