{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 1.6666666666666664e-08,
      "logits/chosen": 0.22308249771595,
      "logits/rejected": 2.386685371398926,
      "logps/chosen": -637.75146484375,
      "logps/pi_response": -59.625553131103516,
      "logps/ref_response": -59.625553131103516,
      "logps/rejected": -297.7115783691406,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.17,
      "learning_rate": 9.860114570402053e-08,
      "logits/chosen": 0.7615225315093994,
      "logits/rejected": 2.1085410118103027,
      "logps/chosen": -559.4776611328125,
      "logps/pi_response": -72.19044494628906,
      "logps/ref_response": -68.9166030883789,
      "logps/rejected": -288.6199645996094,
      "loss": 0.6639,
      "rewards/accuracies": 0.5972222089767456,
      "rewards/chosen": 0.042805079370737076,
      "rewards/margins": 0.06203806400299072,
      "rewards/rejected": -0.019232982769608498,
      "step": 10
    },
    {
      "epoch": 0.33,
      "learning_rate": 8.374915007591053e-08,
      "logits/chosen": 0.3314076066017151,
      "logits/rejected": 1.314693570137024,
      "logps/chosen": -558.7711791992188,
      "logps/pi_response": -875.009765625,
      "logps/ref_response": -72.45027923583984,
      "logps/rejected": -934.8829345703125,
      "loss": 0.3538,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": 0.5102975964546204,
      "rewards/margins": 7.6188764572143555,
      "rewards/rejected": -7.108579158782959,
      "step": 20
    },
    {
      "epoch": 0.5,
      "learning_rate": 5.738232820012406e-08,
      "logits/chosen": 0.21183089911937714,
      "logits/rejected": 0.8000686764717102,
      "logps/chosen": -564.4741821289062,
      "logps/pi_response": -3023.58251953125,
      "logps/ref_response": -92.48184204101562,
      "logps/rejected": -2812.103759765625,
      "loss": 0.2933,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": 0.33371633291244507,
      "rewards/margins": 25.731952667236328,
      "rewards/rejected": -25.398237228393555,
      "step": 30
    },
    {
      "epoch": 0.67,
      "learning_rate": 2.8496739886173992e-08,
      "logits/chosen": 0.4574252963066101,
      "logits/rejected": 0.48074570298194885,
      "logps/chosen": -538.3971557617188,
      "logps/pi_response": -5107.06103515625,
      "logps/ref_response": -71.75224304199219,
      "logps/rejected": -4723.8134765625,
      "loss": 0.0916,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.3258439898490906,
      "rewards/margins": 44.844539642333984,
      "rewards/rejected": -44.51869583129883,
      "step": 40
    },
    {
      "epoch": 0.84,
      "learning_rate": 6.947819411632222e-09,
      "logits/chosen": 0.4396973252296448,
      "logits/rejected": 0.41408687829971313,
      "logps/chosen": -516.14990234375,
      "logps/pi_response": -5759.0869140625,
      "logps/ref_response": -65.65478515625,
      "logps/rejected": -5587.03369140625,
      "loss": -0.0369,
      "rewards/accuracies": 0.8031250238418579,
      "rewards/chosen": 0.5476996898651123,
      "rewards/margins": 53.79410934448242,
      "rewards/rejected": -53.24641799926758,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.22220247175733923,
      "train_runtime": 3779.3916,
      "train_samples_per_second": 4.044,
      "train_steps_per_second": 0.016
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}