{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 7.772319002734789,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.807276487350464,
      "logits/rejected": -2.7759768962860107,
      "logps/chosen": -315.42626953125,
      "logps/rejected": -227.5915985107422,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 7.445029419543799,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.755617618560791,
      "logits/rejected": -2.7461938858032227,
      "logps/chosen": -271.96563720703125,
      "logps/rejected": -260.71453857421875,
      "loss": 0.6914,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.005042961798608303,
      "rewards/margins": 0.0035791893023997545,
      "rewards/rejected": 0.0014637727290391922,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 6.722931161787328,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.7870638370513916,
      "logits/rejected": -2.7682018280029297,
      "logps/chosen": -261.4487609863281,
      "logps/rejected": -250.8934783935547,
      "loss": 0.6745,
      "rewards/accuracies": 0.715624988079071,
      "rewards/chosen": 0.03232940658926964,
      "rewards/margins": 0.04083934798836708,
      "rewards/rejected": -0.008509937673807144,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 7.3904115438921405,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.788241386413574,
      "logits/rejected": -2.769221544265747,
      "logps/chosen": -293.80859375,
      "logps/rejected": -254.22073364257812,
      "loss": 0.6486,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.02352513000369072,
      "rewards/margins": 0.12710139155387878,
      "rewards/rejected": -0.150626540184021,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 8.14675940130506,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.761005401611328,
      "logits/rejected": -2.738503932952881,
      "logps/chosen": -264.15814208984375,
      "logps/rejected": -255.6121826171875,
      "loss": 0.6354,
      "rewards/accuracies": 0.690625011920929,
      "rewards/chosen": -0.11238692700862885,
      "rewards/margins": 0.16062822937965393,
      "rewards/rejected": -0.2730151116847992,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 8.52526513305357,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.7708582878112793,
      "logits/rejected": -2.750849962234497,
      "logps/chosen": -279.1473083496094,
      "logps/rejected": -287.328857421875,
      "loss": 0.6201,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -0.22138634324073792,
      "rewards/margins": 0.13677750527858734,
      "rewards/rejected": -0.35816383361816406,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.649035688173973,
      "train_runtime": 1642.5856,
      "train_samples_per_second": 9.304,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}