{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9874476987447699,
"eval_steps": 500,
"global_step": 59,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016736401673640166,
"grad_norm": 28.310565581646102,
"learning_rate": 8.333333333333333e-08,
"logits/chosen": -2.282123565673828,
"logits/rejected": -2.180680274963379,
"logps/chosen": -366.17987060546875,
"logps/rejected": -389.84954833984375,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.16736401673640167,
"grad_norm": 26.63593514238783,
"learning_rate": 4.930057285201027e-07,
"logits/chosen": -2.1524295806884766,
"logits/rejected": -2.1412665843963623,
"logps/chosen": -333.9299011230469,
"logps/rejected": -436.9288330078125,
"loss": 0.6639,
"rewards/accuracies": 0.6493055820465088,
"rewards/chosen": -0.07371152937412262,
"rewards/margins": 0.06567458808422089,
"rewards/rejected": -0.1393861174583435,
"step": 10
},
{
"epoch": 0.33472803347280333,
"grad_norm": 45.04577240979691,
"learning_rate": 4.187457503795526e-07,
"logits/chosen": -1.623558759689331,
"logits/rejected": -1.549423336982727,
"logps/chosen": -467.49139404296875,
"logps/rejected": -620.3740844726562,
"loss": 0.5604,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -1.3230326175689697,
"rewards/margins": 0.725892961025238,
"rewards/rejected": -2.0489256381988525,
"step": 20
},
{
"epoch": 0.502092050209205,
"grad_norm": 25.13032401537719,
"learning_rate": 2.8691164100062034e-07,
"logits/chosen": -1.4312714338302612,
"logits/rejected": -1.241246223449707,
"logps/chosen": -468.46240234375,
"logps/rejected": -618.2733764648438,
"loss": 0.5045,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.284248948097229,
"rewards/margins": 0.9667768478393555,
"rewards/rejected": -2.251025676727295,
"step": 30
},
{
"epoch": 0.6694560669456067,
"grad_norm": 28.72537330625965,
"learning_rate": 1.4248369943086995e-07,
"logits/chosen": -1.2682111263275146,
"logits/rejected": -1.0946087837219238,
"logps/chosen": -389.8838806152344,
"logps/rejected": -542.1454467773438,
"loss": 0.4978,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -0.8395187258720398,
"rewards/margins": 0.7310727834701538,
"rewards/rejected": -1.570591688156128,
"step": 40
},
{
"epoch": 0.8368200836820083,
"grad_norm": 32.05290862112005,
"learning_rate": 3.473909705816111e-08,
"logits/chosen": -1.0692652463912964,
"logits/rejected": -0.9052573442459106,
"logps/chosen": -400.3882141113281,
"logps/rejected": -565.8882446289062,
"loss": 0.4863,
"rewards/accuracies": 0.746874988079071,
"rewards/chosen": -0.8596526980400085,
"rewards/margins": 0.7219886183738708,
"rewards/rejected": -1.581641435623169,
"step": 50
},
{
"epoch": 0.9874476987447699,
"step": 59,
"total_flos": 0.0,
"train_loss": 0.5407808513964637,
"train_runtime": 1626.1325,
"train_samples_per_second": 9.398,
"train_steps_per_second": 0.036
}
],
"logging_steps": 10,
"max_steps": 59,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}