gemma-7b-dpo-full-orca-v0 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9919137466307277,
"eval_steps": 100,
"global_step": 92,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 381.81930096475645,
"learning_rate": 5e-08,
"logits/chosen": 110.69648742675781,
"logits/rejected": 144.8971710205078,
"logps/chosen": -802.6702880859375,
"logps/rejected": -789.908203125,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.11,
"grad_norm": 145.3169996556915,
"learning_rate": 5e-07,
"logits/chosen": 104.16372680664062,
"logits/rejected": 147.7155303955078,
"logps/chosen": -773.0805053710938,
"logps/rejected": -786.1904907226562,
"loss": 0.5786,
"rewards/accuracies": 0.5416666865348816,
"rewards/chosen": -0.04775738716125488,
"rewards/margins": 0.49189090728759766,
"rewards/rejected": -0.5396482944488525,
"step": 10
},
{
"epoch": 0.22,
"grad_norm": 123.89226507692392,
"learning_rate": 4.818756127755237e-07,
"logits/chosen": 114.03608703613281,
"logits/rejected": 162.7917022705078,
"logps/chosen": -765.4005737304688,
"logps/rejected": -855.3450927734375,
"loss": 0.1177,
"rewards/accuracies": 0.96875,
"rewards/chosen": 0.9601849317550659,
"rewards/margins": 7.298236846923828,
"rewards/rejected": -6.3380513191223145,
"step": 20
},
{
"epoch": 0.32,
"grad_norm": 34.79847456534606,
"learning_rate": 4.301303984001967e-07,
"logits/chosen": 103.0413818359375,
"logits/rejected": 154.4637908935547,
"logps/chosen": -750.0765380859375,
"logps/rejected": -893.15478515625,
"loss": 0.0543,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 4.489648818969727,
"rewards/margins": 12.956143379211426,
"rewards/rejected": -8.4664945602417,
"step": 30
},
{
"epoch": 0.43,
"grad_norm": 23.791340435812334,
"learning_rate": 3.52267159292835e-07,
"logits/chosen": 104.96720123291016,
"logits/rejected": 149.83804321289062,
"logps/chosen": -773.8189697265625,
"logps/rejected": -879.3125,
"loss": 0.0344,
"rewards/accuracies": 0.981249988079071,
"rewards/chosen": 4.7802629470825195,
"rewards/margins": 12.39081859588623,
"rewards/rejected": -7.610556125640869,
"step": 40
},
{
"epoch": 0.54,
"grad_norm": 15.09725764927681,
"learning_rate": 2.5957568342250883e-07,
"logits/chosen": 92.78871154785156,
"logits/rejected": 145.62005615234375,
"logps/chosen": -734.4014892578125,
"logps/rejected": -871.9119262695312,
"loss": 0.0152,
"rewards/accuracies": 1.0,
"rewards/chosen": 5.088149547576904,
"rewards/margins": 11.76667594909668,
"rewards/rejected": -6.678526401519775,
"step": 50
},
{
"epoch": 0.65,
"grad_norm": 6.1316228742026295,
"learning_rate": 1.6549578039787434e-07,
"logits/chosen": 103.61817932128906,
"logits/rejected": 147.3481903076172,
"logps/chosen": -762.0783081054688,
"logps/rejected": -869.6064453125,
"loss": 0.0127,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.692760944366455,
"rewards/margins": 11.84466552734375,
"rewards/rejected": -7.1519036293029785,
"step": 60
},
{
"epoch": 0.75,
"grad_norm": 8.649773974511314,
"learning_rate": 8.366857495860869e-08,
"logits/chosen": 100.9188232421875,
"logits/rejected": 142.58493041992188,
"logps/chosen": -742.1723022460938,
"logps/rejected": -868.6980590820312,
"loss": 0.0099,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.574300289154053,
"rewards/margins": 11.782574653625488,
"rewards/rejected": -7.208274841308594,
"step": 70
},
{
"epoch": 0.86,
"grad_norm": 14.86224162055759,
"learning_rate": 2.5958610759736126e-08,
"logits/chosen": 105.79638671875,
"logits/rejected": 157.96678161621094,
"logps/chosen": -737.038818359375,
"logps/rejected": -870.8370971679688,
"loss": 0.0089,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.09164571762085,
"rewards/margins": 12.486515045166016,
"rewards/rejected": -8.394868850708008,
"step": 80
},
{
"epoch": 0.97,
"grad_norm": 6.750575547661943,
"learning_rate": 7.335497040648897e-10,
"logits/chosen": 102.36738586425781,
"logits/rejected": 147.32630920410156,
"logps/chosen": -761.7495727539062,
"logps/rejected": -880.4935302734375,
"loss": 0.009,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.6002397537231445,
"rewards/margins": 12.243142127990723,
"rewards/rejected": -7.642902374267578,
"step": 90
},
{
"epoch": 0.99,
"step": 92,
"total_flos": 0.0,
"train_loss": 0.0926785598124337,
"train_runtime": 1161.69,
"train_samples_per_second": 10.208,
"train_steps_per_second": 0.079
}
],
"logging_steps": 10,
"max_steps": 92,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
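
The log above can also be inspected programmatically. What follows is a minimal sketch, assuming the file has been saved locally as trainer_state.json (the path is an assumption, not part of the original), that loads the log_history entries and prints the DPO loss, reward margin, and accuracy recorded at each logged step, plus the final training summary.

# Minimal sketch for inspecting this trainer_state.json offline.
# Assumptions: the file is saved locally as "trainer_state.json" and the field
# names match the metrics logged above (loss, rewards/margins, rewards/accuracies).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(f"epochs: {state['num_train_epochs']}, max_steps: {state['max_steps']}, "
      f"train_batch_size: {state['train_batch_size']}")

# Per-step entries carry a "loss" key; the closing summary entry carries "train_loss" instead.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>3}  epoch {entry['epoch']:.2f}  "
              f"loss {entry['loss']:.4f}  "
              f"margin {entry['rewards/margins']:.3f}  "
              f"accuracy {entry['rewards/accuracies']:.3f}")
    elif "train_loss" in entry:
        print(f"final: train_loss {entry['train_loss']:.4f}  "
              f"runtime {entry['train_runtime']:.1f}s  "
              f"samples/s {entry['train_samples_per_second']:.3f}")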