{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.985781990521327,
"eval_steps": 100,
"global_step": 52,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 265.94108817940406,
"learning_rate": 8.333333333333333e-08,
"logits/chosen": 123.11854553222656,
"logits/rejected": 97.00198364257812,
"logps/chosen": -425.18585205078125,
"logps/rejected": -424.1869201660156,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.19,
"grad_norm": 271.5503719388604,
"learning_rate": 4.907293218369498e-07,
"logits/chosen": 117.11209869384766,
"logits/rejected": 135.9506378173828,
"logps/chosen": -441.22607421875,
"logps/rejected": -524.04638671875,
"loss": 0.7822,
"rewards/accuracies": 0.5486111044883728,
"rewards/chosen": 0.21623480319976807,
"rewards/margins": 0.20938372611999512,
"rewards/rejected": 0.0068510971032083035,
"step": 10
},
{
"epoch": 0.38,
"grad_norm": 209.9713525910076,
"learning_rate": 3.941700805287168e-07,
"logits/chosen": 125.7469482421875,
"logits/rejected": 133.27896118164062,
"logps/chosen": -429.1166076660156,
"logps/rejected": -501.01220703125,
"loss": 0.6478,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.28519946336746216,
"rewards/margins": 1.0649617910385132,
"rewards/rejected": -1.3501613140106201,
"step": 20
},
{
"epoch": 0.57,
"grad_norm": 191.69712594247977,
"learning_rate": 2.3293939665883228e-07,
"logits/chosen": 124.35345458984375,
"logits/rejected": 127.7409896850586,
"logps/chosen": -479.87322998046875,
"logps/rejected": -545.6209716796875,
"loss": 0.6668,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -2.475310802459717,
"rewards/margins": 1.3653061389923096,
"rewards/rejected": -3.8406174182891846,
"step": 30
},
{
"epoch": 0.76,
"grad_norm": 207.15105323033092,
"learning_rate": 7.936171419533652e-08,
"logits/chosen": 123.809326171875,
"logits/rejected": 118.8666763305664,
"logps/chosen": -496.2925720214844,
"logps/rejected": -518.7486572265625,
"loss": 0.6648,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -2.3111202716827393,
"rewards/margins": 1.0872735977172852,
"rewards/rejected": -3.3983941078186035,
"step": 40
},
{
"epoch": 0.95,
"grad_norm": 220.98535059164251,
"learning_rate": 2.328513490917311e-09,
"logits/chosen": 132.80233764648438,
"logits/rejected": 133.139892578125,
"logps/chosen": -503.554443359375,
"logps/rejected": -525.2648315429688,
"loss": 0.6049,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -2.073172092437744,
"rewards/margins": 1.3284822702407837,
"rewards/rejected": -3.401654005050659,
"step": 50
},
{
"epoch": 0.99,
"step": 52,
"total_flos": 0.0,
"train_loss": 0.6733019076860868,
"train_runtime": 575.7905,
"train_samples_per_second": 11.723,
"train_steps_per_second": 0.09
}
],
"logging_steps": 10,
"max_steps": 52,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}