zephyr-7b-gemma-dpo / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.971563981042654,
  "eval_steps": 100,
  "global_step": 104,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.018957345971563982,
      "grad_norm": 133.64647421295854,
      "learning_rate": 4.545454545454545e-08,
      "logits/chosen": 117.4909439086914,
      "logits/rejected": 126.8502426147461,
      "logps/chosen": -338.3250732421875,
      "logps/rejected": -438.210205078125,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.1895734597156398,
      "grad_norm": 137.17714765050428,
      "learning_rate": 4.545454545454545e-07,
      "logits/chosen": 135.0139923095703,
      "logits/rejected": 138.361328125,
      "logps/chosen": -397.126220703125,
      "logps/rejected": -439.42083740234375,
      "loss": 0.7143,
      "rewards/accuracies": 0.3888888955116272,
      "rewards/chosen": -0.02168009988963604,
      "rewards/margins": -0.04445798695087433,
      "rewards/rejected": 0.02277788519859314,
      "step": 10
    },
    {
      "epoch": 0.3791469194312796,
      "grad_norm": 125.18497680199994,
      "learning_rate": 4.885348141000122e-07,
      "logits/chosen": 121.53276062011719,
      "logits/rejected": 125.26307678222656,
      "logps/chosen": -370.91107177734375,
      "logps/rejected": -423.38677978515625,
      "loss": 0.6418,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.08632902055978775,
      "rewards/margins": 0.28587669134140015,
      "rewards/rejected": -0.199547678232193,
      "step": 20
    },
    {
      "epoch": 0.5687203791469194,
      "grad_norm": 115.98151525592598,
      "learning_rate": 4.5025027361734613e-07,
      "logits/chosen": 142.67178344726562,
      "logits/rejected": 136.16537475585938,
      "logps/chosen": -415.01104736328125,
      "logps/rejected": -460.3519592285156,
      "loss": 0.5673,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -1.1033741235733032,
      "rewards/margins": 0.8893669843673706,
      "rewards/rejected": -1.9927412271499634,
      "step": 30
    },
    {
      "epoch": 0.7582938388625592,
      "grad_norm": 112.04677171325864,
      "learning_rate": 3.893311157806091e-07,
      "logits/chosen": 124.56459045410156,
      "logits/rejected": 113.08979797363281,
      "logps/chosen": -391.10174560546875,
      "logps/rejected": -417.09051513671875,
      "loss": 0.5586,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -1.8466203212738037,
      "rewards/margins": 1.0624934434890747,
      "rewards/rejected": -2.909113883972168,
      "step": 40
    },
    {
      "epoch": 0.9478672985781991,
      "grad_norm": 126.0267199667638,
      "learning_rate": 3.126631330646801e-07,
      "logits/chosen": 138.824462890625,
      "logits/rejected": 142.9259033203125,
      "logps/chosen": -455.6646423339844,
      "logps/rejected": -536.987548828125,
      "loss": 0.4941,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -1.8371152877807617,
      "rewards/margins": 1.2058273553848267,
      "rewards/rejected": -3.042942523956299,
      "step": 50
    },
    {
      "epoch": 1.1374407582938388,
      "grad_norm": 63.02753605606795,
      "learning_rate": 2.2891223348923882e-07,
      "logits/chosen": 131.37802124023438,
      "logits/rejected": 134.72222900390625,
      "logps/chosen": -436.17047119140625,
      "logps/rejected": -520.2355346679688,
      "loss": 0.3078,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": -1.9126123189926147,
      "rewards/margins": 2.285891056060791,
      "rewards/rejected": -4.198503017425537,
      "step": 60
    },
    {
      "epoch": 1.3270142180094786,
      "grad_norm": 59.15589622996558,
      "learning_rate": 1.4754491880085317e-07,
      "logits/chosen": 124.51689147949219,
      "logits/rejected": 126.70524597167969,
      "logps/chosen": -402.62066650390625,
      "logps/rejected": -505.50006103515625,
      "loss": 0.1932,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": -1.7549495697021484,
      "rewards/margins": 2.8932533264160156,
      "rewards/rejected": -4.648203372955322,
      "step": 70
    },
    {
      "epoch": 1.5165876777251186,
      "grad_norm": 44.24206971141979,
      "learning_rate": 7.775827023107834e-08,
      "logits/chosen": 111.74947357177734,
      "logits/rejected": 128.5332489013672,
      "logps/chosen": -400.06146240234375,
      "logps/rejected": -519.4473876953125,
      "loss": 0.1687,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -2.326984167098999,
      "rewards/margins": 2.937407970428467,
      "rewards/rejected": -5.264392375946045,
      "step": 80
    },
    {
      "epoch": 1.7061611374407581,
      "grad_norm": 50.07584592888485,
      "learning_rate": 2.7440387297912122e-08,
      "logits/chosen": 110.84814453125,
      "logits/rejected": 123.78230285644531,
      "logps/chosen": -435.03265380859375,
      "logps/rejected": -550.7723388671875,
      "loss": 0.1579,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -2.400338649749756,
      "rewards/margins": 3.238767623901367,
      "rewards/rejected": -5.639105796813965,
      "step": 90
    },
    {
      "epoch": 1.8957345971563981,
      "grad_norm": 49.65552371508206,
      "learning_rate": 2.27878296044029e-09,
      "logits/chosen": 117.1094970703125,
      "logits/rejected": 117.060302734375,
      "logps/chosen": -427.23431396484375,
      "logps/rejected": -520.2066650390625,
      "loss": 0.1591,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -2.2608113288879395,
      "rewards/margins": 2.9113571643829346,
      "rewards/rejected": -5.172169208526611,
      "step": 100
    },
    {
      "epoch": 1.8957345971563981,
      "eval_logits/chosen": 96.76607513427734,
      "eval_logits/rejected": 91.05736541748047,
      "eval_logps/chosen": -422.7994689941406,
      "eval_logps/rejected": -453.052978515625,
      "eval_loss": 0.4691648781299591,
      "eval_rewards/accuracies": 0.7604166865348816,
      "eval_rewards/chosen": -2.9809672832489014,
      "eval_rewards/margins": 1.6640973091125488,
      "eval_rewards/rejected": -4.645064353942871,
      "eval_runtime": 56.9732,
      "eval_samples_per_second": 13.164,
      "eval_steps_per_second": 0.421,
      "step": 100
    },
    {
      "epoch": 1.971563981042654,
      "step": 104,
      "total_flos": 0.0,
      "train_loss": 0.3921648321243433,
      "train_runtime": 1190.3032,
      "train_samples_per_second": 11.342,
      "train_steps_per_second": 0.087
    }
  ],
  "logging_steps": 10,
  "max_steps": 104,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}