zephyr-7b-dpo-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 175,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005714285714285714,
"grad_norm": 1251.908438964567,
"learning_rate": 2.7777777777777774e-08,
"logits/chosen": -4.099947929382324,
"logits/rejected": -4.528928756713867,
"logps/chosen": -297.4884033203125,
"logps/rejected": -227.07449340820312,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.05714285714285714,
"grad_norm": 1007.5595895273253,
"learning_rate": 2.7777777777777776e-07,
"logits/chosen": -4.256350040435791,
"logits/rejected": -4.503963947296143,
"logps/chosen": -316.07769775390625,
"logps/rejected": -254.57467651367188,
"loss": 0.6613,
"rewards/accuracies": 0.5034722089767456,
"rewards/chosen": 0.2096220850944519,
"rewards/margins": 0.15642070770263672,
"rewards/rejected": 0.0532013401389122,
"step": 10
},
{
"epoch": 0.11428571428571428,
"grad_norm": 675.3841086149566,
"learning_rate": 4.997998237821233e-07,
"logits/chosen": -4.360010623931885,
"logits/rejected": -4.628513813018799,
"logps/chosen": -298.9122009277344,
"logps/rejected": -249.00918579101562,
"loss": 0.4212,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 3.4100475311279297,
"rewards/margins": 2.592763900756836,
"rewards/rejected": 0.8172838091850281,
"step": 20
},
{
"epoch": 0.17142857142857143,
"grad_norm": 675.1555265980924,
"learning_rate": 4.928272579403969e-07,
"logits/chosen": -4.373316287994385,
"logits/rejected": -4.6160383224487305,
"logps/chosen": -303.8053894042969,
"logps/rejected": -266.44818115234375,
"loss": 0.4762,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 4.110724449157715,
"rewards/margins": 6.506677150726318,
"rewards/rejected": -2.3959527015686035,
"step": 30
},
{
"epoch": 0.22857142857142856,
"grad_norm": 600.7834437052495,
"learning_rate": 4.7616414547743854e-07,
"logits/chosen": -4.35813045501709,
"logits/rejected": -4.55276346206665,
"logps/chosen": -289.32257080078125,
"logps/rejected": -250.25341796875,
"loss": 0.5303,
"rewards/accuracies": 0.8343750238418579,
"rewards/chosen": 3.9142494201660156,
"rewards/margins": 7.811418056488037,
"rewards/rejected": -3.897169589996338,
"step": 40
},
{
"epoch": 0.2857142857142857,
"grad_norm": 788.2365830395779,
"learning_rate": 4.5047546391491e-07,
"logits/chosen": -4.276906967163086,
"logits/rejected": -4.5039567947387695,
"logps/chosen": -297.9548034667969,
"logps/rejected": -260.8029479980469,
"loss": 0.4673,
"rewards/accuracies": 0.8531249761581421,
"rewards/chosen": 4.0600905418396,
"rewards/margins": 7.4909186363220215,
"rewards/rejected": -3.430828094482422,
"step": 50
},
{
"epoch": 0.34285714285714286,
"grad_norm": 715.3302112367288,
"learning_rate": 4.167863756189767e-07,
"logits/chosen": -4.322784900665283,
"logits/rejected": -4.564073085784912,
"logps/chosen": -293.1005554199219,
"logps/rejected": -254.21835327148438,
"loss": 0.4621,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 5.134177207946777,
"rewards/margins": 7.875572204589844,
"rewards/rejected": -2.741394519805908,
"step": 60
},
{
"epoch": 0.4,
"grad_norm": 633.4373267135044,
"learning_rate": 3.764413164801049e-07,
"logits/chosen": -4.282719612121582,
"logits/rejected": -4.559357643127441,
"logps/chosen": -287.670166015625,
"logps/rejected": -240.59359741210938,
"loss": 0.3978,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 4.107884407043457,
"rewards/margins": 7.409787654876709,
"rewards/rejected": -3.301903247833252,
"step": 70
},
{
"epoch": 0.45714285714285713,
"grad_norm": 585.2050073161457,
"learning_rate": 3.3105034329273217e-07,
"logits/chosen": -4.1827239990234375,
"logits/rejected": -4.454409599304199,
"logps/chosen": -294.9931640625,
"logps/rejected": -253.04092407226562,
"loss": 0.4435,
"rewards/accuracies": 0.828125,
"rewards/chosen": 4.6706342697143555,
"rewards/margins": 7.3439764976501465,
"rewards/rejected": -2.67334246635437,
"step": 80
},
{
"epoch": 0.5142857142857142,
"grad_norm": 744.3685065355755,
"learning_rate": 2.8242488095860204e-07,
"logits/chosen": -4.200292110443115,
"logits/rejected": -4.432915210723877,
"logps/chosen": -289.46466064453125,
"logps/rejected": -249.84048461914062,
"loss": 0.4059,
"rewards/accuracies": 0.8656250238418579,
"rewards/chosen": 5.276065349578857,
"rewards/margins": 7.3948163986206055,
"rewards/rejected": -2.118751287460327,
"step": 90
},
{
"epoch": 0.5714285714285714,
"grad_norm": 671.6213883992457,
"learning_rate": 2.3250543366050071e-07,
"logits/chosen": -4.27265739440918,
"logits/rejected": -4.471877098083496,
"logps/chosen": -299.2139892578125,
"logps/rejected": -262.4172668457031,
"loss": 0.3587,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 5.20701265335083,
"rewards/margins": 7.200909614562988,
"rewards/rejected": -1.9938958883285522,
"step": 100
},
{
"epoch": 0.6285714285714286,
"grad_norm": 731.7704645558294,
"learning_rate": 1.8328414484826743e-07,
"logits/chosen": -4.198658466339111,
"logits/rejected": -4.5151848793029785,
"logps/chosen": -299.356689453125,
"logps/rejected": -248.8483428955078,
"loss": 0.3809,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 5.6889519691467285,
"rewards/margins": 7.738437652587891,
"rewards/rejected": -2.049485683441162,
"step": 110
},
{
"epoch": 0.6857142857142857,
"grad_norm": 763.5659705732334,
"learning_rate": 1.3672529644823003e-07,
"logits/chosen": -4.348945140838623,
"logits/rejected": -4.604073524475098,
"logps/chosen": -269.67547607421875,
"logps/rejected": -229.8912811279297,
"loss": 0.3889,
"rewards/accuracies": 0.8656250238418579,
"rewards/chosen": 4.305537700653076,
"rewards/margins": 6.874751091003418,
"rewards/rejected": -2.569213390350342,
"step": 120
},
{
"epoch": 0.7428571428571429,
"grad_norm": 609.1110882142142,
"learning_rate": 9.468691994696146e-08,
"logits/chosen": -4.341274261474609,
"logits/rejected": -4.5989298820495605,
"logps/chosen": -275.35833740234375,
"logps/rejected": -245.8115692138672,
"loss": 0.3699,
"rewards/accuracies": 0.871874988079071,
"rewards/chosen": 4.084762096405029,
"rewards/margins": 6.567566871643066,
"rewards/rejected": -2.4828040599823,
"step": 130
},
{
"epoch": 0.8,
"grad_norm": 833.6810153426302,
"learning_rate": 5.884664762850466e-08,
"logits/chosen": -4.367494106292725,
"logits/rejected": -4.598031044006348,
"logps/chosen": -273.83099365234375,
"logps/rejected": -237.6991729736328,
"loss": 0.381,
"rewards/accuracies": 0.8656250238418579,
"rewards/chosen": 4.479451656341553,
"rewards/margins": 6.597804069519043,
"rewards/rejected": -2.118351697921753,
"step": 140
},
{
"epoch": 0.8571428571428571,
"grad_norm": 677.0337306190108,
"learning_rate": 3.063476303172388e-08,
"logits/chosen": -4.267864227294922,
"logits/rejected": -4.488691806793213,
"logps/chosen": -288.9342041015625,
"logps/rejected": -247.3463592529297,
"loss": 0.392,
"rewards/accuracies": 0.8843749761581421,
"rewards/chosen": 5.401379585266113,
"rewards/margins": 7.644896030426025,
"rewards/rejected": -2.2435173988342285,
"step": 150
},
{
"epoch": 0.9142857142857143,
"grad_norm": 601.5321276048043,
"learning_rate": 1.1177122393998372e-08,
"logits/chosen": -4.331192970275879,
"logits/rejected": -4.530573844909668,
"logps/chosen": -277.52020263671875,
"logps/rejected": -243.84323120117188,
"loss": 0.363,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 4.819538593292236,
"rewards/margins": 6.703360080718994,
"rewards/rejected": -1.8838220834732056,
"step": 160
},
{
"epoch": 0.9714285714285714,
"grad_norm": 771.9761619985129,
"learning_rate": 1.2502249244298879e-09,
"logits/chosen": -4.293517112731934,
"logits/rejected": -4.548079013824463,
"logps/chosen": -295.1907653808594,
"logps/rejected": -248.191650390625,
"loss": 0.3693,
"rewards/accuracies": 0.8843749761581421,
"rewards/chosen": 4.456015586853027,
"rewards/margins": 6.605706214904785,
"rewards/rejected": -2.1496901512145996,
"step": 170
},
{
"epoch": 1.0,
"step": 175,
"total_flos": 0.0,
"train_loss": 0.4268451908656529,
"train_runtime": 5571.7779,
"train_samples_per_second": 8.032,
"train_steps_per_second": 0.031
}
],
"logging_steps": 10,
"max_steps": 175,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}