{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9980806142034548,
"eval_steps": 1000000,
"global_step": 390,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 1292.2103490533002,
"learning_rate": 1.282051282051282e-08,
"logits/chosen": -2.5583817958831787,
"logits/rejected": -2.4487552642822266,
"logps/chosen": -258.1644592285156,
"logps/rejected": -216.25729370117188,
"loss": 0.6964,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.03,
"grad_norm": 1261.1198733310573,
"learning_rate": 1.2820512820512818e-07,
"logits/chosen": -2.6061007976531982,
"logits/rejected": -2.553147315979004,
"logps/chosen": -267.5506591796875,
"logps/rejected": -217.63583374023438,
"loss": 0.7045,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.012439604848623276,
"rewards/margins": 0.01009546872228384,
"rewards/rejected": 0.002344133099541068,
"step": 10
},
{
"epoch": 0.05,
"grad_norm": 853.2707411908161,
"learning_rate": 2.5641025641025636e-07,
"logits/chosen": -2.629751443862915,
"logits/rejected": -2.5669989585876465,
"logps/chosen": -260.5412292480469,
"logps/rejected": -207.0039825439453,
"loss": 0.5192,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 0.6866833567619324,
"rewards/margins": 0.6415502429008484,
"rewards/rejected": 0.045133065432310104,
"step": 20
},
{
"epoch": 0.08,
"grad_norm": 1142.6320871817904,
"learning_rate": 3.8461538461538463e-07,
"logits/chosen": -2.6441783905029297,
"logits/rejected": -2.5700392723083496,
"logps/chosen": -251.12313842773438,
"logps/rejected": -198.34071350097656,
"loss": 0.3383,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 3.621673583984375,
"rewards/margins": 3.1466643810272217,
"rewards/rejected": 0.47500935196876526,
"step": 30
},
{
"epoch": 0.1,
"grad_norm": 498.4795422948633,
"learning_rate": 4.99989986344963e-07,
"logits/chosen": -2.6392924785614014,
"logits/rejected": -2.561156749725342,
"logps/chosen": -243.8309783935547,
"logps/rejected": -192.75845336914062,
"loss": 0.3207,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 5.866530418395996,
"rewards/margins": 5.031473636627197,
"rewards/rejected": 0.8350569009780884,
"step": 40
},
{
"epoch": 0.13,
"grad_norm": 687.258084457276,
"learning_rate": 4.987893180827479e-07,
"logits/chosen": -2.6534440517425537,
"logits/rejected": -2.5842742919921875,
"logps/chosen": -258.9037170410156,
"logps/rejected": -203.41665649414062,
"loss": 0.3666,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 8.400110244750977,
"rewards/margins": 6.8410444259643555,
"rewards/rejected": 1.559065818786621,
"step": 50
},
{
"epoch": 0.15,
"grad_norm": 759.1516730607283,
"learning_rate": 4.955969343539162e-07,
"logits/chosen": -2.616115093231201,
"logits/rejected": -2.5453152656555176,
"logps/chosen": -262.9014892578125,
"logps/rejected": -209.2264404296875,
"loss": 0.3916,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 5.333600044250488,
"rewards/margins": 6.3847455978393555,
"rewards/rejected": -1.0511456727981567,
"step": 60
},
{
"epoch": 0.18,
"grad_norm": 560.0502144214485,
"learning_rate": 4.90438392204474e-07,
"logits/chosen": -2.590355634689331,
"logits/rejected": -2.5212607383728027,
"logps/chosen": -292.03955078125,
"logps/rejected": -227.56900024414062,
"loss": 0.366,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 5.28397798538208,
"rewards/margins": 7.424477577209473,
"rewards/rejected": -2.1404995918273926,
"step": 70
},
{
"epoch": 0.2,
"grad_norm": 1037.6404299504059,
"learning_rate": 4.83354989019146e-07,
"logits/chosen": -2.5414998531341553,
"logits/rejected": -2.468327283859253,
"logps/chosen": -260.0003356933594,
"logps/rejected": -203.8599090576172,
"loss": 0.3627,
"rewards/accuracies": 0.875,
"rewards/chosen": 6.802065372467041,
"rewards/margins": 7.507330894470215,
"rewards/rejected": -0.705264687538147,
"step": 80
},
{
"epoch": 0.23,
"grad_norm": 1095.0164975833522,
"learning_rate": 4.7440343190975353e-07,
"logits/chosen": -2.5701346397399902,
"logits/rejected": -2.513249397277832,
"logps/chosen": -257.8319396972656,
"logps/rejected": -217.47695922851562,
"loss": 0.3529,
"rewards/accuracies": 0.84375,
"rewards/chosen": 3.5220108032226562,
"rewards/margins": 6.205338478088379,
"rewards/rejected": -2.683328151702881,
"step": 90
},
{
"epoch": 0.26,
"grad_norm": 493.8561719761038,
"learning_rate": 4.6365538373900506e-07,
"logits/chosen": -2.624537944793701,
"logits/rejected": -2.5495262145996094,
"logps/chosen": -237.0422821044922,
"logps/rejected": -201.1056671142578,
"loss": 0.6296,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 4.613960266113281,
"rewards/margins": 6.805362701416016,
"rewards/rejected": -2.191401958465576,
"step": 100
},
{
"epoch": 0.28,
"grad_norm": 783.3339585792174,
"learning_rate": 4.5119688941406386e-07,
"logits/chosen": -2.6185028553009033,
"logits/rejected": -2.5373358726501465,
"logps/chosen": -258.2229919433594,
"logps/rejected": -209.95010375976562,
"loss": 0.5085,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 6.706292629241943,
"rewards/margins": 8.07982349395752,
"rewards/rejected": -1.3735301494598389,
"step": 110
},
{
"epoch": 0.31,
"grad_norm": 1013.1425815966796,
"learning_rate": 4.3712768704277524e-07,
"logits/chosen": -2.5965781211853027,
"logits/rejected": -2.5263915061950684,
"logps/chosen": -263.1861572265625,
"logps/rejected": -208.8050079345703,
"loss": 0.4699,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 5.6486382484436035,
"rewards/margins": 7.843411445617676,
"rewards/rejected": -2.194772720336914,
"step": 120
},
{
"epoch": 0.33,
"grad_norm": 659.1716861853262,
"learning_rate": 4.2156040946718343e-07,
"logits/chosen": -2.571542263031006,
"logits/rejected": -2.502825975418091,
"logps/chosen": -252.36941528320312,
"logps/rejected": -197.37109375,
"loss": 0.4289,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 4.440427303314209,
"rewards/margins": 7.714503288269043,
"rewards/rejected": -3.274075746536255,
"step": 130
},
{
"epoch": 0.36,
"grad_norm": 706.862930333198,
"learning_rate": 4.046196825665637e-07,
"logits/chosen": -2.5883898735046387,
"logits/rejected": -2.5185062885284424,
"logps/chosen": -271.88494873046875,
"logps/rejected": -218.22323608398438,
"loss": 0.4943,
"rewards/accuracies": 0.84375,
"rewards/chosen": 2.9622278213500977,
"rewards/margins": 7.161763668060303,
"rewards/rejected": -4.199534893035889,
"step": 140
},
{
"epoch": 0.38,
"grad_norm": 715.990249793818,
"learning_rate": 3.864411275486261e-07,
"logits/chosen": -2.5699923038482666,
"logits/rejected": -2.502592086791992,
"logps/chosen": -264.418212890625,
"logps/rejected": -213.1693115234375,
"loss": 0.5111,
"rewards/accuracies": 0.875,
"rewards/chosen": 5.556182384490967,
"rewards/margins": 8.052742004394531,
"rewards/rejected": -2.4965591430664062,
"step": 150
},
{
"epoch": 0.41,
"grad_norm": 710.6520782907305,
"learning_rate": 3.671702752161759e-07,
"logits/chosen": -2.559586763381958,
"logits/rejected": -2.4897525310516357,
"logps/chosen": -245.1999969482422,
"logps/rejected": -198.19442749023438,
"loss": 0.4696,
"rewards/accuracies": 0.875,
"rewards/chosen": 3.364131212234497,
"rewards/margins": 8.019769668579102,
"rewards/rejected": -4.655638694763184,
"step": 160
},
{
"epoch": 0.44,
"grad_norm": 865.8215089865342,
"learning_rate": 3.4696140090121375e-07,
"logits/chosen": -2.57694935798645,
"logits/rejected": -2.5136027336120605,
"logps/chosen": -266.91412353515625,
"logps/rejected": -211.6914825439453,
"loss": 0.397,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 3.399867296218872,
"rewards/margins": 8.182429313659668,
"rewards/rejected": -4.782561302185059,
"step": 170
},
{
"epoch": 0.46,
"grad_norm": 658.5054936178984,
"learning_rate": 3.259762893935617e-07,
"logits/chosen": -2.6607277393341064,
"logits/rejected": -2.581068515777588,
"logps/chosen": -239.2282257080078,
"logps/rejected": -188.63772583007812,
"loss": 0.4691,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 2.5246458053588867,
"rewards/margins": 7.373200416564941,
"rewards/rejected": -4.848554611206055,
"step": 180
},
{
"epoch": 0.49,
"grad_norm": 746.921617266345,
"learning_rate": 3.0438293975154184e-07,
"logits/chosen": -2.6273646354675293,
"logits/rejected": -2.5594921112060547,
"logps/chosen": -263.73443603515625,
"logps/rejected": -207.8045654296875,
"loss": 0.4399,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 0.9775351285934448,
"rewards/margins": 8.604721069335938,
"rewards/rejected": -7.6271867752075195,
"step": 190
},
{
"epoch": 0.51,
"grad_norm": 859.0725144089504,
"learning_rate": 2.823542203635138e-07,
"logits/chosen": -2.6617209911346436,
"logits/rejected": -2.5823440551757812,
"logps/chosen": -278.6440734863281,
"logps/rejected": -222.3423309326172,
"loss": 0.493,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.6911423206329346,
"rewards/margins": 9.542765617370605,
"rewards/rejected": -7.851624488830566,
"step": 200
},
{
"epoch": 0.54,
"grad_norm": 1073.7778122145994,
"learning_rate": 2.600664850273538e-07,
"logits/chosen": -2.6473941802978516,
"logits/rejected": -2.581458568572998,
"logps/chosen": -270.93621826171875,
"logps/rejected": -214.9030303955078,
"loss": 0.7378,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 0.10583686828613281,
"rewards/margins": 7.7938642501831055,
"rewards/rejected": -7.688027858734131,
"step": 210
},
{
"epoch": 0.56,
"grad_norm": 783.9779969138841,
"learning_rate": 2.3769816112703045e-07,
"logits/chosen": -2.6640350818634033,
"logits/rejected": -2.606898307800293,
"logps/chosen": -260.6977233886719,
"logps/rejected": -216.708740234375,
"loss": 0.5298,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 0.881191611289978,
"rewards/margins": 7.128912448883057,
"rewards/rejected": -6.247720241546631,
"step": 220
},
{
"epoch": 0.59,
"grad_norm": 656.1996441950056,
"learning_rate": 2.1542832120881677e-07,
"logits/chosen": -2.6948089599609375,
"logits/rejected": -2.615889072418213,
"logps/chosen": -270.77557373046875,
"logps/rejected": -219.38320922851562,
"loss": 0.4547,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 2.3647713661193848,
"rewards/margins": 8.186816215515137,
"rewards/rejected": -5.82204532623291,
"step": 230
},
{
"epoch": 0.61,
"grad_norm": 1015.3762946599339,
"learning_rate": 1.934352493925695e-07,
"logits/chosen": -2.6751513481140137,
"logits/rejected": -2.6343884468078613,
"logps/chosen": -266.14263916015625,
"logps/rejected": -223.5273895263672,
"loss": 0.429,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.4297797679901123,
"rewards/margins": 10.390259742736816,
"rewards/rejected": -8.960479736328125,
"step": 240
},
{
"epoch": 0.64,
"grad_norm": 586.0366917195352,
"learning_rate": 1.7189501409486059e-07,
"logits/chosen": -2.6815237998962402,
"logits/rejected": -2.6186068058013916,
"logps/chosen": -271.32818603515625,
"logps/rejected": -225.2965545654297,
"loss": 0.4243,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 0.4054955542087555,
"rewards/margins": 8.429037094116211,
"rewards/rejected": -8.023542404174805,
"step": 250
},
{
"epoch": 0.67,
"grad_norm": 567.2324686905487,
"learning_rate": 1.5098005849021078e-07,
"logits/chosen": -2.6706247329711914,
"logits/rejected": -2.6183619499206543,
"logps/chosen": -265.3566589355469,
"logps/rejected": -211.55789184570312,
"loss": 0.4026,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 0.452970027923584,
"rewards/margins": 7.969753265380859,
"rewards/rejected": -7.516783237457275,
"step": 260
},
{
"epoch": 0.69,
"grad_norm": 651.6922692823155,
"learning_rate": 1.30857819994673e-07,
"logits/chosen": -2.648026943206787,
"logits/rejected": -2.5697438716888428,
"logps/chosen": -276.09661865234375,
"logps/rejected": -233.24966430664062,
"loss": 0.4615,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.4385630190372467,
"rewards/margins": 11.782003402709961,
"rewards/rejected": -11.343441009521484,
"step": 270
},
{
"epoch": 0.72,
"grad_norm": 492.898374979646,
"learning_rate": 1.116893898236716e-07,
"logits/chosen": -2.6764636039733887,
"logits/rejected": -2.6275370121002197,
"logps/chosen": -273.55120849609375,
"logps/rejected": -222.28256225585938,
"loss": 0.431,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 0.1396121382713318,
"rewards/margins": 8.543681144714355,
"rewards/rejected": -8.404068946838379,
"step": 280
},
{
"epoch": 0.74,
"grad_norm": 601.4691313440442,
"learning_rate": 9.362822335518062e-08,
"logits/chosen": -2.639714241027832,
"logits/rejected": -2.6006019115448,
"logps/chosen": -271.51104736328125,
"logps/rejected": -219.78646850585938,
"loss": 0.3785,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.39154329895973206,
"rewards/margins": 8.193072319030762,
"rewards/rejected": -7.8015289306640625,
"step": 290
},
{
"epoch": 0.77,
"grad_norm": 876.6568683843423,
"learning_rate": 7.681891162260015e-08,
"logits/chosen": -2.6577625274658203,
"logits/rejected": -2.610130786895752,
"logps/chosen": -278.3086853027344,
"logps/rejected": -223.9765625,
"loss": 0.4175,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 0.23941349983215332,
"rewards/margins": 8.298933029174805,
"rewards/rejected": -8.059518814086914,
"step": 300
},
{
"epoch": 0.79,
"grad_norm": 491.2362210232499,
"learning_rate": 6.139602377230247e-08,
"logits/chosen": -2.623286724090576,
"logits/rejected": -2.5633485317230225,
"logps/chosen": -281.3511047363281,
"logps/rejected": -218.5331268310547,
"loss": 0.4291,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.8305476903915405,
"rewards/margins": 8.879867553710938,
"rewards/rejected": -8.049318313598633,
"step": 310
},
{
"epoch": 0.82,
"grad_norm": 762.3598954431114,
"learning_rate": 4.748302975270837e-08,
"logits/chosen": -2.6497387886047363,
"logits/rejected": -2.610168933868408,
"logps/chosen": -264.21588134765625,
"logps/rejected": -206.8611602783203,
"loss": 0.4476,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.6263083219528198,
"rewards/margins": 7.853513240814209,
"rewards/rejected": -7.227204322814941,
"step": 320
},
{
"epoch": 0.84,
"grad_norm": 668.8344256472935,
"learning_rate": 3.5191311859445795e-08,
"logits/chosen": -2.6693060398101807,
"logits/rejected": -2.6197760105133057,
"logps/chosen": -268.38543701171875,
"logps/rejected": -220.74081420898438,
"loss": 0.4113,
"rewards/accuracies": 0.90625,
"rewards/chosen": 1.1819716691970825,
"rewards/margins": 8.392292976379395,
"rewards/rejected": -7.210320949554443,
"step": 330
},
{
"epoch": 0.87,
"grad_norm": 600.9418350776771,
"learning_rate": 2.4619273049795996e-08,
"logits/chosen": -2.653104066848755,
"logits/rejected": -2.6034810543060303,
"logps/chosen": -263.5737609863281,
"logps/rejected": -213.79544067382812,
"loss": 0.3933,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.1136972904205322,
"rewards/margins": 9.145872116088867,
"rewards/rejected": -8.032175064086914,
"step": 340
},
{
"epoch": 0.9,
"grad_norm": 448.6960620183454,
"learning_rate": 1.5851549164932115e-08,
"logits/chosen": -2.663357734680176,
"logits/rejected": -2.6194939613342285,
"logps/chosen": -272.70233154296875,
"logps/rejected": -229.30697631835938,
"loss": 0.4249,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.3936760425567627,
"rewards/margins": 8.432661056518555,
"rewards/rejected": -7.0389862060546875,
"step": 350
},
{
"epoch": 0.92,
"grad_norm": 606.8337189375926,
"learning_rate": 8.958331366609423e-09,
"logits/chosen": -2.666874885559082,
"logits/rejected": -2.6079227924346924,
"logps/chosen": -278.23712158203125,
"logps/rejected": -222.1784210205078,
"loss": 0.4777,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 0.8818317651748657,
"rewards/margins": 8.486412048339844,
"rewards/rejected": -7.604579925537109,
"step": 360
},
{
"epoch": 0.95,
"grad_norm": 565.1329564450134,
"learning_rate": 3.994804212627461e-09,
"logits/chosen": -2.626528739929199,
"logits/rejected": -2.5960590839385986,
"logps/chosen": -277.90789794921875,
"logps/rejected": -232.6277313232422,
"loss": 0.4164,
"rewards/accuracies": 0.84375,
"rewards/chosen": 1.226670503616333,
"rewards/margins": 8.475992202758789,
"rewards/rejected": -7.249321937561035,
"step": 370
},
{
"epoch": 0.97,
"grad_norm": 494.78405010061283,
"learning_rate": 1.0007038696262516e-09,
"logits/chosen": -2.672760486602783,
"logits/rejected": -2.6364920139312744,
"logps/chosen": -266.59918212890625,
"logps/rejected": -233.4165496826172,
"loss": 0.4064,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 1.6075836420059204,
"rewards/margins": 8.459333419799805,
"rewards/rejected": -6.851749420166016,
"step": 380
},
{
"epoch": 1.0,
"grad_norm": 917.0969156966863,
"learning_rate": 0.0,
"logits/chosen": -2.6849241256713867,
"logits/rejected": -2.630876302719116,
"logps/chosen": -254.27664184570312,
"logps/rejected": -213.14102172851562,
"loss": 0.4983,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 0.6460372805595398,
"rewards/margins": 8.144643783569336,
"rewards/rejected": -7.498606204986572,
"step": 390
},
{
"epoch": 1.0,
"step": 390,
"total_flos": 0.0,
"train_loss": 0.4513903107398596,
"train_runtime": 5854.463,
"train_samples_per_second": 8.54,
"train_steps_per_second": 0.067
}
],
"logging_steps": 10,
"max_steps": 390,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
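
A note on reading this file: each entry in "log_history" above is a per-step DPO training log (loss, rewards/chosen, rewards/rejected, rewards/margins, rewards/accuracies), and the final entry is the run summary. The snippet below is a minimal sketch, not part of the checkpoint, assuming the file is saved locally as "trainer_state.json" and that matplotlib is installed; the file path and plot layout are illustrative choices.

# Minimal sketch: load trainer_state.json and plot the logged DPO metrics.
# The local path "trainer_state.json" is an assumption, not defined by the checkpoint.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only per-step entries; the final summary entry has "train_loss" but no "loss".
logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in logs]
loss = [entry["loss"] for entry in logs]
margins = [entry["rewards/margins"] for entry in logs]
accuracies = [entry["rewards/accuracies"] for entry in logs]

fig, axes = plt.subplots(1, 3, figsize=(12, 3))
axes[0].plot(steps, loss)
axes[0].set_title("loss")
axes[1].plot(steps, margins)
axes[1].set_title("rewards/margins")
axes[2].plot(steps, accuracies)
axes[2].set_title("rewards/accuracies")
for ax in axes:
    ax.set_xlabel("step")
plt.tight_layout()
plt.show()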