zephyr-7b-dpo-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9986824769433466,
"eval_steps": 100,
"global_step": 379,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002635046113306983,
"grad_norm": 1701.3284378032004,
"learning_rate": 1.3157894736842104e-08,
"logits/chosen": -4.685327529907227,
"logits/rejected": -4.87608528137207,
"logps/chosen": -207.7137451171875,
"logps/rejected": -145.5098114013672,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.026350461133069828,
"grad_norm": 1584.9120962726856,
"learning_rate": 1.3157894736842104e-07,
"logits/chosen": -4.499300479888916,
"logits/rejected": -4.840802192687988,
"logps/chosen": -223.6631317138672,
"logps/rejected": -160.81097412109375,
"loss": 0.7136,
"rewards/accuracies": 0.4444444477558136,
"rewards/chosen": 0.004151582717895508,
"rewards/margins": 0.002958830911666155,
"rewards/rejected": 0.001192751806229353,
"step": 10
},
{
"epoch": 0.052700922266139656,
"grad_norm": 955.29292445595,
"learning_rate": 2.631578947368421e-07,
"logits/chosen": -4.518028259277344,
"logits/rejected": -4.817793846130371,
"logps/chosen": -220.1512451171875,
"logps/rejected": -172.69322204589844,
"loss": 0.4939,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": 0.9042474627494812,
"rewards/margins": 0.9315685033798218,
"rewards/rejected": -0.02732105180621147,
"step": 20
},
{
"epoch": 0.07905138339920949,
"grad_norm": 1078.6380127502835,
"learning_rate": 3.9473684210526315e-07,
"logits/chosen": -4.58280086517334,
"logits/rejected": -4.878857612609863,
"logps/chosen": -212.6325225830078,
"logps/rejected": -183.79238891601562,
"loss": 0.3906,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 3.010448932647705,
"rewards/margins": 2.6640021800994873,
"rewards/rejected": 0.346446692943573,
"step": 30
},
{
"epoch": 0.10540184453227931,
"grad_norm": 1034.6942446334156,
"learning_rate": 4.999575626062319e-07,
"logits/chosen": -4.496463298797607,
"logits/rejected": -4.832797050476074,
"logps/chosen": -225.2804412841797,
"logps/rejected": -177.3509063720703,
"loss": 0.4804,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 4.241217136383057,
"rewards/margins": 4.989673614501953,
"rewards/rejected": -0.7484563589096069,
"step": 40
},
{
"epoch": 0.13175230566534915,
"grad_norm": 1292.8962171873402,
"learning_rate": 4.984737660598186e-07,
"logits/chosen": -4.517908573150635,
"logits/rejected": -4.786294937133789,
"logps/chosen": -214.01718139648438,
"logps/rejected": -174.09678649902344,
"loss": 0.5163,
"rewards/accuracies": 0.84375,
"rewards/chosen": 3.7782645225524902,
"rewards/margins": 5.338944435119629,
"rewards/rejected": -1.5606796741485596,
"step": 50
},
{
"epoch": 0.15810276679841898,
"grad_norm": 1081.6576652859876,
"learning_rate": 4.948824853131236e-07,
"logits/chosen": -4.719171047210693,
"logits/rejected": -4.9818878173828125,
"logps/chosen": -215.00869750976562,
"logps/rejected": -180.1785430908203,
"loss": 0.4821,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 3.4492969512939453,
"rewards/margins": 6.232473373413086,
"rewards/rejected": -2.7831759452819824,
"step": 60
},
{
"epoch": 0.1844532279314888,
"grad_norm": 780.7579097796144,
"learning_rate": 4.892141805936084e-07,
"logits/chosen": -4.691411018371582,
"logits/rejected": -4.9814910888671875,
"logps/chosen": -222.72189331054688,
"logps/rejected": -186.29788208007812,
"loss": 0.5005,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 3.7675652503967285,
"rewards/margins": 6.836648464202881,
"rewards/rejected": -3.0690836906433105,
"step": 70
},
{
"epoch": 0.21080368906455862,
"grad_norm": 1751.5015463679365,
"learning_rate": 4.81516928858564e-07,
"logits/chosen": -4.691437721252441,
"logits/rejected": -4.943267822265625,
"logps/chosen": -219.7614288330078,
"logps/rejected": -179.60899353027344,
"loss": 0.5615,
"rewards/accuracies": 0.831250011920929,
"rewards/chosen": 3.4195969104766846,
"rewards/margins": 6.783135890960693,
"rewards/rejected": -3.363539457321167,
"step": 80
},
{
"epoch": 0.23715415019762845,
"grad_norm": 1233.3333710128843,
"learning_rate": 4.7185601601995784e-07,
"logits/chosen": -4.569981575012207,
"logits/rejected": -4.954944610595703,
"logps/chosen": -212.4117431640625,
"logps/rejected": -173.86911010742188,
"loss": 0.4989,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 3.7934730052948,
"rewards/margins": 7.21230936050415,
"rewards/rejected": -3.4188361167907715,
"step": 90
},
{
"epoch": 0.2635046113306983,
"grad_norm": 1244.999056136522,
"learning_rate": 4.603133832077953e-07,
"logits/chosen": -4.672577381134033,
"logits/rejected": -4.9231157302856445,
"logps/chosen": -206.11221313476562,
"logps/rejected": -176.262451171875,
"loss": 0.5909,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 3.6933536529541016,
"rewards/margins": 6.304307460784912,
"rewards/rejected": -2.6109542846679688,
"step": 100
},
{
"epoch": 0.2635046113306983,
"eval_logits/chosen": -4.612120628356934,
"eval_logits/rejected": -4.85081672668457,
"eval_logps/chosen": -396.4924011230469,
"eval_logps/rejected": -512.6068115234375,
"eval_loss": 6.653407096862793,
"eval_rewards/accuracies": 0.27734375,
"eval_rewards/chosen": -3.0089728832244873,
"eval_rewards/margins": -5.8993988037109375,
"eval_rewards/rejected": 2.890425682067871,
"eval_runtime": 98.318,
"eval_samples_per_second": 20.342,
"eval_steps_per_second": 0.325,
"step": 100
},
{
"epoch": 0.2898550724637681,
"grad_norm": 870.1595065644425,
"learning_rate": 4.4698693176863316e-07,
"logits/chosen": -4.71740198135376,
"logits/rejected": -4.952963829040527,
"logps/chosen": -208.0568084716797,
"logps/rejected": -172.92605590820312,
"loss": 0.4837,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 3.114009380340576,
"rewards/margins": 7.352834224700928,
"rewards/rejected": -4.238823890686035,
"step": 110
},
{
"epoch": 0.31620553359683795,
"grad_norm": 1230.5660125025618,
"learning_rate": 4.319896928940505e-07,
"logits/chosen": -4.8249382972717285,
"logits/rejected": -5.139795780181885,
"logps/chosen": -204.9242401123047,
"logps/rejected": -181.16221618652344,
"loss": 0.6657,
"rewards/accuracies": 0.8125,
"rewards/chosen": 4.0227861404418945,
"rewards/margins": 6.973240852355957,
"rewards/rejected": -2.950455904006958,
"step": 120
},
{
"epoch": 0.3425559947299078,
"grad_norm": 1400.2090737238004,
"learning_rate": 4.1544886892205354e-07,
"logits/chosen": -4.680369853973389,
"logits/rejected": -4.979363441467285,
"logps/chosen": -218.03897094726562,
"logps/rejected": -192.44757080078125,
"loss": 0.4732,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 3.1923911571502686,
"rewards/margins": 8.124491691589355,
"rewards/rejected": -4.932101249694824,
"step": 130
},
{
"epoch": 0.3689064558629776,
"grad_norm": 1284.6303869923338,
"learning_rate": 3.975047544428254e-07,
"logits/chosen": -4.738985061645508,
"logits/rejected": -4.995828628540039,
"logps/chosen": -200.0480499267578,
"logps/rejected": -173.74069213867188,
"loss": 0.7249,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 3.118265151977539,
"rewards/margins": 6.879487037658691,
"rewards/rejected": -3.7612221240997314,
"step": 140
},
{
"epoch": 0.3952569169960474,
"grad_norm": 1087.9280552765,
"learning_rate": 3.78309546359696e-07,
"logits/chosen": -4.816591739654541,
"logits/rejected": -5.046236991882324,
"logps/chosen": -197.75784301757812,
"logps/rejected": -187.12631225585938,
"loss": 0.6191,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.7183030843734741,
"rewards/margins": 8.374044418334961,
"rewards/rejected": -6.655740261077881,
"step": 150
},
{
"epoch": 0.42160737812911725,
"grad_norm": 870.0436658351372,
"learning_rate": 3.580260529980584e-07,
"logits/chosen": -4.622679233551025,
"logits/rejected": -4.90102481842041,
"logps/chosen": -210.0752410888672,
"logps/rejected": -182.36378479003906,
"loss": 0.5743,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 2.5026469230651855,
"rewards/margins": 6.609257698059082,
"rewards/rejected": -4.106611251831055,
"step": 160
},
{
"epoch": 0.4479578392621871,
"grad_norm": 1109.7545725706777,
"learning_rate": 3.36826313211205e-07,
"logits/chosen": -4.862623691558838,
"logits/rejected": -5.1108198165893555,
"logps/chosen": -195.3279266357422,
"logps/rejected": -170.62802124023438,
"loss": 0.6483,
"rewards/accuracies": 0.8187500238418579,
"rewards/chosen": 2.5267701148986816,
"rewards/margins": 7.295065402984619,
"rewards/rejected": -4.768294811248779,
"step": 170
},
{
"epoch": 0.4743083003952569,
"grad_norm": 1174.4955529247104,
"learning_rate": 3.14890137195437e-07,
"logits/chosen": -4.831478595733643,
"logits/rejected": -5.037031173706055,
"logps/chosen": -211.2603302001953,
"logps/rejected": -190.32931518554688,
"loss": 0.5829,
"rewards/accuracies": 0.84375,
"rewards/chosen": 1.8433873653411865,
"rewards/margins": 7.003817558288574,
"rewards/rejected": -5.160429954528809,
"step": 180
},
{
"epoch": 0.5006587615283268,
"grad_norm": 1405.2874598551575,
"learning_rate": 2.9240358139084013e-07,
"logits/chosen": -4.9419050216674805,
"logits/rejected": -5.200289726257324,
"logps/chosen": -209.5860595703125,
"logps/rejected": -188.95069885253906,
"loss": 0.5485,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 1.218836784362793,
"rewards/margins": 8.241748809814453,
"rewards/rejected": -7.022912502288818,
"step": 190
},
{
"epoch": 0.5270092226613966,
"grad_norm": 1006.939051019727,
"learning_rate": 2.695573704031885e-07,
"logits/chosen": -4.7473955154418945,
"logits/rejected": -5.000621795654297,
"logps/chosen": -217.6959686279297,
"logps/rejected": -190.3670196533203,
"loss": 0.7239,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 3.8086190223693848,
"rewards/margins": 6.961747646331787,
"rewards/rejected": -3.1531288623809814,
"step": 200
},
{
"epoch": 0.5270092226613966,
"eval_logits/chosen": -4.771502494812012,
"eval_logits/rejected": -4.989596843719482,
"eval_logps/chosen": -396.1126708984375,
"eval_logps/rejected": -511.37469482421875,
"eval_loss": 8.072031021118164,
"eval_rewards/accuracies": 0.2734375,
"eval_rewards/chosen": -2.8191022872924805,
"eval_rewards/margins": -6.325590133666992,
"eval_rewards/rejected": 3.5064878463745117,
"eval_runtime": 97.9327,
"eval_samples_per_second": 20.422,
"eval_steps_per_second": 0.327,
"step": 200
},
{
"epoch": 0.5533596837944664,
"grad_norm": 1103.8948285514384,
"learning_rate": 2.465452793317865e-07,
"logits/chosen": -4.7866034507751465,
"logits/rejected": -5.081458568572998,
"logps/chosen": -228.73922729492188,
"logps/rejected": -201.91409301757812,
"loss": 0.5681,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 2.7976126670837402,
"rewards/margins": 7.8736419677734375,
"rewards/rejected": -5.076028823852539,
"step": 210
},
{
"epoch": 0.5797101449275363,
"grad_norm": 715.6592810091468,
"learning_rate": 2.2356249022388789e-07,
"logits/chosen": -4.758913516998291,
"logits/rejected": -5.04398250579834,
"logps/chosen": -205.92568969726562,
"logps/rejected": -170.3586883544922,
"loss": 0.4895,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 4.249292373657227,
"rewards/margins": 7.541260719299316,
"rewards/rejected": -3.291968822479248,
"step": 220
},
{
"epoch": 0.6060606060606061,
"grad_norm": 944.8747719910025,
"learning_rate": 2.0080393659578038e-07,
"logits/chosen": -4.764594078063965,
"logits/rejected": -5.100175857543945,
"logps/chosen": -210.0486297607422,
"logps/rejected": -179.91928100585938,
"loss": 0.5197,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 5.489472389221191,
"rewards/margins": 8.828967094421387,
"rewards/rejected": -3.3394947052001953,
"step": 230
},
{
"epoch": 0.6324110671936759,
"grad_norm": 1267.6123006349214,
"learning_rate": 1.7846265006183976e-07,
"logits/chosen": -4.821089744567871,
"logits/rejected": -5.0871124267578125,
"logps/chosen": -211.7589874267578,
"logps/rejected": -181.33819580078125,
"loss": 0.5444,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -0.23131565749645233,
"rewards/margins": 6.660643577575684,
"rewards/rejected": -6.891959190368652,
"step": 240
},
{
"epoch": 0.6587615283267457,
"grad_norm": 911.8592996107284,
"learning_rate": 1.5672812309497722e-07,
"logits/chosen": -4.781493663787842,
"logits/rejected": -5.052074432373047,
"logps/chosen": -201.82290649414062,
"logps/rejected": -175.06719970703125,
"loss": 0.5887,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 2.856235980987549,
"rewards/margins": 8.613394737243652,
"rewards/rejected": -5.757159233093262,
"step": 250
},
{
"epoch": 0.6851119894598156,
"grad_norm": 840.8020636445922,
"learning_rate": 1.357847018050843e-07,
"logits/chosen": -4.674568176269531,
"logits/rejected": -4.961749076843262,
"logps/chosen": -232.63119506835938,
"logps/rejected": -204.2981719970703,
"loss": 0.6421,
"rewards/accuracies": 0.8062499761581421,
"rewards/chosen": 2.906367778778076,
"rewards/margins": 7.688606262207031,
"rewards/rejected": -4.782238483428955,
"step": 260
},
{
"epoch": 0.7114624505928854,
"grad_norm": 787.2425305884327,
"learning_rate": 1.1581002236747328e-07,
"logits/chosen": -4.637770652770996,
"logits/rejected": -4.975251197814941,
"logps/chosen": -190.36253356933594,
"logps/rejected": -166.21788024902344,
"loss": 0.5113,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 2.2253878116607666,
"rewards/margins": 8.868097305297852,
"rewards/rejected": -6.6427106857299805,
"step": 270
},
{
"epoch": 0.7378129117259552,
"grad_norm": 1064.0700290859438,
"learning_rate": 9.697350436308427e-08,
"logits/chosen": -4.585692405700684,
"logits/rejected": -4.871885776519775,
"logps/chosen": -229.0143585205078,
"logps/rejected": -194.08761596679688,
"loss": 0.5252,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 3.0419421195983887,
"rewards/margins": 8.473891258239746,
"rewards/rejected": -5.431949138641357,
"step": 280
},
{
"epoch": 0.764163372859025,
"grad_norm": 839.1911415542294,
"learning_rate": 7.943491380952188e-08,
"logits/chosen": -4.828024864196777,
"logits/rejected": -5.060397148132324,
"logps/chosen": -196.42611694335938,
"logps/rejected": -171.01446533203125,
"loss": 0.4822,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 4.435191631317139,
"rewards/margins": 8.396050453186035,
"rewards/rejected": -3.9608588218688965,
"step": 290
},
{
"epoch": 0.7905138339920948,
"grad_norm": 887.4606387846492,
"learning_rate": 6.334300807088508e-08,
"logits/chosen": -4.608688831329346,
"logits/rejected": -4.9593987464904785,
"logps/chosen": -195.76962280273438,
"logps/rejected": -164.97048950195312,
"loss": 0.5556,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 3.875316619873047,
"rewards/margins": 8.369561195373535,
"rewards/rejected": -4.4942450523376465,
"step": 300
},
{
"epoch": 0.7905138339920948,
"eval_logits/chosen": -4.660388469696045,
"eval_logits/rejected": -4.890798091888428,
"eval_logps/chosen": -399.5094909667969,
"eval_logps/rejected": -517.530029296875,
"eval_loss": 6.923000335693359,
"eval_rewards/accuracies": 0.31640625,
"eval_rewards/chosen": -4.517519474029541,
"eval_rewards/margins": -4.946353435516357,
"eval_rewards/rejected": 0.42883408069610596,
"eval_runtime": 99.1917,
"eval_samples_per_second": 20.163,
"eval_steps_per_second": 0.323,
"step": 300
},
{
"epoch": 0.8168642951251647,
"grad_norm": 1030.5057638496492,
"learning_rate": 4.8834274139883084e-08,
"logits/chosen": -4.663365840911865,
"logits/rejected": -5.023074150085449,
"logps/chosen": -201.54336547851562,
"logps/rejected": -171.71324157714844,
"loss": 0.533,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 4.074584007263184,
"rewards/margins": 8.798391342163086,
"rewards/rejected": -4.723808765411377,
"step": 310
},
{
"epoch": 0.8432147562582345,
"grad_norm": 1279.040936993484,
"learning_rate": 3.60317709937693e-08,
"logits/chosen": -4.707262992858887,
"logits/rejected": -5.011012077331543,
"logps/chosen": -223.0199432373047,
"logps/rejected": -183.51393127441406,
"loss": 0.5239,
"rewards/accuracies": 0.8187500238418579,
"rewards/chosen": 3.1946961879730225,
"rewards/margins": 7.443505764007568,
"rewards/rejected": -4.248808860778809,
"step": 320
},
{
"epoch": 0.8695652173913043,
"grad_norm": 790.9511672313881,
"learning_rate": 2.5044085842905683e-08,
"logits/chosen": -4.707150459289551,
"logits/rejected": -4.967694282531738,
"logps/chosen": -208.9776153564453,
"logps/rejected": -184.1087188720703,
"loss": 0.5674,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 4.723270893096924,
"rewards/margins": 9.567978858947754,
"rewards/rejected": -4.844708442687988,
"step": 330
},
{
"epoch": 0.8959156785243741,
"grad_norm": 1075.929120672392,
"learning_rate": 1.5964413124758493e-08,
"logits/chosen": -4.645999431610107,
"logits/rejected": -4.947402477264404,
"logps/chosen": -212.19015502929688,
"logps/rejected": -185.76678466796875,
"loss": 0.5578,
"rewards/accuracies": 0.875,
"rewards/chosen": 4.216076374053955,
"rewards/margins": 8.17945671081543,
"rewards/rejected": -3.9633796215057373,
"step": 340
},
{
"epoch": 0.922266139657444,
"grad_norm": 1055.0113319220882,
"learning_rate": 8.869764055041501e-09,
"logits/chosen": -4.714905738830566,
"logits/rejected": -4.937991619110107,
"logps/chosen": -215.5680389404297,
"logps/rejected": -200.6318359375,
"loss": 0.5113,
"rewards/accuracies": 0.793749988079071,
"rewards/chosen": 2.9399425983428955,
"rewards/margins": 7.131015777587891,
"rewards/rejected": -4.191073417663574,
"step": 350
},
{
"epoch": 0.9486166007905138,
"grad_norm": 1003.3923870155849,
"learning_rate": 3.82031344036729e-09,
"logits/chosen": -4.614500999450684,
"logits/rejected": -4.9195661544799805,
"logps/chosen": -216.197998046875,
"logps/rejected": -186.8596649169922,
"loss": 0.4994,
"rewards/accuracies": 0.8187500238418579,
"rewards/chosen": 2.650672197341919,
"rewards/margins": 6.149393081665039,
"rewards/rejected": -3.49872088432312,
"step": 360
},
{
"epoch": 0.9749670619235836,
"grad_norm": 1021.842139855702,
"learning_rate": 8.588892925590063e-10,
"logits/chosen": -4.69917631149292,
"logits/rejected": -5.054296016693115,
"logps/chosen": -212.79714965820312,
"logps/rejected": -171.7442169189453,
"loss": 0.5535,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 3.2063796520233154,
"rewards/margins": 8.728368759155273,
"rewards/rejected": -5.521987438201904,
"step": 370
},
{
"epoch": 0.9986824769433466,
"step": 379,
"total_flos": 0.0,
"train_loss": 0.5517442987587962,
"train_runtime": 6181.8185,
"train_samples_per_second": 7.85,
"train_steps_per_second": 0.061
}
],
"logging_steps": 10,
"max_steps": 379,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
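
The JSON above is the complete Trainer state: top-level run metadata plus a "log_history" array holding one training entry every 10 steps (logging_steps) and one evaluation entry every 100 steps (eval_steps). Below is a minimal sketch, not part of the checkpoint itself, for reading the state back and summarizing the loss curves; it assumes the file sits in the current working directory under the name shown in the title and uses only the standard library.

import json

# Load the state written by transformers.Trainer alongside each checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print("train points:", len(train_log), "| eval points:", len(eval_log))
print("last logged train loss:", train_log[-1]["loss"])
print("best eval loss:", min(e["eval_loss"] for e in eval_log))
print("final epoch:", state["epoch"], "of", state["num_train_epochs"])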