zephyr-7b-dpo-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9945205479452055,
"eval_steps": 500,
"global_step": 364,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 749.8044037682902,
"learning_rate": 2.702702702702703e-10,
"logits/chosen": -0.7030794620513916,
"logits/rejected": -0.3951629400253296,
"logps/chosen": -341.73382568359375,
"logps/rejected": -292.9862060546875,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.05,
"grad_norm": 728.4025747943551,
"learning_rate": 2.702702702702703e-09,
"logits/chosen": -0.8550878167152405,
"logits/rejected": -0.5013476610183716,
"logps/chosen": -343.0583801269531,
"logps/rejected": -297.6263427734375,
"loss": 0.7129,
"rewards/accuracies": 0.40625,
"rewards/chosen": 0.02346261963248253,
"rewards/margins": -0.025641746819019318,
"rewards/rejected": 0.049104366451501846,
"step": 10
},
{
"epoch": 0.11,
"grad_norm": 769.1100121309379,
"learning_rate": 5.405405405405406e-09,
"logits/chosen": -0.89165860414505,
"logits/rejected": -0.5214177966117859,
"logps/chosen": -333.6175231933594,
"logps/rejected": -287.69482421875,
"loss": 0.7088,
"rewards/accuracies": 0.512499988079071,
"rewards/chosen": -0.0008101940038613975,
"rewards/margins": 0.021183893084526062,
"rewards/rejected": -0.02199408784508705,
"step": 20
},
{
"epoch": 0.16,
"grad_norm": 782.2455729371497,
"learning_rate": 8.108108108108109e-09,
"logits/chosen": -0.9365941882133484,
"logits/rejected": -0.5470438599586487,
"logps/chosen": -332.33966064453125,
"logps/rejected": -289.5589904785156,
"loss": 0.7083,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.015217331238090992,
"rewards/margins": -0.010174614377319813,
"rewards/rejected": -0.005042716860771179,
"step": 30
},
{
"epoch": 0.22,
"grad_norm": 736.3539975893915,
"learning_rate": 9.997923381619256e-09,
"logits/chosen": -0.9017000198364258,
"logits/rejected": -0.555226743221283,
"logps/chosen": -337.62115478515625,
"logps/rejected": -306.5302429199219,
"loss": 0.7072,
"rewards/accuracies": 0.5687500238418579,
"rewards/chosen": 0.05695560574531555,
"rewards/margins": 0.08540164679288864,
"rewards/rejected": -0.02844604291021824,
"step": 40
},
{
"epoch": 0.27,
"grad_norm": 730.7089871756754,
"learning_rate": 9.961053687802851e-09,
"logits/chosen": -0.9294391870498657,
"logits/rejected": -0.5007705688476562,
"logps/chosen": -333.2597961425781,
"logps/rejected": -295.4553527832031,
"loss": 0.6941,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.01942090317606926,
"rewards/margins": 0.017053481191396713,
"rewards/rejected": -0.03647438436746597,
"step": 50
},
{
"epoch": 0.33,
"grad_norm": 730.7067078437899,
"learning_rate": 9.878428410862483e-09,
"logits/chosen": -0.8303865194320679,
"logits/rejected": -0.45489081740379333,
"logps/chosen": -331.8045349121094,
"logps/rejected": -295.13055419921875,
"loss": 0.6832,
"rewards/accuracies": 0.6031249761581421,
"rewards/chosen": -0.029834549874067307,
"rewards/margins": 0.05674406886100769,
"rewards/rejected": -0.0865786224603653,
"step": 60
},
{
"epoch": 0.38,
"grad_norm": 674.6624722402527,
"learning_rate": 9.750809600145954e-09,
"logits/chosen": -1.0535982847213745,
"logits/rejected": -0.6284598112106323,
"logps/chosen": -351.1028747558594,
"logps/rejected": -304.6434631347656,
"loss": 0.6804,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.0078976359218359,
"rewards/margins": 0.11121608316898346,
"rewards/rejected": -0.10331843793392181,
"step": 70
},
{
"epoch": 0.44,
"grad_norm": 640.6205441818687,
"learning_rate": 9.579374278412817e-09,
"logits/chosen": -0.9343639612197876,
"logits/rejected": -0.3806554079055786,
"logps/chosen": -344.04443359375,
"logps/rejected": -294.193359375,
"loss": 0.6487,
"rewards/accuracies": 0.659375011920929,
"rewards/chosen": -0.01617306098341942,
"rewards/margins": 0.14446857571601868,
"rewards/rejected": -0.1606416255235672,
"step": 80
},
{
"epoch": 0.49,
"grad_norm": 629.6802089882239,
"learning_rate": 9.365703586204494e-09,
"logits/chosen": -1.0297319889068604,
"logits/rejected": -0.654572606086731,
"logps/chosen": -340.8974609375,
"logps/rejected": -307.7338562011719,
"loss": 0.639,
"rewards/accuracies": 0.621874988079071,
"rewards/chosen": 0.009362801909446716,
"rewards/margins": 0.15911515057086945,
"rewards/rejected": -0.14975234866142273,
"step": 90
},
{
"epoch": 0.55,
"grad_norm": 654.9042346572915,
"learning_rate": 9.111768199053588e-09,
"logits/chosen": -0.8516262173652649,
"logits/rejected": -0.5497715473175049,
"logps/chosen": -318.9173889160156,
"logps/rejected": -284.2920837402344,
"loss": 0.6303,
"rewards/accuracies": 0.609375,
"rewards/chosen": -0.07476776093244553,
"rewards/margins": 0.15329131484031677,
"rewards/rejected": -0.2280590981245041,
"step": 100
},
{
"epoch": 0.6,
"grad_norm": 568.2894000484466,
"learning_rate": 8.819910152028871e-09,
"logits/chosen": -0.7658065557479858,
"logits/rejected": -0.4776793122291565,
"logps/chosen": -335.7439270019531,
"logps/rejected": -302.370849609375,
"loss": 0.6175,
"rewards/accuracies": 0.640625,
"rewards/chosen": 0.008360857143998146,
"rewards/margins": 0.20505543053150177,
"rewards/rejected": -0.19669455289840698,
"step": 110
},
{
"epoch": 0.66,
"grad_norm": 569.4638082407581,
"learning_rate": 8.492821239247364e-09,
"logits/chosen": -0.851113498210907,
"logits/rejected": -0.49246683716773987,
"logps/chosen": -349.08477783203125,
"logps/rejected": -308.5341491699219,
"loss": 0.5935,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": 0.11443378776311874,
"rewards/margins": 0.3531562387943268,
"rewards/rejected": -0.23872247338294983,
"step": 120
},
{
"epoch": 0.71,
"grad_norm": 581.8336234971125,
"learning_rate": 8.133518187573863e-09,
"logits/chosen": -0.8286861181259155,
"logits/rejected": -0.5028008818626404,
"logps/chosen": -328.44915771484375,
"logps/rejected": -298.2557373046875,
"loss": 0.5959,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": 0.06084731966257095,
"rewards/margins": 0.24498350918293,
"rewards/rejected": -0.18413618206977844,
"step": 130
},
{
"epoch": 0.77,
"grad_norm": 566.985090918969,
"learning_rate": 7.745314833479833e-09,
"logits/chosen": -1.0121524333953857,
"logits/rejected": -0.5360809564590454,
"logps/chosen": -338.01531982421875,
"logps/rejected": -293.3200378417969,
"loss": 0.5864,
"rewards/accuracies": 0.7281249761581421,
"rewards/chosen": 0.14776429533958435,
"rewards/margins": 0.35846027731895447,
"rewards/rejected": -0.21069595217704773,
"step": 140
},
{
"epoch": 0.82,
"grad_norm": 585.5307136387664,
"learning_rate": 7.33179155967327e-09,
"logits/chosen": -0.9024865031242371,
"logits/rejected": -0.5908719301223755,
"logps/chosen": -331.0682373046875,
"logps/rejected": -298.38677978515625,
"loss": 0.5718,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": 0.1406635046005249,
"rewards/margins": 0.32861852645874023,
"rewards/rejected": -0.18795502185821533,
"step": 150
},
{
"epoch": 0.88,
"grad_norm": 608.4424256652742,
"learning_rate": 6.896762273384178e-09,
"logits/chosen": -0.9404014348983765,
"logits/rejected": -0.5414465069770813,
"logps/chosen": -340.7105407714844,
"logps/rejected": -301.7948303222656,
"loss": 0.5625,
"rewards/accuracies": 0.7406250238418579,
"rewards/chosen": 0.29647737741470337,
"rewards/margins": 0.45771312713623047,
"rewards/rejected": -0.1612357348203659,
"step": 160
},
{
"epoch": 0.93,
"grad_norm": 571.5105228832468,
"learning_rate": 6.444239230863505e-09,
"logits/chosen": -0.6664124131202698,
"logits/rejected": -0.3767244815826416,
"logps/chosen": -326.7174377441406,
"logps/rejected": -292.18304443359375,
"loss": 0.5516,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.3695653975009918,
"rewards/margins": 0.49222081899642944,
"rewards/rejected": -0.12265541404485703,
"step": 170
},
{
"epoch": 0.99,
"grad_norm": 486.04829364539904,
"learning_rate": 5.97839603251764e-09,
"logits/chosen": -0.8600166440010071,
"logits/rejected": -0.5389689803123474,
"logps/chosen": -323.1247863769531,
"logps/rejected": -290.40802001953125,
"loss": 0.5408,
"rewards/accuracies": 0.784375011920929,
"rewards/chosen": 0.3320290446281433,
"rewards/margins": 0.5290471315383911,
"rewards/rejected": -0.19701813161373138,
"step": 180
},
{
"epoch": 1.04,
"grad_norm": 524.5665762946036,
"learning_rate": 5.5035291299727915e-09,
"logits/chosen": -1.0205037593841553,
"logits/rejected": -0.6305373907089233,
"logps/chosen": -351.1410827636719,
"logps/rejected": -313.13812255859375,
"loss": 0.5258,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": 0.40980952978134155,
"rewards/margins": 0.5211332440376282,
"rewards/rejected": -0.11132367700338364,
"step": 190
},
{
"epoch": 1.1,
"grad_norm": 474.67341086960977,
"learning_rate": 5.024018200087854e-09,
"logits/chosen": -0.836185097694397,
"logits/rejected": -0.33586248755455017,
"logps/chosen": -321.29168701171875,
"logps/rejected": -280.28753662109375,
"loss": 0.512,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": 0.3657322824001312,
"rewards/margins": 0.5857880711555481,
"rewards/rejected": -0.22005578875541687,
"step": 200
},
{
"epoch": 1.15,
"grad_norm": 480.69197567309544,
"learning_rate": 4.544285751384584e-09,
"logits/chosen": -0.9048161506652832,
"logits/rejected": -0.6336894631385803,
"logps/chosen": -329.5954895019531,
"logps/rejected": -297.79901123046875,
"loss": 0.5067,
"rewards/accuracies": 0.721875011920929,
"rewards/chosen": 0.41357460618019104,
"rewards/margins": 0.5688098669052124,
"rewards/rejected": -0.1552352011203766,
"step": 210
},
{
"epoch": 1.21,
"grad_norm": 498.26717868032813,
"learning_rate": 4.068756335443199e-09,
"logits/chosen": -0.8500760793685913,
"logits/rejected": -0.501125693321228,
"logps/chosen": -339.07012939453125,
"logps/rejected": -299.48095703125,
"loss": 0.514,
"rewards/accuracies": 0.7906249761581421,
"rewards/chosen": 0.5280696153640747,
"rewards/margins": 0.6353160738945007,
"rewards/rejected": -0.10724644362926483,
"step": 220
},
{
"epoch": 1.26,
"grad_norm": 522.8370181213115,
"learning_rate": 3.6018157394549284e-09,
"logits/chosen": -0.8433824777603149,
"logits/rejected": -0.4627400040626526,
"logps/chosen": -332.1639709472656,
"logps/rejected": -297.5298156738281,
"loss": 0.5207,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": 0.4683798849582672,
"rewards/margins": 0.5844911336898804,
"rewards/rejected": -0.11611130088567734,
"step": 230
},
{
"epoch": 1.32,
"grad_norm": 521.1145144981034,
"learning_rate": 3.14777053629687e-09,
"logits/chosen": -1.0663576126098633,
"logits/rejected": -0.6788166761398315,
"logps/chosen": -336.2250671386719,
"logps/rejected": -296.2794494628906,
"loss": 0.5124,
"rewards/accuracies": 0.7593749761581421,
"rewards/chosen": 0.5691989660263062,
"rewards/margins": 0.702237069606781,
"rewards/rejected": -0.13303814828395844,
"step": 240
},
{
"epoch": 1.37,
"grad_norm": 487.7677359334702,
"learning_rate": 2.7108083651970002e-09,
"logits/chosen": -0.7713569402694702,
"logits/rejected": -0.4026997983455658,
"logps/chosen": -327.7779235839844,
"logps/rejected": -290.0624084472656,
"loss": 0.5045,
"rewards/accuracies": 0.7406250238418579,
"rewards/chosen": 0.5247091054916382,
"rewards/margins": 0.6842475533485413,
"rewards/rejected": -0.1595384031534195,
"step": 250
},
{
"epoch": 1.42,
"grad_norm": 524.4673235373901,
"learning_rate": 2.294959309319086e-09,
"logits/chosen": -0.8702203035354614,
"logits/rejected": -0.6658123135566711,
"logps/chosen": -328.04638671875,
"logps/rejected": -302.77777099609375,
"loss": 0.5103,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": 0.6019162535667419,
"rewards/margins": 0.5868763327598572,
"rewards/rejected": 0.015039998106658459,
"step": 260
},
{
"epoch": 1.48,
"grad_norm": 553.8708647904535,
"learning_rate": 1.9040587264803672e-09,
"logits/chosen": -0.9641194343566895,
"logits/rejected": -0.4711450934410095,
"logps/chosen": -340.99169921875,
"logps/rejected": -293.9861145019531,
"loss": 0.4939,
"rewards/accuracies": 0.831250011920929,
"rewards/chosen": 0.56943279504776,
"rewards/margins": 0.7665254473686218,
"rewards/rejected": -0.1970926821231842,
"step": 270
},
{
"epoch": 1.53,
"grad_norm": 453.07875520502444,
"learning_rate": 1.5417118758126409e-09,
"logits/chosen": -0.8815242648124695,
"logits/rejected": -0.4348044991493225,
"logps/chosen": -342.6517028808594,
"logps/rejected": -296.92169189453125,
"loss": 0.4934,
"rewards/accuracies": 0.7906249761581421,
"rewards/chosen": 0.5705780982971191,
"rewards/margins": 0.8008774518966675,
"rewards/rejected": -0.23029938340187073,
"step": 280
},
{
"epoch": 1.59,
"grad_norm": 499.34891989016415,
"learning_rate": 1.2112606666135601e-09,
"logits/chosen": -0.9316185712814331,
"logits/rejected": -0.5389292240142822,
"logps/chosen": -333.5206604003906,
"logps/rejected": -297.6076354980469,
"loss": 0.4978,
"rewards/accuracies": 0.765625,
"rewards/chosen": 0.5433750748634338,
"rewards/margins": 0.6790112257003784,
"rewards/rejected": -0.135636106133461,
"step": 290
},
{
"epoch": 1.64,
"grad_norm": 475.94785473220173,
"learning_rate": 9.157528360620415e-10,
"logits/chosen": -0.8223875164985657,
"logits/rejected": -0.5513488054275513,
"logps/chosen": -312.7162780761719,
"logps/rejected": -280.6097106933594,
"loss": 0.4933,
"rewards/accuracies": 0.7593749761581421,
"rewards/chosen": 0.5738901495933533,
"rewards/margins": 0.6515259146690369,
"rewards/rejected": -0.07763569802045822,
"step": 300
},
{
"epoch": 1.7,
"grad_norm": 530.0260445258433,
"learning_rate": 6.579138400703716e-10,
"logits/chosen": -0.8587888479232788,
"logits/rejected": -0.4476223587989807,
"logps/chosen": -334.20648193359375,
"logps/rejected": -295.112548828125,
"loss": 0.4913,
"rewards/accuracies": 0.7906249761581421,
"rewards/chosen": 0.6548627018928528,
"rewards/margins": 0.7542473077774048,
"rewards/rejected": -0.09938466548919678,
"step": 310
},
{
"epoch": 1.75,
"grad_norm": 487.39892256915766,
"learning_rate": 4.401217165224564e-10,
"logits/chosen": -0.7274507284164429,
"logits/rejected": -0.45758286118507385,
"logps/chosen": -322.195068359375,
"logps/rejected": -289.47064208984375,
"loss": 0.4876,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.6047518849372864,
"rewards/margins": 0.6844498515129089,
"rewards/rejected": -0.07969798892736435,
"step": 320
},
{
"epoch": 1.81,
"grad_norm": 474.27503066220174,
"learning_rate": 2.643851527335006e-10,
"logits/chosen": -0.9080432653427124,
"logits/rejected": -0.5377383232116699,
"logps/chosen": -331.7919921875,
"logps/rejected": -301.7752685546875,
"loss": 0.4856,
"rewards/accuracies": 0.7718750238418579,
"rewards/chosen": 0.6742402911186218,
"rewards/margins": 0.7511230111122131,
"rewards/rejected": -0.07688269764184952,
"step": 330
},
{
"epoch": 1.86,
"grad_norm": 515.521744630777,
"learning_rate": 1.3232495941396638e-10,
"logits/chosen": -0.9773859977722168,
"logits/rejected": -0.6425100564956665,
"logps/chosen": -329.1192321777344,
"logps/rejected": -293.9158020019531,
"loss": 0.4902,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.5788453817367554,
"rewards/margins": 0.6871310472488403,
"rewards/rejected": -0.10828564316034317,
"step": 340
},
{
"epoch": 1.92,
"grad_norm": 474.472293237774,
"learning_rate": 4.515912200264427e-11,
"logits/chosen": -0.9029630422592163,
"logits/rejected": -0.4482271075248718,
"logps/chosen": -331.9292907714844,
"logps/rejected": -287.7762451171875,
"loss": 0.4845,
"rewards/accuracies": 0.7906249761581421,
"rewards/chosen": 0.5662914514541626,
"rewards/margins": 0.7054949998855591,
"rewards/rejected": -0.1392035186290741,
"step": 350
},
{
"epoch": 1.97,
"grad_norm": 466.2251927040291,
"learning_rate": 3.691567239743621e-12,
"logits/chosen": -0.866126537322998,
"logits/rejected": -0.38964423537254333,
"logps/chosen": -346.421875,
"logps/rejected": -304.1325988769531,
"loss": 0.4772,
"rewards/accuracies": 0.8031250238418579,
"rewards/chosen": 0.6508310437202454,
"rewards/margins": 0.8615090250968933,
"rewards/rejected": -0.21067801117897034,
"step": 360
},
{
"epoch": 1.99,
"step": 364,
"total_flos": 0.0,
"train_loss": 0.5661277400923299,
"train_runtime": 10889.322,
"train_samples_per_second": 8.572,
"train_steps_per_second": 0.033
}
],
"logging_steps": 10,
"max_steps": 364,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
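
A note on the numbers above: at step 1 every reward term is 0.0 and the logged loss is 0.6931, which is consistent with the standard sigmoid DPO objective, -log σ(reward margin), evaluated at a zero margin, since -log σ(0) = ln 2 ≈ 0.6931 (the metric names rewards/chosen, rewards/rejected and rewards/margins suggest TRL's DPOTrainer, where the logged rewards already include the β factor). Later entries show the margin between rewards/chosen and rewards/rejected widening while the loss falls toward the final train_loss of about 0.566.

For inspecting this log programmatically, the sketch below is an illustration only: it assumes the file is saved locally as trainer_state.json and that matplotlib is installed. It loads log_history and plots the training loss and reward margins against the global step; the final summary entry is skipped because it reports train_loss rather than a per-step loss.

```python
import json

import matplotlib.pyplot as plt

# Path is an assumption; point it at wherever this file is saved locally.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step logging entries; the last entry is a run summary
# (train_loss, train_runtime, ...) and has no per-step "loss" key.
logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in logs]
loss = [entry["loss"] for entry in logs]
margins = [entry["rewards/margins"] for entry in logs]

fig, (ax_loss, ax_margin) = plt.subplots(2, 1, sharex=True)
ax_loss.plot(steps, loss)
ax_loss.set_ylabel("loss")
ax_margin.plot(steps, margins)
ax_margin.set_ylabel("rewards/margins")
ax_margin.set_xlabel("global step")
fig.tight_layout()
plt.show()
```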