zephyr-7b-dpo-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9980806142034548,
"eval_steps": 10000000,
"global_step": 390,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 851.8646963671724,
"learning_rate": 1.282051282051282e-08,
"logits/chosen": -2.5583817958831787,
"logits/rejected": -2.4487552642822266,
"logps/chosen": -258.1644592285156,
"logps/rejected": -216.25729370117188,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.03,
"grad_norm": 827.2359528491244,
"learning_rate": 1.2820512820512818e-07,
"logits/chosen": -2.605931282043457,
"logits/rejected": -2.552781105041504,
"logps/chosen": -267.6236267089844,
"logps/rejected": -217.6671905517578,
"loss": 0.6967,
"rewards/accuracies": 0.4097222089767456,
"rewards/chosen": -0.03547710180282593,
"rewards/margins": -0.018225612118840218,
"rewards/rejected": -0.01725148782134056,
"step": 10
},
{
"epoch": 0.05,
"grad_norm": 598.05709697014,
"learning_rate": 2.5641025641025636e-07,
"logits/chosen": -2.6306538581848145,
"logits/rejected": -2.5675768852233887,
"logps/chosen": -260.528564453125,
"logps/rejected": -207.09140014648438,
"loss": 0.5351,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": 0.4653858244419098,
"rewards/margins": 0.4877452850341797,
"rewards/rejected": -0.022359488531947136,
"step": 20
},
{
"epoch": 0.08,
"grad_norm": 777.4500662657566,
"learning_rate": 3.8461538461538463e-07,
"logits/chosen": -2.651550769805908,
"logits/rejected": -2.5767629146575928,
"logps/chosen": -250.84542846679688,
"logps/rejected": -198.71180725097656,
"loss": 0.3391,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 2.5810751914978027,
"rewards/margins": 2.4870572090148926,
"rewards/rejected": 0.09401801228523254,
"step": 30
},
{
"epoch": 0.1,
"grad_norm": 344.88554576974366,
"learning_rate": 4.99989986344963e-07,
"logits/chosen": -2.6472256183624268,
"logits/rejected": -2.5672099590301514,
"logps/chosen": -243.0611114501953,
"logps/rejected": -193.21621704101562,
"loss": 0.3063,
"rewards/accuracies": 0.831250011920929,
"rewards/chosen": 4.372925758361816,
"rewards/margins": 4.090872287750244,
"rewards/rejected": 0.28205329179763794,
"step": 40
},
{
"epoch": 0.13,
"grad_norm": 481.831925522066,
"learning_rate": 4.987893180827479e-07,
"logits/chosen": -2.658193588256836,
"logits/rejected": -2.5846261978149414,
"logps/chosen": -256.97283935546875,
"logps/rejected": -203.25177001953125,
"loss": 0.342,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 6.758584499359131,
"rewards/margins": 5.620272636413574,
"rewards/rejected": 1.1383121013641357,
"step": 50
},
{
"epoch": 0.15,
"grad_norm": 507.1864724110635,
"learning_rate": 4.955969343539162e-07,
"logits/chosen": -2.6098527908325195,
"logits/rejected": -2.5346760749816895,
"logps/chosen": -260.9481506347656,
"logps/rejected": -208.94192504882812,
"loss": 0.3189,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 4.727741241455078,
"rewards/margins": 5.257795333862305,
"rewards/rejected": -0.5300543308258057,
"step": 60
},
{
"epoch": 0.18,
"grad_norm": 347.65616298975976,
"learning_rate": 4.90438392204474e-07,
"logits/chosen": -2.5738308429718018,
"logits/rejected": -2.496386766433716,
"logps/chosen": -291.3803405761719,
"logps/rejected": -228.85986328125,
"loss": 0.3314,
"rewards/accuracies": 0.90625,
"rewards/chosen": 3.918160915374756,
"rewards/margins": 6.1196980476379395,
"rewards/rejected": -2.201537609100342,
"step": 70
},
{
"epoch": 0.2,
"grad_norm": 548.6777756471274,
"learning_rate": 4.83354989019146e-07,
"logits/chosen": -2.5354433059692383,
"logits/rejected": -2.4591267108917236,
"logps/chosen": -259.90399169921875,
"logps/rejected": -205.8795623779297,
"loss": 0.2963,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 4.5924973487854,
"rewards/margins": 6.274462699890137,
"rewards/rejected": -1.6819652318954468,
"step": 80
},
{
"epoch": 0.23,
"grad_norm": 746.9613110272085,
"learning_rate": 4.7440343190975353e-07,
"logits/chosen": -2.572143316268921,
"logits/rejected": -2.514286518096924,
"logps/chosen": -256.91656494140625,
"logps/rejected": -218.2171630859375,
"loss": 0.3067,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 2.8972291946411133,
"rewards/margins": 5.130236625671387,
"rewards/rejected": -2.2330079078674316,
"step": 90
},
{
"epoch": 0.26,
"grad_norm": 292.7469627770502,
"learning_rate": 4.6365538373900506e-07,
"logits/chosen": -2.6255955696105957,
"logits/rejected": -2.5519518852233887,
"logps/chosen": -235.6707000732422,
"logps/rejected": -201.62594604492188,
"loss": 0.4639,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 3.8989346027374268,
"rewards/margins": 5.672036170959473,
"rewards/rejected": -1.773101806640625,
"step": 100
},
{
"epoch": 0.28,
"grad_norm": 462.4159148288855,
"learning_rate": 4.5119688941406386e-07,
"logits/chosen": -2.6220152378082275,
"logits/rejected": -2.5428764820098877,
"logps/chosen": -256.5944519042969,
"logps/rejected": -210.29629516601562,
"loss": 0.3852,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 5.447988033294678,
"rewards/margins": 6.571375370025635,
"rewards/rejected": -1.1233874559402466,
"step": 110
},
{
"epoch": 0.31,
"grad_norm": 684.7134637036779,
"learning_rate": 4.3712768704277524e-07,
"logits/chosen": -2.5924530029296875,
"logits/rejected": -2.523179769515991,
"logps/chosen": -261.6965026855469,
"logps/rejected": -209.29666137695312,
"loss": 0.3703,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 4.659531116485596,
"rewards/margins": 6.417691707611084,
"rewards/rejected": -1.758161187171936,
"step": 120
},
{
"epoch": 0.33,
"grad_norm": 363.69452359861873,
"learning_rate": 4.2156040946718343e-07,
"logits/chosen": -2.5601067543029785,
"logits/rejected": -2.4928698539733887,
"logps/chosen": -251.21463012695312,
"logps/rejected": -198.01596069335938,
"loss": 0.3349,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 3.6531822681427,
"rewards/margins": 6.222817897796631,
"rewards/rejected": -2.569636583328247,
"step": 130
},
{
"epoch": 0.36,
"grad_norm": 388.6865732944718,
"learning_rate": 4.046196825665637e-07,
"logits/chosen": -2.58420729637146,
"logits/rejected": -2.5159454345703125,
"logps/chosen": -269.7867431640625,
"logps/rejected": -217.6550750732422,
"loss": 0.3662,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 3.2337214946746826,
"rewards/margins": 5.692519187927246,
"rewards/rejected": -2.4587976932525635,
"step": 140
},
{
"epoch": 0.38,
"grad_norm": 392.10882995088383,
"learning_rate": 3.864411275486261e-07,
"logits/chosen": -2.568713426589966,
"logits/rejected": -2.5009543895721436,
"logps/chosen": -263.0735778808594,
"logps/rejected": -213.8876495361328,
"loss": 0.3955,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 4.510898590087891,
"rewards/margins": 6.606268882751465,
"rewards/rejected": -2.095369815826416,
"step": 150
},
{
"epoch": 0.41,
"grad_norm": 466.21844913881523,
"learning_rate": 3.671702752161759e-07,
"logits/chosen": -2.567312717437744,
"logits/rejected": -2.497217893600464,
"logps/chosen": -244.8683624267578,
"logps/rejected": -200.1343536376953,
"loss": 0.3547,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 2.441751480102539,
"rewards/margins": 6.709475040435791,
"rewards/rejected": -4.267723083496094,
"step": 160
},
{
"epoch": 0.44,
"grad_norm": 681.7420438130807,
"learning_rate": 3.4696140090121375e-07,
"logits/chosen": -2.5749361515045166,
"logits/rejected": -2.509636640548706,
"logps/chosen": -266.73236083984375,
"logps/rejected": -213.6396026611328,
"loss": 0.3163,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 2.3756332397460938,
"rewards/margins": 6.7328901290893555,
"rewards/rejected": -4.357257843017578,
"step": 170
},
{
"epoch": 0.46,
"grad_norm": 450.6259386496744,
"learning_rate": 3.259762893935617e-07,
"logits/chosen": -2.635408878326416,
"logits/rejected": -2.547847270965576,
"logps/chosen": -237.7583465576172,
"logps/rejected": -188.80947875976562,
"loss": 0.3732,
"rewards/accuracies": 0.875,
"rewards/chosen": 2.5650315284729004,
"rewards/margins": 5.900453090667725,
"rewards/rejected": -3.335421323776245,
"step": 180
},
{
"epoch": 0.49,
"grad_norm": 393.57273774416257,
"learning_rate": 3.0438293975154184e-07,
"logits/chosen": -2.5968377590179443,
"logits/rejected": -2.520313024520874,
"logps/chosen": -259.823974609375,
"logps/rejected": -205.4634246826172,
"loss": 0.3175,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 2.9979615211486816,
"rewards/margins": 6.678049564361572,
"rewards/rejected": -3.6800880432128906,
"step": 190
},
{
"epoch": 0.51,
"grad_norm": 569.8752439327117,
"learning_rate": 2.823542203635138e-07,
"logits/chosen": -2.62797212600708,
"logits/rejected": -2.5373997688293457,
"logps/chosen": -274.06500244140625,
"logps/rejected": -219.7933807373047,
"loss": 0.36,
"rewards/accuracies": 0.875,
"rewards/chosen": 3.8748526573181152,
"rewards/margins": 7.579891204833984,
"rewards/rejected": -3.705038070678711,
"step": 200
},
{
"epoch": 0.54,
"grad_norm": 528.723840543068,
"learning_rate": 2.600664850273538e-07,
"logits/chosen": -2.612349033355713,
"logits/rejected": -2.537264347076416,
"logps/chosen": -266.8662109375,
"logps/rejected": -212.73904418945312,
"loss": 0.4949,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 2.512554883956909,
"rewards/margins": 6.339517116546631,
"rewards/rejected": -3.8269622325897217,
"step": 210
},
{
"epoch": 0.56,
"grad_norm": 499.14391994879634,
"learning_rate": 2.3769816112703045e-07,
"logits/chosen": -2.6316843032836914,
"logits/rejected": -2.567474842071533,
"logps/chosen": -257.08892822265625,
"logps/rejected": -214.96658325195312,
"loss": 0.3779,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 2.75276517868042,
"rewards/margins": 5.872605323791504,
"rewards/rejected": -3.119840145111084,
"step": 220
},
{
"epoch": 0.59,
"grad_norm": 411.504863411568,
"learning_rate": 2.1542832120881677e-07,
"logits/chosen": -2.6690242290496826,
"logits/rejected": -2.5815441608428955,
"logps/chosen": -266.91094970703125,
"logps/rejected": -217.81674194335938,
"loss": 0.33,
"rewards/accuracies": 0.875,
"rewards/chosen": 3.8953208923339844,
"rewards/margins": 6.836805820465088,
"rewards/rejected": -2.9414849281311035,
"step": 230
},
{
"epoch": 0.61,
"grad_norm": 592.3030101879839,
"learning_rate": 1.934352493925695e-07,
"logits/chosen": -2.65104341506958,
"logits/rejected": -2.6025872230529785,
"logps/chosen": -262.87799072265625,
"logps/rejected": -222.07803344726562,
"loss": 0.3337,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 2.9119772911071777,
"rewards/margins": 8.015997886657715,
"rewards/rejected": -5.104020118713379,
"step": 240
},
{
"epoch": 0.64,
"grad_norm": 484.1153834763422,
"learning_rate": 1.7189501409486059e-07,
"logits/chosen": -2.6601402759552,
"logits/rejected": -2.5887746810913086,
"logps/chosen": -267.6604309082031,
"logps/rejected": -223.6746826171875,
"loss": 0.3508,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 2.4709649085998535,
"rewards/margins": 6.8468828201293945,
"rewards/rejected": -4.375916957855225,
"step": 250
},
{
"epoch": 0.67,
"grad_norm": 369.72495233490616,
"learning_rate": 1.5098005849021078e-07,
"logits/chosen": -2.6492981910705566,
"logits/rejected": -2.5904128551483154,
"logps/chosen": -261.8894958496094,
"logps/rejected": -209.9693145751953,
"loss": 0.3216,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 2.382293224334717,
"rewards/margins": 6.440347194671631,
"rewards/rejected": -4.058054447174072,
"step": 260
},
{
"epoch": 0.69,
"grad_norm": 423.81065150105087,
"learning_rate": 1.30857819994673e-07,
"logits/chosen": -2.626788377761841,
"logits/rejected": -2.541728973388672,
"logps/chosen": -270.8172912597656,
"logps/rejected": -231.0417022705078,
"loss": 0.4168,
"rewards/accuracies": 0.90625,
"rewards/chosen": 3.459994077682495,
"rewards/margins": 9.697509765625,
"rewards/rejected": -6.237515926361084,
"step": 270
},
{
"epoch": 0.72,
"grad_norm": 328.5138374152781,
"learning_rate": 1.116893898236716e-07,
"logits/chosen": -2.656203508377075,
"logits/rejected": -2.6001851558685303,
"logps/chosen": -269.64996337890625,
"logps/rejected": -220.4279327392578,
"loss": 0.3201,
"rewards/accuracies": 0.875,
"rewards/chosen": 2.4338252544403076,
"rewards/margins": 6.923757076263428,
"rewards/rejected": -4.489931583404541,
"step": 280
},
{
"epoch": 0.74,
"grad_norm": 382.1224954400422,
"learning_rate": 9.362822335518062e-08,
"logits/chosen": -2.6185498237609863,
"logits/rejected": -2.572524309158325,
"logps/chosen": -267.46600341796875,
"logps/rejected": -217.64089965820312,
"loss": 0.319,
"rewards/accuracies": 0.875,
"rewards/chosen": 2.6880507469177246,
"rewards/margins": 6.601712703704834,
"rewards/rejected": -3.9136624336242676,
"step": 290
},
{
"epoch": 0.77,
"grad_norm": 546.200687023906,
"learning_rate": 7.681891162260015e-08,
"logits/chosen": -2.6388938426971436,
"logits/rejected": -2.5842864513397217,
"logps/chosen": -273.81378173828125,
"logps/rejected": -221.57211303710938,
"loss": 0.3443,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 2.8565659523010254,
"rewards/margins": 6.786903381347656,
"rewards/rejected": -3.9303371906280518,
"step": 300
},
{
"epoch": 0.79,
"grad_norm": 307.4415182886001,
"learning_rate": 6.139602377230247e-08,
"logits/chosen": -2.604128360748291,
"logits/rejected": -2.5361759662628174,
"logps/chosen": -277.45074462890625,
"logps/rejected": -216.7763671875,
"loss": 0.3778,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 2.8939478397369385,
"rewards/margins": 7.20609188079834,
"rewards/rejected": -4.3121442794799805,
"step": 310
},
{
"epoch": 0.82,
"grad_norm": 438.0667457692161,
"learning_rate": 4.748302975270837e-08,
"logits/chosen": -2.6282718181610107,
"logits/rejected": -2.5831058025360107,
"logps/chosen": -260.8962707519531,
"logps/rejected": -205.3025665283203,
"loss": 0.3343,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.4093146324157715,
"rewards/margins": 6.292300701141357,
"rewards/rejected": -3.882986068725586,
"step": 320
},
{
"epoch": 0.84,
"grad_norm": 997.9385970550169,
"learning_rate": 3.5191311859445795e-08,
"logits/chosen": -2.6487433910369873,
"logits/rejected": -2.5906193256378174,
"logps/chosen": -264.0924377441406,
"logps/rejected": -218.33743286132812,
"loss": 0.3351,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 3.3637747764587402,
"rewards/margins": 6.7286200523376465,
"rewards/rejected": -3.364844799041748,
"step": 330
},
{
"epoch": 0.87,
"grad_norm": 381.15409194754363,
"learning_rate": 2.4619273049795996e-08,
"logits/chosen": -2.6333811283111572,
"logits/rejected": -2.577167510986328,
"logps/chosen": -259.5494689941406,
"logps/rejected": -211.8303680419922,
"loss": 0.2972,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 3.1570422649383545,
"rewards/margins": 7.332770347595215,
"rewards/rejected": -4.175727844238281,
"step": 340
},
{
"epoch": 0.9,
"grad_norm": 832.2502586276644,
"learning_rate": 1.5851549164932115e-08,
"logits/chosen": -2.6442418098449707,
"logits/rejected": -2.595158576965332,
"logps/chosen": -269.0829162597656,
"logps/rejected": -227.48281860351562,
"loss": 0.3277,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 3.1007628440856934,
"rewards/margins": 6.698919773101807,
"rewards/rejected": -3.598156452178955,
"step": 350
},
{
"epoch": 0.92,
"grad_norm": 372.63658491482676,
"learning_rate": 8.958331366609423e-09,
"logits/chosen": -2.6463985443115234,
"logits/rejected": -2.5795321464538574,
"logps/chosen": -274.5993957519531,
"logps/rejected": -220.4040069580078,
"loss": 0.3835,
"rewards/accuracies": 0.90625,
"rewards/chosen": 2.770512819290161,
"rewards/margins": 6.775577545166016,
"rewards/rejected": -4.005064964294434,
"step": 360
},
{
"epoch": 0.95,
"grad_norm": 695.4821075832409,
"learning_rate": 3.994804212627461e-09,
"logits/chosen": -2.6053659915924072,
"logits/rejected": -2.5696444511413574,
"logps/chosen": -273.0481262207031,
"logps/rejected": -229.8125762939453,
"loss": 0.3476,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 3.7336528301239014,
"rewards/margins": 6.8774590492248535,
"rewards/rejected": -3.143805980682373,
"step": 370
},
{
"epoch": 0.97,
"grad_norm": 284.5109598747135,
"learning_rate": 1.0007038696262516e-09,
"logits/chosen": -2.65440034866333,
"logits/rejected": -2.61323618888855,
"logps/chosen": -262.66998291015625,
"logps/rejected": -231.3791961669922,
"loss": 0.3209,
"rewards/accuracies": 0.875,
"rewards/chosen": 3.4292311668395996,
"rewards/margins": 6.774644374847412,
"rewards/rejected": -3.3454127311706543,
"step": 380
},
{
"epoch": 1.0,
"grad_norm": 490.88652867728223,
"learning_rate": 0.0,
"logits/chosen": -2.6619739532470703,
"logits/rejected": -2.6010966300964355,
"logps/chosen": -250.47537231445312,
"logps/rejected": -211.33810424804688,
"loss": 0.3426,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 2.711453914642334,
"rewards/margins": 6.628802299499512,
"rewards/rejected": -3.917348861694336,
"step": 390
},
{
"epoch": 1.0,
"step": 390,
"total_flos": 0.0,
"train_loss": 0.36373490798167696,
"train_runtime": 5835.088,
"train_samples_per_second": 8.569,
"train_steps_per_second": 0.067
}
],
"logging_steps": 10,
"max_steps": 390,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
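
A minimal sketch of how one might inspect this file, assuming Python with the standard-library json module and matplotlib installed; the filename "trainer_state.json" and the plotting choices are illustrative, not part of the original log.

# Plot the per-step DPO loss and reward margin recorded in log_history above.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# The last log_history entry holds aggregate run stats (train_loss, runtime),
# not per-step metrics, so keep only records that carry a reward margin.
records = [r for r in state["log_history"] if "rewards/margins" in r]

steps = [r["step"] for r in records]
loss = [r["loss"] for r in records]
margins = [r["rewards/margins"] for r in records]

fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot(steps, loss)
ax1.set_ylabel("DPO loss")
ax2.plot(steps, margins)
ax2.set_ylabel("reward margin (chosen - rejected)")
ax2.set_xlabel("step")
plt.tight_layout()
plt.show()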