zephyr-7b-dpo-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 350,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 3039.0846042522635,
"learning_rate": 1.4285714285714284e-08,
"logits/chosen": -4.185730934143066,
"logits/rejected": -4.509836196899414,
"logps/chosen": -274.000732421875,
"logps/rejected": -205.8054962158203,
"loss": 0.6932,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.03,
"grad_norm": 3122.252138846549,
"learning_rate": 1.4285714285714285e-07,
"logits/chosen": -4.2117600440979,
"logits/rejected": -4.4855546951293945,
"logps/chosen": -318.3944396972656,
"logps/rejected": -257.1120910644531,
"loss": 0.7578,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.11668112874031067,
"rewards/margins": -0.05277401953935623,
"rewards/rejected": 0.1694551408290863,
"step": 10
},
{
"epoch": 0.06,
"grad_norm": 2683.55929188042,
"learning_rate": 2.857142857142857e-07,
"logits/chosen": -4.264363765716553,
"logits/rejected": -4.5196099281311035,
"logps/chosen": -303.1786193847656,
"logps/rejected": -243.7255096435547,
"loss": 0.5287,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 2.058025598526001,
"rewards/margins": 1.5574162006378174,
"rewards/rejected": 0.5006095767021179,
"step": 20
},
{
"epoch": 0.09,
"grad_norm": 1727.068886617789,
"learning_rate": 4.285714285714285e-07,
"logits/chosen": -4.307942867279053,
"logits/rejected": -4.567526340484619,
"logps/chosen": -299.24615478515625,
"logps/rejected": -256.9350280761719,
"loss": 0.4422,
"rewards/accuracies": 0.831250011920929,
"rewards/chosen": 7.268563747406006,
"rewards/margins": 4.868700981140137,
"rewards/rejected": 2.3998632431030273,
"step": 30
},
{
"epoch": 0.11,
"grad_norm": 2206.909408534419,
"learning_rate": 4.996892303047305e-07,
"logits/chosen": -4.312300682067871,
"logits/rejected": -4.578764915466309,
"logps/chosen": -288.2650451660156,
"logps/rejected": -235.3504638671875,
"loss": 0.5344,
"rewards/accuracies": 0.8062499761581421,
"rewards/chosen": 7.26935338973999,
"rewards/margins": 7.414456367492676,
"rewards/rejected": -0.145103320479393,
"step": 40
},
{
"epoch": 0.14,
"grad_norm": 1574.7884185445766,
"learning_rate": 4.972077065562821e-07,
"logits/chosen": -4.287051200866699,
"logits/rejected": -4.532064914703369,
"logps/chosen": -295.8678894042969,
"logps/rejected": -256.9671325683594,
"loss": 0.514,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 6.857720851898193,
"rewards/margins": 10.064432144165039,
"rewards/rejected": -3.206712007522583,
"step": 50
},
{
"epoch": 0.17,
"grad_norm": 2086.3490202582466,
"learning_rate": 4.922693215572695e-07,
"logits/chosen": -4.274147033691406,
"logits/rejected": -4.500526428222656,
"logps/chosen": -304.9868469238281,
"logps/rejected": -266.1625061035156,
"loss": 0.5936,
"rewards/accuracies": 0.84375,
"rewards/chosen": 6.364766597747803,
"rewards/margins": 9.915193557739258,
"rewards/rejected": -3.550427198410034,
"step": 60
},
{
"epoch": 0.2,
"grad_norm": 2489.785834029294,
"learning_rate": 4.849231551964771e-07,
"logits/chosen": -4.388774871826172,
"logits/rejected": -4.523660659790039,
"logps/chosen": -281.50799560546875,
"logps/rejected": -240.20126342773438,
"loss": 0.578,
"rewards/accuracies": 0.84375,
"rewards/chosen": 7.396153926849365,
"rewards/margins": 10.37517261505127,
"rewards/rejected": -2.9790191650390625,
"step": 70
},
{
"epoch": 0.23,
"grad_norm": 1795.0951767218703,
"learning_rate": 4.752422169756047e-07,
"logits/chosen": -4.22324275970459,
"logits/rejected": -4.482357025146484,
"logps/chosen": -289.7461853027344,
"logps/rejected": -244.2007293701172,
"loss": 0.6974,
"rewards/accuracies": 0.875,
"rewards/chosen": 5.9415082931518555,
"rewards/margins": 9.690756797790527,
"rewards/rejected": -3.749248504638672,
"step": 80
},
{
"epoch": 0.26,
"grad_norm": 1863.4883740900614,
"learning_rate": 4.6332272040803887e-07,
"logits/chosen": -4.147845268249512,
"logits/rejected": -4.379548072814941,
"logps/chosen": -301.5574645996094,
"logps/rejected": -264.04815673828125,
"loss": 0.6584,
"rewards/accuracies": 0.8125,
"rewards/chosen": 6.005797386169434,
"rewards/margins": 10.730647087097168,
"rewards/rejected": -4.724849700927734,
"step": 90
},
{
"epoch": 0.29,
"grad_norm": 2150.0529520665987,
"learning_rate": 4.492831268057306e-07,
"logits/chosen": -4.204574108123779,
"logits/rejected": -4.426244735717773,
"logps/chosen": -287.6278076171875,
"logps/rejected": -244.3843231201172,
"loss": 0.7723,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 8.200125694274902,
"rewards/margins": 10.383246421813965,
"rewards/rejected": -2.1831212043762207,
"step": 100
},
{
"epoch": 0.29,
"eval_logits/chosen": -3.147157669067383,
"eval_logits/rejected": -3.147157669067383,
"eval_logps/chosen": -157.6016845703125,
"eval_logps/rejected": -157.6016845703125,
"eval_loss": 0.6931471824645996,
"eval_rewards/accuracies": 0.0,
"eval_rewards/chosen": -2.0246658325195312,
"eval_rewards/margins": 0.0,
"eval_rewards/rejected": -2.0246658325195312,
"eval_runtime": 1.5111,
"eval_samples_per_second": 0.662,
"eval_steps_per_second": 0.662,
"step": 100
},
{
"epoch": 0.31,
"grad_norm": 2069.7674123460392,
"learning_rate": 4.332629679574565e-07,
"logits/chosen": -4.301981449127197,
"logits/rejected": -4.562737464904785,
"logps/chosen": -298.0445861816406,
"logps/rejected": -258.07379150390625,
"loss": 0.9782,
"rewards/accuracies": 0.84375,
"rewards/chosen": 5.386460304260254,
"rewards/margins": 12.155603408813477,
"rewards/rejected": -6.769143581390381,
"step": 110
},
{
"epoch": 0.34,
"grad_norm": 2493.1141420840436,
"learning_rate": 4.154214593992149e-07,
"logits/chosen": -4.2862958908081055,
"logits/rejected": -4.542973518371582,
"logps/chosen": -285.873046875,
"logps/rejected": -243.8842010498047,
"loss": 0.9519,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 7.061153411865234,
"rewards/margins": 11.827821731567383,
"rewards/rejected": -4.766669273376465,
"step": 120
},
{
"epoch": 0.37,
"grad_norm": 1751.0600688695415,
"learning_rate": 3.959359180586975e-07,
"logits/chosen": -4.280123710632324,
"logits/rejected": -4.522739887237549,
"logps/chosen": -277.5672302246094,
"logps/rejected": -226.9970703125,
"loss": 0.7208,
"rewards/accuracies": 0.875,
"rewards/chosen": 5.132817268371582,
"rewards/margins": 12.189440727233887,
"rewards/rejected": -7.056623935699463,
"step": 130
},
{
"epoch": 0.4,
"grad_norm": 1830.8828386105183,
"learning_rate": 3.75e-07,
"logits/chosen": -4.162067413330078,
"logits/rejected": -4.473877429962158,
"logps/chosen": -294.2828369140625,
"logps/rejected": -248.1217041015625,
"loss": 0.8272,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 5.600853443145752,
"rewards/margins": 12.919031143188477,
"rewards/rejected": -7.31817626953125,
"step": 140
},
{
"epoch": 0.43,
"grad_norm": 1975.8832437780866,
"learning_rate": 3.528217757826529e-07,
"logits/chosen": -4.179337978363037,
"logits/rejected": -4.442940711975098,
"logps/chosen": -295.30914306640625,
"logps/rejected": -259.9874267578125,
"loss": 0.8642,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 4.101029872894287,
"rewards/margins": 12.387203216552734,
"rewards/rejected": -8.286172866821289,
"step": 150
},
{
"epoch": 0.46,
"grad_norm": 1974.3387943820865,
"learning_rate": 3.296216625629211e-07,
"logits/chosen": -4.1478681564331055,
"logits/rejected": -4.431545257568359,
"logps/chosen": -294.5439758300781,
"logps/rejected": -244.33657836914062,
"loss": 0.9334,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 6.383957386016846,
"rewards/margins": 13.940587997436523,
"rewards/rejected": -7.556630611419678,
"step": 160
},
{
"epoch": 0.49,
"grad_norm": 1732.7845040237094,
"learning_rate": 3.056302334890786e-07,
"logits/chosen": -4.18727970123291,
"logits/rejected": -4.425799369812012,
"logps/chosen": -294.9515075683594,
"logps/rejected": -250.8203125,
"loss": 0.8554,
"rewards/accuracies": 0.84375,
"rewards/chosen": 5.709182262420654,
"rewards/margins": 14.079116821289062,
"rewards/rejected": -8.369935989379883,
"step": 170
},
{
"epoch": 0.51,
"grad_norm": 2121.8411757635613,
"learning_rate": 2.810859261618713e-07,
"logits/chosen": -4.313704490661621,
"logits/rejected": -4.544769287109375,
"logps/chosen": -282.9936828613281,
"logps/rejected": -250.189453125,
"loss": 0.7998,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 6.883467674255371,
"rewards/margins": 14.591836929321289,
"rewards/rejected": -7.708369255065918,
"step": 180
},
{
"epoch": 0.54,
"grad_norm": 1756.6999736537189,
"learning_rate": 2.5623267293451823e-07,
"logits/chosen": -4.283580303192139,
"logits/rejected": -4.457066535949707,
"logps/chosen": -305.109375,
"logps/rejected": -265.3091125488281,
"loss": 0.6833,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 7.185091495513916,
"rewards/margins": 13.808235168457031,
"rewards/rejected": -6.623143196105957,
"step": 190
},
{
"epoch": 0.57,
"grad_norm": 1748.1359792805438,
"learning_rate": 2.3131747660339394e-07,
"logits/chosen": -4.295716285705566,
"logits/rejected": -4.545838832855225,
"logps/chosen": -289.03521728515625,
"logps/rejected": -257.3003234863281,
"loss": 0.7717,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 8.096379280090332,
"rewards/margins": 14.581645011901855,
"rewards/rejected": -6.485265254974365,
"step": 200
},
{
"epoch": 0.57,
"eval_logits/chosen": -3.163522243499756,
"eval_logits/rejected": -3.163522243499756,
"eval_logps/chosen": -167.66006469726562,
"eval_logps/rejected": -167.66006469726562,
"eval_loss": 0.6931471824645996,
"eval_rewards/accuracies": 0.0,
"eval_rewards/chosen": -12.083049774169922,
"eval_rewards/margins": 0.0,
"eval_rewards/rejected": -12.083049774169922,
"eval_runtime": 1.4711,
"eval_samples_per_second": 0.68,
"eval_steps_per_second": 0.68,
"step": 200
},
{
"epoch": 0.6,
"grad_norm": 1829.602720191424,
"learning_rate": 2.065879555832674e-07,
"logits/chosen": -4.250877857208252,
"logits/rejected": -4.542287826538086,
"logps/chosen": -303.2696838378906,
"logps/rejected": -262.7691650390625,
"loss": 0.8229,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 7.95089864730835,
"rewards/margins": 15.162821769714355,
"rewards/rejected": -7.211922645568848,
"step": 210
},
{
"epoch": 0.63,
"grad_norm": 1788.0044780541311,
"learning_rate": 1.8228988296424875e-07,
"logits/chosen": -4.257784366607666,
"logits/rejected": -4.608870983123779,
"logps/chosen": -293.54071044921875,
"logps/rejected": -233.70166015625,
"loss": 0.7993,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 7.973156929016113,
"rewards/margins": 16.081945419311523,
"rewards/rejected": -8.108789443969727,
"step": 220
},
{
"epoch": 0.66,
"grad_norm": 1900.3878505004946,
"learning_rate": 1.5866474390840124e-07,
"logits/chosen": -4.334306716918945,
"logits/rejected": -4.525221824645996,
"logps/chosen": -278.41827392578125,
"logps/rejected": -233.2617645263672,
"loss": 0.6293,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 6.539956569671631,
"rewards/margins": 12.842289924621582,
"rewards/rejected": -6.30233097076416,
"step": 230
},
{
"epoch": 0.69,
"grad_norm": 1759.0935765619065,
"learning_rate": 1.3594733566170925e-07,
"logits/chosen": -4.360232353210449,
"logits/rejected": -4.688153266906738,
"logps/chosen": -255.55807495117188,
"logps/rejected": -222.76504516601562,
"loss": 1.0017,
"rewards/accuracies": 0.831250011920929,
"rewards/chosen": 6.956341743469238,
"rewards/margins": 13.633699417114258,
"rewards/rejected": -6.677358150482178,
"step": 240
},
{
"epoch": 0.71,
"grad_norm": 1730.0292892661773,
"learning_rate": 1.1436343403356016e-07,
"logits/chosen": -4.352273941040039,
"logits/rejected": -4.6365180015563965,
"logps/chosen": -276.07659912109375,
"logps/rejected": -241.01425170898438,
"loss": 0.8087,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 6.498913764953613,
"rewards/margins": 14.106257438659668,
"rewards/rejected": -7.6073455810546875,
"step": 250
},
{
"epoch": 0.74,
"grad_norm": 1731.9100006560693,
"learning_rate": 9.412754953531663e-08,
"logits/chosen": -4.388053894042969,
"logits/rejected": -4.64432954788208,
"logps/chosen": -270.12225341796875,
"logps/rejected": -249.11416625976562,
"loss": 0.6771,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 5.336110591888428,
"rewards/margins": 13.311798095703125,
"rewards/rejected": -7.975686073303223,
"step": 260
},
{
"epoch": 0.77,
"grad_norm": 1559.894108247427,
"learning_rate": 7.544079547848181e-08,
"logits/chosen": -4.511970520019531,
"logits/rejected": -4.677350044250488,
"logps/chosen": -272.5389709472656,
"logps/rejected": -237.0705108642578,
"loss": 0.816,
"rewards/accuracies": 0.831250011920929,
"rewards/chosen": 4.5388288497924805,
"rewards/margins": 12.382614135742188,
"rewards/rejected": -7.843785762786865,
"step": 270
},
{
"epoch": 0.8,
"grad_norm": 2131.975353118901,
"learning_rate": 5.848888922025552e-08,
"logits/chosen": -4.293630123138428,
"logits/rejected": -4.587409496307373,
"logps/chosen": -272.18914794921875,
"logps/rejected": -237.04507446289062,
"loss": 0.7076,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 6.89593505859375,
"rewards/margins": 13.17906379699707,
"rewards/rejected": -6.283128261566162,
"step": 280
},
{
"epoch": 0.83,
"grad_norm": 2073.5868352302195,
"learning_rate": 4.3440306421001324e-08,
"logits/chosen": -4.306157112121582,
"logits/rejected": -4.501837253570557,
"logps/chosen": -279.41571044921875,
"logps/rejected": -245.3014678955078,
"loss": 0.7257,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 7.150078773498535,
"rewards/margins": 15.104804992675781,
"rewards/rejected": -7.954724311828613,
"step": 290
},
{
"epoch": 0.86,
"grad_norm": 1664.7949029760775,
"learning_rate": 3.044460665744283e-08,
"logits/chosen": -4.291565418243408,
"logits/rejected": -4.546942234039307,
"logps/chosen": -294.171142578125,
"logps/rejected": -248.09219360351562,
"loss": 0.782,
"rewards/accuracies": 0.875,
"rewards/chosen": 8.660406112670898,
"rewards/margins": 15.387075424194336,
"rewards/rejected": -6.726672172546387,
"step": 300
},
{
"epoch": 0.86,
"eval_logits/chosen": -3.222372531890869,
"eval_logits/rejected": -3.222372531890869,
"eval_logps/chosen": -164.06509399414062,
"eval_logps/rejected": -164.06509399414062,
"eval_loss": 0.6931471824645996,
"eval_rewards/accuracies": 0.0,
"eval_rewards/chosen": -8.488082885742188,
"eval_rewards/margins": 0.0,
"eval_rewards/rejected": -8.488082885742188,
"eval_runtime": 1.4741,
"eval_samples_per_second": 0.678,
"eval_steps_per_second": 0.678,
"step": 300
},
{
"epoch": 0.89,
"grad_norm": 2673.859330983399,
"learning_rate": 1.9630947032398066e-08,
"logits/chosen": -4.44521427154541,
"logits/rejected": -4.5893964767456055,
"logps/chosen": -260.0516662597656,
"logps/rejected": -234.23690795898438,
"loss": 0.7257,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 6.064515113830566,
"rewards/margins": 13.008413314819336,
"rewards/rejected": -6.943899631500244,
"step": 310
},
{
"epoch": 0.91,
"grad_norm": 1691.3211570500287,
"learning_rate": 1.1106798553464802e-08,
"logits/chosen": -4.273613929748535,
"logits/rejected": -4.540968894958496,
"logps/chosen": -289.5185546875,
"logps/rejected": -251.7852325439453,
"loss": 0.6936,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 7.365248203277588,
"rewards/margins": 14.206552505493164,
"rewards/rejected": -6.84130334854126,
"step": 320
},
{
"epoch": 0.94,
"grad_norm": 2384.133770310185,
"learning_rate": 4.956878037864043e-09,
"logits/chosen": -4.335446834564209,
"logits/rejected": -4.543330669403076,
"logps/chosen": -303.6029968261719,
"logps/rejected": -260.98760986328125,
"loss": 0.7573,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 5.982678413391113,
"rewards/margins": 13.970565795898438,
"rewards/rejected": -7.987887382507324,
"step": 330
},
{
"epoch": 0.97,
"grad_norm": 2211.192973189294,
"learning_rate": 1.2423061586496476e-09,
"logits/chosen": -4.319240093231201,
"logits/rejected": -4.623973846435547,
"logps/chosen": -283.38006591796875,
"logps/rejected": -234.4169921875,
"loss": 0.8286,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 5.565188407897949,
"rewards/margins": 11.47750186920166,
"rewards/rejected": -5.912313938140869,
"step": 340
},
{
"epoch": 1.0,
"grad_norm": 1586.2152634156816,
"learning_rate": 0.0,
"logits/chosen": -4.285008430480957,
"logits/rejected": -4.503040790557861,
"logps/chosen": -289.0204162597656,
"logps/rejected": -251.9452362060547,
"loss": 0.7666,
"rewards/accuracies": 0.875,
"rewards/chosen": 7.000736236572266,
"rewards/margins": 13.714078903198242,
"rewards/rejected": -6.713343143463135,
"step": 350
},
{
"epoch": 1.0,
"step": 350,
"total_flos": 0.0,
"train_loss": 0.7428068714482444,
"train_runtime": 5299.044,
"train_samples_per_second": 8.446,
"train_steps_per_second": 0.066
}
],
"logging_steps": 10,
"max_steps": 350,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
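The file above is the trainer_state.json that the Hugging Face Trainer writes alongside each checkpoint: log_history interleaves a training record every logging_steps=10 optimizer steps (plus an initial one at step 1) with an evaluation record every eval_steps=100 steps, and the final entry summarizes the run (train_loss, train_runtime, throughput). Below is a minimal sketch of reading the per-step DPO metrics back out of the file, assuming it sits in the current working directory under its default name.

import json

# Load the trainer state saved by the Hugging Face Trainer.
# The path is an assumption; point it at the checkpoint directory if needed.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:
        # Periodic evaluation records (every eval_steps=100) carry only eval_* keys.
        print(f"step {entry['step']:>3}  eval_loss={entry['eval_loss']:.4f}  "
              f"eval_margin={entry['eval_rewards/margins']:.2f}")
    elif "loss" in entry:
        # Regular training records (every logging_steps=10 steps).
        print(f"step {entry['step']:>3}  loss={entry['loss']:.4f}  "
              f"margin={entry['rewards/margins']:.2f}  "
              f"acc={entry['rewards/accuracies']:.3f}")

Run against the log above, this prints training reward margins that climb from roughly 0 at step 1 into the 11-16 range over the second half of the run, while all three evaluation records report eval_loss = 0.6931 (ln 2) with zero reward margins, since the logged chosen and rejected values in those records are identical.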