{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984301412872841,
  "eval_steps": 200,
  "global_step": 477,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.0416666666666666e-08,
      "logits/chosen": -2.7851884365081787,
      "logits/rejected": -2.649298906326294,
      "logps/chosen": -296.01092529296875,
      "logps/rejected": -290.09039306640625,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.0416666666666667e-07,
      "logits/chosen": -2.648496389389038,
      "logits/rejected": -2.668022394180298,
      "logps/chosen": -278.4987487792969,
      "logps/rejected": -242.15708923339844,
      "loss": 0.7226,
      "rewards/accuracies": 0.4861111044883728,
      "rewards/chosen": 0.06170377507805824,
      "rewards/margins": 0.054149795323610306,
      "rewards/rejected": 0.007553977891802788,
      "step": 10
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.0833333333333333e-07,
      "logits/chosen": -2.6808462142944336,
      "logits/rejected": -2.656245470046997,
      "logps/chosen": -276.23272705078125,
      "logps/rejected": -246.00448608398438,
      "loss": 0.6864,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.09794271737337112,
      "rewards/margins": 0.13353291153907776,
      "rewards/rejected": -0.03559018298983574,
      "step": 20
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.6753792762756348,
      "logits/rejected": -2.658241033554077,
      "logps/chosen": -291.89227294921875,
      "logps/rejected": -274.1100769042969,
      "loss": 0.657,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 0.4894474148750305,
      "rewards/margins": 0.5969166159629822,
      "rewards/rejected": -0.10746921598911285,
      "step": 30
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -2.6500256061553955,
      "logits/rejected": -2.640502452850342,
      "logps/chosen": -306.4776916503906,
      "logps/rejected": -277.1504821777344,
      "loss": 0.6103,
      "rewards/accuracies": 0.721875011920929,
      "rewards/chosen": 1.0548440217971802,
      "rewards/margins": 1.0432922840118408,
      "rewards/rejected": 0.011551743373274803,
      "step": 40
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.999731868769026e-07,
      "logits/chosen": -2.6383259296417236,
      "logits/rejected": -2.6259989738464355,
      "logps/chosen": -298.2965087890625,
      "logps/rejected": -280.02117919921875,
      "loss": 0.6394,
      "rewards/accuracies": 0.6968749761581421,
      "rewards/chosen": 0.34443342685699463,
      "rewards/margins": 1.087842583656311,
      "rewards/rejected": -0.7434090375900269,
      "step": 50
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.990353313429303e-07,
      "logits/chosen": -2.6224470138549805,
      "logits/rejected": -2.572944402694702,
      "logps/chosen": -282.615966796875,
      "logps/rejected": -236.1990966796875,
      "loss": 0.8549,
      "rewards/accuracies": 0.684374988079071,
      "rewards/chosen": 0.3339448571205139,
      "rewards/margins": 1.3397847414016724,
      "rewards/rejected": -1.0058400630950928,
      "step": 60
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.967625656594781e-07,
      "logits/chosen": -2.6648612022399902,
      "logits/rejected": -2.6511025428771973,
      "logps/chosen": -278.946044921875,
      "logps/rejected": -247.79544067382812,
      "loss": 0.6758,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 1.0940700769424438,
      "rewards/margins": 1.4048646688461304,
      "rewards/rejected": -0.3107944428920746,
      "step": 70
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.93167072587771e-07,
      "logits/chosen": -2.652858257293701,
      "logits/rejected": -2.648376703262329,
      "logps/chosen": -286.8718566894531,
      "logps/rejected": -288.487548828125,
      "loss": 1.0973,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.4459499418735504,
      "rewards/margins": 0.44234198331832886,
      "rewards/rejected": -0.8882920145988464,
      "step": 80
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.882681251368548e-07,
      "logits/chosen": -2.6302177906036377,
      "logits/rejected": -2.6423885822296143,
      "logps/chosen": -260.6453857421875,
      "logps/rejected": -240.8030548095703,
      "loss": 0.7506,
      "rewards/accuracies": 0.690625011920929,
      "rewards/chosen": 0.48692113161087036,
      "rewards/margins": 1.6797454357147217,
      "rewards/rejected": -1.192824125289917,
      "step": 90
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.820919832540181e-07,
      "logits/chosen": -2.7023916244506836,
      "logits/rejected": -2.6528306007385254,
      "logps/chosen": -280.9873046875,
      "logps/rejected": -265.68017578125,
      "loss": 0.8709,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 1.7643026113510132,
      "rewards/margins": 1.6466624736785889,
      "rewards/rejected": 0.1176399439573288,
      "step": 100
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.7467175306295647e-07,
      "logits/chosen": -2.6879727840423584,
      "logits/rejected": -2.660984516143799,
      "logps/chosen": -296.12017822265625,
      "logps/rejected": -265.49176025390625,
      "loss": 0.8715,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": 1.2846415042877197,
      "rewards/margins": 1.6656051874160767,
      "rewards/rejected": -0.38096341490745544,
      "step": 110
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.6604720940421207e-07,
      "logits/chosen": -2.6495959758758545,
      "logits/rejected": -2.633176803588867,
      "logps/chosen": -285.0,
      "logps/rejected": -238.13760375976562,
      "loss": 0.9971,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.48344117403030396,
      "rewards/margins": 1.2780396938323975,
      "rewards/rejected": -0.7945985198020935,
      "step": 120
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.5626458262912735e-07,
      "logits/chosen": -2.6688625812530518,
      "logits/rejected": -2.661983013153076,
      "logps/chosen": -276.2964172363281,
      "logps/rejected": -271.0657043457031,
      "loss": 0.9757,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.47159165143966675,
      "rewards/margins": 0.9514617919921875,
      "rewards/rejected": -1.4230536222457886,
      "step": 130
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.453763107901675e-07,
      "logits/chosen": -2.720446825027466,
      "logits/rejected": -2.721700429916382,
      "logps/chosen": -296.03277587890625,
      "logps/rejected": -267.8554992675781,
      "loss": 0.7556,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": 0.4882308542728424,
      "rewards/margins": 1.5448472499847412,
      "rewards/rejected": -1.0566164255142212,
      "step": 140
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.3344075855595097e-07,
      "logits/chosen": -2.6869354248046875,
      "logits/rejected": -2.678093433380127,
      "logps/chosen": -282.30657958984375,
      "logps/rejected": -251.81973266601562,
      "loss": 1.4312,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 1.9246972799301147,
      "rewards/margins": 2.1031014919281006,
      "rewards/rejected": -0.1784040927886963,
      "step": 150
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.2052190435769554e-07,
      "logits/chosen": -2.644658088684082,
      "logits/rejected": -2.618086099624634,
      "logps/chosen": -279.2743835449219,
      "logps/rejected": -268.9571228027344,
      "loss": 0.8955,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 2.6602110862731934,
      "rewards/margins": 2.369070053100586,
      "rewards/rejected": 0.29114121198654175,
      "step": 160
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.0668899744407567e-07,
      "logits/chosen": -2.6654858589172363,
      "logits/rejected": -2.6409499645233154,
      "logps/chosen": -262.2958984375,
      "logps/rejected": -229.4518585205078,
      "loss": 1.0858,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.36875656247138977,
      "rewards/margins": 1.1033318042755127,
      "rewards/rejected": -0.7345751523971558,
      "step": 170
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.920161866827889e-07,
      "logits/chosen": -2.6779518127441406,
      "logits/rejected": -2.694640636444092,
      "logps/chosen": -276.3097229003906,
      "logps/rejected": -262.0291442871094,
      "loss": 1.0793,
      "rewards/accuracies": 0.6656249761581421,
      "rewards/chosen": 1.123580813407898,
      "rewards/margins": 2.2866923809051514,
      "rewards/rejected": -1.1631114482879639,
      "step": 180
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.765821230985757e-07,
      "logits/chosen": -2.6965956687927246,
      "logits/rejected": -2.6685996055603027,
      "logps/chosen": -267.73687744140625,
      "logps/rejected": -259.35357666015625,
      "loss": 0.8591,
      "rewards/accuracies": 0.6656249761581421,
      "rewards/chosen": 0.6378843784332275,
      "rewards/margins": 1.7748029232025146,
      "rewards/rejected": -1.1369187831878662,
      "step": 190
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.604695382782159e-07,
      "logits/chosen": -2.6755518913269043,
      "logits/rejected": -2.6578094959259033,
      "logps/chosen": -290.0172424316406,
      "logps/rejected": -252.45919799804688,
      "loss": 0.9481,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.05484447628259659,
      "rewards/margins": 2.1967554092407227,
      "rewards/rejected": -2.141911029815674,
      "step": 200
    },
    {
      "epoch": 0.42,
      "eval_logits/chosen": -2.7043323516845703,
      "eval_logits/rejected": -2.6753692626953125,
      "eval_logps/chosen": -284.77410888671875,
      "eval_logps/rejected": -264.4426574707031,
      "eval_loss": 0.9532725214958191,
      "eval_rewards/accuracies": 0.716269850730896,
      "eval_rewards/chosen": -0.6643804311752319,
      "eval_rewards/margins": 1.9997262954711914,
      "eval_rewards/rejected": -2.664106607437134,
      "eval_runtime": 236.1755,
      "eval_samples_per_second": 8.468,
      "eval_steps_per_second": 0.267,
      "step": 200
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4376480090239047e-07,
      "logits/chosen": -2.712965726852417,
      "logits/rejected": -2.6518542766571045,
      "logps/chosen": -289.49072265625,
      "logps/rejected": -262.79595947265625,
      "loss": 0.8643,
      "rewards/accuracies": 0.684374988079071,
      "rewards/chosen": -0.5081722140312195,
      "rewards/margins": 1.8118541240692139,
      "rewards/rejected": -2.320026397705078,
      "step": 210
    },
    {
      "epoch": 0.46,
      "learning_rate": 3.265574537815398e-07,
      "logits/chosen": -2.6325292587280273,
      "logits/rejected": -2.629293918609619,
      "logps/chosen": -259.7528076171875,
      "logps/rejected": -245.0424346923828,
      "loss": 0.9605,
      "rewards/accuracies": 0.6343749761581421,
      "rewards/chosen": 0.8424679040908813,
      "rewards/margins": 1.7455743551254272,
      "rewards/rejected": -0.9031065702438354,
      "step": 220
    },
    {
      "epoch": 0.48,
      "learning_rate": 3.0893973387735683e-07,
      "logits/chosen": -2.5948872566223145,
      "logits/rejected": -2.583383798599243,
      "logps/chosen": -263.6277770996094,
      "logps/rejected": -252.355224609375,
      "loss": 0.9994,
      "rewards/accuracies": 0.6656249761581421,
      "rewards/chosen": 0.9661090970039368,
      "rewards/margins": 3.1072936058044434,
      "rewards/rejected": -2.1411845684051514,
      "step": 230
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.910060778827554e-07,
      "logits/chosen": -2.631458044052124,
      "logits/rejected": -2.603405714035034,
      "logps/chosen": -264.26947021484375,
      "logps/rejected": -245.29733276367188,
      "loss": 1.0716,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 1.2004565000534058,
      "rewards/margins": 1.8289244174957275,
      "rewards/rejected": -0.6284679174423218,
      "step": 240
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.7285261601056697e-07,
      "logits/chosen": -2.6394925117492676,
      "logits/rejected": -2.6251111030578613,
      "logps/chosen": -272.2856750488281,
      "logps/rejected": -250.0095672607422,
      "loss": 0.9656,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 1.3560218811035156,
      "rewards/margins": 1.9196758270263672,
      "rewards/rejected": -0.5636539459228516,
      "step": 250
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.5457665670441937e-07,
      "logits/chosen": -2.651275396347046,
      "logits/rejected": -2.6304633617401123,
      "logps/chosen": -280.27410888671875,
      "logps/rejected": -252.9152374267578,
      "loss": 1.0543,
      "rewards/accuracies": 0.703125,
      "rewards/chosen": -0.03507481887936592,
      "rewards/margins": 1.2433618307113647,
      "rewards/rejected": -1.2784364223480225,
      "step": 260
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.3627616503391812e-07,
      "logits/chosen": -2.6496596336364746,
      "logits/rejected": -2.6472182273864746,
      "logps/chosen": -288.3260498046875,
      "logps/rejected": -280.3363037109375,
      "loss": 0.9168,
      "rewards/accuracies": 0.715624988079071,
      "rewards/chosen": 0.828028678894043,
      "rewards/margins": 2.5466268062591553,
      "rewards/rejected": -1.7185981273651123,
      "step": 270
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.1804923757009882e-07,
      "logits/chosen": -2.661709785461426,
      "logits/rejected": -2.6548213958740234,
      "logps/chosen": -267.2577819824219,
      "logps/rejected": -241.8800048828125,
      "loss": 1.1955,
      "rewards/accuracies": 0.703125,
      "rewards/chosen": 0.9889399409294128,
      "rewards/margins": 2.1581242084503174,
      "rewards/rejected": -1.1691839694976807,
      "step": 280
    },
    {
      "epoch": 0.61,
      "learning_rate": 1.9999357655598891e-07,
      "logits/chosen": -2.6423428058624268,
      "logits/rejected": -2.6236939430236816,
      "logps/chosen": -274.1272277832031,
      "logps/rejected": -249.1870880126953,
      "loss": 1.2642,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.9026322364807129,
      "rewards/margins": 1.699610948562622,
      "rewards/rejected": -0.7969785332679749,
      "step": 290
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8220596619089573e-07,
      "logits/chosen": -2.637317419052124,
      "logits/rejected": -2.597743034362793,
      "logps/chosen": -298.6712951660156,
      "logps/rejected": -295.92657470703125,
      "loss": 1.1698,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": 0.8392047882080078,
      "rewards/margins": 1.0658338069915771,
      "rewards/rejected": -0.22662897408008575,
      "step": 300
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.647817538357072e-07,
      "logits/chosen": -2.6557185649871826,
      "logits/rejected": -2.6295552253723145,
      "logps/chosen": -273.03802490234375,
      "logps/rejected": -228.7330322265625,
      "loss": 0.9258,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 0.48593106865882874,
      "rewards/margins": 1.9354314804077148,
      "rewards/rejected": -1.4495004415512085,
      "step": 310
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.478143389201113e-07,
      "logits/chosen": -2.5909767150878906,
      "logits/rejected": -2.5993030071258545,
      "logps/chosen": -272.3155212402344,
      "logps/rejected": -257.76409912109375,
      "loss": 0.8658,
      "rewards/accuracies": 0.7281249761581421,
      "rewards/chosen": 0.7982915043830872,
      "rewards/margins": 2.247641086578369,
      "rewards/rejected": -1.4493494033813477,
      "step": 320
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.3139467229135998e-07,
      "logits/chosen": -2.705423355102539,
      "logits/rejected": -2.7025485038757324,
      "logps/chosen": -302.91082763671875,
      "logps/rejected": -265.9149475097656,
      "loss": 0.9932,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 1.026079773902893,
      "rewards/margins": 2.0061373710632324,
      "rewards/rejected": -0.9800575971603394,
      "step": 330
    },
    {
      "epoch": 0.71,
      "learning_rate": 1.1561076868822755e-07,
      "logits/chosen": -2.610400438308716,
      "logits/rejected": -2.614844560623169,
      "logps/chosen": -271.6020812988281,
      "logps/rejected": -254.05477905273438,
      "loss": 0.965,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 0.7108423113822937,
      "rewards/margins": 1.3122758865356445,
      "rewards/rejected": -0.6014334559440613,
      "step": 340
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.0054723495346482e-07,
      "logits/chosen": -2.616030216217041,
      "logits/rejected": -2.5651869773864746,
      "logps/chosen": -256.9847106933594,
      "logps/rejected": -244.5271453857422,
      "loss": 0.9356,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.8414610028266907,
      "rewards/margins": 2.031790018081665,
      "rewards/rejected": -1.1903290748596191,
      "step": 350
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.628481651367875e-08,
      "logits/chosen": -2.6491808891296387,
      "logits/rejected": -2.6336731910705566,
      "logps/chosen": -297.15911865234375,
      "logps/rejected": -255.7582550048828,
      "loss": 0.8238,
      "rewards/accuracies": 0.690625011920929,
      "rewards/chosen": 1.010446310043335,
      "rewards/margins": 3.2363944053649902,
      "rewards/rejected": -2.2259488105773926,
      "step": 360
    },
    {
      "epoch": 0.77,
      "learning_rate": 7.289996455765748e-08,
      "logits/chosen": -2.619288206100464,
      "logits/rejected": -2.616589069366455,
      "logps/chosen": -307.8691711425781,
      "logps/rejected": -256.36322021484375,
      "loss": 1.0289,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 1.2276843786239624,
      "rewards/margins": 2.0671706199645996,
      "rewards/rejected": -0.8394859433174133,
      "step": 370
    },
    {
      "epoch": 0.8,
      "learning_rate": 6.046442623320145e-08,
      "logits/chosen": -2.6064088344573975,
      "logits/rejected": -2.617523431777954,
      "logps/chosen": -279.7250061035156,
      "logps/rejected": -265.47808837890625,
      "loss": 0.9126,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": 1.0034754276275635,
      "rewards/margins": 1.880950927734375,
      "rewards/rejected": -0.8774752616882324,
      "step": 380
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.904486005914027e-08,
      "logits/chosen": -2.626131534576416,
      "logits/rejected": -2.612278461456299,
      "logps/chosen": -274.55963134765625,
      "logps/rejected": -268.88525390625,
      "loss": 0.9176,
      "rewards/accuracies": 0.6968749761581421,
      "rewards/chosen": 0.5394259691238403,
      "rewards/margins": 1.4705688953399658,
      "rewards/rejected": -0.9311429858207703,
      "step": 390
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.8702478614051345e-08,
      "logits/chosen": -2.6065127849578857,
      "logits/rejected": -2.5645954608917236,
      "logps/chosen": -320.189208984375,
      "logps/rejected": -285.04571533203125,
      "loss": 1.0499,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.5851168632507324,
      "rewards/margins": 2.4354395866394043,
      "rewards/rejected": -1.8503224849700928,
      "step": 400
    },
    {
      "epoch": 0.84,
      "eval_logits/chosen": -2.6585872173309326,
      "eval_logits/rejected": -2.6278274059295654,
      "eval_logps/chosen": -283.5934753417969,
      "eval_logps/rejected": -263.3767395019531,
      "eval_loss": 0.8541988134384155,
      "eval_rewards/accuracies": 0.6765872836112976,
      "eval_rewards/chosen": 0.5162815451622009,
      "eval_rewards/margins": 2.114495038986206,
      "eval_rewards/rejected": -1.5982133150100708,
      "eval_runtime": 236.3982,
      "eval_samples_per_second": 8.46,
      "eval_steps_per_second": 0.266,
      "step": 400
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.9492720416985e-08,
      "logits/chosen": -2.5927202701568604,
      "logits/rejected": -2.6027348041534424,
      "logps/chosen": -265.5550537109375,
      "logps/rejected": -238.1681365966797,
      "loss": 0.9647,
      "rewards/accuracies": 0.703125,
      "rewards/chosen": 0.5153275728225708,
      "rewards/margins": 1.5753355026245117,
      "rewards/rejected": -1.060007929801941,
      "step": 410
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.1464952759020856e-08,
      "logits/chosen": -2.587930679321289,
      "logits/rejected": -2.5975794792175293,
      "logps/chosen": -280.38665771484375,
      "logps/rejected": -264.66094970703125,
      "loss": 0.9527,
      "rewards/accuracies": 0.715624988079071,
      "rewards/chosen": 0.6154674291610718,
      "rewards/margins": 2.1581873893737793,
      "rewards/rejected": -1.542720079421997,
      "step": 420
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.4662207078575684e-08,
      "logits/chosen": -2.6294283866882324,
      "logits/rejected": -2.6279261112213135,
      "logps/chosen": -288.611572265625,
      "logps/rejected": -249.09817504882812,
      "loss": 0.9541,
      "rewards/accuracies": 0.690625011920929,
      "rewards/chosen": 0.7420939207077026,
      "rewards/margins": 1.885738730430603,
      "rewards/rejected": -1.14364492893219,
      "step": 430
    },
    {
      "epoch": 0.92,
      "learning_rate": 9.12094829893642e-09,
      "logits/chosen": -2.6458382606506348,
      "logits/rejected": -2.6251447200775146,
      "logps/chosen": -259.57086181640625,
      "logps/rejected": -250.9589080810547,
      "loss": 0.9229,
      "rewards/accuracies": 0.721875011920929,
      "rewards/chosen": 0.9782251119613647,
      "rewards/margins": 2.130990982055664,
      "rewards/rejected": -1.1527659893035889,
      "step": 440
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.8708793644441086e-09,
      "logits/chosen": -2.6016457080841064,
      "logits/rejected": -2.5447189807891846,
      "logps/chosen": -281.54058837890625,
      "logps/rejected": -276.61578369140625,
      "loss": 0.8586,
      "rewards/accuracies": 0.7093750238418579,
      "rewards/chosen": 0.8872340321540833,
      "rewards/margins": 2.6381373405456543,
      "rewards/rejected": -1.7509028911590576,
      "step": 450
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.9347820230782295e-09,
      "logits/chosen": -2.627577781677246,
      "logits/rejected": -2.5947518348693848,
      "logps/chosen": -270.9955139160156,
      "logps/rejected": -251.592529296875,
      "loss": 0.8516,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.6147862672805786,
      "rewards/margins": 2.4109296798706055,
      "rewards/rejected": -1.7961437702178955,
      "step": 460
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.2839470889836627e-10,
      "logits/chosen": -2.6408920288085938,
      "logits/rejected": -2.6343941688537598,
      "logps/chosen": -300.63250732421875,
      "logps/rejected": -269.65399169921875,
      "loss": 0.7941,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": 0.6158030033111572,
      "rewards/margins": 2.596193790435791,
      "rewards/rejected": -1.9803907871246338,
      "step": 470
    },
    {
      "epoch": 1.0,
      "step": 477,
      "total_flos": 0.0,
      "train_loss": 0.9276265908087324,
      "train_runtime": 18165.5867,
      "train_samples_per_second": 3.365,
      "train_steps_per_second": 0.026
    }
  ],
  "logging_steps": 10,
  "max_steps": 477,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 900,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}