clm7b0129-cds-0.8-kendall-onof-ofif-corr-max-2-simpo-max1500-default/checkpoint-500/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.4103405826836274,
  "eval_steps": 50,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008206811653672548,
      "grad_norm": 0.07778492569923401,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": -2.053281307220459,
      "logits/rejected": -2.495474338531494,
      "logps/chosen": -0.3126755356788635,
      "logps/rejected": -0.3312620520591736,
      "loss": 7.6211,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.4690132737159729,
      "rewards/margins": 0.027879873290657997,
      "rewards/rejected": -0.49689316749572754,
      "step": 10
    },
    {
      "epoch": 0.016413623307345096,
      "grad_norm": 0.07773654907941818,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": -2.0624098777770996,
      "logits/rejected": -2.4424185752868652,
      "logps/chosen": -0.26926660537719727,
      "logps/rejected": -0.2978014051914215,
      "loss": 7.5195,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.4038998484611511,
      "rewards/margins": 0.04280223697423935,
      "rewards/rejected": -0.44670209288597107,
      "step": 20
    },
    {
      "epoch": 0.024620434961017644,
      "grad_norm": 0.07357177883386612,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": -2.068427562713623,
      "logits/rejected": -2.486642360687256,
      "logps/chosen": -0.29993391036987305,
      "logps/rejected": -0.34360918402671814,
      "loss": 7.4913,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.4499008059501648,
      "rewards/margins": 0.06551288068294525,
      "rewards/rejected": -0.5154137015342712,
      "step": 30
    },
    {
      "epoch": 0.03282724661469019,
      "grad_norm": 0.14212799072265625,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": -2.015650987625122,
      "logits/rejected": -2.3838727474212646,
      "logps/chosen": -0.2911723852157593,
      "logps/rejected": -0.30521970987319946,
      "loss": 7.5217,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.4367586076259613,
      "rewards/margins": 0.021070968359708786,
      "rewards/rejected": -0.4578295648097992,
      "step": 40
    },
    {
      "epoch": 0.04103405826836274,
      "grad_norm": 0.08107248693704605,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": -2.1150989532470703,
      "logits/rejected": -2.4338631629943848,
      "logps/chosen": -0.26249754428863525,
      "logps/rejected": -0.3132360577583313,
      "loss": 7.519,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.3937462866306305,
      "rewards/margins": 0.07610772550106049,
      "rewards/rejected": -0.4698540270328522,
      "step": 50
    },
    {
      "epoch": 0.04103405826836274,
      "eval_logits/chosen": -2.0232737064361572,
      "eval_logits/rejected": -2.4952735900878906,
      "eval_logps/chosen": -0.27974528074264526,
      "eval_logps/rejected": -0.3420677185058594,
      "eval_loss": 0.9291417598724365,
      "eval_rewards/accuracies": 0.49494948983192444,
      "eval_rewards/chosen": -0.41961798071861267,
      "eval_rewards/margins": 0.09348361939191818,
      "eval_rewards/rejected": -0.5131015777587891,
      "eval_runtime": 26.0563,
      "eval_samples_per_second": 30.242,
      "eval_steps_per_second": 3.799,
      "step": 50
    },
    {
      "epoch": 0.04924086992203529,
      "grad_norm": 0.06815352290868759,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": -1.9890680313110352,
      "logits/rejected": -2.3848204612731934,
      "logps/chosen": -0.26213228702545166,
      "logps/rejected": -0.31342557072639465,
      "loss": 7.432,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.3931984603404999,
      "rewards/margins": 0.0769399031996727,
      "rewards/rejected": -0.4701383709907532,
      "step": 60
    },
    {
      "epoch": 0.057447681575707836,
      "grad_norm": 0.06748568266630173,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": -2.070542812347412,
      "logits/rejected": -2.3977038860321045,
      "logps/chosen": -0.24570491909980774,
      "logps/rejected": -0.3655605912208557,
      "loss": 7.35,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3685573935508728,
      "rewards/margins": 0.17978355288505554,
      "rewards/rejected": -0.548340916633606,
      "step": 70
    },
    {
      "epoch": 0.06565449322938038,
      "grad_norm": 0.10909309983253479,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": -2.2012317180633545,
      "logits/rejected": -2.346029758453369,
      "logps/chosen": -0.2279246598482132,
      "logps/rejected": -0.35396742820739746,
      "loss": 7.5082,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.341886967420578,
      "rewards/margins": 0.18906418979167938,
      "rewards/rejected": -0.5309511423110962,
      "step": 80
    },
    {
      "epoch": 0.07386130488305294,
      "grad_norm": 0.05977805703878403,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": -2.0719449520111084,
      "logits/rejected": -2.4491190910339355,
      "logps/chosen": -0.2503294348716736,
      "logps/rejected": -0.29939892888069153,
      "loss": 7.5129,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.37549418210983276,
      "rewards/margins": 0.07360419631004333,
      "rewards/rejected": -0.4490983486175537,
      "step": 90
    },
    {
      "epoch": 0.08206811653672548,
      "grad_norm": 0.051751479506492615,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": -2.0634045600891113,
      "logits/rejected": -2.458428382873535,
      "logps/chosen": -0.24033495783805847,
      "logps/rejected": -0.29080909490585327,
      "loss": 7.4432,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.3605024516582489,
      "rewards/margins": 0.07571124285459518,
      "rewards/rejected": -0.4362136721611023,
      "step": 100
    },
    {
      "epoch": 0.08206811653672548,
      "eval_logits/chosen": -2.0207154750823975,
      "eval_logits/rejected": -2.486215353012085,
      "eval_logps/chosen": -0.2376101016998291,
      "eval_logps/rejected": -0.32593628764152527,
      "eval_loss": 0.9085211753845215,
      "eval_rewards/accuracies": 0.5353535413742065,
      "eval_rewards/chosen": -0.35641518235206604,
      "eval_rewards/margins": 0.13248924911022186,
      "eval_rewards/rejected": -0.4889043867588043,
      "eval_runtime": 26.0119,
      "eval_samples_per_second": 30.294,
      "eval_steps_per_second": 3.806,
      "step": 100
    },
    {
      "epoch": 0.09027492819039803,
      "grad_norm": 0.06007291004061699,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": -2.1248741149902344,
      "logits/rejected": -2.409808874130249,
      "logps/chosen": -0.2354653775691986,
      "logps/rejected": -0.30269068479537964,
      "loss": 7.317,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.3531980812549591,
      "rewards/margins": 0.10083796828985214,
      "rewards/rejected": -0.45403605699539185,
      "step": 110
    },
    {
      "epoch": 0.09848173984407058,
      "grad_norm": 0.055738095194101334,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": -2.0635311603546143,
      "logits/rejected": -2.4297730922698975,
      "logps/chosen": -0.2315257489681244,
      "logps/rejected": -0.33639490604400635,
      "loss": 7.2775,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.3472886383533478,
      "rewards/margins": 0.15730372071266174,
      "rewards/rejected": -0.5045923590660095,
      "step": 120
    },
    {
      "epoch": 0.10668855149774313,
      "grad_norm": 0.07971248030662537,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": -2.07852840423584,
      "logits/rejected": -2.4043469429016113,
      "logps/chosen": -0.20596058666706085,
      "logps/rejected": -0.33416762948036194,
      "loss": 7.336,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.3089408874511719,
      "rewards/margins": 0.19231058657169342,
      "rewards/rejected": -0.5012514591217041,
      "step": 130
    },
    {
      "epoch": 0.11489536315141567,
      "grad_norm": 0.08581534773111343,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": -2.115981340408325,
      "logits/rejected": -2.5363636016845703,
      "logps/chosen": -0.22111928462982178,
      "logps/rejected": -0.3136863708496094,
      "loss": 7.2892,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.33167898654937744,
      "rewards/margins": 0.1388506144285202,
      "rewards/rejected": -0.47052955627441406,
      "step": 140
    },
    {
      "epoch": 0.12310217480508823,
      "grad_norm": 0.06293604522943497,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": -2.070842742919922,
      "logits/rejected": -2.4669342041015625,
      "logps/chosen": -0.20812074840068817,
      "logps/rejected": -0.29536327719688416,
      "loss": 7.26,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.31218111515045166,
      "rewards/margins": 0.13086381554603577,
      "rewards/rejected": -0.4430449604988098,
      "step": 150
    },
    {
      "epoch": 0.12310217480508823,
      "eval_logits/chosen": -2.062544822692871,
      "eval_logits/rejected": -2.5318312644958496,
      "eval_logps/chosen": -0.2108660489320755,
      "eval_logps/rejected": -0.3196176588535309,
      "eval_loss": 0.8929102420806885,
      "eval_rewards/accuracies": 0.5555555820465088,
      "eval_rewards/chosen": -0.31629908084869385,
      "eval_rewards/margins": 0.1631273776292801,
      "eval_rewards/rejected": -0.47942644357681274,
      "eval_runtime": 26.0407,
      "eval_samples_per_second": 30.26,
      "eval_steps_per_second": 3.802,
      "step": 150
    },
    {
      "epoch": 0.13130898645876077,
      "grad_norm": 0.06755395233631134,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": -2.187638998031616,
      "logits/rejected": -2.4928510189056396,
      "logps/chosen": -0.2070399969816208,
      "logps/rejected": -0.30727890133857727,
      "loss": 7.1947,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.3105599880218506,
      "rewards/margins": 0.1503583937883377,
      "rewards/rejected": -0.4609183669090271,
      "step": 160
    },
    {
      "epoch": 0.1395157981124333,
      "grad_norm": 0.08956371247768402,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": -2.0751285552978516,
      "logits/rejected": -2.478673219680786,
      "logps/chosen": -0.18197472393512726,
      "logps/rejected": -0.2756109833717346,
      "loss": 7.1774,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.2729620933532715,
      "rewards/margins": 0.14045441150665283,
      "rewards/rejected": -0.41341647505760193,
      "step": 170
    },
    {
      "epoch": 0.14772260976610588,
      "grad_norm": 0.07708129286766052,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": -2.113537311553955,
      "logits/rejected": -2.530677556991577,
      "logps/chosen": -0.20599500834941864,
      "logps/rejected": -0.2911488711833954,
      "loss": 7.1722,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.308992475271225,
      "rewards/margins": 0.1277308166027069,
      "rewards/rejected": -0.4367233216762543,
      "step": 180
    },
    {
      "epoch": 0.15592942141977842,
      "grad_norm": 0.0884585976600647,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": -2.151444673538208,
      "logits/rejected": -2.559861898422241,
      "logps/chosen": -0.2093551605939865,
      "logps/rejected": -0.2878231108188629,
      "loss": 7.1186,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.31403273344039917,
      "rewards/margins": 0.11770190298557281,
      "rewards/rejected": -0.4317346513271332,
      "step": 190
    },
    {
      "epoch": 0.16413623307345096,
      "grad_norm": 0.09445559978485107,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": -2.278620481491089,
      "logits/rejected": -2.5897645950317383,
      "logps/chosen": -0.18631704151630402,
      "logps/rejected": -0.3201253116130829,
      "loss": 7.082,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.2794755697250366,
      "rewards/margins": 0.2007124423980713,
      "rewards/rejected": -0.4801879823207855,
      "step": 200
    },
    {
      "epoch": 0.16413623307345096,
      "eval_logits/chosen": -2.1718955039978027,
      "eval_logits/rejected": -2.6710257530212402,
      "eval_logps/chosen": -0.20382821559906006,
      "eval_logps/rejected": -0.3390556573867798,
      "eval_loss": 0.8775798678398132,
      "eval_rewards/accuracies": 0.5858585834503174,
      "eval_rewards/chosen": -0.3057423532009125,
      "eval_rewards/margins": 0.20284107327461243,
      "eval_rewards/rejected": -0.5085834264755249,
      "eval_runtime": 26.0531,
      "eval_samples_per_second": 30.246,
      "eval_steps_per_second": 3.8,
      "step": 200
    },
    {
      "epoch": 0.1723430447271235,
      "grad_norm": 0.11077430099248886,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": -2.2566323280334473,
      "logits/rejected": -2.621065378189087,
      "logps/chosen": -0.18663282692432404,
      "logps/rejected": -0.290865957736969,
      "loss": 7.1321,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.27994924783706665,
      "rewards/margins": 0.15634974837303162,
      "rewards/rejected": -0.43629899621009827,
      "step": 210
    },
    {
      "epoch": 0.18054985638079607,
      "grad_norm": 0.15500974655151367,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": -2.27781343460083,
      "logits/rejected": -2.700369358062744,
      "logps/chosen": -0.21367880702018738,
      "logps/rejected": -0.31559067964553833,
      "loss": 6.9886,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3205181956291199,
      "rewards/margins": 0.15286779403686523,
      "rewards/rejected": -0.4733859896659851,
      "step": 220
    },
    {
      "epoch": 0.1887566680344686,
      "grad_norm": 0.12770676612854004,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": -2.216815710067749,
      "logits/rejected": -2.759458541870117,
      "logps/chosen": -0.21546092629432678,
      "logps/rejected": -0.34664005041122437,
      "loss": 6.966,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.32319143414497375,
      "rewards/margins": 0.196768656373024,
      "rewards/rejected": -0.5199600458145142,
      "step": 230
    },
    {
      "epoch": 0.19696347968814115,
      "grad_norm": 0.15062908828258514,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": -2.2262110710144043,
      "logits/rejected": -2.7840607166290283,
      "logps/chosen": -0.2078159749507904,
      "logps/rejected": -0.4006090760231018,
      "loss": 7.012,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.3117239773273468,
      "rewards/margins": 0.2891896665096283,
      "rewards/rejected": -0.6009136438369751,
      "step": 240
    },
    {
      "epoch": 0.2051702913418137,
      "grad_norm": 0.24995267391204834,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": -2.4198288917541504,
      "logits/rejected": -2.8148205280303955,
      "logps/chosen": -0.23191122710704803,
      "logps/rejected": -0.38251757621765137,
      "loss": 6.7812,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.34786686301231384,
      "rewards/margins": 0.2259095013141632,
      "rewards/rejected": -0.573776364326477,
      "step": 250
    },
    {
      "epoch": 0.2051702913418137,
      "eval_logits/chosen": -2.3532145023345947,
      "eval_logits/rejected": -2.9015841484069824,
      "eval_logps/chosen": -0.22620753943920135,
      "eval_logps/rejected": -0.4290919005870819,
      "eval_loss": 0.8434350490570068,
      "eval_rewards/accuracies": 0.5959596037864685,
      "eval_rewards/chosen": -0.33931130170822144,
      "eval_rewards/margins": 0.304326593875885,
      "eval_rewards/rejected": -0.6436378955841064,
      "eval_runtime": 26.012,
      "eval_samples_per_second": 30.294,
      "eval_steps_per_second": 3.806,
      "step": 250
    },
    {
      "epoch": 0.21337710299548626,
      "grad_norm": 0.22134838998317719,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": -2.4054629802703857,
      "logits/rejected": -2.8117618560791016,
      "logps/chosen": -0.20882606506347656,
      "logps/rejected": -0.41059261560440063,
      "loss": 6.6024,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.31323909759521484,
      "rewards/margins": 0.3026497960090637,
      "rewards/rejected": -0.6158889532089233,
      "step": 260
    },
    {
      "epoch": 0.2215839146491588,
      "grad_norm": 0.23838171362876892,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": -2.534034490585327,
      "logits/rejected": -2.846797227859497,
      "logps/chosen": -0.2372482568025589,
      "logps/rejected": -0.493452787399292,
      "loss": 6.7377,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.35587236285209656,
      "rewards/margins": 0.38430681824684143,
      "rewards/rejected": -0.740179181098938,
      "step": 270
    },
    {
      "epoch": 0.22979072630283134,
      "grad_norm": 0.2903882563114166,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": -2.4838695526123047,
      "logits/rejected": -2.8494999408721924,
      "logps/chosen": -0.23487380146980286,
      "logps/rejected": -0.5447143316268921,
      "loss": 6.6463,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.3523106873035431,
      "rewards/margins": 0.4647606909275055,
      "rewards/rejected": -0.8170714378356934,
      "step": 280
    },
    {
      "epoch": 0.23799753795650389,
      "grad_norm": 0.2977660596370697,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": -2.539785861968994,
      "logits/rejected": -2.922631025314331,
      "logps/chosen": -0.2749824821949005,
      "logps/rejected": -0.5817859768867493,
      "loss": 6.2424,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.41247373819351196,
      "rewards/margins": 0.4602052569389343,
      "rewards/rejected": -0.8726789355278015,
      "step": 290
    },
    {
      "epoch": 0.24620434961017645,
      "grad_norm": 0.388954222202301,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": -2.6039352416992188,
      "logits/rejected": -2.8011627197265625,
      "logps/chosen": -0.37122753262519836,
      "logps/rejected": -0.6376734972000122,
      "loss": 6.3458,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.5568413138389587,
      "rewards/margins": 0.39966899156570435,
      "rewards/rejected": -0.9565103650093079,
      "step": 300
    },
    {
      "epoch": 0.24620434961017645,
      "eval_logits/chosen": -2.448641061782837,
      "eval_logits/rejected": -2.8639307022094727,
      "eval_logps/chosen": -0.35992980003356934,
      "eval_logps/rejected": -0.7961164712905884,
      "eval_loss": 0.7593368887901306,
      "eval_rewards/accuracies": 0.6060606241226196,
      "eval_rewards/chosen": -0.5398945808410645,
      "eval_rewards/margins": 0.6542800664901733,
      "eval_rewards/rejected": -1.1941747665405273,
      "eval_runtime": 26.0378,
      "eval_samples_per_second": 30.264,
      "eval_steps_per_second": 3.802,
      "step": 300
    },
    {
      "epoch": 0.254411161263849,
      "grad_norm": 0.41667941212654114,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": -2.596318483352661,
      "logits/rejected": -2.8140273094177246,
      "logps/chosen": -0.3550270199775696,
      "logps/rejected": -0.673725426197052,
      "loss": 6.2201,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.532540500164032,
      "rewards/margins": 0.47804751992225647,
      "rewards/rejected": -1.0105879306793213,
      "step": 310
    },
    {
      "epoch": 0.26261797291752154,
      "grad_norm": 0.5356656312942505,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": -2.5131359100341797,
      "logits/rejected": -2.7125701904296875,
      "logps/chosen": -0.4790285527706146,
      "logps/rejected": -0.9030188322067261,
      "loss": 5.9656,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.7185428738594055,
      "rewards/margins": 0.6359853744506836,
      "rewards/rejected": -1.3545281887054443,
      "step": 320
    },
    {
      "epoch": 0.2708247845711941,
      "grad_norm": 0.6395894885063171,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": -2.408463954925537,
      "logits/rejected": -2.784302234649658,
      "logps/chosen": -0.43364158272743225,
      "logps/rejected": -0.8759375810623169,
      "loss": 5.6972,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.6504624485969543,
      "rewards/margins": 0.6634438037872314,
      "rewards/rejected": -1.313906192779541,
      "step": 330
    },
    {
      "epoch": 0.2790315962248666,
      "grad_norm": 0.5482347011566162,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": -2.4591078758239746,
      "logits/rejected": -2.7352993488311768,
      "logps/chosen": -0.4786604344844818,
      "logps/rejected": -1.337053894996643,
      "loss": 5.5123,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.7179905772209167,
      "rewards/margins": 1.2875900268554688,
      "rewards/rejected": -2.0055809020996094,
      "step": 340
    },
    {
      "epoch": 0.2872384078785392,
      "grad_norm": 0.44611233472824097,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": -2.426030397415161,
      "logits/rejected": -2.7272305488586426,
      "logps/chosen": -0.5190738439559937,
      "logps/rejected": -1.3207228183746338,
      "loss": 5.242,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.7786108255386353,
      "rewards/margins": 1.2024734020233154,
      "rewards/rejected": -1.9810841083526611,
      "step": 350
    },
    {
      "epoch": 0.2872384078785392,
      "eval_logits/chosen": -2.5548582077026367,
      "eval_logits/rejected": -2.8742663860321045,
      "eval_logps/chosen": -0.5429244637489319,
      "eval_logps/rejected": -1.556882619857788,
      "eval_loss": 0.6406257748603821,
      "eval_rewards/accuracies": 0.6464646458625793,
      "eval_rewards/chosen": -0.8143868446350098,
      "eval_rewards/margins": 1.520937204360962,
      "eval_rewards/rejected": -2.335324287414551,
      "eval_runtime": 26.0367,
      "eval_samples_per_second": 30.265,
      "eval_steps_per_second": 3.802,
      "step": 350
    },
    {
      "epoch": 0.29544521953221176,
      "grad_norm": 0.6435768008232117,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": -2.5521063804626465,
      "logits/rejected": -2.8884310722351074,
      "logps/chosen": -0.5124669075012207,
      "logps/rejected": -1.4227123260498047,
      "loss": 5.2755,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.768700361251831,
      "rewards/margins": 1.365368127822876,
      "rewards/rejected": -2.134068489074707,
      "step": 360
    },
    {
      "epoch": 0.3036520311858843,
      "grad_norm": 0.7270930409431458,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": -2.613312244415283,
      "logits/rejected": -2.8482322692871094,
      "logps/chosen": -0.7397282719612122,
      "logps/rejected": -1.7584373950958252,
      "loss": 4.6592,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.1095924377441406,
      "rewards/margins": 1.528063416481018,
      "rewards/rejected": -2.6376559734344482,
      "step": 370
    },
    {
      "epoch": 0.31185884283955684,
      "grad_norm": 0.417350172996521,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": -2.6574740409851074,
      "logits/rejected": -2.8720998764038086,
      "logps/chosen": -0.7095499634742737,
      "logps/rejected": -2.2104392051696777,
      "loss": 4.4512,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.0643248558044434,
      "rewards/margins": 2.2513341903686523,
      "rewards/rejected": -3.3156590461730957,
      "step": 380
    },
    {
      "epoch": 0.3200656544932294,
      "grad_norm": 0.5288704633712769,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": -2.7438926696777344,
      "logits/rejected": -2.9578990936279297,
      "logps/chosen": -0.7221090793609619,
      "logps/rejected": -2.169327735900879,
      "loss": 4.8545,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -1.0831634998321533,
      "rewards/margins": 2.170828104019165,
      "rewards/rejected": -3.2539916038513184,
      "step": 390
    },
    {
      "epoch": 0.3282724661469019,
      "grad_norm": 0.5342369675636292,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": -2.451097011566162,
      "logits/rejected": -2.8298330307006836,
      "logps/chosen": -0.6682878732681274,
      "logps/rejected": -2.4527621269226074,
      "loss": 4.5987,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.0024317502975464,
      "rewards/margins": 2.676711320877075,
      "rewards/rejected": -3.679143190383911,
      "step": 400
    },
    {
      "epoch": 0.3282724661469019,
      "eval_logits/chosen": -2.486518621444702,
      "eval_logits/rejected": -2.884521722793579,
      "eval_logps/chosen": -0.6234225630760193,
      "eval_logps/rejected": -2.3772060871124268,
      "eval_loss": 0.5354036688804626,
      "eval_rewards/accuracies": 0.6464646458625793,
      "eval_rewards/chosen": -0.9351338744163513,
      "eval_rewards/margins": 2.6306753158569336,
      "eval_rewards/rejected": -3.5658092498779297,
      "eval_runtime": 26.0392,
      "eval_samples_per_second": 30.262,
      "eval_steps_per_second": 3.802,
      "step": 400
    },
    {
      "epoch": 0.33647927780057446,
      "grad_norm": 0.3894485831260681,
      "learning_rate": 4.133551509975264e-06,
      "logits/chosen": -2.498328685760498,
      "logits/rejected": -2.832383394241333,
      "logps/chosen": -0.8072039484977722,
      "logps/rejected": -3.0143685340881348,
      "loss": 4.1825,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.210806131362915,
      "rewards/margins": 3.310746669769287,
      "rewards/rejected": -4.521553039550781,
      "step": 410
    },
    {
      "epoch": 0.344686089454247,
      "grad_norm": 1.1384832859039307,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": -2.3939032554626465,
      "logits/rejected": -2.736551284790039,
      "logps/chosen": -0.8623531460762024,
      "logps/rejected": -3.228794813156128,
      "loss": 4.0383,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.293529748916626,
      "rewards/margins": 3.5496623516082764,
      "rewards/rejected": -4.843192100524902,
      "step": 420
    },
    {
      "epoch": 0.3528929011079196,
      "grad_norm": 0.5636156797409058,
      "learning_rate": 4.052869450695776e-06,
      "logits/chosen": -2.466431140899658,
      "logits/rejected": -2.7423882484436035,
      "logps/chosen": -0.867216944694519,
      "logps/rejected": -2.9713022708892822,
      "loss": 3.9764,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.3008254766464233,
      "rewards/margins": 3.156127691268921,
      "rewards/rejected": -4.456953525543213,
      "step": 430
    },
    {
      "epoch": 0.36109971276159214,
      "grad_norm": 1.3254886865615845,
      "learning_rate": 4.011497787155938e-06,
      "logits/chosen": -2.438335418701172,
      "logits/rejected": -2.716845989227295,
      "logps/chosen": -0.9805113673210144,
      "logps/rejected": -3.646573543548584,
      "loss": 4.0643,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.4707671403884888,
      "rewards/margins": 3.9990932941436768,
      "rewards/rejected": -5.469860553741455,
      "step": 440
    },
    {
      "epoch": 0.3693065244152647,
      "grad_norm": 1.0398495197296143,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": -2.3812689781188965,
      "logits/rejected": -2.7526910305023193,
      "logps/chosen": -1.0975981950759888,
      "logps/rejected": -3.9439749717712402,
      "loss": 3.9749,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.6463972330093384,
      "rewards/margins": 4.269565105438232,
      "rewards/rejected": -5.915962219238281,
      "step": 450
    },
    {
      "epoch": 0.3693065244152647,
      "eval_logits/chosen": -2.4038827419281006,
      "eval_logits/rejected": -2.7936673164367676,
      "eval_logps/chosen": -1.0481945276260376,
      "eval_logps/rejected": -3.5816102027893066,
      "eval_loss": 0.47537824511528015,
      "eval_rewards/accuracies": 0.7373737096786499,
      "eval_rewards/chosen": -1.5722918510437012,
      "eval_rewards/margins": 3.800123691558838,
      "eval_rewards/rejected": -5.372415542602539,
      "eval_runtime": 26.0116,
      "eval_samples_per_second": 30.294,
      "eval_steps_per_second": 3.806,
      "step": 450
    },
    {
      "epoch": 0.3775133360689372,
      "grad_norm": 1.091323971748352,
      "learning_rate": 3.92678391921108e-06,
      "logits/chosen": -2.5687508583068848,
      "logits/rejected": -2.766624927520752,
      "logps/chosen": -1.1868960857391357,
      "logps/rejected": -3.4050726890563965,
      "loss": 4.114,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.780344009399414,
      "rewards/margins": 3.3272647857666016,
      "rewards/rejected": -5.107609272003174,
      "step": 460
    },
    {
      "epoch": 0.38572014772260976,
      "grad_norm": 0.8932163715362549,
      "learning_rate": 3.88347887310836e-06,
      "logits/chosen": -2.438856601715088,
      "logits/rejected": -2.8660335540771484,
      "logps/chosen": -1.420062780380249,
      "logps/rejected": -4.232975006103516,
      "loss": 3.9707,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -2.130094051361084,
      "rewards/margins": 4.219367980957031,
      "rewards/rejected": -6.349462032318115,
      "step": 470
    },
    {
      "epoch": 0.3939269593762823,
      "grad_norm": 1.110568642616272,
      "learning_rate": 3.839566987447492e-06,
      "logits/chosen": -2.5447945594787598,
      "logits/rejected": -2.720705032348633,
      "logps/chosen": -1.9743589162826538,
      "logps/rejected": -3.963395595550537,
      "loss": 3.7559,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -2.961538314819336,
      "rewards/margins": 2.983555316925049,
      "rewards/rejected": -5.945094108581543,
      "step": 480
    },
    {
      "epoch": 0.40213377102995485,
      "grad_norm": 1.1167631149291992,
      "learning_rate": 3.795067523432826e-06,
      "logits/chosen": -2.6660027503967285,
      "logits/rejected": -2.839933395385742,
      "logps/chosen": -2.3371267318725586,
      "logps/rejected": -4.315327167510986,
      "loss": 3.5333,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -3.505690097808838,
      "rewards/margins": 2.9673006534576416,
      "rewards/rejected": -6.4729905128479,
      "step": 490
    },
    {
      "epoch": 0.4103405826836274,
      "grad_norm": 1.7681843042373657,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": -2.4737250804901123,
      "logits/rejected": -2.7522482872009277,
      "logps/chosen": -2.802852153778076,
      "logps/rejected": -5.4331955909729,
      "loss": 3.1958,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -4.204278469085693,
      "rewards/margins": 3.9455153942108154,
      "rewards/rejected": -8.14979362487793,
      "step": 500
    },
    {
      "epoch": 0.4103405826836274,
      "eval_logits/chosen": -2.464381456375122,
      "eval_logits/rejected": -2.8693907260894775,
      "eval_logps/chosen": -2.83821177482605,
      "eval_logps/rejected": -5.77597188949585,
      "eval_loss": 0.3702296018600464,
      "eval_rewards/accuracies": 0.8383838534355164,
      "eval_rewards/chosen": -4.257318019866943,
      "eval_rewards/margins": 4.406640529632568,
      "eval_rewards/rejected": -8.663958549499512,
      "eval_runtime": 26.0401,
      "eval_samples_per_second": 30.261,
      "eval_steps_per_second": 3.802,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.816762881216086e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}