clm7b0129-cds-0.8-kendall-onof-ofif-corr-max-2-simpo-max1500-default/checkpoint-250/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2051702913418137,
  "eval_steps": 50,
  "global_step": 250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008206811653672548,
      "grad_norm": 0.07778492569923401,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": -2.053281307220459,
      "logits/rejected": -2.495474338531494,
      "logps/chosen": -0.3126755356788635,
      "logps/rejected": -0.3312620520591736,
      "loss": 7.6211,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.4690132737159729,
      "rewards/margins": 0.027879873290657997,
      "rewards/rejected": -0.49689316749572754,
      "step": 10
    },
    {
      "epoch": 0.016413623307345096,
      "grad_norm": 0.07773654907941818,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": -2.0624098777770996,
      "logits/rejected": -2.4424185752868652,
      "logps/chosen": -0.26926660537719727,
      "logps/rejected": -0.2978014051914215,
      "loss": 7.5195,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.4038998484611511,
      "rewards/margins": 0.04280223697423935,
      "rewards/rejected": -0.44670209288597107,
      "step": 20
    },
    {
      "epoch": 0.024620434961017644,
      "grad_norm": 0.07357177883386612,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": -2.068427562713623,
      "logits/rejected": -2.486642360687256,
      "logps/chosen": -0.29993391036987305,
      "logps/rejected": -0.34360918402671814,
      "loss": 7.4913,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.4499008059501648,
      "rewards/margins": 0.06551288068294525,
      "rewards/rejected": -0.5154137015342712,
      "step": 30
    },
    {
      "epoch": 0.03282724661469019,
      "grad_norm": 0.14212799072265625,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": -2.015650987625122,
      "logits/rejected": -2.3838727474212646,
      "logps/chosen": -0.2911723852157593,
      "logps/rejected": -0.30521970987319946,
      "loss": 7.5217,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.4367586076259613,
      "rewards/margins": 0.021070968359708786,
      "rewards/rejected": -0.4578295648097992,
      "step": 40
    },
    {
      "epoch": 0.04103405826836274,
      "grad_norm": 0.08107248693704605,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": -2.1150989532470703,
      "logits/rejected": -2.4338631629943848,
      "logps/chosen": -0.26249754428863525,
      "logps/rejected": -0.3132360577583313,
      "loss": 7.519,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.3937462866306305,
      "rewards/margins": 0.07610772550106049,
      "rewards/rejected": -0.4698540270328522,
      "step": 50
    },
    {
      "epoch": 0.04103405826836274,
      "eval_logits/chosen": -2.0232737064361572,
      "eval_logits/rejected": -2.4952735900878906,
      "eval_logps/chosen": -0.27974528074264526,
      "eval_logps/rejected": -0.3420677185058594,
      "eval_loss": 0.9291417598724365,
      "eval_rewards/accuracies": 0.49494948983192444,
      "eval_rewards/chosen": -0.41961798071861267,
      "eval_rewards/margins": 0.09348361939191818,
      "eval_rewards/rejected": -0.5131015777587891,
      "eval_runtime": 26.0563,
      "eval_samples_per_second": 30.242,
      "eval_steps_per_second": 3.799,
      "step": 50
    },
    {
      "epoch": 0.04924086992203529,
      "grad_norm": 0.06815352290868759,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": -1.9890680313110352,
      "logits/rejected": -2.3848204612731934,
      "logps/chosen": -0.26213228702545166,
      "logps/rejected": -0.31342557072639465,
      "loss": 7.432,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.3931984603404999,
      "rewards/margins": 0.0769399031996727,
      "rewards/rejected": -0.4701383709907532,
      "step": 60
    },
    {
      "epoch": 0.057447681575707836,
      "grad_norm": 0.06748568266630173,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": -2.070542812347412,
      "logits/rejected": -2.3977038860321045,
      "logps/chosen": -0.24570491909980774,
      "logps/rejected": -0.3655605912208557,
      "loss": 7.35,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3685573935508728,
      "rewards/margins": 0.17978355288505554,
      "rewards/rejected": -0.548340916633606,
      "step": 70
    },
    {
      "epoch": 0.06565449322938038,
      "grad_norm": 0.10909309983253479,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": -2.2012317180633545,
      "logits/rejected": -2.346029758453369,
      "logps/chosen": -0.2279246598482132,
      "logps/rejected": -0.35396742820739746,
      "loss": 7.5082,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.341886967420578,
      "rewards/margins": 0.18906418979167938,
      "rewards/rejected": -0.5309511423110962,
      "step": 80
    },
    {
      "epoch": 0.07386130488305294,
      "grad_norm": 0.05977805703878403,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": -2.0719449520111084,
      "logits/rejected": -2.4491190910339355,
      "logps/chosen": -0.2503294348716736,
      "logps/rejected": -0.29939892888069153,
      "loss": 7.5129,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.37549418210983276,
      "rewards/margins": 0.07360419631004333,
      "rewards/rejected": -0.4490983486175537,
      "step": 90
    },
    {
      "epoch": 0.08206811653672548,
      "grad_norm": 0.051751479506492615,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": -2.0634045600891113,
      "logits/rejected": -2.458428382873535,
      "logps/chosen": -0.24033495783805847,
      "logps/rejected": -0.29080909490585327,
      "loss": 7.4432,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.3605024516582489,
      "rewards/margins": 0.07571124285459518,
      "rewards/rejected": -0.4362136721611023,
      "step": 100
    },
    {
      "epoch": 0.08206811653672548,
      "eval_logits/chosen": -2.0207154750823975,
      "eval_logits/rejected": -2.486215353012085,
      "eval_logps/chosen": -0.2376101016998291,
      "eval_logps/rejected": -0.32593628764152527,
      "eval_loss": 0.9085211753845215,
      "eval_rewards/accuracies": 0.5353535413742065,
      "eval_rewards/chosen": -0.35641518235206604,
      "eval_rewards/margins": 0.13248924911022186,
      "eval_rewards/rejected": -0.4889043867588043,
      "eval_runtime": 26.0119,
      "eval_samples_per_second": 30.294,
      "eval_steps_per_second": 3.806,
      "step": 100
    },
    {
      "epoch": 0.09027492819039803,
      "grad_norm": 0.06007291004061699,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": -2.1248741149902344,
      "logits/rejected": -2.409808874130249,
      "logps/chosen": -0.2354653775691986,
      "logps/rejected": -0.30269068479537964,
      "loss": 7.317,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.3531980812549591,
      "rewards/margins": 0.10083796828985214,
      "rewards/rejected": -0.45403605699539185,
      "step": 110
    },
    {
      "epoch": 0.09848173984407058,
      "grad_norm": 0.055738095194101334,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": -2.0635311603546143,
      "logits/rejected": -2.4297730922698975,
      "logps/chosen": -0.2315257489681244,
      "logps/rejected": -0.33639490604400635,
      "loss": 7.2775,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.3472886383533478,
      "rewards/margins": 0.15730372071266174,
      "rewards/rejected": -0.5045923590660095,
      "step": 120
    },
    {
      "epoch": 0.10668855149774313,
      "grad_norm": 0.07971248030662537,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": -2.07852840423584,
      "logits/rejected": -2.4043469429016113,
      "logps/chosen": -0.20596058666706085,
      "logps/rejected": -0.33416762948036194,
      "loss": 7.336,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.3089408874511719,
      "rewards/margins": 0.19231058657169342,
      "rewards/rejected": -0.5012514591217041,
      "step": 130
    },
    {
      "epoch": 0.11489536315141567,
      "grad_norm": 0.08581534773111343,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": -2.115981340408325,
      "logits/rejected": -2.5363636016845703,
      "logps/chosen": -0.22111928462982178,
      "logps/rejected": -0.3136863708496094,
      "loss": 7.2892,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.33167898654937744,
      "rewards/margins": 0.1388506144285202,
      "rewards/rejected": -0.47052955627441406,
      "step": 140
    },
    {
      "epoch": 0.12310217480508823,
      "grad_norm": 0.06293604522943497,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": -2.070842742919922,
      "logits/rejected": -2.4669342041015625,
      "logps/chosen": -0.20812074840068817,
      "logps/rejected": -0.29536327719688416,
      "loss": 7.26,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.31218111515045166,
      "rewards/margins": 0.13086381554603577,
      "rewards/rejected": -0.4430449604988098,
      "step": 150
    },
    {
      "epoch": 0.12310217480508823,
      "eval_logits/chosen": -2.062544822692871,
      "eval_logits/rejected": -2.5318312644958496,
      "eval_logps/chosen": -0.2108660489320755,
      "eval_logps/rejected": -0.3196176588535309,
      "eval_loss": 0.8929102420806885,
      "eval_rewards/accuracies": 0.5555555820465088,
      "eval_rewards/chosen": -0.31629908084869385,
      "eval_rewards/margins": 0.1631273776292801,
      "eval_rewards/rejected": -0.47942644357681274,
      "eval_runtime": 26.0407,
      "eval_samples_per_second": 30.26,
      "eval_steps_per_second": 3.802,
      "step": 150
    },
    {
      "epoch": 0.13130898645876077,
      "grad_norm": 0.06755395233631134,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": -2.187638998031616,
      "logits/rejected": -2.4928510189056396,
      "logps/chosen": -0.2070399969816208,
      "logps/rejected": -0.30727890133857727,
      "loss": 7.1947,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.3105599880218506,
      "rewards/margins": 0.1503583937883377,
      "rewards/rejected": -0.4609183669090271,
      "step": 160
    },
    {
      "epoch": 0.1395157981124333,
      "grad_norm": 0.08956371247768402,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": -2.0751285552978516,
      "logits/rejected": -2.478673219680786,
      "logps/chosen": -0.18197472393512726,
      "logps/rejected": -0.2756109833717346,
      "loss": 7.1774,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.2729620933532715,
      "rewards/margins": 0.14045441150665283,
      "rewards/rejected": -0.41341647505760193,
      "step": 170
    },
    {
      "epoch": 0.14772260976610588,
      "grad_norm": 0.07708129286766052,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": -2.113537311553955,
      "logits/rejected": -2.530677556991577,
      "logps/chosen": -0.20599500834941864,
      "logps/rejected": -0.2911488711833954,
      "loss": 7.1722,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.308992475271225,
      "rewards/margins": 0.1277308166027069,
      "rewards/rejected": -0.4367233216762543,
      "step": 180
    },
    {
      "epoch": 0.15592942141977842,
      "grad_norm": 0.0884585976600647,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": -2.151444673538208,
      "logits/rejected": -2.559861898422241,
      "logps/chosen": -0.2093551605939865,
      "logps/rejected": -0.2878231108188629,
      "loss": 7.1186,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.31403273344039917,
      "rewards/margins": 0.11770190298557281,
      "rewards/rejected": -0.4317346513271332,
      "step": 190
    },
    {
      "epoch": 0.16413623307345096,
      "grad_norm": 0.09445559978485107,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": -2.278620481491089,
      "logits/rejected": -2.5897645950317383,
      "logps/chosen": -0.18631704151630402,
      "logps/rejected": -0.3201253116130829,
      "loss": 7.082,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.2794755697250366,
      "rewards/margins": 0.2007124423980713,
      "rewards/rejected": -0.4801879823207855,
      "step": 200
    },
    {
      "epoch": 0.16413623307345096,
      "eval_logits/chosen": -2.1718955039978027,
      "eval_logits/rejected": -2.6710257530212402,
      "eval_logps/chosen": -0.20382821559906006,
      "eval_logps/rejected": -0.3390556573867798,
      "eval_loss": 0.8775798678398132,
      "eval_rewards/accuracies": 0.5858585834503174,
      "eval_rewards/chosen": -0.3057423532009125,
      "eval_rewards/margins": 0.20284107327461243,
      "eval_rewards/rejected": -0.5085834264755249,
      "eval_runtime": 26.0531,
      "eval_samples_per_second": 30.246,
      "eval_steps_per_second": 3.8,
      "step": 200
    },
    {
      "epoch": 0.1723430447271235,
      "grad_norm": 0.11077430099248886,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": -2.2566323280334473,
      "logits/rejected": -2.621065378189087,
      "logps/chosen": -0.18663282692432404,
      "logps/rejected": -0.290865957736969,
      "loss": 7.1321,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.27994924783706665,
      "rewards/margins": 0.15634974837303162,
      "rewards/rejected": -0.43629899621009827,
      "step": 210
    },
    {
      "epoch": 0.18054985638079607,
      "grad_norm": 0.15500974655151367,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": -2.27781343460083,
      "logits/rejected": -2.700369358062744,
      "logps/chosen": -0.21367880702018738,
      "logps/rejected": -0.31559067964553833,
      "loss": 6.9886,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3205181956291199,
      "rewards/margins": 0.15286779403686523,
      "rewards/rejected": -0.4733859896659851,
      "step": 220
    },
    {
      "epoch": 0.1887566680344686,
      "grad_norm": 0.12770676612854004,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": -2.216815710067749,
      "logits/rejected": -2.759458541870117,
      "logps/chosen": -0.21546092629432678,
      "logps/rejected": -0.34664005041122437,
      "loss": 6.966,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.32319143414497375,
      "rewards/margins": 0.196768656373024,
      "rewards/rejected": -0.5199600458145142,
      "step": 230
    },
    {
      "epoch": 0.19696347968814115,
      "grad_norm": 0.15062908828258514,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": -2.2262110710144043,
      "logits/rejected": -2.7840607166290283,
      "logps/chosen": -0.2078159749507904,
      "logps/rejected": -0.4006090760231018,
      "loss": 7.012,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.3117239773273468,
      "rewards/margins": 0.2891896665096283,
      "rewards/rejected": -0.6009136438369751,
      "step": 240
    },
    {
      "epoch": 0.2051702913418137,
      "grad_norm": 0.24995267391204834,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": -2.4198288917541504,
      "logits/rejected": -2.8148205280303955,
      "logps/chosen": -0.23191122710704803,
      "logps/rejected": -0.38251757621765137,
      "loss": 6.7812,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.34786686301231384,
      "rewards/margins": 0.2259095013141632,
      "rewards/rejected": -0.573776364326477,
      "step": 250
    },
    {
      "epoch": 0.2051702913418137,
      "eval_logits/chosen": -2.3532145023345947,
      "eval_logits/rejected": -2.9015841484069824,
      "eval_logps/chosen": -0.22620753943920135,
      "eval_logps/rejected": -0.4290919005870819,
      "eval_loss": 0.8434350490570068,
      "eval_rewards/accuracies": 0.5959596037864685,
      "eval_rewards/chosen": -0.33931130170822144,
      "eval_rewards/margins": 0.304326593875885,
      "eval_rewards/rejected": -0.6436378955841064,
      "eval_runtime": 26.012,
      "eval_samples_per_second": 30.294,
      "eval_steps_per_second": 3.806,
      "step": 250
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.091481073027645e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}