{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.47974413646055436,
  "eval_steps": 50,
  "global_step": 450,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010660980810234541,
      "grad_norm": 0.051327500492334366,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.755006790161133,
      "logits/rejected": 14.735244750976562,
      "logps/chosen": -0.29377540946006775,
      "logps/rejected": -0.30969956517219543,
      "loss": 0.952,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.44066309928894043,
      "rewards/margins": 0.023886267095804214,
      "rewards/rejected": -0.46454939246177673,
      "step": 10
    },
    {
      "epoch": 0.021321961620469083,
      "grad_norm": 0.04346882924437523,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.513801574707031,
      "logits/rejected": 14.946454048156738,
      "logps/chosen": -0.27995699644088745,
      "logps/rejected": -0.30138006806373596,
      "loss": 0.9726,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.4199354648590088,
      "rewards/margins": 0.03213457390666008,
      "rewards/rejected": -0.45207005739212036,
      "step": 20
    },
    {
      "epoch": 0.031982942430703626,
      "grad_norm": 0.05228634551167488,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.266324043273926,
      "logits/rejected": 14.423965454101562,
      "logps/chosen": -0.2919609546661377,
      "logps/rejected": -0.32358455657958984,
      "loss": 0.9622,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.43794146180152893,
      "rewards/margins": 0.047435395419597626,
      "rewards/rejected": -0.48537683486938477,
      "step": 30
    },
    {
      "epoch": 0.042643923240938165,
      "grad_norm": 0.05487598106265068,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.965211868286133,
      "logits/rejected": 15.058088302612305,
      "logps/chosen": -0.277716726064682,
      "logps/rejected": -0.3055034577846527,
      "loss": 0.9403,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": -0.4165751039981842,
      "rewards/margins": 0.04168009012937546,
      "rewards/rejected": -0.4582551419734955,
      "step": 40
    },
    {
      "epoch": 0.053304904051172705,
      "grad_norm": 0.057255037128925323,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.539288520812988,
      "logits/rejected": 15.174041748046875,
      "logps/chosen": -0.26362231373786926,
      "logps/rejected": -0.3325727581977844,
      "loss": 0.9588,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.3954334557056427,
      "rewards/margins": 0.10342560708522797,
      "rewards/rejected": -0.49885907769203186,
      "step": 50
    },
    {
      "epoch": 0.053304904051172705,
      "eval_logits/chosen": 14.618952751159668,
      "eval_logits/rejected": 15.176809310913086,
      "eval_logps/chosen": -0.2685677409172058,
      "eval_logps/rejected": -0.3283654451370239,
      "eval_loss": 0.9551004767417908,
      "eval_rewards/accuracies": 0.5131579041481018,
      "eval_rewards/chosen": -0.4028516113758087,
      "eval_rewards/margins": 0.08969658613204956,
      "eval_rewards/rejected": -0.4925481975078583,
      "eval_runtime": 21.4453,
      "eval_samples_per_second": 28.305,
      "eval_steps_per_second": 3.544,
      "step": 50
    },
    {
      "epoch": 0.06396588486140725,
      "grad_norm": 0.05227242782711983,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.787714004516602,
      "logits/rejected": 15.379422187805176,
      "logps/chosen": -0.3143109679222107,
      "logps/rejected": -0.3425135612487793,
      "loss": 0.9636,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.4714665412902832,
      "rewards/margins": 0.042303841561079025,
      "rewards/rejected": -0.513770341873169,
      "step": 60
    },
    {
      "epoch": 0.07462686567164178,
      "grad_norm": 0.0658508762717247,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 15.149365425109863,
      "logits/rejected": 15.115835189819336,
      "logps/chosen": -0.31501108407974243,
      "logps/rejected": -0.2854115962982178,
      "loss": 0.9677,
      "rewards/accuracies": 0.36250001192092896,
      "rewards/chosen": -0.47251659631729126,
      "rewards/margins": -0.04439922422170639,
      "rewards/rejected": -0.4281173646450043,
      "step": 70
    },
    {
      "epoch": 0.08528784648187633,
      "grad_norm": 0.06567618995904922,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 15.393908500671387,
      "logits/rejected": 15.454248428344727,
      "logps/chosen": -0.31166282296180725,
      "logps/rejected": -0.3178747594356537,
      "loss": 0.9609,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.4674941897392273,
      "rewards/margins": 0.009317949414253235,
      "rewards/rejected": -0.47681212425231934,
      "step": 80
    },
    {
      "epoch": 0.09594882729211088,
      "grad_norm": 0.07566913962364197,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 15.229632377624512,
      "logits/rejected": 15.477168083190918,
      "logps/chosen": -0.3294064998626709,
      "logps/rejected": -0.3528878390789032,
      "loss": 0.9587,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.49410971999168396,
      "rewards/margins": 0.03522203490138054,
      "rewards/rejected": -0.5293318033218384,
      "step": 90
    },
    {
      "epoch": 0.10660980810234541,
      "grad_norm": 0.09082464128732681,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 14.481330871582031,
      "logits/rejected": 15.092982292175293,
      "logps/chosen": -0.2656436562538147,
      "logps/rejected": -0.33982905745506287,
      "loss": 0.9548,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.39846545457839966,
      "rewards/margins": 0.11127817630767822,
      "rewards/rejected": -0.5097435712814331,
      "step": 100
    },
    {
      "epoch": 0.10660980810234541,
      "eval_logits/chosen": 14.7100830078125,
      "eval_logits/rejected": 15.274725914001465,
      "eval_logps/chosen": -0.26462864875793457,
      "eval_logps/rejected": -0.331702321767807,
      "eval_loss": 0.947841465473175,
      "eval_rewards/accuracies": 0.5394737124443054,
      "eval_rewards/chosen": -0.39694297313690186,
      "eval_rewards/margins": 0.10061051696538925,
      "eval_rewards/rejected": -0.4975534677505493,
      "eval_runtime": 21.4421,
      "eval_samples_per_second": 28.309,
      "eval_steps_per_second": 3.544,
      "step": 100
    },
    {
      "epoch": 0.11727078891257996,
      "grad_norm": 0.20198923349380493,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 14.932653427124023,
      "logits/rejected": 15.476409912109375,
      "logps/chosen": -0.27830976247787476,
      "logps/rejected": -0.34150317311286926,
      "loss": 0.9487,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.41746464371681213,
      "rewards/margins": 0.09479012340307236,
      "rewards/rejected": -0.5122548341751099,
      "step": 110
    },
    {
      "epoch": 0.1279317697228145,
      "grad_norm": 0.31938356161117554,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 15.280967712402344,
      "logits/rejected": 15.5416259765625,
      "logps/chosen": -0.2816022038459778,
      "logps/rejected": -0.3262938857078552,
      "loss": 0.9483,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.4224032461643219,
      "rewards/margins": 0.06703753769397736,
      "rewards/rejected": -0.48944082856178284,
      "step": 120
    },
    {
      "epoch": 0.13859275053304904,
      "grad_norm": 0.12567812204360962,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 14.967382431030273,
      "logits/rejected": 15.351877212524414,
      "logps/chosen": -0.3148510456085205,
      "logps/rejected": -0.3488944172859192,
      "loss": 0.957,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.47227659821510315,
      "rewards/margins": 0.05106503888964653,
      "rewards/rejected": -0.5233416557312012,
      "step": 130
    },
    {
      "epoch": 0.14925373134328357,
      "grad_norm": 0.09151162207126617,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 14.900466918945312,
      "logits/rejected": 15.075350761413574,
      "logps/chosen": -0.2766302227973938,
      "logps/rejected": -0.312236487865448,
      "loss": 0.9373,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.4149452745914459,
      "rewards/margins": 0.05340944975614548,
      "rewards/rejected": -0.4683547616004944,
      "step": 140
    },
    {
      "epoch": 0.15991471215351813,
      "grad_norm": 0.1259378045797348,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 14.528109550476074,
      "logits/rejected": 14.861102104187012,
      "logps/chosen": -0.2683579921722412,
      "logps/rejected": -0.33838269114494324,
      "loss": 0.9388,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.40253695845603943,
      "rewards/margins": 0.10503707826137543,
      "rewards/rejected": -0.5075740218162537,
      "step": 150
    },
    {
      "epoch": 0.15991471215351813,
      "eval_logits/chosen": 14.12246036529541,
      "eval_logits/rejected": 14.733266830444336,
      "eval_logps/chosen": -0.2611957788467407,
      "eval_logps/rejected": -0.3492279350757599,
      "eval_loss": 0.9302574396133423,
      "eval_rewards/accuracies": 0.5657894611358643,
      "eval_rewards/chosen": -0.3917936384677887,
      "eval_rewards/margins": 0.13204820454120636,
      "eval_rewards/rejected": -0.5238418579101562,
      "eval_runtime": 21.4406,
      "eval_samples_per_second": 28.311,
      "eval_steps_per_second": 3.545,
      "step": 150
    },
    {
      "epoch": 0.17057569296375266,
      "grad_norm": 0.11400051414966583,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 14.4571533203125,
      "logits/rejected": 14.769159317016602,
      "logps/chosen": -0.31032469868659973,
      "logps/rejected": -0.34650668501853943,
      "loss": 0.9396,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.4654870927333832,
      "rewards/margins": 0.05427298694849014,
      "rewards/rejected": -0.519760012626648,
      "step": 160
    },
    {
      "epoch": 0.1812366737739872,
      "grad_norm": 0.1102401539683342,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 13.959765434265137,
      "logits/rejected": 14.27458381652832,
      "logps/chosen": -0.2744378447532654,
      "logps/rejected": -0.35702812671661377,
      "loss": 0.9222,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.41165676712989807,
      "rewards/margins": 0.12388546764850616,
      "rewards/rejected": -0.5355421900749207,
      "step": 170
    },
    {
      "epoch": 0.19189765458422176,
      "grad_norm": 0.14721031486988068,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 13.54602336883545,
      "logits/rejected": 14.076690673828125,
      "logps/chosen": -0.2713850140571594,
      "logps/rejected": -0.40618976950645447,
      "loss": 0.9052,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.40707746148109436,
      "rewards/margins": 0.20220720767974854,
      "rewards/rejected": -0.6092846989631653,
      "step": 180
    },
    {
      "epoch": 0.2025586353944563,
      "grad_norm": 0.1756824553012848,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 12.6314697265625,
      "logits/rejected": 13.246849060058594,
      "logps/chosen": -0.27216213941574097,
      "logps/rejected": -0.4351380467414856,
      "loss": 0.8996,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.40824323892593384,
      "rewards/margins": 0.24446387588977814,
      "rewards/rejected": -0.652707040309906,
      "step": 190
    },
    {
      "epoch": 0.21321961620469082,
      "grad_norm": 0.15476027131080627,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 12.40199089050293,
      "logits/rejected": 12.966108322143555,
      "logps/chosen": -0.303610622882843,
      "logps/rejected": -0.423031747341156,
      "loss": 0.9015,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4554159641265869,
      "rewards/margins": 0.17913168668746948,
      "rewards/rejected": -0.6345476508140564,
      "step": 200
    },
    {
      "epoch": 0.21321961620469082,
      "eval_logits/chosen": 11.887229919433594,
      "eval_logits/rejected": 12.5900239944458,
      "eval_logps/chosen": -0.269090861082077,
      "eval_logps/rejected": -0.42408913373947144,
      "eval_loss": 0.8796805143356323,
      "eval_rewards/accuracies": 0.6447368264198303,
      "eval_rewards/chosen": -0.40363630652427673,
      "eval_rewards/margins": 0.23249731957912445,
      "eval_rewards/rejected": -0.6361336708068848,
      "eval_runtime": 21.4455,
      "eval_samples_per_second": 28.304,
      "eval_steps_per_second": 3.544,
      "step": 200
    },
    {
      "epoch": 0.22388059701492538,
      "grad_norm": 0.18212148547172546,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 12.375594139099121,
      "logits/rejected": 12.701678276062012,
      "logps/chosen": -0.3136894702911377,
      "logps/rejected": -0.3944609761238098,
      "loss": 0.8898,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.47053417563438416,
      "rewards/margins": 0.12115727365016937,
      "rewards/rejected": -0.5916914939880371,
      "step": 210
    },
    {
      "epoch": 0.2345415778251599,
      "grad_norm": 0.5440058708190918,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 11.23914909362793,
      "logits/rejected": 11.926396369934082,
      "logps/chosen": -0.3077571392059326,
      "logps/rejected": -0.43772149085998535,
      "loss": 0.8806,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4616357684135437,
      "rewards/margins": 0.19494646787643433,
      "rewards/rejected": -0.656582236289978,
      "step": 220
    },
    {
      "epoch": 0.24520255863539445,
      "grad_norm": 0.5628307461738586,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 11.177714347839355,
      "logits/rejected": 11.534266471862793,
      "logps/chosen": -0.31991320848464966,
      "logps/rejected": -0.4394511282444,
      "loss": 0.8778,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.4798697829246521,
      "rewards/margins": 0.17930689454078674,
      "rewards/rejected": -0.6591767072677612,
      "step": 230
    },
    {
      "epoch": 0.255863539445629,
      "grad_norm": 0.40485626459121704,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 10.132668495178223,
      "logits/rejected": 10.29063606262207,
      "logps/chosen": -0.3195653557777405,
      "logps/rejected": -0.47949132323265076,
      "loss": 0.8551,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.47934800386428833,
      "rewards/margins": 0.23988890647888184,
      "rewards/rejected": -0.7192369699478149,
      "step": 240
    },
    {
      "epoch": 0.26652452025586354,
      "grad_norm": 0.6199322938919067,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 8.781888008117676,
      "logits/rejected": 9.237382888793945,
      "logps/chosen": -0.3370448052883148,
      "logps/rejected": -0.610824465751648,
      "loss": 0.8416,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.505567193031311,
      "rewards/margins": 0.4106695055961609,
      "rewards/rejected": -0.9162367582321167,
      "step": 250
    },
    {
      "epoch": 0.26652452025586354,
      "eval_logits/chosen": 8.437722206115723,
      "eval_logits/rejected": 8.843962669372559,
      "eval_logps/chosen": -0.3058585226535797,
      "eval_logps/rejected": -0.582990825176239,
      "eval_loss": 0.8036603331565857,
      "eval_rewards/accuracies": 0.6447368264198303,
      "eval_rewards/chosen": -0.4587877094745636,
      "eval_rewards/margins": 0.4156985878944397,
      "eval_rewards/rejected": -0.8744862079620361,
      "eval_runtime": 21.4423,
      "eval_samples_per_second": 28.308,
      "eval_steps_per_second": 3.544,
      "step": 250
    },
    {
      "epoch": 0.2771855010660981,
      "grad_norm": 0.3213505744934082,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 7.914826393127441,
      "logits/rejected": 8.010818481445312,
      "logps/chosen": -0.3556877374649048,
      "logps/rejected": -0.7540119886398315,
      "loss": 0.7811,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.5335315465927124,
      "rewards/margins": 0.5974863171577454,
      "rewards/rejected": -1.1310179233551025,
      "step": 260
    },
    {
      "epoch": 0.2878464818763326,
      "grad_norm": 1.0119378566741943,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 7.584845542907715,
      "logits/rejected": 7.812608242034912,
      "logps/chosen": -0.3649575412273407,
      "logps/rejected": -0.8042632937431335,
      "loss": 0.7391,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.5474363565444946,
      "rewards/margins": 0.6589586734771729,
      "rewards/rejected": -1.206395149230957,
      "step": 270
    },
    {
      "epoch": 0.29850746268656714,
      "grad_norm": 0.5339816808700562,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 6.687758445739746,
      "logits/rejected": 6.233181476593018,
      "logps/chosen": -0.415935218334198,
      "logps/rejected": -1.2987438440322876,
      "loss": 0.7419,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.6239027976989746,
      "rewards/margins": 1.3242127895355225,
      "rewards/rejected": -1.9481157064437866,
      "step": 280
    },
    {
      "epoch": 0.3091684434968017,
      "grad_norm": 0.3514000475406647,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 6.2503981590271,
      "logits/rejected": 5.798542499542236,
      "logps/chosen": -0.4319223463535309,
      "logps/rejected": -1.2257453203201294,
      "loss": 0.7235,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.6478835344314575,
      "rewards/margins": 1.1907342672348022,
      "rewards/rejected": -1.8386180400848389,
      "step": 290
    },
    {
      "epoch": 0.31982942430703626,
      "grad_norm": 0.6761008501052856,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 4.4480695724487305,
      "logits/rejected": 4.290585994720459,
      "logps/chosen": -0.42002564668655396,
      "logps/rejected": -1.4215493202209473,
      "loss": 0.7058,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.6300384402275085,
      "rewards/margins": 1.5022855997085571,
      "rewards/rejected": -2.132323980331421,
      "step": 300
    },
    {
      "epoch": 0.31982942430703626,
      "eval_logits/chosen": 4.789332389831543,
      "eval_logits/rejected": 4.481485366821289,
      "eval_logps/chosen": -0.4049508571624756,
      "eval_logps/rejected": -1.395646095275879,
      "eval_loss": 0.6695442199707031,
      "eval_rewards/accuracies": 0.6710526347160339,
      "eval_rewards/chosen": -0.6074262857437134,
      "eval_rewards/margins": 1.4860429763793945,
      "eval_rewards/rejected": -2.0934693813323975,
      "eval_runtime": 21.4397,
      "eval_samples_per_second": 28.312,
      "eval_steps_per_second": 3.545,
      "step": 300
    },
    {
      "epoch": 0.3304904051172708,
      "grad_norm": 0.44682690501213074,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": 5.487166404724121,
      "logits/rejected": 4.501384258270264,
      "logps/chosen": -0.5215579867362976,
      "logps/rejected": -1.7223398685455322,
      "loss": 0.6988,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.7823370695114136,
      "rewards/margins": 1.8011726140975952,
      "rewards/rejected": -2.5835094451904297,
      "step": 310
    },
    {
      "epoch": 0.3411513859275053,
      "grad_norm": 0.41085830330848694,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": 4.162590026855469,
      "logits/rejected": 2.876271963119507,
      "logps/chosen": -0.5402930974960327,
      "logps/rejected": -1.7925996780395508,
      "loss": 0.6811,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.8104397058486938,
      "rewards/margins": 1.8784599304199219,
      "rewards/rejected": -2.688899517059326,
      "step": 320
    },
    {
      "epoch": 0.35181236673773986,
      "grad_norm": 0.5611584186553955,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": 4.088540077209473,
      "logits/rejected": 3.081679582595825,
      "logps/chosen": -0.541223406791687,
      "logps/rejected": -1.9464069604873657,
      "loss": 0.6614,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.8118351101875305,
      "rewards/margins": 2.1077752113342285,
      "rewards/rejected": -2.919610023498535,
      "step": 330
    },
    {
      "epoch": 0.3624733475479744,
      "grad_norm": 4.05828857421875,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": 3.3937134742736816,
      "logits/rejected": 2.4182538986206055,
      "logps/chosen": -0.6656067967414856,
      "logps/rejected": -1.5255868434906006,
      "loss": 0.6583,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.9984102249145508,
      "rewards/margins": 1.2899701595306396,
      "rewards/rejected": -2.2883803844451904,
      "step": 340
    },
    {
      "epoch": 0.373134328358209,
      "grad_norm": 0.8311880230903625,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": 2.503194570541382,
      "logits/rejected": 1.5284960269927979,
      "logps/chosen": -0.6593035459518433,
      "logps/rejected": -2.211193323135376,
      "loss": 0.5911,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.9889553189277649,
      "rewards/margins": 2.3278346061706543,
      "rewards/rejected": -3.3167896270751953,
      "step": 350
    },
    {
      "epoch": 0.373134328358209,
      "eval_logits/chosen": 2.556962728500366,
      "eval_logits/rejected": 1.830418586730957,
      "eval_logps/chosen": -0.6546408534049988,
      "eval_logps/rejected": -1.9014692306518555,
      "eval_loss": 0.5961893200874329,
      "eval_rewards/accuracies": 0.6842105388641357,
      "eval_rewards/chosen": -0.9819613099098206,
      "eval_rewards/margins": 1.8702424764633179,
      "eval_rewards/rejected": -2.852203845977783,
      "eval_runtime": 21.4393,
      "eval_samples_per_second": 28.312,
      "eval_steps_per_second": 3.545,
      "step": 350
    },
    {
      "epoch": 0.3837953091684435,
      "grad_norm": 1.4237236976623535,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": 3.0001542568206787,
      "logits/rejected": 1.9715242385864258,
      "logps/chosen": -0.8050466775894165,
      "logps/rejected": -2.2938907146453857,
      "loss": 0.58,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.20756995677948,
      "rewards/margins": 2.2332661151885986,
      "rewards/rejected": -3.440835952758789,
      "step": 360
    },
    {
      "epoch": 0.39445628997867804,
      "grad_norm": 2.2651443481445312,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": 2.7526040077209473,
      "logits/rejected": 2.05066180229187,
      "logps/chosen": -1.6301355361938477,
      "logps/rejected": -2.9630703926086426,
      "loss": 0.5823,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -2.4452033042907715,
      "rewards/margins": 1.999402642250061,
      "rewards/rejected": -4.444605827331543,
      "step": 370
    },
    {
      "epoch": 0.4051172707889126,
      "grad_norm": 1.9120367765426636,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": 1.9757938385009766,
      "logits/rejected": 1.5915673971176147,
      "logps/chosen": -2.063323497772217,
      "logps/rejected": -2.899749755859375,
      "loss": 0.5675,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -3.094984769821167,
      "rewards/margins": 1.2546398639678955,
      "rewards/rejected": -4.3496246337890625,
      "step": 380
    },
    {
      "epoch": 0.4157782515991471,
      "grad_norm": 3.0018720626831055,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": 2.541440486907959,
      "logits/rejected": 1.7436832189559937,
      "logps/chosen": -2.279510736465454,
      "logps/rejected": -3.3447775840759277,
      "loss": 0.4969,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.4192657470703125,
      "rewards/margins": 1.5979007482528687,
      "rewards/rejected": -5.017167091369629,
      "step": 390
    },
    {
      "epoch": 0.42643923240938164,
      "grad_norm": 1.9656275510787964,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": 1.6748476028442383,
      "logits/rejected": 1.0921740531921387,
      "logps/chosen": -2.147991180419922,
      "logps/rejected": -3.380042314529419,
      "loss": 0.5135,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -3.221986770629883,
      "rewards/margins": 1.8480768203735352,
      "rewards/rejected": -5.07006311416626,
      "step": 400
    },
    {
      "epoch": 0.42643923240938164,
      "eval_logits/chosen": 2.210231065750122,
      "eval_logits/rejected": 1.679926872253418,
      "eval_logps/chosen": -2.044506788253784,
      "eval_logps/rejected": -3.713956356048584,
      "eval_loss": 0.47455134987831116,
      "eval_rewards/accuracies": 0.9342105388641357,
      "eval_rewards/chosen": -3.0667598247528076,
      "eval_rewards/margins": 2.5041754245758057,
      "eval_rewards/rejected": -5.570935249328613,
      "eval_runtime": 21.4401,
      "eval_samples_per_second": 28.311,
      "eval_steps_per_second": 3.545,
      "step": 400
    },
    {
      "epoch": 0.43710021321961623,
      "grad_norm": 2.501361131668091,
      "learning_rate": 4.133551509975264e-06,
      "logits/chosen": 1.9820306301116943,
      "logits/rejected": 1.3992068767547607,
      "logps/chosen": -2.300197124481201,
      "logps/rejected": -3.813164472579956,
      "loss": 0.498,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.4502956867218018,
      "rewards/margins": 2.2694506645202637,
      "rewards/rejected": -5.7197465896606445,
      "step": 410
    },
    {
      "epoch": 0.44776119402985076,
      "grad_norm": 3.828648090362549,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": 2.7997095584869385,
      "logits/rejected": 2.4387598037719727,
      "logps/chosen": -2.687736749649048,
      "logps/rejected": -4.425741195678711,
      "loss": 0.4494,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -4.031605243682861,
      "rewards/margins": 2.607006788253784,
      "rewards/rejected": -6.638613224029541,
      "step": 420
    },
    {
      "epoch": 0.4584221748400853,
      "grad_norm": 2.635803461074829,
      "learning_rate": 4.052869450695776e-06,
      "logits/chosen": 2.942661762237549,
      "logits/rejected": 2.019963026046753,
      "logps/chosen": -2.98117733001709,
      "logps/rejected": -4.717232704162598,
      "loss": 0.4796,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -4.471765518188477,
      "rewards/margins": 2.60408353805542,
      "rewards/rejected": -7.075850009918213,
      "step": 430
    },
    {
      "epoch": 0.4690831556503198,
      "grad_norm": 3.140829086303711,
      "learning_rate": 4.011497787155938e-06,
      "logits/chosen": 3.2747459411621094,
      "logits/rejected": 2.2958083152770996,
      "logps/chosen": -3.129321575164795,
      "logps/rejected": -4.921725273132324,
      "loss": 0.4468,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -4.69398307800293,
      "rewards/margins": 2.688605785369873,
      "rewards/rejected": -7.3825883865356445,
      "step": 440
    },
    {
      "epoch": 0.47974413646055436,
      "grad_norm": 2.7932240962982178,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": 2.205420970916748,
      "logits/rejected": 1.4024155139923096,
      "logps/chosen": -2.7564563751220703,
      "logps/rejected": -4.563851356506348,
      "loss": 0.4073,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -4.1346845626831055,
      "rewards/margins": 2.711092472076416,
      "rewards/rejected": -6.8457770347595215,
      "step": 450
    },
    {
      "epoch": 0.47974413646055436,
      "eval_logits/chosen": 2.0136826038360596,
      "eval_logits/rejected": 1.561701774597168,
      "eval_logps/chosen": -2.7486908435821533,
      "eval_logps/rejected": -4.690793514251709,
      "eval_loss": 0.41499289870262146,
      "eval_rewards/accuracies": 0.9210526347160339,
      "eval_rewards/chosen": -4.123035907745361,
      "eval_rewards/margins": 2.913153648376465,
      "eval_rewards/rejected": -7.036189079284668,
      "eval_runtime": 21.4387,
      "eval_samples_per_second": 28.313,
      "eval_steps_per_second": 3.545,
      "step": 450
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0777446819802644e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}