{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 193,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 18.047405023374292,
      "learning_rate": 2.5e-08,
      "logits/chosen": -2.3762412071228027,
      "logits/rejected": -2.8935458660125732,
      "logps/chosen": -633.0433349609375,
      "logps/rejected": -1050.6610107421875,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.05,
      "grad_norm": 19.360233123290687,
      "learning_rate": 2.5e-07,
      "logits/chosen": -2.7744946479797363,
      "logits/rejected": -2.76593017578125,
      "logps/chosen": -479.1276550292969,
      "logps/rejected": -986.8477783203125,
      "loss": 0.6924,
      "rewards/accuracies": 0.4305555522441864,
      "rewards/chosen": 0.0005723661743104458,
      "rewards/margins": 0.0007126606651581824,
      "rewards/rejected": -0.00014029450539965183,
      "step": 10
    },
    {
      "epoch": 0.1,
      "grad_norm": 20.190218055543376,
      "learning_rate": 5e-07,
      "logits/chosen": -2.8035528659820557,
      "logits/rejected": -2.70589280128479,
      "logps/chosen": -569.3781127929688,
      "logps/rejected": -963.7088012695312,
      "loss": 0.6772,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 0.028690308332443237,
      "rewards/margins": 0.031130677089095116,
      "rewards/rejected": -0.002440371550619602,
      "step": 20
    },
    {
      "epoch": 0.16,
      "grad_norm": 23.740010487903497,
      "learning_rate": 4.958892245285593e-07,
      "logits/chosen": -2.8474576473236084,
      "logits/rejected": -2.897707462310791,
      "logps/chosen": -587.6588134765625,
      "logps/rejected": -1056.246337890625,
      "loss": 0.6377,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 0.03852864354848862,
      "rewards/margins": 0.11339948326349258,
      "rewards/rejected": -0.07487084716558456,
      "step": 30
    },
    {
      "epoch": 0.21,
      "grad_norm": 41.01050783833045,
      "learning_rate": 4.836920859140499e-07,
      "logits/chosen": -3.0017783641815186,
      "logits/rejected": -3.0174295902252197,
      "logps/chosen": -484.53204345703125,
      "logps/rejected": -1140.9752197265625,
      "loss": 0.4858,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.15590691566467285,
      "rewards/margins": 0.6636868119239807,
      "rewards/rejected": -0.8195937275886536,
      "step": 40
    },
    {
      "epoch": 0.26,
      "grad_norm": 29.37990905047941,
      "learning_rate": 4.638097017423782e-07,
      "logits/chosen": -3.24768328666687,
      "logits/rejected": -3.380809783935547,
      "logps/chosen": -613.0138549804688,
      "logps/rejected": -1256.5755615234375,
      "loss": 0.3608,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -0.6601036190986633,
      "rewards/margins": 2.2774975299835205,
      "rewards/rejected": -2.937601327896118,
      "step": 50
    },
    {
      "epoch": 0.31,
      "grad_norm": 76.05443101336851,
      "learning_rate": 4.3689592815087764e-07,
      "logits/chosen": -3.0450172424316406,
      "logits/rejected": -3.2747421264648438,
      "logps/chosen": -602.8151245117188,
      "logps/rejected": -1274.7425537109375,
      "loss": 0.3022,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.6452581882476807,
      "rewards/margins": 2.6640372276306152,
      "rewards/rejected": -3.309295654296875,
      "step": 60
    },
    {
      "epoch": 0.36,
      "grad_norm": 33.12857974455004,
      "learning_rate": 4.038358569821387e-07,
      "logits/chosen": -3.0854501724243164,
      "logits/rejected": -3.26892352104187,
      "logps/chosen": -661.424072265625,
      "logps/rejected": -1598.431884765625,
      "loss": 0.2321,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": -1.154323697090149,
      "rewards/margins": 4.336134910583496,
      "rewards/rejected": -5.4904584884643555,
      "step": 70
    },
    {
      "epoch": 0.41,
      "grad_norm": 40.2672306106755,
      "learning_rate": 3.65716708473318e-07,
      "logits/chosen": -3.1047134399414062,
      "logits/rejected": -3.264630079269409,
      "logps/chosen": -605.7648315429688,
      "logps/rejected": -1600.621826171875,
      "loss": 0.1992,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": -0.9309484362602234,
      "rewards/margins": 4.815203666687012,
      "rewards/rejected": -5.746151924133301,
      "step": 80
    },
    {
      "epoch": 0.47,
      "grad_norm": 31.009610555577336,
      "learning_rate": 3.2379207670987346e-07,
      "logits/chosen": -3.1034348011016846,
      "logits/rejected": -3.04689359664917,
      "logps/chosen": -630.8348388671875,
      "logps/rejected": -1542.02685546875,
      "loss": 0.1838,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.8493152856826782,
      "rewards/margins": 5.170994758605957,
      "rewards/rejected": -6.020310401916504,
      "step": 90
    },
    {
      "epoch": 0.52,
      "grad_norm": 49.251429406444444,
      "learning_rate": 2.79440703675024e-07,
      "logits/chosen": -3.0847506523132324,
      "logits/rejected": -3.1429543495178223,
      "logps/chosen": -606.3587036132812,
      "logps/rejected": -1756.322265625,
      "loss": 0.1625,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.8517366647720337,
      "rewards/margins": 6.2657976150512695,
      "rewards/rejected": -7.1175336837768555,
      "step": 100
    },
    {
      "epoch": 0.52,
      "eval_logits/chosen": -3.3846025466918945,
      "eval_logits/rejected": -3.3073556423187256,
      "eval_logps/chosen": -972.7769165039062,
      "eval_logps/rejected": -1444.57763671875,
      "eval_loss": 0.33459049463272095,
      "eval_rewards/accuracies": 0.7916666865348816,
      "eval_rewards/chosen": -1.212856411933899,
      "eval_rewards/margins": 4.115774631500244,
      "eval_rewards/rejected": -5.328631401062012,
      "eval_runtime": 34.9981,
      "eval_samples_per_second": 8.115,
      "eval_steps_per_second": 0.257,
      "step": 100
    },
    {
      "epoch": 0.57,
      "grad_norm": 17.436897320860403,
      "learning_rate": 2.341211376599406e-07,
      "logits/chosen": -3.0867555141448975,
      "logits/rejected": -3.298828125,
      "logps/chosen": -614.1629028320312,
      "logps/rejected": -1688.4345703125,
      "loss": 0.1466,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.7798967361450195,
      "rewards/margins": 5.776389122009277,
      "rewards/rejected": -6.556285858154297,
      "step": 110
    },
    {
      "epoch": 0.62,
      "grad_norm": 31.209651266501922,
      "learning_rate": 1.8932376714743234e-07,
      "logits/chosen": -3.143540620803833,
      "logits/rejected": -3.1130645275115967,
      "logps/chosen": -650.592041015625,
      "logps/rejected": -1678.760498046875,
      "loss": 0.1488,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -0.9372560381889343,
      "rewards/margins": 5.595232963562012,
      "rewards/rejected": -6.532488822937012,
      "step": 120
    },
    {
      "epoch": 0.67,
      "grad_norm": 27.932007137908535,
      "learning_rate": 1.465218075926022e-07,
      "logits/chosen": -3.0368003845214844,
      "logits/rejected": -3.2008068561553955,
      "logps/chosen": -692.443359375,
      "logps/rejected": -1812.3238525390625,
      "loss": 0.1393,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.2292582988739014,
      "rewards/margins": 6.150660514831543,
      "rewards/rejected": -7.379919528961182,
      "step": 130
    },
    {
      "epoch": 0.73,
      "grad_norm": 38.49048498668927,
      "learning_rate": 1.071228529591909e-07,
      "logits/chosen": -3.0865681171417236,
      "logits/rejected": -3.1455368995666504,
      "logps/chosen": -677.2738037109375,
      "logps/rejected": -1936.8187255859375,
      "loss": 0.1349,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.3584263324737549,
      "rewards/margins": 7.760031700134277,
      "rewards/rejected": -9.118457794189453,
      "step": 140
    },
    {
      "epoch": 0.78,
      "grad_norm": 131.57411160189204,
      "learning_rate": 7.242258529765793e-08,
      "logits/chosen": -3.0663182735443115,
      "logits/rejected": -3.130246877670288,
      "logps/chosen": -719.6699829101562,
      "logps/rejected": -2027.244873046875,
      "loss": 0.1059,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": -1.5801483392715454,
      "rewards/margins": 8.059651374816895,
      "rewards/rejected": -9.639799118041992,
      "step": 150
    },
    {
      "epoch": 0.83,
      "grad_norm": 20.032754023483275,
      "learning_rate": 4.3562164681246824e-08,
      "logits/chosen": -3.028076410293579,
      "logits/rejected": -3.1169402599334717,
      "logps/chosen": -710.513427734375,
      "logps/rejected": -2098.9404296875,
      "loss": 0.1314,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -1.6005052328109741,
      "rewards/margins": 8.92524528503418,
      "rewards/rejected": -10.525751113891602,
      "step": 160
    },
    {
      "epoch": 0.88,
      "grad_norm": 43.60438550060245,
      "learning_rate": 2.1490700783280884e-08,
      "logits/chosen": -2.9330756664276123,
      "logits/rejected": -3.0294029712677,
      "logps/chosen": -678.80615234375,
      "logps/rejected": -1974.254638671875,
      "loss": 0.1197,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -1.2993422746658325,
      "rewards/margins": 9.22239875793457,
      "rewards/rejected": -10.52174186706543,
      "step": 170
    },
    {
      "epoch": 0.93,
      "grad_norm": 26.084829370342806,
      "learning_rate": 6.9340402630445885e-09,
      "logits/chosen": -3.012688159942627,
      "logits/rejected": -3.114205837249756,
      "logps/chosen": -689.8292236328125,
      "logps/rejected": -2119.27197265625,
      "loss": 0.1299,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -1.4270641803741455,
      "rewards/margins": 9.246889114379883,
      "rewards/rejected": -10.673952102661133,
      "step": 180
    },
    {
      "epoch": 0.98,
      "grad_norm": 56.458996538652926,
      "learning_rate": 3.708964246392221e-10,
      "logits/chosen": -3.041151523590088,
      "logits/rejected": -2.961495876312256,
      "logps/chosen": -715.9195556640625,
      "logps/rejected": -2031.1800537109375,
      "loss": 0.1285,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": -1.2981992959976196,
      "rewards/margins": 9.372197151184082,
      "rewards/rejected": -10.67039680480957,
      "step": 190
    },
    {
      "epoch": 1.0,
      "step": 193,
      "total_flos": 0.0,
      "train_loss": 0.2672084366101675,
      "train_runtime": 3083.1249,
      "train_samples_per_second": 3.999,
      "train_steps_per_second": 0.063
    }
  ],
  "logging_steps": 10,
  "max_steps": 193,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}