{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9968652037617555,
  "eval_steps": 500,
  "global_step": 159,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.125e-08,
      "logits/chosen": -2.20993709564209,
      "logits/rejected": -2.2606422901153564,
      "logps/chosen": -444.5045166015625,
      "logps/pi_response": -229.9012451171875,
      "logps/ref_response": -229.9012451171875,
      "logps/rejected": -527.7989501953125,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.234208345413208,
      "logits/rejected": -2.2051124572753906,
      "logps/chosen": -379.3716735839844,
      "logps/pi_response": -156.47958374023438,
      "logps/ref_response": -156.6222381591797,
      "logps/rejected": -399.9841613769531,
      "loss": 0.6912,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.009471815079450607,
      "rewards/margins": 0.002212581457570195,
      "rewards/rejected": -0.011684396304190159,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.990353313429303e-07,
      "logits/chosen": -2.198673725128174,
      "logits/rejected": -2.1738362312316895,
      "logps/chosen": -383.15740966796875,
      "logps/pi_response": -155.57437133789062,
      "logps/ref_response": -156.165771484375,
      "logps/rejected": -493.2591247558594,
      "loss": 0.6586,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.2709082067012787,
      "rewards/margins": 0.12124800682067871,
      "rewards/rejected": -0.3921562135219574,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.882681251368548e-07,
      "logits/chosen": -2.128652572631836,
      "logits/rejected": -2.114102840423584,
      "logps/chosen": -446.29705810546875,
      "logps/pi_response": -175.34231567382812,
      "logps/ref_response": -164.71237182617188,
      "logps/rejected": -506.73028564453125,
      "loss": 0.6682,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -0.6163755059242249,
      "rewards/margins": 0.27332210540771484,
      "rewards/rejected": -0.8896976709365845,
      "step": 30
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.6604720940421207e-07,
      "logits/chosen": -2.166658401489258,
      "logits/rejected": -2.1318435668945312,
      "logps/chosen": -464.4285583496094,
      "logps/pi_response": -174.4441680908203,
      "logps/ref_response": -160.98480224609375,
      "logps/rejected": -571.810546875,
      "loss": 0.627,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.8367161750793457,
      "rewards/margins": 0.28963375091552734,
      "rewards/rejected": -1.126349925994873,
      "step": 40
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.3344075855595097e-07,
      "logits/chosen": -2.2756810188293457,
      "logits/rejected": -2.178605794906616,
      "logps/chosen": -466.92510986328125,
      "logps/pi_response": -185.86959838867188,
      "logps/ref_response": -169.9374237060547,
      "logps/rejected": -561.4849853515625,
      "loss": 0.6054,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.6169408559799194,
      "rewards/margins": 0.3747444748878479,
      "rewards/rejected": -0.9916852116584778,
      "step": 50
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.920161866827889e-07,
      "logits/chosen": -2.074246406555176,
      "logits/rejected": -2.054436206817627,
      "logps/chosen": -469.52886962890625,
      "logps/pi_response": -202.82102966308594,
      "logps/ref_response": -161.68600463867188,
      "logps/rejected": -567.7276611328125,
      "loss": 0.6033,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.7857006788253784,
      "rewards/margins": 0.4861486852169037,
      "rewards/rejected": -1.2718493938446045,
      "step": 60
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4376480090239047e-07,
      "logits/chosen": -2.0156216621398926,
      "logits/rejected": -1.9241374731063843,
      "logps/chosen": -472.0431213378906,
      "logps/pi_response": -208.27285766601562,
      "logps/ref_response": -146.0672607421875,
      "logps/rejected": -601.1537475585938,
      "loss": 0.6029,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.0457053184509277,
      "rewards/margins": 0.5965746641159058,
      "rewards/rejected": -1.6422799825668335,
      "step": 70
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.910060778827554e-07,
      "logits/chosen": -2.0666327476501465,
      "logits/rejected": -2.0221619606018066,
      "logps/chosen": -502.83892822265625,
      "logps/pi_response": -218.2173614501953,
      "logps/ref_response": -163.6803436279297,
      "logps/rejected": -607.4405517578125,
      "loss": 0.5537,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.0255662202835083,
      "rewards/margins": 0.5309929847717285,
      "rewards/rejected": -1.5565593242645264,
      "step": 80
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.3627616503391812e-07,
      "logits/chosen": -1.9774086475372314,
      "logits/rejected": -1.9186586141586304,
      "logps/chosen": -529.7247314453125,
      "logps/pi_response": -238.0616455078125,
      "logps/ref_response": -170.34803771972656,
      "logps/rejected": -632.8688354492188,
      "loss": 0.5758,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.2858139276504517,
      "rewards/margins": 0.49250563979148865,
      "rewards/rejected": -1.7783195972442627,
      "step": 90
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8220596619089573e-07,
      "logits/chosen": -1.9811958074569702,
      "logits/rejected": -1.916778326034546,
      "logps/chosen": -523.2442626953125,
      "logps/pi_response": -249.0938262939453,
      "logps/ref_response": -173.4689178466797,
      "logps/rejected": -613.4296875,
      "loss": 0.5684,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -1.1589275598526,
      "rewards/margins": 0.5028344988822937,
      "rewards/rejected": -1.661761999130249,
      "step": 100
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.3139467229135998e-07,
      "logits/chosen": -1.9319040775299072,
      "logits/rejected": -1.8885990381240845,
      "logps/chosen": -558.5001220703125,
      "logps/pi_response": -242.63754272460938,
      "logps/ref_response": -159.67039489746094,
      "logps/rejected": -609.6783447265625,
      "loss": 0.5924,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -1.302638053894043,
      "rewards/margins": 0.45316019654273987,
      "rewards/rejected": -1.755798101425171,
      "step": 110
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.628481651367875e-08,
      "logits/chosen": -1.9305217266082764,
      "logits/rejected": -1.899649977684021,
      "logps/chosen": -504.75457763671875,
      "logps/pi_response": -237.00631713867188,
      "logps/ref_response": -152.2351531982422,
      "logps/rejected": -623.0756225585938,
      "loss": 0.5467,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.2465002536773682,
      "rewards/margins": 0.48606938123703003,
      "rewards/rejected": -1.7325694561004639,
      "step": 120
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.904486005914027e-08,
      "logits/chosen": -1.9227378368377686,
      "logits/rejected": -1.8802906274795532,
      "logps/chosen": -526.3011474609375,
      "logps/pi_response": -246.909423828125,
      "logps/ref_response": -162.397705078125,
      "logps/rejected": -621.2098388671875,
      "loss": 0.5693,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.1543651819229126,
      "rewards/margins": 0.5442228317260742,
      "rewards/rejected": -1.6985880136489868,
      "step": 130
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.1464952759020856e-08,
      "logits/chosen": -1.9093992710113525,
      "logits/rejected": -1.8462045192718506,
      "logps/chosen": -492.194091796875,
      "logps/pi_response": -236.12588500976562,
      "logps/ref_response": -152.87637329101562,
      "logps/rejected": -622.8535766601562,
      "loss": 0.5623,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -1.1474370956420898,
      "rewards/margins": 0.6022692918777466,
      "rewards/rejected": -1.749706506729126,
      "step": 140
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.8708793644441086e-09,
      "logits/chosen": -1.9677903652191162,
      "logits/rejected": -1.8742889165878296,
      "logps/chosen": -523.8792114257812,
      "logps/pi_response": -255.8824462890625,
      "logps/ref_response": -172.59982299804688,
      "logps/rejected": -627.2048950195312,
      "loss": 0.5606,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.12137770652771,
      "rewards/margins": 0.625280499458313,
      "rewards/rejected": -1.7466580867767334,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 159,
      "total_flos": 0.0,
      "train_loss": 0.595289353304689,
      "train_runtime": 4243.0644,
      "train_samples_per_second": 4.803,
      "train_steps_per_second": 0.037
    }
  ],
  "logging_steps": 10,
  "max_steps": 159,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}