{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9969104016477858,
  "eval_steps": 100,
  "global_step": 121,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008238928939237899,
      "grad_norm": 3.6043033117037053,
      "learning_rate": 2.3076923076923076e-08,
      "logits/chosen": -0.971167802810669,
      "logits/rejected": -1.2423464059829712,
      "logps/chosen": -290.201171875,
      "logps/rejected": -263.9928894042969,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.082389289392379,
      "grad_norm": 3.4484942444647335,
      "learning_rate": 2.3076923076923078e-07,
      "logits/chosen": -0.9607771635055542,
      "logits/rejected": -0.9658682942390442,
      "logps/chosen": -330.0989685058594,
      "logps/rejected": -330.9446716308594,
      "loss": 0.6931,
      "rewards/accuracies": 0.4236111044883728,
      "rewards/chosen": 0.0003925244091078639,
      "rewards/margins": 4.308321513235569e-05,
      "rewards/rejected": 0.0003494411939755082,
      "step": 10
    },
    {
      "epoch": 0.164778578784758,
      "grad_norm": 3.5480284480156086,
      "learning_rate": 2.969010932648327e-07,
      "logits/chosen": -1.0245492458343506,
      "logits/rejected": -1.0337975025177002,
      "logps/chosen": -347.72344970703125,
      "logps/rejected": -345.3406066894531,
      "loss": 0.6907,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": 0.007954178377985954,
      "rewards/margins": 0.005248874891549349,
      "rewards/rejected": 0.00270530441775918,
      "step": 20
    },
    {
      "epoch": 0.24716786817713698,
      "grad_norm": 3.121263888821987,
      "learning_rate": 2.8203020867701666e-07,
      "logits/chosen": -0.8977586030960083,
      "logits/rejected": -1.0179085731506348,
      "logps/chosen": -304.6155700683594,
      "logps/rejected": -275.14447021484375,
      "loss": 0.6823,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": 0.025795791298151016,
      "rewards/margins": 0.025993749499320984,
      "rewards/rejected": -0.00019796025299001485,
      "step": 30
    },
    {
      "epoch": 0.329557157569516,
      "grad_norm": 2.6779190942219393,
      "learning_rate": 2.560660171779821e-07,
      "logits/chosen": -0.9125121235847473,
      "logits/rejected": -1.0349690914154053,
      "logps/chosen": -340.72442626953125,
      "logps/rejected": -313.9574890136719,
      "loss": 0.675,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 0.02803516387939453,
      "rewards/margins": 0.04235466197133064,
      "rewards/rejected": -0.01431949995458126,
      "step": 40
    },
    {
      "epoch": 0.411946446961895,
      "grad_norm": 2.814818260153817,
      "learning_rate": 2.2119005546214604e-07,
      "logits/chosen": -0.9567445516586304,
      "logits/rejected": -0.9532279968261719,
      "logps/chosen": -272.302978515625,
      "logps/rejected": -295.3742370605469,
      "loss": 0.6649,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": 0.006898154504597187,
      "rewards/margins": 0.06003887206315994,
      "rewards/rejected": -0.05314071848988533,
      "step": 50
    },
    {
      "epoch": 0.49433573635427397,
      "grad_norm": 2.653170547007469,
      "learning_rate": 1.803326358498057e-07,
      "logits/chosen": -0.8227835893630981,
      "logits/rejected": -0.8951934576034546,
      "logps/chosen": -364.8822021484375,
      "logps/rejected": -332.07489013671875,
      "loss": 0.6563,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": 0.002518509980291128,
      "rewards/margins": 0.14299212396144867,
      "rewards/rejected": -0.1404736191034317,
      "step": 60
    },
    {
      "epoch": 0.576725025746653,
      "grad_norm": 2.7392936630658764,
      "learning_rate": 1.3692663858785124e-07,
      "logits/chosen": -0.8262346386909485,
      "logits/rejected": -0.9113919138908386,
      "logps/chosen": -337.5333251953125,
      "logps/rejected": -330.14459228515625,
      "loss": 0.6558,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -0.06830023974180222,
      "rewards/margins": 0.05756063386797905,
      "rewards/rejected": -0.12586086988449097,
      "step": 70
    },
    {
      "epoch": 0.659114315139032,
      "grad_norm": 4.467409541426095,
      "learning_rate": 9.461907790309735e-08,
      "logits/chosen": -0.8138895034790039,
      "logits/rejected": -0.9478031992912292,
      "logps/chosen": -370.1873779296875,
      "logps/rejected": -331.826904296875,
      "loss": 0.645,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.046670813113451004,
      "rewards/margins": 0.14053179323673248,
      "rewards/rejected": -0.187202587723732,
      "step": 80
    },
    {
      "epoch": 0.741503604531411,
      "grad_norm": 2.8035199165767315,
      "learning_rate": 5.6964676309761004e-08,
      "logits/chosen": -0.8749955296516418,
      "logits/rejected": -0.9215911030769348,
      "logps/chosen": -359.80511474609375,
      "logps/rejected": -361.1700134277344,
      "loss": 0.647,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.08991885185241699,
      "rewards/margins": 0.14792749285697937,
      "rewards/rejected": -0.23784634470939636,
      "step": 90
    },
    {
      "epoch": 0.82389289392379,
      "grad_norm": 3.018969560415363,
      "learning_rate": 2.7127193356651214e-08,
      "logits/chosen": -0.838853657245636,
      "logits/rejected": -0.981770396232605,
      "logps/chosen": -380.34307861328125,
      "logps/rejected": -369.41180419921875,
      "loss": 0.6411,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.10747754573822021,
      "rewards/margins": 0.17505204677581787,
      "rewards/rejected": -0.28252965211868286,
      "step": 100
    },
    {
      "epoch": 0.82389289392379,
      "eval_logits/chosen": -1.0462558269500732,
      "eval_logits/rejected": -1.1581745147705078,
      "eval_logps/chosen": -335.3811340332031,
      "eval_logps/rejected": -418.57818603515625,
      "eval_loss": 0.6494396924972534,
      "eval_rewards/accuracies": 0.5625,
      "eval_rewards/chosen": -0.17523552477359772,
      "eval_rewards/margins": 0.04425430670380592,
      "eval_rewards/rejected": -0.21948984265327454,
      "eval_runtime": 9.1337,
      "eval_samples_per_second": 109.485,
      "eval_steps_per_second": 0.876,
      "step": 100
    },
    {
      "epoch": 0.9062821833161689,
      "grad_norm": 2.971778165880155,
      "learning_rate": 7.613603464044916e-09,
      "logits/chosen": -0.8196293115615845,
      "logits/rejected": -0.9439749717712402,
      "logps/chosen": -354.5921630859375,
      "logps/rejected": -318.67962646484375,
      "loss": 0.6449,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.1414671391248703,
      "rewards/margins": 0.08098333328962326,
      "rewards/rejected": -0.22245049476623535,
      "step": 110
    },
    {
      "epoch": 0.9886714727085479,
      "grad_norm": 2.832084621380708,
      "learning_rate": 6.34574876699101e-11,
      "logits/chosen": -0.9086641073226929,
      "logits/rejected": -0.9848729372024536,
      "logps/chosen": -327.4648132324219,
      "logps/rejected": -343.4488525390625,
      "loss": 0.6437,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.10709067434072495,
      "rewards/margins": 0.1814233958721161,
      "rewards/rejected": -0.28851407766342163,
      "step": 120
    },
    {
      "epoch": 0.9969104016477858,
      "step": 121,
      "total_flos": 0.0,
      "train_loss": 0.6619468080110786,
      "train_runtime": 1288.9507,
      "train_samples_per_second": 48.206,
      "train_steps_per_second": 0.094
    }
  ],
  "logging_steps": 10,
  "max_steps": 121,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}