{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 1000,
  "global_step": 229,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004366812227074236,
      "grad_norm": 0.4543283495867887,
      "learning_rate": 2.173913043478261e-07,
      "logits/chosen": -0.9449982047080994,
      "logits/rejected": -0.915056049823761,
      "logps/chosen": -328.08038330078125,
      "logps/rejected": -274.1044921875,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.043668122270742356,
      "grad_norm": 0.47008608128274937,
      "learning_rate": 2.173913043478261e-06,
      "logits/chosen": -0.9651342034339905,
      "logits/rejected": -1.0432785749435425,
      "logps/chosen": -326.21295166015625,
      "logps/rejected": -277.12603759765625,
      "loss": 0.6928,
      "rewards/accuracies": 0.4583333432674408,
      "rewards/chosen": 0.000499483838211745,
      "rewards/margins": 0.00042585484334267676,
      "rewards/rejected": 7.362888572970405e-05,
      "step": 10
    },
    {
      "epoch": 0.08733624454148471,
      "grad_norm": 0.5077455364034207,
      "learning_rate": 4.347826086956522e-06,
      "logits/chosen": -1.0618306398391724,
      "logits/rejected": -1.009238600730896,
      "logps/chosen": -334.39141845703125,
      "logps/rejected": -346.700927734375,
      "loss": 0.6882,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.0003057918802369386,
      "rewards/margins": 0.00985435489565134,
      "rewards/rejected": -0.010160146281123161,
      "step": 20
    },
    {
      "epoch": 0.13100436681222707,
      "grad_norm": 0.5720215285315815,
      "learning_rate": 4.985768230048011e-06,
      "logits/chosen": -1.0507558584213257,
      "logits/rejected": -1.075430989265442,
      "logps/chosen": -308.62359619140625,
      "logps/rejected": -301.9061279296875,
      "loss": 0.6651,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": 0.0059205591678619385,
      "rewards/margins": 0.05983991548418999,
      "rewards/rejected": -0.053919363766908646,
      "step": 30
    },
    {
      "epoch": 0.17467248908296942,
      "grad_norm": 0.8551226198033497,
      "learning_rate": 4.9164513914144005e-06,
      "logits/chosen": -1.1152777671813965,
      "logits/rejected": -1.1143274307250977,
      "logps/chosen": -288.18585205078125,
      "logps/rejected": -310.5125732421875,
      "loss": 0.5986,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.003953046165406704,
      "rewards/margins": 0.21825852990150452,
      "rewards/rejected": -0.2222115695476532,
      "step": 40
    },
    {
      "epoch": 0.2183406113537118,
      "grad_norm": 1.0672212168131259,
      "learning_rate": 4.791042480696179e-06,
      "logits/chosen": -1.1128385066986084,
      "logits/rejected": -1.0449755191802979,
      "logps/chosen": -317.44451904296875,
      "logps/rejected": -390.5851135253906,
      "loss": 0.4535,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 0.03481646627187729,
      "rewards/margins": 0.6241441965103149,
      "rewards/rejected": -0.5893277525901794,
      "step": 50
    },
    {
      "epoch": 0.26200873362445415,
      "grad_norm": 0.6925238532589472,
      "learning_rate": 4.612452562309975e-06,
      "logits/chosen": -1.0924533605575562,
      "logits/rejected": -1.0897819995880127,
      "logps/chosen": -312.33837890625,
      "logps/rejected": -432.6812438964844,
      "loss": 0.2332,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 0.08799274265766144,
      "rewards/margins": 1.521544098854065,
      "rewards/rejected": -1.4335514307022095,
      "step": 60
    },
    {
      "epoch": 0.3056768558951965,
      "grad_norm": 0.8913228040925837,
      "learning_rate": 4.384827169085993e-06,
      "logits/chosen": -1.0938880443572998,
      "logits/rejected": -1.1012872457504272,
      "logps/chosen": -322.24139404296875,
      "logps/rejected": -572.3203125,
      "loss": 0.114,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -0.0598033182322979,
      "rewards/margins": 2.8274989128112793,
      "rewards/rejected": -2.8873023986816406,
      "step": 70
    },
    {
      "epoch": 0.34934497816593885,
      "grad_norm": 1.025805090974843,
      "learning_rate": 4.1134500737541026e-06,
      "logits/chosen": -1.0882785320281982,
      "logits/rejected": -0.9281471371650696,
      "logps/chosen": -279.4626770019531,
      "logps/rejected": -729.4642333984375,
      "loss": 0.0632,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.04784034937620163,
      "rewards/margins": 4.184046745300293,
      "rewards/rejected": -4.13620662689209,
      "step": 80
    },
    {
      "epoch": 0.3930131004366812,
      "grad_norm": 0.7021281871664607,
      "learning_rate": 3.8046206389447916e-06,
      "logits/chosen": -0.9880226850509644,
      "logits/rejected": -0.9430360794067383,
      "logps/chosen": -297.018310546875,
      "logps/rejected": -715.1465454101562,
      "loss": 0.0491,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.10021056234836578,
      "rewards/margins": 4.483721733093262,
      "rewards/rejected": -4.383510589599609,
      "step": 90
    },
    {
      "epoch": 0.4366812227074236,
      "grad_norm": 0.586209743573467,
      "learning_rate": 3.4655075927279576e-06,
      "logits/chosen": -0.9809297323226929,
      "logits/rejected": -0.8725347518920898,
      "logps/chosen": -311.753662109375,
      "logps/rejected": -830.88671875,
      "loss": 0.0353,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.36457541584968567,
      "rewards/margins": 4.913618564605713,
      "rewards/rejected": -5.278193950653076,
      "step": 100
    },
    {
      "epoch": 0.48034934497816595,
      "grad_norm": 0.3979818436750899,
      "learning_rate": 3.1039826239365754e-06,
      "logits/chosen": -0.9446805119514465,
      "logits/rejected": -0.8940761685371399,
      "logps/chosen": -431.5677795410156,
      "logps/rejected": -925.68017578125,
      "loss": 0.0288,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.047907829284668,
      "rewards/margins": 5.1847357749938965,
      "rewards/rejected": -6.232643127441406,
      "step": 110
    },
    {
      "epoch": 0.5240174672489083,
      "grad_norm": 0.44864039181837617,
      "learning_rate": 2.7284376599571776e-06,
      "logits/chosen": -0.9638189077377319,
      "logits/rejected": -0.8882731199264526,
      "logps/chosen": -392.0179748535156,
      "logps/rejected": -961.5611572265625,
      "loss": 0.0207,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.6613569259643555,
      "rewards/margins": 6.055957794189453,
      "rewards/rejected": -6.71731424331665,
      "step": 120
    },
    {
      "epoch": 0.5676855895196506,
      "grad_norm": 0.15662373138721433,
      "learning_rate": 2.3475900684411027e-06,
      "logits/chosen": -0.97578364610672,
      "logits/rejected": -0.8818238973617554,
      "logps/chosen": -417.825439453125,
      "logps/rejected": -989.6531372070312,
      "loss": 0.0229,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -0.840344250202179,
      "rewards/margins": 5.920441627502441,
      "rewards/rejected": -6.760786533355713,
      "step": 130
    },
    {
      "epoch": 0.611353711790393,
      "grad_norm": 1.0644930727731667,
      "learning_rate": 1.970280304707447e-06,
      "logits/chosen": -1.0020447969436646,
      "logits/rejected": -0.8326355814933777,
      "logps/chosen": -390.6119384765625,
      "logps/rejected": -1086.9949951171875,
      "loss": 0.012,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.8912075757980347,
      "rewards/margins": 6.5178542137146,
      "rewards/rejected": -7.40906286239624,
      "step": 140
    },
    {
      "epoch": 0.6550218340611353,
      "grad_norm": 0.33838570745300933,
      "learning_rate": 1.6052667019636462e-06,
      "logits/chosen": -0.9777904748916626,
      "logits/rejected": -0.9008253216743469,
      "logps/chosen": -421.56658935546875,
      "logps/rejected": -1008.5904541015625,
      "loss": 0.0166,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.0421282052993774,
      "rewards/margins": 6.398242950439453,
      "rewards/rejected": -7.440371513366699,
      "step": 150
    },
    {
      "epoch": 0.6986899563318777,
      "grad_norm": 0.1209081830310832,
      "learning_rate": 1.261022167792161e-06,
      "logits/chosen": -1.0033915042877197,
      "logits/rejected": -0.8852731585502625,
      "logps/chosen": -402.15692138671875,
      "logps/rejected": -1126.320556640625,
      "loss": 0.011,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.8826059103012085,
      "rewards/margins": 6.96124267578125,
      "rewards/rejected": -7.843849182128906,
      "step": 160
    },
    {
      "epoch": 0.74235807860262,
      "grad_norm": 0.9669967772246254,
      "learning_rate": 9.455375061024319e-07,
      "logits/chosen": -0.9981080889701843,
      "logits/rejected": -0.914253830909729,
      "logps/chosen": -445.781982421875,
      "logps/rejected": -1072.5477294921875,
      "loss": 0.0161,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.2199878692626953,
      "rewards/margins": 6.577937126159668,
      "rewards/rejected": -7.797924995422363,
      "step": 170
    },
    {
      "epoch": 0.7860262008733624,
      "grad_norm": 0.09826043440610506,
      "learning_rate": 6.661359299530626e-07,
      "logits/chosen": -0.9915900230407715,
      "logits/rejected": -0.8251992464065552,
      "logps/chosen": -402.2227478027344,
      "logps/rejected": -1161.5181884765625,
      "loss": 0.0089,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.1185193061828613,
      "rewards/margins": 7.358689308166504,
      "rewards/rejected": -8.477209091186523,
      "step": 180
    },
    {
      "epoch": 0.8296943231441049,
      "grad_norm": 0.3084190328632994,
      "learning_rate": 4.293030708802834e-07,
      "logits/chosen": -1.0055569410324097,
      "logits/rejected": -0.8623663187026978,
      "logps/chosen": -418.1572265625,
      "logps/rejected": -1129.2655029296875,
      "loss": 0.0108,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.1186723709106445,
      "rewards/margins": 6.933996677398682,
      "rewards/rejected": -8.0526704788208,
      "step": 190
    },
    {
      "epoch": 0.8733624454148472,
      "grad_norm": 0.10794356987220223,
      "learning_rate": 2.405364306547955e-07,
      "logits/chosen": -0.8744913339614868,
      "logits/rejected": -0.7203727960586548,
      "logps/chosen": -407.0746765136719,
      "logps/rejected": -1108.8079833984375,
      "loss": 0.0118,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.0695985555648804,
      "rewards/margins": 7.07146692276001,
      "rewards/rejected": -8.14106559753418,
      "step": 200
    },
    {
      "epoch": 0.9170305676855895,
      "grad_norm": 1.7933618946506555,
      "learning_rate": 1.0421777008019663e-07,
      "logits/chosen": -1.047523856163025,
      "logits/rejected": -0.8576782941818237,
      "logps/chosen": -400.34527587890625,
      "logps/rejected": -1216.9423828125,
      "loss": 0.0123,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.9318978190422058,
      "rewards/margins": 7.647104740142822,
      "rewards/rejected": -8.579002380371094,
      "step": 210
    },
    {
      "epoch": 0.9606986899563319,
      "grad_norm": 1.0019715180278645,
      "learning_rate": 2.351139701825267e-08,
      "logits/chosen": -0.9511087536811829,
      "logits/rejected": -0.8330629467964172,
      "logps/chosen": -415.8382873535156,
      "logps/rejected": -1095.948974609375,
      "loss": 0.0086,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.0705822706222534,
      "rewards/margins": 7.189767360687256,
      "rewards/rejected": -8.26034927368164,
      "step": 220
    },
    {
      "epoch": 1.0,
      "step": 229,
      "total_flos": 0.0,
      "train_loss": 0.16486623261823405,
      "train_runtime": 2148.0634,
      "train_samples_per_second": 6.816,
      "train_steps_per_second": 0.107
    }
  ],
  "logging_steps": 10,
  "max_steps": 229,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}