{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.992,
  "eval_steps": 100,
  "global_step": 124,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 3.846153846153847e-07,
      "logits/chosen": 0.7132297158241272,
      "logits/rejected": 0.7902912497520447,
      "logps/chosen": -182.8937530517578,
      "logps/rejected": -166.20700073242188,
      "loss": 0.0102,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16,
      "learning_rate": 3.846153846153847e-06,
      "logits/chosen": 0.87026047706604,
      "logits/rejected": 0.8367093801498413,
      "logps/chosen": -155.32986450195312,
      "logps/rejected": -139.0651397705078,
      "loss": 0.0102,
      "rewards/accuracies": 0.3611111044883728,
      "rewards/chosen": 0.0006189693231135607,
      "rewards/margins": 9.571156988386065e-05,
      "rewards/rejected": 0.0005232577677816153,
      "step": 10
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.951096619903317e-06,
      "logits/chosen": 0.8673034906387329,
      "logits/rejected": 0.8491884469985962,
      "logps/chosen": -148.64974975585938,
      "logps/rejected": -141.98843383789062,
      "loss": 0.0103,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": 6.371500785462558e-05,
      "rewards/margins": -0.00015192512364592403,
      "rewards/rejected": 0.00021564005874097347,
      "step": 20
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.716164218065246e-06,
      "logits/chosen": 0.8203651309013367,
      "logits/rejected": 0.8144901990890503,
      "logps/chosen": -143.59263610839844,
      "logps/rejected": -129.68614196777344,
      "loss": 0.0102,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": 0.0005150804063305259,
      "rewards/margins": 0.00021757767535746098,
      "rewards/rejected": 0.00029750276007689536,
      "step": 30
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.3048902348863116e-06,
      "logits/chosen": 0.8285233378410339,
      "logits/rejected": 0.8196646571159363,
      "logps/chosen": -175.5411376953125,
      "logps/rejected": -162.92697143554688,
      "loss": 0.0104,
      "rewards/accuracies": 0.3187499940395355,
      "rewards/chosen": -3.423704811211792e-06,
      "rewards/margins": -0.0005603213212452829,
      "rewards/rejected": 0.0005568976048380136,
      "step": 40
    },
    {
      "epoch": 0.8,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": 0.8533094525337219,
      "logits/rejected": 0.8660014271736145,
      "logps/chosen": -154.22348022460938,
      "logps/rejected": -141.3745880126953,
      "loss": 0.0102,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.0006007859483361244,
      "rewards/margins": 9.4042276032269e-05,
      "rewards/rejected": -0.0006948282243683934,
      "step": 50
    },
    {
      "epoch": 0.96,
      "learning_rate": 3.0956464785579125e-06,
      "logits/chosen": 0.840475857257843,
      "logits/rejected": 0.835978627204895,
      "logps/chosen": -147.8980255126953,
      "logps/rejected": -135.54531860351562,
      "loss": 0.0102,
      "rewards/accuracies": 0.3499999940395355,
      "rewards/chosen": -0.0003634719760157168,
      "rewards/margins": -3.2313815609086305e-05,
      "rewards/rejected": -0.00033115819678641856,
      "step": 60
    },
    {
      "epoch": 1.12,
      "learning_rate": 2.39389699200963e-06,
      "logits/chosen": 0.9384347796440125,
      "logits/rejected": 0.9138472676277161,
      "logps/chosen": -141.71607971191406,
      "logps/rejected": -131.02381896972656,
      "loss": 0.0102,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": -0.0002880954125430435,
      "rewards/margins": 0.0002658166631590575,
      "rewards/rejected": -0.0005539121339097619,
      "step": 70
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.700590188571887e-06,
      "logits/chosen": 0.8153573274612427,
      "logits/rejected": 0.8099604845046997,
      "logps/chosen": -133.85093688964844,
      "logps/rejected": -118.66615295410156,
      "loss": 0.0102,
      "rewards/accuracies": 0.41874998807907104,
      "rewards/chosen": -0.00017777756147552282,
      "rewards/margins": 0.0004668553010560572,
      "rewards/rejected": -0.000644632731564343,
      "step": 80
    },
    {
      "epoch": 1.44,
      "learning_rate": 1.0708929268538034e-06,
      "logits/chosen": 0.8284982442855835,
      "logits/rejected": 0.8237984776496887,
      "logps/chosen": -158.91497802734375,
      "logps/rejected": -146.9730682373047,
      "loss": 0.0102,
      "rewards/accuracies": 0.4437499940395355,
      "rewards/chosen": -0.000682778307236731,
      "rewards/margins": 0.0004111085145268589,
      "rewards/rejected": -0.0010938867926597595,
      "step": 90
    },
    {
      "epoch": 1.6,
      "learning_rate": 5.549106142039018e-07,
      "logits/chosen": 0.8617427945137024,
      "logits/rejected": 0.8781857490539551,
      "logps/chosen": -152.7745819091797,
      "logps/rejected": -141.37179565429688,
      "loss": 0.0103,
      "rewards/accuracies": 0.34375,
      "rewards/chosen": -0.001233941875398159,
      "rewards/margins": -0.00010122207459062338,
      "rewards/rejected": -0.0011327198008075356,
      "step": 100
    },
    {
      "epoch": 1.77,
      "learning_rate": 1.937002879188285e-07,
      "logits/chosen": 0.8532508611679077,
      "logits/rejected": 0.8138865232467651,
      "logps/chosen": -175.2100830078125,
      "logps/rejected": -158.42974853515625,
      "loss": 0.0102,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": -0.0008482037228532135,
      "rewards/margins": 0.00030287052504718304,
      "rewards/rejected": -0.0011510740732774138,
      "step": 110
    },
    {
      "epoch": 1.93,
      "learning_rate": 1.6003680950742728e-08,
      "logits/chosen": 0.8294161558151245,
      "logits/rejected": 0.826370120048523,
      "logps/chosen": -158.0736083984375,
      "logps/rejected": -146.1743927001953,
      "loss": 0.01,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.0006492567481473088,
      "rewards/margins": 0.0010562509996816516,
      "rewards/rejected": -0.0017055077478289604,
      "step": 120
    },
    {
      "epoch": 1.99,
      "step": 124,
      "total_flos": 0.0,
      "train_loss": 0.001956978782771095,
      "train_runtime": 290.9594,
      "train_samples_per_second": 6.874,
      "train_steps_per_second": 0.426
    }
  ],
  "logging_steps": 10,
  "max_steps": 124,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}