{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9905956112852664,
  "eval_steps": 500,
  "global_step": 79,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 6.25e-08,
      "logits/chosen": -0.9686082005500793,
      "logits/rejected": -0.7667080163955688,
      "logps/chosen": -304.88916015625,
      "logps/pi_response": -174.6251220703125,
      "logps/ref_response": -174.6251220703125,
      "logps/rejected": -560.704833984375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.990217055187362e-07,
      "logits/chosen": -1.5470714569091797,
      "logits/rejected": -1.1779179573059082,
      "logps/chosen": -347.94091796875,
      "logps/pi_response": -176.1438751220703,
      "logps/ref_response": -174.41168212890625,
      "logps/rejected": -613.462646484375,
      "loss": 0.6405,
      "rewards/accuracies": 0.6423611044883728,
      "rewards/chosen": -0.11746331304311752,
      "rewards/margins": 0.18620280921459198,
      "rewards/rejected": -0.3036660850048065,
      "step": 10
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.655786431300069e-07,
      "logits/chosen": -0.04107005149126053,
      "logits/rejected": 0.5205573439598083,
      "logps/chosen": -461.720947265625,
      "logps/pi_response": -247.1218719482422,
      "logps/ref_response": -176.59713745117188,
      "logps/rejected": -896.8013916015625,
      "loss": 0.6234,
      "rewards/accuracies": 0.746874988079071,
      "rewards/chosen": -1.4934273958206177,
      "rewards/margins": 1.6034536361694336,
      "rewards/rejected": -3.0968809127807617,
      "step": 20
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.9061232191019517e-07,
      "logits/chosen": 0.25087007880210876,
      "logits/rejected": 0.9124622344970703,
      "logps/chosen": -447.6351013183594,
      "logps/pi_response": -237.4333038330078,
      "logps/ref_response": -185.14527893066406,
      "logps/rejected": -821.3446044921875,
      "loss": 0.4964,
      "rewards/accuracies": 0.746874988079071,
      "rewards/chosen": -1.1763864755630493,
      "rewards/margins": 1.3816043138504028,
      "rewards/rejected": -2.557990789413452,
      "step": 30
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.8856223324132555e-07,
      "logits/chosen": 0.004806989338248968,
      "logits/rejected": 1.2118862867355347,
      "logps/chosen": -357.9244384765625,
      "logps/pi_response": -218.65719604492188,
      "logps/ref_response": -178.06735229492188,
      "logps/rejected": -763.8504028320312,
      "loss": 0.4663,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.6063495874404907,
      "rewards/margins": 1.2730048894882202,
      "rewards/rejected": -1.879354476928711,
      "step": 40
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.7908455541642582e-07,
      "logits/chosen": 0.6758335828781128,
      "logits/rejected": 1.2263845205307007,
      "logps/chosen": -416.4789123535156,
      "logps/pi_response": -241.4830322265625,
      "logps/ref_response": -178.06594848632812,
      "logps/rejected": -815.0631103515625,
      "loss": 0.4644,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.9604195356369019,
      "rewards/margins": 1.3460404872894287,
      "rewards/rejected": -2.30646014213562,
      "step": 50
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.32661172908373e-08,
      "logits/chosen": 1.4804556369781494,
      "logits/rejected": 2.2675604820251465,
      "logps/chosen": -387.3179931640625,
      "logps/pi_response": -213.3051300048828,
      "logps/ref_response": -167.53524780273438,
      "logps/rejected": -784.0960083007812,
      "loss": 0.4624,
      "rewards/accuracies": 0.784375011920929,
      "rewards/chosen": -0.7926492691040039,
      "rewards/margins": 1.3485177755355835,
      "rewards/rejected": -2.141166925430298,
      "step": 60
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.956279997278043e-08,
      "logits/chosen": 1.3448526859283447,
      "logits/rejected": 2.2897496223449707,
      "logps/chosen": -391.35345458984375,
      "logps/pi_response": -210.98974609375,
      "logps/ref_response": -175.8311004638672,
      "logps/rejected": -786.43701171875,
      "loss": 0.4534,
      "rewards/accuracies": 0.778124988079071,
      "rewards/chosen": -0.7257815003395081,
      "rewards/margins": 1.268803358078003,
      "rewards/rejected": -1.9945847988128662,
      "step": 70
    },
    {
      "epoch": 0.99,
      "step": 79,
      "total_flos": 0.0,
      "train_loss": 0.5050203770021849,
      "train_runtime": 4555.434,
      "train_samples_per_second": 4.473,
      "train_steps_per_second": 0.017
    }
  ],
  "logging_steps": 10,
  "max_steps": 79,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}