|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9905956112852664,
  "eval_steps": 500,
  "global_step": 79,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 6.25e-08,
      "logits/chosen": -2.199268341064453,
      "logits/rejected": -2.1993045806884766,
      "logps/chosen": -444.4443359375,
      "logps/pi_response": -249.17221069335938,
      "logps/ref_response": -249.17221069335938,
      "logps/rejected": -656.5706787109375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.990217055187362e-07,
      "logits/chosen": -2.2055578231811523,
      "logits/rejected": -2.1615922451019287,
      "logps/chosen": -339.12615966796875,
      "logps/pi_response": -225.56497192382812,
      "logps/ref_response": -218.07363891601562,
      "logps/rejected": -589.6128540039062,
      "loss": 0.6499,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.1427740752696991,
      "rewards/margins": 0.1424369066953659,
      "rewards/rejected": -0.2852109670639038,
      "step": 10
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.655786431300069e-07,
      "logits/chosen": -1.9026947021484375,
      "logits/rejected": -1.8411662578582764,
      "logps/chosen": -455.3936462402344,
      "logps/pi_response": -284.5272521972656,
      "logps/ref_response": -217.56393432617188,
      "logps/rejected": -904.8353271484375,
      "loss": 0.521,
      "rewards/accuracies": 0.784375011920929,
      "rewards/chosen": -1.3254053592681885,
      "rewards/margins": 1.8717145919799805,
      "rewards/rejected": -3.197119951248169,
      "step": 20
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.9061232191019517e-07,
      "logits/chosen": -2.0006089210510254,
      "logits/rejected": -1.9059054851531982,
      "logps/chosen": -461.3187561035156,
      "logps/pi_response": -265.7572326660156,
      "logps/ref_response": -216.7289581298828,
      "logps/rejected": -917.9791259765625,
      "loss": 0.5136,
      "rewards/accuracies": 0.796875,
      "rewards/chosen": -1.244099497795105,
      "rewards/margins": 1.8518199920654297,
      "rewards/rejected": -3.095919132232666,
      "step": 30
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.8856223324132555e-07,
      "logits/chosen": -2.099766731262207,
      "logits/rejected": -2.014448642730713,
      "logps/chosen": -427.6517028808594,
      "logps/pi_response": -249.57998657226562,
      "logps/ref_response": -217.0635986328125,
      "logps/rejected": -758.6690673828125,
      "loss": 0.4599,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.7826972603797913,
      "rewards/margins": 0.9983444213867188,
      "rewards/rejected": -1.7810415029525757,
      "step": 40
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.7908455541642582e-07,
      "logits/chosen": -2.0843515396118164,
      "logits/rejected": -1.9910612106323242,
      "logps/chosen": -448.34722900390625,
      "logps/pi_response": -273.9801940917969,
      "logps/ref_response": -225.388671875,
      "logps/rejected": -805.3088989257812,
      "loss": 0.4435,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -1.0244767665863037,
      "rewards/margins": 1.1706058979034424,
      "rewards/rejected": -2.195082426071167,
      "step": 50
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.32661172908373e-08,
      "logits/chosen": -2.002279043197632,
      "logits/rejected": -1.9122340679168701,
      "logps/chosen": -430.016845703125,
      "logps/pi_response": -267.3531799316406,
      "logps/ref_response": -211.29519653320312,
      "logps/rejected": -843.6468505859375,
      "loss": 0.42,
      "rewards/accuracies": 0.784375011920929,
      "rewards/chosen": -1.0640926361083984,
      "rewards/margins": 1.4147446155548096,
      "rewards/rejected": -2.478837490081787,
      "step": 60
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.956279997278043e-08,
      "logits/chosen": -1.9821714162826538,
      "logits/rejected": -1.8856136798858643,
      "logps/chosen": -447.898681640625,
      "logps/pi_response": -278.2708740234375,
      "logps/ref_response": -217.0521697998047,
      "logps/rejected": -860.4439697265625,
      "loss": 0.4232,
      "rewards/accuracies": 0.7906249761581421,
      "rewards/chosen": -1.141607403755188,
      "rewards/margins": 1.4329321384429932,
      "rewards/rejected": -2.5745396614074707,
      "step": 70
    },
    {
      "epoch": 0.99,
      "step": 79,
      "total_flos": 0.0,
      "train_loss": 0.4766373211824441,
      "train_runtime": 4474.9999,
      "train_samples_per_second": 4.554,
      "train_steps_per_second": 0.018
    }
  ],
  "logging_steps": 10,
  "max_steps": 79,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}