{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.992,
  "eval_steps": 500,
  "global_step": 31,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 1.25e-07,
      "logits/chosen": -2.4519619941711426,
      "logits/rejected": -2.4373748302459717,
      "logps/chosen": -313.0509033203125,
      "logps/pi_response": -118.84771728515625,
      "logps/ref_response": -118.84771728515625,
      "logps/rejected": -805.4427490234375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.415111107797445e-07,
      "logits/chosen": -2.326218843460083,
      "logits/rejected": -2.284130811691284,
      "logps/chosen": -410.3197326660156,
      "logps/pi_response": -151.0199432373047,
      "logps/ref_response": -117.56155395507812,
      "logps/rejected": -908.8378295898438,
      "loss": 0.6621,
      "rewards/accuracies": 0.6493055820465088,
      "rewards/chosen": -0.6402885913848877,
      "rewards/margins": 0.8867037892341614,
      "rewards/rejected": -1.5269925594329834,
      "step": 10
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.782991918222275e-07,
      "logits/chosen": -2.226139545440674,
      "logits/rejected": -2.1810803413391113,
      "logps/chosen": -529.3506469726562,
      "logps/pi_response": -162.92213439941406,
      "logps/ref_response": -109.0821533203125,
      "logps/rejected": -1107.5057373046875,
      "loss": 0.6638,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.5208525657653809,
      "rewards/margins": 1.4993822574615479,
      "rewards/rejected": -3.0202348232269287,
      "step": 20
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.690410564514244e-09,
      "logits/chosen": -2.2474887371063232,
      "logits/rejected": -2.211569309234619,
      "logps/chosen": -406.5466613769531,
      "logps/pi_response": -142.61892700195312,
      "logps/ref_response": -118.6996078491211,
      "logps/rejected": -982.9817504882812,
      "loss": 0.5018,
      "rewards/accuracies": 0.7906249761581421,
      "rewards/chosen": -0.5995205044746399,
      "rewards/margins": 0.9062793850898743,
      "rewards/rejected": -1.5057997703552246,
      "step": 30
    },
    {
      "epoch": 0.99,
      "step": 31,
      "total_flos": 0.0,
      "train_loss": 0.6071660441737021,
      "train_runtime": 1788.9782,
      "train_samples_per_second": 4.472,
      "train_steps_per_second": 0.017
    }
  ],
  "logging_steps": 10,
  "max_steps": 31,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}