{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 1.6666666666666664e-08,
      "logits/chosen": -2.5598204135894775,
      "logits/rejected": -2.54148006439209,
      "logps/chosen": -231.18460083007812,
      "logps/pi_response": -153.6551971435547,
      "logps/ref_response": -153.6551971435547,
      "logps/rejected": -481.5467529296875,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.17,
      "learning_rate": 9.860114570402053e-08,
      "logits/chosen": -2.5779542922973633,
      "logits/rejected": -2.4980809688568115,
      "logps/chosen": -278.1629333496094,
      "logps/pi_response": -136.0104217529297,
      "logps/ref_response": -135.50714111328125,
      "logps/rejected": -486.1741027832031,
      "loss": 0.6872,
      "rewards/accuracies": 0.5833333134651184,
      "rewards/chosen": -0.016039522364735603,
      "rewards/margins": 0.00932595506310463,
      "rewards/rejected": -0.025365475565195084,
      "step": 10
    },
    {
      "epoch": 0.33,
      "learning_rate": 8.374915007591053e-08,
      "logits/chosen": -2.5219459533691406,
      "logits/rejected": -2.475327968597412,
      "logps/chosen": -305.75347900390625,
      "logps/pi_response": -152.5146026611328,
      "logps/ref_response": -147.16122436523438,
      "logps/rejected": -493.6817321777344,
      "loss": 0.6359,
      "rewards/accuracies": 0.6656249761581421,
      "rewards/chosen": -0.19058512151241302,
      "rewards/margins": 0.14784559607505798,
      "rewards/rejected": -0.3384307026863098,
      "step": 20
    },
    {
      "epoch": 0.5,
      "learning_rate": 5.738232820012406e-08,
      "logits/chosen": -2.4824159145355225,
      "logits/rejected": -2.4323043823242188,
      "logps/chosen": -296.9906921386719,
      "logps/pi_response": -145.09451293945312,
      "logps/ref_response": -141.3872528076172,
      "logps/rejected": -551.1701049804688,
      "loss": 0.586,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.26490381360054016,
      "rewards/margins": 0.3826388120651245,
      "rewards/rejected": -0.6475426554679871,
      "step": 30
    },
    {
      "epoch": 0.67,
      "learning_rate": 2.8496739886173992e-08,
      "logits/chosen": -2.471738815307617,
      "logits/rejected": -2.4239768981933594,
      "logps/chosen": -319.9653015136719,
      "logps/pi_response": -151.64266967773438,
      "logps/ref_response": -145.77267456054688,
      "logps/rejected": -595.8931884765625,
      "loss": 0.5548,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.4044647216796875,
      "rewards/margins": 0.6303603649139404,
      "rewards/rejected": -1.034825086593628,
      "step": 40
    },
    {
      "epoch": 0.84,
      "learning_rate": 6.947819411632222e-09,
      "logits/chosen": -2.472991466522217,
      "logits/rejected": -2.4102094173431396,
      "logps/chosen": -351.69976806640625,
      "logps/pi_response": -155.18856811523438,
      "logps/ref_response": -148.97946166992188,
      "logps/rejected": -625.48193359375,
      "loss": 0.5394,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.5062131285667419,
      "rewards/margins": 0.6884612441062927,
      "rewards/rejected": -1.1946743726730347,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.5946870900816836,
      "train_runtime": 3503.4654,
      "train_samples_per_second": 4.362,
      "train_steps_per_second": 0.017
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}