{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 48,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.020833333333333332,
      "grad_norm": 11.455097876072475,
      "learning_rate": 4e-09,
      "logits/chosen": -2.8467929363250732,
      "logits/rejected": -2.780571222305298,
      "logps/chosen": -151.4452667236328,
      "logps/pi_response": -91.28145599365234,
      "logps/ref_response": -91.28145599365234,
      "logps/rejected": -264.91851806640625,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.20833333333333334,
      "grad_norm": 12.46753151950194,
      "learning_rate": 1.9340161087325482e-08,
      "logits/chosen": -2.7625913619995117,
      "logits/rejected": -2.722522497177124,
      "logps/chosen": -257.7454528808594,
      "logps/pi_response": -119.42616271972656,
      "logps/ref_response": -119.43565368652344,
      "logps/rejected": -241.74166870117188,
      "loss": 0.6932,
      "rewards/accuracies": 0.4513888955116272,
      "rewards/chosen": 0.0003132917336188257,
      "rewards/margins": 0.0002936705423053354,
      "rewards/rejected": 1.9621227693278342e-05,
      "step": 10
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 11.771419982484536,
      "learning_rate": 1.4572423233046386e-08,
      "logits/chosen": -2.7909836769104004,
      "logits/rejected": -2.7895350456237793,
      "logps/chosen": -241.4215545654297,
      "logps/pi_response": -111.34303283691406,
      "logps/ref_response": -111.34957122802734,
      "logps/rejected": -252.1947784423828,
      "loss": 0.6929,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.0006047965725883842,
      "rewards/margins": 0.0007416198495775461,
      "rewards/rejected": -0.0013464164221659303,
      "step": 20
    },
    {
      "epoch": 0.625,
      "grad_norm": 11.645476844633674,
      "learning_rate": 7.470666176083192e-09,
      "logits/chosen": -2.782238483428955,
      "logits/rejected": -2.719805955886841,
      "logps/chosen": -234.60403442382812,
      "logps/pi_response": -108.2123031616211,
      "logps/ref_response": -108.2181625366211,
      "logps/rejected": -235.2075653076172,
      "loss": 0.6925,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.0014660778688266873,
      "rewards/margins": 0.0014644510811194777,
      "rewards/rejected": -0.0029305291827768087,
      "step": 30
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 11.258043931060174,
      "learning_rate": 1.6600218211012219e-09,
      "logits/chosen": -2.769465923309326,
      "logits/rejected": -2.7586264610290527,
      "logps/chosen": -239.3582000732422,
      "logps/pi_response": -107.45182800292969,
      "logps/ref_response": -107.4380111694336,
      "logps/rejected": -248.4120330810547,
      "loss": 0.6922,
      "rewards/accuracies": 0.6031249761581421,
      "rewards/chosen": -0.002408261876553297,
      "rewards/margins": 0.002155891852453351,
      "rewards/rejected": -0.004564153961837292,
      "step": 40
    },
    {
      "epoch": 1.0,
      "step": 48,
      "total_flos": 0.0,
      "train_loss": 0.692573219537735,
      "train_runtime": 2218.1613,
      "train_samples_per_second": 5.512,
      "train_steps_per_second": 0.022
    }
  ],
  "logging_steps": 10,
  "max_steps": 48,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}