{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 200,
  "global_step": 125,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008,
      "grad_norm": 147.89808677022958,
      "learning_rate": 3.846153846153846e-08,
      "logits/generated": -2.517829418182373,
      "logits/real": -2.19148588180542,
      "logps/generated": -122.74644470214844,
      "logps/real": -171.45494079589844,
      "loss": 0.843,
      "rewards/accuracies": 0.0,
      "rewards/generated": 0.0,
      "rewards/margins": 0.0,
      "rewards/real": 0.0,
      "step": 1
    },
    {
      "epoch": 0.08,
      "grad_norm": 134.47376195877482,
      "learning_rate": 3.8461538461538463e-07,
      "logits/generated": -2.4292914867401123,
      "logits/real": -2.433361530303955,
      "logps/generated": -128.08148193359375,
      "logps/real": -129.51199340820312,
      "loss": 0.8235,
      "rewards/accuracies": 0.4444444477558136,
      "rewards/generated": -0.045434772968292236,
      "rewards/margins": 0.06507062166929245,
      "rewards/real": 0.019635863602161407,
      "step": 10
    },
    {
      "epoch": 0.16,
      "grad_norm": 116.28359724719947,
      "learning_rate": 4.6874999999999996e-07,
      "logits/generated": -2.5721659660339355,
      "logits/real": -2.5940754413604736,
      "logps/generated": -140.29403686523438,
      "logps/real": -152.1209259033203,
      "loss": 0.7834,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/generated": -0.023977667093276978,
      "rewards/margins": 0.11755923926830292,
      "rewards/real": 0.09358155727386475,
      "step": 20
    },
    {
      "epoch": 0.24,
      "grad_norm": 125.42421242327684,
      "learning_rate": 4.2410714285714283e-07,
      "logits/generated": -2.451955556869507,
      "logits/real": -2.5324044227600098,
      "logps/generated": -132.75013732910156,
      "logps/real": -126.1029052734375,
      "loss": 0.8007,
      "rewards/accuracies": 0.699999988079071,
      "rewards/generated": 0.14888708293437958,
      "rewards/margins": 0.19555671513080597,
      "rewards/real": 0.34444382786750793,
      "step": 30
    },
    {
      "epoch": 0.32,
      "grad_norm": 120.25166417318326,
      "learning_rate": 3.794642857142857e-07,
      "logits/generated": -2.4365317821502686,
      "logits/real": -2.4784128665924072,
      "logps/generated": -121.6016845703125,
      "logps/real": -121.78703308105469,
      "loss": 0.7746,
      "rewards/accuracies": 0.699999988079071,
      "rewards/generated": 0.10297657549381256,
      "rewards/margins": 0.5478044152259827,
      "rewards/real": 0.650780975818634,
      "step": 40
    },
    {
      "epoch": 0.4,
      "grad_norm": 175.1395451582952,
      "learning_rate": 3.348214285714285e-07,
      "logits/generated": -2.4824845790863037,
      "logits/real": -2.628711223602295,
      "logps/generated": -108.2540054321289,
      "logps/real": -110.33675384521484,
      "loss": 0.8005,
      "rewards/accuracies": 0.625,
      "rewards/generated": 0.8452979326248169,
      "rewards/margins": 0.32963427901268005,
      "rewards/real": 1.1749321222305298,
      "step": 50
    },
    {
      "epoch": 0.48,
      "grad_norm": 136.08698116076778,
      "learning_rate": 2.9017857142857143e-07,
      "logits/generated": -2.5444846153259277,
      "logits/real": -2.5586628913879395,
      "logps/generated": -103.48173522949219,
      "logps/real": -113.9021224975586,
      "loss": 0.7874,
      "rewards/accuracies": 0.699999988079071,
      "rewards/generated": 0.6939820051193237,
      "rewards/margins": 0.3397112190723419,
      "rewards/real": 1.0336930751800537,
      "step": 60
    },
    {
      "epoch": 0.56,
      "grad_norm": 146.30984170549294,
      "learning_rate": 2.4553571428571425e-07,
      "logits/generated": -2.645383358001709,
      "logits/real": -2.6484053134918213,
      "logps/generated": -125.13639831542969,
      "logps/real": -122.11183166503906,
      "loss": 0.8997,
      "rewards/accuracies": 0.5,
      "rewards/generated": 1.455406665802002,
      "rewards/margins": -0.3015662729740143,
      "rewards/real": 1.1538405418395996,
      "step": 70
    },
    {
      "epoch": 0.64,
      "grad_norm": 84.4045941576579,
      "learning_rate": 2.0089285714285714e-07,
      "logits/generated": -2.6049187183380127,
      "logits/real": -2.5537009239196777,
      "logps/generated": -118.47220611572266,
      "logps/real": -142.0023193359375,
      "loss": 0.7769,
      "rewards/accuracies": 0.699999988079071,
      "rewards/generated": 0.9211456179618835,
      "rewards/margins": 0.8597875833511353,
      "rewards/real": 1.7809330224990845,
      "step": 80
    },
    {
      "epoch": 0.72,
      "grad_norm": 127.03498544690872,
      "learning_rate": 1.5624999999999999e-07,
      "logits/generated": -2.618711233139038,
      "logits/real": -2.6723551750183105,
      "logps/generated": -114.33888244628906,
      "logps/real": -143.75564575195312,
      "loss": 0.7321,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/generated": 1.1004014015197754,
      "rewards/margins": 0.39606350660324097,
      "rewards/real": 1.4964649677276611,
      "step": 90
    },
    {
      "epoch": 0.8,
      "grad_norm": 122.17610215112185,
      "learning_rate": 1.1160714285714285e-07,
      "logits/generated": -2.5757622718811035,
      "logits/real": -2.5916008949279785,
      "logps/generated": -127.0289306640625,
      "logps/real": -129.51431274414062,
      "loss": 0.8665,
      "rewards/accuracies": 0.625,
      "rewards/generated": 1.3618738651275635,
      "rewards/margins": 0.11677682399749756,
      "rewards/real": 1.4786508083343506,
      "step": 100
    },
    {
      "epoch": 0.88,
      "grad_norm": 133.71019821071505,
      "learning_rate": 6.696428571428571e-08,
      "logits/generated": -2.4835140705108643,
      "logits/real": -2.498384952545166,
      "logps/generated": -125.1689682006836,
      "logps/real": -121.4903564453125,
      "loss": 0.7741,
      "rewards/accuracies": 0.550000011920929,
      "rewards/generated": 1.3967167139053345,
      "rewards/margins": 0.36560097336769104,
      "rewards/real": 1.7623176574707031,
      "step": 110
    },
    {
      "epoch": 0.96,
      "grad_norm": 87.60022795114257,
      "learning_rate": 2.2321428571428572e-08,
      "logits/generated": -2.532543897628784,
      "logits/real": -2.6096348762512207,
      "logps/generated": -124.0304183959961,
      "logps/real": -137.7096405029297,
      "loss": 0.8181,
      "rewards/accuracies": 0.625,
      "rewards/generated": 1.4160971641540527,
      "rewards/margins": 0.37906622886657715,
      "rewards/real": 1.7951635122299194,
      "step": 120
    },
    {
      "epoch": 1.0,
      "step": 125,
      "total_flos": 0.0,
      "train_loss": 0.7958872079849243,
      "train_runtime": 757.4298,
      "train_samples_per_second": 2.641,
      "train_steps_per_second": 0.165
    }
  ],
  "logging_steps": 10,
  "max_steps": 125,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}