|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9921671018276762,
  "eval_steps": 100,
  "global_step": 95,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 5.000000000000001e-07,
      "logits/chosen": 0.8826487064361572,
      "logits/rejected": 0.921362042427063,
      "logps/chosen": -36.58121871948242,
      "logps/rejected": -54.902320861816406,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.1,
      "learning_rate": 5e-06,
      "logits/chosen": 0.8917223811149597,
      "logits/rejected": 0.875190019607544,
      "logps/chosen": -87.82333374023438,
      "logps/rejected": -96.38070678710938,
      "loss": 0.6934,
      "rewards/accuracies": 0.2361111044883728,
      "rewards/chosen": -0.00021312937315087765,
      "rewards/margins": -0.00011523600551299751,
      "rewards/rejected": -9.78933458100073e-05,
      "step": 10
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.83118057351089e-06,
      "logits/chosen": 0.780292272567749,
      "logits/rejected": 0.8473358154296875,
      "logps/chosen": -91.71416473388672,
      "logps/rejected": -85.1246566772461,
      "loss": 0.6932,
      "rewards/accuracies": 0.25,
      "rewards/chosen": 0.00043298042146489024,
      "rewards/margins": 0.00031665078131482005,
      "rewards/rejected": 0.00011632966197794303,
      "step": 20
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.3475222930516484e-06,
      "logits/chosen": 0.8648103475570679,
      "logits/rejected": 0.8488438725471497,
      "logps/chosen": -85.26679992675781,
      "logps/rejected": -78.0839614868164,
      "loss": 0.6929,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": 0.0005245368229225278,
      "rewards/margins": 0.00098425371106714,
      "rewards/rejected": -0.0004597169754561037,
      "step": 30
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.6143458894413463e-06,
      "logits/chosen": 0.7656652927398682,
      "logits/rejected": 0.8423829078674316,
      "logps/chosen": -122.68544006347656,
      "logps/rejected": -108.7737045288086,
      "loss": 0.6931,
      "rewards/accuracies": 0.28125,
      "rewards/chosen": 0.00047112005995586514,
      "rewards/margins": 0.00047660223208367825,
      "rewards/rejected": -5.482271262735594e-06,
      "step": 40
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.730670898658255e-06,
      "logits/chosen": 0.8398499488830566,
      "logits/rejected": 0.8686380386352539,
      "logps/chosen": -65.76744079589844,
      "logps/rejected": -71.65315246582031,
      "loss": 0.6932,
      "rewards/accuracies": 0.23749999701976776,
      "rewards/chosen": -0.00012343151320237666,
      "rewards/margins": -0.0002229490492027253,
      "rewards/rejected": 9.951753600034863e-05,
      "step": 50
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8158425248197931e-06,
      "logits/chosen": 0.814559817314148,
      "logits/rejected": 0.8853690028190613,
      "logps/chosen": -117.60276794433594,
      "logps/rejected": -110.95735168457031,
      "loss": 0.6931,
      "rewards/accuracies": 0.32499998807907104,
      "rewards/chosen": 0.0003024066681973636,
      "rewards/margins": 0.000562971574254334,
      "rewards/rejected": -0.0002605649351608008,
      "step": 60
    },
    {
      "epoch": 0.73,
      "learning_rate": 9.934134090518593e-07,
      "logits/chosen": 0.8051468133926392,
      "logits/rejected": 0.8624836802482605,
      "logps/chosen": -82.67887115478516,
      "logps/rejected": -83.9195327758789,
      "loss": 0.6932,
      "rewards/accuracies": 0.26249998807907104,
      "rewards/chosen": -0.00015662939404137433,
      "rewards/margins": -4.1384984797332436e-05,
      "rewards/rejected": -0.00011524439469212666,
      "step": 70
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.7445716067596506e-07,
      "logits/chosen": 0.7656813263893127,
      "logits/rejected": 0.8298920392990112,
      "logps/chosen": -105.78785705566406,
      "logps/rejected": -104.972900390625,
      "loss": 0.6931,
      "rewards/accuracies": 0.2750000059604645,
      "rewards/chosen": 0.0002620227460283786,
      "rewards/margins": 0.00032469426514580846,
      "rewards/rejected": -6.267154822126031e-05,
      "step": 80
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.256725079024554e-08,
      "logits/chosen": 0.8350532650947571,
      "logits/rejected": 0.8562027215957642,
      "logps/chosen": -77.66747283935547,
      "logps/rejected": -90.00565338134766,
      "loss": 0.6929,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": 0.00024019060947466642,
      "rewards/margins": 0.0006700255908071995,
      "rewards/rejected": -0.0004298349958844483,
      "step": 90
    },
    {
      "epoch": 0.99,
      "step": 95,
      "total_flos": 0.0,
      "train_loss": 0.6931231824975265,
      "train_runtime": 1147.8486,
      "train_samples_per_second": 5.326,
      "train_steps_per_second": 0.083
    }
  ],
  "logging_steps": 10,
  "max_steps": 95,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|