{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9921671018276762,
  "eval_steps": 100,
  "global_step": 95,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 5.000000000000001e-07,
      "logits/chosen": 0.8826487064361572,
      "logits/rejected": 0.921362042427063,
      "logps/chosen": -36.58121871948242,
      "logps/rejected": -54.902320861816406,
      "loss": 0.0001,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.1,
      "learning_rate": 5e-06,
      "logits/chosen": 0.8916183114051819,
      "logits/rejected": 0.873969316482544,
      "logps/chosen": -87.77237701416016,
      "logps/rejected": -96.3612289428711,
      "loss": 0.0001,
      "rewards/accuracies": 0.2222222238779068,
      "rewards/chosen": 0.0002965294988825917,
      "rewards/margins": 0.00019970430003013462,
      "rewards/rejected": 9.682523523224518e-05,
      "step": 10
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.83118057351089e-06,
      "logits/chosen": 0.7805377244949341,
      "logits/rejected": 0.8473492860794067,
      "logps/chosen": -91.75300598144531,
      "logps/rejected": -85.11421966552734,
      "loss": 0.0001,
      "rewards/accuracies": 0.25,
      "rewards/chosen": 4.452450230019167e-05,
      "rewards/margins": -0.00017617340199649334,
      "rewards/rejected": 0.00022069788246881217,
      "step": 20
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.3475222930516484e-06,
      "logits/chosen": 0.8648212552070618,
      "logits/rejected": 0.8478900194168091,
      "logps/chosen": -85.289306640625,
      "logps/rejected": -78.0145492553711,
      "loss": 0.0001,
      "rewards/accuracies": 0.28125,
      "rewards/chosen": 0.00029949203599244356,
      "rewards/margins": 6.509931699838489e-05,
      "rewards/rejected": 0.00023439270444214344,
      "step": 30
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.6143458894413463e-06,
      "logits/chosen": 0.7649093270301819,
      "logits/rejected": 0.8426358103752136,
      "logps/chosen": -122.76078033447266,
      "logps/rejected": -108.79180908203125,
      "loss": 0.0001,
      "rewards/accuracies": 0.26249998807907104,
      "rewards/chosen": -0.0002823365502990782,
      "rewards/margins": -9.583422070136294e-05,
      "rewards/rejected": -0.00018650232232175767,
      "step": 40
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.730670898658255e-06,
      "logits/chosen": 0.8404033780097961,
      "logits/rejected": 0.8685251474380493,
      "logps/chosen": -65.78496551513672,
      "logps/rejected": -71.65745544433594,
      "loss": 0.0001,
      "rewards/accuracies": 0.23749999701976776,
      "rewards/chosen": -0.0002987021580338478,
      "rewards/margins": -0.0003552257257979363,
      "rewards/rejected": 5.65235604881309e-05,
      "step": 50
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8158425248197931e-06,
      "logits/chosen": 0.8147390484809875,
      "logits/rejected": 0.8857711553573608,
      "logps/chosen": -117.61279296875,
      "logps/rejected": -110.9428482055664,
      "loss": 0.0001,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": 0.00020218415011186153,
      "rewards/margins": 0.0003176866448484361,
      "rewards/rejected": -0.00011550241470104083,
      "step": 60
    },
    {
      "epoch": 0.73,
      "learning_rate": 9.934134090518593e-07,
      "logits/chosen": 0.8052595257759094,
      "logits/rejected": 0.8626030087471008,
      "logps/chosen": -82.66513061523438,
      "logps/rejected": -83.91613006591797,
      "loss": 0.0001,
      "rewards/accuracies": 0.2750000059604645,
      "rewards/chosen": -1.9253784557804465e-05,
      "rewards/margins": 6.199729250511155e-05,
      "rewards/rejected": -8.125107706291601e-05,
      "step": 70
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.7445716067596506e-07,
      "logits/chosen": 0.7661033272743225,
      "logits/rejected": 0.829667866230011,
      "logps/chosen": -105.8118667602539,
      "logps/rejected": -104.9473876953125,
      "loss": 0.0001,
      "rewards/accuracies": 0.2874999940395355,
      "rewards/chosen": 2.1945637854514644e-05,
      "rewards/margins": -0.00017051820759661496,
      "rewards/rejected": 0.00019246386364102364,
      "step": 80
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.256725079024554e-08,
      "logits/chosen": 0.8347122073173523,
      "logits/rejected": 0.8553287386894226,
      "logps/chosen": -77.658203125,
      "logps/rejected": -89.99678039550781,
      "loss": 0.0001,
      "rewards/accuracies": 0.34375,
      "rewards/chosen": 0.0003329566097818315,
      "rewards/margins": 0.0006740919779986143,
      "rewards/rejected": -0.0003411353682167828,
      "step": 90
    },
    {
      "epoch": 0.99,
      "step": 95,
      "total_flos": 0.0,
      "train_loss": 0.0001254068662283795,
      "train_runtime": 1151.127,
      "train_samples_per_second": 5.31,
      "train_steps_per_second": 0.083
    }
  ],
  "logging_steps": 10,
  "max_steps": 95,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}