zephyr-7b-gpo-update3-i1 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 2.0000000000000002e-07,
"logits/chosen": -1.8503975868225098,
"logits/rejected": -1.8503975868225098,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0011,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.04,
"learning_rate": 2.0000000000000003e-06,
"logits/chosen": -1.8588156700134277,
"logits/rejected": -1.8588156700134277,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0019,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 10
},
{
"epoch": 0.08,
"learning_rate": 4.000000000000001e-06,
"logits/chosen": -1.970517873764038,
"logits/rejected": -1.970517873764038,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0018,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 20
},
{
"epoch": 0.12,
"learning_rate": 4.993910125649561e-06,
"logits/chosen": -1.9209930896759033,
"logits/rejected": -1.9209930896759033,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0013,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 30
},
{
"epoch": 0.16,
"learning_rate": 4.9453690018345144e-06,
"logits/chosen": -1.883547067642212,
"logits/rejected": -1.883547067642212,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0019,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 40
},
{
"epoch": 0.2,
"learning_rate": 4.849231551964771e-06,
"logits/chosen": -1.9128715991973877,
"logits/rejected": -1.9128715991973877,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0013,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 50
},
{
"epoch": 0.24,
"learning_rate": 4.707368982147318e-06,
"logits/chosen": -2.0107295513153076,
"logits/rejected": -2.0107295513153076,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0019,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 60
},
{
"epoch": 0.28,
"learning_rate": 4.522542485937369e-06,
"logits/chosen": -1.9920228719711304,
"logits/rejected": -1.9920228719711304,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0014,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 70
},
{
"epoch": 0.32,
"learning_rate": 4.2983495008466285e-06,
"logits/chosen": -1.8801155090332031,
"logits/rejected": -1.8801155090332031,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0024,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 80
},
{
"epoch": 0.36,
"learning_rate": 4.039153688314146e-06,
"logits/chosen": -2.050198793411255,
"logits/rejected": -2.050198793411255,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0021,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 90
},
{
"epoch": 0.4,
"learning_rate": 3.7500000000000005e-06,
"logits/chosen": -1.8852717876434326,
"logits/rejected": -1.8852717876434326,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0013,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 100
},
{
"epoch": 0.4,
"eval_logits/chosen": -1.9617642164230347,
"eval_logits/rejected": -1.8066532611846924,
"eval_logps/chosen": -266.6976013183594,
"eval_logps/rejected": -254.9398193359375,
"eval_loss": 0.053734518587589264,
"eval_rewards/accuracies": 0.0,
"eval_rewards/chosen": 0.0,
"eval_rewards/margins": 0.0,
"eval_rewards/rejected": 0.0,
"eval_runtime": 702.6753,
"eval_samples_per_second": 2.846,
"eval_steps_per_second": 1.423,
"step": 100
},
{
"epoch": 0.44,
"learning_rate": 3.436516483539781e-06,
"logits/chosen": -1.731688141822815,
"logits/rejected": -1.731688141822815,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0012,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 110
},
{
"epoch": 0.48,
"learning_rate": 3.1048047389991693e-06,
"logits/chosen": -1.8530235290527344,
"logits/rejected": -1.8530235290527344,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0013,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 120
},
{
"epoch": 0.52,
"learning_rate": 2.761321158169134e-06,
"logits/chosen": -2.0225424766540527,
"logits/rejected": -2.0225424766540527,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0016,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 130
},
{
"epoch": 0.56,
"learning_rate": 2.4127512582437486e-06,
"logits/chosen": -1.8995482921600342,
"logits/rejected": -1.8995482921600342,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0014,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 140
},
{
"epoch": 0.6,
"learning_rate": 2.0658795558326745e-06,
"logits/chosen": -1.86004638671875,
"logits/rejected": -1.8391777276992798,
"logps/chosen": -4.896004676818848,
"logps/rejected": -1.6084611415863037,
"loss": 0.0016,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 150
},
{
"epoch": 0.64,
"learning_rate": 1.7274575140626318e-06,
"logits/chosen": -2.013669490814209,
"logits/rejected": -2.013669490814209,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0019,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 160
},
{
"epoch": 0.68,
"learning_rate": 1.4040721330273063e-06,
"logits/chosen": -1.8206443786621094,
"logits/rejected": -1.8206443786621094,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0019,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 170
},
{
"epoch": 0.72,
"learning_rate": 1.1020177413231334e-06,
"logits/chosen": -1.9178645610809326,
"logits/rejected": -1.8847808837890625,
"logps/chosen": -10.10865306854248,
"logps/rejected": -3.267775297164917,
"loss": 0.0015,
"rewards/accuracies": 0.02500000037252903,
"rewards/chosen": -0.0017203291645273566,
"rewards/margins": 0.0016426773509010673,
"rewards/rejected": -0.003363006515428424,
"step": 180
},
{
"epoch": 0.76,
"learning_rate": 8.271734841028553e-07,
"logits/chosen": -1.8469321727752686,
"logits/rejected": -1.8469321727752686,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0021,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 190
},
{
"epoch": 0.8,
"learning_rate": 5.848888922025553e-07,
"logits/chosen": -1.9015337228775024,
"logits/rejected": -1.9031871557235718,
"logps/chosen": -5.545676231384277,
"logps/rejected": -6.33315372467041,
"loss": 0.0013,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0002652378170751035,
"rewards/margins": -0.0003098316374234855,
"rewards/rejected": 0.000575069454498589,
"step": 200
},
{
"epoch": 0.8,
"eval_logits/chosen": -1.969506859779358,
"eval_logits/rejected": -1.813860297203064,
"eval_logps/chosen": -266.95574951171875,
"eval_logps/rejected": -254.3690948486328,
"eval_loss": 0.057529229670763016,
"eval_rewards/accuracies": 0.3799999952316284,
"eval_rewards/chosen": -0.001290707616135478,
"eval_rewards/margins": -0.004144246224313974,
"eval_rewards/rejected": 0.00285353884100914,
"eval_runtime": 704.8428,
"eval_samples_per_second": 2.838,
"eval_steps_per_second": 1.419,
"step": 200
},
{
"epoch": 0.84,
"learning_rate": 3.798797596089351e-07,
"logits/chosen": -1.8618186712265015,
"logits/rejected": -1.8618186712265015,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0012,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 210
},
{
"epoch": 0.88,
"learning_rate": 2.1613635589349756e-07,
"logits/chosen": -2.0101757049560547,
"logits/rejected": -2.0101757049560547,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0018,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 220
},
{
"epoch": 0.92,
"learning_rate": 9.684576015420277e-08,
"logits/chosen": -1.938612699508667,
"logits/rejected": -1.9380161762237549,
"logps/chosen": -1.706319808959961,
"logps/rejected": -1.771810531616211,
"loss": 0.0022,
"rewards/accuracies": 0.0,
"rewards/chosen": -0.0014353947481140494,
"rewards/margins": -0.0004587741568684578,
"rewards/rejected": -0.0009766205912455916,
"step": 230
},
{
"epoch": 0.96,
"learning_rate": 2.4329828146074096e-08,
"logits/chosen": -2.1376256942749023,
"logits/rejected": -2.1376256942749023,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0016,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 240
},
{
"epoch": 1.0,
"learning_rate": 0.0,
"logits/chosen": -1.9790098667144775,
"logits/rejected": -1.9790098667144775,
"logps/chosen": 0.0,
"logps/rejected": 0.0,
"loss": 0.0018,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 250
},
{
"epoch": 1.0,
"step": 250,
"total_flos": 0.0,
"train_loss": 0.0016641309279948472,
"train_runtime": 2302.1859,
"train_samples_per_second": 0.434,
"train_steps_per_second": 0.109
}
],
"logging_steps": 10,
"max_steps": 250,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
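
For reference, a minimal sketch of how the log_history entries above might be inspected, assuming Python 3 and that this file has been saved locally as trainer_state.json (both assumptions, not part of the repository):

import json

# Load the exported Trainer state (local path is an assumption).
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training records carry a "loss" key; eval and summary entries do not.
train_logs = [entry for entry in state["log_history"] if "loss" in entry]

for entry in train_logs:
    print(f"step {entry['step']:>3}  lr {entry['learning_rate']:.2e}  loss {entry['loss']:.4f}")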