llava-lora-dpo-1227lrvtail2000_sft-self-sampled-beta-0.5-lr-5e-5-avg-False-epoch-2/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.8666666666666667,
  "global_step": 14,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0,
      "logps_train/chosen": -100.49485778808594,
      "logps_train/ref_chosen": -100.5,
      "logps_train/ref_rejected": -105.0,
      "logps_train/rejected": -104.80752563476562,
      "rewards_train/accuracies": 0.46875,
      "rewards_train/chosen": -0.020498279482126236,
      "rewards_train/margins": -0.021032828837633133,
      "rewards_train/rejected": 0.000534549355506897,
      "step": 0
    },
    {
      "epoch": 0,
      "logps_train/chosen": -89.90950012207031,
      "logps_train/ref_chosen": -90.0,
      "logps_train/ref_rejected": -101.0,
      "logps_train/rejected": -100.86872863769531,
      "rewards_train/accuracies": 0.5,
      "rewards_train/chosen": 0.0191262885928154,
      "rewards_train/margins": 0.0020734891295433044,
      "rewards_train/rejected": 0.017052799463272095,
      "step": 0
    },
    {
      "epoch": 0.13,
      "learning_rate": 5e-05,
      "loss": 0.7043,
      "step": 1
    },
    {
      "epoch": 0.13,
      "logps_train/chosen": -104.38166809082031,
      "logps_train/ref_chosen": -104.5,
      "logps_train/ref_rejected": -98.0,
      "logps_train/rejected": -97.89907836914062,
      "rewards_train/accuracies": 0.515625,
      "rewards_train/chosen": 0.03059956058859825,
      "rewards_train/margins": 0.004916800186038017,
      "rewards_train/rejected": 0.025682760402560234,
      "step": 1
    },
    {
      "epoch": 0.13,
      "logps_train/chosen": -94.88069152832031,
      "logps_train/ref_chosen": -95.0,
      "logps_train/ref_rejected": -95.0,
      "logps_train/rejected": -95.29293823242188,
      "rewards_train/accuracies": 0.546875,
      "rewards_train/chosen": -0.009438544511795044,
      "rewards_train/margins": -0.020688317716121674,
      "rewards_train/rejected": 0.01124977320432663,
      "step": 1
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.92735454356513e-05,
      "loss": 0.7061,
      "step": 2
    },
    {
      "epoch": 0.27,
      "logps_train/chosen": -82.24234771728516,
      "logps_train/ref_chosen": -82.5,
      "logps_train/ref_rejected": -89.0,
      "logps_train/rejected": -88.62696075439453,
      "rewards_train/accuracies": 0.421875,
      "rewards_train/chosen": 0.15220294892787933,
      "rewards_train/margins": -0.038771942257881165,
      "rewards_train/rejected": 0.1909748911857605,
      "step": 2
    },
    {
      "epoch": 0.27,
      "logps_train/chosen": -99.8189926147461,
      "logps_train/ref_chosen": -100.0,
      "logps_train/ref_rejected": -106.0,
      "logps_train/rejected": -105.7823257446289,
      "rewards_train/accuracies": 0.484375,
      "rewards_train/chosen": 0.17058409750461578,
      "rewards_train/margins": 0.055155299603939056,
      "rewards_train/rejected": 0.11542879790067673,
      "step": 2
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.713640064133025e-05,
      "loss": 0.7021,
      "step": 3
    },
    {
      "epoch": 0.4,
      "logps_train/chosen": -85.39012145996094,
      "logps_train/ref_chosen": -86.0,
      "logps_train/ref_rejected": -90.0,
      "logps_train/rejected": -89.733154296875,
      "rewards_train/accuracies": 0.609375,
      "rewards_train/chosen": 0.22778785228729248,
      "rewards_train/margins": 0.07618004083633423,
      "rewards_train/rejected": 0.15160781145095825,
      "step": 3
    },
    {
      "epoch": 0.4,
      "logps_train/chosen": -91.78669738769531,
      "logps_train/ref_chosen": -92.0,
      "logps_train/ref_rejected": -77.5,
      "logps_train/rejected": -77.08824157714844,
      "rewards_train/accuracies": 0.46875,
      "rewards_train/chosen": 0.2614349126815796,
      "rewards_train/margins": 0.015637695789337158,
      "rewards_train/rejected": 0.24579721689224243,
      "step": 3
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.371276870427753e-05,
      "loss": 0.701,
      "step": 4
    },
    {
      "epoch": 0.53,
      "logps_train/chosen": -88.01852416992188,
      "logps_train/ref_chosen": -88.5,
      "logps_train/ref_rejected": -91.5,
      "logps_train/rejected": -91.21440124511719,
      "rewards_train/accuracies": 0.625,
      "rewards_train/chosen": 0.29049867391586304,
      "rewards_train/margins": 0.13419204950332642,
      "rewards_train/rejected": 0.15630662441253662,
      "step": 4
    },
    {
      "epoch": 0.53,
      "logps_train/chosen": -103.98135375976562,
      "logps_train/ref_chosen": -105.0,
      "logps_train/ref_rejected": -104.5,
      "logps_train/rejected": -103.89208984375,
      "rewards_train/accuracies": 0.640625,
      "rewards_train/chosen": 0.4727012515068054,
      "rewards_train/margins": 0.21867156028747559,
      "rewards_train/rejected": 0.25402969121932983,
      "step": 4
    },
    {
      "epoch": 0.67,
      "learning_rate": 3.920161866827889e-05,
      "loss": 0.6539,
      "step": 5
    },
    {
      "epoch": 0.67,
      "logps_train/chosen": -108.25979614257812,
      "logps_train/ref_chosen": -109.0,
      "logps_train/ref_rejected": -99.5,
      "logps_train/rejected": -98.85391235351562,
      "rewards_train/accuracies": 0.609375,
      "rewards_train/chosen": 0.5204911231994629,
      "rewards_train/margins": 0.25702032446861267,
      "rewards_train/rejected": 0.2634707987308502,
      "step": 5
    },
    {
      "epoch": 0.67,
      "logps_train/chosen": -81.6476821899414,
      "logps_train/ref_chosen": -82.5,
      "logps_train/ref_rejected": -80.0,
      "logps_train/rejected": -78.95549011230469,
      "rewards_train/accuracies": 0.484375,
      "rewards_train/chosen": 0.4765724837779999,
      "rewards_train/margins": -0.0074152350425720215,
      "rewards_train/rejected": 0.4839877188205719,
      "step": 5
    },
    {
      "epoch": 0.8,
      "learning_rate": 3.386512217606339e-05,
      "loss": 0.7324,
      "step": 6
    },
    {
      "epoch": 0.8,
      "logps_train/chosen": -91.17733001708984,
      "logps_train/ref_chosen": -92.0,
      "logps_train/ref_rejected": -89.5,
      "logps_train/rejected": -88.92269134521484,
      "rewards_train/accuracies": 0.53125,
      "rewards_train/chosen": 0.518998384475708,
      "rewards_train/margins": 0.1605202555656433,
      "rewards_train/rejected": 0.3584781289100647,
      "step": 6
    },
    {
      "epoch": 0.8,
      "logps_train/chosen": -87.36846923828125,
      "logps_train/ref_chosen": -88.5,
      "logps_train/ref_rejected": -94.0,
      "logps_train/rejected": -93.19339752197266,
      "rewards_train/accuracies": 0.59375,
      "rewards_train/chosen": 0.4690890610218048,
      "rewards_train/margins": 0.18302035331726074,
      "rewards_train/rejected": 0.28606870770454407,
      "step": 6
    },
    {
      "epoch": 0.93,
      "learning_rate": 2.8013417006383076e-05,
      "loss": 0.6995,
      "step": 7
    },
    {
      "epoch": 0.93,
      "logps_train/chosen": -98.05281066894531,
      "logps_train/ref_chosen": -107.0,
      "logps_train/ref_rejected": -104.0,
      "logps_train/rejected": -112.13198852539062,
      "rewards_train/accuracies": 0.90625,
      "rewards_train/chosen": 4.626056671142578,
      "rewards_train/margins": 8.7623610496521,
      "rewards_train/rejected": -4.1363043785095215,
      "step": 7
    },
    {
      "epoch": 0.93,
      "logps_train/chosen": -88.66700744628906,
      "logps_train/ref_chosen": -96.0,
      "logps_train/ref_rejected": -90.5,
      "logps_train/rejected": -95.32691955566406,
      "rewards_train/accuracies": 0.96875,
      "rewards_train/chosen": 3.6146154403686523,
      "rewards_train/margins": 6.129823684692383,
      "rewards_train/rejected": -2.5152082443237305,
      "step": 7
    },
    {
      "epoch": 1.07,
      "learning_rate": 2.1986582993616926e-05,
      "loss": 0.1626,
      "step": 8
    },
    {
      "epoch": 1.07,
      "logps_train/chosen": -94.38668060302734,
      "logps_train/ref_chosen": -104.0,
      "logps_train/ref_rejected": -91.0,
      "logps_train/rejected": -98.58480834960938,
      "rewards_train/accuracies": 1.0,
      "rewards_train/chosen": 4.836201190948486,
      "rewards_train/margins": 8.564030647277832,
      "rewards_train/rejected": -3.7278294563293457,
      "step": 8
    },
    {
      "epoch": 1.07,
      "logps_train/chosen": -82.82890319824219,
      "logps_train/ref_chosen": -90.0,
      "logps_train/ref_rejected": -91.0,
      "logps_train/rejected": -97.17626953125,
      "rewards_train/accuracies": 0.984375,
      "rewards_train/chosen": 3.66611385345459,
      "rewards_train/margins": 6.670999765396118,
      "rewards_train/rejected": -3.0048859119415283,
      "step": 8
    },
    {
      "epoch": 1.2,
      "learning_rate": 1.613487782393661e-05,
      "loss": 0.0834,
      "step": 9
    },
    {
      "epoch": 1.2,
      "logps_train/chosen": -91.4707260131836,
      "logps_train/ref_chosen": -100.0,
      "logps_train/ref_rejected": -91.5,
      "logps_train/rejected": -99.23442077636719,
      "rewards_train/accuracies": 0.984375,
      "rewards_train/chosen": 4.370593070983887,
      "rewards_train/margins": 8.266491174697876,
      "rewards_train/rejected": -3.8958981037139893,
      "step": 9
    },
    {
      "epoch": 1.2,
      "logps_train/chosen": -89.33580780029297,
      "logps_train/ref_chosen": -98.0,
      "logps_train/ref_rejected": -95.5,
      "logps_train/rejected": -101.87576293945312,
      "rewards_train/accuracies": 0.984375,
      "rewards_train/chosen": 4.202215671539307,
      "rewards_train/margins": 7.457237482070923,
      "rewards_train/rejected": -3.255021810531616,
      "step": 9
    },
    {
      "epoch": 1.33,
      "learning_rate": 1.0798381331721109e-05,
      "loss": 0.0654,
      "step": 10
    },
    {
      "epoch": 1.33,
      "logps_train/chosen": -92.18928527832031,
      "logps_train/ref_chosen": -99.5,
      "logps_train/ref_rejected": -97.0,
      "logps_train/rejected": -102.6868896484375,
      "rewards_train/accuracies": 1.0,
      "rewards_train/chosen": 3.7073636054992676,
      "rewards_train/margins": 6.655301570892334,
      "rewards_train/rejected": -2.9479379653930664,
      "step": 10
    },
    {
      "epoch": 1.33,
      "logps_train/chosen": -83.0275650024414,
      "logps_train/ref_chosen": -91.0,
      "logps_train/ref_rejected": -97.5,
      "logps_train/rejected": -104.69689178466797,
      "rewards_train/accuracies": 0.984375,
      "rewards_train/chosen": 4.168407440185547,
      "rewards_train/margins": 7.665899753570557,
      "rewards_train/rejected": -3.4974923133850098,
      "step": 10
    },
    {
      "epoch": 1.47,
      "learning_rate": 6.28723129572247e-06,
      "loss": 0.0669,
      "step": 11
    },
    {
      "epoch": 1.47,
      "logps_train/chosen": -87.6376724243164,
      "logps_train/ref_chosen": -97.0,
      "logps_train/ref_rejected": -99.0,
      "logps_train/rejected": -106.98680877685547,
      "rewards_train/accuracies": 1.0,
      "rewards_train/chosen": 4.543467044830322,
      "rewards_train/margins": 8.686774730682373,
      "rewards_train/rejected": -4.143307685852051,
      "step": 11
    },
    {
      "epoch": 1.47,
      "logps_train/chosen": -73.75891876220703,
      "logps_train/ref_chosen": -81.0,
      "logps_train/ref_rejected": -90.0,
      "logps_train/rejected": -95.56880950927734,
      "rewards_train/accuracies": 0.96875,
      "rewards_train/chosen": 3.678358554840088,
      "rewards_train/margins": 6.431073427200317,
      "rewards_train/rejected": -2.7527148723602295,
      "step": 11
    },
    {
      "epoch": 1.6,
      "learning_rate": 2.8635993586697553e-06,
      "loss": 0.0643,
      "step": 12
    },
    {
      "epoch": 1.6,
      "logps_train/chosen": -81.66705322265625,
      "logps_train/ref_chosen": -90.0,
      "logps_train/ref_rejected": -96.0,
      "logps_train/rejected": -102.6563720703125,
      "rewards_train/accuracies": 1.0,
      "rewards_train/chosen": 4.149624824523926,
      "rewards_train/margins": 7.561353445053101,
      "rewards_train/rejected": -3.411728620529175,
      "step": 12
    },
    {
      "epoch": 1.6,
      "logps_train/chosen": -75.94863891601562,
      "logps_train/ref_chosen": -83.0,
      "logps_train/ref_rejected": -85.0,
      "logps_train/rejected": -91.42919158935547,
      "rewards_train/accuracies": 1.0,
      "rewards_train/chosen": 3.591111183166504,
      "rewards_train/margins": 6.727336406707764,
      "rewards_train/rejected": -3.1362252235412598,
      "step": 12
    },
    {
      "epoch": 1.73,
      "learning_rate": 7.264545643486997e-07,
      "loss": 0.08,
      "step": 13
    },
    {
      "epoch": 1.73,
      "logps_train/chosen": -94.35868835449219,
      "logps_train/ref_chosen": -105.0,
      "logps_train/ref_rejected": -101.0,
      "logps_train/rejected": -109.37545013427734,
      "rewards_train/accuracies": 0.984375,
      "rewards_train/chosen": 5.367044448852539,
      "rewards_train/margins": 9.60921335220337,
      "rewards_train/rejected": -4.24216890335083,
      "step": 13
    },
    {
      "epoch": 1.73,
      "logps_train/chosen": -81.82484436035156,
      "logps_train/ref_chosen": -91.0,
      "logps_train/ref_rejected": -85.0,
      "logps_train/rejected": -91.75001525878906,
      "rewards_train/accuracies": 1.0,
      "rewards_train/chosen": 4.351003646850586,
      "rewards_train/margins": 7.681576251983643,
      "rewards_train/rejected": -3.3305726051330566,
      "step": 13
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.0,
      "loss": 0.0427,
      "step": 14
    },
    {
      "epoch": 1.87,
      "step": 14,
      "total_flos": 0.0,
      "train_loss": 0.39032512956431936,
      "train_runtime": 164.095,
      "train_samples_per_second": 11.079,
      "train_steps_per_second": 0.085
    }
  ],
  "max_steps": 14,
  "num_train_epochs": 2,
  "total_flos": 0.0,
  "trial_name": null,
  "trial_params": null
}
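
The log_history above interleaves two kinds of records: optimizer-step entries with loss and learning_rate, and paired DPO reward entries (two per step, logged at step 0 through 13, with the final summary record at step 14). Below is a minimal Python sketch, not part of the checkpoint, for loading this trainer_state.json and summarizing loss, reward margin, and preference accuracy per step; the file path is taken from the directory name above, and averaging the two reward records per step is an illustrative choice, not something the trainer itself does.

import json
from collections import defaultdict

# Path assumed from the checkpoint directory named above.
PATH = "llava-lora-dpo-1227lrvtail2000_sft-self-sampled-beta-0.5-lr-5e-5-avg-False-epoch-2/trainer_state.json"

with open(PATH) as f:
    state = json.load(f)

# Separate loss records from DPO reward records, grouping the latter by step.
losses = {}
margins = defaultdict(list)
accuracies = defaultdict(list)

for record in state["log_history"]:
    step = record.get("step")
    if "loss" in record:
        losses[step] = record["loss"]
    if "rewards_train/margins" in record:
        margins[step].append(record["rewards_train/margins"])
        accuracies[step].append(record["rewards_train/accuracies"])

for step in sorted(margins):
    # Average the two reward records logged for this step.
    avg_margin = sum(margins[step]) / len(margins[step])
    avg_acc = sum(accuracies[step]) / len(accuracies[step])
    loss = losses.get(step, float("nan"))
    print(f"step {step:2d}  loss {loss:.4f}  margin {avg_margin:+.3f}  acc {avg_acc:.3f}")

Run against this file, the printout makes the trend easy to see: reward margins hover near zero and accuracies near 0.5 through step 6, then jump sharply from step 7 onward as the loss drops from roughly 0.7 to below 0.1.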