{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.8,
"global_step": 21,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"logps_train/chosen": -100.49485778808594,
"logps_train/ref_chosen": -100.5,
"logps_train/ref_rejected": -105.0,
"logps_train/rejected": -104.80752563476562,
"rewards_train/accuracies": 0.46875,
"rewards_train/chosen": -0.020498279482126236,
"rewards_train/margins": -0.021032828837633133,
"rewards_train/rejected": 0.000534549355506897,
"step": 0
},
{
"epoch": 0,
"logps_train/chosen": -89.90950012207031,
"logps_train/ref_chosen": -90.0,
"logps_train/ref_rejected": -101.0,
"logps_train/rejected": -100.86872863769531,
"rewards_train/accuracies": 0.5,
"rewards_train/chosen": 0.0191262885928154,
"rewards_train/margins": 0.0020734891295433044,
"rewards_train/rejected": 0.017052799463272095,
"step": 0
},
{
"epoch": 0.13,
"learning_rate": 5e-05,
"loss": 0.7043,
"step": 1
},
{
"epoch": 0.13,
"logps_train/chosen": -104.38166809082031,
"logps_train/ref_chosen": -104.5,
"logps_train/ref_rejected": -98.0,
"logps_train/rejected": -97.89907836914062,
"rewards_train/accuracies": 0.515625,
"rewards_train/chosen": 0.03059956058859825,
"rewards_train/margins": 0.004916800186038017,
"rewards_train/rejected": 0.025682760402560234,
"step": 1
},
{
"epoch": 0.13,
"logps_train/chosen": -94.88069152832031,
"logps_train/ref_chosen": -95.0,
"logps_train/ref_rejected": -95.0,
"logps_train/rejected": -95.29293823242188,
"rewards_train/accuracies": 0.546875,
"rewards_train/chosen": -0.009438544511795044,
"rewards_train/margins": -0.020688317716121674,
"rewards_train/rejected": 0.01124977320432663,
"step": 1
},
{
"epoch": 0.27,
"learning_rate": 4.9692208514878444e-05,
"loss": 0.7061,
"step": 2
},
{
"epoch": 0.27,
"logps_train/chosen": -82.19304656982422,
"logps_train/ref_chosen": -82.5,
"logps_train/ref_rejected": -89.0,
"logps_train/rejected": -88.60076904296875,
"rewards_train/accuracies": 0.40625,
"rewards_train/chosen": 0.1768513023853302,
"rewards_train/margins": -0.027216821908950806,
"rewards_train/rejected": 0.204068124294281,
"step": 2
},
{
"epoch": 0.27,
"logps_train/chosen": -99.76242065429688,
"logps_train/ref_chosen": -100.0,
"logps_train/ref_rejected": -106.0,
"logps_train/rejected": -105.78941345214844,
"rewards_train/accuracies": 0.609375,
"rewards_train/chosen": 0.19887080788612366,
"rewards_train/margins": 0.0869825929403305,
"rewards_train/rejected": 0.11188821494579315,
"step": 2
},
{
"epoch": 0.4,
"learning_rate": 4.877641290737884e-05,
"loss": 0.6893,
"step": 3
},
{
"epoch": 0.4,
"logps_train/chosen": -85.4503173828125,
"logps_train/ref_chosen": -86.0,
"logps_train/ref_rejected": -90.0,
"logps_train/rejected": -89.7308120727539,
"rewards_train/accuracies": 0.5625,
"rewards_train/chosen": 0.1976955533027649,
"rewards_train/margins": 0.044912755489349365,
"rewards_train/rejected": 0.15278279781341553,
"step": 3
},
{
"epoch": 0.4,
"logps_train/chosen": -91.95987701416016,
"logps_train/ref_chosen": -92.0,
"logps_train/ref_rejected": -77.5,
"logps_train/rejected": -77.18235778808594,
"rewards_train/accuracies": 0.484375,
"rewards_train/chosen": 0.17484739422798157,
"rewards_train/margins": -0.023889049887657166,
"rewards_train/rejected": 0.19873644411563873,
"step": 3
},
{
"epoch": 0.53,
"learning_rate": 4.72751631047092e-05,
"loss": 0.7254,
"step": 4
},
{
"epoch": 0.53,
"logps_train/chosen": -88.06124114990234,
"logps_train/ref_chosen": -88.5,
"logps_train/ref_rejected": -91.5,
"logps_train/rejected": -91.30616760253906,
"rewards_train/accuracies": 0.5625,
"rewards_train/chosen": 0.2691383361816406,
"rewards_train/margins": 0.15871848165988922,
"rewards_train/rejected": 0.1104198545217514,
"step": 4
},
{
"epoch": 0.53,
"logps_train/chosen": -104.14408111572266,
"logps_train/ref_chosen": -105.0,
"logps_train/ref_rejected": -104.5,
"logps_train/rejected": -103.88468933105469,
"rewards_train/accuracies": 0.578125,
"rewards_train/chosen": 0.3913380205631256,
"rewards_train/margins": 0.13361021876335144,
"rewards_train/rejected": 0.25772780179977417,
"step": 4
},
{
"epoch": 0.67,
"learning_rate": 4.522542485937369e-05,
"loss": 0.672,
"step": 5
},
{
"epoch": 0.67,
"logps_train/chosen": -108.3313980102539,
"logps_train/ref_chosen": -109.0,
"logps_train/ref_rejected": -99.5,
"logps_train/rejected": -98.8963623046875,
"rewards_train/accuracies": 0.640625,
"rewards_train/chosen": 0.48469099402427673,
"rewards_train/margins": 0.2424430549144745,
"rewards_train/rejected": 0.24224793910980225,
"step": 5
},
{
"epoch": 0.67,
"logps_train/chosen": -81.63034057617188,
"logps_train/ref_chosen": -82.5,
"logps_train/ref_rejected": -80.0,
"logps_train/rejected": -78.99322509765625,
"rewards_train/accuracies": 0.484375,
"rewards_train/chosen": 0.48524150252342224,
"rewards_train/margins": 0.020122677087783813,
"rewards_train/rejected": 0.4651188254356384,
"step": 5
},
{
"epoch": 0.8,
"learning_rate": 4.267766952966369e-05,
"loss": 0.7247,
"step": 6
},
{
"epoch": 0.8,
"logps_train/chosen": -91.17040252685547,
"logps_train/ref_chosen": -92.0,
"logps_train/ref_rejected": -89.5,
"logps_train/rejected": -88.93339538574219,
"rewards_train/accuracies": 0.5,
"rewards_train/chosen": 0.522466778755188,
"rewards_train/margins": 0.16933834552764893,
"rewards_train/rejected": 0.35312843322753906,
"step": 6
},
{
"epoch": 0.8,
"logps_train/chosen": -87.31026458740234,
"logps_train/ref_chosen": -88.5,
"logps_train/ref_rejected": -94.0,
"logps_train/rejected": -93.22328186035156,
"rewards_train/accuracies": 0.625,
"rewards_train/chosen": 0.4981892704963684,
"rewards_train/margins": 0.22706186771392822,
"rewards_train/rejected": 0.2711274027824402,
"step": 6
},
{
"epoch": 0.93,
"learning_rate": 3.969463130731183e-05,
"loss": 0.6977,
"step": 7
},
{
"epoch": 0.93,
"logps_train/chosen": -97.33760833740234,
"logps_train/ref_chosen": -107.0,
"logps_train/ref_rejected": -104.0,
"logps_train/rejected": -112.84634399414062,
"rewards_train/accuracies": 0.921875,
"rewards_train/chosen": 4.983658790588379,
"rewards_train/margins": 9.4771409034729,
"rewards_train/rejected": -4.4934821128845215,
"step": 7
},
{
"epoch": 0.93,
"logps_train/chosen": -87.83363342285156,
"logps_train/ref_chosen": -96.0,
"logps_train/ref_rejected": -90.5,
"logps_train/rejected": -96.15026092529297,
"rewards_train/accuracies": 0.96875,
"rewards_train/chosen": 4.031304359436035,
"rewards_train/margins": 6.9581804275512695,
"rewards_train/rejected": -2.9268760681152344,
"step": 7
},
{
"epoch": 1.07,
"learning_rate": 3.634976249348867e-05,
"loss": 0.1455,
"step": 8
},
{
"epoch": 1.07,
"logps_train/chosen": -92.92860412597656,
"logps_train/ref_chosen": -104.0,
"logps_train/ref_rejected": -91.0,
"logps_train/rejected": -100.07354736328125,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 5.565237522125244,
"rewards_train/margins": 10.037434577941895,
"rewards_train/rejected": -4.47219705581665,
"step": 8
},
{
"epoch": 1.07,
"logps_train/chosen": -81.51614379882812,
"logps_train/ref_chosen": -90.0,
"logps_train/ref_rejected": -91.0,
"logps_train/rejected": -98.16658782958984,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 4.322493553161621,
"rewards_train/margins": 7.822535753250122,
"rewards_train/rejected": -3.500042200088501,
"step": 8
},
{
"epoch": 1.2,
"learning_rate": 3.272542485937369e-05,
"loss": 0.0635,
"step": 9
},
{
"epoch": 1.2,
"logps_train/chosen": -89.58404541015625,
"logps_train/ref_chosen": -100.0,
"logps_train/ref_rejected": -91.5,
"logps_train/rejected": -101.0616455078125,
"rewards_train/accuracies": 0.984375,
"rewards_train/chosen": 5.313934326171875,
"rewards_train/margins": 10.12344217300415,
"rewards_train/rejected": -4.809507846832275,
"step": 9
},
{
"epoch": 1.2,
"logps_train/chosen": -87.73910522460938,
"logps_train/ref_chosen": -98.0,
"logps_train/ref_rejected": -95.5,
"logps_train/rejected": -103.09068298339844,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 5.000567436218262,
"rewards_train/margins": 8.863048553466797,
"rewards_train/rejected": -3.862481117248535,
"step": 9
},
{
"epoch": 1.33,
"learning_rate": 2.8910861626005776e-05,
"loss": 0.0449,
"step": 10
},
{
"epoch": 1.33,
"logps_train/chosen": -90.21186065673828,
"logps_train/ref_chosen": -99.5,
"logps_train/ref_rejected": -97.0,
"logps_train/rejected": -104.75820922851562,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 4.69606876373291,
"rewards_train/margins": 8.679665327072144,
"rewards_train/rejected": -3.9835965633392334,
"step": 10
},
{
"epoch": 1.33,
"logps_train/chosen": -81.06768798828125,
"logps_train/ref_chosen": -91.0,
"logps_train/ref_rejected": -97.5,
"logps_train/rejected": -106.78590393066406,
"rewards_train/accuracies": 0.984375,
"rewards_train/chosen": 5.148348808288574,
"rewards_train/margins": 9.690350532531738,
"rewards_train/rejected": -4.542001724243164,
"step": 10
},
{
"epoch": 1.47,
"learning_rate": 2.5e-05,
"loss": 0.0452,
"step": 11
},
{
"epoch": 1.47,
"logps_train/chosen": -85.45967102050781,
"logps_train/ref_chosen": -97.0,
"logps_train/ref_rejected": -99.0,
"logps_train/rejected": -109.41343688964844,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 5.63247013092041,
"rewards_train/margins": 10.989093780517578,
"rewards_train/rejected": -5.356623649597168,
"step": 11
},
{
"epoch": 1.47,
"logps_train/chosen": -71.80900573730469,
"logps_train/ref_chosen": -81.0,
"logps_train/ref_rejected": -90.0,
"logps_train/rejected": -97.481689453125,
"rewards_train/accuracies": 0.96875,
"rewards_train/chosen": 4.653310775756836,
"rewards_train/margins": 8.362460613250732,
"rewards_train/rejected": -3.7091498374938965,
"step": 11
},
{
"epoch": 1.6,
"learning_rate": 2.1089138373994223e-05,
"loss": 0.0475,
"step": 12
},
{
"epoch": 1.6,
"logps_train/chosen": -79.43351745605469,
"logps_train/ref_chosen": -90.0,
"logps_train/ref_rejected": -96.0,
"logps_train/rejected": -105.02793884277344,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 5.266395568847656,
"rewards_train/margins": 9.863905906677246,
"rewards_train/rejected": -4.59751033782959,
"step": 12
},
{
"epoch": 1.6,
"logps_train/chosen": -73.93589782714844,
"logps_train/ref_chosen": -83.0,
"logps_train/ref_rejected": -85.0,
"logps_train/rejected": -93.78298950195312,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 4.597479343414307,
"rewards_train/margins": 8.91060733795166,
"rewards_train/rejected": -4.3131279945373535,
"step": 12
},
{
"epoch": 1.73,
"learning_rate": 1.7274575140626318e-05,
"loss": 0.0501,
"step": 13
},
{
"epoch": 1.73,
"logps_train/chosen": -90.48731231689453,
"logps_train/ref_chosen": -105.0,
"logps_train/ref_rejected": -101.0,
"logps_train/rejected": -113.04267883300781,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 7.302729606628418,
"rewards_train/margins": 13.378514289855957,
"rewards_train/rejected": -6.075784683227539,
"step": 13
},
{
"epoch": 1.73,
"logps_train/chosen": -78.76904296875,
"logps_train/ref_chosen": -91.0,
"logps_train/ref_rejected": -85.0,
"logps_train/rejected": -94.63651275634766,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 5.878904819488525,
"rewards_train/margins": 10.652726650238037,
"rewards_train/rejected": -4.773821830749512,
"step": 13
},
{
"epoch": 1.87,
"learning_rate": 1.3650237506511331e-05,
"loss": 0.023,
"step": 14
},
{
"epoch": 1.87,
"logps_train/chosen": -82.4466552734375,
"logps_train/ref_chosen": -93.0,
"logps_train/ref_rejected": -108.5,
"logps_train/rejected": -120.84589385986328,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 5.197817802429199,
"rewards_train/margins": 11.340189933776855,
"rewards_train/rejected": -6.142372131347656,
"step": 14
},
{
"epoch": 1.87,
"logps_train/chosen": -81.03172302246094,
"logps_train/ref_chosen": -94.0,
"logps_train/ref_rejected": -93.0,
"logps_train/rejected": -104.14176940917969,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 6.500617027282715,
"rewards_train/margins": 12.0755934715271,
"rewards_train/rejected": -5.574976444244385,
"step": 14
},
{
"epoch": 2.0,
"learning_rate": 1.0305368692688174e-05,
"loss": 0.0299,
"step": 15
},
{
"epoch": 2.0,
"logps_train/chosen": -84.29070281982422,
"logps_train/ref_chosen": -97.5,
"logps_train/ref_rejected": -96.5,
"logps_train/rejected": -108.1887435913086,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 6.537390232086182,
"rewards_train/margins": 12.432237148284912,
"rewards_train/rejected": -5.8948469161987305,
"step": 15
},
{
"epoch": 2.0,
"logps_train/chosen": -80.9233627319336,
"logps_train/ref_chosen": -93.5,
"logps_train/ref_rejected": -106.0,
"logps_train/rejected": -118.09178161621094,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 6.2991838455200195,
"rewards_train/margins": 12.446146488189697,
"rewards_train/rejected": -6.146962642669678,
"step": 15
},
{
"epoch": 2.13,
"learning_rate": 7.3223304703363135e-06,
"loss": 0.028,
"step": 16
},
{
"epoch": 2.13,
"logps_train/chosen": -83.49127960205078,
"logps_train/ref_chosen": -95.0,
"logps_train/ref_rejected": -89.0,
"logps_train/rejected": -99.20130157470703,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 5.888392448425293,
"rewards_train/margins": 11.02004623413086,
"rewards_train/rejected": -5.131653785705566,
"step": 16
},
{
"epoch": 2.13,
"logps_train/chosen": -82.60940551757812,
"logps_train/ref_chosen": -94.0,
"logps_train/ref_rejected": -88.5,
"logps_train/rejected": -98.06349182128906,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 5.770737648010254,
"rewards_train/margins": 10.54662036895752,
"rewards_train/rejected": -4.775882720947266,
"step": 16
},
{
"epoch": 2.27,
"learning_rate": 4.7745751406263165e-06,
"loss": 0.034,
"step": 17
},
{
"epoch": 2.27,
"logps_train/chosen": -82.3560791015625,
"logps_train/ref_chosen": -96.0,
"logps_train/ref_rejected": -96.0,
"logps_train/rejected": -108.4398193359375,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 6.842710018157959,
"rewards_train/margins": 13.140255451202393,
"rewards_train/rejected": -6.297545433044434,
"step": 17
},
{
"epoch": 2.27,
"logps_train/chosen": -79.5767822265625,
"logps_train/ref_chosen": -91.5,
"logps_train/ref_rejected": -100.0,
"logps_train/rejected": -114.11109924316406,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 6.006286144256592,
"rewards_train/margins": 13.076303482055664,
"rewards_train/rejected": -7.070017337799072,
"step": 17
},
{
"epoch": 2.4,
"learning_rate": 2.7248368952908053e-06,
"loss": 0.0185,
"step": 18
},
{
"epoch": 2.4,
"logps_train/chosen": -87.6229019165039,
"logps_train/ref_chosen": -102.0,
"logps_train/ref_rejected": -99.0,
"logps_train/rejected": -112.02855682373047,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 7.329846382141113,
"rewards_train/margins": 13.926156997680664,
"rewards_train/rejected": -6.596310615539551,
"step": 18
},
{
"epoch": 2.4,
"logps_train/chosen": -87.40618896484375,
"logps_train/ref_chosen": -101.5,
"logps_train/ref_rejected": -101.0,
"logps_train/rejected": -114.67952728271484,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 7.131863594055176,
"rewards_train/margins": 13.977363586425781,
"rewards_train/rejected": -6.8454999923706055,
"step": 18
},
{
"epoch": 2.53,
"learning_rate": 1.2235870926211619e-06,
"loss": 0.0233,
"step": 19
},
{
"epoch": 2.53,
"logps_train/chosen": -81.33338928222656,
"logps_train/ref_chosen": -94.5,
"logps_train/ref_rejected": -101.0,
"logps_train/rejected": -115.27784729003906,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 6.613577842712402,
"rewards_train/margins": 13.59971284866333,
"rewards_train/rejected": -6.986135005950928,
"step": 19
},
{
"epoch": 2.53,
"logps_train/chosen": -89.27815246582031,
"logps_train/ref_chosen": -101.0,
"logps_train/ref_rejected": -95.5,
"logps_train/rejected": -105.24420928955078,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 5.84676456451416,
"rewards_train/margins": 10.66076374053955,
"rewards_train/rejected": -4.813999176025391,
"step": 19
},
{
"epoch": 2.67,
"learning_rate": 3.077914851215585e-07,
"loss": 0.0211,
"step": 20
},
{
"epoch": 2.67,
"logps_train/chosen": -66.01316833496094,
"logps_train/ref_chosen": -76.0,
"logps_train/ref_rejected": -78.0,
"logps_train/rejected": -87.24235534667969,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 5.09859561920166,
"rewards_train/margins": 9.691985130310059,
"rewards_train/rejected": -4.593389511108398,
"step": 20
},
{
"epoch": 2.67,
"logps_train/chosen": -82.14631652832031,
"logps_train/ref_chosen": -96.0,
"logps_train/ref_rejected": -94.0,
"logps_train/rejected": -106.15888214111328,
"rewards_train/accuracies": 1.0,
"rewards_train/chosen": 6.794517993927002,
"rewards_train/margins": 12.825128555297852,
"rewards_train/rejected": -6.03061056137085,
"step": 20
},
{
"epoch": 2.8,
"learning_rate": 0.0,
"loss": 0.0335,
"step": 21
},
{
"epoch": 2.8,
"step": 21,
"total_flos": 0.0,
"train_loss": 0.2632110191597825,
"train_runtime": 244.5091,
"train_samples_per_second": 11.153,
"train_steps_per_second": 0.086
}
],
"max_steps": 21,
"num_train_epochs": 3,
"total_flos": 0.0,
"trial_name": null,
"trial_params": null
}