{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9984301412872841,
"eval_steps": 500,
"global_step": 159,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006279434850863423,
"grad_norm": 13.884043484189487,
"learning_rate": 6.25e-09,
"logits/chosen": 0.6537042260169983,
"logits/rejected": 0.7058504819869995,
"logps/chosen": -351.26483154296875,
"logps/pi_response": -184.36062622070312,
"logps/ref_response": -184.36062622070312,
"logps/rejected": -575.6107788085938,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.06279434850863422,
"grad_norm": 15.047882494364583,
"learning_rate": 6.25e-08,
"logits/chosen": 0.5573610663414001,
"logits/rejected": 0.7342559099197388,
"logps/chosen": -347.8555908203125,
"logps/pi_response": -139.0795440673828,
"logps/ref_response": -139.16233825683594,
"logps/rejected": -516.4610595703125,
"loss": 0.6931,
"rewards/accuracies": 0.4375,
"rewards/chosen": -2.2081812858232297e-05,
"rewards/margins": -0.0008097346290014684,
"rewards/rejected": 0.0007876526215113699,
"step": 10
},
{
"epoch": 0.12558869701726844,
"grad_norm": 13.801490389190754,
"learning_rate": 9.980706626858606e-08,
"logits/chosen": 0.6113969683647156,
"logits/rejected": 0.8057534098625183,
"logps/chosen": -298.69696044921875,
"logps/pi_response": -135.04879760742188,
"logps/ref_response": -135.14186096191406,
"logps/rejected": -531.4634399414062,
"loss": 0.6922,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": -0.005118087865412235,
"rewards/margins": 0.0010911704739555717,
"rewards/rejected": -0.006209257524460554,
"step": 20
},
{
"epoch": 0.18838304552590268,
"grad_norm": 12.702542077611918,
"learning_rate": 9.765362502737097e-08,
"logits/chosen": 0.6679919958114624,
"logits/rejected": 0.8110311627388,
"logps/chosen": -356.38702392578125,
"logps/pi_response": -146.6771697998047,
"logps/ref_response": -146.90872192382812,
"logps/rejected": -486.9811096191406,
"loss": 0.6884,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.009140288457274437,
"rewards/margins": 0.007989184930920601,
"rewards/rejected": -0.017129475250840187,
"step": 30
},
{
"epoch": 0.25117739403453687,
"grad_norm": 14.943778079312272,
"learning_rate": 9.320944188084241e-08,
"logits/chosen": 0.49053049087524414,
"logits/rejected": 0.7109462022781372,
"logps/chosen": -310.234619140625,
"logps/pi_response": -138.77902221679688,
"logps/ref_response": -138.47763061523438,
"logps/rejected": -532.02685546875,
"loss": 0.6814,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -0.01783907599747181,
"rewards/margins": 0.026475772261619568,
"rewards/rejected": -0.044314850121736526,
"step": 40
},
{
"epoch": 0.3139717425431711,
"grad_norm": 12.526036792984396,
"learning_rate": 8.668815171119019e-08,
"logits/chosen": 0.5844839811325073,
"logits/rejected": 0.8390130996704102,
"logps/chosen": -288.446533203125,
"logps/pi_response": -146.24270629882812,
"logps/ref_response": -146.1750946044922,
"logps/rejected": -509.0594787597656,
"loss": 0.6767,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -0.026793677359819412,
"rewards/margins": 0.03173010051250458,
"rewards/rejected": -0.05852377414703369,
"step": 50
},
{
"epoch": 0.37676609105180536,
"grad_norm": 12.963675959029416,
"learning_rate": 7.840323733655779e-08,
"logits/chosen": 0.5787652730941772,
"logits/rejected": 0.7449785470962524,
"logps/chosen": -284.3918762207031,
"logps/pi_response": -137.13790893554688,
"logps/ref_response": -136.751953125,
"logps/rejected": -550.5841674804688,
"loss": 0.6653,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.05285344645380974,
"rewards/margins": 0.0631423145532608,
"rewards/rejected": -0.11599576473236084,
"step": 60
},
{
"epoch": 0.43956043956043955,
"grad_norm": 12.831673435039468,
"learning_rate": 6.87529601804781e-08,
"logits/chosen": 0.39561206102371216,
"logits/rejected": 0.6105276346206665,
"logps/chosen": -280.70928955078125,
"logps/pi_response": -137.5051727294922,
"logps/ref_response": -136.8078155517578,
"logps/rejected": -481.21539306640625,
"loss": 0.6561,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -0.06504122167825699,
"rewards/margins": 0.06430341303348541,
"rewards/rejected": -0.129344642162323,
"step": 70
},
{
"epoch": 0.5023547880690737,
"grad_norm": 10.720879071714686,
"learning_rate": 5.8201215576551086e-08,
"logits/chosen": 0.488506019115448,
"logits/rejected": 0.6578499674797058,
"logps/chosen": -342.3384704589844,
"logps/pi_response": -152.0958709716797,
"logps/ref_response": -151.61917114257812,
"logps/rejected": -559.8889770507812,
"loss": 0.6564,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.0973019003868103,
"rewards/margins": 0.09300337731838226,
"rewards/rejected": -0.19030529260635376,
"step": 80
},
{
"epoch": 0.565149136577708,
"grad_norm": 12.292298954117202,
"learning_rate": 4.725523300678362e-08,
"logits/chosen": 0.40867680311203003,
"logits/rejected": 0.7344620227813721,
"logps/chosen": -293.9271240234375,
"logps/pi_response": -148.54888916015625,
"logps/ref_response": -147.9942169189453,
"logps/rejected": -589.199951171875,
"loss": 0.6488,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.0772012248635292,
"rewards/margins": 0.13705095648765564,
"rewards/rejected": -0.21425218880176544,
"step": 90
},
{
"epoch": 0.6279434850863422,
"grad_norm": 10.50350761550398,
"learning_rate": 3.644119323817915e-08,
"logits/chosen": 0.6718475222587585,
"logits/rejected": 0.8242921829223633,
"logps/chosen": -311.2980651855469,
"logps/pi_response": -144.72332763671875,
"logps/ref_response": -144.91741943359375,
"logps/rejected": -527.7688598632812,
"loss": 0.6483,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.08537373691797256,
"rewards/margins": 0.11054044961929321,
"rewards/rejected": -0.19591419398784637,
"step": 100
},
{
"epoch": 0.6907378335949764,
"grad_norm": 10.142028760053885,
"learning_rate": 2.6278934458271994e-08,
"logits/chosen": 0.406488835811615,
"logits/rejected": 0.5887733697891235,
"logps/chosen": -326.5669250488281,
"logps/pi_response": -145.37783813476562,
"logps/ref_response": -144.95806884765625,
"logps/rejected": -553.6624145507812,
"loss": 0.6484,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -0.11705747991800308,
"rewards/margins": 0.09957389533519745,
"rewards/rejected": -0.21663138270378113,
"step": 110
},
{
"epoch": 0.7535321821036107,
"grad_norm": 12.632716685626612,
"learning_rate": 1.725696330273575e-08,
"logits/chosen": 0.5998940467834473,
"logits/rejected": 0.7598485946655273,
"logps/chosen": -308.08221435546875,
"logps/pi_response": -143.70516967773438,
"logps/ref_response": -143.9131317138672,
"logps/rejected": -536.3736572265625,
"loss": 0.6396,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -0.0918205976486206,
"rewards/margins": 0.12002626806497574,
"rewards/rejected": -0.21184685826301575,
"step": 120
},
{
"epoch": 0.8163265306122449,
"grad_norm": 11.411700247352561,
"learning_rate": 9.808972011828054e-09,
"logits/chosen": 0.5050215721130371,
"logits/rejected": 0.8139595985412598,
"logps/chosen": -316.27911376953125,
"logps/pi_response": -152.6710205078125,
"logps/ref_response": -152.75784301757812,
"logps/rejected": -555.2142333984375,
"loss": 0.6455,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -0.09827511012554169,
"rewards/margins": 0.11614187061786652,
"rewards/rejected": -0.2144169807434082,
"step": 130
},
{
"epoch": 0.8791208791208791,
"grad_norm": 11.032584818753614,
"learning_rate": 4.2929905518041705e-09,
"logits/chosen": 0.42998820543289185,
"logits/rejected": 0.6386924982070923,
"logps/chosen": -308.99957275390625,
"logps/pi_response": -140.1665496826172,
"logps/ref_response": -140.09066772460938,
"logps/rejected": -510.5171813964844,
"loss": 0.6427,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -0.10153601318597794,
"rewards/margins": 0.11025869846343994,
"rewards/rejected": -0.21179470419883728,
"step": 140
},
{
"epoch": 0.9419152276295133,
"grad_norm": 9.898419952530531,
"learning_rate": 9.741758728888216e-10,
"logits/chosen": 0.464769184589386,
"logits/rejected": 0.7060795426368713,
"logps/chosen": -325.2671203613281,
"logps/pi_response": -155.88412475585938,
"logps/ref_response": -155.28140258789062,
"logps/rejected": -531.9527587890625,
"loss": 0.6455,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.0997537225484848,
"rewards/margins": 0.1331712305545807,
"rewards/rejected": -0.23292496800422668,
"step": 150
},
{
"epoch": 0.9984301412872841,
"step": 159,
"total_flos": 0.0,
"train_loss": 0.6602971028981719,
"train_runtime": 3007.0775,
"train_samples_per_second": 6.777,
"train_steps_per_second": 0.053
}
],
"logging_steps": 10,
"max_steps": 159,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}