{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9968652037617555,
"eval_steps": 500,
"global_step": 159,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 3.125e-08,
"logits/chosen": -2.191464900970459,
"logits/rejected": -2.192023754119873,
"logps/chosen": -337.12921142578125,
"logps/rejected": -401.0337829589844,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.06,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": -2.265860080718994,
"logits/rejected": -2.208857536315918,
"logps/chosen": -230.86334228515625,
"logps/rejected": -311.0320739746094,
"loss": 0.6885,
"rewards/accuracies": 0.5208333134651184,
"rewards/chosen": -0.012912267819046974,
"rewards/margins": 0.01672884076833725,
"rewards/rejected": -0.029641108587384224,
"step": 10
},
{
"epoch": 0.13,
"learning_rate": 4.990353313429303e-07,
"logits/chosen": -2.1798629760742188,
"logits/rejected": -2.165292263031006,
"logps/chosen": -277.3706359863281,
"logps/rejected": -361.240234375,
"loss": 0.679,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.4758126735687256,
"rewards/margins": 0.16070665419101715,
"rewards/rejected": -0.6365193128585815,
"step": 20
},
{
"epoch": 0.19,
"learning_rate": 4.882681251368548e-07,
"logits/chosen": -2.065920829772949,
"logits/rejected": -2.056377649307251,
"logps/chosen": -284.94183349609375,
"logps/rejected": -382.9114685058594,
"loss": 0.6709,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": -0.44684070348739624,
"rewards/margins": 0.24182553589344025,
"rewards/rejected": -0.6886661648750305,
"step": 30
},
{
"epoch": 0.25,
"learning_rate": 4.6604720940421207e-07,
"logits/chosen": -2.139819383621216,
"logits/rejected": -2.0983104705810547,
"logps/chosen": -261.02471923828125,
"logps/rejected": -352.98162841796875,
"loss": 0.6523,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.22146382927894592,
"rewards/margins": 0.14547555148601532,
"rewards/rejected": -0.36693936586380005,
"step": 40
},
{
"epoch": 0.31,
"learning_rate": 4.3344075855595097e-07,
"logits/chosen": -2.1886887550354004,
"logits/rejected": -2.159538984298706,
"logps/chosen": -278.70196533203125,
"logps/rejected": -405.36700439453125,
"loss": 0.6368,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -0.3322455883026123,
"rewards/margins": 0.2992514371871948,
"rewards/rejected": -0.6314969658851624,
"step": 50
},
{
"epoch": 0.38,
"learning_rate": 3.920161866827889e-07,
"logits/chosen": -2.08825421333313,
"logits/rejected": -2.0380659103393555,
"logps/chosen": -288.3702392578125,
"logps/rejected": -361.09112548828125,
"loss": 0.644,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": -0.3619961142539978,
"rewards/margins": 0.1461850106716156,
"rewards/rejected": -0.5081811547279358,
"step": 60
},
{
"epoch": 0.44,
"learning_rate": 3.4376480090239047e-07,
"logits/chosen": -2.074207305908203,
"logits/rejected": -2.0528454780578613,
"logps/chosen": -277.1039123535156,
"logps/rejected": -329.77154541015625,
"loss": 0.6446,
"rewards/accuracies": 0.606249988079071,
"rewards/chosen": -0.2841487526893616,
"rewards/margins": 0.1815183013677597,
"rewards/rejected": -0.4656670093536377,
"step": 70
},
{
"epoch": 0.5,
"learning_rate": 2.910060778827554e-07,
"logits/chosen": -2.169900894165039,
"logits/rejected": -2.136660099029541,
"logps/chosen": -323.7901916503906,
"logps/rejected": -358.14471435546875,
"loss": 0.6368,
"rewards/accuracies": 0.543749988079071,
"rewards/chosen": -0.4767797887325287,
"rewards/margins": 0.1753493845462799,
"rewards/rejected": -0.6521291732788086,
"step": 80
},
{
"epoch": 0.56,
"learning_rate": 2.3627616503391812e-07,
"logits/chosen": -2.2116239070892334,
"logits/rejected": -2.107689380645752,
"logps/chosen": -287.3326721191406,
"logps/rejected": -396.2935791015625,
"loss": 0.6241,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -0.35074305534362793,
"rewards/margins": 0.3240629732608795,
"rewards/rejected": -0.6748059988021851,
"step": 90
},
{
"epoch": 0.63,
"learning_rate": 1.8220596619089573e-07,
"logits/chosen": -2.1734859943389893,
"logits/rejected": -2.1512904167175293,
"logps/chosen": -280.8328552246094,
"logps/rejected": -403.6314697265625,
"loss": 0.6331,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.32056325674057007,
"rewards/margins": 0.2918151617050171,
"rewards/rejected": -0.6123784184455872,
"step": 100
},
{
"epoch": 0.69,
"learning_rate": 1.3139467229135998e-07,
"logits/chosen": -2.1452653408050537,
"logits/rejected": -2.156574249267578,
"logps/chosen": -303.7337646484375,
"logps/rejected": -390.61248779296875,
"loss": 0.6426,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -0.33146318793296814,
"rewards/margins": 0.27743712067604065,
"rewards/rejected": -0.6089003086090088,
"step": 110
},
{
"epoch": 0.75,
"learning_rate": 8.628481651367875e-08,
"logits/chosen": -2.1824703216552734,
"logits/rejected": -2.148677110671997,
"logps/chosen": -270.36383056640625,
"logps/rejected": -371.7115478515625,
"loss": 0.6145,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -0.35747700929641724,
"rewards/margins": 0.259312242269516,
"rewards/rejected": -0.6167892217636108,
"step": 120
},
{
"epoch": 0.82,
"learning_rate": 4.904486005914027e-08,
"logits/chosen": -2.155510187149048,
"logits/rejected": -2.1049563884735107,
"logps/chosen": -283.0008850097656,
"logps/rejected": -388.17022705078125,
"loss": 0.6513,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -0.3318966329097748,
"rewards/margins": 0.3010888993740082,
"rewards/rejected": -0.632985532283783,
"step": 130
},
{
"epoch": 0.88,
"learning_rate": 2.1464952759020856e-08,
"logits/chosen": -2.147397756576538,
"logits/rejected": -2.157912492752075,
"logps/chosen": -267.1644592285156,
"logps/rejected": -378.49908447265625,
"loss": 0.6245,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -0.32303911447525024,
"rewards/margins": 0.2776426374912262,
"rewards/rejected": -0.6006818413734436,
"step": 140
},
{
"epoch": 0.94,
"learning_rate": 4.8708793644441086e-09,
"logits/chosen": -2.1643731594085693,
"logits/rejected": -2.187424659729004,
"logps/chosen": -267.67608642578125,
"logps/rejected": -405.1327209472656,
"loss": 0.6384,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.3011611998081207,
"rewards/margins": 0.31678175926208496,
"rewards/rejected": -0.6179429888725281,
"step": 150
},
{
"epoch": 1.0,
"step": 159,
"total_flos": 0.0,
"train_loss": 0.6447140285803837,
"train_runtime": 2654.6606,
"train_samples_per_second": 7.676,
"train_steps_per_second": 0.06
}
],
"logging_steps": 10,
"max_steps": 159,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}