{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 4.600574569217216,
      "learning_rate": 2.5e-08,
      "logits/chosen": -0.665952742099762,
      "logits/rejected": -0.7915734052658081,
      "logps/chosen": -817.56982421875,
      "logps/rejected": -1233.5159912109375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.05,
      "grad_norm": 4.6956668605469805,
      "learning_rate": 2.5e-07,
      "logits/chosen": -0.6826351881027222,
      "logits/rejected": -0.8164599537849426,
      "logps/chosen": -999.0946044921875,
      "logps/rejected": -1313.702880859375,
      "loss": 0.6928,
      "rewards/accuracies": 0.4791666567325592,
      "rewards/chosen": 0.00046035187551751733,
      "rewards/margins": 0.0011585307074710727,
      "rewards/rejected": -0.0006981787737458944,
      "step": 10
    },
    {
      "epoch": 0.1,
      "grad_norm": 4.568367716631967,
      "learning_rate": 5e-07,
      "logits/chosen": -0.7570823431015015,
      "logits/rejected": -0.8555054664611816,
      "logps/chosen": -1019.2281494140625,
      "logps/rejected": -1418.564453125,
      "loss": 0.6925,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.0018463224405422807,
      "rewards/margins": 0.0015529071679338813,
      "rewards/rejected": 0.0002934155927505344,
      "step": 20
    },
    {
      "epoch": 0.15,
      "grad_norm": 4.602323361612275,
      "learning_rate": 4.96201938253052e-07,
      "logits/chosen": -0.7583962678909302,
      "logits/rejected": -0.8155515789985657,
      "logps/chosen": -1024.038818359375,
      "logps/rejected": -1309.645263671875,
      "loss": 0.689,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.004936753772199154,
      "rewards/margins": 0.006319736130535603,
      "rewards/rejected": -0.0013829817762598395,
      "step": 30
    },
    {
      "epoch": 0.2,
      "grad_norm": 4.543733206128541,
      "learning_rate": 4.849231551964771e-07,
      "logits/chosen": -0.7214664220809937,
      "logits/rejected": -0.8081992268562317,
      "logps/chosen": -998.6472778320312,
      "logps/rejected": -1311.484130859375,
      "loss": 0.6835,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 0.009940719231963158,
      "rewards/margins": 0.02101372741162777,
      "rewards/rejected": -0.011073010042309761,
      "step": 40
    },
    {
      "epoch": 0.25,
      "grad_norm": 5.088718809722254,
      "learning_rate": 4.6650635094610966e-07,
      "logits/chosen": -0.6799867749214172,
      "logits/rejected": -0.7925044298171997,
      "logps/chosen": -994.2998046875,
      "logps/rejected": -1354.9202880859375,
      "loss": 0.6734,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.02278929203748703,
      "rewards/margins": 0.04287552461028099,
      "rewards/rejected": -0.02008623257279396,
      "step": 50
    },
    {
      "epoch": 0.3,
      "grad_norm": 4.7938552939091,
      "learning_rate": 4.415111107797445e-07,
      "logits/chosen": -0.7093895673751831,
      "logits/rejected": -0.7959045171737671,
      "logps/chosen": -1028.694091796875,
      "logps/rejected": -1305.4276123046875,
      "loss": 0.6663,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": 0.02415267750620842,
      "rewards/margins": 0.06639216095209122,
      "rewards/rejected": -0.0422394797205925,
      "step": 60
    },
    {
      "epoch": 0.35,
      "grad_norm": 5.054883757482895,
      "learning_rate": 4.106969024216348e-07,
      "logits/chosen": -0.7344843149185181,
      "logits/rejected": -0.8120015859603882,
      "logps/chosen": -1003.9986572265625,
      "logps/rejected": -1363.428955078125,
      "loss": 0.6526,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": 0.020409032702445984,
      "rewards/margins": 0.08794097602367401,
      "rewards/rejected": -0.06753192842006683,
      "step": 70
    },
    {
      "epoch": 0.4,
      "grad_norm": 5.482448828665516,
      "learning_rate": 3.75e-07,
      "logits/chosen": -0.7606137990951538,
      "logits/rejected": -0.8274203538894653,
      "logps/chosen": -1024.800048828125,
      "logps/rejected": -1341.73095703125,
      "loss": 0.6352,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.019309956580400467,
      "rewards/margins": 0.10660652071237564,
      "rewards/rejected": -0.1259164810180664,
      "step": 80
    },
    {
      "epoch": 0.45,
      "grad_norm": 5.606863838837641,
      "learning_rate": 3.355050358314172e-07,
      "logits/chosen": -0.7421795725822449,
      "logits/rejected": -0.8470423817634583,
      "logps/chosen": -994.8064575195312,
      "logps/rejected": -1336.379150390625,
      "loss": 0.6015,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.0030502979643642902,
      "rewards/margins": 0.22511807084083557,
      "rewards/rejected": -0.22816836833953857,
      "step": 90
    },
    {
      "epoch": 0.5,
      "grad_norm": 5.3547776029335505,
      "learning_rate": 2.934120444167326e-07,
      "logits/chosen": -0.8171275854110718,
      "logits/rejected": -0.8435714840888977,
      "logps/chosen": -1030.83984375,
      "logps/rejected": -1344.560791015625,
      "loss": 0.5643,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.050915710628032684,
      "rewards/margins": 0.47365838289260864,
      "rewards/rejected": -0.5245740413665771,
      "step": 100
    },
    {
      "epoch": 0.5,
      "eval_logits/chosen": -0.6995198726654053,
      "eval_logits/rejected": -0.8644800186157227,
      "eval_logps/chosen": -818.2396850585938,
      "eval_logps/rejected": -1334.0771484375,
      "eval_loss": 0.5890205502510071,
      "eval_rewards/accuracies": 0.7727272510528564,
      "eval_rewards/chosen": -0.04838310554623604,
      "eval_rewards/margins": 0.2467191368341446,
      "eval_rewards/rejected": -0.2951022684574127,
      "eval_runtime": 80.5531,
      "eval_samples_per_second": 8.392,
      "eval_steps_per_second": 0.273,
      "step": 100
    },
    {
      "epoch": 0.55,
      "grad_norm": 6.918657542547641,
      "learning_rate": 2.5e-07,
      "logits/chosen": -0.8259917497634888,
      "logits/rejected": -0.8844992518424988,
      "logps/chosen": -1003.9481201171875,
      "logps/rejected": -1435.4678955078125,
      "loss": 0.5411,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -0.06742595881223679,
      "rewards/margins": 0.6114541292190552,
      "rewards/rejected": -0.6788800954818726,
      "step": 110
    },
    {
      "epoch": 0.6,
      "grad_norm": 7.371942457502656,
      "learning_rate": 2.065879555832674e-07,
      "logits/chosen": -0.8082677125930786,
      "logits/rejected": -0.8919416666030884,
      "logps/chosen": -1007.8558349609375,
      "logps/rejected": -1447.9739990234375,
      "loss": 0.506,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": -0.1385834515094757,
      "rewards/margins": 0.7867234349250793,
      "rewards/rejected": -0.9253069162368774,
      "step": 120
    },
    {
      "epoch": 0.65,
      "grad_norm": 12.09684601520378,
      "learning_rate": 1.6449496416858282e-07,
      "logits/chosen": -0.8252215385437012,
      "logits/rejected": -0.907401442527771,
      "logps/chosen": -990.1673583984375,
      "logps/rejected": -1487.195068359375,
      "loss": 0.4781,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -0.21052339673042297,
      "rewards/margins": 1.0436497926712036,
      "rewards/rejected": -1.2541732788085938,
      "step": 130
    },
    {
      "epoch": 0.7,
      "grad_norm": 7.3453809585551495,
      "learning_rate": 1.2500000000000005e-07,
      "logits/chosen": -0.8709625005722046,
      "logits/rejected": -0.9511100053787231,
      "logps/chosen": -987.8455810546875,
      "logps/rejected": -1538.638916015625,
      "loss": 0.4291,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -0.31802910566329956,
      "rewards/margins": 1.2913882732391357,
      "rewards/rejected": -1.6094173192977905,
      "step": 140
    },
    {
      "epoch": 0.75,
      "grad_norm": 8.079843532621563,
      "learning_rate": 8.930309757836516e-08,
      "logits/chosen": -0.8827797770500183,
      "logits/rejected": -0.9422929883003235,
      "logps/chosen": -1121.2596435546875,
      "logps/rejected": -1523.8992919921875,
      "loss": 0.4159,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.4621273875236511,
      "rewards/margins": 1.4384998083114624,
      "rewards/rejected": -1.9006273746490479,
      "step": 150
    },
    {
      "epoch": 0.8,
      "grad_norm": 7.053680331045143,
      "learning_rate": 5.848888922025552e-08,
      "logits/chosen": -0.8414640426635742,
      "logits/rejected": -0.9282405972480774,
      "logps/chosen": -1008.7062377929688,
      "logps/rejected": -1539.466796875,
      "loss": 0.3966,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": -0.3745620548725128,
      "rewards/margins": 1.7526756525039673,
      "rewards/rejected": -2.1272377967834473,
      "step": 160
    },
    {
      "epoch": 0.85,
      "grad_norm": 8.515809533873794,
      "learning_rate": 3.349364905389032e-08,
      "logits/chosen": -0.8843591809272766,
      "logits/rejected": -0.9637457728385925,
      "logps/chosen": -1041.8538818359375,
      "logps/rejected": -1473.283203125,
      "loss": 0.3949,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": -0.4412936568260193,
      "rewards/margins": 1.4416576623916626,
      "rewards/rejected": -1.8829511404037476,
      "step": 170
    },
    {
      "epoch": 0.9,
      "grad_norm": 8.34230704065185,
      "learning_rate": 1.507684480352292e-08,
      "logits/chosen": -0.9421852827072144,
      "logits/rejected": -0.9663949012756348,
      "logps/chosen": -1073.7769775390625,
      "logps/rejected": -1609.543701171875,
      "loss": 0.3902,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.5003660917282104,
      "rewards/margins": 1.6850690841674805,
      "rewards/rejected": -2.1854348182678223,
      "step": 180
    },
    {
      "epoch": 0.95,
      "grad_norm": 8.155374618910328,
      "learning_rate": 3.798061746947995e-09,
      "logits/chosen": -0.8831602931022644,
      "logits/rejected": -0.9347723722457886,
      "logps/chosen": -1112.270263671875,
      "logps/rejected": -1539.087890625,
      "loss": 0.4006,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -0.5728045701980591,
      "rewards/margins": 1.4792672395706177,
      "rewards/rejected": -2.0520718097686768,
      "step": 190
    },
    {
      "epoch": 1.0,
      "grad_norm": 7.802479752642293,
      "learning_rate": 0.0,
      "logits/chosen": -0.8749731183052063,
      "logits/rejected": -0.9254159927368164,
      "logps/chosen": -1024.6395263671875,
      "logps/rejected": -1483.4818115234375,
      "loss": 0.3959,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -0.5080244541168213,
      "rewards/margins": 1.5510140657424927,
      "rewards/rejected": -2.0590386390686035,
      "step": 200
    },
    {
      "epoch": 1.0,
      "eval_logits/chosen": -0.8309964537620544,
      "eval_logits/rejected": -0.9202789068222046,
      "eval_logps/chosen": -859.0581665039062,
      "eval_logps/rejected": -1444.6895751953125,
      "eval_loss": 0.4459438920021057,
      "eval_rewards/accuracies": 0.8068181872367859,
      "eval_rewards/chosen": -0.4565673768520355,
      "eval_rewards/margins": 0.9446592926979065,
      "eval_rewards/rejected": -1.4012266397476196,
      "eval_runtime": 80.5966,
      "eval_samples_per_second": 8.387,
      "eval_steps_per_second": 0.273,
      "step": 200
    },
    {
      "epoch": 1.0,
      "step": 200,
      "total_flos": 0.0,
      "train_loss": 0.5449820566177368,
      "train_runtime": 3295.9797,
      "train_samples_per_second": 3.881,
      "train_steps_per_second": 0.061
    }
  ],
  "logging_steps": 10,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}