{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9963674104826155,
  "eval_steps": 1000,
  "global_step": 240,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0041515308770108976,
      "grad_norm": 6.2112270362028115,
      "learning_rate": 2.083333333333333e-08,
      "logits/chosen": -2.6958627700805664,
      "logits/rejected": -2.650068759918213,
      "logps/chosen": -403.2443542480469,
      "logps/rejected": -397.0637512207031,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.04151530877010898,
      "grad_norm": 6.539288745411817,
      "learning_rate": 2.0833333333333333e-07,
      "logits/chosen": -2.690112352371216,
      "logits/rejected": -2.6779024600982666,
      "logps/chosen": -365.9743957519531,
      "logps/rejected": -393.799560546875,
      "loss": 0.6925,
      "rewards/accuracies": 0.5303819179534912,
      "rewards/chosen": 0.0005260682082735002,
      "rewards/margins": 0.0014649044023826718,
      "rewards/rejected": -0.0009388362523168325,
      "step": 10
    },
    {
      "epoch": 0.08303061754021795,
      "grad_norm": 6.876570939434779,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -2.6719064712524414,
      "logits/rejected": -2.6511917114257812,
      "logps/chosen": -357.5186462402344,
      "logps/rejected": -380.5492858886719,
      "loss": 0.6749,
      "rewards/accuracies": 0.848437488079071,
      "rewards/chosen": 0.018601149320602417,
      "rewards/margins": 0.03796255216002464,
      "rewards/rejected": -0.019361400976777077,
      "step": 20
    },
    {
      "epoch": 0.12454592631032693,
      "grad_norm": 8.242981852125217,
      "learning_rate": 4.990486745229364e-07,
      "logits/chosen": -2.653783082962036,
      "logits/rejected": -2.639296293258667,
      "logps/chosen": -355.69207763671875,
      "logps/rejected": -415.68768310546875,
      "loss": 0.5772,
      "rewards/accuracies": 0.867968738079071,
      "rewards/chosen": 0.0034460537135601044,
      "rewards/margins": 0.2814808487892151,
      "rewards/rejected": -0.2780347764492035,
      "step": 30
    },
    {
      "epoch": 0.1660612350804359,
      "grad_norm": 14.409074768095572,
      "learning_rate": 4.932612176449559e-07,
      "logits/chosen": -2.6512644290924072,
      "logits/rejected": -2.613352060317993,
      "logps/chosen": -496.20770263671875,
      "logps/rejected": -619.66943359375,
      "loss": 0.4041,
      "rewards/accuracies": 0.848437488079071,
      "rewards/chosen": -1.1875219345092773,
      "rewards/margins": 1.0682841539382935,
      "rewards/rejected": -2.2558062076568604,
      "step": 40
    },
    {
      "epoch": 0.2075765438505449,
      "grad_norm": 14.911382681454745,
      "learning_rate": 4.823368810567056e-07,
      "logits/chosen": -2.64001202583313,
      "logits/rejected": -2.6261935234069824,
      "logps/chosen": -600.037109375,
      "logps/rejected": -856.2901611328125,
      "loss": 0.3072,
      "rewards/accuracies": 0.86328125,
      "rewards/chosen": -2.279493570327759,
      "rewards/margins": 2.3056600093841553,
      "rewards/rejected": -4.5851545333862305,
      "step": 50
    },
    {
      "epoch": 0.24909185262065386,
      "grad_norm": 18.472917689948186,
      "learning_rate": 4.6650635094610966e-07,
      "logits/chosen": -2.5924713611602783,
      "logits/rejected": -2.579529285430908,
      "logps/chosen": -587.2591552734375,
      "logps/rejected": -904.9329833984375,
      "loss": 0.2658,
      "rewards/accuracies": 0.88671875,
      "rewards/chosen": -2.1298751831054688,
      "rewards/margins": 2.945924758911133,
      "rewards/rejected": -5.07580041885376,
      "step": 60
    },
    {
      "epoch": 0.29060716139076287,
      "grad_norm": 12.678464521005063,
      "learning_rate": 4.461039162298939e-07,
      "logits/chosen": -2.2207727432250977,
      "logits/rejected": -1.98198664188385,
      "logps/chosen": -591.1898803710938,
      "logps/rejected": -989.9195556640625,
      "loss": 0.2276,
      "rewards/accuracies": 0.9046875238418579,
      "rewards/chosen": -2.155735969543457,
      "rewards/margins": 3.7345290184020996,
      "rewards/rejected": -5.890264987945557,
      "step": 70
    },
    {
      "epoch": 0.3321224701608718,
      "grad_norm": 14.772965954120863,
      "learning_rate": 4.2156040946718343e-07,
      "logits/chosen": -1.5045950412750244,
      "logits/rejected": -0.8392450213432312,
      "logps/chosen": -616.548828125,
      "logps/rejected": -1064.140869140625,
      "loss": 0.1988,
      "rewards/accuracies": 0.914843738079071,
      "rewards/chosen": -2.4090540409088135,
      "rewards/margins": 4.233697891235352,
      "rewards/rejected": -6.642751216888428,
      "step": 80
    },
    {
      "epoch": 0.3736377789309808,
      "grad_norm": 16.72422342704544,
      "learning_rate": 3.933941090877615e-07,
      "logits/chosen": -1.3608930110931396,
      "logits/rejected": -0.5576863288879395,
      "logps/chosen": -595.0042724609375,
      "logps/rejected": -1064.654052734375,
      "loss": 0.2026,
      "rewards/accuracies": 0.91015625,
      "rewards/chosen": -2.351076126098633,
      "rewards/margins": 4.467952728271484,
      "rewards/rejected": -6.819028377532959,
      "step": 90
    },
    {
      "epoch": 0.4151530877010898,
      "grad_norm": 9.743291843330928,
      "learning_rate": 3.6219979505011555e-07,
      "logits/chosen": -0.8342965245246887,
      "logits/rejected": -0.012925502844154835,
      "logps/chosen": -646.4923706054688,
      "logps/rejected": -1148.498779296875,
      "loss": 0.1867,
      "rewards/accuracies": 0.9195312261581421,
      "rewards/chosen": -2.8086471557617188,
      "rewards/margins": 4.717282295227051,
      "rewards/rejected": -7.525929927825928,
      "step": 100
    },
    {
      "epoch": 0.4566683964711988,
      "grad_norm": 11.81658995353539,
      "learning_rate": 3.286361890379034e-07,
      "logits/chosen": -0.1246568113565445,
      "logits/rejected": 0.6437393426895142,
      "logps/chosen": -648.837890625,
      "logps/rejected": -1166.3304443359375,
      "loss": 0.1785,
      "rewards/accuracies": 0.917187511920929,
      "rewards/chosen": -2.7460227012634277,
      "rewards/margins": 4.996233940124512,
      "rewards/rejected": -7.742256164550781,
      "step": 110
    },
    {
      "epoch": 0.49818370524130773,
      "grad_norm": 9.286297855632633,
      "learning_rate": 2.934120444167326e-07,
      "logits/chosen": -0.19480545818805695,
      "logits/rejected": 0.7439680695533752,
      "logps/chosen": -654.71875,
      "logps/rejected": -1212.785888671875,
      "loss": 0.1786,
      "rewards/accuracies": 0.91796875,
      "rewards/chosen": -2.892089366912842,
      "rewards/margins": 5.356635093688965,
      "rewards/rejected": -8.248723983764648,
      "step": 120
    },
    {
      "epoch": 0.5396990140114167,
      "grad_norm": 12.2921471616094,
      "learning_rate": 2.5727117968577785e-07,
      "logits/chosen": -0.4111465513706207,
      "logits/rejected": 0.8093876838684082,
      "logps/chosen": -645.12109375,
      "logps/rejected": -1192.375244140625,
      "loss": 0.1676,
      "rewards/accuracies": 0.9234374761581421,
      "rewards/chosen": -2.742527961730957,
      "rewards/margins": 5.263998031616211,
      "rewards/rejected": -8.006526947021484,
      "step": 130
    },
    {
      "epoch": 0.5812143227815257,
      "grad_norm": 17.430656596120464,
      "learning_rate": 2.209767714686924e-07,
      "logits/chosen": -0.029587041586637497,
      "logits/rejected": 1.3743274211883545,
      "logps/chosen": -648.7828979492188,
      "logps/rejected": -1265.0289306640625,
      "loss": 0.1632,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -2.7970809936523438,
      "rewards/margins": 5.923464775085449,
      "rewards/rejected": -8.720545768737793,
      "step": 140
    },
    {
      "epoch": 0.6227296315516346,
      "grad_norm": 9.425394796778923,
      "learning_rate": 1.8529523872436977e-07,
      "logits/chosen": -0.18061885237693787,
      "logits/rejected": 1.3279445171356201,
      "logps/chosen": -643.9744873046875,
      "logps/rejected": -1251.9317626953125,
      "loss": 0.1642,
      "rewards/accuracies": 0.9273437261581421,
      "rewards/chosen": -2.7806482315063477,
      "rewards/margins": 5.8400492668151855,
      "rewards/rejected": -8.620697975158691,
      "step": 150
    },
    {
      "epoch": 0.6642449403217436,
      "grad_norm": 8.084677706157798,
      "learning_rate": 1.5098005849021078e-07,
      "logits/chosen": -0.20541512966156006,
      "logits/rejected": 1.3262333869934082,
      "logps/chosen": -645.2593994140625,
      "logps/rejected": -1234.405517578125,
      "loss": 0.1618,
      "rewards/accuracies": 0.907031238079071,
      "rewards/chosen": -2.804506301879883,
      "rewards/margins": 5.688388347625732,
      "rewards/rejected": -8.492895126342773,
      "step": 160
    },
    {
      "epoch": 0.7057602490918526,
      "grad_norm": 10.460249704683227,
      "learning_rate": 1.1875585491635998e-07,
      "logits/chosen": -0.6104969382286072,
      "logits/rejected": 1.1572777032852173,
      "logps/chosen": -623.3721923828125,
      "logps/rejected": -1259.294189453125,
      "loss": 0.1573,
      "rewards/accuracies": 0.9203125238418579,
      "rewards/chosen": -2.5536274909973145,
      "rewards/margins": 6.065129280090332,
      "rewards/rejected": -8.618757247924805,
      "step": 170
    },
    {
      "epoch": 0.7472755578619616,
      "grad_norm": 12.223705856186251,
      "learning_rate": 8.930309757836516e-08,
      "logits/chosen": -0.717302680015564,
      "logits/rejected": 1.1799119710922241,
      "logps/chosen": -645.1275634765625,
      "logps/rejected": -1283.7489013671875,
      "loss": 0.1616,
      "rewards/accuracies": 0.934374988079071,
      "rewards/chosen": -2.7811636924743652,
      "rewards/margins": 6.2163591384887695,
      "rewards/rejected": -8.997522354125977,
      "step": 180
    },
    {
      "epoch": 0.7887908666320705,
      "grad_norm": 9.676735610238229,
      "learning_rate": 6.324373218975104e-08,
      "logits/chosen": -0.8794542551040649,
      "logits/rejected": 0.99409419298172,
      "logps/chosen": -619.7286987304688,
      "logps/rejected": -1276.9617919921875,
      "loss": 0.1608,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -2.615447521209717,
      "rewards/margins": 6.301668643951416,
      "rewards/rejected": -8.917116165161133,
      "step": 190
    },
    {
      "epoch": 0.8303061754021795,
      "grad_norm": 11.57113935464218,
      "learning_rate": 4.112804714676593e-08,
      "logits/chosen": -0.7840622663497925,
      "logits/rejected": 1.0056122541427612,
      "logps/chosen": -637.8081665039062,
      "logps/rejected": -1261.2838134765625,
      "loss": 0.162,
      "rewards/accuracies": 0.9281250238418579,
      "rewards/chosen": -2.6473495960235596,
      "rewards/margins": 6.003285884857178,
      "rewards/rejected": -8.650635719299316,
      "step": 200
    },
    {
      "epoch": 0.8718214841722886,
      "grad_norm": 13.665084975047886,
      "learning_rate": 2.3423053240837514e-08,
      "logits/chosen": -0.6102296710014343,
      "logits/rejected": 1.163267731666565,
      "logps/chosen": -638.1444091796875,
      "logps/rejected": -1243.947265625,
      "loss": 0.1576,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": -2.7065281867980957,
      "rewards/margins": 5.835700988769531,
      "rewards/rejected": -8.542229652404785,
      "step": 210
    },
    {
      "epoch": 0.9133367929423976,
      "grad_norm": 10.802886491160189,
      "learning_rate": 1.0502621921127774e-08,
      "logits/chosen": -0.5309673547744751,
      "logits/rejected": 1.326080560684204,
      "logps/chosen": -637.7506713867188,
      "logps/rejected": -1279.3565673828125,
      "loss": 0.1579,
      "rewards/accuracies": 0.922656238079071,
      "rewards/chosen": -2.6862006187438965,
      "rewards/margins": 6.1917243003845215,
      "rewards/rejected": -8.877924919128418,
      "step": 220
    },
    {
      "epoch": 0.9548521017125065,
      "grad_norm": 10.752955125049585,
      "learning_rate": 2.639590354763882e-09,
      "logits/chosen": -0.5724472403526306,
      "logits/rejected": 1.2889587879180908,
      "logps/chosen": -629.98779296875,
      "logps/rejected": -1255.991943359375,
      "loss": 0.1579,
      "rewards/accuracies": 0.94140625,
      "rewards/chosen": -2.661499500274658,
      "rewards/margins": 6.085452556610107,
      "rewards/rejected": -8.746953010559082,
      "step": 230
    },
    {
      "epoch": 0.9963674104826155,
      "grad_norm": 9.420585318893654,
      "learning_rate": 0.0,
      "logits/chosen": -0.5639629364013672,
      "logits/rejected": 1.3717336654663086,
      "logps/chosen": -631.6014404296875,
      "logps/rejected": -1273.350341796875,
      "loss": 0.1514,
      "rewards/accuracies": 0.934374988079071,
      "rewards/chosen": -2.7341790199279785,
      "rewards/margins": 6.176712989807129,
      "rewards/rejected": -8.91089153289795,
      "step": 240
    },
    {
      "epoch": 0.9963674104826155,
      "step": 240,
      "total_flos": 0.0,
      "train_loss": 0.250737202167511,
      "train_runtime": 29028.2374,
      "train_samples_per_second": 4.248,
      "train_steps_per_second": 0.008
    }
  ],
  "logging_steps": 10,
  "max_steps": 240,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}