{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 204,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 4.7662348778659345,
      "learning_rate": 2.3809523809523807e-08,
      "logits/chosen": -0.6931805610656738,
      "logits/rejected": -0.752622127532959,
      "logps/chosen": -943.81298828125,
      "logps/rejected": -1116.720458984375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.05,
      "grad_norm": 4.669662834206546,
      "learning_rate": 2.3809523809523806e-07,
      "logits/chosen": -0.696596622467041,
      "logits/rejected": -0.8358520269393921,
      "logps/chosen": -951.6690673828125,
      "logps/rejected": -1368.560791015625,
      "loss": 0.6929,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 3.076584107475355e-05,
      "rewards/margins": 0.00013853682321496308,
      "rewards/rejected": -0.00010777104762382805,
      "step": 10
    },
    {
      "epoch": 0.1,
      "grad_norm": 4.756013817746375,
      "learning_rate": 4.761904761904761e-07,
      "logits/chosen": -0.6912363767623901,
      "logits/rejected": -0.8333228826522827,
      "logps/chosen": -956.8513793945312,
      "logps/rejected": -1383.606689453125,
      "loss": 0.6927,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": 0.001694918260909617,
      "rewards/margins": 0.0018514245748519897,
      "rewards/rejected": -0.000156506517669186,
      "step": 20
    },
    {
      "epoch": 0.15,
      "grad_norm": 4.691044502544357,
      "learning_rate": 4.970219740227693e-07,
      "logits/chosen": -0.6911897659301758,
      "logits/rejected": -0.81409752368927,
      "logps/chosen": -969.5514526367188,
      "logps/rejected": -1438.34423828125,
      "loss": 0.6899,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": 0.0049940794706344604,
      "rewards/margins": 0.006473910063505173,
      "rewards/rejected": -0.0014798302436247468,
      "step": 30
    },
    {
      "epoch": 0.2,
      "grad_norm": 4.743099548922799,
      "learning_rate": 4.868186180746791e-07,
      "logits/chosen": -0.750874400138855,
      "logits/rejected": -0.8206331133842468,
      "logps/chosen": -997.53515625,
      "logps/rejected": -1345.9227294921875,
      "loss": 0.6848,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.010853204876184464,
      "rewards/margins": 0.017669402062892914,
      "rewards/rejected": -0.00681619718670845,
      "step": 40
    },
    {
      "epoch": 0.25,
      "grad_norm": 4.612800219757607,
      "learning_rate": 4.6965306126428705e-07,
      "logits/chosen": -0.7323765158653259,
      "logits/rejected": -0.8034559488296509,
      "logps/chosen": -979.4150390625,
      "logps/rejected": -1321.0777587890625,
      "loss": 0.6766,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": 0.02218214049935341,
      "rewards/margins": 0.03583066910505295,
      "rewards/rejected": -0.01364852488040924,
      "step": 50
    },
    {
      "epoch": 0.29,
      "grad_norm": 4.734300185901907,
      "learning_rate": 4.460299516441776e-07,
      "logits/chosen": -0.7304792404174805,
      "logits/rejected": -0.8109104037284851,
      "logps/chosen": -956.0986328125,
      "logps/rejected": -1335.644775390625,
      "loss": 0.6685,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.023367006331682205,
      "rewards/margins": 0.05144554376602173,
      "rewards/rejected": -0.028078541159629822,
      "step": 60
    },
    {
      "epoch": 0.34,
      "grad_norm": 4.976984827730564,
      "learning_rate": 4.166437820523908e-07,
      "logits/chosen": -0.7663556933403015,
      "logits/rejected": -0.8029876947402954,
      "logps/chosen": -1034.396484375,
      "logps/rejected": -1318.064453125,
      "loss": 0.6545,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": 0.021550346165895462,
      "rewards/margins": 0.07895718514919281,
      "rewards/rejected": -0.05740683525800705,
      "step": 70
    },
    {
      "epoch": 0.39,
      "grad_norm": 5.209420701938796,
      "learning_rate": 3.8235847280454626e-07,
      "logits/chosen": -0.7715634107589722,
      "logits/rejected": -0.8099991679191589,
      "logps/chosen": -982.25390625,
      "logps/rejected": -1321.496826171875,
      "loss": 0.6428,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.03210069611668587,
      "rewards/margins": 0.12542924284934998,
      "rewards/rejected": -0.093328557908535,
      "step": 80
    },
    {
      "epoch": 0.44,
      "grad_norm": 6.26348159013905,
      "learning_rate": 3.4418197340879627e-07,
      "logits/chosen": -0.7251774668693542,
      "logits/rejected": -0.852216899394989,
      "logps/chosen": -954.3899536132812,
      "logps/rejected": -1384.3223876953125,
      "loss": 0.6149,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.004368312656879425,
      "rewards/margins": 0.15939339995384216,
      "rewards/rejected": -0.16376173496246338,
      "step": 90
    },
    {
      "epoch": 0.49,
      "grad_norm": 6.478126566665074,
      "learning_rate": 3.032366299846039e-07,
      "logits/chosen": -0.7600661516189575,
      "logits/rejected": -0.8617668151855469,
      "logps/chosen": -996.2058715820312,
      "logps/rejected": -1373.546630859375,
      "loss": 0.583,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.0174024049192667,
      "rewards/margins": 0.29133570194244385,
      "rewards/rejected": -0.3087380826473236,
      "step": 100
    },
    {
      "epoch": 0.49,
      "eval_logits/chosen": -0.8420065641403198,
      "eval_logits/rejected": -0.8345804214477539,
      "eval_logps/chosen": -961.308349609375,
      "eval_logps/rejected": -1141.75390625,
      "eval_loss": 0.6358456015586853,
      "eval_rewards/accuracies": 0.6442307829856873,
      "eval_rewards/chosen": -0.03915131092071533,
      "eval_rewards/margins": 0.10087858885526657,
      "eval_rewards/rejected": -0.1400299221277237,
      "eval_runtime": 47.8789,
      "eval_samples_per_second": 8.689,
      "eval_steps_per_second": 0.272,
      "step": 100
    },
    {
      "epoch": 0.54,
      "grad_norm": 5.686513671618474,
      "learning_rate": 2.6072618954988863e-07,
      "logits/chosen": -0.794592559337616,
      "logits/rejected": -0.9113503694534302,
      "logps/chosen": -995.4345703125,
      "logps/rejected": -1395.327392578125,
      "loss": 0.5316,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -0.08229684829711914,
      "rewards/margins": 0.6027085781097412,
      "rewards/rejected": -0.6850053668022156,
      "step": 110
    },
    {
      "epoch": 0.59,
      "grad_norm": 6.476191805092081,
      "learning_rate": 2.1790041121336222e-07,
      "logits/chosen": -0.8089746236801147,
      "logits/rejected": -0.8950665593147278,
      "logps/chosen": -932.7145385742188,
      "logps/rejected": -1423.200439453125,
      "loss": 0.4985,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": -0.09183833748102188,
      "rewards/margins": 0.9589554667472839,
      "rewards/rejected": -1.0507938861846924,
      "step": 120
    },
    {
      "epoch": 0.64,
      "grad_norm": 6.264931876023589,
      "learning_rate": 1.7601832466317766e-07,
      "logits/chosen": -0.8219178915023804,
      "logits/rejected": -0.9162119626998901,
      "logps/chosen": -952.8595581054688,
      "logps/rejected": -1457.125732421875,
      "loss": 0.4458,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.19050545990467072,
      "rewards/margins": 1.1150668859481812,
      "rewards/rejected": -1.305572271347046,
      "step": 130
    },
    {
      "epoch": 0.69,
      "grad_norm": 8.259230762638646,
      "learning_rate": 1.3631121611097362e-07,
      "logits/chosen": -0.7936364412307739,
      "logits/rejected": -0.8935419321060181,
      "logps/chosen": -1018.28076171875,
      "logps/rejected": -1476.8875732421875,
      "loss": 0.4301,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.3831838369369507,
      "rewards/margins": 1.1915802955627441,
      "rewards/rejected": -1.5747641324996948,
      "step": 140
    },
    {
      "epoch": 0.74,
      "grad_norm": 10.79519502625416,
      "learning_rate": 9.994642986290797e-08,
      "logits/chosen": -0.8472112417221069,
      "logits/rejected": -0.9009857177734375,
      "logps/chosen": -1037.80615234375,
      "logps/rejected": -1523.073974609375,
      "loss": 0.4218,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.5019618272781372,
      "rewards/margins": 1.2838261127471924,
      "rewards/rejected": -1.7857879400253296,
      "step": 150
    },
    {
      "epoch": 0.78,
      "grad_norm": 17.681124035346016,
      "learning_rate": 6.799304971075381e-08,
      "logits/chosen": -0.8725306391716003,
      "logits/rejected": -0.9383252263069153,
      "logps/chosen": -1061.177490234375,
      "logps/rejected": -1531.541748046875,
      "loss": 0.3935,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.48161983489990234,
      "rewards/margins": 2.3118531703948975,
      "rewards/rejected": -2.7934727668762207,
      "step": 160
    },
    {
      "epoch": 0.83,
      "grad_norm": 10.740688995016072,
      "learning_rate": 4.1390469071538175e-08,
      "logits/chosen": -0.8565770387649536,
      "logits/rejected": -0.917068600654602,
      "logps/chosen": -1082.66064453125,
      "logps/rejected": -1601.59814453125,
      "loss": 0.395,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -0.5443286895751953,
      "rewards/margins": 1.6077324151992798,
      "rewards/rejected": -2.1520609855651855,
      "step": 170
    },
    {
      "epoch": 0.88,
      "grad_norm": 8.295318107749528,
      "learning_rate": 2.0920773878248837e-08,
      "logits/chosen": -0.8446859121322632,
      "logits/rejected": -0.9334647059440613,
      "logps/chosen": -1048.026123046875,
      "logps/rejected": -1580.143798828125,
      "loss": 0.3634,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.5169867277145386,
      "rewards/margins": 1.9314336776733398,
      "rewards/rejected": -2.448420286178589,
      "step": 180
    },
    {
      "epoch": 0.93,
      "grad_norm": 9.983013656964681,
      "learning_rate": 7.185750133542168e-09,
      "logits/chosen": -0.8189274072647095,
      "logits/rejected": -0.9247575998306274,
      "logps/chosen": -999.5103759765625,
      "logps/rejected": -1621.4892578125,
      "loss": 0.3857,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.4853218197822571,
      "rewards/margins": 1.7728582620620728,
      "rewards/rejected": -2.2581801414489746,
      "step": 190
    },
    {
      "epoch": 0.98,
      "grad_norm": 7.545281401464384,
      "learning_rate": 5.891920784984184e-10,
      "logits/chosen": -0.864946722984314,
      "logits/rejected": -0.9326463937759399,
      "logps/chosen": -1065.3690185546875,
      "logps/rejected": -1624.238525390625,
      "loss": 0.3768,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -0.5211979150772095,
      "rewards/margins": 2.2624778747558594,
      "rewards/rejected": -2.7836756706237793,
      "step": 200
    },
    {
      "epoch": 0.98,
      "eval_logits/chosen": -0.8903875350952148,
      "eval_logits/rejected": -0.8883093595504761,
      "eval_logps/chosen": -986.139892578125,
      "eval_logps/rejected": -1205.26025390625,
      "eval_loss": 0.53563392162323,
      "eval_rewards/accuracies": 0.7019230723381042,
      "eval_rewards/chosen": -0.28746694326400757,
      "eval_rewards/margins": 0.48762574791908264,
      "eval_rewards/rejected": -0.7750927209854126,
      "eval_runtime": 47.9973,
      "eval_samples_per_second": 8.667,
      "eval_steps_per_second": 0.271,
      "step": 200
    },
    {
      "epoch": 1.0,
      "step": 204,
      "total_flos": 0.0,
      "train_loss": 0.539328022330415,
      "train_runtime": 3313.1548,
      "train_samples_per_second": 3.939,
      "train_steps_per_second": 0.062
    }
  ],
  "logging_steps": 10,
  "max_steps": 204,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}