{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.968,
  "eval_steps": 100,
  "global_step": 248,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 2.0000000000000002e-07,
      "logits/chosen": 0.2908777594566345,
      "logits/rejected": 0.2437899112701416,
      "logps/chosen": -201.0093536376953,
      "logps/rejected": -168.08958435058594,
      "loss": 0.0011,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16,
      "learning_rate": 2.0000000000000003e-06,
      "logits/chosen": 0.1902419924736023,
      "logits/rejected": 0.1756160408258438,
      "logps/chosen": -197.62713623046875,
      "logps/rejected": -158.33740234375,
      "loss": 0.0012,
      "rewards/accuracies": 0.3680555522441864,
      "rewards/chosen": -0.0008540299604646862,
      "rewards/margins": 0.0005007751169614494,
      "rewards/rejected": -0.0013548050774261355,
      "step": 10
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.000000000000001e-06,
      "logits/chosen": -0.013944757170975208,
      "logits/rejected": 0.14085523784160614,
      "logps/chosen": -181.885009765625,
      "logps/rejected": -141.39883422851562,
      "loss": 0.0011,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.002464529126882553,
      "rewards/margins": 0.0010917274048551917,
      "rewards/rejected": -0.003556256415322423,
      "step": 20
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.993800445762451e-06,
      "logits/chosen": 0.13207468390464783,
      "logits/rejected": 0.1876787692308426,
      "logps/chosen": -170.35769653320312,
      "logps/rejected": -137.86196899414062,
      "loss": 0.0012,
      "rewards/accuracies": 0.39375001192092896,
      "rewards/chosen": -0.0012240457581356168,
      "rewards/margins": 0.00047588563757017255,
      "rewards/rejected": -0.0016999313374981284,
      "step": 30
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.944388344834205e-06,
      "logits/chosen": 0.10822992026805878,
      "logits/rejected": 0.14006157219409943,
      "logps/chosen": -189.98333740234375,
      "logps/rejected": -149.42813110351562,
      "loss": 0.0011,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": 0.0006204573437571526,
      "rewards/margins": 0.002387461019679904,
      "rewards/rejected": -0.0017670036759227514,
      "step": 40
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.8465431931347904e-06,
      "logits/chosen": 0.19466236233711243,
      "logits/rejected": 0.2593666613101959,
      "logps/chosen": -200.7144012451172,
      "logps/rejected": -158.9434814453125,
      "loss": 0.0011,
      "rewards/accuracies": 0.4437499940395355,
      "rewards/chosen": 0.0003794836229644716,
      "rewards/margins": 0.0014064621645957232,
      "rewards/rejected": -0.0010269784834235907,
      "step": 50
    },
    {
      "epoch": 0.96,
      "learning_rate": 4.702203692102539e-06,
      "logits/chosen": 0.12232597917318344,
      "logits/rejected": 0.16534267365932465,
      "logps/chosen": -186.4813995361328,
      "logps/rejected": -143.80760192871094,
      "loss": 0.001,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.0033827635925263166,
      "rewards/margins": 0.0028825027402490377,
      "rewards/rejected": 0.0005002607358619571,
      "step": 60
    },
    {
      "epoch": 1.12,
      "learning_rate": 4.514229781074239e-06,
      "logits/chosen": 0.1178612932562828,
      "logits/rejected": 0.13041642308235168,
      "logps/chosen": -209.6604766845703,
      "logps/rejected": -166.01910400390625,
      "loss": 0.0011,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.001676331041380763,
      "rewards/margins": 0.002085132524371147,
      "rewards/rejected": -0.0004088011628482491,
      "step": 70
    },
    {
      "epoch": 1.28,
      "learning_rate": 4.286345970517195e-06,
      "logits/chosen": 0.08851321786642075,
      "logits/rejected": 0.14383965730667114,
      "logps/chosen": -195.59994506835938,
      "logps/rejected": -156.7967071533203,
      "loss": 0.0011,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.0038293059915304184,
      "rewards/margins": 0.0034115822054445744,
      "rewards/rejected": 0.0004177238733973354,
      "step": 80
    },
    {
      "epoch": 1.44,
      "learning_rate": 4.023067544670082e-06,
      "logits/chosen": 0.03590596467256546,
      "logits/rejected": 0.12467700242996216,
      "logps/chosen": -194.0576629638672,
      "logps/rejected": -146.26744079589844,
      "loss": 0.0011,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": 0.002118715550750494,
      "rewards/margins": 0.002578428713604808,
      "rewards/rejected": -0.0004597128718160093,
      "step": 90
    },
    {
      "epoch": 1.6,
      "learning_rate": 3.7296110958116845e-06,
      "logits/chosen": 0.14186295866966248,
      "logits/rejected": 0.1782732456922531,
      "logps/chosen": -181.44393920898438,
      "logps/rejected": -144.7650146484375,
      "loss": 0.001,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.0033587156794965267,
      "rewards/margins": 0.0031067118979990482,
      "rewards/rejected": 0.0002520036359783262,
      "step": 100
    },
    {
      "epoch": 1.6,
      "eval_logits/chosen": -0.04001983627676964,
      "eval_logits/rejected": 0.05830775573849678,
      "eval_logps/chosen": -307.1775207519531,
      "eval_logps/rejected": -279.2533874511719,
      "eval_loss": 0.0017920270329341292,
      "eval_rewards/accuracies": 0.47850000858306885,
      "eval_rewards/chosen": -0.003455465892329812,
      "eval_rewards/margins": -0.0011921715922653675,
      "eval_rewards/rejected": -0.002263294532895088,
      "eval_runtime": 412.7782,
      "eval_samples_per_second": 4.845,
      "eval_steps_per_second": 1.211,
      "step": 100
    },
    {
      "epoch": 1.76,
      "learning_rate": 3.4117911628292944e-06,
      "logits/chosen": 0.1313779205083847,
      "logits/rejected": 0.07039429247379303,
      "logps/chosen": -194.87823486328125,
      "logps/rejected": -160.7736358642578,
      "loss": 0.001,
      "rewards/accuracies": 0.45625001192092896,
      "rewards/chosen": 0.005443253554403782,
      "rewards/margins": 0.004203255753964186,
      "rewards/rejected": 0.0012399973347783089,
      "step": 110
    },
    {
      "epoch": 1.92,
      "learning_rate": 3.075905022087675e-06,
      "logits/chosen": 0.12658381462097168,
      "logits/rejected": 0.23149845004081726,
      "logps/chosen": -178.28390502929688,
      "logps/rejected": -143.67715454101562,
      "loss": 0.001,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.004203868098556995,
      "rewards/margins": 0.004237414337694645,
      "rewards/rejected": -3.354633372509852e-05,
      "step": 120
    },
    {
      "epoch": 2.08,
      "learning_rate": 2.728607913349464e-06,
      "logits/chosen": 0.15758894383907318,
      "logits/rejected": 0.2553151249885559,
      "logps/chosen": -181.8848114013672,
      "logps/rejected": -135.99990844726562,
      "loss": 0.001,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.009758567437529564,
      "rewards/margins": 0.006436445750296116,
      "rewards/rejected": 0.0033221219200640917,
      "step": 130
    },
    {
      "epoch": 2.24,
      "learning_rate": 2.376781173017589e-06,
      "logits/chosen": 0.1286291778087616,
      "logits/rejected": 0.17975321412086487,
      "logps/chosen": -187.51390075683594,
      "logps/rejected": -150.4195556640625,
      "loss": 0.0009,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.007837988436222076,
      "rewards/margins": 0.007232234813272953,
      "rewards/rejected": 0.0006057542050257325,
      "step": 140
    },
    {
      "epoch": 2.4,
      "learning_rate": 2.0273958875043877e-06,
      "logits/chosen": 0.15904466807842255,
      "logits/rejected": 0.0839221328496933,
      "logps/chosen": -204.20040893554688,
      "logps/rejected": -167.21383666992188,
      "loss": 0.001,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.006473573390394449,
      "rewards/margins": 0.005708726122975349,
      "rewards/rejected": 0.0007648469763807952,
      "step": 150
    },
    {
      "epoch": 2.56,
      "learning_rate": 1.6873747682962393e-06,
      "logits/chosen": 0.024461204186081886,
      "logits/rejected": 0.16477885842323303,
      "logps/chosen": -176.70254516601562,
      "logps/rejected": -132.0756072998047,
      "loss": 0.0009,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.010805860161781311,
      "rewards/margins": 0.007954512722790241,
      "rewards/rejected": 0.002851347904652357,
      "step": 160
    },
    {
      "epoch": 2.72,
      "learning_rate": 1.363454985517803e-06,
      "logits/chosen": 0.07506619393825531,
      "logits/rejected": 0.18663427233695984,
      "logps/chosen": -182.42135620117188,
      "logps/rejected": -143.20504760742188,
      "loss": 0.0009,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.010290954262018204,
      "rewards/margins": 0.008597767911851406,
      "rewards/rejected": 0.0016931863501667976,
      "step": 170
    },
    {
      "epoch": 2.88,
      "learning_rate": 1.062054677808238e-06,
      "logits/chosen": 0.1575741171836853,
      "logits/rejected": 0.14560917019844055,
      "logps/chosen": -210.0597686767578,
      "logps/rejected": -157.42813110351562,
      "loss": 0.0008,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.011623604223132133,
      "rewards/margins": 0.008374488912522793,
      "rewards/rejected": 0.0032491139136254787,
      "step": 180
    },
    {
      "epoch": 3.04,
      "learning_rate": 7.891457834794711e-07,
      "logits/chosen": 0.21218101680278778,
      "logits/rejected": 0.16236719489097595,
      "logps/chosen": -174.3014373779297,
      "logps/rejected": -142.51658630371094,
      "loss": 0.001,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.008685302920639515,
      "rewards/margins": 0.005553290247917175,
      "rewards/rejected": 0.0031320122070610523,
      "step": 190
    },
    {
      "epoch": 3.2,
      "learning_rate": 5.501357126768117e-07,
      "logits/chosen": 0.043852098286151886,
      "logits/rejected": 0.14689919352531433,
      "logps/chosen": -192.6407012939453,
      "logps/rejected": -153.15914916992188,
      "loss": 0.0009,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": 0.010076276957988739,
      "rewards/margins": 0.0075560063123703,
      "rewards/rejected": 0.0025202720426023006,
      "step": 200
    },
    {
      "epoch": 3.2,
      "eval_logits/chosen": -0.05530371889472008,
      "eval_logits/rejected": 0.045542649924755096,
      "eval_logps/chosen": -307.6504211425781,
      "eval_logps/rejected": -279.6910400390625,
      "eval_loss": 0.0019263506401330233,
      "eval_rewards/accuracies": 0.45649999380111694,
      "eval_rewards/chosen": -0.008184487000107765,
      "eval_rewards/margins": -0.0015445188619196415,
      "eval_rewards/rejected": -0.006639969069510698,
      "eval_runtime": 412.4498,
      "eval_samples_per_second": 4.849,
      "eval_steps_per_second": 1.212,
      "step": 200
    },
    {
      "epoch": 3.36,
      "learning_rate": 3.4976020508682345e-07,
      "logits/chosen": 0.14779892563819885,
      "logits/rejected": 0.2323513776063919,
      "logps/chosen": -182.7720489501953,
      "logps/rejected": -142.44332885742188,
      "loss": 0.0009,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": 0.009827367961406708,
      "rewards/margins": 0.00690328236669302,
      "rewards/rejected": 0.0029240844305604696,
      "step": 210
    },
    {
      "epoch": 3.52,
      "learning_rate": 1.9198949610721273e-07,
      "logits/chosen": 0.1586412489414215,
      "logits/rejected": 0.13802394270896912,
      "logps/chosen": -198.50106811523438,
      "logps/rejected": -160.4164581298828,
      "loss": 0.0009,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": 0.010713080875575542,
      "rewards/margins": 0.006927688606083393,
      "rewards/rejected": 0.0037853927351534367,
      "step": 220
    },
    {
      "epoch": 3.68,
      "learning_rate": 7.994965069994143e-08,
      "logits/chosen": 0.07671042531728745,
      "logits/rejected": 0.1526561677455902,
      "logps/chosen": -182.83004760742188,
      "logps/rejected": -152.03683471679688,
      "loss": 0.0009,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.00937131978571415,
      "rewards/margins": 0.006407036446034908,
      "rewards/rejected": 0.002964283572509885,
      "step": 230
    },
    {
      "epoch": 3.84,
      "learning_rate": 1.5860623616664183e-08,
      "logits/chosen": 0.07526861876249313,
      "logits/rejected": 0.0892513170838356,
      "logps/chosen": -178.50074768066406,
      "logps/rejected": -141.47157287597656,
      "loss": 0.001,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": 0.012702028267085552,
      "rewards/margins": 0.005834975745528936,
      "rewards/rejected": 0.006867053918540478,
      "step": 240
    },
    {
      "epoch": 3.97,
      "step": 248,
      "total_flos": 0.0,
      "train_loss": 0.0009980735903222775,
      "train_runtime": 2694.3955,
      "train_samples_per_second": 1.485,
      "train_steps_per_second": 0.092
    }
  ],
  "logging_steps": 10,
  "max_steps": 248,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}