RikkiXu committed on
Commit 0696dde (1 parent: b8b36a7)

Model save

all_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 2.0,
- "train_loss": 0.6161670185949492,
- "train_runtime": 9955.6802,
+ "train_loss": 0.26489045179408527,
+ "train_runtime": 4520.7899,
  "train_samples": 47095,
- "train_samples_per_second": 9.461,
- "train_steps_per_second": 0.037
+ "train_samples_per_second": 20.835,
+ "train_steps_per_second": 0.081
  }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0378e033bd057ff4230c269dec32f7e6a2ae0600e579fc309e400f7996014301
+ oid sha256:89021f925795319ec6cd79c57490ff100b4d209128279330c40d869a1257ea07
  size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0e0f203f7c24487720e7e86483d08c9428bd96841fa9d4427eaa05442d00a663
+ oid sha256:cc9bb6d1ff6e5e594487f490ec146b7666201b9e396d8c36acadcf28b8944e2e
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2c5591b65d2ba8c3131411b497a8c0a8d0af91ce6f9c61707543a7ee405946ec
+ oid sha256:d64a4a2eb8fd994c6c2e00218546b04c07ec10f9918e36094d7621932e29d265
  size 4540516344
runs/Jun20_09-58-48_n136-100-194/events.out.tfevents.1718848801.n136-100-194.155161.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ad66b0fb63b414c906409ee8053960f3cf88465d60719b79a58c1235bc60df21
- size 11813
+ oid sha256:c162588195abc77ed21a2660c04be8b902d7ab73b7d3519958cc814c5462585c
+ size 16295
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 2.0,
- "train_loss": 0.6161670185949492,
- "train_runtime": 9955.6802,
+ "train_loss": 0.26489045179408527,
+ "train_runtime": 4520.7899,
  "train_samples": 47095,
- "train_samples_per_second": 9.461,
- "train_steps_per_second": 0.037
+ "train_samples_per_second": 20.835,
+ "train_steps_per_second": 0.081
  }
trainer_state.json CHANGED
@@ -10,13 +10,13 @@
10
  "log_history": [
11
  {
12
  "epoch": 0.01,
13
- "grad_norm": 2062.9417756205603,
14
  "learning_rate": 2.702702702702703e-10,
15
  "logits/chosen": -1.3332719802856445,
16
  "logits/rejected": -1.246394395828247,
17
  "logps/chosen": -286.9539794921875,
18
  "logps/rejected": -263.3782958984375,
19
- "loss": 0.7136,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
@@ -25,552 +25,552 @@
25
  },
26
  {
27
  "epoch": 0.05,
28
- "grad_norm": 2488.3980990852974,
29
  "learning_rate": 2.702702702702703e-09,
30
- "logits/chosen": -1.6142714023590088,
31
- "logits/rejected": -1.3925563097000122,
32
- "logps/chosen": -342.4814758300781,
33
- "logps/rejected": -294.5446472167969,
34
- "loss": 0.8226,
35
- "rewards/accuracies": 0.4618055522441864,
36
- "rewards/chosen": 0.079922616481781,
37
- "rewards/margins": 0.09200635552406311,
38
- "rewards/rejected": -0.012083739042282104,
39
  "step": 10
40
  },
41
  {
42
  "epoch": 0.11,
43
- "grad_norm": 2085.30491295085,
44
  "learning_rate": 5.405405405405406e-09,
45
- "logits/chosen": -1.4863827228546143,
46
- "logits/rejected": -1.3085709810256958,
47
- "logps/chosen": -314.74273681640625,
48
- "logps/rejected": -279.32977294921875,
49
- "loss": 0.8217,
50
- "rewards/accuracies": 0.5375000238418579,
51
- "rewards/chosen": 0.03496693819761276,
52
- "rewards/margins": 0.07092654705047607,
53
- "rewards/rejected": -0.03595960885286331,
54
  "step": 20
55
  },
56
  {
57
  "epoch": 0.16,
58
- "grad_norm": 2613.9787597915297,
59
  "learning_rate": 8.108108108108109e-09,
60
- "logits/chosen": -1.5464979410171509,
61
- "logits/rejected": -1.3788726329803467,
62
- "logps/chosen": -324.9065246582031,
63
- "logps/rejected": -286.29925537109375,
64
- "loss": 0.8318,
65
- "rewards/accuracies": 0.515625,
66
- "rewards/chosen": -0.0007322698947973549,
67
- "rewards/margins": 0.02973010204732418,
68
- "rewards/rejected": -0.030462373048067093,
69
  "step": 30
70
  },
71
  {
72
  "epoch": 0.22,
73
- "grad_norm": 2309.6989479898994,
74
  "learning_rate": 9.997973265157192e-09,
75
- "logits/chosen": -1.5338213443756104,
76
- "logits/rejected": -1.356065034866333,
77
- "logps/chosen": -325.39349365234375,
78
- "logps/rejected": -285.630859375,
79
- "loss": 0.8544,
80
- "rewards/accuracies": 0.5093749761581421,
81
- "rewards/chosen": -0.00019043684005737305,
82
- "rewards/margins": -0.028223956003785133,
83
- "rewards/rejected": 0.02803351916372776,
84
  "step": 40
85
  },
86
  {
87
  "epoch": 0.27,
88
- "grad_norm": 2372.8781916000794,
89
  "learning_rate": 9.961988113473708e-09,
90
- "logits/chosen": -1.540814757347107,
91
- "logits/rejected": -1.3939155340194702,
92
- "logps/chosen": -337.01385498046875,
93
- "logps/rejected": -297.3047790527344,
94
- "loss": 0.7925,
95
- "rewards/accuracies": 0.5062500238418579,
96
- "rewards/chosen": 0.010568022727966309,
97
- "rewards/margins": 0.0009421706199645996,
98
- "rewards/rejected": 0.009625854901969433,
99
  "step": 50
100
  },
101
  {
102
  "epoch": 0.33,
103
- "grad_norm": 1906.9193219897543,
104
  "learning_rate": 9.881337335184878e-09,
105
- "logits/chosen": -1.5821880102157593,
106
- "logits/rejected": -1.433316707611084,
107
- "logps/chosen": -319.8349609375,
108
- "logps/rejected": -285.03131103515625,
109
- "loss": 0.7444,
110
- "rewards/accuracies": 0.59375,
111
- "rewards/chosen": 0.011926290579140186,
112
- "rewards/margins": 0.23517760634422302,
113
- "rewards/rejected": -0.22325129806995392,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.38,
118
- "grad_norm": 2229.621479388874,
119
  "learning_rate": 9.756746912994832e-09,
120
- "logits/chosen": -1.5089519023895264,
121
- "logits/rejected": -1.3478004932403564,
122
- "logps/chosen": -312.11767578125,
123
- "logps/rejected": -275.03704833984375,
124
- "loss": 0.7381,
125
- "rewards/accuracies": 0.5531250238418579,
126
- "rewards/chosen": -0.015234187245368958,
127
- "rewards/margins": 0.07565010339021683,
128
- "rewards/rejected": -0.09088429063558578,
129
  "step": 70
130
  },
131
  {
132
  "epoch": 0.43,
133
- "grad_norm": 1970.0426820414286,
134
  "learning_rate": 9.589338354885628e-09,
135
- "logits/chosen": -1.591552734375,
136
- "logits/rejected": -1.4374128580093384,
137
- "logps/chosen": -323.3088684082031,
138
- "logps/rejected": -288.12445068359375,
139
- "loss": 0.7257,
140
- "rewards/accuracies": 0.6000000238418579,
141
- "rewards/chosen": 0.1117367148399353,
142
- "rewards/margins": 0.34563174843788147,
143
- "rewards/rejected": -0.23389501869678497,
144
  "step": 80
145
  },
146
  {
147
  "epoch": 0.49,
148
- "grad_norm": 1647.476042777907,
149
  "learning_rate": 9.380618598797472e-09,
150
- "logits/chosen": -1.6083869934082031,
151
- "logits/rejected": -1.4117141962051392,
152
- "logps/chosen": -319.9634094238281,
153
- "logps/rejected": -281.79248046875,
154
- "loss": 0.6768,
155
- "rewards/accuracies": 0.637499988079071,
156
- "rewards/chosen": 0.1753208488225937,
157
- "rewards/margins": 0.44467267394065857,
158
- "rewards/rejected": -0.2693518102169037,
159
  "step": 90
160
  },
161
  {
162
  "epoch": 0.54,
163
- "grad_norm": 1779.591190181612,
164
  "learning_rate": 9.132466447838596e-09,
165
- "logits/chosen": -1.5439790487289429,
166
- "logits/rejected": -1.368858814239502,
167
- "logps/chosen": -321.8800964355469,
168
- "logps/rejected": -282.66168212890625,
169
- "loss": 0.6482,
170
- "rewards/accuracies": 0.668749988079071,
171
- "rewards/chosen": 0.34998807311058044,
172
- "rewards/margins": 0.6073418855667114,
173
- "rewards/rejected": -0.25735384225845337,
174
  "step": 100
175
  },
176
  {
177
  "epoch": 0.6,
178
- "grad_norm": 1799.5128068859713,
179
  "learning_rate": 8.847115658129039e-09,
180
- "logits/chosen": -1.5068881511688232,
181
- "logits/rejected": -1.3783992528915405,
182
- "logps/chosen": -318.10797119140625,
183
- "logps/rejected": -287.1791076660156,
184
- "loss": 0.6577,
185
- "rewards/accuracies": 0.637499988079071,
186
- "rewards/chosen": 0.35399100184440613,
187
- "rewards/margins": 0.5296486616134644,
188
- "rewards/rejected": -0.17565762996673584,
189
  "step": 110
190
  },
191
  {
192
  "epoch": 0.65,
193
- "grad_norm": 1676.764876114058,
194
  "learning_rate": 8.527134831514116e-09,
195
- "logits/chosen": -1.5781362056732178,
196
- "logits/rejected": -1.4229751825332642,
197
- "logps/chosen": -331.3733825683594,
198
- "logps/rejected": -297.85699462890625,
199
- "loss": 0.6575,
200
- "rewards/accuracies": 0.609375,
201
- "rewards/chosen": 0.3793606460094452,
202
- "rewards/margins": 0.4118588864803314,
203
- "rewards/rejected": -0.03249818831682205,
204
  "step": 120
205
  },
206
  {
207
  "epoch": 0.71,
208
- "grad_norm": 1566.6901996912077,
209
  "learning_rate": 8.175404294144481e-09,
210
- "logits/chosen": -1.6145737171173096,
211
- "logits/rejected": -1.4269483089447021,
212
- "logps/chosen": -317.0880432128906,
213
- "logps/rejected": -271.5414123535156,
214
- "loss": 0.6044,
215
- "rewards/accuracies": 0.671875,
216
- "rewards/chosen": 0.6310849189758301,
217
- "rewards/margins": 0.7299145460128784,
218
- "rewards/rejected": -0.09882961958646774,
219
  "step": 130
220
  },
221
  {
222
  "epoch": 0.76,
223
- "grad_norm": 1706.595775593044,
224
  "learning_rate": 7.79509016905158e-09,
225
- "logits/chosen": -1.5648548603057861,
226
- "logits/rejected": -1.4158308506011963,
227
- "logps/chosen": -331.06622314453125,
228
- "logps/rejected": -294.2123718261719,
229
- "loss": 0.6171,
230
- "rewards/accuracies": 0.699999988079071,
231
- "rewards/chosen": 0.7887445092201233,
232
- "rewards/margins": 0.765161395072937,
233
- "rewards/rejected": 0.023583168163895607,
234
  "step": 140
235
  },
236
  {
237
  "epoch": 0.82,
238
- "grad_norm": 1648.2049279025357,
239
  "learning_rate": 7.389615876105773e-09,
240
- "logits/chosen": -1.5560743808746338,
241
- "logits/rejected": -1.4283266067504883,
242
- "logps/chosen": -314.5069274902344,
243
- "logps/rejected": -291.7706298828125,
244
- "loss": 0.6127,
245
- "rewards/accuracies": 0.6656249761581421,
246
- "rewards/chosen": 0.8379846811294556,
247
- "rewards/margins": 0.7371869087219238,
248
- "rewards/rejected": 0.10079775750637054,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.87,
253
- "grad_norm": 1635.8235385722824,
254
  "learning_rate": 6.962631315901861e-09,
255
- "logits/chosen": -1.5186518430709839,
256
- "logits/rejected": -1.4028724431991577,
257
- "logps/chosen": -317.958251953125,
258
- "logps/rejected": -291.0096435546875,
259
- "loss": 0.6088,
260
- "rewards/accuracies": 0.653124988079071,
261
- "rewards/chosen": 0.8378221392631531,
262
- "rewards/margins": 0.6740074753761292,
263
- "rewards/rejected": 0.16381461918354034,
264
  "step": 160
265
  },
266
  {
267
  "epoch": 0.92,
268
- "grad_norm": 1575.6130834814026,
269
  "learning_rate": 6.517980014965139e-09,
270
- "logits/chosen": -1.6025912761688232,
271
- "logits/rejected": -1.4152277708053589,
272
- "logps/chosen": -331.40386962890625,
273
- "logps/rejected": -289.4659729003906,
274
- "loss": 0.5997,
275
- "rewards/accuracies": 0.6937500238418579,
276
- "rewards/chosen": 0.8780991435050964,
277
- "rewards/margins": 0.8349622488021851,
278
- "rewards/rejected": 0.04313689470291138,
279
  "step": 170
280
  },
281
  {
282
  "epoch": 0.98,
283
- "grad_norm": 1546.3751249922345,
284
  "learning_rate": 6.059664528022266e-09,
285
- "logits/chosen": -1.5942988395690918,
286
- "logits/rejected": -1.44364333152771,
287
- "logps/chosen": -315.07196044921875,
288
- "logps/rejected": -276.7376708984375,
289
- "loss": 0.5773,
290
- "rewards/accuracies": 0.706250011920929,
291
- "rewards/chosen": 0.8913241624832153,
292
- "rewards/margins": 0.9472495317459106,
293
- "rewards/rejected": -0.05592530965805054,
294
  "step": 180
295
  },
296
  {
297
  "epoch": 1.03,
298
- "grad_norm": 1681.3148479750444,
299
  "learning_rate": 5.591810408770492e-09,
300
- "logits/chosen": -1.5504480600357056,
301
- "logits/rejected": -1.3759148120880127,
302
- "logps/chosen": -315.5844421386719,
303
- "logps/rejected": -278.6695861816406,
304
- "loss": 0.5632,
305
- "rewards/accuracies": 0.7093750238418579,
306
- "rewards/chosen": 0.8848656415939331,
307
- "rewards/margins": 0.8844806551933289,
308
- "rewards/rejected": 0.00038505197153426707,
309
  "step": 190
310
  },
311
  {
312
  "epoch": 1.09,
313
- "grad_norm": 1651.7882136807318,
314
  "learning_rate": 5.118629073464423e-09,
315
- "logits/chosen": -1.571003794670105,
316
- "logits/rejected": -1.3608561754226685,
317
- "logps/chosen": -325.93023681640625,
318
- "logps/rejected": -282.7080993652344,
319
- "loss": 0.5605,
320
- "rewards/accuracies": 0.71875,
321
- "rewards/chosen": 1.0313498973846436,
322
- "rewards/margins": 0.9450349807739258,
323
- "rewards/rejected": 0.08631500601768494,
324
  "step": 200
325
  },
326
  {
327
  "epoch": 1.14,
328
- "grad_norm": 1538.4386313699126,
329
  "learning_rate": 4.644379891605983e-09,
330
- "logits/chosen": -1.608812689781189,
331
- "logits/rejected": -1.4315342903137207,
332
- "logps/chosen": -324.66522216796875,
333
- "logps/rejected": -291.33428955078125,
334
- "loss": 0.5478,
335
- "rewards/accuracies": 0.7281249761581421,
336
- "rewards/chosen": 1.0752595663070679,
337
- "rewards/margins": 1.0428497791290283,
338
- "rewards/rejected": 0.03240995481610298,
339
  "step": 210
340
  },
341
  {
342
  "epoch": 1.2,
343
- "grad_norm": 1737.3887570467818,
344
  "learning_rate": 4.173331844980362e-09,
345
- "logits/chosen": -1.5384166240692139,
346
- "logits/rejected": -1.4137290716171265,
347
- "logps/chosen": -323.9536437988281,
348
- "logps/rejected": -293.42535400390625,
349
- "loss": 0.563,
350
- "rewards/accuracies": 0.6968749761581421,
351
- "rewards/chosen": 0.9658479690551758,
352
- "rewards/margins": 0.9138795137405396,
353
- "rewards/rejected": 0.051968496292829514,
354
  "step": 220
355
  },
356
  {
357
  "epoch": 1.25,
358
- "grad_norm": 1605.3661746462226,
359
  "learning_rate": 3.7097251001664824e-09,
360
- "logits/chosen": -1.537548542022705,
361
- "logits/rejected": -1.3787362575531006,
362
- "logps/chosen": -323.85125732421875,
363
- "logps/rejected": -286.95379638671875,
364
- "loss": 0.526,
365
- "rewards/accuracies": 0.731249988079071,
366
- "rewards/chosen": 1.146087408065796,
367
- "rewards/margins": 1.0939618349075317,
368
- "rewards/rejected": 0.0521254763007164,
369
  "step": 230
370
  },
371
  {
372
  "epoch": 1.3,
373
- "grad_norm": 1689.839854162397,
374
  "learning_rate": 3.2577328404292057e-09,
375
- "logits/chosen": -1.5391089916229248,
376
- "logits/rejected": -1.4084638357162476,
377
- "logps/chosen": -312.51373291015625,
378
- "logps/rejected": -285.9711608886719,
379
- "loss": 0.5418,
380
- "rewards/accuracies": 0.7093750238418579,
381
- "rewards/chosen": 1.0901774168014526,
382
- "rewards/margins": 1.016390085220337,
383
- "rewards/rejected": 0.07378745824098587,
384
  "step": 240
385
  },
386
  {
387
  "epoch": 1.36,
388
- "grad_norm": 1710.94558540331,
389
  "learning_rate": 2.821423700565763e-09,
390
- "logits/chosen": -1.5968081951141357,
391
- "logits/rejected": -1.4188272953033447,
392
- "logps/chosen": -350.68487548828125,
393
- "logps/rejected": -306.6036071777344,
394
- "loss": 0.532,
395
- "rewards/accuracies": 0.78125,
396
- "rewards/chosen": 1.2678377628326416,
397
- "rewards/margins": 1.2405023574829102,
398
- "rewards/rejected": 0.027335500344634056,
399
  "step": 250
400
  },
401
  {
402
  "epoch": 1.41,
403
- "grad_norm": 1638.2367115980887,
404
  "learning_rate": 2.4047251428513483e-09,
405
- "logits/chosen": -1.6129051446914673,
406
- "logits/rejected": -1.4581451416015625,
407
- "logps/chosen": -325.2450256347656,
408
- "logps/rejected": -291.1476745605469,
409
- "loss": 0.5289,
410
- "rewards/accuracies": 0.734375,
411
- "rewards/chosen": 1.2301806211471558,
412
- "rewards/margins": 1.2308820486068726,
413
- "rewards/rejected": -0.0007013082504272461,
414
  "step": 260
415
  },
416
  {
417
  "epoch": 1.47,
418
- "grad_norm": 1199.4883951774482,
419
  "learning_rate": 2.011388103757442e-09,
420
- "logits/chosen": -1.5265954732894897,
421
- "logits/rejected": -1.3828239440917969,
422
- "logps/chosen": -316.2944641113281,
423
- "logps/rejected": -285.7884826660156,
424
- "loss": 0.5191,
425
- "rewards/accuracies": 0.7437499761581421,
426
- "rewards/chosen": 1.3710923194885254,
427
- "rewards/margins": 1.2594387531280518,
428
- "rewards/rejected": 0.11165344715118408,
429
  "step": 270
430
  },
431
  {
432
  "epoch": 1.52,
433
- "grad_norm": 1472.2115597857592,
434
  "learning_rate": 1.644953229677474e-09,
435
- "logits/chosen": -1.600651502609253,
436
- "logits/rejected": -1.4179413318634033,
437
- "logps/chosen": -326.00335693359375,
438
- "logps/rejected": -284.74188232421875,
439
- "loss": 0.5459,
440
- "rewards/accuracies": 0.75,
441
- "rewards/chosen": 1.3610546588897705,
442
- "rewards/margins": 1.2091944217681885,
443
- "rewards/rejected": 0.1518600881099701,
444
  "step": 280
445
  },
446
  {
447
  "epoch": 1.58,
448
- "grad_norm": 1566.9737970600454,
449
  "learning_rate": 1.308719005590957e-09,
450
- "logits/chosen": -1.5032551288604736,
451
- "logits/rejected": -1.3876453638076782,
452
- "logps/chosen": -318.40948486328125,
453
- "logps/rejected": -282.49554443359375,
454
- "loss": 0.5407,
455
- "rewards/accuracies": 0.7437499761581421,
456
- "rewards/chosen": 1.2658413648605347,
457
- "rewards/margins": 1.187675952911377,
458
- "rewards/rejected": 0.07816555351018906,
459
  "step": 290
460
  },
461
  {
462
  "epoch": 1.63,
463
- "grad_norm": 1348.7257224769698,
464
  "learning_rate": 1.005712063557776e-09,
465
- "logits/chosen": -1.6333671808242798,
466
- "logits/rejected": -1.455556869506836,
467
- "logps/chosen": -324.13885498046875,
468
- "logps/rejected": -290.60186767578125,
469
- "loss": 0.5346,
470
  "rewards/accuracies": 0.6968749761581421,
471
- "rewards/chosen": 1.1175706386566162,
472
- "rewards/margins": 1.0337438583374023,
473
- "rewards/rejected": 0.08382664620876312,
474
  "step": 300
475
  },
476
  {
477
  "epoch": 1.68,
478
- "grad_norm": 1356.5441208888985,
479
  "learning_rate": 7.386599383124321e-10,
480
- "logits/chosen": -1.565224051475525,
481
- "logits/rejected": -1.3825923204421997,
482
- "logps/chosen": -321.80316162109375,
483
- "logps/rejected": -285.7908630371094,
484
- "loss": 0.5304,
485
- "rewards/accuracies": 0.737500011920929,
486
- "rewards/chosen": 1.2159234285354614,
487
- "rewards/margins": 1.1465200185775757,
488
- "rewards/rejected": 0.06940338760614395,
489
  "step": 310
490
  },
491
  {
492
  "epoch": 1.74,
493
- "grad_norm": 1445.3559110776998,
494
  "learning_rate": 5.099665152003929e-10,
495
- "logits/chosen": -1.5921494960784912,
496
- "logits/rejected": -1.3807857036590576,
497
- "logps/chosen": -333.7308654785156,
498
- "logps/rejected": -289.9362487792969,
499
- "loss": 0.5241,
500
- "rewards/accuracies": 0.7718750238418579,
501
- "rewards/chosen": 1.3256893157958984,
502
- "rewards/margins": 1.292041540145874,
503
- "rewards/rejected": 0.03364778310060501,
504
  "step": 320
505
  },
506
  {
507
  "epoch": 1.79,
508
- "grad_norm": 1681.5042999261696,
509
  "learning_rate": 3.216903914633745e-10,
510
- "logits/chosen": -1.5627129077911377,
511
- "logits/rejected": -1.4408833980560303,
512
- "logps/chosen": -325.2505187988281,
513
- "logps/rejected": -296.106201171875,
514
- "loss": 0.5429,
515
- "rewards/accuracies": 0.7562500238418579,
516
- "rewards/chosen": 1.165374517440796,
517
- "rewards/margins": 1.0651426315307617,
518
- "rewards/rejected": 0.1002318263053894,
519
  "step": 330
520
  },
521
  {
522
  "epoch": 1.85,
523
- "grad_norm": 1536.75287567762,
524
  "learning_rate": 1.7552634565570324e-10,
525
- "logits/chosen": -1.5574743747711182,
526
- "logits/rejected": -1.3901411294937134,
527
- "logps/chosen": -329.89141845703125,
528
- "logps/rejected": -292.8751525878906,
529
- "loss": 0.5342,
530
- "rewards/accuracies": 0.753125011920929,
531
- "rewards/chosen": 1.4129165410995483,
532
- "rewards/margins": 1.3112914562225342,
533
- "rewards/rejected": 0.10162514448165894,
534
  "step": 340
535
  },
536
  {
537
  "epoch": 1.9,
538
- "grad_norm": 1492.8399510840338,
539
  "learning_rate": 7.279008199590543e-11,
540
- "logits/chosen": -1.5503973960876465,
541
- "logits/rejected": -1.3889100551605225,
542
- "logps/chosen": -326.42120361328125,
543
- "logps/rejected": -291.9585266113281,
544
- "loss": 0.5261,
545
- "rewards/accuracies": 0.737500011920929,
546
- "rewards/chosen": 1.3398044109344482,
547
- "rewards/margins": 1.2421011924743652,
548
- "rewards/rejected": 0.09770330041646957,
549
  "step": 350
550
  },
551
  {
552
  "epoch": 1.96,
553
- "grad_norm": 1452.281513333118,
554
  "learning_rate": 1.4406386978128017e-11,
555
- "logits/chosen": -1.6207876205444336,
556
- "logits/rejected": -1.424393653869629,
557
- "logps/chosen": -331.06390380859375,
558
- "logps/rejected": -291.6929626464844,
559
- "loss": 0.5043,
560
- "rewards/accuracies": 0.7906249761581421,
561
- "rewards/chosen": 1.518845558166504,
562
- "rewards/margins": 1.381410837173462,
563
- "rewards/rejected": 0.13743488490581512,
564
  "step": 360
565
  },
566
  {
567
  "epoch": 2.0,
568
  "step": 368,
569
  "total_flos": 0.0,
570
- "train_loss": 0.6161670185949492,
571
- "train_runtime": 9955.6802,
572
- "train_samples_per_second": 9.461,
573
- "train_steps_per_second": 0.037
574
  }
575
  ],
576
  "logging_steps": 10,
 
10
  "log_history": [
11
  {
12
  "epoch": 0.01,
13
+ "grad_norm": 2733.5875966596914,
14
  "learning_rate": 2.702702702702703e-10,
15
  "logits/chosen": -1.3332719802856445,
16
  "logits/rejected": -1.246394395828247,
17
  "logps/chosen": -286.9539794921875,
18
  "logps/rejected": -263.3782958984375,
19
+ "loss": 0.7283,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
 
25
  },
26
  {
27
  "epoch": 0.05,
28
+ "grad_norm": 3426.28374639058,
29
  "learning_rate": 2.702702702702703e-09,
30
+ "logits/chosen": -1.617490530014038,
31
+ "logits/rejected": -1.3964743614196777,
32
+ "logps/chosen": -342.53607177734375,
33
+ "logps/rejected": -294.5452575683594,
34
+ "loss": 0.9019,
35
+ "rewards/accuracies": 0.4375,
36
+ "rewards/chosen": -0.0025859144516289234,
37
+ "rewards/margins": 0.014665775932371616,
38
+ "rewards/rejected": -0.017251690849661827,
39
  "step": 10
40
  },
41
  {
42
  "epoch": 0.11,
43
+ "grad_norm": 3068.517544136213,
44
  "learning_rate": 5.405405405405406e-09,
45
+ "logits/chosen": -1.4905732870101929,
46
+ "logits/rejected": -1.3132953643798828,
47
+ "logps/chosen": -314.7499084472656,
48
+ "logps/rejected": -279.27752685546875,
49
+ "loss": 0.9225,
50
+ "rewards/accuracies": 0.4937500059604645,
51
+ "rewards/chosen": 0.03222837299108505,
52
+ "rewards/margins": -0.024351513013243675,
53
+ "rewards/rejected": 0.05657988786697388,
54
  "step": 20
55
  },
56
  {
57
  "epoch": 0.16,
58
+ "grad_norm": 3019.197796031677,
59
  "learning_rate": 8.108108108108109e-09,
60
+ "logits/chosen": -1.5479624271392822,
61
+ "logits/rejected": -1.3802028894424438,
62
+ "logps/chosen": -324.89044189453125,
63
+ "logps/rejected": -286.2395324707031,
64
+ "loss": 0.9562,
65
+ "rewards/accuracies": 0.5218750238418579,
66
+ "rewards/chosen": 0.031223665922880173,
67
+ "rewards/margins": -0.0476018562912941,
68
+ "rewards/rejected": 0.07882551848888397,
69
  "step": 30
70
  },
71
  {
72
  "epoch": 0.22,
73
+ "grad_norm": 2861.837005400921,
74
  "learning_rate": 9.997973265157192e-09,
75
+ "logits/chosen": -1.5354044437408447,
76
+ "logits/rejected": -1.3576419353485107,
77
+ "logps/chosen": -325.43408203125,
78
+ "logps/rejected": -285.6204528808594,
79
+ "loss": 0.9309,
80
+ "rewards/accuracies": 0.484375,
81
+ "rewards/chosen": -0.08140890300273895,
82
+ "rewards/margins": -0.13951030373573303,
83
+ "rewards/rejected": 0.05810140445828438,
84
  "step": 40
85
  },
86
  {
87
  "epoch": 0.27,
88
+ "grad_norm": 3285.7135249709568,
89
  "learning_rate": 9.961988113473708e-09,
90
+ "logits/chosen": -1.534355640411377,
91
+ "logits/rejected": -1.3875898122787476,
92
+ "logps/chosen": -337.02044677734375,
93
+ "logps/rejected": -297.35101318359375,
94
+ "loss": 0.88,
95
+ "rewards/accuracies": 0.546875,
96
+ "rewards/chosen": 0.0009558796882629395,
97
+ "rewards/margins": 0.08057532459497452,
98
+ "rewards/rejected": -0.07961944490671158,
99
  "step": 50
100
  },
101
  {
102
  "epoch": 0.33,
103
+ "grad_norm": 2547.1665545835035,
104
  "learning_rate": 9.881337335184878e-09,
105
+ "logits/chosen": -1.5822935104370117,
106
+ "logits/rejected": -1.4333903789520264,
107
+ "logps/chosen": -319.79644775390625,
108
+ "logps/rejected": -285.0381164550781,
109
+ "loss": 0.8105,
110
+ "rewards/accuracies": 0.6156250238418579,
111
+ "rewards/chosen": 0.09285839647054672,
112
+ "rewards/margins": 0.40408092737197876,
113
+ "rewards/rejected": -0.31122252345085144,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.38,
118
+ "grad_norm": 3178.8313195657583,
119
  "learning_rate": 9.756746912994832e-09,
120
+ "logits/chosen": -1.5119212865829468,
121
+ "logits/rejected": -1.350838541984558,
122
+ "logps/chosen": -312.1349182128906,
123
+ "logps/rejected": -275.08660888671875,
124
+ "loss": 0.7993,
125
+ "rewards/accuracies": 0.5874999761581421,
126
+ "rewards/chosen": -0.05482473224401474,
127
+ "rewards/margins": 0.165395587682724,
128
+ "rewards/rejected": -0.22022032737731934,
129
  "step": 70
130
  },
131
  {
132
  "epoch": 0.43,
133
+ "grad_norm": 2596.199281277795,
134
  "learning_rate": 9.589338354885628e-09,
135
+ "logits/chosen": -1.5992329120635986,
136
+ "logits/rejected": -1.4463211297988892,
137
+ "logps/chosen": -323.2821960449219,
138
+ "logps/rejected": -288.0993347167969,
139
+ "loss": 0.7772,
140
+ "rewards/accuracies": 0.574999988079071,
141
+ "rewards/chosen": 0.20231203734874725,
142
+ "rewards/margins": 0.4638887345790863,
143
+ "rewards/rejected": -0.26157671213150024,
144
  "step": 80
145
  },
146
  {
147
  "epoch": 0.49,
148
+ "grad_norm": 2404.162696635418,
149
  "learning_rate": 9.380618598797472e-09,
150
+ "logits/chosen": -1.6108148097991943,
151
+ "logits/rejected": -1.4147026538848877,
152
+ "logps/chosen": -319.95526123046875,
153
+ "logps/rejected": -281.7666015625,
154
+ "loss": 0.7649,
155
+ "rewards/accuracies": 0.640625,
156
+ "rewards/chosen": 0.2500740885734558,
157
+ "rewards/margins": 0.5574880838394165,
158
+ "rewards/rejected": -0.3074139356613159,
159
  "step": 90
160
  },
161
  {
162
  "epoch": 0.54,
163
+ "grad_norm": 2500.271614922207,
164
  "learning_rate": 9.132466447838596e-09,
165
+ "logits/chosen": -1.542976975440979,
166
+ "logits/rejected": -1.3676128387451172,
167
+ "logps/chosen": -321.9007263183594,
168
+ "logps/rejected": -282.65899658203125,
169
+ "loss": 0.7305,
170
+ "rewards/accuracies": 0.659375011920929,
171
+ "rewards/chosen": 0.4254188537597656,
172
+ "rewards/margins": 0.763160228729248,
173
+ "rewards/rejected": -0.3377414047718048,
174
  "step": 100
175
  },
176
  {
177
  "epoch": 0.6,
178
+ "grad_norm": 2525.572173445101,
179
  "learning_rate": 8.847115658129039e-09,
180
+ "logits/chosen": -1.512939453125,
181
+ "logits/rejected": -1.3849382400512695,
182
+ "logps/chosen": -318.14813232421875,
183
+ "logps/rejected": -287.1947937011719,
184
+ "loss": 0.7164,
185
+ "rewards/accuracies": 0.628125011920929,
186
+ "rewards/chosen": 0.3916184604167938,
187
+ "rewards/margins": 0.657262921333313,
188
+ "rewards/rejected": -0.265644371509552,
189
  "step": 110
190
  },
191
  {
192
  "epoch": 0.65,
193
+ "grad_norm": 2498.220455730566,
194
  "learning_rate": 8.527134831514116e-09,
195
+ "logits/chosen": -1.5739517211914062,
196
+ "logits/rejected": -1.41860032081604,
197
+ "logps/chosen": -331.3175354003906,
198
+ "logps/rejected": -297.8718566894531,
199
+ "loss": 0.7018,
200
+ "rewards/accuracies": 0.637499988079071,
201
+ "rewards/chosen": 0.6175383925437927,
202
+ "rewards/margins": 0.6906081438064575,
203
+ "rewards/rejected": -0.07306969165802002,
204
  "step": 120
205
  },
206
  {
207
  "epoch": 0.71,
208
+ "grad_norm": 2170.854176631694,
209
  "learning_rate": 8.175404294144481e-09,
210
+ "logits/chosen": -1.616276502609253,
211
+ "logits/rejected": -1.429518699645996,
212
+ "logps/chosen": -317.1609802246094,
213
+ "logps/rejected": -271.5557861328125,
214
+ "loss": 0.6719,
215
+ "rewards/accuracies": 0.6625000238418579,
216
+ "rewards/chosen": 0.6955646872520447,
217
+ "rewards/margins": 0.8560658693313599,
218
+ "rewards/rejected": -0.160501167178154,
219
  "step": 130
220
  },
221
  {
222
  "epoch": 0.76,
223
+ "grad_norm": 2147.543587634845,
224
  "learning_rate": 7.79509016905158e-09,
225
+ "logits/chosen": -1.5726101398468018,
226
+ "logits/rejected": -1.4245671033859253,
227
+ "logps/chosen": -331.12109375,
228
+ "logps/rejected": -294.2488098144531,
229
+ "loss": 0.6686,
230
+ "rewards/accuracies": 0.6968749761581421,
231
+ "rewards/chosen": 0.9418653249740601,
232
+ "rewards/margins": 0.9832620620727539,
233
+ "rewards/rejected": -0.04139674827456474,
234
  "step": 140
235
  },
236
  {
237
  "epoch": 0.82,
238
+ "grad_norm": 2298.0130679506387,
239
  "learning_rate": 7.389615876105773e-09,
240
+ "logits/chosen": -1.5536715984344482,
241
+ "logits/rejected": -1.4254592657089233,
242
+ "logps/chosen": -314.55267333984375,
243
+ "logps/rejected": -291.81536865234375,
244
+ "loss": 0.6793,
245
+ "rewards/accuracies": 0.675000011920929,
246
+ "rewards/chosen": 1.0258944034576416,
247
+ "rewards/margins": 0.981005072593689,
248
+ "rewards/rejected": 0.04488936811685562,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.87,
253
+ "grad_norm": 2220.286727308493,
254
  "learning_rate": 6.962631315901861e-09,
255
+ "logits/chosen": -1.5181314945220947,
256
+ "logits/rejected": -1.4019381999969482,
257
+ "logps/chosen": -318.02752685546875,
258
+ "logps/rejected": -291.03936767578125,
259
+ "loss": 0.6742,
260
+ "rewards/accuracies": 0.628125011920929,
261
+ "rewards/chosen": 0.9784830808639526,
262
+ "rewards/margins": 0.819505512714386,
263
+ "rewards/rejected": 0.15897764265537262,
264
  "step": 160
265
  },
266
  {
267
  "epoch": 0.92,
268
+ "grad_norm": 2125.4983562302123,
269
  "learning_rate": 6.517980014965139e-09,
270
+ "logits/chosen": -1.5958881378173828,
271
+ "logits/rejected": -1.4071909189224243,
272
+ "logps/chosen": -331.4378356933594,
273
+ "logps/rejected": -289.5236511230469,
274
+ "loss": 0.6456,
275
+ "rewards/accuracies": 0.731249988079071,
276
+ "rewards/chosen": 1.1029136180877686,
277
+ "rewards/margins": 1.160766363143921,
278
+ "rewards/rejected": -0.05785265564918518,
279
  "step": 170
280
  },
281
  {
282
  "epoch": 0.98,
283
+ "grad_norm": 2116.9259500110184,
284
  "learning_rate": 6.059664528022266e-09,
285
+ "logits/chosen": -1.5962104797363281,
286
+ "logits/rejected": -1.445967197418213,
287
+ "logps/chosen": -315.10467529296875,
288
+ "logps/rejected": -276.73443603515625,
289
+ "loss": 0.6191,
290
+ "rewards/accuracies": 0.6812499761581421,
291
+ "rewards/chosen": 1.123002052307129,
292
+ "rewards/margins": 1.1910655498504639,
293
+ "rewards/rejected": -0.0680634081363678,
294
  "step": 180
295
  },
296
  {
297
  "epoch": 1.03,
298
+ "grad_norm": 2067.8979230397717,
299
  "learning_rate": 5.591810408770492e-09,
300
+ "logits/chosen": -1.55275559425354,
301
+ "logits/rejected": -1.3787180185317993,
302
+ "logps/chosen": -315.572509765625,
303
+ "logps/rejected": -278.71087646484375,
304
+ "loss": 0.6052,
305
+ "rewards/accuracies": 0.71875,
306
+ "rewards/chosen": 1.2037546634674072,
307
+ "rewards/margins": 1.2858160734176636,
308
+ "rewards/rejected": -0.0820615291595459,
309
  "step": 190
310
  },
311
  {
312
  "epoch": 1.09,
313
+ "grad_norm": 2203.6189854484824,
314
  "learning_rate": 5.118629073464423e-09,
315
+ "logits/chosen": -1.5673738718032837,
316
+ "logits/rejected": -1.3565856218338013,
317
+ "logps/chosen": -325.91680908203125,
318
+ "logps/rejected": -282.65869140625,
319
+ "loss": 0.6024,
320
+ "rewards/accuracies": 0.746874988079071,
321
+ "rewards/chosen": 1.4020355939865112,
322
+ "rewards/margins": 1.1880966424942017,
323
+ "rewards/rejected": 0.21393892168998718,
324
  "step": 200
325
  },
326
  {
327
  "epoch": 1.14,
328
+ "grad_norm": 2263.504332795979,
329
  "learning_rate": 4.644379891605983e-09,
330
+ "logits/chosen": -1.611310601234436,
331
+ "logits/rejected": -1.4343440532684326,
332
+ "logps/chosen": -324.752197265625,
333
+ "logps/rejected": -291.36102294921875,
334
+ "loss": 0.5985,
335
+ "rewards/accuracies": 0.699999988079071,
336
+ "rewards/chosen": 1.2598803043365479,
337
+ "rewards/margins": 1.2701908349990845,
338
+ "rewards/rejected": -0.010310685262084007,
339
  "step": 210
340
  },
341
  {
342
  "epoch": 1.2,
343
+ "grad_norm": 2324.309417872748,
344
  "learning_rate": 4.173331844980362e-09,
345
+ "logits/chosen": -1.5291264057159424,
346
+ "logits/rejected": -1.4033840894699097,
347
+ "logps/chosen": -323.9982604980469,
348
+ "logps/rejected": -293.4136047363281,
349
+ "loss": 0.5948,
350
+ "rewards/accuracies": 0.7281249761581421,
351
+ "rewards/chosen": 1.1985571384429932,
352
+ "rewards/margins": 1.105764627456665,
353
+ "rewards/rejected": 0.09279236942529678,
354
  "step": 220
355
  },
356
  {
357
  "epoch": 1.25,
358
+ "grad_norm": 2275.313721629071,
359
  "learning_rate": 3.7097251001664824e-09,
360
+ "logits/chosen": -1.5342741012573242,
361
+ "logits/rejected": -1.3754017353057861,
362
+ "logps/chosen": -323.9897766113281,
363
+ "logps/rejected": -287.0173645019531,
364
+ "loss": 0.577,
365
+ "rewards/accuracies": 0.71875,
366
+ "rewards/chosen": 1.2511075735092163,
367
+ "rewards/margins": 1.3087403774261475,
368
+ "rewards/rejected": -0.05763290077447891,
369
  "step": 230
370
  },
371
  {
372
  "epoch": 1.3,
373
+ "grad_norm": 2261.889907648677,
374
  "learning_rate": 3.2577328404292057e-09,
375
+ "logits/chosen": -1.5497777462005615,
376
+ "logits/rejected": -1.4208284616470337,
377
+ "logps/chosen": -312.53802490234375,
378
+ "logps/rejected": -285.97076416015625,
379
+ "loss": 0.5741,
380
+ "rewards/accuracies": 0.699999988079071,
381
+ "rewards/chosen": 1.4049217700958252,
382
+ "rewards/margins": 1.3057136535644531,
383
+ "rewards/rejected": 0.09920807182788849,
384
  "step": 240
385
  },
386
  {
387
  "epoch": 1.36,
388
+ "grad_norm": 2520.220972155196,
389
  "learning_rate": 2.821423700565763e-09,
390
+ "logits/chosen": -1.5996572971343994,
391
+ "logits/rejected": -1.4216984510421753,
392
+ "logps/chosen": -350.76129150390625,
393
+ "logps/rejected": -306.58831787109375,
394
+ "loss": 0.5681,
395
+ "rewards/accuracies": 0.762499988079071,
396
+ "rewards/chosen": 1.537647008895874,
397
+ "rewards/margins": 1.4706511497497559,
398
+ "rewards/rejected": 0.06699595600366592,
399
  "step": 250
400
  },
401
  {
402
  "epoch": 1.41,
403
+ "grad_norm": 2185.2135747623915,
404
  "learning_rate": 2.4047251428513483e-09,
405
+ "logits/chosen": -1.61586594581604,
406
+ "logits/rejected": -1.4618706703186035,
407
+ "logps/chosen": -325.3050537109375,
408
+ "logps/rejected": -291.10345458984375,
409
+ "loss": 0.5977,
410
+ "rewards/accuracies": 0.731249988079071,
411
+ "rewards/chosen": 1.5201152563095093,
412
+ "rewards/margins": 1.4326369762420654,
413
+ "rewards/rejected": 0.08747831732034683,
414
  "step": 260
415
  },
416
  {
417
  "epoch": 1.47,
418
+ "grad_norm": 1735.3140228188304,
419
  "learning_rate": 2.011388103757442e-09,
420
+ "logits/chosen": -1.5243465900421143,
421
+ "logits/rejected": -1.3802506923675537,
422
+ "logps/chosen": -316.4330139160156,
423
+ "logps/rejected": -285.81353759765625,
424
+ "loss": 0.5429,
425
+ "rewards/accuracies": 0.7406250238418579,
426
+ "rewards/chosen": 1.550986647605896,
427
+ "rewards/margins": 1.45218026638031,
428
+ "rewards/rejected": 0.09880634397268295,
429
  "step": 270
430
  },
431
  {
432
  "epoch": 1.52,
433
+ "grad_norm": 2121.5830866823144,
434
  "learning_rate": 1.644953229677474e-09,
435
+ "logits/chosen": -1.6015859842300415,
436
+ "logits/rejected": -1.4193016290664673,
437
+ "logps/chosen": -326.1202087402344,
438
+ "logps/rejected": -284.7384033203125,
439
+ "loss": 0.5887,
440
+ "rewards/accuracies": 0.746874988079071,
441
+ "rewards/chosen": 1.580999732017517,
442
+ "rewards/margins": 1.3716144561767578,
443
+ "rewards/rejected": 0.20938535034656525,
444
  "step": 280
445
  },
446
  {
447
  "epoch": 1.58,
448
+ "grad_norm": 2103.020456118981,
449
  "learning_rate": 1.308719005590957e-09,
450
+ "logits/chosen": -1.509340524673462,
451
+ "logits/rejected": -1.3944005966186523,
452
+ "logps/chosen": -318.451416015625,
453
+ "logps/rejected": -282.4563293457031,
454
+ "loss": 0.5721,
455
+ "rewards/accuracies": 0.731249988079071,
456
+ "rewards/chosen": 1.603921890258789,
457
+ "rewards/margins": 1.4211392402648926,
458
+ "rewards/rejected": 0.182782843708992,
459
  "step": 290
460
  },
461
  {
462
  "epoch": 1.63,
463
+ "grad_norm": 2010.5734100649909,
464
  "learning_rate": 1.005712063557776e-09,
465
+ "logits/chosen": -1.6272541284561157,
466
+ "logits/rejected": -1.4480578899383545,
467
+ "logps/chosen": -324.20068359375,
468
+ "logps/rejected": -290.54803466796875,
469
+ "loss": 0.5898,
470
  "rewards/accuracies": 0.6968749761581421,
471
+ "rewards/chosen": 1.3664501905441284,
472
+ "rewards/margins": 1.1471518278121948,
473
+ "rewards/rejected": 0.21929831802845,
474
  "step": 300
475
  },
476
  {
477
  "epoch": 1.68,
478
+ "grad_norm": 1862.455491029429,
479
  "learning_rate": 7.386599383124321e-10,
480
+ "logits/chosen": -1.563561201095581,
481
+ "logits/rejected": -1.3803369998931885,
482
+ "logps/chosen": -321.889404296875,
483
+ "logps/rejected": -285.8083190917969,
484
+ "loss": 0.5879,
485
+ "rewards/accuracies": 0.7124999761581421,
486
+ "rewards/chosen": 1.4487113952636719,
487
+ "rewards/margins": 1.3910987377166748,
488
+ "rewards/rejected": 0.057612527161836624,
489
  "step": 310
490
  },
491
  {
492
  "epoch": 1.74,
493
+ "grad_norm": 1875.0391267528262,
494
  "learning_rate": 5.099665152003929e-10,
495
+ "logits/chosen": -1.5980346202850342,
496
+ "logits/rejected": -1.3878109455108643,
497
+ "logps/chosen": -333.843994140625,
498
+ "logps/rejected": -289.8874816894531,
499
+ "loss": 0.5634,
500
+ "rewards/accuracies": 0.7593749761581421,
501
+ "rewards/chosen": 1.5412708520889282,
502
+ "rewards/margins": 1.3988701105117798,
503
+ "rewards/rejected": 0.14240065217018127,
504
  "step": 320
505
  },
506
  {
507
  "epoch": 1.79,
508
+ "grad_norm": 2232.545936783362,
509
  "learning_rate": 3.216903914633745e-10,
510
+ "logits/chosen": -1.5563807487487793,
511
+ "logits/rejected": -1.4342092275619507,
512
+ "logps/chosen": -325.34674072265625,
513
+ "logps/rejected": -296.1554870605469,
514
+ "loss": 0.5762,
515
+ "rewards/accuracies": 0.706250011920929,
516
+ "rewards/chosen": 1.3614461421966553,
517
+ "rewards/margins": 1.3264009952545166,
518
+ "rewards/rejected": 0.03504505008459091,
519
  "step": 330
520
  },
521
  {
522
  "epoch": 1.85,
523
+ "grad_norm": 1895.2431252651277,
524
  "learning_rate": 1.7552634565570324e-10,
525
+ "logits/chosen": -1.5503065586090088,
526
+ "logits/rejected": -1.381874680519104,
527
+ "logps/chosen": -329.953857421875,
528
+ "logps/rejected": -292.89447021484375,
529
+ "loss": 0.5681,
530
+ "rewards/accuracies": 0.7718750238418579,
531
+ "rewards/chosen": 1.7590471506118774,
532
+ "rewards/margins": 1.6621748208999634,
533
+ "rewards/rejected": 0.09687252342700958,
534
  "step": 340
535
  },
536
  {
537
  "epoch": 1.9,
538
+ "grad_norm": 2070.257116146559,
539
  "learning_rate": 7.279008199590543e-11,
540
+ "logits/chosen": -1.541084885597229,
541
+ "logits/rejected": -1.378144383430481,
542
+ "logps/chosen": -326.51507568359375,
543
+ "logps/rejected": -291.95123291015625,
544
+ "loss": 0.5797,
545
+ "rewards/accuracies": 0.7281249761581421,
546
+ "rewards/chosen": 1.5987285375595093,
547
+ "rewards/margins": 1.4538962841033936,
548
+ "rewards/rejected": 0.1448323279619217,
549
  "step": 350
550
  },
551
  {
552
  "epoch": 1.96,
553
+ "grad_norm": 2166.9724743285583,
554
  "learning_rate": 1.4406386978128017e-11,
555
+ "logits/chosen": -1.6209802627563477,
556
+ "logits/rejected": -1.42485511302948,
557
+ "logps/chosen": -331.2142639160156,
558
+ "logps/rejected": -291.69842529296875,
559
+ "loss": 0.5582,
560
+ "rewards/accuracies": 0.778124988079071,
561
+ "rewards/chosen": 1.724250078201294,
562
+ "rewards/margins": 1.5519315004348755,
563
+ "rewards/rejected": 0.1723184883594513,
564
  "step": 360
565
  },
566
  {
567
  "epoch": 2.0,
568
  "step": 368,
569
  "total_flos": 0.0,
570
+ "train_loss": 0.26489045179408527,
571
+ "train_runtime": 4520.7899,
572
+ "train_samples_per_second": 20.835,
573
+ "train_steps_per_second": 0.081
574
  }
575
  ],
576
  "logging_steps": 10,