RikkiXu committed on
Commit 381edee
1 Parent(s): 2b84ac3

Model save

README.md CHANGED
@@ -1,4 +1,5 @@
  ---
+ base_model: princeton-nlp/Mistral-7B-Base-SFT-SimPO
  tags:
  - trl
  - dpo
@@ -13,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  # zephyr-7b-dpo-full
 
- This model was trained from scratch on the None dataset.
+ This model is a fine-tuned version of [princeton-nlp/Mistral-7B-Base-SFT-SimPO](https://huggingface.co/princeton-nlp/Mistral-7B-Base-SFT-SimPO) on the None dataset.
 
  ## Model description
 
@@ -32,7 +33,7 @@ More information needed
  ### Training hyperparameters
 
  The following hyperparameters were used during training:
- - learning_rate: 4e-07
+ - learning_rate: 1e-07
  - train_batch_size: 4
  - eval_batch_size: 4
  - seed: 42
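The updated card records the DPO hyperparameters (learning_rate 1e-07, per-device batch size 4, seed 42, 1 epoch) but no training snippet. Below is a minimal sketch of how such a run could look with TRL's `DPOTrainer`; the preference dataset name is a placeholder (the card only says "the None dataset"), and keyword names such as `tokenizer=` vary slightly across trl versions.

```python
# Minimal sketch, not the exact training script used for this commit.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

base = "princeton-nlp/Mistral-7B-Base-SFT-SimPO"
model = AutoModelForCausalLM.from_pretrained(base)
tokenizer = AutoTokenizer.from_pretrained(base)

# Placeholder: the preference dataset (prompt/chosen/rejected columns) is not named in the card.
train_dataset = load_dataset("your/preference-dataset", split="train")

args = DPOConfig(
    output_dir="zephyr-7b-dpo-full",
    learning_rate=1e-07,            # from the updated README
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    num_train_epochs=1,
    seed=42,
    logging_steps=10,               # matches trainer_state.json
    save_steps=100,
)

trainer = DPOTrainer(model=model, args=args, train_dataset=train_dataset, tokenizer=tokenizer)
trainer.train()
```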
all_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 1.0,
- "train_loss": 3.8421780889894426,
- "train_runtime": 6381.4933,
- "train_samples": 52922,
- "train_samples_per_second": 8.293,
- "train_steps_per_second": 0.065
+ "train_loss": 0.5863300847029632,
+ "train_runtime": 6843.0852,
+ "train_samples": 56236,
+ "train_samples_per_second": 8.218,
+ "train_steps_per_second": 0.064
  }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4e88c6ac9b4127cd43c95069301c9c933118b1525797db81fc6d80f544cf7cec
+ oid sha256:d085da4a857fb7f218891d9be471fb86e732b224ff99d833d19b15d462e4ce9d
  size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bd64f1edcaa32df3d1b32cd35af92f119cd07ea2ba7ebf00b6828eed52da98f0
+ oid sha256:7a5dbc0fe2ece97438d30fd6b0f0e3d1f3e734e6156b8e9d5f3c15a5dcaa7cf0
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:87c91c34961d0019bf71d071d06cfd82742211fd4d3379b61e5bc9cad550096c
+ oid sha256:220289bd03f2c16bc923749307f2d3eab7817266cb64a00415b0df5008ec88f8
  size 4540516344
runs/Jul01_23-19-09_n136-129-074/events.out.tfevents.1719847173.n136-129-074.2002672.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4497f7ce1c9d1f223465b26c89f67a43949fc300dc8929a9894ceb64c8f39e0a
- size 32977
+ oid sha256:ca5bc1674e38eb08c326a1eb43f08b14e751071a2de1f6aeb0483afae3355ad5
+ size 35395
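All four binaries above are stored as Git LFS pointers (`version` / `oid sha256:` / `size`). A small sketch, assuming the shards have been downloaded locally, for checking a file against the `oid` recorded in this commit:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so multi-GB shards don't need to fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected digest taken from the LFS pointer above (model-00001-of-00003).
expected = "d085da4a857fb7f218891d9be471fb86e732b224ff99d833d19b15d462e4ce9d"
actual = sha256_of("model-00001-of-00003.safetensors")  # hypothetical local path
print("OK" if actual == expected else f"mismatch: {actual}")
```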
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 1.0,
- "train_loss": 3.8421780889894426,
- "train_runtime": 6381.4933,
- "train_samples": 52922,
- "train_samples_per_second": 8.293,
- "train_steps_per_second": 0.065
+ "train_loss": 0.5863300847029632,
+ "train_runtime": 6843.0852,
+ "train_samples": 56236,
+ "train_samples_per_second": 8.218,
+ "train_steps_per_second": 0.064
  }
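The throughput fields in `train_results.json` / `all_results.json` follow from the other values: samples per second is roughly train_samples / train_runtime (56236 / 6843.0852 ≈ 8.218), and steps per second is roughly global_step / train_runtime (439 / 6843.0852 ≈ 0.064, with 439 taken from the updated `trainer_state.json` below). A quick consistency check, assuming the file is read from this repository:

```python
import json

with open("train_results.json") as f:  # path as committed in this repo
    r = json.load(f)

print(r["train_samples"] / r["train_runtime"])  # ~8.218, matches train_samples_per_second
print(439 / r["train_runtime"])                 # ~0.064, matches train_steps_per_second
```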
trainer_state.json CHANGED
@@ -1,22 +1,22 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.9987908101571947,
5
  "eval_steps": 10000000,
6
- "global_step": 413,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
- "grad_norm": 6401.270603874373,
14
- "learning_rate": 9.523809523809522e-09,
15
- "logits/chosen": -2.7005977630615234,
16
- "logits/rejected": -2.6288318634033203,
17
- "logps/chosen": -1.1158788204193115,
18
- "logps/rejected": -1.1333446502685547,
19
- "loss": 0.7544,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
@@ -25,631 +25,661 @@
25
  },
26
  {
27
  "epoch": 0.02,
28
- "grad_norm": 9369.590990783972,
29
- "learning_rate": 9.523809523809523e-08,
30
- "logits/chosen": -2.76228666305542,
31
- "logits/rejected": -2.6970374584198,
32
- "logps/chosen": -0.837486743927002,
33
- "logps/rejected": -0.8182350993156433,
34
- "loss": 0.9695,
35
- "rewards/accuracies": 0.4305555522441864,
36
- "rewards/chosen": 0.06597563624382019,
37
- "rewards/margins": 0.437710702419281,
38
- "rewards/rejected": -0.3717350959777832,
39
  "step": 10
40
  },
41
  {
42
  "epoch": 0.05,
43
- "grad_norm": 5966.657402243146,
44
- "learning_rate": 1.9047619047619045e-07,
45
- "logits/chosen": -2.6901049613952637,
46
- "logits/rejected": -2.6502909660339355,
47
- "logps/chosen": -0.9933319091796875,
48
- "logps/rejected": -1.0394352674484253,
49
- "loss": 1.0318,
50
- "rewards/accuracies": 0.6499999761581421,
51
- "rewards/chosen": -0.794396698474884,
52
- "rewards/margins": 0.7471516728401184,
53
- "rewards/rejected": -1.5415483713150024,
54
  "step": 20
55
  },
56
  {
57
  "epoch": 0.07,
58
- "grad_norm": 8820.198504372876,
59
- "learning_rate": 2.857142857142857e-07,
60
- "logits/chosen": -2.7333264350891113,
61
- "logits/rejected": -2.6793360710144043,
62
- "logps/chosen": -0.9710652232170105,
63
- "logps/rejected": -0.9799602627754211,
64
- "loss": 1.3198,
65
- "rewards/accuracies": 0.706250011920929,
66
- "rewards/chosen": -2.275942325592041,
67
- "rewards/margins": 0.9020620584487915,
68
- "rewards/rejected": -3.178004264831543,
69
  "step": 30
70
  },
71
  {
72
- "epoch": 0.1,
73
- "grad_norm": 8453.783513094899,
74
- "learning_rate": 3.809523809523809e-07,
75
- "logits/chosen": -2.6771621704101562,
76
- "logits/rejected": -2.6321842670440674,
77
- "logps/chosen": -0.989823043346405,
78
- "logps/rejected": -0.9216930270195007,
79
- "loss": 2.0555,
80
- "rewards/accuracies": 0.762499988079071,
81
- "rewards/chosen": -0.6586966514587402,
82
- "rewards/margins": 5.100310325622559,
83
- "rewards/rejected": -5.759006500244141,
84
  "step": 40
85
  },
86
  {
87
- "epoch": 0.12,
88
- "grad_norm": 4192.139232222726,
89
- "learning_rate": 3.995412608484087e-07,
90
- "logits/chosen": -2.743403911590576,
91
- "logits/rejected": -2.6878693103790283,
92
- "logps/chosen": -0.9671042561531067,
93
- "logps/rejected": -0.917597770690918,
94
- "loss": 2.6495,
95
- "rewards/accuracies": 0.793749988079071,
96
- "rewards/chosen": -0.16885781288146973,
97
- "rewards/margins": 5.186079978942871,
98
- "rewards/rejected": -5.35493803024292,
99
  "step": 50
100
  },
101
  {
102
- "epoch": 0.15,
103
- "grad_norm": 5643.860863524967,
104
- "learning_rate": 3.976812391485896e-07,
105
- "logits/chosen": -2.7438769340515137,
106
- "logits/rejected": -2.676765203475952,
107
- "logps/chosen": -0.911353588104248,
108
- "logps/rejected": -0.9122518301010132,
109
- "loss": 3.8047,
110
- "rewards/accuracies": 0.8062499761581421,
111
- "rewards/chosen": 2.4976494312286377,
112
- "rewards/margins": 5.426072120666504,
113
- "rewards/rejected": -2.928422212600708,
114
  "step": 60
115
  },
116
  {
117
- "epoch": 0.17,
118
- "grad_norm": 4497.230754903385,
119
- "learning_rate": 3.9440458281608213e-07,
120
- "logits/chosen": -2.740940570831299,
121
- "logits/rejected": -2.7162723541259766,
122
- "logps/chosen": -0.9154363870620728,
123
- "logps/rejected": -0.868497371673584,
124
- "loss": 3.6432,
125
- "rewards/accuracies": 0.824999988079071,
126
- "rewards/chosen": 5.681364059448242,
127
- "rewards/margins": 8.000432968139648,
128
- "rewards/rejected": -2.319068431854248,
129
  "step": 70
130
  },
131
  {
132
- "epoch": 0.19,
133
- "grad_norm": 9843.974275847575,
134
- "learning_rate": 3.897347732134074e-07,
135
- "logits/chosen": -2.679215908050537,
136
- "logits/rejected": -2.625516891479492,
137
- "logps/chosen": -0.9146322011947632,
138
- "logps/rejected": -1.0181081295013428,
139
- "loss": 5.767,
140
- "rewards/accuracies": 0.800000011920929,
141
- "rewards/chosen": -10.08639907836914,
142
- "rewards/margins": 6.582289695739746,
143
- "rewards/rejected": -16.668689727783203,
144
  "step": 80
145
  },
146
  {
147
- "epoch": 0.22,
148
- "grad_norm": 4773.013380320505,
149
- "learning_rate": 3.8370527539794614e-07,
150
- "logits/chosen": -2.6771388053894043,
151
- "logits/rejected": -2.6291418075561523,
152
- "logps/chosen": -1.003847360610962,
153
- "logps/rejected": -1.0297266244888306,
154
- "loss": 4.6354,
155
- "rewards/accuracies": 0.737500011920929,
156
- "rewards/chosen": 4.863407611846924,
157
- "rewards/margins": 9.78220272064209,
158
- "rewards/rejected": -4.918795585632324,
159
  "step": 90
160
  },
161
  {
162
- "epoch": 0.24,
163
- "grad_norm": 3074.8663144850243,
164
- "learning_rate": 3.763592983027255e-07,
165
- "logits/chosen": -2.705735683441162,
166
- "logits/rejected": -2.6605448722839355,
167
- "logps/chosen": -0.9163268804550171,
168
- "logps/rejected": -0.9396775960922241,
169
- "loss": 5.8585,
170
- "rewards/accuracies": 0.8062499761581421,
171
- "rewards/chosen": -12.477940559387207,
172
- "rewards/margins": 7.702305793762207,
173
- "rewards/rejected": -20.180248260498047,
174
  "step": 100
175
  },
176
  {
177
- "epoch": 0.27,
178
- "grad_norm": 4099.610429119441,
179
- "learning_rate": 3.6774948509008527e-07,
180
- "logits/chosen": -2.714970111846924,
181
- "logits/rejected": -2.6705470085144043,
182
- "logps/chosen": -0.9598251581192017,
183
- "logps/rejected": -0.9319995641708374,
184
- "loss": 5.1529,
185
- "rewards/accuracies": 0.7875000238418579,
186
- "rewards/chosen": 9.19798755645752,
187
- "rewards/margins": 10.779365539550781,
188
- "rewards/rejected": -1.5813770294189453,
189
  "step": 110
190
  },
191
  {
192
- "epoch": 0.29,
193
- "grad_norm": 4599.711217449366,
194
- "learning_rate": 3.579375358972288e-07,
195
- "logits/chosen": -2.678779125213623,
196
- "logits/rejected": -2.6315762996673584,
197
- "logps/chosen": -0.9081487655639648,
198
- "logps/rejected": -1.0060938596725464,
199
- "loss": 4.0915,
200
- "rewards/accuracies": 0.768750011920929,
201
- "rewards/chosen": -13.663342475891113,
202
- "rewards/margins": 9.755656242370605,
203
- "rewards/rejected": -23.418994903564453,
204
  "step": 120
205
  },
206
  {
207
- "epoch": 0.31,
208
- "grad_norm": 4010.334966061441,
209
- "learning_rate": 3.4699376567716156e-07,
210
- "logits/chosen": -2.7230353355407715,
211
- "logits/rejected": -2.684389591217041,
212
- "logps/chosen": -0.8652521967887878,
213
- "logps/rejected": -0.8799147605895996,
214
- "loss": 4.4027,
215
- "rewards/accuracies": 0.762499988079071,
216
- "rewards/chosen": 2.19469952583313,
217
- "rewards/margins": 15.263641357421875,
218
- "rewards/rejected": -13.068939208984375,
219
  "step": 130
220
  },
221
  {
222
- "epoch": 0.34,
223
- "grad_norm": 5239.11146834966,
224
- "learning_rate": 3.349966003036421e-07,
225
- "logits/chosen": -2.689558506011963,
226
- "logits/rejected": -2.649766445159912,
227
- "logps/chosen": -0.9352903366088867,
228
- "logps/rejected": -0.9416161775588989,
229
- "loss": 4.7953,
230
- "rewards/accuracies": 0.7749999761581421,
231
- "rewards/chosen": -4.734063148498535,
232
- "rewards/margins": 8.841203689575195,
233
- "rewards/rejected": -13.575268745422363,
234
  "step": 140
235
  },
236
  {
237
- "epoch": 0.36,
238
- "grad_norm": 5394.35498681908,
239
- "learning_rate": 3.220320145511884e-07,
240
- "logits/chosen": -2.7070841789245605,
241
- "logits/rejected": -2.647737979888916,
242
- "logps/chosen": -0.9441506266593933,
243
- "logps/rejected": -0.9885166883468628,
244
- "loss": 4.2219,
245
- "rewards/accuracies": 0.793749988079071,
246
- "rewards/chosen": 5.9402689933776855,
247
- "rewards/margins": 12.97706413269043,
248
- "rewards/rejected": -7.036795139312744,
249
  "step": 150
250
  },
251
  {
252
- "epoch": 0.39,
253
- "grad_norm": 5022.189692479379,
254
- "learning_rate": 3.0819291597771795e-07,
255
- "logits/chosen": -2.7051825523376465,
256
- "logits/rejected": -2.667494297027588,
257
- "logps/chosen": -0.911395251750946,
258
- "logps/rejected": -0.939487099647522,
259
- "loss": 4.7963,
260
- "rewards/accuracies": 0.856249988079071,
261
- "rewards/chosen": -0.6114660501480103,
262
- "rewards/margins": 9.443866729736328,
263
- "rewards/rejected": -10.055331230163574,
264
  "step": 160
265
  },
266
  {
267
- "epoch": 0.41,
268
- "grad_norm": 5428.944545727042,
269
- "learning_rate": 2.9357847912507786e-07,
270
- "logits/chosen": -2.6787288188934326,
271
- "logits/rejected": -2.609421968460083,
272
- "logps/chosen": -0.8976411819458008,
273
- "logps/rejected": -0.8857674598693848,
274
- "loss": 4.6262,
275
- "rewards/accuracies": 0.800000011920929,
276
- "rewards/chosen": -2.82297945022583,
277
- "rewards/margins": 10.640687942504883,
278
- "rewards/rejected": -13.463666915893555,
279
  "step": 170
280
  },
281
  {
282
- "epoch": 0.44,
283
- "grad_norm": 7317.882582449178,
284
- "learning_rate": 2.7829343480875617e-07,
285
- "logits/chosen": -2.6716930866241455,
286
- "logits/rejected": -2.6018152236938477,
287
- "logps/chosen": -0.9342878460884094,
288
- "logps/rejected": -0.9536906480789185,
289
- "loss": 4.5209,
290
- "rewards/accuracies": 0.800000011920929,
291
- "rewards/chosen": 4.173262596130371,
292
- "rewards/margins": 8.933877944946289,
293
- "rewards/rejected": -4.760615348815918,
294
  "step": 180
295
  },
296
  {
297
- "epoch": 0.46,
298
- "grad_norm": 5046.2946182405685,
299
- "learning_rate": 2.624473195899052e-07,
300
- "logits/chosen": -2.737992763519287,
301
- "logits/rejected": -2.7089955806732178,
302
- "logps/chosen": -0.9629039764404297,
303
- "logps/rejected": -1.039236307144165,
304
- "loss": 4.5521,
305
- "rewards/accuracies": 0.78125,
306
- "rewards/chosen": -3.3569388389587402,
307
- "rewards/margins": 13.995905876159668,
308
- "rewards/rejected": -17.352848052978516,
309
  "step": 190
310
  },
311
  {
312
- "epoch": 0.48,
313
- "grad_norm": 4268.8163809344915,
314
- "learning_rate": 2.4615369080815547e-07,
315
- "logits/chosen": -2.6982626914978027,
316
- "logits/rejected": -2.6629488468170166,
317
- "logps/chosen": -0.8523995280265808,
318
- "logps/rejected": -0.9246847033500671,
319
- "loss": 3.8184,
320
- "rewards/accuracies": 0.78125,
321
- "rewards/chosen": 2.3979854583740234,
322
- "rewards/margins": 4.996596336364746,
323
- "rewards/rejected": -2.5986106395721436,
324
  "step": 200
325
  },
326
  {
327
- "epoch": 0.51,
328
- "grad_norm": 4211.242306423206,
329
- "learning_rate": 2.2952931280049625e-07,
330
- "logits/chosen": -2.7346115112304688,
331
- "logits/rejected": -2.6734609603881836,
332
- "logps/chosen": -1.0063531398773193,
333
- "logps/rejected": -0.9570119976997375,
334
- "loss": 4.9954,
335
- "rewards/accuracies": 0.762499988079071,
336
- "rewards/chosen": 7.86081600189209,
337
- "rewards/margins": 13.075413703918457,
338
- "rewards/rejected": -5.214597225189209,
339
  "step": 210
340
  },
341
  {
342
- "epoch": 0.53,
343
- "grad_norm": 4906.2448320907815,
344
- "learning_rate": 2.1269332013798747e-07,
345
- "logits/chosen": -2.7431142330169678,
346
- "logits/rejected": -2.7241249084472656,
347
- "logps/chosen": -0.8835189938545227,
348
- "logps/rejected": -0.8670462369918823,
349
- "loss": 4.3795,
350
- "rewards/accuracies": 0.78125,
351
- "rewards/chosen": 4.582579135894775,
352
- "rewards/margins": 8.150335311889648,
353
- "rewards/rejected": -3.567755937576294,
354
  "step": 220
355
  },
356
  {
357
- "epoch": 0.56,
358
- "grad_norm": 2995.5119741253625,
359
- "learning_rate": 1.9576636387676436e-07,
360
- "logits/chosen": -2.690732955932617,
361
- "logits/rejected": -2.653067111968994,
362
- "logps/chosen": -0.960831344127655,
363
- "logps/rejected": -0.9556485414505005,
364
- "loss": 4.0487,
365
- "rewards/accuracies": 0.7562500238418579,
366
- "rewards/chosen": -8.449748039245605,
367
- "rewards/margins": 10.095115661621094,
368
- "rewards/rejected": -18.544864654541016,
369
  "step": 230
370
  },
371
  {
372
- "epoch": 0.58,
373
- "grad_norm": 3504.414329050279,
374
- "learning_rate": 1.7886974694151976e-07,
375
- "logits/chosen": -2.7119805812835693,
376
- "logits/rejected": -2.6879172325134277,
377
- "logps/chosen": -0.990290641784668,
378
- "logps/rejected": -0.9934972524642944,
379
- "loss": 4.3644,
380
- "rewards/accuracies": 0.793749988079071,
381
- "rewards/chosen": -0.5965616106987,
382
- "rewards/margins": 9.211602210998535,
383
- "rewards/rejected": -9.808164596557617,
384
  "step": 240
385
  },
386
  {
387
- "epoch": 0.6,
388
- "grad_norm": 3400.5848210057216,
389
- "learning_rate": 1.6212455483752895e-07,
390
- "logits/chosen": -2.756906270980835,
391
- "logits/rejected": -2.6796135902404785,
392
- "logps/chosen": -0.8838168978691101,
393
- "logps/rejected": -0.9137406349182129,
394
- "loss": 4.5034,
395
- "rewards/accuracies": 0.7875000238418579,
396
- "rewards/chosen": 6.5281982421875,
397
- "rewards/margins": 8.64702033996582,
398
- "rewards/rejected": -2.1188230514526367,
399
  "step": 250
400
  },
401
  {
402
- "epoch": 0.63,
403
- "grad_norm": 6194.117841583386,
404
- "learning_rate": 1.4565078792075733e-07,
405
- "logits/chosen": -2.7132773399353027,
406
- "logits/rejected": -2.6494650840759277,
407
- "logps/chosen": -1.002362847328186,
408
- "logps/rejected": -0.9982520341873169,
409
- "loss": 4.8134,
410
- "rewards/accuracies": 0.84375,
411
- "rewards/chosen": 3.0224878787994385,
412
- "rewards/margins": 16.206506729125977,
413
- "rewards/rejected": -13.1840181350708,
414
  "step": 260
415
  },
416
  {
417
- "epoch": 0.65,
418
- "grad_norm": 4565.495892627232,
419
- "learning_rate": 1.295665014444281e-07,
420
- "logits/chosen": -2.7381529808044434,
421
- "logits/rejected": -2.6608738899230957,
422
- "logps/chosen": -0.9501218795776367,
423
- "logps/rejected": -0.9476363062858582,
424
- "loss": 5.3754,
425
- "rewards/accuracies": 0.8500000238418579,
426
- "rewards/chosen": -0.570526123046875,
427
- "rewards/margins": 12.367398262023926,
428
- "rewards/rejected": -12.9379243850708,
429
  "step": 270
430
  },
431
  {
432
- "epoch": 0.68,
433
- "grad_norm": 5337.153187944306,
434
- "learning_rate": 1.1398695954469597e-07,
435
- "logits/chosen": -2.6872425079345703,
436
- "logits/rejected": -2.630267381668091,
437
- "logps/chosen": -0.9056104421615601,
438
- "logps/rejected": -0.8939152956008911,
439
- "loss": 4.1053,
440
- "rewards/accuracies": 0.793749988079071,
441
- "rewards/chosen": 4.294297218322754,
442
- "rewards/margins": 7.472552299499512,
443
- "rewards/rejected": -3.1782548427581787,
444
  "step": 280
445
  },
446
  {
447
- "epoch": 0.7,
448
- "grad_norm": 3582.07962645892,
449
- "learning_rate": 9.902380922818425e-08,
450
- "logits/chosen": -2.7334370613098145,
451
- "logits/rejected": -2.6919913291931152,
452
- "logps/chosen": -0.9840775728225708,
453
- "logps/rejected": -0.9756690263748169,
454
- "loss": 3.2759,
455
- "rewards/accuracies": 0.762499988079071,
456
- "rewards/chosen": 8.966680526733398,
457
- "rewards/margins": 11.496904373168945,
458
- "rewards/rejected": -2.5302233695983887,
459
  "step": 290
460
  },
461
  {
462
- "epoch": 0.73,
463
- "grad_norm": 4767.591882910886,
464
- "learning_rate": 8.478428028080398e-08,
465
- "logits/chosen": -2.7305169105529785,
466
- "logits/rejected": -2.6773815155029297,
467
- "logps/chosen": -0.8988749384880066,
468
- "logps/rejected": -0.9437707662582397,
469
- "loss": 4.3175,
470
- "rewards/accuracies": 0.7875000238418579,
471
- "rewards/chosen": 0.8898951411247253,
472
- "rewards/margins": 8.447718620300293,
473
- "rewards/rejected": -7.55782413482666,
474
  "step": 300
475
  },
476
  {
477
- "epoch": 0.75,
478
- "grad_norm": 4819.380329592898,
479
- "learning_rate": 7.137041683151202e-08,
480
- "logits/chosen": -2.7228643894195557,
481
- "logits/rejected": -2.6581058502197266,
482
- "logps/chosen": -1.0781683921813965,
483
- "logps/rejected": -1.028840184211731,
484
- "loss": 2.9744,
485
- "rewards/accuracies": 0.8062499761581421,
486
- "rewards/chosen": -2.4226202964782715,
487
- "rewards/margins": 13.473236083984375,
488
- "rewards/rejected": -15.895855903625488,
489
  "step": 310
490
  },
491
  {
492
- "epoch": 0.77,
493
- "grad_norm": 7840.551721640683,
494
- "learning_rate": 5.8878346077822135e-08,
495
- "logits/chosen": -2.7280871868133545,
496
- "logits/rejected": -2.649958848953247,
497
- "logps/chosen": -0.9020591974258423,
498
- "logps/rejected": -0.9361578822135925,
499
- "loss": 2.7082,
500
- "rewards/accuracies": 0.8812500238418579,
501
- "rewards/chosen": 1.223115086555481,
502
- "rewards/margins": 10.582406997680664,
503
- "rewards/rejected": -9.35929012298584,
504
  "step": 320
505
  },
506
  {
507
- "epoch": 0.8,
508
- "grad_norm": 4662.77535052248,
509
- "learning_rate": 4.73975894135696e-08,
510
- "logits/chosen": -2.6770853996276855,
511
- "logits/rejected": -2.6099040508270264,
512
- "logps/chosen": -0.9263202548027039,
513
- "logps/rejected": -0.9608638882637024,
514
- "loss": 3.1985,
515
- "rewards/accuracies": 0.8125,
516
- "rewards/chosen": 0.2365754395723343,
517
- "rewards/margins": 13.195585250854492,
518
- "rewards/rejected": -12.959010124206543,
519
  "step": 330
520
  },
521
  {
522
- "epoch": 0.82,
523
- "grad_norm": 4550.588002339864,
524
- "learning_rate": 3.701042089556483e-08,
525
- "logits/chosen": -2.756493330001831,
526
- "logits/rejected": -2.687851667404175,
527
- "logps/chosen": -0.8901381492614746,
528
- "logps/rejected": -0.9301478266716003,
529
- "loss": 3.841,
530
- "rewards/accuracies": 0.78125,
531
- "rewards/chosen": 0.11963929980993271,
532
- "rewards/margins": 7.3289618492126465,
533
- "rewards/rejected": -7.209322929382324,
534
  "step": 340
535
  },
536
  {
537
- "epoch": 0.85,
538
- "grad_norm": 5464.471487236709,
539
- "learning_rate": 2.779127764652889e-08,
540
- "logits/chosen": -2.689107656478882,
541
- "logits/rejected": -2.6330015659332275,
542
- "logps/chosen": -0.9756801724433899,
543
- "logps/rejected": -0.9646003842353821,
544
- "loss": 3.6421,
545
- "rewards/accuracies": 0.8062499761581421,
546
- "rewards/chosen": -1.1974527835845947,
547
- "rewards/margins": 9.013090133666992,
548
- "rewards/rejected": -10.210542678833008,
549
  "step": 350
550
  },
551
  {
552
- "epoch": 0.87,
553
- "grad_norm": 5949.708940984834,
554
- "learning_rate": 1.9806226419516193e-08,
555
- "logits/chosen": -2.704460620880127,
556
- "logits/rejected": -2.656071186065674,
557
- "logps/chosen": -0.9623576402664185,
558
- "logps/rejected": -1.0082406997680664,
559
- "loss": 3.5231,
560
- "rewards/accuracies": 0.7749999761581421,
561
- "rewards/chosen": 2.5273587703704834,
562
- "rewards/margins": 11.88086223602295,
563
- "rewards/rejected": -9.35350227355957,
564
  "step": 360
565
  },
566
  {
567
- "epoch": 0.89,
568
- "grad_norm": 4320.933402478669,
569
- "learning_rate": 1.3112490146559552e-08,
570
- "logits/chosen": -2.7451281547546387,
571
- "logits/rejected": -2.686728000640869,
572
- "logps/chosen": -0.8951610326766968,
573
- "logps/rejected": -0.89850914478302,
574
- "loss": 3.0053,
575
- "rewards/accuracies": 0.856249988079071,
576
- "rewards/chosen": 0.6753175258636475,
577
- "rewards/margins": 12.29626750946045,
578
- "rewards/rejected": -11.620949745178223,
579
  "step": 370
580
  },
581
  {
582
- "epoch": 0.92,
583
- "grad_norm": 2514.940389992379,
584
- "learning_rate": 7.758037864413247e-09,
585
- "logits/chosen": -2.7158432006835938,
586
- "logits/rejected": -2.6906635761260986,
587
- "logps/chosen": -0.9033122062683105,
588
- "logps/rejected": -0.9709407091140747,
589
- "loss": 2.8751,
590
- "rewards/accuracies": 0.856249988079071,
591
- "rewards/chosen": 0.9628832936286926,
592
- "rewards/margins": 10.83133316040039,
593
- "rewards/rejected": -9.868449211120605,
594
  "step": 380
595
  },
596
  {
597
- "epoch": 0.94,
598
- "grad_norm": 3504.225752431698,
599
- "learning_rate": 3.78124095609087e-09,
600
- "logits/chosen": -2.6947999000549316,
601
- "logits/rejected": -2.6553878784179688,
602
- "logps/chosen": -0.9263744354248047,
603
- "logps/rejected": -0.9935058355331421,
604
- "loss": 3.019,
605
- "rewards/accuracies": 0.862500011920929,
606
- "rewards/chosen": 1.4072116613388062,
607
- "rewards/margins": 10.741894721984863,
608
- "rewards/rejected": -9.334683418273926,
609
  "step": 390
610
  },
611
  {
612
- "epoch": 0.97,
613
- "grad_norm": 4987.634749508018,
614
- "learning_rate": 1.2105981716597603e-09,
615
- "logits/chosen": -2.7300946712493896,
616
- "logits/rejected": -2.6389007568359375,
617
- "logps/chosen": -0.9686774015426636,
618
- "logps/rejected": -0.9328421354293823,
619
- "loss": 3.7864,
620
- "rewards/accuracies": 0.831250011920929,
621
- "rewards/chosen": 0.6465551853179932,
622
- "rewards/margins": 11.809611320495605,
623
- "rewards/rejected": -11.163057327270508,
624
  "step": 400
625
  },
626
  {
627
- "epoch": 0.99,
628
- "grad_norm": 5473.226219590305,
629
- "learning_rate": 6.453139886395398e-11,
630
- "logits/chosen": -2.7284317016601562,
631
- "logits/rejected": -2.6886637210845947,
632
- "logps/chosen": -0.9334842562675476,
633
- "logps/rejected": -0.9600637555122375,
634
- "loss": 3.6391,
635
- "rewards/accuracies": 0.875,
636
- "rewards/chosen": -2.157397747039795,
637
- "rewards/margins": 13.2835054397583,
638
- "rewards/rejected": -15.440902709960938,
639
  "step": 410
640
  },
641
  {
642
  "epoch": 1.0,
643
- "step": 413,
644
  "total_flos": 0.0,
645
- "train_loss": 3.8421780889894426,
646
- "train_runtime": 6381.4933,
647
- "train_samples_per_second": 8.293,
648
- "train_steps_per_second": 0.065
649
  }
650
  ],
651
  "logging_steps": 10,
652
- "max_steps": 413,
653
  "num_input_tokens_seen": 0,
654
  "num_train_epochs": 1,
655
  "save_steps": 100,
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.9988623435722411,
5
  "eval_steps": 10000000,
6
+ "global_step": 439,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
+ "grad_norm": 22.78893022336454,
14
+ "learning_rate": 2.2727272727272727e-09,
15
+ "logits/chosen": -1.6768856048583984,
16
+ "logits/rejected": -1.7259055376052856,
17
+ "logps/chosen": -1.2793102264404297,
18
+ "logps/rejected": -1.2162058353424072,
19
+ "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
 
25
  },
26
  {
27
  "epoch": 0.02,
28
+ "grad_norm": 25.567365971420195,
29
+ "learning_rate": 2.2727272727272725e-08,
30
+ "logits/chosen": -1.7031302452087402,
31
+ "logits/rejected": -1.6688512563705444,
32
+ "logps/chosen": -1.213205337524414,
33
+ "logps/rejected": -1.220165729522705,
34
+ "loss": 0.693,
35
+ "rewards/accuracies": 0.4513888955116272,
36
+ "rewards/chosen": 0.0002006387512665242,
37
+ "rewards/margins": -0.0009609226835891604,
38
+ "rewards/rejected": 0.001161561463959515,
39
  "step": 10
40
  },
41
  {
42
  "epoch": 0.05,
43
+ "grad_norm": 26.2968319036787,
44
+ "learning_rate": 4.545454545454545e-08,
45
+ "logits/chosen": -1.7800958156585693,
46
+ "logits/rejected": -1.7349421977996826,
47
+ "logps/chosen": -1.1448661088943481,
48
+ "logps/rejected": -1.185571312904358,
49
+ "loss": 0.6924,
50
+ "rewards/accuracies": 0.5062500238418579,
51
+ "rewards/chosen": -0.0032871514558792114,
52
+ "rewards/margins": 0.0009361729025840759,
53
+ "rewards/rejected": -0.004223324358463287,
54
  "step": 20
55
  },
56
  {
57
  "epoch": 0.07,
58
+ "grad_norm": 28.64228481569523,
59
+ "learning_rate": 6.818181818181817e-08,
60
+ "logits/chosen": -1.744109869003296,
61
+ "logits/rejected": -1.6754045486450195,
62
+ "logps/chosen": -1.195277452468872,
63
+ "logps/rejected": -1.2481118440628052,
64
+ "loss": 0.6883,
65
+ "rewards/accuracies": 0.6187499761581421,
66
+ "rewards/chosen": -0.02475227788090706,
67
+ "rewards/margins": 0.011208651587367058,
68
+ "rewards/rejected": -0.035960931330919266,
69
  "step": 30
70
  },
71
  {
72
+ "epoch": 0.09,
73
+ "grad_norm": 19.497764912088467,
74
+ "learning_rate": 9.09090909090909e-08,
75
+ "logits/chosen": -1.7310603857040405,
76
+ "logits/rejected": -1.6648972034454346,
77
+ "logps/chosen": -1.2513717412948608,
78
+ "logps/rejected": -1.3350750207901,
79
+ "loss": 0.6798,
80
+ "rewards/accuracies": 0.6812499761581421,
81
+ "rewards/chosen": -0.08884630352258682,
82
+ "rewards/margins": 0.06275957077741623,
83
+ "rewards/rejected": -0.15160587430000305,
84
  "step": 40
85
  },
86
  {
87
+ "epoch": 0.11,
88
+ "grad_norm": 24.1777149048992,
89
+ "learning_rate": 9.994307990108962e-08,
90
+ "logits/chosen": -1.6923043727874756,
91
+ "logits/rejected": -1.6273339986801147,
92
+ "logps/chosen": -1.2986948490142822,
93
+ "logps/rejected": -1.356567144393921,
94
+ "loss": 0.6636,
95
+ "rewards/accuracies": 0.71875,
96
+ "rewards/chosen": -0.18121571838855743,
97
+ "rewards/margins": 0.0755590870976448,
98
+ "rewards/rejected": -0.25677481293678284,
99
  "step": 50
100
  },
101
  {
102
+ "epoch": 0.14,
103
+ "grad_norm": 28.137865307641825,
104
+ "learning_rate": 9.959570405988094e-08,
105
+ "logits/chosen": -1.7212276458740234,
106
+ "logits/rejected": -1.6404285430908203,
107
+ "logps/chosen": -1.2889435291290283,
108
+ "logps/rejected": -1.3794549703598022,
109
+ "loss": 0.6598,
110
+ "rewards/accuracies": 0.643750011920929,
111
+ "rewards/chosen": -0.35969024896621704,
112
+ "rewards/margins": 0.07716657221317291,
113
+ "rewards/rejected": -0.4368568956851959,
114
  "step": 60
115
  },
116
  {
117
+ "epoch": 0.16,
118
+ "grad_norm": 21.253003888204287,
119
+ "learning_rate": 9.893476820924666e-08,
120
+ "logits/chosen": -1.8007967472076416,
121
+ "logits/rejected": -1.7120532989501953,
122
+ "logps/chosen": -1.476588487625122,
123
+ "logps/rejected": -1.5963420867919922,
124
+ "loss": 0.6499,
125
+ "rewards/accuracies": 0.6937500238418579,
126
+ "rewards/chosen": -0.5274931192398071,
127
+ "rewards/margins": 0.14617758989334106,
128
+ "rewards/rejected": -0.6736707091331482,
129
  "step": 70
130
  },
131
  {
132
+ "epoch": 0.18,
133
+ "grad_norm": 26.777703545743453,
134
+ "learning_rate": 9.796445099843647e-08,
135
+ "logits/chosen": -1.7857911586761475,
136
+ "logits/rejected": -1.6999902725219727,
137
+ "logps/chosen": -1.5350762605667114,
138
+ "logps/rejected": -1.6896966695785522,
139
+ "loss": 0.654,
140
+ "rewards/accuracies": 0.668749988079071,
141
+ "rewards/chosen": -0.6562157869338989,
142
+ "rewards/margins": 0.18818075954914093,
143
+ "rewards/rejected": -0.8443965911865234,
144
  "step": 80
145
  },
146
  {
147
+ "epoch": 0.2,
148
+ "grad_norm": 25.00309177460346,
149
+ "learning_rate": 9.669088708527066e-08,
150
+ "logits/chosen": -1.7340404987335205,
151
+ "logits/rejected": -1.6651910543441772,
152
+ "logps/chosen": -1.6042931079864502,
153
+ "logps/rejected": -1.6743539571762085,
154
+ "loss": 0.6399,
155
+ "rewards/accuracies": 0.637499988079071,
156
+ "rewards/chosen": -0.737191379070282,
157
+ "rewards/margins": 0.12603236734867096,
158
+ "rewards/rejected": -0.8632237315177917,
159
  "step": 90
160
  },
161
  {
162
+ "epoch": 0.23,
163
+ "grad_norm": 27.969594098965942,
164
+ "learning_rate": 9.512212835085849e-08,
165
+ "logits/chosen": -1.772202730178833,
166
+ "logits/rejected": -1.6827232837677002,
167
+ "logps/chosen": -1.5563361644744873,
168
+ "logps/rejected": -1.69924795627594,
169
+ "loss": 0.6231,
170
+ "rewards/accuracies": 0.699999988079071,
171
+ "rewards/chosen": -0.7757617831230164,
172
+ "rewards/margins": 0.20040392875671387,
173
+ "rewards/rejected": -0.9761656522750854,
174
  "step": 100
175
  },
176
  {
177
+ "epoch": 0.25,
178
+ "grad_norm": 25.83218161515289,
179
+ "learning_rate": 9.326809299301306e-08,
180
+ "logits/chosen": -1.771267294883728,
181
+ "logits/rejected": -1.6669820547103882,
182
+ "logps/chosen": -1.600318193435669,
183
+ "logps/rejected": -1.7721306085586548,
184
+ "loss": 0.6211,
185
+ "rewards/accuracies": 0.7124999761581421,
186
+ "rewards/chosen": -0.7993522882461548,
187
+ "rewards/margins": 0.24041876196861267,
188
+ "rewards/rejected": -1.0397710800170898,
189
  "step": 110
190
  },
191
  {
192
+ "epoch": 0.27,
193
+ "grad_norm": 26.667996957961257,
194
+ "learning_rate": 9.114050282021158e-08,
195
+ "logits/chosen": -1.767559289932251,
196
+ "logits/rejected": -1.7058799266815186,
197
+ "logps/chosen": -1.565065622329712,
198
+ "logps/rejected": -1.7299690246582031,
199
+ "loss": 0.6144,
200
+ "rewards/accuracies": 0.731249988079071,
201
+ "rewards/chosen": -0.8350059390068054,
202
+ "rewards/margins": 0.2350219041109085,
203
+ "rewards/rejected": -1.0700278282165527,
204
  "step": 120
205
  },
206
  {
207
+ "epoch": 0.3,
208
+ "grad_norm": 25.330536994335382,
209
+ "learning_rate": 8.875280914254802e-08,
210
+ "logits/chosen": -1.765298843383789,
211
+ "logits/rejected": -1.6749998331069946,
212
+ "logps/chosen": -1.7646329402923584,
213
+ "logps/rejected": -1.9669040441513062,
214
+ "loss": 0.5993,
215
+ "rewards/accuracies": 0.675000011920929,
216
+ "rewards/chosen": -1.1436058282852173,
217
+ "rewards/margins": 0.34769219160079956,
218
+ "rewards/rejected": -1.491297960281372,
219
  "step": 130
220
  },
221
  {
222
+ "epoch": 0.32,
223
+ "grad_norm": 24.002049854639584,
224
+ "learning_rate": 8.612010772821971e-08,
225
+ "logits/chosen": -1.7989845275878906,
226
+ "logits/rejected": -1.7554515600204468,
227
+ "logps/chosen": -1.8291784524917603,
228
+ "logps/rejected": -1.9761606454849243,
229
+ "loss": 0.5991,
230
+ "rewards/accuracies": 0.731249988079071,
231
+ "rewards/chosen": -1.2305986881256104,
232
+ "rewards/margins": 0.3437841534614563,
233
+ "rewards/rejected": -1.5743829011917114,
234
  "step": 140
235
  },
236
  {
237
+ "epoch": 0.34,
238
+ "grad_norm": 28.7685379938573,
239
+ "learning_rate": 8.325904336322055e-08,
240
+ "logits/chosen": -1.777856469154358,
241
+ "logits/rejected": -1.7238337993621826,
242
+ "logps/chosen": -1.9271425008773804,
243
+ "logps/rejected": -2.141960859298706,
244
+ "loss": 0.6082,
245
+ "rewards/accuracies": 0.6875,
246
+ "rewards/chosen": -1.5195552110671997,
247
+ "rewards/margins": 0.3352898359298706,
248
+ "rewards/rejected": -1.8548450469970703,
249
  "step": 150
250
  },
251
  {
252
+ "epoch": 0.36,
253
+ "grad_norm": 22.36425985091183,
254
+ "learning_rate": 8.01877046176447e-08,
255
+ "logits/chosen": -1.72470223903656,
256
+ "logits/rejected": -1.6554687023162842,
257
+ "logps/chosen": -1.9798545837402344,
258
+ "logps/rejected": -2.1781439781188965,
259
+ "loss": 0.5901,
260
+ "rewards/accuracies": 0.675000011920929,
261
+ "rewards/chosen": -1.6031091213226318,
262
+ "rewards/margins": 0.29380664229393005,
263
+ "rewards/rejected": -1.8969157934188843,
264
  "step": 160
265
  },
266
  {
267
+ "epoch": 0.39,
268
+ "grad_norm": 19.908864367961982,
269
+ "learning_rate": 7.692550948392249e-08,
270
+ "logits/chosen": -1.7723356485366821,
271
+ "logits/rejected": -1.7198549509048462,
272
+ "logps/chosen": -1.9558120965957642,
273
+ "logps/rejected": -2.159667491912842,
274
+ "loss": 0.5921,
275
+ "rewards/accuracies": 0.706250011920929,
276
+ "rewards/chosen": -1.4911130666732788,
277
+ "rewards/margins": 0.3868214190006256,
278
+ "rewards/rejected": -1.877934217453003,
279
  "step": 170
280
  },
281
  {
282
+ "epoch": 0.41,
283
+ "grad_norm": 31.864529340507623,
284
+ "learning_rate": 7.349308261002021e-08,
285
+ "logits/chosen": -1.7342097759246826,
286
+ "logits/rejected": -1.6827507019042969,
287
+ "logps/chosen": -1.9941844940185547,
288
+ "logps/rejected": -2.22194504737854,
289
+ "loss": 0.5855,
290
+ "rewards/accuracies": 0.706250011920929,
291
+ "rewards/chosen": -1.5941965579986572,
292
+ "rewards/margins": 0.35622045397758484,
293
+ "rewards/rejected": -1.950416922569275,
294
  "step": 180
295
  },
296
  {
297
+ "epoch": 0.43,
298
+ "grad_norm": 24.691918932601595,
299
+ "learning_rate": 6.991212490377531e-08,
300
+ "logits/chosen": -1.7905107736587524,
301
+ "logits/rejected": -1.7474826574325562,
302
+ "logps/chosen": -2.10019850730896,
303
+ "logps/rejected": -2.3696181774139404,
304
+ "loss": 0.5604,
305
+ "rewards/accuracies": 0.75,
306
+ "rewards/chosen": -1.740191102027893,
307
+ "rewards/margins": 0.4957551956176758,
308
+ "rewards/rejected": -2.2359461784362793,
309
  "step": 190
310
  },
311
  {
312
+ "epoch": 0.46,
313
+ "grad_norm": 30.11763021258028,
314
+ "learning_rate": 6.620527633276978e-08,
315
+ "logits/chosen": -1.7320966720581055,
316
+ "logits/rejected": -1.6665375232696533,
317
+ "logps/chosen": -2.161935329437256,
318
+ "logps/rejected": -2.522731304168701,
319
+ "loss": 0.5808,
320
+ "rewards/accuracies": 0.7250000238418579,
321
+ "rewards/chosen": -1.9349641799926758,
322
+ "rewards/margins": 0.5480148792266846,
323
+ "rewards/rejected": -2.4829792976379395,
324
  "step": 200
325
  },
326
  {
327
+ "epoch": 0.48,
328
+ "grad_norm": 27.459036499855436,
329
+ "learning_rate": 6.239597278716581e-08,
330
+ "logits/chosen": -1.7859094142913818,
331
+ "logits/rejected": -1.7306747436523438,
332
+ "logps/chosen": -2.2954204082489014,
333
+ "logps/rejected": -2.540391445159912,
334
+ "loss": 0.5619,
335
+ "rewards/accuracies": 0.7250000238418579,
336
+ "rewards/chosen": -2.1001808643341064,
337
+ "rewards/margins": 0.5312689542770386,
338
+ "rewards/rejected": -2.6314499378204346,
339
  "step": 210
340
  },
341
  {
342
+ "epoch": 0.5,
343
+ "grad_norm": 26.562029580561404,
344
+ "learning_rate": 5.8508297910462456e-08,
345
+ "logits/chosen": -1.7313369512557983,
346
+ "logits/rejected": -1.6572465896606445,
347
+ "logps/chosen": -2.212656021118164,
348
+ "logps/rejected": -2.5422561168670654,
349
+ "loss": 0.5673,
350
+ "rewards/accuracies": 0.699999988079071,
351
+ "rewards/chosen": -2.0982813835144043,
352
+ "rewards/margins": 0.5436533689498901,
353
+ "rewards/rejected": -2.641934871673584,
354
  "step": 220
355
  },
356
  {
357
+ "epoch": 0.52,
358
+ "grad_norm": 26.043734197401424,
359
+ "learning_rate": 5.456683083494731e-08,
360
+ "logits/chosen": -1.7218725681304932,
361
+ "logits/rejected": -1.6862335205078125,
362
+ "logps/chosen": -2.1797163486480713,
363
+ "logps/rejected": -2.389193058013916,
364
+ "loss": 0.5843,
365
+ "rewards/accuracies": 0.65625,
366
+ "rewards/chosen": -1.9718148708343506,
367
+ "rewards/margins": 0.37147068977355957,
368
+ "rewards/rejected": -2.34328556060791,
369
  "step": 230
370
  },
371
  {
372
+ "epoch": 0.55,
373
+ "grad_norm": 33.60349201324705,
374
+ "learning_rate": 5.059649078450834e-08,
375
+ "logits/chosen": -1.7049477100372314,
376
+ "logits/rejected": -1.6663001775741577,
377
+ "logps/chosen": -2.2287259101867676,
378
+ "logps/rejected": -2.5061872005462646,
379
+ "loss": 0.5527,
380
+ "rewards/accuracies": 0.6625000238418579,
381
+ "rewards/chosen": -2.120744466781616,
382
+ "rewards/margins": 0.4308454990386963,
383
+ "rewards/rejected": -2.5515899658203125,
384
  "step": 240
385
  },
386
  {
387
+ "epoch": 0.57,
388
+ "grad_norm": 24.721682812448297,
389
+ "learning_rate": 4.6622379527277186e-08,
390
+ "logits/chosen": -1.716957688331604,
391
+ "logits/rejected": -1.6692520380020142,
392
+ "logps/chosen": -2.323068857192993,
393
+ "logps/rejected": -2.551687240600586,
394
+ "loss": 0.5645,
395
+ "rewards/accuracies": 0.625,
396
+ "rewards/chosen": -2.3683600425720215,
397
+ "rewards/margins": 0.3910773694515228,
398
+ "rewards/rejected": -2.759437322616577,
399
  "step": 250
400
  },
401
  {
402
+ "epoch": 0.59,
403
+ "grad_norm": 28.965518895994943,
404
+ "learning_rate": 4.26696226741691e-08,
405
+ "logits/chosen": -1.731431007385254,
406
+ "logits/rejected": -1.6687753200531006,
407
+ "logps/chosen": -2.4228413105010986,
408
+ "logps/rejected": -2.701592206954956,
409
+ "loss": 0.5647,
410
+ "rewards/accuracies": 0.6499999761581421,
411
+ "rewards/chosen": -2.380035877227783,
412
+ "rewards/margins": 0.5175679922103882,
413
+ "rewards/rejected": -2.897603750228882,
414
  "step": 260
415
  },
416
  {
417
+ "epoch": 0.61,
418
+ "grad_norm": 24.276560436452733,
419
+ "learning_rate": 3.876321082668098e-08,
420
+ "logits/chosen": -1.7877088785171509,
421
+ "logits/rejected": -1.7258117198944092,
422
+ "logps/chosen": -2.3104138374328613,
423
+ "logps/rejected": -2.597568988800049,
424
+ "loss": 0.5577,
425
+ "rewards/accuracies": 0.7250000238418579,
426
+ "rewards/chosen": -2.143846035003662,
427
+ "rewards/margins": 0.5556932091712952,
428
+ "rewards/rejected": -2.6995394229888916,
429
  "step": 270
430
  },
431
  {
432
+ "epoch": 0.64,
433
+ "grad_norm": 26.01398581499373,
434
+ "learning_rate": 3.492784157826244e-08,
435
+ "logits/chosen": -1.7255363464355469,
436
+ "logits/rejected": -1.6368858814239502,
437
+ "logps/chosen": -2.307375431060791,
438
+ "logps/rejected": -2.6467177867889404,
439
+ "loss": 0.5569,
440
+ "rewards/accuracies": 0.71875,
441
+ "rewards/chosen": -2.1689984798431396,
442
+ "rewards/margins": 0.6079045534133911,
443
+ "rewards/rejected": -2.7769031524658203,
444
  "step": 280
445
  },
446
  {
447
+ "epoch": 0.66,
448
+ "grad_norm": 22.449000475495634,
449
+ "learning_rate": 3.118776336817812e-08,
450
+ "logits/chosen": -1.7589473724365234,
451
+ "logits/rejected": -1.7079540491104126,
452
+ "logps/chosen": -2.2426087856292725,
453
+ "logps/rejected": -2.575594425201416,
454
+ "loss": 0.5461,
455
+ "rewards/accuracies": 0.731249988079071,
456
+ "rewards/chosen": -2.0787200927734375,
457
+ "rewards/margins": 0.6515394449234009,
458
+ "rewards/rejected": -2.730259656906128,
459
  "step": 290
460
  },
461
  {
462
+ "epoch": 0.68,
463
+ "grad_norm": 23.916342153040954,
464
+ "learning_rate": 2.7566622175067443e-08,
465
+ "logits/chosen": -1.7405074834823608,
466
+ "logits/rejected": -1.6836473941802979,
467
+ "logps/chosen": -2.341914176940918,
468
+ "logps/rejected": -2.724348545074463,
469
+ "loss": 0.5507,
470
+ "rewards/accuracies": 0.7562500238418579,
471
+ "rewards/chosen": -2.2666609287261963,
472
+ "rewards/margins": 0.6088961958885193,
473
+ "rewards/rejected": -2.875556707382202,
474
  "step": 300
475
  },
476
  {
477
+ "epoch": 0.71,
478
+ "grad_norm": 28.87016818276154,
479
+ "learning_rate": 2.408731201945432e-08,
480
+ "logits/chosen": -1.738867998123169,
481
+ "logits/rejected": -1.6946017742156982,
482
+ "logps/chosen": -2.2607665061950684,
483
+ "logps/rejected": -2.508131742477417,
484
+ "loss": 0.5549,
485
+ "rewards/accuracies": 0.6812499761581421,
486
+ "rewards/chosen": -2.133742332458496,
487
+ "rewards/margins": 0.45442089438438416,
488
+ "rewards/rejected": -2.5881636142730713,
489
  "step": 310
490
  },
491
  {
492
+ "epoch": 0.73,
493
+ "grad_norm": 26.474891102018546,
494
+ "learning_rate": 2.0771830220378112e-08,
495
+ "logits/chosen": -1.6925548315048218,
496
+ "logits/rejected": -1.6349446773529053,
497
+ "logps/chosen": -2.292504072189331,
498
+ "logps/rejected": -2.5281872749328613,
499
+ "loss": 0.5545,
500
+ "rewards/accuracies": 0.625,
501
+ "rewards/chosen": -2.139239549636841,
502
+ "rewards/margins": 0.48706698417663574,
503
+ "rewards/rejected": -2.6263065338134766,
504
  "step": 320
505
  },
506
  {
507
+ "epoch": 0.75,
508
+ "grad_norm": 26.153240463475917,
509
+ "learning_rate": 1.7641138321260257e-08,
510
+ "logits/chosen": -1.7273342609405518,
511
+ "logits/rejected": -1.6638519763946533,
512
+ "logps/chosen": -2.218357563018799,
513
+ "logps/rejected": -2.6145200729370117,
514
+ "loss": 0.5485,
515
+ "rewards/accuracies": 0.824999988079071,
516
+ "rewards/chosen": -2.069005250930786,
517
+ "rewards/margins": 0.7372555136680603,
518
+ "rewards/rejected": -2.806260585784912,
519
  "step": 330
520
  },
521
  {
522
+ "epoch": 0.77,
523
+ "grad_norm": 26.27728864294511,
524
+ "learning_rate": 1.4715029564277793e-08,
525
+ "logits/chosen": -1.7901878356933594,
526
+ "logits/rejected": -1.7485193014144897,
527
+ "logps/chosen": -2.2015347480773926,
528
+ "logps/rejected": -2.5572023391723633,
529
+ "loss": 0.5577,
530
+ "rewards/accuracies": 0.7562500238418579,
531
+ "rewards/chosen": -1.9937489032745361,
532
+ "rewards/margins": 0.637208104133606,
533
+ "rewards/rejected": -2.6309566497802734,
534
  "step": 340
535
  },
536
  {
537
+ "epoch": 0.8,
538
+ "grad_norm": 26.035613152091976,
539
+ "learning_rate": 1.2012003751113343e-08,
540
+ "logits/chosen": -1.7805286645889282,
541
+ "logits/rejected": -1.7256838083267212,
542
+ "logps/chosen": -2.3138480186462402,
543
+ "logps/rejected": -2.6945948600769043,
544
+ "loss": 0.536,
545
+ "rewards/accuracies": 0.71875,
546
+ "rewards/chosen": -2.3033559322357178,
547
+ "rewards/margins": 0.6611131429672241,
548
+ "rewards/rejected": -2.9644687175750732,
549
  "step": 350
550
  },
551
  {
552
+ "epoch": 0.82,
553
+ "grad_norm": 31.4301067051425,
554
+ "learning_rate": 9.549150281252633e-09,
555
+ "logits/chosen": -1.7304834127426147,
556
+ "logits/rejected": -1.6880794763565063,
557
+ "logps/chosen": -2.2502734661102295,
558
+ "logps/rejected": -2.593324661254883,
559
+ "loss": 0.54,
560
+ "rewards/accuracies": 0.6937500238418579,
561
+ "rewards/chosen": -2.1562764644622803,
562
+ "rewards/margins": 0.5826417207717896,
563
+ "rewards/rejected": -2.7389183044433594,
564
  "step": 360
565
  },
566
  {
567
+ "epoch": 0.84,
568
+ "grad_norm": 25.13509986712804,
569
+ "learning_rate": 7.3420401072985306e-09,
570
+ "logits/chosen": -1.7723455429077148,
571
+ "logits/rejected": -1.721980094909668,
572
+ "logps/chosen": -2.3107597827911377,
573
+ "logps/rejected": -2.679028272628784,
574
+ "loss": 0.5374,
575
+ "rewards/accuracies": 0.6875,
576
+ "rewards/chosen": -2.1922922134399414,
577
+ "rewards/margins": 0.6216103434562683,
578
+ "rewards/rejected": -2.8139023780822754,
579
  "step": 370
580
  },
581
  {
582
+ "epoch": 0.86,
583
+ "grad_norm": 28.235691242335875,
584
+ "learning_rate": 5.404627290395369e-09,
585
+ "logits/chosen": -1.733109712600708,
586
+ "logits/rejected": -1.6734564304351807,
587
+ "logps/chosen": -2.241391897201538,
588
+ "logps/rejected": -2.590752363204956,
589
+ "loss": 0.5365,
590
+ "rewards/accuracies": 0.7437499761581421,
591
+ "rewards/chosen": -2.103571653366089,
592
+ "rewards/margins": 0.6187294721603394,
593
+ "rewards/rejected": -2.7223010063171387,
594
  "step": 380
595
  },
596
  {
597
+ "epoch": 0.89,
598
+ "grad_norm": 33.12956908569685,
599
+ "learning_rate": 3.74916077816162e-09,
600
+ "logits/chosen": -1.7393004894256592,
601
+ "logits/rejected": -1.6843922138214111,
602
+ "logps/chosen": -2.267721652984619,
603
+ "logps/rejected": -2.5625317096710205,
604
+ "loss": 0.5513,
605
+ "rewards/accuracies": 0.6875,
606
+ "rewards/chosen": -2.235853672027588,
607
+ "rewards/margins": 0.5573619604110718,
608
+ "rewards/rejected": -2.7932159900665283,
609
  "step": 390
610
  },
611
  {
612
+ "epoch": 0.91,
613
+ "grad_norm": 28.33250387056753,
614
+ "learning_rate": 2.386106962899165e-09,
615
+ "logits/chosen": -1.666548490524292,
616
+ "logits/rejected": -1.6048628091812134,
617
+ "logps/chosen": -2.401040554046631,
618
+ "logps/rejected": -2.733664035797119,
619
+ "loss": 0.5422,
620
+ "rewards/accuracies": 0.706250011920929,
621
+ "rewards/chosen": -2.349238872528076,
622
+ "rewards/margins": 0.5525677800178528,
623
+ "rewards/rejected": -2.901806592941284,
624
  "step": 400
625
  },
626
  {
627
+ "epoch": 0.93,
628
+ "grad_norm": 31.32317842504756,
629
+ "learning_rate": 1.3240835096913706e-09,
630
+ "logits/chosen": -1.6938998699188232,
631
+ "logits/rejected": -1.6031659841537476,
632
+ "logps/chosen": -2.251462459564209,
633
+ "logps/rejected": -2.6835711002349854,
634
+ "loss": 0.5597,
635
+ "rewards/accuracies": 0.7124999761581421,
636
+ "rewards/chosen": -2.1598236560821533,
637
+ "rewards/margins": 0.7253878116607666,
638
+ "rewards/rejected": -2.885211706161499,
639
  "step": 410
640
  },
641
+ {
642
+ "epoch": 0.96,
643
+ "grad_norm": 23.813879321456167,
644
+ "learning_rate": 5.698048727497462e-10,
645
+ "logits/chosen": -1.7287553548812866,
646
+ "logits/rejected": -1.6634016036987305,
647
+ "logps/chosen": -2.358034610748291,
648
+ "logps/rejected": -2.7596287727355957,
649
+ "loss": 0.5431,
650
+ "rewards/accuracies": 0.731249988079071,
651
+ "rewards/chosen": -2.3334739208221436,
652
+ "rewards/margins": 0.669273853302002,
653
+ "rewards/rejected": -3.0027480125427246,
654
+ "step": 420
655
+ },
656
+ {
657
+ "epoch": 0.98,
658
+ "grad_norm": 24.13495374841747,
659
+ "learning_rate": 1.2803984447259387e-10,
660
+ "logits/chosen": -1.7348169088363647,
661
+ "logits/rejected": -1.6923096179962158,
662
+ "logps/chosen": -2.339822769165039,
663
+ "logps/rejected": -2.7242085933685303,
664
+ "loss": 0.5338,
665
+ "rewards/accuracies": 0.7250000238418579,
666
+ "rewards/chosen": -2.314966917037964,
667
+ "rewards/margins": 0.6734664440155029,
668
+ "rewards/rejected": -2.9884331226348877,
669
+ "step": 430
670
+ },
671
  {
672
  "epoch": 1.0,
673
+ "step": 439,
674
  "total_flos": 0.0,
675
+ "train_loss": 0.5863300847029632,
676
+ "train_runtime": 6843.0852,
677
+ "train_samples_per_second": 8.218,
678
+ "train_steps_per_second": 0.064
679
  }
680
  ],
681
  "logging_steps": 10,
682
+ "max_steps": 439,
683
  "num_input_tokens_seen": 0,
684
  "num_train_epochs": 1,
685
  "save_steps": 100,