RikkiXu committed
Commit f142427
1 Parent(s): 7fd002f

Model save

README.md CHANGED
@@ -1,4 +1,5 @@
1
  ---
 
2
  tags:
3
  - trl
4
  - dpo
@@ -13,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
13
 
14
  # zephyr-7b-dpo-full
15
 
16
- This model was trained from scratch on the None dataset.
17
 
18
  ## Model description
19
 
@@ -32,7 +33,7 @@ More information needed
32
  ### Training hyperparameters
33
 
34
  The following hyperparameters were used during training:
35
- - learning_rate: 1e-08
36
  - train_batch_size: 4
37
  - eval_batch_size: 4
38
  - seed: 42
 
1
  ---
2
+ base_model: princeton-nlp/Mistral-7B-Base-SFT-SimPO
3
  tags:
4
  - trl
5
  - dpo
 
14
 
15
  # zephyr-7b-dpo-full
16
 
17
+ This model is a fine-tuned version of [princeton-nlp/Mistral-7B-Base-SFT-SimPO](https://huggingface.co/princeton-nlp/Mistral-7B-Base-SFT-SimPO) on the None dataset.
18
 
19
  ## Model description
20
 
 
33
  ### Training hyperparameters
34
 
35
  The following hyperparameters were used during training:
36
+ - learning_rate: 1e-07
37
  - train_batch_size: 4
38
  - eval_batch_size: 4
39
  - seed: 42
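
The hyperparameters listed above come from the auto-generated card text. As a rough illustration only, here is a minimal sketch of how they might map onto a TRL `DPOTrainer` run (assuming the pre-`DPOConfig` TRL API; the dataset, `beta`, and any multi-GPU or gradient-accumulation settings are placeholders, not values recorded in this commit):

```python
# Illustrative sketch only -- learning_rate, batch sizes, seed, epochs, and
# logging_steps are taken from this card; everything else is a placeholder.
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

base = "princeton-nlp/Mistral-7B-Base-SFT-SimPO"   # base_model named in the card
model = AutoModelForCausalLM.from_pretrained(base, torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(base)

args = TrainingArguments(
    output_dir="zephyr-7b-dpo-full",
    learning_rate=1e-07,            # from the card
    per_device_train_batch_size=4,  # train_batch_size from the card
    per_device_eval_batch_size=4,   # eval_batch_size from the card
    seed=42,                        # from the card
    num_train_epochs=1,             # from trainer_state.json
    logging_steps=10,               # from trainer_state.json
    bf16=True,
)

trainer = DPOTrainer(
    model=model,
    ref_model=None,                 # TRL builds a frozen reference copy when None
    args=args,
    beta=0.1,                       # placeholder; beta is not recorded in this diff
    train_dataset=load_dataset("json", data_files="preference_pairs.jsonl")["train"],  # placeholder data
    tokenizer=tokenizer,
)
trainer.train()
```
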
all_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
  "epoch": 1.0,
3
- "train_loss": 0.6880149315033469,
4
- "train_runtime": 4603.368,
5
- "train_samples": 38288,
6
- "train_samples_per_second": 8.317,
7
- "train_steps_per_second": 0.065
8
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "train_loss": 0.5309282744935542,
4
+ "train_runtime": 6847.998,
5
+ "train_samples": 56236,
6
+ "train_samples_per_second": 8.212,
7
+ "train_steps_per_second": 0.064
8
  }
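
The figures in the updated `all_results.json` are mutually consistent; a quick check using only the numbers above plus the 439-step total recorded in `trainer_state.json` (world size and gradient accumulation are not recorded in this commit):

```python
# Throughput sanity check using values from all_results.json / trainer_state.json.
train_runtime = 6847.998    # seconds
train_samples = 56236
max_steps = 439             # global_step / max_steps from trainer_state.json

print(train_samples / train_runtime)   # ~8.21  -> matches train_samples_per_second = 8.212
print(max_steps / train_runtime)       # ~0.064 -> matches train_steps_per_second = 0.064
print(train_samples / max_steps)       # ~128   -> samples consumed per optimizer step
```

Roughly 128 samples per step against a per-device batch of 4 points to some unrecorded combination of data-parallel workers and gradient accumulation.
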
config.json CHANGED
@@ -20,7 +20,7 @@
20
  "sliding_window": 4096,
21
  "tie_word_embeddings": false,
22
  "torch_dtype": "bfloat16",
23
- "transformers_version": "4.41.1",
24
  "use_cache": false,
25
  "vocab_size": 32000
26
  }
 
20
  "sliding_window": 4096,
21
  "tie_word_embeddings": false,
22
  "torch_dtype": "bfloat16",
23
+ "transformers_version": "4.39.3",
24
  "use_cache": false,
25
  "vocab_size": 32000
26
  }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:25a506977023960c048f2933f9aec9fc01ac4cef4294a89c59fb94311d62d0ba
3
  size 4943162336
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8d089e259179f6b43c527a368edf999eec073dc01cfa73c90adde6188d1928d
3
  size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f36b418c6f0204d63a482c4d40cf9a099d1c71913006174de004f3468b2600ac
3
  size 4999819336
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72900a606cc1d8493faaa0946c473002a0400edf1f17c01905fe2a2232dc1be1
3
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:0098c8b3746d7ba8d54ad9a79ed49c5d66268bbf89e84d00df21498d52d9cb42
3
  size 4540516344
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cba821d53166885228308afe33150d7a837fce90e03432661986a37f3edccc4
3
  size 4540516344
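
The three weight shards are stored via Git LFS, so only their pointers (sha256 and byte size) appear in the diff. A minimal sketch, assuming a locally downloaded copy of the third shard, of checking it against the pointer above:

```python
# Verify a downloaded safetensors shard against its LFS pointer (sha256 + size).
import hashlib, os

path = "model-00003-of-00003.safetensors"   # placeholder local path to the shard
expected_sha = "2cba821d53166885228308afe33150d7a837fce90e03432661986a37f3edccc4"
expected_size = 4540516344

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size
assert h.hexdigest() == expected_sha
print("shard matches the LFS pointer")
```
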
runs/Jul03_22-31-37_n136-129-074/events.out.tfevents.1720017138.n136-129-074.3171670.0 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3a0e66f77295f3829902375295d790633724c034ef007948becbc276f0fa9a23
3
- size 32937
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:80a2a5c55073794b839c2710274799cbb2c7b6ea7aac51eeb82b1adc87fa81e1
3
+ size 35355
tokenizer.json CHANGED
@@ -134,7 +134,6 @@
134
  "end_of_word_suffix": null,
135
  "fuse_unk": true,
136
  "byte_fallback": true,
137
- "ignore_merges": false,
138
  "vocab": {
139
  "<unk>": 0,
140
  "<s>": 1,
 
134
  "end_of_word_suffix": null,
135
  "fuse_unk": true,
136
  "byte_fallback": true,
 
137
  "vocab": {
138
  "<unk>": 0,
139
  "<s>": 1,
train_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
  "epoch": 1.0,
3
- "train_loss": 0.6880149315033469,
4
- "train_runtime": 4603.368,
5
- "train_samples": 38288,
6
- "train_samples_per_second": 8.317,
7
- "train_steps_per_second": 0.065
8
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "train_loss": 0.5309282744935542,
4
+ "train_runtime": 6847.998,
5
+ "train_samples": 56236,
6
+ "train_samples_per_second": 8.212,
7
+ "train_steps_per_second": 0.064
8
  }
trainer_state.json CHANGED
@@ -1,21 +1,21 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.9991645781119465,
5
  "eval_steps": 10000000,
6
- "global_step": 299,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
- "grad_norm": 94.51455806402136,
14
- "learning_rate": 3.333333333333333e-10,
15
- "logits/chosen": -1.693521499633789,
16
- "logits/rejected": -1.6753541231155396,
17
- "logps/chosen": -1.041430115699768,
18
- "logps/rejected": -0.9273841977119446,
19
  "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
@@ -24,452 +24,662 @@
24
  "step": 1
25
  },
26
  {
27
- "epoch": 0.03,
28
- "grad_norm": 100.10048800952612,
29
- "learning_rate": 3.3333333333333334e-09,
30
- "logits/chosen": -1.7426362037658691,
31
- "logits/rejected": -1.7463488578796387,
32
- "logps/chosen": -1.0522818565368652,
33
- "logps/rejected": -1.0174607038497925,
34
- "loss": 0.6933,
35
- "rewards/accuracies": 0.4722222089767456,
36
- "rewards/chosen": -0.0006895202095620334,
37
- "rewards/margins": -0.0014206942869350314,
38
- "rewards/rejected": 0.0007311741355806589,
39
  "step": 10
40
  },
41
  {
42
- "epoch": 0.07,
43
- "grad_norm": 100.2747717498339,
44
- "learning_rate": 6.666666666666667e-09,
45
- "logits/chosen": -1.876529335975647,
46
- "logits/rejected": -1.8286478519439697,
47
- "logps/chosen": -1.0717421770095825,
48
- "logps/rejected": -1.0434354543685913,
49
- "loss": 0.6934,
50
- "rewards/accuracies": 0.53125,
51
- "rewards/chosen": 0.0013453494757413864,
52
- "rewards/margins": 0.001054912805557251,
53
- "rewards/rejected": 0.00029043667018413544,
54
  "step": 20
55
  },
56
  {
57
- "epoch": 0.1,
58
- "grad_norm": 91.78075458726622,
59
- "learning_rate": 1e-08,
60
- "logits/chosen": -1.7869594097137451,
61
- "logits/rejected": -1.7599306106567383,
62
- "logps/chosen": -1.066955327987671,
63
- "logps/rejected": -1.0448154211044312,
64
- "loss": 0.6926,
65
- "rewards/accuracies": 0.5562499761581421,
66
- "rewards/chosen": 0.0009901206940412521,
67
- "rewards/margins": 0.005651786923408508,
68
- "rewards/rejected": -0.004661666229367256,
69
  "step": 30
70
  },
71
  {
72
- "epoch": 0.13,
73
- "grad_norm": 83.1668176080734,
74
- "learning_rate": 9.96594024562513e-09,
75
- "logits/chosen": -1.8331626653671265,
76
- "logits/rejected": -1.8235479593276978,
77
- "logps/chosen": -1.069947361946106,
78
- "logps/rejected": -1.0194944143295288,
79
- "loss": 0.6933,
80
- "rewards/accuracies": 0.5625,
81
- "rewards/chosen": 0.00406806543469429,
82
- "rewards/margins": 0.006294439546763897,
83
- "rewards/rejected": -0.0022263741120696068,
84
  "step": 40
85
  },
86
  {
87
- "epoch": 0.17,
88
- "grad_norm": 97.3484905394127,
89
- "learning_rate": 9.86422500924775e-09,
90
- "logits/chosen": -1.8598533868789673,
91
- "logits/rejected": -1.8249794244766235,
92
- "logps/chosen": -1.0844773054122925,
93
- "logps/rejected": -1.0546021461486816,
94
- "loss": 0.6928,
95
- "rewards/accuracies": 0.5,
96
- "rewards/chosen": 0.0010452494025230408,
97
- "rewards/margins": 0.003718096762895584,
98
- "rewards/rejected": -0.0026728473603725433,
99
  "step": 50
100
  },
101
  {
102
- "epoch": 0.2,
103
- "grad_norm": 89.15270505321199,
104
- "learning_rate": 9.696240049254743e-09,
105
- "logits/chosen": -1.8162918090820312,
106
- "logits/rejected": -1.8095808029174805,
107
- "logps/chosen": -1.1147956848144531,
108
- "logps/rejected": -1.10584557056427,
109
- "loss": 0.692,
110
- "rewards/accuracies": 0.574999988079071,
111
- "rewards/chosen": 0.003025440499186516,
112
- "rewards/margins": 0.006887376308441162,
113
- "rewards/rejected": -0.0038619358092546463,
114
  "step": 60
115
  },
116
  {
117
- "epoch": 0.23,
118
- "grad_norm": 92.66102585547152,
119
- "learning_rate": 9.464273976236516e-09,
120
- "logits/chosen": -1.7991975545883179,
121
- "logits/rejected": -1.7613614797592163,
122
- "logps/chosen": -1.0598714351654053,
123
- "logps/rejected": -1.0530080795288086,
124
- "loss": 0.6927,
125
- "rewards/accuracies": 0.5562499761581421,
126
- "rewards/chosen": 0.0042045507580041885,
127
- "rewards/margins": 0.0019652042537927628,
128
- "rewards/rejected": 0.002239346969872713,
129
  "step": 70
130
  },
131
  {
132
- "epoch": 0.27,
133
- "grad_norm": 194.03821825764928,
134
- "learning_rate": 9.171487073181198e-09,
135
- "logits/chosen": -1.8440383672714233,
136
- "logits/rejected": -1.8225574493408203,
137
- "logps/chosen": -1.0219953060150146,
138
- "logps/rejected": -1.0044893026351929,
139
- "loss": 0.6914,
140
- "rewards/accuracies": 0.625,
141
- "rewards/chosen": 0.0038851741701364517,
142
- "rewards/margins": 0.009898573160171509,
143
- "rewards/rejected": -0.006013398990035057,
144
  "step": 80
145
  },
146
  {
147
- "epoch": 0.3,
148
- "grad_norm": 89.9314461991181,
149
- "learning_rate": 8.821868240089676e-09,
150
- "logits/chosen": -1.8029550313949585,
151
- "logits/rejected": -1.7841434478759766,
152
- "logps/chosen": -1.045693039894104,
153
- "logps/rejected": -1.0079419612884521,
154
- "loss": 0.6912,
155
- "rewards/accuracies": 0.48124998807907104,
156
- "rewards/chosen": 0.005337424576282501,
157
- "rewards/margins": 0.004554492421448231,
158
- "rewards/rejected": 0.0007829321548342705,
159
  "step": 90
160
  },
161
  {
162
- "epoch": 0.33,
163
- "grad_norm": 100.68694545007727,
164
- "learning_rate": 8.42018064959393e-09,
165
- "logits/chosen": -1.8531291484832764,
166
- "logits/rejected": -1.8357194662094116,
167
- "logps/chosen": -1.078748345375061,
168
- "logps/rejected": -1.0813863277435303,
169
- "loss": 0.6903,
170
- "rewards/accuracies": 0.606249988079071,
171
- "rewards/chosen": 0.007160305976867676,
172
- "rewards/margins": 0.007577991578727961,
173
- "rewards/rejected": -0.0004176851361989975,
174
  "step": 100
175
  },
176
  {
177
- "epoch": 0.37,
178
- "grad_norm": 91.99753152902545,
179
- "learning_rate": 7.971896853961043e-09,
180
- "logits/chosen": -1.8346866369247437,
181
- "logits/rejected": -1.8052761554718018,
182
- "logps/chosen": -1.0582704544067383,
183
- "logps/rejected": -1.0159928798675537,
184
- "loss": 0.6901,
185
- "rewards/accuracies": 0.550000011920929,
186
- "rewards/chosen": -0.0007529235444962978,
187
- "rewards/margins": 0.0036198147572577,
188
- "rewards/rejected": -0.004372738301753998,
189
  "step": 110
190
  },
191
  {
192
- "epoch": 0.4,
193
- "grad_norm": 76.03854240063603,
194
- "learning_rate": 7.48312422757881e-09,
195
- "logits/chosen": -1.8829187154769897,
196
- "logits/rejected": -1.8501228094100952,
197
- "logps/chosen": -1.0292680263519287,
198
- "logps/rejected": -1.019852876663208,
199
- "loss": 0.6887,
200
- "rewards/accuracies": 0.6875,
201
- "rewards/chosen": 0.00865122489631176,
202
- "rewards/margins": 0.011260826140642166,
203
- "rewards/rejected": -0.0026096017099916935,
204
  "step": 120
205
  },
206
  {
207
- "epoch": 0.43,
208
- "grad_norm": 91.54881621872774,
209
- "learning_rate": 6.96052176068713e-09,
210
- "logits/chosen": -1.7626311779022217,
211
- "logits/rejected": -1.7299768924713135,
212
- "logps/chosen": -1.0229580402374268,
213
- "logps/rejected": -1.0213210582733154,
214
- "loss": 0.6874,
215
- "rewards/accuracies": 0.65625,
216
- "rewards/chosen": 0.010313736274838448,
217
- "rewards/margins": 0.01158633828163147,
218
- "rewards/rejected": -0.0012726020067930222,
219
  "step": 130
220
  },
221
  {
222
- "epoch": 0.47,
223
- "grad_norm": 88.02230042083127,
224
- "learning_rate": 6.4112093379492135e-09,
225
- "logits/chosen": -1.796229600906372,
226
- "logits/rejected": -1.7824671268463135,
227
- "logps/chosen": -1.078906774520874,
228
- "logps/rejected": -1.0356519222259521,
229
- "loss": 0.6887,
230
- "rewards/accuracies": 0.65625,
231
- "rewards/chosen": 0.015195205807685852,
232
- "rewards/margins": 0.011107077822089195,
233
- "rewards/rejected": 0.004088127985596657,
234
  "step": 140
235
  },
236
  {
237
- "epoch": 0.5,
238
- "grad_norm": 89.70971937959732,
239
- "learning_rate": 5.842670737842467e-09,
240
- "logits/chosen": -1.8206145763397217,
241
- "logits/rejected": -1.788490891456604,
242
- "logps/chosen": -1.0580933094024658,
243
- "logps/rejected": -1.0225986242294312,
244
- "loss": 0.6877,
245
- "rewards/accuracies": 0.5625,
246
- "rewards/chosen": 0.004929685965180397,
247
- "rewards/margins": 0.005628877319395542,
248
- "rewards/rejected": -0.0006991913542151451,
249
  "step": 150
250
  },
251
  {
252
- "epoch": 0.53,
253
- "grad_norm": 81.17857900193553,
254
- "learning_rate": 5.262651674395798e-09,
255
- "logits/chosen": -1.8310163021087646,
256
- "logits/rejected": -1.8322261571884155,
257
- "logps/chosen": -1.0157067775726318,
258
- "logps/rejected": -1.0004805326461792,
259
- "loss": 0.6876,
260
- "rewards/accuracies": 0.6187499761581421,
261
- "rewards/chosen": 0.010493971407413483,
262
- "rewards/margins": 0.013428708538413048,
263
- "rewards/rejected": -0.0029347380623221397,
264
  "step": 160
265
  },
266
  {
267
- "epoch": 0.57,
268
- "grad_norm": 87.11260929204627,
269
- "learning_rate": 4.679054270342702e-09,
270
- "logits/chosen": -1.8449046611785889,
271
- "logits/rejected": -1.7946765422821045,
272
- "logps/chosen": -1.0548999309539795,
273
- "logps/rejected": -1.051992654800415,
274
- "loss": 0.6869,
275
- "rewards/accuracies": 0.6937500238418579,
276
- "rewards/chosen": 0.010436683893203735,
277
- "rewards/margins": 0.010068513453006744,
278
- "rewards/rejected": 0.00036817044019699097,
279
  "step": 170
280
  },
281
  {
282
- "epoch": 0.6,
283
- "grad_norm": 93.41415146032115,
284
- "learning_rate": 4.099829399377524e-09,
285
- "logits/chosen": -1.8277971744537354,
286
- "logits/rejected": -1.7856277227401733,
287
- "logps/chosen": -1.0608714818954468,
288
- "logps/rejected": -1.0331629514694214,
289
- "loss": 0.6855,
290
- "rewards/accuracies": 0.6000000238418579,
291
- "rewards/chosen": 0.015542459674179554,
292
- "rewards/margins": 0.01706361211836338,
293
- "rewards/rejected": -0.0015211515128612518,
294
  "step": 180
295
  },
296
  {
297
- "epoch": 0.63,
298
- "grad_norm": 91.20164752256586,
299
- "learning_rate": 3.532868364233416e-09,
300
- "logits/chosen": -1.8144668340682983,
301
- "logits/rejected": -1.7934455871582031,
302
- "logps/chosen": -1.0488895177841187,
303
- "logps/rejected": -1.0484153032302856,
304
- "loss": 0.6869,
305
- "rewards/accuracies": 0.643750011920929,
306
- "rewards/chosen": 0.015009616501629353,
307
- "rewards/margins": 0.011918185278773308,
308
- "rewards/rejected": 0.00309143029153347,
309
  "step": 190
310
  },
311
  {
312
- "epoch": 0.67,
313
- "grad_norm": 103.72123679470863,
314
- "learning_rate": 2.985895386349233e-09,
315
- "logits/chosen": -1.783926248550415,
316
- "logits/rejected": -1.7509727478027344,
317
- "logps/chosen": -1.033827543258667,
318
- "logps/rejected": -1.0074162483215332,
319
- "loss": 0.6856,
320
- "rewards/accuracies": 0.6499999761581421,
321
- "rewards/chosen": 0.018476296216249466,
322
- "rewards/margins": 0.019362105056643486,
323
- "rewards/rejected": -0.0008858110522851348,
324
  "step": 200
325
  },
326
  {
327
- "epoch": 0.7,
328
- "grad_norm": 86.58629109761597,
329
- "learning_rate": 2.4663623718355446e-09,
330
- "logits/chosen": -1.842024803161621,
331
- "logits/rejected": -1.8078495264053345,
332
- "logps/chosen": -1.0763428211212158,
333
- "logps/rejected": -1.0434763431549072,
334
- "loss": 0.6853,
335
- "rewards/accuracies": 0.6625000238418579,
336
- "rewards/chosen": 0.018699195235967636,
337
- "rewards/margins": 0.017539886757731438,
338
- "rewards/rejected": 0.0011593066155910492,
339
  "step": 210
340
  },
341
  {
342
- "epoch": 0.74,
343
- "grad_norm": 94.3943636617196,
344
- "learning_rate": 1.9813473874379397e-09,
345
- "logits/chosen": -1.732317328453064,
346
- "logits/rejected": -1.7312743663787842,
347
- "logps/chosen": -1.073425054550171,
348
- "logps/rejected": -1.0629937648773193,
349
- "loss": 0.6847,
350
- "rewards/accuracies": 0.6312500238418579,
351
- "rewards/chosen": 0.013539738953113556,
352
- "rewards/margins": 0.013838117942214012,
353
- "rewards/rejected": -0.00029837898910045624,
354
  "step": 220
355
  },
356
  {
357
- "epoch": 0.77,
358
- "grad_norm": 92.64370783949347,
359
- "learning_rate": 1.5374582296511053e-09,
360
- "logits/chosen": -1.7242523431777954,
361
- "logits/rejected": -1.6965806484222412,
362
- "logps/chosen": -1.0366003513336182,
363
- "logps/rejected": -0.9941840171813965,
364
- "loss": 0.6857,
365
- "rewards/accuracies": 0.625,
366
- "rewards/chosen": 0.021335098892450333,
367
- "rewards/margins": 0.015094568021595478,
368
- "rewards/rejected": 0.006240529473870993,
369
  "step": 230
370
  },
371
  {
372
- "epoch": 0.8,
373
- "grad_norm": 91.14853265672438,
374
- "learning_rate": 1.1407424007485927e-09,
375
- "logits/chosen": -1.881166696548462,
376
- "logits/rejected": -1.8595256805419922,
377
- "logps/chosen": -1.0768239498138428,
378
- "logps/rejected": -1.0254974365234375,
379
- "loss": 0.6848,
380
- "rewards/accuracies": 0.7250000238418579,
381
- "rewards/chosen": 0.021426241844892502,
382
- "rewards/margins": 0.021464312449097633,
383
- "rewards/rejected": -3.8067344576120377e-05,
384
  "step": 240
385
  },
386
  {
387
- "epoch": 0.84,
388
- "grad_norm": 88.07457325362287,
389
- "learning_rate": 7.966047182060226e-10,
390
- "logits/chosen": -1.8788058757781982,
391
- "logits/rejected": -1.881800889968872,
392
- "logps/chosen": -1.0638504028320312,
393
- "logps/rejected": -1.028044581413269,
394
- "loss": 0.6828,
395
- "rewards/accuracies": 0.637499988079071,
396
- "rewards/chosen": 0.018116505816578865,
397
- "rewards/margins": 0.015344863757491112,
398
- "rewards/rejected": 0.0027716427575796843,
399
  "step": 250
400
  },
401
  {
402
- "epoch": 0.87,
403
- "grad_norm": 99.87255814500503,
404
- "learning_rate": 5.097336799988067e-10,
405
- "logits/chosen": -1.8842456340789795,
406
- "logits/rejected": -1.8642467260360718,
407
- "logps/chosen": -1.0772615671157837,
408
- "logps/rejected": -1.0665724277496338,
409
- "loss": 0.6848,
410
- "rewards/accuracies": 0.675000011920929,
411
- "rewards/chosen": 0.023015262559056282,
412
- "rewards/margins": 0.02001366578042507,
413
- "rewards/rejected": 0.0030015967786312103,
414
  "step": 260
415
  },
416
  {
417
- "epoch": 0.9,
418
- "grad_norm": 107.74820920122671,
419
- "learning_rate": 2.840375889663871e-10,
420
- "logits/chosen": -1.8729417324066162,
421
- "logits/rejected": -1.8372119665145874,
422
- "logps/chosen": -0.9936866760253906,
423
- "logps/rejected": -1.0166294574737549,
424
- "loss": 0.6847,
425
- "rewards/accuracies": 0.7250000238418579,
426
- "rewards/chosen": 0.01751965843141079,
427
- "rewards/margins": 0.02036522701382637,
428
- "rewards/rejected": -0.0028455646242946386,
429
  "step": 270
430
  },
431
  {
432
- "epoch": 0.94,
433
- "grad_norm": 87.56807066875254,
434
- "learning_rate": 1.2259130647833627e-10,
435
- "logits/chosen": -1.7625566720962524,
436
- "logits/rejected": -1.7224514484405518,
437
- "logps/chosen": -1.093827724456787,
438
- "logps/rejected": -1.0462143421173096,
439
- "loss": 0.6834,
440
- "rewards/accuracies": 0.6187499761581421,
441
- "rewards/chosen": 0.01200016774237156,
442
- "rewards/margins": 0.019106844440102577,
443
- "rewards/rejected": -0.0071066757664084435,
444
  "step": 280
445
  },
446
  {
447
- "epoch": 0.97,
448
- "grad_norm": 81.16688874976322,
449
- "learning_rate": 2.7594360825166643e-11,
450
- "logits/chosen": -1.780118703842163,
451
- "logits/rejected": -1.7672898769378662,
452
- "logps/chosen": -1.0616685152053833,
453
- "logps/rejected": -1.0320245027542114,
454
- "loss": 0.6845,
455
- "rewards/accuracies": 0.6499999761581421,
456
- "rewards/chosen": 0.028133947402238846,
457
- "rewards/margins": 0.020768892019987106,
458
- "rewards/rejected": 0.007365054450929165,
459
  "step": 290
460
  },
461
  {
462
  "epoch": 1.0,
463
- "step": 299,
464
  "total_flos": 0.0,
465
- "train_loss": 0.6880149315033469,
466
- "train_runtime": 4603.368,
467
- "train_samples_per_second": 8.317,
468
- "train_steps_per_second": 0.065
469
  }
470
  ],
471
  "logging_steps": 10,
472
- "max_steps": 299,
473
  "num_input_tokens_seen": 0,
474
  "num_train_epochs": 1,
475
  "save_steps": 100,
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.9988623435722411,
5
  "eval_steps": 10000000,
6
+ "global_step": 439,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
+ "grad_norm": 120.07926705665244,
14
+ "learning_rate": 2.2727272727272727e-09,
15
+ "logits/chosen": -1.6768856048583984,
16
+ "logits/rejected": -1.7259055376052856,
17
+ "logps/chosen": -394.9654541015625,
18
+ "logps/rejected": -320.0859069824219,
19
  "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
 
24
  "step": 1
25
  },
26
  {
27
+ "epoch": 0.02,
28
+ "grad_norm": 160.8036403422587,
29
+ "learning_rate": 2.2727272727272725e-08,
30
+ "logits/chosen": -1.703018069267273,
31
+ "logits/rejected": -1.6685585975646973,
32
+ "logps/chosen": -429.5360412597656,
33
+ "logps/rejected": -403.7555236816406,
34
+ "loss": 0.6934,
35
+ "rewards/accuracies": 0.4375,
36
+ "rewards/chosen": -0.0004270696663297713,
37
+ "rewards/margins": -0.0031983989756554365,
38
+ "rewards/rejected": 0.00277132960036397,
39
  "step": 10
40
  },
41
  {
42
+ "epoch": 0.05,
43
+ "grad_norm": 241.66358094434165,
44
+ "learning_rate": 4.545454545454545e-08,
45
+ "logits/chosen": -1.780827283859253,
46
+ "logits/rejected": -1.7355620861053467,
47
+ "logps/chosen": -442.1951599121094,
48
+ "logps/rejected": -401.2707214355469,
49
+ "loss": 0.6889,
50
+ "rewards/accuracies": 0.4625000059604645,
51
+ "rewards/chosen": -0.002479883376508951,
52
+ "rewards/margins": 0.002258532214909792,
53
+ "rewards/rejected": -0.004738415591418743,
54
  "step": 20
55
  },
56
  {
57
+ "epoch": 0.07,
58
+ "grad_norm": 160.69914583171965,
59
+ "learning_rate": 6.818181818181817e-08,
60
+ "logits/chosen": -1.7514013051986694,
61
+ "logits/rejected": -1.6820430755615234,
62
+ "logps/chosen": -439.64373779296875,
63
+ "logps/rejected": -401.2854919433594,
64
+ "loss": 0.6738,
65
+ "rewards/accuracies": 0.637499988079071,
66
+ "rewards/chosen": 0.0074474625289440155,
67
+ "rewards/margins": 0.054817844182252884,
68
+ "rewards/rejected": -0.04737037047743797,
69
  "step": 30
70
  },
71
  {
72
+ "epoch": 0.09,
73
+ "grad_norm": 144.27361385062915,
74
+ "learning_rate": 9.09090909090909e-08,
75
+ "logits/chosen": -1.7552951574325562,
76
+ "logits/rejected": -1.6888301372528076,
77
+ "logps/chosen": -428.416015625,
78
+ "logps/rejected": -383.068603515625,
79
+ "loss": 0.6579,
80
+ "rewards/accuracies": 0.6875,
81
+ "rewards/chosen": 0.1923234909772873,
82
+ "rewards/margins": 0.1899619996547699,
83
+ "rewards/rejected": 0.0023614875972270966,
84
  "step": 40
85
  },
86
  {
87
+ "epoch": 0.11,
88
+ "grad_norm": 115.83588190821067,
89
+ "learning_rate": 9.994307990108962e-08,
90
+ "logits/chosen": -1.7498763799667358,
91
+ "logits/rejected": -1.6867773532867432,
92
+ "logps/chosen": -437.7354431152344,
93
+ "logps/rejected": -383.8248596191406,
94
+ "loss": 0.6152,
95
+ "rewards/accuracies": 0.675000011920929,
96
+ "rewards/chosen": 0.5490495562553406,
97
+ "rewards/margins": 0.33619847893714905,
98
+ "rewards/rejected": 0.2128511667251587,
99
  "step": 50
100
  },
101
  {
102
+ "epoch": 0.14,
103
+ "grad_norm": 111.21906155896082,
104
+ "learning_rate": 9.959570405988094e-08,
105
+ "logits/chosen": -1.8246532678604126,
106
+ "logits/rejected": -1.7464463710784912,
107
+ "logps/chosen": -379.9985046386719,
108
+ "logps/rejected": -352.2860412597656,
109
+ "loss": 0.614,
110
+ "rewards/accuracies": 0.6187499761581421,
111
+ "rewards/chosen": 0.7562192678451538,
112
+ "rewards/margins": 0.2702825665473938,
113
+ "rewards/rejected": 0.4859367311000824,
114
  "step": 60
115
  },
116
  {
117
+ "epoch": 0.16,
118
+ "grad_norm": 114.01926694263507,
119
+ "learning_rate": 9.893476820924666e-08,
120
+ "logits/chosen": -1.9211199283599854,
121
+ "logits/rejected": -1.8387491703033447,
122
+ "logps/chosen": -412.0660095214844,
123
+ "logps/rejected": -374.1966247558594,
124
+ "loss": 0.5841,
125
+ "rewards/accuracies": 0.7124999761581421,
126
+ "rewards/chosen": 1.0015352964401245,
127
+ "rewards/margins": 0.3862503468990326,
128
+ "rewards/rejected": 0.6152850389480591,
129
  "step": 70
130
  },
131
  {
132
+ "epoch": 0.18,
133
+ "grad_norm": 143.95597429946028,
134
+ "learning_rate": 9.796445099843647e-08,
135
+ "logits/chosen": -1.9127334356307983,
136
+ "logits/rejected": -1.8274517059326172,
137
+ "logps/chosen": -424.8004455566406,
138
+ "logps/rejected": -390.2203063964844,
139
+ "loss": 0.6144,
140
+ "rewards/accuracies": 0.6812499761581421,
141
+ "rewards/chosen": 0.9376031160354614,
142
+ "rewards/margins": 0.4474514126777649,
143
+ "rewards/rejected": 0.4901517331600189,
144
  "step": 80
145
  },
146
  {
147
+ "epoch": 0.2,
148
+ "grad_norm": 126.42198014014429,
149
+ "learning_rate": 9.669088708527066e-08,
150
+ "logits/chosen": -1.8573029041290283,
151
+ "logits/rejected": -1.7926852703094482,
152
+ "logps/chosen": -435.7537536621094,
153
+ "logps/rejected": -401.1055908203125,
154
+ "loss": 0.5739,
155
+ "rewards/accuracies": 0.71875,
156
+ "rewards/chosen": 0.8144777417182922,
157
+ "rewards/margins": 0.4378415048122406,
158
+ "rewards/rejected": 0.376636266708374,
159
  "step": 90
160
  },
161
  {
162
+ "epoch": 0.23,
163
+ "grad_norm": 132.16216232500156,
164
+ "learning_rate": 9.512212835085849e-08,
165
+ "logits/chosen": -1.898048758506775,
166
+ "logits/rejected": -1.813433051109314,
167
+ "logps/chosen": -413.4141540527344,
168
+ "logps/rejected": -398.70526123046875,
169
+ "loss": 0.5603,
170
+ "rewards/accuracies": 0.71875,
171
+ "rewards/chosen": 0.6169204115867615,
172
+ "rewards/margins": 0.5242881178855896,
173
+ "rewards/rejected": 0.09263229370117188,
174
  "step": 100
175
  },
176
  {
177
+ "epoch": 0.25,
178
+ "grad_norm": 112.13374239362014,
179
+ "learning_rate": 9.326809299301306e-08,
180
+ "logits/chosen": -1.89919114112854,
181
+ "logits/rejected": -1.7970205545425415,
182
+ "logps/chosen": -454.9283142089844,
183
+ "logps/rejected": -405.9847717285156,
184
+ "loss": 0.5581,
185
+ "rewards/accuracies": 0.71875,
186
+ "rewards/chosen": 0.4878689646720886,
187
+ "rewards/margins": 0.6205599308013916,
188
+ "rewards/rejected": -0.1326909363269806,
189
  "step": 110
190
  },
191
  {
192
+ "epoch": 0.27,
193
+ "grad_norm": 113.80543806203703,
194
+ "learning_rate": 9.114050282021158e-08,
195
+ "logits/chosen": -1.8961639404296875,
196
+ "logits/rejected": -1.836488127708435,
197
+ "logps/chosen": -453.51898193359375,
198
+ "logps/rejected": -419.9806213378906,
199
+ "loss": 0.5372,
200
+ "rewards/accuracies": 0.7875000238418579,
201
+ "rewards/chosen": 0.4265638291835785,
202
+ "rewards/margins": 0.6399323344230652,
203
+ "rewards/rejected": -0.21336853504180908,
204
  "step": 120
205
  },
206
  {
207
+ "epoch": 0.3,
208
+ "grad_norm": 122.38937390797612,
209
+ "learning_rate": 8.875280914254802e-08,
210
+ "logits/chosen": -1.9070549011230469,
211
+ "logits/rejected": -1.8174508810043335,
212
+ "logps/chosen": -408.0248107910156,
213
+ "logps/rejected": -365.46185302734375,
214
+ "loss": 0.5379,
215
+ "rewards/accuracies": 0.762499988079071,
216
+ "rewards/chosen": 0.505743145942688,
217
+ "rewards/margins": 0.6783953309059143,
218
+ "rewards/rejected": -0.17265217006206512,
219
  "step": 130
220
  },
221
  {
222
+ "epoch": 0.32,
223
+ "grad_norm": 104.81942476289146,
224
+ "learning_rate": 8.612010772821971e-08,
225
+ "logits/chosen": -1.9492871761322021,
226
+ "logits/rejected": -1.903029203414917,
227
+ "logps/chosen": -463.1678771972656,
228
+ "logps/rejected": -408.36138916015625,
229
+ "loss": 0.5415,
230
+ "rewards/accuracies": 0.78125,
231
+ "rewards/chosen": 0.7421985268592834,
232
+ "rewards/margins": 0.7196205854415894,
233
+ "rewards/rejected": 0.02257799357175827,
234
  "step": 140
235
  },
236
  {
237
+ "epoch": 0.34,
238
+ "grad_norm": 130.65997070751789,
239
+ "learning_rate": 8.325904336322055e-08,
240
+ "logits/chosen": -1.9297358989715576,
241
+ "logits/rejected": -1.872240424156189,
242
+ "logps/chosen": -408.3719482421875,
243
+ "logps/rejected": -374.2057189941406,
244
+ "loss": 0.5461,
245
+ "rewards/accuracies": 0.762499988079071,
246
+ "rewards/chosen": 0.6860557198524475,
247
+ "rewards/margins": 0.6311514973640442,
248
+ "rewards/rejected": 0.05490417033433914,
249
  "step": 150
250
  },
251
  {
252
+ "epoch": 0.36,
253
+ "grad_norm": 100.26257755554322,
254
+ "learning_rate": 8.01877046176447e-08,
255
+ "logits/chosen": -1.8793761730194092,
256
+ "logits/rejected": -1.8072710037231445,
257
+ "logps/chosen": -399.7383728027344,
258
+ "logps/rejected": -373.10791015625,
259
+ "loss": 0.5278,
260
+ "rewards/accuracies": 0.71875,
261
+ "rewards/chosen": 0.4368124008178711,
262
+ "rewards/margins": 0.6604553461074829,
263
+ "rewards/rejected": -0.22364301979541779,
264
  "step": 160
265
  },
266
  {
267
+ "epoch": 0.39,
268
+ "grad_norm": 89.3633959368459,
269
+ "learning_rate": 7.692550948392249e-08,
270
+ "logits/chosen": -1.9449889659881592,
271
+ "logits/rejected": -1.8793401718139648,
272
+ "logps/chosen": -430.8158264160156,
273
+ "logps/rejected": -383.00640869140625,
274
+ "loss": 0.5275,
275
+ "rewards/accuracies": 0.6875,
276
+ "rewards/chosen": 0.5035308599472046,
277
+ "rewards/margins": 0.5975342988967896,
278
+ "rewards/rejected": -0.09400341659784317,
279
  "step": 170
280
  },
281
  {
282
+ "epoch": 0.41,
283
+ "grad_norm": 103.70599270965509,
284
+ "learning_rate": 7.349308261002021e-08,
285
+ "logits/chosen": -1.9056031703948975,
286
+ "logits/rejected": -1.8477048873901367,
287
+ "logps/chosen": -437.502197265625,
288
+ "logps/rejected": -403.87762451171875,
289
+ "loss": 0.526,
290
+ "rewards/accuracies": 0.737500011920929,
291
+ "rewards/chosen": 0.6943656206130981,
292
+ "rewards/margins": 0.6242468357086182,
293
+ "rewards/rejected": 0.07011876255273819,
294
  "step": 180
295
  },
296
  {
297
+ "epoch": 0.43,
298
+ "grad_norm": 84.20282962041543,
299
+ "learning_rate": 6.991212490377531e-08,
300
+ "logits/chosen": -1.9574388265609741,
301
+ "logits/rejected": -1.906002402305603,
302
+ "logps/chosen": -464.6742248535156,
303
+ "logps/rejected": -417.5992126464844,
304
+ "loss": 0.5064,
305
+ "rewards/accuracies": 0.7875000238418579,
306
+ "rewards/chosen": 0.6871397495269775,
307
+ "rewards/margins": 0.8371666669845581,
308
+ "rewards/rejected": -0.15002694725990295,
309
  "step": 190
310
  },
311
  {
312
+ "epoch": 0.46,
313
+ "grad_norm": 166.8394060310626,
314
+ "learning_rate": 6.620527633276978e-08,
315
+ "logits/chosen": -1.8844044208526611,
316
+ "logits/rejected": -1.8095699548721313,
317
+ "logps/chosen": -425.5460510253906,
318
+ "logps/rejected": -407.5855712890625,
319
+ "loss": 0.507,
320
+ "rewards/accuracies": 0.762499988079071,
321
+ "rewards/chosen": 0.5698906779289246,
322
+ "rewards/margins": 0.8980382680892944,
323
+ "rewards/rejected": -0.3281475901603699,
324
  "step": 200
325
  },
326
  {
327
+ "epoch": 0.48,
328
+ "grad_norm": 100.6332916667586,
329
+ "learning_rate": 6.239597278716581e-08,
330
+ "logits/chosen": -1.9531447887420654,
331
+ "logits/rejected": -1.8929874897003174,
332
+ "logps/chosen": -414.9901428222656,
333
+ "logps/rejected": -375.222412109375,
334
+ "loss": 0.4997,
335
+ "rewards/accuracies": 0.793749988079071,
336
+ "rewards/chosen": 0.5068634748458862,
337
+ "rewards/margins": 0.9239821434020996,
338
+ "rewards/rejected": -0.41711869835853577,
339
  "step": 210
340
  },
341
  {
342
+ "epoch": 0.5,
343
+ "grad_norm": 109.57190452096494,
344
+ "learning_rate": 5.8508297910462456e-08,
345
+ "logits/chosen": -1.8995733261108398,
346
+ "logits/rejected": -1.818933129310608,
347
+ "logps/chosen": -414.49176025390625,
348
+ "logps/rejected": -402.1959228515625,
349
+ "loss": 0.498,
350
+ "rewards/accuracies": 0.793749988079071,
351
+ "rewards/chosen": 0.3541712164878845,
352
+ "rewards/margins": 0.9701933860778809,
353
+ "rewards/rejected": -0.6160220503807068,
354
  "step": 220
355
  },
356
  {
357
+ "epoch": 0.52,
358
+ "grad_norm": 221.2346866193578,
359
+ "learning_rate": 5.456683083494731e-08,
360
+ "logits/chosen": -1.892313003540039,
361
+ "logits/rejected": -1.8525936603546143,
362
+ "logps/chosen": -472.8267517089844,
363
+ "logps/rejected": -455.94305419921875,
364
+ "loss": 0.5217,
365
+ "rewards/accuracies": 0.7250000238418579,
366
+ "rewards/chosen": 0.2913890480995178,
367
+ "rewards/margins": 0.6712461709976196,
368
+ "rewards/rejected": -0.3798571825027466,
369
  "step": 230
370
  },
371
  {
372
+ "epoch": 0.55,
373
+ "grad_norm": 199.8394238496795,
374
+ "learning_rate": 5.059649078450834e-08,
375
+ "logits/chosen": -1.8888638019561768,
376
+ "logits/rejected": -1.8429927825927734,
377
+ "logps/chosen": -432.7430725097656,
378
+ "logps/rejected": -422.738037109375,
379
+ "loss": 0.4908,
380
+ "rewards/accuracies": 0.7437499761581421,
381
+ "rewards/chosen": 0.274366557598114,
382
+ "rewards/margins": 0.8101264834403992,
383
+ "rewards/rejected": -0.5357599854469299,
384
  "step": 240
385
  },
386
  {
387
+ "epoch": 0.57,
388
+ "grad_norm": 80.39074621948711,
389
+ "learning_rate": 4.6622379527277186e-08,
390
+ "logits/chosen": -1.8966060876846313,
391
+ "logits/rejected": -1.8454921245574951,
392
+ "logps/chosen": -394.818115234375,
393
+ "logps/rejected": -373.1236877441406,
394
+ "loss": 0.4952,
395
+ "rewards/accuracies": 0.6937500238418579,
396
+ "rewards/chosen": 0.21977496147155762,
397
+ "rewards/margins": 0.7499412298202515,
398
+ "rewards/rejected": -0.5301662683486938,
399
  "step": 250
400
  },
401
  {
402
+ "epoch": 0.59,
403
+ "grad_norm": 161.76520518881122,
404
+ "learning_rate": 4.26696226741691e-08,
405
+ "logits/chosen": -1.9112564325332642,
406
+ "logits/rejected": -1.839714765548706,
407
+ "logps/chosen": -430.2705078125,
408
+ "logps/rejected": -403.42633056640625,
409
+ "loss": 0.5027,
410
+ "rewards/accuracies": 0.737500011920929,
411
+ "rewards/chosen": 0.09559208154678345,
412
+ "rewards/margins": 0.799443244934082,
413
+ "rewards/rejected": -0.7038511037826538,
414
  "step": 260
415
  },
416
  {
417
+ "epoch": 0.61,
418
+ "grad_norm": 92.5112436349587,
419
+ "learning_rate": 3.876321082668098e-08,
420
+ "logits/chosen": -1.9705326557159424,
421
+ "logits/rejected": -1.896384596824646,
422
+ "logps/chosen": -455.70849609375,
423
+ "logps/rejected": -424.35528564453125,
424
+ "loss": 0.4925,
425
+ "rewards/accuracies": 0.78125,
426
+ "rewards/chosen": 0.3973539471626282,
427
+ "rewards/margins": 0.953347384929657,
428
+ "rewards/rejected": -0.5559934377670288,
429
  "step": 270
430
  },
431
  {
432
+ "epoch": 0.64,
433
+ "grad_norm": 178.08098991416173,
434
+ "learning_rate": 3.492784157826244e-08,
435
+ "logits/chosen": -1.8988901376724243,
436
+ "logits/rejected": -1.8011884689331055,
437
+ "logps/chosen": -438.60076904296875,
438
+ "logps/rejected": -388.46978759765625,
439
+ "loss": 0.5004,
440
+ "rewards/accuracies": 0.75,
441
+ "rewards/chosen": 0.43949180841445923,
442
+ "rewards/margins": 1.0134050846099854,
443
+ "rewards/rejected": -0.5739132165908813,
444
  "step": 280
445
  },
446
  {
447
+ "epoch": 0.66,
448
+ "grad_norm": 118.63444466442957,
449
+ "learning_rate": 3.118776336817812e-08,
450
+ "logits/chosen": -1.9487316608428955,
451
+ "logits/rejected": -1.8821332454681396,
452
+ "logps/chosen": -431.9124450683594,
453
+ "logps/rejected": -391.89208984375,
454
+ "loss": 0.4867,
455
+ "rewards/accuracies": 0.7749999761581421,
456
+ "rewards/chosen": 0.3583192229270935,
457
+ "rewards/margins": 0.9170917272567749,
458
+ "rewards/rejected": -0.5587725639343262,
459
  "step": 290
460
  },
461
+ {
462
+ "epoch": 0.68,
463
+ "grad_norm": 105.93330787956464,
464
+ "learning_rate": 2.7566622175067443e-08,
465
+ "logits/chosen": -1.9368865489959717,
466
+ "logits/rejected": -1.8702398538589478,
467
+ "logps/chosen": -427.23101806640625,
468
+ "logps/rejected": -406.6597595214844,
469
+ "loss": 0.5099,
470
+ "rewards/accuracies": 0.737500011920929,
471
+ "rewards/chosen": 0.41136330366134644,
472
+ "rewards/margins": 0.9053149223327637,
473
+ "rewards/rejected": -0.4939516484737396,
474
+ "step": 300
475
+ },
476
+ {
477
+ "epoch": 0.71,
478
+ "grad_norm": 94.82101284169875,
479
+ "learning_rate": 2.408731201945432e-08,
480
+ "logits/chosen": -1.9327596426010132,
481
+ "logits/rejected": -1.8846778869628906,
482
+ "logps/chosen": -422.32061767578125,
483
+ "logps/rejected": -419.91131591796875,
484
+ "loss": 0.4923,
485
+ "rewards/accuracies": 0.737500011920929,
486
+ "rewards/chosen": 0.4359460473060608,
487
+ "rewards/margins": 0.7473770380020142,
488
+ "rewards/rejected": -0.31143102049827576,
489
+ "step": 310
490
+ },
491
+ {
492
+ "epoch": 0.73,
493
+ "grad_norm": 197.8417598175193,
494
+ "learning_rate": 2.0771830220378112e-08,
495
+ "logits/chosen": -1.8852752447128296,
496
+ "logits/rejected": -1.8286488056182861,
497
+ "logps/chosen": -450.9109802246094,
498
+ "logps/rejected": -427.3114318847656,
499
+ "loss": 0.4812,
500
+ "rewards/accuracies": 0.7875000238418579,
501
+ "rewards/chosen": 0.44545871019363403,
502
+ "rewards/margins": 0.8026180267333984,
503
+ "rewards/rejected": -0.357159286737442,
504
+ "step": 320
505
+ },
506
+ {
507
+ "epoch": 0.75,
508
+ "grad_norm": 115.40932567329016,
509
+ "learning_rate": 1.7641138321260257e-08,
510
+ "logits/chosen": -1.9217668771743774,
511
+ "logits/rejected": -1.8482532501220703,
512
+ "logps/chosen": -424.6617736816406,
513
+ "logps/rejected": -386.41943359375,
514
+ "loss": 0.4888,
515
+ "rewards/accuracies": 0.8187500238418579,
516
+ "rewards/chosen": 0.49162572622299194,
517
+ "rewards/margins": 1.0169591903686523,
518
+ "rewards/rejected": -0.5253334045410156,
519
+ "step": 330
520
+ },
521
+ {
522
+ "epoch": 0.77,
523
+ "grad_norm": 116.14707152688237,
524
+ "learning_rate": 1.4715029564277793e-08,
525
+ "logits/chosen": -1.995141625404358,
526
+ "logits/rejected": -1.9429680109024048,
527
+ "logps/chosen": -437.26324462890625,
528
+ "logps/rejected": -408.69403076171875,
529
+ "loss": 0.4984,
530
+ "rewards/accuracies": 0.7749999761581421,
531
+ "rewards/chosen": 0.6876565217971802,
532
+ "rewards/margins": 0.9710708856582642,
533
+ "rewards/rejected": -0.283414363861084,
534
+ "step": 340
535
+ },
536
+ {
537
+ "epoch": 0.8,
538
+ "grad_norm": 96.35236055831872,
539
+ "learning_rate": 1.2012003751113343e-08,
540
+ "logits/chosen": -1.9784055948257446,
541
+ "logits/rejected": -1.913835883140564,
542
+ "logps/chosen": -427.19921875,
543
+ "logps/rejected": -406.6058044433594,
544
+ "loss": 0.4675,
545
+ "rewards/accuracies": 0.7749999761581421,
546
+ "rewards/chosen": 0.41034063696861267,
547
+ "rewards/margins": 0.9592168927192688,
548
+ "rewards/rejected": -0.5488761067390442,
549
+ "step": 350
550
+ },
551
+ {
552
+ "epoch": 0.82,
553
+ "grad_norm": 119.40607594359629,
554
+ "learning_rate": 9.549150281252633e-09,
555
+ "logits/chosen": -1.9397361278533936,
556
+ "logits/rejected": -1.884749174118042,
557
+ "logps/chosen": -451.00531005859375,
558
+ "logps/rejected": -423.86553955078125,
559
+ "loss": 0.481,
560
+ "rewards/accuracies": 0.7437499761581421,
561
+ "rewards/chosen": 0.5500633120536804,
562
+ "rewards/margins": 0.9735897779464722,
563
+ "rewards/rejected": -0.4235265851020813,
564
+ "step": 360
565
+ },
566
+ {
567
+ "epoch": 0.84,
568
+ "grad_norm": 110.76475497042553,
569
+ "learning_rate": 7.3420401072985306e-09,
570
+ "logits/chosen": -1.9737659692764282,
571
+ "logits/rejected": -1.9217618703842163,
572
+ "logps/chosen": -427.6600646972656,
573
+ "logps/rejected": -412.3758850097656,
574
+ "loss": 0.4918,
575
+ "rewards/accuracies": 0.7437499761581421,
576
+ "rewards/chosen": 0.44847536087036133,
577
+ "rewards/margins": 0.9299023747444153,
578
+ "rewards/rejected": -0.4814269542694092,
579
+ "step": 370
580
+ },
581
+ {
582
+ "epoch": 0.86,
583
+ "grad_norm": 95.99625394215789,
584
+ "learning_rate": 5.404627290395369e-09,
585
+ "logits/chosen": -1.9365133047103882,
586
+ "logits/rejected": -1.8697948455810547,
587
+ "logps/chosen": -429.69219970703125,
588
+ "logps/rejected": -406.6961975097656,
589
+ "loss": 0.4841,
590
+ "rewards/accuracies": 0.7875000238418579,
591
+ "rewards/chosen": 0.6347268223762512,
592
+ "rewards/margins": 0.9203673601150513,
593
+ "rewards/rejected": -0.2856404185295105,
594
+ "step": 380
595
+ },
596
+ {
597
+ "epoch": 0.89,
598
+ "grad_norm": 157.20747053789427,
599
+ "learning_rate": 3.74916077816162e-09,
600
+ "logits/chosen": -1.945406198501587,
601
+ "logits/rejected": -1.8826377391815186,
602
+ "logps/chosen": -412.59619140625,
603
+ "logps/rejected": -386.8226623535156,
604
+ "loss": 0.4932,
605
+ "rewards/accuracies": 0.731249988079071,
606
+ "rewards/chosen": 0.42942380905151367,
607
+ "rewards/margins": 0.8297268152236938,
608
+ "rewards/rejected": -0.4003029763698578,
609
+ "step": 390
610
+ },
611
+ {
612
+ "epoch": 0.91,
613
+ "grad_norm": 105.21807814382213,
614
+ "learning_rate": 2.386106962899165e-09,
615
+ "logits/chosen": -1.871506690979004,
616
+ "logits/rejected": -1.7978681325912476,
617
+ "logps/chosen": -419.5323791503906,
618
+ "logps/rejected": -388.26239013671875,
619
+ "loss": 0.4821,
620
+ "rewards/accuracies": 0.762499988079071,
621
+ "rewards/chosen": 0.3208548426628113,
622
+ "rewards/margins": 0.8715551495552063,
623
+ "rewards/rejected": -0.5507001876831055,
624
+ "step": 400
625
+ },
626
+ {
627
+ "epoch": 0.93,
628
+ "grad_norm": 114.13246004934948,
629
+ "learning_rate": 1.3240835096913706e-09,
630
+ "logits/chosen": -1.9058313369750977,
631
+ "logits/rejected": -1.8005132675170898,
632
+ "logps/chosen": -423.19305419921875,
633
+ "logps/rejected": -383.4271240234375,
634
+ "loss": 0.4992,
635
+ "rewards/accuracies": 0.731249988079071,
636
+ "rewards/chosen": 0.6571682095527649,
637
+ "rewards/margins": 0.9992189407348633,
638
+ "rewards/rejected": -0.3420506417751312,
639
+ "step": 410
640
+ },
641
+ {
642
+ "epoch": 0.96,
643
+ "grad_norm": 88.16218181131795,
644
+ "learning_rate": 5.698048727497462e-10,
645
+ "logits/chosen": -1.929990530014038,
646
+ "logits/rejected": -1.8586419820785522,
647
+ "logps/chosen": -402.3602294921875,
648
+ "logps/rejected": -381.64605712890625,
649
+ "loss": 0.4911,
650
+ "rewards/accuracies": 0.78125,
651
+ "rewards/chosen": 0.5359801650047302,
652
+ "rewards/margins": 1.0312530994415283,
653
+ "rewards/rejected": -0.4952728748321533,
654
+ "step": 420
655
+ },
656
+ {
657
+ "epoch": 0.98,
658
+ "grad_norm": 104.24331856340095,
659
+ "learning_rate": 1.2803984447259387e-10,
660
+ "logits/chosen": -1.9403343200683594,
661
+ "logits/rejected": -1.881209135055542,
662
+ "logps/chosen": -453.416259765625,
663
+ "logps/rejected": -408.7674865722656,
664
+ "loss": 0.4731,
665
+ "rewards/accuracies": 0.768750011920929,
666
+ "rewards/chosen": 0.4977526068687439,
667
+ "rewards/margins": 1.0032169818878174,
668
+ "rewards/rejected": -0.5054643750190735,
669
+ "step": 430
670
+ },
671
  {
672
  "epoch": 1.0,
673
+ "step": 439,
674
  "total_flos": 0.0,
675
+ "train_loss": 0.5309282744935542,
676
+ "train_runtime": 6847.998,
677
+ "train_samples_per_second": 8.212,
678
+ "train_steps_per_second": 0.064
679
  }
680
  ],
681
  "logging_steps": 10,
682
+ "max_steps": 439,
683
  "num_input_tokens_seen": 0,
684
  "num_train_epochs": 1,
685
  "save_steps": 100,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ce791dc81b1b2238243a31a7af35de8e05887362f23dad989bfe1e254d1f7d14
3
- size 6520
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:85030f3e8e8a49982a5e76bc0937f8c7c89ad40820c9fa0bff6598e0cf753ecb
3
+ size 6264