RikkiXu committed on
Commit e63de6b
1 Parent(s): 9d1d2af

Model save

README.md CHANGED
@@ -13,7 +13,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # zephyr-7b-dpo-full
 
- This model was trained from scratch on an unknown dataset.
+ This model was trained from scratch on the None dataset.
 
 ## Model description
 
@@ -32,7 +32,7 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
- - learning_rate: 3e-07
+ - learning_rate: 5e-07
 - train_batch_size: 4
 - eval_batch_size: 4
 - seed: 42
@@ -54,5 +54,5 @@ The following hyperparameters were used during training:
 
 - Transformers 4.39.3
 - Pytorch 2.1.2+cu118
- - Datasets 2.19.1
+ - Datasets 2.16.1
 - Tokenizers 0.15.2
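
The hyperparameters and framework versions above come straight from the auto-generated model card. Purely as a point of reference, the sketch below shows how those values would map onto `transformers.TrainingArguments`; it is not the training script used for this run, and settings the card does not record (scheduler, warmup, gradient accumulation, number of GPUs, DPO beta) are omitted.

```python
# Minimal sketch only: maps the model-card hyperparameters onto
# transformers.TrainingArguments. Anything not listed in the card is left out.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="zephyr-7b-dpo-full",    # placeholder; the actual output path is not in this diff
    learning_rate=5e-07,                # updated in this commit (previously 3e-07)
    per_device_train_batch_size=4,      # "train_batch_size: 4"
    per_device_eval_batch_size=4,       # "eval_batch_size: 4"
    seed=42,
    num_train_epochs=1,                 # all_results.json reports "epoch": 1.0
    bf16=True,                          # config.json stores weights as bfloat16
    logging_steps=10,                   # trainer_state.json logs every 10 steps
)
```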
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "train_loss": 0.20326648155848184,
-    "train_runtime": 5896.1189,
+    "train_loss": 0.4513903107398596,
+    "train_runtime": 5854.463,
     "train_samples": 50000,
-    "train_samples_per_second": 8.48,
-    "train_steps_per_second": 0.066
+    "train_samples_per_second": 8.54,
+    "train_steps_per_second": 0.067
 }
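
The throughput figures in `all_results.json` are internally consistent with the step count recorded in `trainer_state.json` (390 optimizer steps over one epoch of 50,000 samples). A quick, purely illustrative check:

```python
# Sanity check of the reported throughput (new values from this commit).
train_samples = 50_000
train_runtime = 5854.463   # seconds
total_steps = 390          # trainer_state.json shows "step": 390 at epoch 1.0

print(round(train_samples / train_runtime, 2))   # 8.54  -> train_samples_per_second
print(round(total_steps / train_runtime, 3))     # 0.067 -> train_steps_per_second
print(round(train_samples / total_steps))        # ~128 samples per optimizer step
```

With a per-device batch size of 4, the roughly 128 samples consumed per step implies the product of GPU count and gradient-accumulation steps was around 32, though neither value is recorded in this commit.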
config.json CHANGED
@@ -20,7 +20,7 @@
   "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.41.1",
+  "transformers_version": "4.39.3",
   "use_cache": false,
   "vocab_size": 32000
 }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:58bde9bfb67010c336cace37ab13ac39da6af79040fcdbdabd0a04935b66a870
+oid sha256:04f2436f631684bd85d9ee0f015c2a6d18e79bd1c5a33dd8503659b12524b028
 size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:35d424b1b9f269fcdb54ade09434feadf354ef611fe440a2d936528908734919
+oid sha256:70de4abb0b18d600edfa17cb8203a64d82cb5cd93011b4c04bcdbe4e85b25295
 size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:073b6342526b72183cb12d771d63bd08d1440bc35898cc6d740a2bf32a97585b
+oid sha256:3f381addd443a7edabe9d8200e1356682fb04f61fecc2810526dc1a4c51d7868
 size 4540516344
runs/Jun22_08-38-32_n136-100-194/events.out.tfevents.1719016734.n136-100-194.1556408.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a680a3693b3c5f01d4a780663ddb11807a5a69d4f00219979400f7bc2764c809
-size 26098
+oid sha256:ffe1de74fe45221dbe6392c3d7e72da21567f3fa1c45a226d48ccd3eb6bd3501
+size 32644
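
The file above is the TensorBoard event log written during the run. To look at the curves behind the scalars in `trainer_state.json`, the standard event-accumulator API can read it directly; an illustrative snippet, assuming `tensorboard` is installed and the `runs/` directory has been pulled from LFS:

```python
# Read the scalar tags out of the TensorBoard event file shipped in this repo.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Jun22_08-38-32_n136-100-194")
ea.Reload()                   # parse the events.out.tfevents.* file
print(ea.Tags()["scalars"])   # e.g. train/loss, train/rewards/margins
```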
tokenizer.json CHANGED
@@ -134,7 +134,6 @@
       "end_of_word_suffix": null,
       "fuse_unk": true,
       "byte_fallback": true,
-      "ignore_merges": false,
       "vocab": {
         "<unk>": 0,
         "<s>": 1,
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "train_loss": 0.20326648155848184,
-    "train_runtime": 5896.1189,
+    "train_loss": 0.4513903107398596,
+    "train_runtime": 5854.463,
     "train_samples": 50000,
-    "train_samples_per_second": 8.48,
-    "train_steps_per_second": 0.066
+    "train_samples_per_second": 8.54,
+    "train_steps_per_second": 0.067
 }
trainer_state.json CHANGED
@@ -10,13 +10,13 @@
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
- "grad_norm": 747.0977926336889,
14
- "learning_rate": 7.692307692307691e-09,
15
- "logits/chosen": -2.5617921352386475,
16
- "logits/rejected": -2.415619373321533,
17
  "logps/chosen": -258.1644592285156,
18
- "logps/rejected": -191.65736389160156,
19
- "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
@@ -25,597 +25,597 @@
25
  },
26
  {
27
  "epoch": 0.03,
28
- "grad_norm": 746.141595669296,
29
- "learning_rate": 7.692307692307691e-08,
30
- "logits/chosen": -2.6110925674438477,
31
- "logits/rejected": -2.524423122406006,
32
- "logps/chosen": -267.3368225097656,
33
- "logps/rejected": -198.19520568847656,
34
- "loss": 0.6963,
35
- "rewards/accuracies": 0.3888888955116272,
36
- "rewards/chosen": -0.02406422607600689,
37
- "rewards/margins": -0.021091409027576447,
38
- "rewards/rejected": -0.0029728179797530174,
39
  "step": 10
40
  },
41
  {
42
  "epoch": 0.05,
43
- "grad_norm": 433.45696577907285,
44
- "learning_rate": 1.5384615384615382e-07,
45
- "logits/chosen": -2.628641128540039,
46
- "logits/rejected": -2.5271899700164795,
47
- "logps/chosen": -260.9211120605469,
48
- "logps/rejected": -198.38711547851562,
49
- "loss": 0.5697,
50
- "rewards/accuracies": 0.768750011920929,
51
- "rewards/chosen": 0.20215623080730438,
52
- "rewards/margins": 0.35489505529403687,
53
- "rewards/rejected": -0.1527387946844101,
54
  "step": 20
55
  },
56
  {
57
  "epoch": 0.08,
58
- "grad_norm": 466.53027664588734,
59
- "learning_rate": 2.3076923076923078e-07,
60
- "logits/chosen": -2.6438043117523193,
61
- "logits/rejected": -2.5429482460021973,
62
- "logps/chosen": -252.55557250976562,
63
- "logps/rejected": -195.11138916015625,
64
- "loss": 0.306,
65
- "rewards/accuracies": 0.90625,
66
- "rewards/chosen": 1.301897406578064,
67
- "rewards/margins": 1.880059838294983,
68
- "rewards/rejected": -0.5781622529029846,
69
  "step": 30
70
  },
71
  {
72
  "epoch": 0.1,
73
- "grad_norm": 451.3374046706128,
74
- "learning_rate": 2.999939918069778e-07,
75
- "logits/chosen": -2.6604888439178467,
76
- "logits/rejected": -2.5366878509521484,
77
- "logps/chosen": -244.97402954101562,
78
- "logps/rejected": -199.42355346679688,
79
- "loss": 0.2052,
80
- "rewards/accuracies": 0.875,
81
- "rewards/chosen": 2.6914100646972656,
82
- "rewards/margins": 3.822247266769409,
83
- "rewards/rejected": -1.130837321281433,
84
  "step": 40
85
  },
86
  {
87
  "epoch": 0.13,
88
- "grad_norm": 268.00932309883206,
89
- "learning_rate": 2.9927359084964875e-07,
90
- "logits/chosen": -2.7027461528778076,
91
- "logits/rejected": -2.5904927253723145,
92
- "logps/chosen": -258.02886962890625,
93
- "logps/rejected": -204.94631958007812,
94
- "loss": 0.2326,
95
- "rewards/accuracies": 0.887499988079071,
96
- "rewards/chosen": 4.602013111114502,
97
- "rewards/margins": 5.5924835205078125,
98
- "rewards/rejected": -0.9904701113700867,
99
  "step": 50
100
  },
101
  {
102
  "epoch": 0.15,
103
- "grad_norm": 340.360656955259,
104
- "learning_rate": 2.9735816061234966e-07,
105
- "logits/chosen": -2.6768908500671387,
106
- "logits/rejected": -2.557954788208008,
107
- "logps/chosen": -259.3023681640625,
108
- "logps/rejected": -192.10040283203125,
109
- "loss": 0.1987,
110
- "rewards/accuracies": 0.9312499761581421,
111
- "rewards/chosen": 4.70054817199707,
112
- "rewards/margins": 6.1066999435424805,
113
- "rewards/rejected": -1.4061520099639893,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.18,
118
- "grad_norm": 344.3937897868754,
119
- "learning_rate": 2.942630353226844e-07,
120
- "logits/chosen": -2.650172472000122,
121
- "logits/rejected": -2.500756025314331,
122
- "logps/chosen": -288.3818054199219,
123
- "logps/rejected": -222.72811889648438,
124
- "loss": 0.1904,
125
  "rewards/accuracies": 0.918749988079071,
126
- "rewards/chosen": 4.768267631530762,
127
- "rewards/margins": 7.4695234298706055,
128
- "rewards/rejected": -2.7012553215026855,
129
  "step": 70
130
  },
131
  {
132
  "epoch": 0.2,
133
- "grad_norm": 453.28474416843744,
134
- "learning_rate": 2.900129934114876e-07,
135
- "logits/chosen": -2.5939252376556396,
136
- "logits/rejected": -2.458728790283203,
137
- "logps/chosen": -260.589111328125,
138
- "logps/rejected": -212.63925170898438,
139
- "loss": 0.1791,
140
- "rewards/accuracies": 0.9312499761581421,
141
- "rewards/chosen": 3.8766517639160156,
142
- "rewards/margins": 7.434275150299072,
143
- "rewards/rejected": -3.5576236248016357,
144
  "step": 80
145
  },
146
  {
147
  "epoch": 0.23,
148
- "grad_norm": 598.8098573854415,
149
- "learning_rate": 2.8464205914585213e-07,
150
- "logits/chosen": -2.6328907012939453,
151
- "logits/rejected": -2.507690906524658,
152
- "logps/chosen": -253.75570678710938,
153
- "logps/rejected": -193.23147583007812,
154
- "loss": 0.19,
155
- "rewards/accuracies": 0.90625,
156
- "rewards/chosen": 3.2832133769989014,
157
- "rewards/margins": 6.802037715911865,
158
- "rewards/rejected": -3.518825054168701,
159
  "step": 90
160
  },
161
  {
162
  "epoch": 0.26,
163
- "grad_norm": 242.78091859451033,
164
- "learning_rate": 2.78193230243403e-07,
165
- "logits/chosen": -2.6586058139801025,
166
- "logits/rejected": -2.52885103225708,
167
- "logps/chosen": -234.36068725585938,
168
- "logps/rejected": -197.55322265625,
169
- "loss": 0.1766,
170
- "rewards/accuracies": 0.9125000238418579,
171
- "rewards/chosen": 4.212424278259277,
172
- "rewards/margins": 7.145503997802734,
173
- "rewards/rejected": -2.933079242706299,
174
  "step": 100
175
  },
176
  {
177
  "epoch": 0.28,
178
- "grad_norm": 288.7802754186709,
179
- "learning_rate": 2.707181336484383e-07,
180
- "logits/chosen": -2.6430606842041016,
181
- "logits/rejected": -2.5142664909362793,
182
- "logps/chosen": -253.27975463867188,
183
- "logps/rejected": -192.4219207763672,
184
- "loss": 0.1616,
185
- "rewards/accuracies": 0.925000011920929,
186
- "rewards/chosen": 5.699277400970459,
187
- "rewards/margins": 7.844499111175537,
188
- "rewards/rejected": -2.1452219486236572,
189
  "step": 110
190
  },
191
  {
192
  "epoch": 0.31,
193
- "grad_norm": 242.33756453841693,
194
- "learning_rate": 2.622766122256652e-07,
195
- "logits/chosen": -2.6139981746673584,
196
- "logits/rejected": -2.516448497772217,
197
- "logps/chosen": -256.9673767089844,
198
- "logps/rejected": -211.84988403320312,
199
- "loss": 0.1786,
200
- "rewards/accuracies": 0.9437500238418579,
201
- "rewards/chosen": 5.386040210723877,
202
- "rewards/margins": 8.150399208068848,
203
- "rewards/rejected": -2.7643585205078125,
204
  "step": 120
205
  },
206
  {
207
  "epoch": 0.33,
208
- "grad_norm": 181.17712779023478,
209
- "learning_rate": 2.5293624568031005e-07,
210
- "logits/chosen": -2.5844571590423584,
211
- "logits/rejected": -2.4461381435394287,
212
- "logps/chosen": -248.85513305664062,
213
- "logps/rejected": -187.93716430664062,
214
- "loss": 0.172,
215
- "rewards/accuracies": 0.918749988079071,
216
- "rewards/chosen": 4.1653828620910645,
217
- "rewards/margins": 7.870238304138184,
218
- "rewards/rejected": -3.7048561573028564,
219
  "step": 130
220
  },
221
  {
222
  "epoch": 0.36,
223
- "grad_norm": 386.4455316752931,
224
- "learning_rate": 2.4277180953993823e-07,
225
- "logits/chosen": -2.6205365657806396,
226
- "logits/rejected": -2.5077974796295166,
227
- "logps/chosen": -269.39251708984375,
228
- "logps/rejected": -207.6556854248047,
229
- "loss": 0.2112,
230
- "rewards/accuracies": 0.9312499761581421,
231
- "rewards/chosen": 3.9067764282226562,
232
- "rewards/margins": 7.915855407714844,
233
- "rewards/rejected": -4.0090789794921875,
234
  "step": 140
235
  },
236
  {
237
  "epoch": 0.38,
238
- "grad_norm": 264.0646768310831,
239
- "learning_rate": 2.3186467652917566e-07,
240
- "logits/chosen": -2.607632875442505,
241
- "logits/rejected": -2.5044326782226562,
242
- "logps/chosen": -261.9036560058594,
243
- "logps/rejected": -212.12777709960938,
244
- "loss": 0.2286,
245
- "rewards/accuracies": 0.918749988079071,
246
- "rewards/chosen": 4.435807704925537,
247
- "rewards/margins": 8.641576766967773,
248
- "rewards/rejected": -4.2057695388793945,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.41,
253
- "grad_norm": 571.6419774908388,
254
- "learning_rate": 2.2030216512970552e-07,
255
- "logits/chosen": -2.5919671058654785,
256
- "logits/rejected": -2.48041033744812,
257
- "logps/chosen": -240.66891479492188,
258
- "logps/rejected": -183.67489624023438,
259
- "loss": 0.2024,
260
- "rewards/accuracies": 0.956250011920929,
261
- "rewards/chosen": 4.69089412689209,
262
- "rewards/margins": 7.830643653869629,
263
- "rewards/rejected": -3.139749526977539,
264
  "step": 160
265
  },
266
  {
267
  "epoch": 0.44,
268
- "grad_norm": 351.0335983478419,
269
- "learning_rate": 2.0817684054072823e-07,
270
- "logits/chosen": -2.594038724899292,
271
- "logits/rejected": -2.479696273803711,
272
- "logps/chosen": -259.5168151855469,
273
- "logps/rejected": -199.40609741210938,
274
- "loss": 0.1492,
275
- "rewards/accuracies": 0.9375,
276
- "rewards/chosen": 4.83851432800293,
277
- "rewards/margins": 7.966272830963135,
278
- "rewards/rejected": -3.127758741378784,
279
  "step": 170
280
  },
281
  {
282
  "epoch": 0.46,
283
- "grad_norm": 332.9543090868915,
284
- "learning_rate": 1.9558577363613703e-07,
285
- "logits/chosen": -2.643381357192993,
286
- "logits/rejected": -2.5339646339416504,
287
- "logps/chosen": -235.37637329101562,
288
- "logps/rejected": -190.86380004882812,
289
- "loss": 0.1789,
290
- "rewards/accuracies": 0.9437500238418579,
291
- "rewards/chosen": 3.2347798347473145,
292
- "rewards/margins": 7.21783971786499,
293
- "rewards/rejected": -3.983060359954834,
294
  "step": 180
295
  },
296
  {
297
  "epoch": 0.49,
298
- "grad_norm": 413.79495264463003,
299
- "learning_rate": 1.826297638509251e-07,
300
- "logits/chosen": -2.595829725265503,
301
- "logits/rejected": -2.500739812850952,
302
- "logps/chosen": -256.819580078125,
303
- "logps/rejected": -206.32376098632812,
304
- "loss": 0.179,
305
- "rewards/accuracies": 0.918749988079071,
306
- "rewards/chosen": 3.607396364212036,
307
- "rewards/margins": 7.752522945404053,
308
- "rewards/rejected": -4.145126819610596,
309
  "step": 190
310
  },
311
  {
312
  "epoch": 0.51,
313
- "grad_norm": 458.13511698305706,
314
- "learning_rate": 1.694125322181083e-07,
315
- "logits/chosen": -2.63800311088562,
316
- "logits/rejected": -2.5028953552246094,
317
- "logps/chosen": -269.77081298828125,
318
- "logps/rejected": -201.87176513671875,
319
- "loss": 0.1642,
320
- "rewards/accuracies": 0.949999988079071,
321
- "rewards/chosen": 5.391812801361084,
322
- "rewards/margins": 9.271406173706055,
323
- "rewards/rejected": -3.8795933723449707,
324
  "step": 200
325
  },
326
  {
327
  "epoch": 0.54,
328
- "grad_norm": 449.95115191482023,
329
- "learning_rate": 1.5603989101641228e-07,
330
- "logits/chosen": -2.620668888092041,
331
- "logits/rejected": -2.5066120624542236,
332
- "logps/chosen": -262.0960693359375,
333
- "logps/rejected": -210.2143096923828,
334
- "loss": 0.158,
335
- "rewards/accuracies": 0.9375,
336
- "rewards/chosen": 4.4803595542907715,
337
- "rewards/margins": 8.733253479003906,
338
- "rewards/rejected": -4.252893924713135,
339
  "step": 210
340
  },
341
  {
342
  "epoch": 0.56,
343
- "grad_norm": 386.8925888622673,
344
- "learning_rate": 1.4261889667621828e-07,
345
- "logits/chosen": -2.623037815093994,
346
- "logits/rejected": -2.5237972736358643,
347
- "logps/chosen": -254.78390502929688,
348
- "logps/rejected": -206.7708282470703,
349
- "loss": 0.2113,
350
- "rewards/accuracies": 0.925000011920929,
351
- "rewards/chosen": 3.867889881134033,
352
- "rewards/margins": 8.484498023986816,
353
- "rewards/rejected": -4.616608142852783,
354
  "step": 220
355
  },
356
  {
357
  "epoch": 0.59,
358
- "grad_norm": 264.4396878771059,
359
- "learning_rate": 1.2925699272529007e-07,
360
- "logits/chosen": -2.6517319679260254,
361
- "logits/rejected": -2.5196144580841064,
362
- "logps/chosen": -263.8755798339844,
363
- "logps/rejected": -206.9174346923828,
364
- "loss": 0.1518,
365
- "rewards/accuracies": 0.8999999761581421,
366
- "rewards/chosen": 4.761946201324463,
367
- "rewards/margins": 8.56539249420166,
368
- "rewards/rejected": -3.8034462928771973,
369
  "step": 230
370
  },
371
  {
372
  "epoch": 0.61,
373
- "grad_norm": 419.86487391831014,
374
- "learning_rate": 1.160611496355417e-07,
375
- "logits/chosen": -2.6276602745056152,
376
- "logits/rejected": -2.5167899131774902,
377
- "logps/chosen": -257.7350769042969,
378
- "logps/rejected": -210.53292846679688,
379
- "loss": 0.1539,
380
- "rewards/accuracies": 0.9375,
381
- "rewards/chosen": 4.4489850997924805,
382
- "rewards/margins": 9.010086059570312,
383
- "rewards/rejected": -4.561100482940674,
384
  "step": 240
385
  },
386
  {
387
  "epoch": 0.64,
388
- "grad_norm": 233.18563601164297,
389
- "learning_rate": 1.0313700845691635e-07,
390
- "logits/chosen": -2.6395657062530518,
391
- "logits/rejected": -2.5249786376953125,
392
- "logps/chosen": -263.78375244140625,
393
- "logps/rejected": -216.53323364257812,
394
- "loss": 0.1755,
395
- "rewards/accuracies": 0.949999988079071,
396
- "rewards/chosen": 4.129330158233643,
397
- "rewards/margins": 8.881619453430176,
398
- "rewards/rejected": -4.752288341522217,
399
  "step": 250
400
  },
401
  {
402
  "epoch": 0.67,
403
- "grad_norm": 301.32208500782895,
404
- "learning_rate": 9.058803509412647e-08,
405
- "logits/chosen": -2.63856840133667,
406
- "logits/rejected": -2.5386815071105957,
407
- "logps/chosen": -257.0624084472656,
408
- "logps/rejected": -209.1647186279297,
409
- "loss": 0.1345,
410
- "rewards/accuracies": 0.949999988079071,
411
- "rewards/chosen": 4.407042503356934,
412
- "rewards/margins": 8.488649368286133,
413
- "rewards/rejected": -4.081605434417725,
414
  "step": 260
415
  },
416
  {
417
  "epoch": 0.69,
418
- "grad_norm": 262.72645974554706,
419
- "learning_rate": 7.851469199680381e-08,
420
- "logits/chosen": -2.6127829551696777,
421
- "logits/rejected": -2.4841771125793457,
422
- "logps/chosen": -268.802490234375,
423
- "logps/rejected": -217.36740112304688,
424
- "loss": 0.1877,
425
- "rewards/accuracies": 0.9375,
426
- "rewards/chosen": 4.018976211547852,
427
- "rewards/margins": 9.022808074951172,
428
- "rewards/rejected": -5.00383186340332,
429
  "step": 270
430
  },
431
  {
432
  "epoch": 0.72,
433
- "grad_norm": 216.84234596530857,
434
- "learning_rate": 6.701363389420295e-08,
435
- "logits/chosen": -2.649754762649536,
436
- "logits/rejected": -2.5276429653167725,
437
- "logps/chosen": -265.546875,
438
- "logps/rejected": -207.27426147460938,
439
- "loss": 0.1499,
440
- "rewards/accuracies": 0.9312499761581421,
441
- "rewards/chosen": 4.076898574829102,
442
- "rewards/margins": 9.239118576049805,
443
- "rewards/rejected": -5.1622209548950195,
444
  "step": 280
445
  },
446
  {
447
  "epoch": 0.74,
448
- "grad_norm": 267.2442999173217,
449
- "learning_rate": 5.617693401310837e-08,
450
- "logits/chosen": -2.6028785705566406,
451
- "logits/rejected": -2.526676654815674,
452
- "logps/chosen": -266.23406982421875,
453
- "logps/rejected": -225.2560577392578,
454
- "loss": 0.1785,
455
- "rewards/accuracies": 0.9437500238418579,
456
- "rewards/chosen": 3.791701555252075,
457
- "rewards/margins": 7.995067596435547,
458
- "rewards/rejected": -4.203365802764893,
459
  "step": 290
460
  },
461
  {
462
  "epoch": 0.77,
463
- "grad_norm": 304.40926278591246,
464
- "learning_rate": 4.609134697356009e-08,
465
- "logits/chosen": -2.6325907707214355,
466
- "logits/rejected": -2.513869524002075,
467
- "logps/chosen": -270.08087158203125,
468
- "logps/rejected": -216.07247924804688,
469
- "loss": 0.1539,
470
- "rewards/accuracies": 0.956250011920929,
471
- "rewards/chosen": 4.105973720550537,
472
- "rewards/margins": 9.096407890319824,
473
- "rewards/rejected": -4.990433692932129,
474
  "step": 300
475
  },
476
  {
477
  "epoch": 0.79,
478
- "grad_norm": 183.46140347071307,
479
- "learning_rate": 3.683761426338148e-08,
480
- "logits/chosen": -2.5910542011260986,
481
- "logits/rejected": -2.4951541423797607,
482
- "logps/chosen": -275.2190246582031,
483
- "logps/rejected": -210.9887237548828,
484
- "loss": 0.1539,
485
- "rewards/accuracies": 0.925000011920929,
486
- "rewards/chosen": 4.16552209854126,
487
- "rewards/margins": 8.900744438171387,
488
- "rewards/rejected": -4.735221862792969,
489
  "step": 310
490
  },
491
  {
492
  "epoch": 0.82,
493
- "grad_norm": 336.8108974655118,
494
- "learning_rate": 2.8489817851625024e-08,
495
- "logits/chosen": -2.6144814491271973,
496
- "logits/rejected": -2.5162534713745117,
497
- "logps/chosen": -258.4134826660156,
498
- "logps/rejected": -210.2257080078125,
499
- "loss": 0.1866,
500
  "rewards/accuracies": 0.875,
501
- "rewards/chosen": 3.252080202102661,
502
- "rewards/margins": 8.461040496826172,
503
- "rewards/rejected": -5.20896053314209,
504
  "step": 320
505
  },
506
  {
507
  "epoch": 0.84,
508
- "grad_norm": 222.30131769249033,
509
- "learning_rate": 2.1114787115667477e-08,
510
- "logits/chosen": -2.634732484817505,
511
- "logits/rejected": -2.5385124683380127,
512
- "logps/chosen": -260.74530029296875,
513
- "logps/rejected": -209.8925323486328,
514
- "loss": 0.1579,
515
- "rewards/accuracies": 0.981249988079071,
516
- "rewards/chosen": 4.701260566711426,
517
- "rewards/margins": 9.207574844360352,
518
- "rewards/rejected": -4.506315231323242,
519
  "step": 330
520
  },
521
  {
522
  "epoch": 0.87,
523
- "grad_norm": 298.91033859930906,
524
- "learning_rate": 1.4771563829877598e-08,
525
- "logits/chosen": -2.620940685272217,
526
- "logits/rejected": -2.5207433700561523,
527
- "logps/chosen": -256.2254638671875,
528
- "logps/rejected": -197.62445068359375,
529
- "loss": 0.1936,
530
- "rewards/accuracies": 0.956250011920929,
531
- "rewards/chosen": 4.292969703674316,
532
- "rewards/margins": 9.223516464233398,
533
- "rewards/rejected": -4.930546760559082,
534
  "step": 340
535
  },
536
  {
537
  "epoch": 0.9,
538
- "grad_norm": 322.2905254048829,
539
- "learning_rate": 9.510929498959268e-09,
540
- "logits/chosen": -2.636793851852417,
541
- "logits/rejected": -2.522016763687134,
542
- "logps/chosen": -265.2098388671875,
543
- "logps/rejected": -215.28469848632812,
544
- "loss": 0.1724,
545
- "rewards/accuracies": 0.918749988079071,
546
- "rewards/chosen": 3.8617148399353027,
547
- "rewards/margins": 8.617898941040039,
548
- "rewards/rejected": -4.7561845779418945,
549
  "step": 350
550
  },
551
  {
552
  "epoch": 0.92,
553
- "grad_norm": 389.7582997593581,
554
- "learning_rate": 5.374998819965654e-09,
555
- "logits/chosen": -2.6430556774139404,
556
- "logits/rejected": -2.5382204055786133,
557
- "logps/chosen": -270.87615966796875,
558
- "logps/rejected": -212.25949096679688,
559
- "loss": 0.1727,
560
- "rewards/accuracies": 0.9437500238418579,
561
- "rewards/chosen": 4.255101680755615,
562
- "rewards/margins": 8.951577186584473,
563
- "rewards/rejected": -4.696475028991699,
564
  "step": 360
565
  },
566
  {
567
  "epoch": 0.95,
568
- "grad_norm": 224.62737396153923,
569
- "learning_rate": 2.396882527576477e-09,
570
- "logits/chosen": -2.5986270904541016,
571
- "logits/rejected": -2.492842197418213,
572
- "logps/chosen": -269.3092346191406,
573
- "logps/rejected": -214.992919921875,
574
- "loss": 0.1462,
575
- "rewards/accuracies": 0.9312499761581421,
576
- "rewards/chosen": 4.158980846405029,
577
- "rewards/margins": 8.108312606811523,
578
- "rewards/rejected": -3.9493324756622314,
579
  "step": 370
580
  },
581
  {
582
  "epoch": 0.97,
583
- "grad_norm": 174.48647090969504,
584
- "learning_rate": 6.004223217757509e-10,
585
- "logits/chosen": -2.653160572052002,
586
- "logits/rejected": -2.5599982738494873,
587
- "logps/chosen": -260.55279541015625,
588
- "logps/rejected": -219.11105346679688,
589
- "loss": 0.1559,
590
- "rewards/accuracies": 0.925000011920929,
591
- "rewards/chosen": 4.291203498840332,
592
- "rewards/margins": 8.267863273620605,
593
- "rewards/rejected": -3.9766602516174316,
594
  "step": 380
595
  },
596
  {
597
  "epoch": 1.0,
598
- "grad_norm": 384.8395002541089,
599
  "learning_rate": 0.0,
600
- "logits/chosen": -2.6584863662719727,
601
- "logits/rejected": -2.5489349365234375,
602
- "logps/chosen": -248.6810302734375,
603
- "logps/rejected": -207.1171875,
604
- "loss": 0.1633,
605
- "rewards/accuracies": 0.9375,
606
- "rewards/chosen": 3.9972071647644043,
607
- "rewards/margins": 9.053262710571289,
608
- "rewards/rejected": -5.056054592132568,
609
  "step": 390
610
  },
611
  {
612
  "epoch": 1.0,
613
  "step": 390,
614
  "total_flos": 0.0,
615
- "train_loss": 0.20326648155848184,
616
- "train_runtime": 5896.1189,
617
- "train_samples_per_second": 8.48,
618
- "train_steps_per_second": 0.066
619
  }
620
  ],
621
  "logging_steps": 10,
 
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
+ "grad_norm": 1292.2103490533002,
14
+ "learning_rate": 1.282051282051282e-08,
15
+ "logits/chosen": -2.5583817958831787,
16
+ "logits/rejected": -2.4487552642822266,
17
  "logps/chosen": -258.1644592285156,
18
+ "logps/rejected": -216.25729370117188,
19
+ "loss": 0.6964,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
 
25
  },
26
  {
27
  "epoch": 0.03,
28
+ "grad_norm": 1261.1198733310573,
29
+ "learning_rate": 1.2820512820512818e-07,
30
+ "logits/chosen": -2.6061007976531982,
31
+ "logits/rejected": -2.553147315979004,
32
+ "logps/chosen": -267.5506591796875,
33
+ "logps/rejected": -217.63583374023438,
34
+ "loss": 0.7045,
35
+ "rewards/accuracies": 0.4375,
36
+ "rewards/chosen": 0.012439604848623276,
37
+ "rewards/margins": 0.01009546872228384,
38
+ "rewards/rejected": 0.002344133099541068,
39
  "step": 10
40
  },
41
  {
42
  "epoch": 0.05,
43
+ "grad_norm": 853.2707411908161,
44
+ "learning_rate": 2.5641025641025636e-07,
45
+ "logits/chosen": -2.629751443862915,
46
+ "logits/rejected": -2.5669989585876465,
47
+ "logps/chosen": -260.5412292480469,
48
+ "logps/rejected": -207.0039825439453,
49
+ "loss": 0.5192,
50
+ "rewards/accuracies": 0.7250000238418579,
51
+ "rewards/chosen": 0.6866833567619324,
52
+ "rewards/margins": 0.6415502429008484,
53
+ "rewards/rejected": 0.045133065432310104,
54
  "step": 20
55
  },
56
  {
57
  "epoch": 0.08,
58
+ "grad_norm": 1142.6320871817904,
59
+ "learning_rate": 3.8461538461538463e-07,
60
+ "logits/chosen": -2.6441783905029297,
61
+ "logits/rejected": -2.5700392723083496,
62
+ "logps/chosen": -251.12313842773438,
63
+ "logps/rejected": -198.34071350097656,
64
+ "loss": 0.3383,
65
+ "rewards/accuracies": 0.856249988079071,
66
+ "rewards/chosen": 3.621673583984375,
67
+ "rewards/margins": 3.1466643810272217,
68
+ "rewards/rejected": 0.47500935196876526,
69
  "step": 30
70
  },
71
  {
72
  "epoch": 0.1,
73
+ "grad_norm": 498.4795422948633,
74
+ "learning_rate": 4.99989986344963e-07,
75
+ "logits/chosen": -2.6392924785614014,
76
+ "logits/rejected": -2.561156749725342,
77
+ "logps/chosen": -243.8309783935547,
78
+ "logps/rejected": -192.75845336914062,
79
+ "loss": 0.3207,
80
+ "rewards/accuracies": 0.8500000238418579,
81
+ "rewards/chosen": 5.866530418395996,
82
+ "rewards/margins": 5.031473636627197,
83
+ "rewards/rejected": 0.8350569009780884,
84
  "step": 40
85
  },
86
  {
87
  "epoch": 0.13,
88
+ "grad_norm": 687.258084457276,
89
+ "learning_rate": 4.987893180827479e-07,
90
+ "logits/chosen": -2.6534440517425537,
91
+ "logits/rejected": -2.5842742919921875,
92
+ "logps/chosen": -258.9037170410156,
93
+ "logps/rejected": -203.41665649414062,
94
+ "loss": 0.3666,
95
+ "rewards/accuracies": 0.8687499761581421,
96
+ "rewards/chosen": 8.400110244750977,
97
+ "rewards/margins": 6.8410444259643555,
98
+ "rewards/rejected": 1.559065818786621,
99
  "step": 50
100
  },
101
  {
102
  "epoch": 0.15,
103
+ "grad_norm": 759.1516730607283,
104
+ "learning_rate": 4.955969343539162e-07,
105
+ "logits/chosen": -2.616115093231201,
106
+ "logits/rejected": -2.5453152656555176,
107
+ "logps/chosen": -262.9014892578125,
108
+ "logps/rejected": -209.2264404296875,
109
+ "loss": 0.3916,
110
+ "rewards/accuracies": 0.862500011920929,
111
+ "rewards/chosen": 5.333600044250488,
112
+ "rewards/margins": 6.3847455978393555,
113
+ "rewards/rejected": -1.0511456727981567,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.18,
118
+ "grad_norm": 560.0502144214485,
119
+ "learning_rate": 4.90438392204474e-07,
120
+ "logits/chosen": -2.590355634689331,
121
+ "logits/rejected": -2.5212607383728027,
122
+ "logps/chosen": -292.03955078125,
123
+ "logps/rejected": -227.56900024414062,
124
+ "loss": 0.366,
125
  "rewards/accuracies": 0.918749988079071,
126
+ "rewards/chosen": 5.28397798538208,
127
+ "rewards/margins": 7.424477577209473,
128
+ "rewards/rejected": -2.1404995918273926,
129
  "step": 70
130
  },
131
  {
132
  "epoch": 0.2,
133
+ "grad_norm": 1037.6404299504059,
134
+ "learning_rate": 4.83354989019146e-07,
135
+ "logits/chosen": -2.5414998531341553,
136
+ "logits/rejected": -2.468327283859253,
137
+ "logps/chosen": -260.0003356933594,
138
+ "logps/rejected": -203.8599090576172,
139
+ "loss": 0.3627,
140
+ "rewards/accuracies": 0.875,
141
+ "rewards/chosen": 6.802065372467041,
142
+ "rewards/margins": 7.507330894470215,
143
+ "rewards/rejected": -0.705264687538147,
144
  "step": 80
145
  },
146
  {
147
  "epoch": 0.23,
148
+ "grad_norm": 1095.0164975833522,
149
+ "learning_rate": 4.7440343190975353e-07,
150
+ "logits/chosen": -2.5701346397399902,
151
+ "logits/rejected": -2.513249397277832,
152
+ "logps/chosen": -257.8319396972656,
153
+ "logps/rejected": -217.47695922851562,
154
+ "loss": 0.3529,
155
+ "rewards/accuracies": 0.84375,
156
+ "rewards/chosen": 3.5220108032226562,
157
+ "rewards/margins": 6.205338478088379,
158
+ "rewards/rejected": -2.683328151702881,
159
  "step": 90
160
  },
161
  {
162
  "epoch": 0.26,
163
+ "grad_norm": 493.8561719761038,
164
+ "learning_rate": 4.6365538373900506e-07,
165
+ "logits/chosen": -2.624537944793701,
166
+ "logits/rejected": -2.5495262145996094,
167
+ "logps/chosen": -237.0422821044922,
168
+ "logps/rejected": -201.1056671142578,
169
+ "loss": 0.6296,
170
+ "rewards/accuracies": 0.8687499761581421,
171
+ "rewards/chosen": 4.613960266113281,
172
+ "rewards/margins": 6.805362701416016,
173
+ "rewards/rejected": -2.191401958465576,
174
  "step": 100
175
  },
176
  {
177
  "epoch": 0.28,
178
+ "grad_norm": 783.3339585792174,
179
+ "learning_rate": 4.5119688941406386e-07,
180
+ "logits/chosen": -2.6185028553009033,
181
+ "logits/rejected": -2.5373358726501465,
182
+ "logps/chosen": -258.2229919433594,
183
+ "logps/rejected": -209.95010375976562,
184
+ "loss": 0.5085,
185
+ "rewards/accuracies": 0.862500011920929,
186
+ "rewards/chosen": 6.706292629241943,
187
+ "rewards/margins": 8.07982349395752,
188
+ "rewards/rejected": -1.3735301494598389,
189
  "step": 110
190
  },
191
  {
192
  "epoch": 0.31,
193
+ "grad_norm": 1013.1425815966796,
194
+ "learning_rate": 4.3712768704277524e-07,
195
+ "logits/chosen": -2.5965781211853027,
196
+ "logits/rejected": -2.5263915061950684,
197
+ "logps/chosen": -263.1861572265625,
198
+ "logps/rejected": -208.8050079345703,
199
+ "loss": 0.4699,
200
+ "rewards/accuracies": 0.8812500238418579,
201
+ "rewards/chosen": 5.6486382484436035,
202
+ "rewards/margins": 7.843411445617676,
203
+ "rewards/rejected": -2.194772720336914,
204
  "step": 120
205
  },
206
  {
207
  "epoch": 0.33,
208
+ "grad_norm": 659.1716861853262,
209
+ "learning_rate": 4.2156040946718343e-07,
210
+ "logits/chosen": -2.571542263031006,
211
+ "logits/rejected": -2.502825975418091,
212
+ "logps/chosen": -252.36941528320312,
213
+ "logps/rejected": -197.37109375,
214
+ "loss": 0.4289,
215
+ "rewards/accuracies": 0.8374999761581421,
216
+ "rewards/chosen": 4.440427303314209,
217
+ "rewards/margins": 7.714503288269043,
218
+ "rewards/rejected": -3.274075746536255,
219
  "step": 130
220
  },
221
  {
222
  "epoch": 0.36,
223
+ "grad_norm": 706.862930333198,
224
+ "learning_rate": 4.046196825665637e-07,
225
+ "logits/chosen": -2.5883898735046387,
226
+ "logits/rejected": -2.5185062885284424,
227
+ "logps/chosen": -271.88494873046875,
228
+ "logps/rejected": -218.22323608398438,
229
+ "loss": 0.4943,
230
+ "rewards/accuracies": 0.84375,
231
+ "rewards/chosen": 2.9622278213500977,
232
+ "rewards/margins": 7.161763668060303,
233
+ "rewards/rejected": -4.199534893035889,
234
  "step": 140
235
  },
236
  {
237
  "epoch": 0.38,
238
+ "grad_norm": 715.990249793818,
239
+ "learning_rate": 3.864411275486261e-07,
240
+ "logits/chosen": -2.5699923038482666,
241
+ "logits/rejected": -2.502592086791992,
242
+ "logps/chosen": -264.418212890625,
243
+ "logps/rejected": -213.1693115234375,
244
+ "loss": 0.5111,
245
+ "rewards/accuracies": 0.875,
246
+ "rewards/chosen": 5.556182384490967,
247
+ "rewards/margins": 8.052742004394531,
248
+ "rewards/rejected": -2.4965591430664062,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.41,
253
+ "grad_norm": 710.6520782907305,
254
+ "learning_rate": 3.671702752161759e-07,
255
+ "logits/chosen": -2.559586763381958,
256
+ "logits/rejected": -2.4897525310516357,
257
+ "logps/chosen": -245.1999969482422,
258
+ "logps/rejected": -198.19442749023438,
259
+ "loss": 0.4696,
260
+ "rewards/accuracies": 0.875,
261
+ "rewards/chosen": 3.364131212234497,
262
+ "rewards/margins": 8.019769668579102,
263
+ "rewards/rejected": -4.655638694763184,
264
  "step": 160
265
  },
266
  {
267
  "epoch": 0.44,
268
+ "grad_norm": 865.8215089865342,
269
+ "learning_rate": 3.4696140090121375e-07,
270
+ "logits/chosen": -2.57694935798645,
271
+ "logits/rejected": -2.5136027336120605,
272
+ "logps/chosen": -266.91412353515625,
273
+ "logps/rejected": -211.6914825439453,
274
+ "loss": 0.397,
275
+ "rewards/accuracies": 0.862500011920929,
276
+ "rewards/chosen": 3.399867296218872,
277
+ "rewards/margins": 8.182429313659668,
278
+ "rewards/rejected": -4.782561302185059,
279
  "step": 170
280
  },
281
  {
282
  "epoch": 0.46,
283
+ "grad_norm": 658.5054936178984,
284
+ "learning_rate": 3.259762893935617e-07,
285
+ "logits/chosen": -2.6607277393341064,
286
+ "logits/rejected": -2.581068515777588,
287
+ "logps/chosen": -239.2282257080078,
288
+ "logps/rejected": -188.63772583007812,
289
+ "loss": 0.4691,
290
+ "rewards/accuracies": 0.8687499761581421,
291
+ "rewards/chosen": 2.5246458053588867,
292
+ "rewards/margins": 7.373200416564941,
293
+ "rewards/rejected": -4.848554611206055,
294
  "step": 180
295
  },
296
  {
297
  "epoch": 0.49,
298
+ "grad_norm": 746.921617266345,
299
+ "learning_rate": 3.0438293975154184e-07,
300
+ "logits/chosen": -2.6273646354675293,
301
+ "logits/rejected": -2.5594921112060547,
302
+ "logps/chosen": -263.73443603515625,
303
+ "logps/rejected": -207.8045654296875,
304
+ "loss": 0.4399,
305
+ "rewards/accuracies": 0.8687499761581421,
306
+ "rewards/chosen": 0.9775351285934448,
307
+ "rewards/margins": 8.604721069335938,
308
+ "rewards/rejected": -7.6271867752075195,
309
  "step": 190
310
  },
311
  {
312
  "epoch": 0.51,
313
+ "grad_norm": 859.0725144089504,
314
+ "learning_rate": 2.823542203635138e-07,
315
+ "logits/chosen": -2.6617209911346436,
316
+ "logits/rejected": -2.5823440551757812,
317
+ "logps/chosen": -278.6440734863281,
318
+ "logps/rejected": -222.3423309326172,
319
+ "loss": 0.493,
320
+ "rewards/accuracies": 0.887499988079071,
321
+ "rewards/chosen": 1.6911423206329346,
322
+ "rewards/margins": 9.542765617370605,
323
+ "rewards/rejected": -7.851624488830566,
324
  "step": 200
325
  },
326
  {
327
  "epoch": 0.54,
328
+ "grad_norm": 1073.7778122145994,
329
+ "learning_rate": 2.600664850273538e-07,
330
+ "logits/chosen": -2.6473941802978516,
331
+ "logits/rejected": -2.581458568572998,
332
+ "logps/chosen": -270.93621826171875,
333
+ "logps/rejected": -214.9030303955078,
334
+ "loss": 0.7378,
335
+ "rewards/accuracies": 0.8374999761581421,
336
+ "rewards/chosen": 0.10583686828613281,
337
+ "rewards/margins": 7.7938642501831055,
338
+ "rewards/rejected": -7.688027858734131,
339
  "step": 210
340
  },
341
  {
342
  "epoch": 0.56,
343
+ "grad_norm": 783.9779969138841,
344
+ "learning_rate": 2.3769816112703045e-07,
345
+ "logits/chosen": -2.6640350818634033,
346
+ "logits/rejected": -2.606898307800293,
347
+ "logps/chosen": -260.6977233886719,
348
+ "logps/rejected": -216.708740234375,
349
+ "loss": 0.5298,
350
+ "rewards/accuracies": 0.8687499761581421,
351
+ "rewards/chosen": 0.881191611289978,
352
+ "rewards/margins": 7.128912448883057,
353
+ "rewards/rejected": -6.247720241546631,
354
  "step": 220
355
  },
356
  {
357
  "epoch": 0.59,
358
+ "grad_norm": 656.1996441950056,
359
+ "learning_rate": 2.1542832120881677e-07,
360
+ "logits/chosen": -2.6948089599609375,
361
+ "logits/rejected": -2.615889072418213,
362
+ "logps/chosen": -270.77557373046875,
363
+ "logps/rejected": -219.38320922851562,
364
+ "loss": 0.4547,
365
+ "rewards/accuracies": 0.856249988079071,
366
+ "rewards/chosen": 2.3647713661193848,
367
+ "rewards/margins": 8.186816215515137,
368
+ "rewards/rejected": -5.82204532623291,
369
  "step": 230
370
  },
371
  {
372
  "epoch": 0.61,
373
+ "grad_norm": 1015.3762946599339,
374
+ "learning_rate": 1.934352493925695e-07,
375
+ "logits/chosen": -2.6751513481140137,
376
+ "logits/rejected": -2.6343884468078613,
377
+ "logps/chosen": -266.14263916015625,
378
+ "logps/rejected": -223.5273895263672,
379
+ "loss": 0.429,
380
+ "rewards/accuracies": 0.8999999761581421,
381
+ "rewards/chosen": 1.4297797679901123,
382
+ "rewards/margins": 10.390259742736816,
383
+ "rewards/rejected": -8.960479736328125,
384
  "step": 240
385
  },
386
  {
387
  "epoch": 0.64,
388
+ "grad_norm": 586.0366917195352,
389
+ "learning_rate": 1.7189501409486059e-07,
390
+ "logits/chosen": -2.6815237998962402,
391
+ "logits/rejected": -2.6186068058013916,
392
+ "logps/chosen": -271.32818603515625,
393
+ "logps/rejected": -225.2965545654297,
394
+ "loss": 0.4243,
395
+ "rewards/accuracies": 0.8374999761581421,
396
+ "rewards/chosen": 0.4054955542087555,
397
+ "rewards/margins": 8.429037094116211,
398
+ "rewards/rejected": -8.023542404174805,
399
  "step": 250
400
  },
401
  {
402
  "epoch": 0.67,
403
+ "grad_norm": 567.2324686905487,
404
+ "learning_rate": 1.5098005849021078e-07,
405
+ "logits/chosen": -2.6706247329711914,
406
+ "logits/rejected": -2.6183619499206543,
407
+ "logps/chosen": -265.3566589355469,
408
+ "logps/rejected": -211.55789184570312,
409
+ "loss": 0.4026,
410
+ "rewards/accuracies": 0.856249988079071,
411
+ "rewards/chosen": 0.452970027923584,
412
+ "rewards/margins": 7.969753265380859,
413
+ "rewards/rejected": -7.516783237457275,
414
  "step": 260
415
  },
416
  {
417
  "epoch": 0.69,
418
+ "grad_norm": 651.6922692823155,
419
+ "learning_rate": 1.30857819994673e-07,
420
+ "logits/chosen": -2.648026943206787,
421
+ "logits/rejected": -2.5697438716888428,
422
+ "logps/chosen": -276.09661865234375,
423
+ "logps/rejected": -233.24966430664062,
424
+ "loss": 0.4615,
425
+ "rewards/accuracies": 0.875,
426
+ "rewards/chosen": 0.4385630190372467,
427
+ "rewards/margins": 11.782003402709961,
428
+ "rewards/rejected": -11.343441009521484,
429
  "step": 270
430
  },
431
  {
432
  "epoch": 0.72,
433
+ "grad_norm": 492.898374979646,
434
+ "learning_rate": 1.116893898236716e-07,
435
+ "logits/chosen": -2.6764636039733887,
436
+ "logits/rejected": -2.6275370121002197,
437
+ "logps/chosen": -273.55120849609375,
438
+ "logps/rejected": -222.28256225585938,
439
+ "loss": 0.431,
440
+ "rewards/accuracies": 0.8687499761581421,
441
+ "rewards/chosen": 0.1396121382713318,
442
+ "rewards/margins": 8.543681144714355,
443
+ "rewards/rejected": -8.404068946838379,
444
  "step": 280
445
  },
446
  {
447
  "epoch": 0.74,
448
+ "grad_norm": 601.4691313440442,
449
+ "learning_rate": 9.362822335518062e-08,
450
+ "logits/chosen": -2.639714241027832,
451
+ "logits/rejected": -2.6006019115448,
452
+ "logps/chosen": -271.51104736328125,
453
+ "logps/rejected": -219.78646850585938,
454
+ "loss": 0.3785,
455
+ "rewards/accuracies": 0.887499988079071,
456
+ "rewards/chosen": 0.39154329895973206,
457
+ "rewards/margins": 8.193072319030762,
458
+ "rewards/rejected": -7.8015289306640625,
459
  "step": 290
460
  },
461
  {
462
  "epoch": 0.77,
463
+ "grad_norm": 876.6568683843423,
464
+ "learning_rate": 7.681891162260015e-08,
465
+ "logits/chosen": -2.6577625274658203,
466
+ "logits/rejected": -2.610130786895752,
467
+ "logps/chosen": -278.3086853027344,
468
+ "logps/rejected": -223.9765625,
469
+ "loss": 0.4175,
470
+ "rewards/accuracies": 0.862500011920929,
471
+ "rewards/chosen": 0.23941349983215332,
472
+ "rewards/margins": 8.298933029174805,
473
+ "rewards/rejected": -8.059518814086914,
474
  "step": 300
475
  },
476
  {
477
  "epoch": 0.79,
478
+ "grad_norm": 491.2362210232499,
479
+ "learning_rate": 6.139602377230247e-08,
480
+ "logits/chosen": -2.623286724090576,
481
+ "logits/rejected": -2.5633485317230225,
482
+ "logps/chosen": -281.3511047363281,
483
+ "logps/rejected": -218.5331268310547,
484
+ "loss": 0.4291,
485
+ "rewards/accuracies": 0.875,
486
+ "rewards/chosen": 0.8305476903915405,
487
+ "rewards/margins": 8.879867553710938,
488
+ "rewards/rejected": -8.049318313598633,
489
  "step": 310
490
  },
491
  {
492
  "epoch": 0.82,
493
+ "grad_norm": 762.3598954431114,
494
+ "learning_rate": 4.748302975270837e-08,
495
+ "logits/chosen": -2.6497387886047363,
496
+ "logits/rejected": -2.610168933868408,
497
+ "logps/chosen": -264.21588134765625,
498
+ "logps/rejected": -206.8611602783203,
499
+ "loss": 0.4476,
500
  "rewards/accuracies": 0.875,
501
+ "rewards/chosen": 0.6263083219528198,
502
+ "rewards/margins": 7.853513240814209,
503
+ "rewards/rejected": -7.227204322814941,
504
  "step": 320
505
  },
506
  {
507
  "epoch": 0.84,
508
+ "grad_norm": 668.8344256472935,
509
+ "learning_rate": 3.5191311859445795e-08,
510
+ "logits/chosen": -2.6693060398101807,
511
+ "logits/rejected": -2.6197760105133057,
512
+ "logps/chosen": -268.38543701171875,
513
+ "logps/rejected": -220.74081420898438,
514
+ "loss": 0.4113,
515
+ "rewards/accuracies": 0.90625,
516
+ "rewards/chosen": 1.1819716691970825,
517
+ "rewards/margins": 8.392292976379395,
518
+ "rewards/rejected": -7.210320949554443,
519
  "step": 330
520
  },
521
  {
522
  "epoch": 0.87,
523
+ "grad_norm": 600.9418350776771,
524
+ "learning_rate": 2.4619273049795996e-08,
525
+ "logits/chosen": -2.653104066848755,
526
+ "logits/rejected": -2.6034810543060303,
527
+ "logps/chosen": -263.5737609863281,
528
+ "logps/rejected": -213.79544067382812,
529
+ "loss": 0.3933,
530
+ "rewards/accuracies": 0.9125000238418579,
531
+ "rewards/chosen": 1.1136972904205322,
532
+ "rewards/margins": 9.145872116088867,
533
+ "rewards/rejected": -8.032175064086914,
534
  "step": 340
535
  },
536
  {
537
  "epoch": 0.9,
538
+ "grad_norm": 448.6960620183454,
539
+ "learning_rate": 1.5851549164932115e-08,
540
+ "logits/chosen": -2.663357734680176,
541
+ "logits/rejected": -2.6194939613342285,
542
+ "logps/chosen": -272.70233154296875,
543
+ "logps/rejected": -229.30697631835938,
544
+ "loss": 0.4249,
545
+ "rewards/accuracies": 0.887499988079071,
546
+ "rewards/chosen": 1.3936760425567627,
547
+ "rewards/margins": 8.432661056518555,
548
+ "rewards/rejected": -7.0389862060546875,
549
  "step": 350
550
  },
551
  {
552
  "epoch": 0.92,
553
+ "grad_norm": 606.8337189375926,
554
+ "learning_rate": 8.958331366609423e-09,
555
+ "logits/chosen": -2.666874885559082,
556
+ "logits/rejected": -2.6079227924346924,
557
+ "logps/chosen": -278.23712158203125,
558
+ "logps/rejected": -222.1784210205078,
559
+ "loss": 0.4777,
560
+ "rewards/accuracies": 0.893750011920929,
561
+ "rewards/chosen": 0.8818317651748657,
562
+ "rewards/margins": 8.486412048339844,
563
+ "rewards/rejected": -7.604579925537109,
564
  "step": 360
565
  },
566
  {
567
  "epoch": 0.95,
568
+ "grad_norm": 565.1329564450134,
569
+ "learning_rate": 3.994804212627461e-09,
570
+ "logits/chosen": -2.626528739929199,
571
+ "logits/rejected": -2.5960590839385986,
572
+ "logps/chosen": -277.90789794921875,
573
+ "logps/rejected": -232.6277313232422,
574
+ "loss": 0.4164,
575
+ "rewards/accuracies": 0.84375,
576
+ "rewards/chosen": 1.226670503616333,
577
+ "rewards/margins": 8.475992202758789,
578
+ "rewards/rejected": -7.249321937561035,
579
  "step": 370
580
  },
581
  {
582
  "epoch": 0.97,
583
+ "grad_norm": 494.78405010061283,
584
+ "learning_rate": 1.0007038696262516e-09,
585
+ "logits/chosen": -2.672760486602783,
586
+ "logits/rejected": -2.6364920139312744,
587
+ "logps/chosen": -266.59918212890625,
588
+ "logps/rejected": -233.4165496826172,
589
+ "loss": 0.4064,
590
+ "rewards/accuracies": 0.862500011920929,
591
+ "rewards/chosen": 1.6075836420059204,
592
+ "rewards/margins": 8.459333419799805,
593
+ "rewards/rejected": -6.851749420166016,
594
  "step": 380
595
  },
596
  {
597
  "epoch": 1.0,
598
+ "grad_norm": 917.0969156966863,
599
  "learning_rate": 0.0,
600
+ "logits/chosen": -2.6849241256713867,
601
+ "logits/rejected": -2.630876302719116,
602
+ "logps/chosen": -254.27664184570312,
603
+ "logps/rejected": -213.14102172851562,
604
+ "loss": 0.4983,
605
+ "rewards/accuracies": 0.862500011920929,
606
+ "rewards/chosen": 0.6460372805595398,
607
+ "rewards/margins": 8.144643783569336,
608
+ "rewards/rejected": -7.498606204986572,
609
  "step": 390
610
  },
611
  {
612
  "epoch": 1.0,
613
  "step": 390,
614
  "total_flos": 0.0,
615
+ "train_loss": 0.4513903107398596,
616
+ "train_runtime": 5854.463,
617
+ "train_samples_per_second": 8.54,
618
+ "train_steps_per_second": 0.067
619
  }
620
  ],
621
  "logging_steps": 10,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6819c1dd4414cf3e1008e039028d4a09c333504f352311e2138049a4cec39cd4
-size 6520
+oid sha256:b7346ad29a6b9b0903d845abfe58994ac8a80348e425e0e321ee83abcfcb035e
+size 6264
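
For completeness, a minimal usage sketch for the sharded bfloat16 checkpoint this commit saves. It is not part of the repo, and the repo id below is assumed from the commit author and the model-card title:

```python
# Illustrative only: load the checkpoint with transformers.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "RikkiXu/zephyr-7b-dpo-full"   # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
```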