RikkiXu committed
Commit 5c4a558
Parent: 47cc9fb

Model save

README.md CHANGED
@@ -14,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  # zephyr-7b-dpo-full
 
- This model is a fine-tuned version of [princeton-nlp/Mistral-7B-Base-SFT-DPO](https://huggingface.co/princeton-nlp/Mistral-7B-Base-SFT-DPO) on an unknown dataset.
+ This model is a fine-tuned version of [princeton-nlp/Mistral-7B-Base-SFT-DPO](https://huggingface.co/princeton-nlp/Mistral-7B-Base-SFT-DPO) on the None dataset.
 
  ## Model description
 
@@ -55,5 +55,5 @@ The following hyperparameters were used during training:
 
  - Transformers 4.39.3
  - Pytorch 2.1.2+cu118
- - Datasets 2.19.1
+ - Datasets 2.16.1
  - Tokenizers 0.15.2
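The card pins the library versions but shows no usage, so a minimal loading sketch is included here under stated assumptions: the repository id below is a guess pieced together from the committer name and model title (the card never states it), and the dtype choice is likewise an assumption.

```python
# Minimal loading sketch for the checkpoint described in the card.
# Assumes Transformers 4.39.3 / PyTorch 2.1.2 as pinned above; the repo id is hypothetical.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "RikkiXu/zephyr-7b-dpo-full"  # hypothetical id inferred from committer + model name

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

inputs = tokenizer("What does DPO optimize?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```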
all_results.json CHANGED
@@ -1,8 +1,8 @@
  {
      "epoch": 1.0,
-     "train_loss": 0.2186826115846634,
-     "train_runtime": 11940.2176,
-     "train_samples": 102360,
-     "train_samples_per_second": 8.573,
-     "train_steps_per_second": 0.034
+     "train_loss": 0.37812867454580357,
+     "train_runtime": 5319.5814,
+     "train_samples": 47302,
+     "train_samples_per_second": 8.892,
+     "train_steps_per_second": 0.035
  }
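The new run's summary numbers are internally consistent: 47302 samples over 5319.58 seconds is about 8.89 samples per second, and 0.035 steps per second over the same runtime is roughly the 185 optimizer steps recorded in trainer_state.json further down. A quick check of that arithmetic, using only values copied from the diff:

```python
# Sanity-check the throughput fields recorded in all_results.json.
train_runtime = 5319.5814            # seconds
train_samples = 47302
train_steps_per_second = 0.035

print(train_samples / train_runtime)           # ~8.892, matches train_samples_per_second
print(train_steps_per_second * train_runtime)  # ~186, consistent with the 185 recorded steps
```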
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1a118ac948197a3bbfd249f1956387edf3828fdc2a6cc414bf8ce3c3e7a6e2e3
+ oid sha256:0b0d91deaf39d5239312d2ee38ee390acaa4b4fa405cd4b896c533d8687b80b9
  size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:08a70e605bdd6f68144e056258758c5577009cb50b28c0583f8d451060cb6684
+ oid sha256:b806d3480fd4992109c5d61a5d48cf88cca223fbec8796f7b25067bfe4956722
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7fc90fce317ef012990a74b3c58b0b4875f221d5b4f150cec69f3ed0b9db0134
+ oid sha256:7a766c0fba83e2189c5e7cdab4f60fcf4245ea877bf5d1a20cb88a89e9fbb76e
  size 4540516344
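Each shard entry above is only a Git LFS pointer (oid and size), not the tensor data itself. Below is a small sketch for verifying a locally downloaded shard against its pointer, assuming the file sits in the working directory:

```python
import hashlib
import os

def matches_lfs_pointer(path: str, expected_sha256: str, expected_size: int) -> bool:
    """Return True if the local file matches the oid/size from its Git LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256

# Pointer values for the first shard after this commit (copied from the diff above).
print(matches_lfs_pointer(
    "model-00001-of-00003.safetensors",
    "0b0d91deaf39d5239312d2ee38ee390acaa4b4fa405cd4b896c533d8687b80b9",
    4943162336,
))
```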
runs/Jun21_03-36-42_n136-100-194/events.out.tfevents.1718912225.n136-100-194.566786.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c7a82fa25f3fb898720560ce86ca0146accaa60e9f545355c33bd6e7520be9f
+ size 18155
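The added file is a TensorBoard event log, also stored through LFS. If it has been pulled locally it can be inspected with the tensorboard package; the scalar tag names are not visible from the pointer, so "train/loss" below is an assumption about what the Trainer logged.

```python
# Sketch: list and read scalars from the new event file with tensorboard's EventAccumulator.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

path = "runs/Jun21_03-36-42_n136-100-194/events.out.tfevents.1718912225.n136-100-194.566786.0"
acc = EventAccumulator(path)
acc.Reload()

print(acc.Tags()["scalars"])             # discover which scalar tags were actually logged
for event in acc.Scalars("train/loss"):  # assumed tag name; pick one from the list above
    print(event.step, event.value)
```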
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
      "epoch": 1.0,
-     "train_loss": 0.2186826115846634,
-     "train_runtime": 11940.2176,
-     "train_samples": 102360,
-     "train_samples_per_second": 8.573,
-     "train_steps_per_second": 0.034
+     "train_loss": 0.37812867454580357,
+     "train_runtime": 5319.5814,
+     "train_samples": 47302,
+     "train_samples_per_second": 8.892,
+     "train_steps_per_second": 0.035
  }
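Taken together with the 185 optimizer steps recorded in trainer_state.json below, the 47302 training samples imply a global batch size of about 256 for this single-epoch run (47302 / 185 ≈ 255.7). A rough cross-check, assuming one pass over the data with the remainder batch kept:

```python
import math

train_samples = 47302  # from train_results.json above
max_steps = 185        # from trainer_state.json below

# Which common global batch size reproduces 185 steps in one epoch?
for batch in (128, 256, 512):
    print(batch, math.ceil(train_samples / batch))
# -> 128: 370, 256: 185, 512: 93; only 256 matches the recorded step count
```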
trainer_state.json CHANGED
@@ -3,20 +3,20 @@
3
  "best_model_checkpoint": null,
4
  "epoch": 1.0,
5
  "eval_steps": 500,
6
- "global_step": 400,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 0.0,
13
- "grad_norm": 248.672711818404,
14
- "learning_rate": 1.25e-08,
15
- "logits/chosen": -0.5811702013015747,
16
- "logits/rejected": -0.11655431985855103,
17
- "logps/chosen": -351.5902099609375,
18
- "logps/rejected": -240.969970703125,
19
- "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
@@ -24,617 +24,287 @@
24
  "step": 1
25
  },
26
  {
27
- "epoch": 0.03,
28
- "grad_norm": 222.11982816621227,
29
- "learning_rate": 1.25e-07,
30
- "logits/chosen": 0.2612163722515106,
31
- "logits/rejected": 0.2365657538175583,
32
- "logps/chosen": -333.2138366699219,
33
- "logps/rejected": -244.68914794921875,
34
- "loss": 0.6848,
35
- "rewards/accuracies": 0.4965277910232544,
36
- "rewards/chosen": 0.018286287784576416,
37
- "rewards/margins": 0.01924367994070053,
38
- "rewards/rejected": -0.0009573940187692642,
39
  "step": 10
40
  },
41
  {
42
- "epoch": 0.05,
43
- "grad_norm": 131.0722901505231,
44
- "learning_rate": 2.5e-07,
45
- "logits/chosen": 0.00122042594011873,
46
- "logits/rejected": 0.21793019771575928,
47
- "logps/chosen": -320.78399658203125,
48
- "logps/rejected": -234.4599609375,
49
- "loss": 0.5397,
50
- "rewards/accuracies": 0.778124988079071,
51
- "rewards/chosen": 0.3007456660270691,
52
- "rewards/margins": 0.44470709562301636,
53
- "rewards/rejected": -0.14396145939826965,
54
  "step": 20
55
  },
56
  {
57
- "epoch": 0.07,
58
- "grad_norm": 80.67754344694362,
59
- "learning_rate": 3.75e-07,
60
- "logits/chosen": 0.11814385652542114,
61
- "logits/rejected": -0.13333633542060852,
62
- "logps/chosen": -305.92999267578125,
63
- "logps/rejected": -253.8010711669922,
64
- "loss": 0.3279,
65
- "rewards/accuracies": 0.8343750238418579,
66
- "rewards/chosen": 0.9548959732055664,
67
- "rewards/margins": 2.203428268432617,
68
- "rewards/rejected": -1.2485322952270508,
69
  "step": 30
70
  },
71
  {
72
- "epoch": 0.1,
73
- "grad_norm": 66.3293163499791,
74
- "learning_rate": 5e-07,
75
- "logits/chosen": -0.011821460910141468,
76
- "logits/rejected": -0.11223969608545303,
77
- "logps/chosen": -307.73626708984375,
78
- "logps/rejected": -266.0770263671875,
79
- "loss": 0.2627,
80
- "rewards/accuracies": 0.8812500238418579,
81
- "rewards/chosen": 1.6875461339950562,
82
- "rewards/margins": 3.763516664505005,
83
- "rewards/rejected": -2.075970411300659,
84
  "step": 40
85
  },
86
  {
87
- "epoch": 0.12,
88
- "grad_norm": 72.19599673796307,
89
- "learning_rate": 4.990486745229364e-07,
90
- "logits/chosen": 0.3851412832736969,
91
- "logits/rejected": 0.5114809274673462,
92
- "logps/chosen": -316.8995056152344,
93
- "logps/rejected": -269.6571350097656,
94
- "loss": 0.229,
95
- "rewards/accuracies": 0.8843749761581421,
96
- "rewards/chosen": 1.225061058998108,
97
- "rewards/margins": 4.17338228225708,
98
- "rewards/rejected": -2.9483208656311035,
99
  "step": 50
100
  },
101
  {
102
- "epoch": 0.15,
103
- "grad_norm": 53.904395061776626,
104
- "learning_rate": 4.96201938253052e-07,
105
- "logits/chosen": 0.19928967952728271,
106
- "logits/rejected": 0.3621904253959656,
107
- "logps/chosen": -323.8656921386719,
108
- "logps/rejected": -287.19903564453125,
109
- "loss": 0.2394,
110
- "rewards/accuracies": 0.8812500238418579,
111
- "rewards/chosen": 0.07580803334712982,
112
- "rewards/margins": 4.283209800720215,
113
- "rewards/rejected": -4.207401752471924,
114
  "step": 60
115
  },
116
  {
117
- "epoch": 0.17,
118
- "grad_norm": 70.90163796602725,
119
- "learning_rate": 4.91481456572267e-07,
120
- "logits/chosen": 0.33822208642959595,
121
- "logits/rejected": 0.5376901626586914,
122
- "logps/chosen": -337.15509033203125,
123
- "logps/rejected": -272.99346923828125,
124
- "loss": 0.2201,
125
- "rewards/accuracies": 0.9156249761581421,
126
- "rewards/chosen": 0.19220082461833954,
127
- "rewards/margins": 4.760615348815918,
128
- "rewards/rejected": -4.56841516494751,
129
  "step": 70
130
  },
131
  {
132
- "epoch": 0.2,
133
- "grad_norm": 52.67912669957963,
134
- "learning_rate": 4.849231551964771e-07,
135
- "logits/chosen": 0.9565310478210449,
136
- "logits/rejected": 0.9506447911262512,
137
- "logps/chosen": -327.2256774902344,
138
- "logps/rejected": -293.74700927734375,
139
- "loss": 0.2138,
140
- "rewards/accuracies": 0.9375,
141
- "rewards/chosen": -1.5700352191925049,
142
- "rewards/margins": 5.063401222229004,
143
- "rewards/rejected": -6.633436679840088,
144
  "step": 80
145
  },
146
  {
147
- "epoch": 0.23,
148
- "grad_norm": 65.22420033118601,
149
- "learning_rate": 4.7657694675916247e-07,
150
- "logits/chosen": 0.3500242233276367,
151
- "logits/rejected": 0.21482405066490173,
152
- "logps/chosen": -315.8086242675781,
153
- "logps/rejected": -293.5943908691406,
154
- "loss": 0.2317,
155
- "rewards/accuracies": 0.90625,
156
- "rewards/chosen": 0.491361141204834,
157
- "rewards/margins": 5.032988548278809,
158
- "rewards/rejected": -4.541626930236816,
159
  "step": 90
160
  },
161
  {
162
- "epoch": 0.25,
163
- "grad_norm": 54.96225429454786,
164
- "learning_rate": 4.6650635094610966e-07,
165
- "logits/chosen": 0.10862906277179718,
166
- "logits/rejected": 0.24883398413658142,
167
- "logps/chosen": -349.65911865234375,
168
- "logps/rejected": -326.064697265625,
169
- "loss": 0.1952,
170
- "rewards/accuracies": 0.893750011920929,
171
- "rewards/chosen": -1.6933103799819946,
172
- "rewards/margins": 4.797018051147461,
173
- "rewards/rejected": -6.490328788757324,
174
  "step": 100
175
  },
176
  {
177
- "epoch": 0.28,
178
- "grad_norm": 52.005066827882644,
179
- "learning_rate": 4.5478801107224794e-07,
180
- "logits/chosen": 0.11174388229846954,
181
- "logits/rejected": 0.27005332708358765,
182
- "logps/chosen": -326.76959228515625,
183
- "logps/rejected": -285.99151611328125,
184
- "loss": 0.1986,
185
- "rewards/accuracies": 0.934374988079071,
186
- "rewards/chosen": -0.6088568568229675,
187
- "rewards/margins": 5.2384033203125,
188
- "rewards/rejected": -5.847259521484375,
189
  "step": 110
190
  },
191
  {
192
- "epoch": 0.3,
193
- "grad_norm": 80.2735393844808,
194
- "learning_rate": 4.415111107797445e-07,
195
- "logits/chosen": 0.12390895932912827,
196
- "logits/rejected": 0.2671768069267273,
197
- "logps/chosen": -314.864013671875,
198
- "logps/rejected": -296.4173278808594,
199
- "loss": 0.2022,
200
- "rewards/accuracies": 0.890625,
201
- "rewards/chosen": -0.602489709854126,
202
- "rewards/margins": 5.041877746582031,
203
- "rewards/rejected": -5.644367694854736,
204
  "step": 120
205
  },
206
  {
207
- "epoch": 0.33,
208
- "grad_norm": 53.04554797674892,
209
- "learning_rate": 4.2677669529663686e-07,
210
- "logits/chosen": 0.2591269612312317,
211
- "logits/rejected": 0.31469181180000305,
212
- "logps/chosen": -337.55279541015625,
213
- "logps/rejected": -303.430908203125,
214
- "loss": 0.1929,
215
- "rewards/accuracies": 0.8999999761581421,
216
- "rewards/chosen": -0.5142609477043152,
217
- "rewards/margins": 5.346030235290527,
218
- "rewards/rejected": -5.86029052734375,
219
  "step": 130
220
  },
221
  {
222
- "epoch": 0.35,
223
- "grad_norm": 46.55373068779065,
224
- "learning_rate": 4.106969024216348e-07,
225
- "logits/chosen": 0.5539799928665161,
226
- "logits/rejected": 0.3750854432582855,
227
- "logps/chosen": -323.2265625,
228
- "logps/rejected": -283.9201354980469,
229
- "loss": 0.1948,
230
- "rewards/accuracies": 0.934374988079071,
231
- "rewards/chosen": -0.46524372696876526,
232
- "rewards/margins": 5.173353672027588,
233
- "rewards/rejected": -5.63859748840332,
234
  "step": 140
235
  },
236
  {
237
- "epoch": 0.38,
238
- "grad_norm": 57.84664559821586,
239
- "learning_rate": 3.933941090877615e-07,
240
- "logits/chosen": -0.04622136428952217,
241
- "logits/rejected": 0.10014678537845612,
242
- "logps/chosen": -330.5162353515625,
243
- "logps/rejected": -301.1103515625,
244
- "loss": 0.2027,
245
- "rewards/accuracies": 0.8999999761581421,
246
- "rewards/chosen": -0.5420152544975281,
247
- "rewards/margins": 4.931308746337891,
248
- "rewards/rejected": -5.473323822021484,
249
  "step": 150
250
  },
251
  {
252
- "epoch": 0.4,
253
- "grad_norm": 90.75814646308999,
254
- "learning_rate": 3.75e-07,
255
- "logits/chosen": 0.11180607229471207,
256
- "logits/rejected": -0.048911936581134796,
257
- "logps/chosen": -295.4419860839844,
258
- "logps/rejected": -279.9349670410156,
259
- "loss": 0.1841,
260
- "rewards/accuracies": 0.921875,
261
- "rewards/chosen": -0.33810311555862427,
262
- "rewards/margins": 4.924574375152588,
263
- "rewards/rejected": -5.262677192687988,
264
  "step": 160
265
  },
266
  {
267
- "epoch": 0.42,
268
- "grad_norm": 48.25868450687608,
269
- "learning_rate": 3.5565456543517485e-07,
270
- "logits/chosen": -0.2750697731971741,
271
- "logits/rejected": 0.026401836425065994,
272
- "logps/chosen": -324.1236572265625,
273
- "logps/rejected": -290.73785400390625,
274
- "loss": 0.1938,
275
- "rewards/accuracies": 0.887499988079071,
276
- "rewards/chosen": -0.8598020672798157,
277
- "rewards/margins": 5.191195487976074,
278
- "rewards/rejected": -6.050997257232666,
279
  "step": 170
280
  },
281
- {
282
- "epoch": 0.45,
283
- "grad_norm": 55.57273036697954,
284
- "learning_rate": 3.355050358314172e-07,
285
- "logits/chosen": -0.17330403625965118,
286
- "logits/rejected": -0.37704771757125854,
287
- "logps/chosen": -335.1918029785156,
288
- "logps/rejected": -303.3643493652344,
289
- "loss": 0.1893,
290
- "rewards/accuracies": 0.940625011920929,
291
- "rewards/chosen": 0.3236238360404968,
292
- "rewards/margins": 5.686396598815918,
293
- "rewards/rejected": -5.3627729415893555,
294
- "step": 180
295
- },
296
- {
297
- "epoch": 0.47,
298
- "grad_norm": 34.4619211960604,
299
- "learning_rate": 3.147047612756302e-07,
300
- "logits/chosen": 0.051471106708049774,
301
- "logits/rejected": 0.08085541427135468,
302
- "logps/chosen": -341.36932373046875,
303
- "logps/rejected": -306.84906005859375,
304
- "loss": 0.1714,
305
- "rewards/accuracies": 0.9312499761581421,
306
- "rewards/chosen": -0.4273417592048645,
307
- "rewards/margins": 5.435866355895996,
308
- "rewards/rejected": -5.863208293914795,
309
- "step": 190
310
- },
311
- {
312
- "epoch": 0.5,
313
- "grad_norm": 56.25034316469711,
314
- "learning_rate": 2.934120444167326e-07,
315
- "logits/chosen": 0.2834271490573883,
316
- "logits/rejected": 0.4803285002708435,
317
- "logps/chosen": -320.8459777832031,
318
- "logps/rejected": -293.8531799316406,
319
- "loss": 0.1797,
320
- "rewards/accuracies": 0.9125000238418579,
321
- "rewards/chosen": -1.590319275856018,
322
- "rewards/margins": 5.1105732917785645,
323
- "rewards/rejected": -6.700892448425293,
324
- "step": 200
325
- },
326
- {
327
- "epoch": 0.53,
328
- "grad_norm": 42.616613118118565,
329
- "learning_rate": 2.717889356869146e-07,
330
- "logits/chosen": -0.1653163880109787,
331
- "logits/rejected": -0.19997279345989227,
332
- "logps/chosen": -297.6966857910156,
333
- "logps/rejected": -281.0061950683594,
334
- "loss": 0.1905,
335
- "rewards/accuracies": 0.921875,
336
- "rewards/chosen": 0.32219845056533813,
337
- "rewards/margins": 5.068012237548828,
338
- "rewards/rejected": -4.745813846588135,
339
- "step": 210
340
- },
341
- {
342
- "epoch": 0.55,
343
- "grad_norm": 54.94298294558815,
344
- "learning_rate": 2.5e-07,
345
- "logits/chosen": 0.11906982958316803,
346
- "logits/rejected": 0.16367843747138977,
347
- "logps/chosen": -322.9801330566406,
348
- "logps/rejected": -289.7718811035156,
349
- "loss": 0.2379,
350
- "rewards/accuracies": 0.9312499761581421,
351
- "rewards/chosen": -0.8957729339599609,
352
- "rewards/margins": 4.5600104331970215,
353
- "rewards/rejected": -5.455783367156982,
354
- "step": 220
355
- },
356
- {
357
- "epoch": 0.57,
358
- "grad_norm": 45.798677109719435,
359
- "learning_rate": 2.2821106431308543e-07,
360
- "logits/chosen": 0.26459816098213196,
361
- "logits/rejected": 0.37918907403945923,
362
- "logps/chosen": -308.4639892578125,
363
- "logps/rejected": -284.7757873535156,
364
- "loss": 0.1974,
365
- "rewards/accuracies": 0.90625,
366
- "rewards/chosen": -0.7637487649917603,
367
- "rewards/margins": 5.488121032714844,
368
- "rewards/rejected": -6.251870155334473,
369
- "step": 230
370
- },
371
- {
372
- "epoch": 0.6,
373
- "grad_norm": 71.171077628863,
374
- "learning_rate": 2.065879555832674e-07,
375
- "logits/chosen": 0.11944101750850677,
376
- "logits/rejected": 0.10174547135829926,
377
- "logps/chosen": -317.2291564941406,
378
- "logps/rejected": -301.72357177734375,
379
- "loss": 0.1761,
380
- "rewards/accuracies": 0.9281250238418579,
381
- "rewards/chosen": -0.4725034832954407,
382
- "rewards/margins": 5.045371055603027,
383
- "rewards/rejected": -5.517874717712402,
384
- "step": 240
385
- },
386
- {
387
- "epoch": 0.62,
388
- "grad_norm": 58.77603788283333,
389
- "learning_rate": 1.8529523872436977e-07,
390
- "logits/chosen": -0.0030237496830523014,
391
- "logits/rejected": 0.24394559860229492,
392
- "logps/chosen": -337.0401306152344,
393
- "logps/rejected": -304.41485595703125,
394
- "loss": 0.2199,
395
- "rewards/accuracies": 0.90625,
396
- "rewards/chosen": -1.1832717657089233,
397
- "rewards/margins": 4.3705525398254395,
398
- "rewards/rejected": -5.553823947906494,
399
- "step": 250
400
- },
401
- {
402
- "epoch": 0.65,
403
- "grad_norm": 58.549851578658085,
404
- "learning_rate": 1.6449496416858282e-07,
405
- "logits/chosen": 0.2534635663032532,
406
- "logits/rejected": 0.33454760909080505,
407
- "logps/chosen": -322.7724609375,
408
- "logps/rejected": -291.37420654296875,
409
- "loss": 0.1828,
410
- "rewards/accuracies": 0.9437500238418579,
411
- "rewards/chosen": -1.3282363414764404,
412
- "rewards/margins": 5.129273414611816,
413
- "rewards/rejected": -6.457509517669678,
414
- "step": 260
415
- },
416
- {
417
- "epoch": 0.68,
418
- "grad_norm": 61.69689329910415,
419
- "learning_rate": 1.4434543456482518e-07,
420
- "logits/chosen": 0.16627629101276398,
421
- "logits/rejected": 0.17112889885902405,
422
- "logps/chosen": -328.3280944824219,
423
- "logps/rejected": -302.532958984375,
424
- "loss": 0.1701,
425
- "rewards/accuracies": 0.918749988079071,
426
- "rewards/chosen": -0.8465593457221985,
427
- "rewards/margins": 5.068365573883057,
428
- "rewards/rejected": -5.914924621582031,
429
- "step": 270
430
- },
431
- {
432
- "epoch": 0.7,
433
- "grad_norm": 82.28572950186891,
434
- "learning_rate": 1.2500000000000005e-07,
435
- "logits/chosen": 0.07045526802539825,
436
- "logits/rejected": 0.055424489080905914,
437
- "logps/chosen": -314.9088134765625,
438
- "logps/rejected": -283.484130859375,
439
- "loss": 0.1988,
440
- "rewards/accuracies": 0.934374988079071,
441
- "rewards/chosen": -0.05264568328857422,
442
- "rewards/margins": 5.250518798828125,
443
- "rewards/rejected": -5.303164958953857,
444
- "step": 280
445
- },
446
- {
447
- "epoch": 0.72,
448
- "grad_norm": 73.90506249227809,
449
- "learning_rate": 1.0660589091223854e-07,
450
- "logits/chosen": 0.08644680678844452,
451
- "logits/rejected": 0.3770992159843445,
452
- "logps/chosen": -334.16839599609375,
453
- "logps/rejected": -295.61138916015625,
454
- "loss": 0.1816,
455
- "rewards/accuracies": 0.9312499761581421,
456
- "rewards/chosen": -0.6267386674880981,
457
- "rewards/margins": 5.2511420249938965,
458
- "rewards/rejected": -5.877881050109863,
459
- "step": 290
460
- },
461
- {
462
- "epoch": 0.75,
463
- "grad_norm": 55.979754279546064,
464
- "learning_rate": 8.930309757836516e-08,
465
- "logits/chosen": 0.0685787945985794,
466
- "logits/rejected": 0.4188007414340973,
467
- "logps/chosen": -330.8103332519531,
468
- "logps/rejected": -297.2096252441406,
469
- "loss": 0.1831,
470
- "rewards/accuracies": 0.8999999761581421,
471
- "rewards/chosen": -0.8838691711425781,
472
- "rewards/margins": 4.987154960632324,
473
- "rewards/rejected": -5.871024131774902,
474
- "step": 300
475
- },
476
- {
477
- "epoch": 0.78,
478
- "grad_norm": 50.80261213037778,
479
- "learning_rate": 7.322330470336313e-08,
480
- "logits/chosen": 0.22002212703227997,
481
- "logits/rejected": 0.2094310224056244,
482
- "logps/chosen": -321.89739990234375,
483
- "logps/rejected": -294.4070739746094,
484
- "loss": 0.1741,
485
- "rewards/accuracies": 0.918749988079071,
486
- "rewards/chosen": -0.5131534337997437,
487
- "rewards/margins": 5.067374229431152,
488
- "rewards/rejected": -5.580528259277344,
489
- "step": 310
490
- },
491
- {
492
- "epoch": 0.8,
493
- "grad_norm": 62.71796095386463,
494
- "learning_rate": 5.848888922025552e-08,
495
- "logits/chosen": 0.0758807510137558,
496
- "logits/rejected": 0.19501206278800964,
497
- "logps/chosen": -337.13995361328125,
498
- "logps/rejected": -296.71954345703125,
499
- "loss": 0.1841,
500
- "rewards/accuracies": 0.953125,
501
- "rewards/chosen": -0.4793934226036072,
502
- "rewards/margins": 5.182127952575684,
503
- "rewards/rejected": -5.661521911621094,
504
- "step": 320
505
- },
506
- {
507
- "epoch": 0.82,
508
- "grad_norm": 55.862146356475115,
509
- "learning_rate": 4.521198892775202e-08,
510
- "logits/chosen": 0.08274734020233154,
511
- "logits/rejected": -0.0012732266914099455,
512
- "logps/chosen": -333.7369079589844,
513
- "logps/rejected": -296.9526062011719,
514
- "loss": 0.1768,
515
- "rewards/accuracies": 0.925000011920929,
516
- "rewards/chosen": -0.4573189616203308,
517
- "rewards/margins": 5.402385711669922,
518
- "rewards/rejected": -5.859705448150635,
519
- "step": 330
520
- },
521
- {
522
- "epoch": 0.85,
523
- "grad_norm": 78.6552872883194,
524
- "learning_rate": 3.349364905389032e-08,
525
- "logits/chosen": -0.16344629228115082,
526
- "logits/rejected": 0.0033723146189004183,
527
- "logps/chosen": -322.8833312988281,
528
- "logps/rejected": -298.59588623046875,
529
- "loss": 0.1682,
530
- "rewards/accuracies": 0.8999999761581421,
531
- "rewards/chosen": -0.6776003837585449,
532
- "rewards/margins": 5.152725696563721,
533
- "rewards/rejected": -5.830325126647949,
534
- "step": 340
535
- },
536
- {
537
- "epoch": 0.88,
538
- "grad_norm": 57.94825784387087,
539
- "learning_rate": 2.3423053240837514e-08,
540
- "logits/chosen": 0.06374481320381165,
541
- "logits/rejected": 0.13793043792247772,
542
- "logps/chosen": -319.5912780761719,
543
- "logps/rejected": -297.6089172363281,
544
- "loss": 0.1912,
545
- "rewards/accuracies": 0.903124988079071,
546
- "rewards/chosen": -0.5590634346008301,
547
- "rewards/margins": 5.25864315032959,
548
- "rewards/rejected": -5.81770658493042,
549
- "step": 350
550
- },
551
- {
552
- "epoch": 0.9,
553
- "grad_norm": 48.20871737390797,
554
- "learning_rate": 1.507684480352292e-08,
555
- "logits/chosen": 0.1408473700284958,
556
- "logits/rejected": 0.24622826278209686,
557
- "logps/chosen": -323.8467712402344,
558
- "logps/rejected": -301.0394592285156,
559
- "loss": 0.1714,
560
- "rewards/accuracies": 0.9312499761581421,
561
- "rewards/chosen": -0.5114877820014954,
562
- "rewards/margins": 5.479299068450928,
563
- "rewards/rejected": -5.990786552429199,
564
- "step": 360
565
- },
566
- {
567
- "epoch": 0.93,
568
- "grad_norm": 66.71686397462808,
569
- "learning_rate": 8.518543427732949e-09,
570
- "logits/chosen": -0.14612798392772675,
571
- "logits/rejected": -0.002347910311073065,
572
- "logps/chosen": -305.1243896484375,
573
- "logps/rejected": -282.12835693359375,
574
- "loss": 0.1673,
575
- "rewards/accuracies": 0.921875,
576
- "rewards/chosen": -0.6524958610534668,
577
- "rewards/margins": 5.196988105773926,
578
- "rewards/rejected": -5.849484443664551,
579
- "step": 370
580
- },
581
- {
582
- "epoch": 0.95,
583
- "grad_norm": 61.291842343672215,
584
- "learning_rate": 3.798061746947995e-09,
585
- "logits/chosen": 0.044129520654678345,
586
- "logits/rejected": 0.14980553090572357,
587
- "logps/chosen": -319.0976257324219,
588
- "logps/rejected": -283.139892578125,
589
- "loss": 0.1714,
590
- "rewards/accuracies": 0.940625011920929,
591
- "rewards/chosen": -0.5317240953445435,
592
- "rewards/margins": 5.177973747253418,
593
- "rewards/rejected": -5.70969820022583,
594
- "step": 380
595
- },
596
  {
597
  "epoch": 0.97,
598
- "grad_norm": 56.153629064135075,
599
- "learning_rate": 9.513254770636137e-10,
600
- "logits/chosen": -0.012590537779033184,
601
- "logits/rejected": 0.09102629870176315,
602
- "logps/chosen": -334.525634765625,
603
- "logps/rejected": -287.55987548828125,
604
- "loss": 0.1797,
605
- "rewards/accuracies": 0.925000011920929,
606
- "rewards/chosen": -0.5071713328361511,
607
- "rewards/margins": 5.1908040046691895,
608
- "rewards/rejected": -5.697975158691406,
609
- "step": 390
610
- },
611
- {
612
- "epoch": 1.0,
613
- "grad_norm": 40.78979544232789,
614
- "learning_rate": 0.0,
615
- "logits/chosen": 0.030362462624907494,
616
- "logits/rejected": 0.237002894282341,
617
- "logps/chosen": -320.5105895996094,
618
- "logps/rejected": -285.7564697265625,
619
- "loss": 0.1703,
620
- "rewards/accuracies": 0.9156249761581421,
621
- "rewards/chosen": -0.8242006301879883,
622
- "rewards/margins": 5.20082950592041,
623
- "rewards/rejected": -6.025030136108398,
624
- "step": 400
625
  },
626
  {
627
  "epoch": 1.0,
628
- "step": 400,
629
  "total_flos": 0.0,
630
- "train_loss": 0.2186826115846634,
631
- "train_runtime": 11940.2176,
632
- "train_samples_per_second": 8.573,
633
- "train_steps_per_second": 0.034
634
  }
635
  ],
636
  "logging_steps": 10,
637
- "max_steps": 400,
638
  "num_input_tokens_seen": 0,
639
  "num_train_epochs": 1,
640
  "save_steps": 100,
 
3
  "best_model_checkpoint": null,
4
  "epoch": 1.0,
5
  "eval_steps": 500,
6
+ "global_step": 185,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.01,
13
+ "grad_norm": 518.0837836920463,
14
+ "learning_rate": 2.6315789473684208e-08,
15
+ "logits/chosen": -0.1266070306301117,
16
+ "logits/rejected": 0.7204304933547974,
17
+ "logps/chosen": -319.01666259765625,
18
+ "logps/rejected": -252.47039794921875,
19
+ "loss": 0.6957,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
 
24
  "step": 1
25
  },
26
  {
27
+ "epoch": 0.05,
28
+ "grad_norm": 455.8868449558538,
29
+ "learning_rate": 2.631578947368421e-07,
30
+ "logits/chosen": -0.38653168082237244,
31
+ "logits/rejected": 0.3361072242259979,
32
+ "logps/chosen": -266.4560546875,
33
+ "logps/rejected": -224.02757263183594,
34
+ "loss": 0.6557,
35
+ "rewards/accuracies": 0.53125,
36
+ "rewards/chosen": -0.08313964307308197,
37
+ "rewards/margins": 0.08996326476335526,
38
+ "rewards/rejected": -0.17310291528701782,
39
  "step": 10
40
  },
41
  {
42
+ "epoch": 0.11,
43
+ "grad_norm": 261.73889640243516,
44
+ "learning_rate": 4.999552306674344e-07,
45
+ "logits/chosen": -0.3945041298866272,
46
+ "logits/rejected": 0.5968992114067078,
47
+ "logps/chosen": -283.4828796386719,
48
+ "logps/rejected": -241.41641235351562,
49
+ "loss": 0.4425,
50
+ "rewards/accuracies": 0.8218749761581421,
51
+ "rewards/chosen": 0.35899922251701355,
52
+ "rewards/margins": 2.6830153465270996,
53
+ "rewards/rejected": -2.3240160942077637,
54
  "step": 20
55
  },
56
  {
57
+ "epoch": 0.16,
58
+ "grad_norm": 261.2394624512226,
59
+ "learning_rate": 4.946022852363932e-07,
60
+ "logits/chosen": -0.47767549753189087,
61
+ "logits/rejected": 0.3392488360404968,
62
+ "logps/chosen": -265.13568115234375,
63
+ "logps/rejected": -243.9243927001953,
64
+ "loss": 0.462,
65
+ "rewards/accuracies": 0.815625011920929,
66
+ "rewards/chosen": -0.15065696835517883,
67
+ "rewards/margins": 5.174683094024658,
68
+ "rewards/rejected": -5.3253397941589355,
69
  "step": 30
70
  },
71
  {
72
+ "epoch": 0.22,
73
+ "grad_norm": 310.52802941610554,
74
+ "learning_rate": 4.805146507594034e-07,
75
+ "logits/chosen": -0.5098148584365845,
76
+ "logits/rejected": 0.3751198649406433,
77
+ "logps/chosen": -261.0537109375,
78
+ "logps/rejected": -233.42532348632812,
79
+ "loss": 0.4377,
80
+ "rewards/accuracies": 0.862500011920929,
81
+ "rewards/chosen": -0.6695741415023804,
82
+ "rewards/margins": 6.071677207946777,
83
+ "rewards/rejected": -6.7412519454956055,
84
  "step": 40
85
  },
86
  {
87
+ "epoch": 0.27,
88
+ "grad_norm": 313.59929050730216,
89
+ "learning_rate": 4.581953932909403e-07,
90
+ "logits/chosen": -0.5670371055603027,
91
+ "logits/rejected": 0.24361911416053772,
92
+ "logps/chosen": -281.9254150390625,
93
+ "logps/rejected": -255.1023712158203,
94
+ "loss": 0.3616,
95
+ "rewards/accuracies": 0.871874988079071,
96
+ "rewards/chosen": 0.5183612108230591,
97
+ "rewards/margins": 6.197042465209961,
98
+ "rewards/rejected": -5.678681373596191,
99
  "step": 50
100
  },
101
  {
102
+ "epoch": 0.32,
103
+ "grad_norm": 266.5544579622121,
104
+ "learning_rate": 4.284415281717847e-07,
105
+ "logits/chosen": -0.47226476669311523,
106
+ "logits/rejected": 0.4281126856803894,
107
+ "logps/chosen": -278.83251953125,
108
+ "logps/rejected": -258.22943115234375,
109
+ "loss": 0.3576,
110
+ "rewards/accuracies": 0.878125011920929,
111
+ "rewards/chosen": -0.19791364669799805,
112
+ "rewards/margins": 6.015913486480713,
113
+ "rewards/rejected": -6.213827133178711,
114
  "step": 60
115
  },
116
  {
117
+ "epoch": 0.38,
118
+ "grad_norm": 302.58453941602346,
119
+ "learning_rate": 3.923155588020165e-07,
120
+ "logits/chosen": -0.5101458430290222,
121
+ "logits/rejected": 0.430727481842041,
122
+ "logps/chosen": -249.14736938476562,
123
+ "logps/rejected": -227.1038055419922,
124
+ "loss": 0.3598,
125
+ "rewards/accuracies": 0.871874988079071,
126
+ "rewards/chosen": 0.11757852882146835,
127
+ "rewards/margins": 5.637847900390625,
128
+ "rewards/rejected": -5.520269870758057,
129
  "step": 70
130
  },
131
  {
132
+ "epoch": 0.43,
133
+ "grad_norm": 307.7393549252261,
134
+ "learning_rate": 3.511075348989692e-07,
135
+ "logits/chosen": -0.28342846035957336,
136
+ "logits/rejected": 0.524147629737854,
137
+ "logps/chosen": -275.883544921875,
138
+ "logps/rejected": -249.2301788330078,
139
+ "loss": 0.4166,
140
+ "rewards/accuracies": 0.8500000238418579,
141
+ "rewards/chosen": -1.1018508672714233,
142
+ "rewards/margins": 5.499217510223389,
143
+ "rewards/rejected": -6.601068019866943,
144
  "step": 80
145
  },
146
  {
147
+ "epoch": 0.49,
148
+ "grad_norm": 307.7617928649449,
149
+ "learning_rate": 3.062889851306735e-07,
150
+ "logits/chosen": -0.23101525008678436,
151
+ "logits/rejected": 0.594648003578186,
152
+ "logps/chosen": -263.6214294433594,
153
+ "logps/rejected": -237.5282440185547,
154
+ "loss": 0.3926,
155
+ "rewards/accuracies": 0.8687499761581421,
156
+ "rewards/chosen": -0.46620288491249084,
157
+ "rewards/margins": 5.611274719238281,
158
+ "rewards/rejected": -6.077476978302002,
159
  "step": 90
160
  },
161
  {
162
+ "epoch": 0.54,
163
+ "grad_norm": 263.0128253752098,
164
+ "learning_rate": 2.594603691794176e-07,
165
+ "logits/chosen": -0.35276657342910767,
166
+ "logits/rejected": 0.5542042851448059,
167
+ "logps/chosen": -268.01763916015625,
168
+ "logps/rejected": -240.2915496826172,
169
+ "loss": 0.3427,
170
+ "rewards/accuracies": 0.831250011920929,
171
+ "rewards/chosen": -0.9488876461982727,
172
+ "rewards/margins": 5.2973761558532715,
173
+ "rewards/rejected": -6.246264457702637,
174
  "step": 100
175
  },
176
  {
177
+ "epoch": 0.59,
178
+ "grad_norm": 321.2698886355005,
179
+ "learning_rate": 2.1229392570965654e-07,
180
+ "logits/chosen": 0.03777565434575081,
181
+ "logits/rejected": 0.7461130023002625,
182
+ "logps/chosen": -262.41558837890625,
183
+ "logps/rejected": -246.7896270751953,
184
+ "loss": 0.3086,
185
+ "rewards/accuracies": 0.871874988079071,
186
+ "rewards/chosen": -1.0481889247894287,
187
+ "rewards/margins": 5.13235330581665,
188
+ "rewards/rejected": -6.180542945861816,
189
  "step": 110
190
  },
191
  {
192
+ "epoch": 0.65,
193
+ "grad_norm": 257.6145945098329,
194
+ "learning_rate": 1.6647395712565254e-07,
195
+ "logits/chosen": -0.36657968163490295,
196
+ "logits/rejected": 0.6054766774177551,
197
+ "logps/chosen": -276.4320373535156,
198
+ "logps/rejected": -247.306640625,
199
+ "loss": 0.3606,
200
+ "rewards/accuracies": 0.8687499761581421,
201
+ "rewards/chosen": -0.45442095398902893,
202
+ "rewards/margins": 5.263146877288818,
203
+ "rewards/rejected": -5.717567443847656,
204
  "step": 120
205
  },
206
  {
207
+ "epoch": 0.7,
208
+ "grad_norm": 224.99155866870328,
209
+ "learning_rate": 1.2363668353585485e-07,
210
+ "logits/chosen": -0.39301735162734985,
211
+ "logits/rejected": 0.5232549905776978,
212
+ "logps/chosen": -264.6087951660156,
213
+ "logps/rejected": -236.9209442138672,
214
+ "loss": 0.3158,
215
+ "rewards/accuracies": 0.903124988079071,
216
+ "rewards/chosen": -0.47178196907043457,
217
+ "rewards/margins": 5.685537815093994,
218
+ "rewards/rejected": -6.157320499420166,
219
  "step": 130
220
  },
221
  {
222
+ "epoch": 0.76,
223
+ "grad_norm": 234.77001103810687,
224
+ "learning_rate": 8.53118137245516e-08,
225
+ "logits/chosen": -0.21741196513175964,
226
+ "logits/rejected": 0.6465774178504944,
227
+ "logps/chosen": -270.5892028808594,
228
+ "logps/rejected": -251.4233856201172,
229
+ "loss": 0.2942,
230
+ "rewards/accuracies": 0.8687499761581421,
231
+ "rewards/chosen": -0.42987537384033203,
232
+ "rewards/margins": 5.9034929275512695,
233
+ "rewards/rejected": -6.33336877822876,
234
  "step": 140
235
  },
236
  {
237
+ "epoch": 0.81,
238
+ "grad_norm": 289.2759977765359,
239
+ "learning_rate": 5.2867919617408553e-08,
240
+ "logits/chosen": -0.30728083848953247,
241
+ "logits/rejected": 0.5963491201400757,
242
+ "logps/chosen": -267.1575927734375,
243
+ "logps/rejected": -238.6244659423828,
244
+ "loss": 0.3253,
245
+ "rewards/accuracies": 0.8812500238418579,
246
+ "rewards/chosen": 0.4330620765686035,
247
+ "rewards/margins": 5.231642723083496,
248
+ "rewards/rejected": -4.798580646514893,
249
  "step": 150
250
  },
251
  {
252
+ "epoch": 0.86,
253
+ "grad_norm": 230.9320948308424,
254
+ "learning_rate": 2.7463564905650853e-08,
255
+ "logits/chosen": -0.37829893827438354,
256
+ "logits/rejected": 0.43174609541893005,
257
+ "logps/chosen": -270.28094482421875,
258
+ "logps/rejected": -246.8420867919922,
259
+ "loss": 0.3039,
260
+ "rewards/accuracies": 0.893750011920929,
261
+ "rewards/chosen": 0.014125394634902477,
262
+ "rewards/margins": 5.368089199066162,
263
+ "rewards/rejected": -5.353963375091553,
264
  "step": 160
265
  },
266
  {
267
+ "epoch": 0.92,
268
+ "grad_norm": 253.43499472230928,
269
+ "learning_rate": 1.0005933014019307e-08,
270
+ "logits/chosen": -0.3070162534713745,
271
+ "logits/rejected": 0.6451749205589294,
272
+ "logps/chosen": -269.88385009765625,
273
+ "logps/rejected": -247.8417205810547,
274
+ "loss": 0.314,
275
+ "rewards/accuracies": 0.8812500238418579,
276
+ "rewards/chosen": -0.7552144527435303,
277
+ "rewards/margins": 5.324892044067383,
278
+ "rewards/rejected": -6.080106258392334,
279
  "step": 170
280
  },
281
  {
282
  "epoch": 0.97,
283
+ "grad_norm": 264.72644139971993,
284
+ "learning_rate": 1.1184317978602808e-09,
285
+ "logits/chosen": -0.4602000117301941,
286
+ "logits/rejected": 0.40653783082962036,
287
+ "logps/chosen": -261.251220703125,
288
+ "logps/rejected": -241.342529296875,
289
+ "loss": 0.404,
290
+ "rewards/accuracies": 0.9281250238418579,
291
+ "rewards/chosen": -0.2922298312187195,
292
+ "rewards/margins": 5.988170623779297,
293
+ "rewards/rejected": -6.28040075302124,
294
+ "step": 180
295
  },
296
  {
297
  "epoch": 1.0,
298
+ "step": 185,
299
  "total_flos": 0.0,
300
+ "train_loss": 0.37812867454580357,
301
+ "train_runtime": 5319.5814,
302
+ "train_samples_per_second": 8.892,
303
+ "train_steps_per_second": 0.035
304
  }
305
  ],
306
  "logging_steps": 10,
307
+ "max_steps": 185,
308
  "num_input_tokens_seen": 0,
309
  "num_train_epochs": 1,
310
  "save_steps": 100,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b6f555e8b191e5dee3bafecffb3a88f44ea1db78545e88e62c9e7b6e0969daad
- size 6328
+ oid sha256:dbdc9c5363b2f9445f9cb4d5b25a0e5791e43f82c6521e4905a43fd8fc82e524
+ size 6264
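The two run-state artifacts are straightforward to read back: trainer_state.json is plain JSON whose log_history list holds the per-step metrics shown above, and training_args.bin is the TrainingArguments object the Trainer saved with torch.save. A short sketch, assuming both files are in the current directory and the pinned library versions are installed:

```python
import json
import torch

# Per-step DPO metrics; field names ("loss", "rewards/margins", ...) come from the log above.
with open("trainer_state.json") as f:
    state = json.load(f)
for entry in state["log_history"]:
    if "loss" in entry:  # the final summary entry carries train_loss instead of loss
        print(entry["step"], entry["loss"], entry.get("rewards/margins"))

# training_args.bin unpickles back into a transformers.TrainingArguments instance,
# so transformers must be importable; with PyTorch 2.1.2 a plain torch.load suffices.
args = torch.load("training_args.bin")
print(args.learning_rate, args.per_device_train_batch_size, args.gradient_accumulation_steps)
```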