RikkiXu committed
Commit c90c496
1 Parent(s): 027dc1d

Model save

README.md CHANGED
@@ -13,7 +13,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  # zephyr-7b-dpo-full
 
- This model was trained from scratch on the None dataset.
+ This model was trained from scratch on an unknown dataset.
 
  ## Model description
 
@@ -52,7 +52,7 @@ The following hyperparameters were used during training:
 
  ### Framework versions
 
- - Transformers 4.41.1
+ - Transformers 4.39.3
  - Pytorch 2.1.2+cu118
- - Datasets 2.16.1
+ - Datasets 2.19.1
- - Tokenizers 0.19.1
+ - Tokenizers 0.15.2
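
The README hunk above pins the updated framework stack (Transformers 4.39.3, Pytorch 2.1.2+cu118, Datasets 2.19.1, Tokenizers 0.15.2). As a minimal sketch of loading the saved checkpoint with that stack — assuming the repo id is RikkiXu/zephyr-7b-dpo-full, which is inferred from the commit author and model name rather than stated anywhere in the diff:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "RikkiXu/zephyr-7b-dpo-full"  # assumed repo id, not stated in the diff
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

prompt = "Explain DPO in one sentence."
inputs = tokenizer(prompt, return_tensors="pt")
# generation_config.json in this commit keeps bos_token_id=1 and eos_token_id=2,
# so generate() stops on token id 2 by default.
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
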
all_results.json CHANGED
@@ -1,9 +1,8 @@
 {
- "epoch": 0.9980806142034548,
- "total_flos": 0.0,
- "train_loss": 0.3119094243416419,
- "train_runtime": 6443.086,
+ "epoch": 1.0,
+ "train_loss": 0.22171047773116676,
+ "train_runtime": 5890.091,
  "train_samples": 50000,
- "train_samples_per_second": 7.76,
- "train_steps_per_second": 0.061
+ "train_samples_per_second": 8.489,
+ "train_steps_per_second": 0.066
 }
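
The updated throughput numbers are internally consistent and can be checked using nothing beyond the values in this file plus the 390 optimizer steps logged in trainer_state.json below:

# Consistency check on the new all_results.json values.
train_samples = 50000
train_runtime = 5890.091                # seconds
print(train_samples / train_runtime)    # ~8.489 -> matches train_samples_per_second
print(0.066 * train_runtime)            # ~388.7 -> roughly the 390 steps in trainer_state.json
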
generation_config.json CHANGED
@@ -2,5 +2,5 @@
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
- "transformers_version": "4.41.1"
+ "transformers_version": "4.39.3"
 }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5bc88b0288a40faf21a1e24a96384ccf683d38e8246d3b0cab22d31c505d8b9f
+ oid sha256:d0abc7bcb57ebd4c85bca100446495fa1f35516ea4513f630bb3534e01a3e139
  size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c430b926ebd01f65609ab319292fa842772b87fc92486924b50fdd7065be6155
+ oid sha256:9901d8efdf08230c45749f927c9bcca0fc53e1bb330c70a7989c85951effcb3e
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4d5049e6472b9a1956a056ed3a8a66408f55efa3d2ea2bd03073317eac4eec19
+ oid sha256:07758d9f276a4287d05416d1f89f26070bd0cd4f6fe426596e860df642b6c6e8
  size 4540516344
runs/Jun22_09-40-11_n136-112-146/events.out.tfevents.1719021114.n136-112-146.3387325.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ae2e602011d96a1d261b995a1e371de2d2d68ae27891840cc7c7ae421436694c
+ oid sha256:0f47c1118994624212cf7c63f077ee6e2ea3cd246ed37d29b5d4571eca132c8b
- size 26098
+ size 32644
train_results.json CHANGED
@@ -1,9 +1,8 @@
 {
- "epoch": 0.9980806142034548,
- "total_flos": 0.0,
- "train_loss": 0.3119094243416419,
- "train_runtime": 6443.086,
+ "epoch": 1.0,
+ "train_loss": 0.22171047773116676,
+ "train_runtime": 5890.091,
  "train_samples": 50000,
- "train_samples_per_second": 7.76,
- "train_steps_per_second": 0.061
+ "train_samples_per_second": 8.489,
+ "train_steps_per_second": 0.066
 }
trainer_state.json CHANGED
@@ -9,13 +9,13 @@
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 0.0025591810620601407,
13
- "grad_norm": 709.5459154500938,
14
  "learning_rate": 1.0256410256410256e-08,
15
- "logits/chosen": -2.5583817958831787,
16
- "logits/rejected": -2.4487552642822266,
17
  "logps/chosen": -258.1644592285156,
18
- "logps/rejected": -216.25729370117188,
19
  "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
@@ -24,598 +24,598 @@
24
  "step": 1
25
  },
26
  {
27
- "epoch": 0.025591810620601407,
28
- "grad_norm": 684.196668594934,
29
  "learning_rate": 1.0256410256410255e-07,
30
- "logits/chosen": -2.605860471725464,
31
- "logits/rejected": -2.5528433322906494,
32
- "logps/chosen": -267.56048583984375,
33
- "logps/rejected": -217.59442138671875,
34
- "loss": 0.7027,
35
- "rewards/accuracies": 0.4166666567325592,
36
- "rewards/chosen": 0.0020057493820786476,
37
- "rewards/margins": -0.020002255216240883,
38
- "rewards/rejected": 0.022008005529642105,
39
  "step": 10
40
  },
41
  {
42
- "epoch": 0.05118362124120281,
43
- "grad_norm": 515.3907221989738,
44
  "learning_rate": 2.051282051282051e-07,
45
- "logits/chosen": -2.6285552978515625,
46
- "logits/rejected": -2.566141128540039,
47
- "logps/chosen": -260.791259765625,
48
- "logps/rejected": -207.073486328125,
49
- "loss": 0.5875,
50
- "rewards/accuracies": 0.71875,
51
- "rewards/chosen": 0.2564736604690552,
52
- "rewards/margins": 0.2661496698856354,
53
- "rewards/rejected": -0.00967598520219326,
54
  "step": 20
55
  },
56
  {
57
- "epoch": 0.07677543186180422,
58
- "grad_norm": 608.3322300561331,
59
  "learning_rate": 3.076923076923077e-07,
60
- "logits/chosen": -2.6465039253234863,
61
- "logits/rejected": -2.5724854469299316,
62
- "logps/chosen": -251.90835571289062,
63
- "logps/rejected": -198.9151611328125,
64
- "loss": 0.3737,
65
- "rewards/accuracies": 0.8500000238418579,
66
- "rewards/chosen": 1.6194490194320679,
67
- "rewards/margins": 1.6427723169326782,
68
- "rewards/rejected": -0.02332335151731968,
69
  "step": 30
70
  },
71
  {
72
- "epoch": 0.10236724248240563,
73
- "grad_norm": 286.23056215212245,
74
  "learning_rate": 3.999919890759704e-07,
75
- "logits/chosen": -2.650782823562622,
76
- "logits/rejected": -2.571556329727173,
77
- "logps/chosen": -244.2300567626953,
78
- "logps/rejected": -193.5239715576172,
79
- "loss": 0.3167,
80
- "rewards/accuracies": 0.793749988079071,
81
- "rewards/chosen": 3.0596303939819336,
82
- "rewards/margins": 2.9784700870513916,
83
- "rewards/rejected": 0.08116073161363602,
84
  "step": 40
85
  },
86
  {
87
- "epoch": 0.12795905310300704,
88
- "grad_norm": 355.8436814450708,
89
  "learning_rate": 3.9903145446619833e-07,
90
- "logits/chosen": -2.6738078594207764,
91
- "logits/rejected": -2.601217031478882,
92
- "logps/chosen": -257.42144775390625,
93
- "logps/rejected": -202.82069396972656,
94
- "loss": 0.3214,
95
- "rewards/accuracies": 0.875,
96
- "rewards/chosen": 5.407862663269043,
97
- "rewards/margins": 4.2437334060668945,
98
- "rewards/rejected": 1.164129376411438,
99
  "step": 50
100
  },
101
  {
102
- "epoch": 0.15355086372360843,
103
- "grad_norm": 419.99201519842893,
104
  "learning_rate": 3.9647754748313294e-07,
105
- "logits/chosen": -2.6383204460144043,
106
- "logits/rejected": -2.5659520626068115,
107
- "logps/chosen": -259.7900695800781,
108
- "logps/rejected": -207.2904510498047,
109
- "loss": 0.3176,
110
- "rewards/accuracies": 0.8500000238418579,
111
- "rewards/chosen": 4.518810272216797,
112
- "rewards/margins": 4.13478422164917,
113
- "rewards/rejected": 0.3840256929397583,
114
  "step": 60
115
  },
116
  {
117
- "epoch": 0.17914267434420986,
118
- "grad_norm": 305.786716914146,
119
  "learning_rate": 3.9235071376357917e-07,
120
- "logits/chosen": -2.6047496795654297,
121
- "logits/rejected": -2.5317835807800293,
122
- "logps/chosen": -290.00616455078125,
123
- "logps/rejected": -227.17697143554688,
124
- "loss": 0.301,
125
- "rewards/accuracies": 0.90625,
126
- "rewards/chosen": 3.9522290229797363,
127
- "rewards/margins": 4.945390701293945,
128
- "rewards/rejected": -0.9931615591049194,
129
  "step": 70
130
  },
131
  {
132
- "epoch": 0.20473448496481125,
133
- "grad_norm": 512.2762241729247,
134
  "learning_rate": 3.8668399121531677e-07,
135
- "logits/chosen": -2.552280902862549,
136
- "logits/rejected": -2.476268768310547,
137
- "logps/chosen": -260.35906982421875,
138
- "logps/rejected": -206.0688018798828,
139
- "loss": 0.2615,
140
- "rewards/accuracies": 0.875,
141
- "rewards/chosen": 3.5995476245880127,
142
- "rewards/margins": 5.095798969268799,
143
- "rewards/rejected": -1.4962517023086548,
144
  "step": 80
145
  },
146
  {
147
- "epoch": 0.23032629558541268,
148
- "grad_norm": 515.1909309248075,
149
  "learning_rate": 3.795227455278028e-07,
150
- "logits/chosen": -2.5692882537841797,
151
- "logits/rejected": -2.510436534881592,
152
- "logps/chosen": -256.6405334472656,
153
- "logps/rejected": -217.69192504882812,
154
- "loss": 0.264,
155
- "rewards/accuracies": 0.862500011920929,
156
- "rewards/chosen": 2.5523829460144043,
157
- "rewards/margins": 4.150609016418457,
158
- "rewards/rejected": -1.5982261896133423,
159
  "step": 90
160
  },
161
  {
162
- "epoch": 0.2559181062060141,
163
- "grad_norm": 587.7494413414474,
164
  "learning_rate": 3.7092430699120403e-07,
165
- "logits/chosen": -2.608997344970703,
166
- "logits/rejected": -2.530529260635376,
167
- "logps/chosen": -235.7121124267578,
168
- "logps/rejected": -201.58700561523438,
169
- "loss": 0.3994,
170
- "rewards/accuracies": 0.84375,
171
- "rewards/chosen": 3.2284069061279297,
172
- "rewards/margins": 4.6865153312683105,
173
- "rewards/rejected": -1.4581090211868286,
174
  "step": 100
175
  },
176
  {
177
- "epoch": 0.28150991682661547,
178
- "grad_norm": 383.9666636386822,
179
  "learning_rate": 3.6095751153125107e-07,
180
- "logits/chosen": -2.605412483215332,
181
- "logits/rejected": -2.5209498405456543,
182
- "logps/chosen": -257.01318359375,
183
- "logps/rejected": -210.5634002685547,
184
- "loss": 0.3274,
185
- "rewards/accuracies": 0.862500011920929,
186
- "rewards/chosen": 4.330618381500244,
187
- "rewards/margins": 5.40033483505249,
188
- "rewards/rejected": -1.069716453552246,
189
  "step": 110
190
  },
191
  {
192
- "epoch": 0.30710172744721687,
193
- "grad_norm": 452.32780946070125,
194
  "learning_rate": 3.497021496342202e-07,
195
- "logits/chosen": -2.5948054790496826,
196
- "logits/rejected": -2.5218663215637207,
197
- "logps/chosen": -261.63739013671875,
198
- "logps/rejected": -209.2183074951172,
199
- "loss": 0.3015,
200
- "rewards/accuracies": 0.887499988079071,
201
- "rewards/chosen": 3.912487030029297,
202
- "rewards/margins": 5.338441371917725,
203
- "rewards/rejected": -1.4259542226791382,
204
  "step": 120
205
  },
206
  {
207
- "epoch": 0.3326935380678183,
208
- "grad_norm": 325.92030560656247,
209
  "learning_rate": 3.372483275737467e-07,
210
- "logits/chosen": -2.574514389038086,
211
- "logits/rejected": -2.505225896835327,
212
- "logps/chosen": -250.41543579101562,
213
- "logps/rejected": -196.6962127685547,
214
- "loss": 0.2826,
215
- "rewards/accuracies": 0.8374999761581421,
216
- "rewards/chosen": 3.443896532058716,
217
- "rewards/margins": 4.925398826599121,
218
- "rewards/rejected": -1.4815022945404053,
219
  "step": 130
220
  },
221
  {
222
- "epoch": 0.3582853486884197,
223
- "grad_norm": 286.3934639339848,
224
  "learning_rate": 3.2369574605325094e-07,
225
- "logits/chosen": -2.598271369934082,
226
- "logits/rejected": -2.5281975269317627,
227
- "logps/chosen": -268.22943115234375,
228
- "logps/rejected": -216.2914581298828,
229
- "loss": 0.3028,
230
- "rewards/accuracies": 0.856249988079071,
231
- "rewards/chosen": 3.4734299182891846,
232
- "rewards/margins": 4.84061336517334,
233
- "rewards/rejected": -1.3671828508377075,
234
  "step": 140
235
  },
236
  {
237
- "epoch": 0.3838771593090211,
238
- "grad_norm": 326.69658537939597,
239
  "learning_rate": 3.091529020389009e-07,
240
- "logits/chosen": -2.5794036388397217,
241
- "logits/rejected": -2.5112969875335693,
242
- "logps/chosen": -262.27850341796875,
243
- "logps/rejected": -213.2576141357422,
244
- "loss": 0.3047,
245
- "rewards/accuracies": 0.887499988079071,
246
- "rewards/chosen": 4.1566081047058105,
247
- "rewards/margins": 5.587738990783691,
248
- "rewards/rejected": -1.4311310052871704,
249
  "step": 150
250
  },
251
  {
252
- "epoch": 0.4094689699296225,
253
- "grad_norm": 334.0349111221283,
254
  "learning_rate": 2.937362201729407e-07,
255
- "logits/chosen": -2.569304943084717,
256
- "logits/rejected": -2.4969332218170166,
257
- "logps/chosen": -241.8642120361328,
258
- "logps/rejected": -196.44754028320312,
259
- "loss": 0.2947,
260
- "rewards/accuracies": 0.893750011920929,
261
- "rewards/chosen": 3.53686785697937,
262
- "rewards/margins": 5.24989128112793,
263
- "rewards/rejected": -1.7130241394042969,
264
  "step": 160
265
  },
266
  {
267
- "epoch": 0.4350607805502239,
268
- "grad_norm": 385.1324260357516,
269
  "learning_rate": 2.77569120720971e-07,
270
- "logits/chosen": -2.5677335262298584,
271
- "logits/rejected": -2.499415874481201,
272
- "logps/chosen": -263.1123046875,
273
- "logps/rejected": -209.7564239501953,
274
- "loss": 0.2546,
275
- "rewards/accuracies": 0.875,
276
- "rewards/chosen": 3.789729595184326,
277
- "rewards/margins": 5.479173183441162,
278
- "rewards/rejected": -1.6894439458847046,
279
  "step": 170
280
  },
281
  {
282
- "epoch": 0.46065259117082535,
283
- "grad_norm": 298.5381373642981,
284
  "learning_rate": 2.6078103151484935e-07,
285
- "logits/chosen": -2.632197856903076,
286
- "logits/rejected": -2.5449578762054443,
287
- "logps/chosen": -234.98526000976562,
288
- "logps/rejected": -185.88897705078125,
289
- "loss": 0.3076,
290
- "rewards/accuracies": 0.8812500238418579,
291
- "rewards/chosen": 3.5240702629089355,
292
- "rewards/margins": 4.843334674835205,
293
- "rewards/rejected": -1.3192641735076904,
294
  "step": 180
295
  },
296
  {
297
- "epoch": 0.48624440179142675,
298
- "grad_norm": 275.15385748139875,
299
  "learning_rate": 2.435063518012335e-07,
300
- "logits/chosen": -2.592048168182373,
301
- "logits/rejected": -2.5161328315734863,
302
- "logps/chosen": -257.56207275390625,
303
- "logps/rejected": -202.87477111816406,
304
- "loss": 0.2628,
305
- "rewards/accuracies": 0.8812500238418579,
306
- "rewards/chosen": 3.6292481422424316,
307
- "rewards/margins": 5.401675224304199,
308
- "rewards/rejected": -1.7724273204803467,
309
  "step": 190
310
  },
311
  {
312
- "epoch": 0.5118362124120281,
313
- "grad_norm": 441.7914810102142,
314
  "learning_rate": 2.2588337629081105e-07,
315
- "logits/chosen": -2.6288845539093018,
316
- "logits/rejected": -2.539705753326416,
317
- "logps/chosen": -272.2264709472656,
318
- "logps/rejected": -217.74020385742188,
319
- "loss": 0.2691,
320
- "rewards/accuracies": 0.8812500238418579,
321
- "rewards/chosen": 4.148322105407715,
322
- "rewards/margins": 6.209266662597656,
323
- "rewards/rejected": -2.0609450340270996,
324
  "step": 200
325
  },
326
  {
327
- "epoch": 0.5374280230326296,
328
- "grad_norm": 578.0076877712855,
329
  "learning_rate": 2.0805318802188303e-07,
330
- "logits/chosen": -2.6119117736816406,
331
- "logits/rejected": -2.5380859375,
332
- "logps/chosen": -265.3163146972656,
333
- "logps/rejected": -211.01132202148438,
334
- "loss": 0.4145,
335
- "rewards/accuracies": 0.824999988079071,
336
- "rewards/chosen": 2.868762969970703,
337
- "rewards/margins": 5.194035530090332,
338
- "rewards/rejected": -2.32527232170105,
339
  "step": 210
340
  },
341
  {
342
- "epoch": 0.5630198336532309,
343
- "grad_norm": 435.59772463261663,
344
  "learning_rate": 1.9015852890162436e-07,
345
- "logits/chosen": -2.628199577331543,
346
- "logits/rejected": -2.5644900798797607,
347
- "logps/chosen": -256.01708984375,
348
- "logps/rejected": -213.6505126953125,
349
- "loss": 0.302,
350
- "rewards/accuracies": 0.8812500238418579,
351
- "rewards/chosen": 2.8298840522766113,
352
- "rewards/margins": 4.771709442138672,
353
- "rewards/rejected": -1.9418258666992188,
354
  "step": 220
355
  },
356
  {
357
- "epoch": 0.5886116442738324,
358
- "grad_norm": 341.5067091186977,
359
  "learning_rate": 1.723426569670534e-07,
360
- "logits/chosen": -2.660271644592285,
361
- "logits/rejected": -2.5745387077331543,
362
- "logps/chosen": -265.7137145996094,
363
- "logps/rejected": -216.05184936523438,
364
- "loss": 0.2709,
365
- "rewards/accuracies": 0.862500011920929,
366
- "rewards/chosen": 3.8446857929229736,
367
- "rewards/margins": 5.413482666015625,
368
- "rewards/rejected": -1.5687971115112305,
369
  "step": 230
370
  },
371
  {
372
- "epoch": 0.6142034548944337,
373
- "grad_norm": 518.1474749065682,
374
  "learning_rate": 1.547481995140556e-07,
375
- "logits/chosen": -2.6460232734680176,
376
- "logits/rejected": -2.5979976654052734,
377
- "logps/chosen": -260.61566162109375,
378
- "logps/rejected": -219.18740844726562,
379
- "loss": 0.2732,
380
- "rewards/accuracies": 0.8812500238418579,
381
- "rewards/chosen": 3.5578017234802246,
382
- "rewards/margins": 6.3658447265625,
383
- "rewards/rejected": -2.808042287826538,
384
  "step": 240
385
  },
386
  {
387
- "epoch": 0.6397952655150352,
388
- "grad_norm": 488.31668996973804,
389
  "learning_rate": 1.375160112758885e-07,
390
- "logits/chosen": -2.657061815261841,
391
- "logits/rejected": -2.5865418910980225,
392
- "logps/chosen": -265.78448486328125,
393
- "logps/rejected": -221.42001342773438,
394
- "loss": 0.2763,
395
- "rewards/accuracies": 0.875,
396
- "rewards/chosen": 2.9970998764038086,
397
- "rewards/margins": 5.516369819641113,
398
- "rewards/rejected": -2.519270420074463,
399
  "step": 250
400
  },
401
  {
402
- "epoch": 0.6653870761356366,
403
- "grad_norm": 266.72245551152963,
404
  "learning_rate": 1.2078404679216862e-07,
405
- "logits/chosen": -2.648825168609619,
406
- "logits/rejected": -2.589503526687622,
407
- "logps/chosen": -260.22369384765625,
408
- "logps/rejected": -208.0442352294922,
409
- "loss": 0.2613,
410
- "rewards/accuracies": 0.875,
411
- "rewards/chosen": 2.818129301071167,
412
- "rewards/margins": 5.237301826477051,
413
- "rewards/rejected": -2.419172525405884,
414
  "step": 260
415
  },
416
  {
417
- "epoch": 0.690978886756238,
418
- "grad_norm": 410.4072035856202,
419
  "learning_rate": 1.0468625599573841e-07,
420
- "logits/chosen": -2.6281862258911133,
421
- "logits/rejected": -2.5414164066314697,
422
- "logps/chosen": -269.1915283203125,
423
- "logps/rejected": -228.3280792236328,
424
- "loss": 0.3168,
425
- "rewards/accuracies": 0.893750011920929,
426
- "rewards/chosen": 3.6961987018585205,
427
- "rewards/margins": 7.5373077392578125,
428
- "rewards/rejected": -3.841109037399292,
429
  "step": 270
430
  },
431
  {
432
- "epoch": 0.7165706973768394,
433
- "grad_norm": 230.87089413037836,
434
  "learning_rate": 8.935151185893727e-08,
435
- "logits/chosen": -2.653529644012451,
436
- "logits/rejected": -2.5968880653381348,
437
- "logps/chosen": -268.2784729003906,
438
- "logps/rejected": -218.708984375,
439
- "loss": 0.2585,
440
- "rewards/accuracies": 0.893750011920929,
441
- "rewards/chosen": 2.713934898376465,
442
- "rewards/margins": 5.596070289611816,
443
- "rewards/rejected": -2.8821353912353516,
444
  "step": 280
445
  },
446
  {
447
- "epoch": 0.7421625079974408,
448
- "grad_norm": 355.41024702928314,
449
  "learning_rate": 7.490257868414449e-08,
450
- "logits/chosen": -2.613981246948242,
451
- "logits/rejected": -2.5670533180236816,
452
- "logps/chosen": -266.4203186035156,
453
- "logps/rejected": -216.7047119140625,
454
- "loss": 0.2561,
455
- "rewards/accuracies": 0.875,
456
- "rewards/chosen": 2.7628657817840576,
457
- "rewards/margins": 5.556154727935791,
458
- "rewards/rejected": -2.793288469314575,
459
  "step": 290
460
  },
461
  {
462
- "epoch": 0.7677543186180422,
463
- "grad_norm": 334.3998771127154,
464
  "learning_rate": 6.145512929808013e-08,
465
- "logits/chosen": -2.6336982250213623,
466
- "logits/rejected": -2.579585552215576,
467
- "logps/chosen": -273.02044677734375,
468
- "logps/rejected": -220.3231964111328,
469
- "loss": 0.274,
470
- "rewards/accuracies": 0.893750011920929,
471
- "rewards/chosen": 2.7771525382995605,
472
- "rewards/margins": 5.427972316741943,
473
- "rewards/rejected": -2.650820016860962,
474
  "step": 300
475
  },
476
  {
477
- "epoch": 0.7933461292386437,
478
- "grad_norm": 248.8225361148432,
479
  "learning_rate": 4.911681901784197e-08,
480
- "logits/chosen": -2.5988125801086426,
481
- "logits/rejected": -2.5305018424987793,
482
- "logps/chosen": -276.5069885253906,
483
- "logps/rejected": -215.4237518310547,
484
- "loss": 0.3069,
485
- "rewards/accuracies": 0.893750011920929,
486
- "rewards/chosen": 2.883484125137329,
487
- "rewards/margins": 5.80063533782959,
488
- "rewards/rejected": -2.917151927947998,
489
  "step": 310
490
  },
491
  {
492
- "epoch": 0.818937939859245,
493
- "grad_norm": 378.21338988259265,
494
  "learning_rate": 3.79864238021667e-08,
495
- "logits/chosen": -2.621037006378174,
496
- "logits/rejected": -2.5753254890441895,
497
- "logps/chosen": -259.85833740234375,
498
- "logps/rejected": -204.21798706054688,
499
- "loss": 0.2691,
500
- "rewards/accuracies": 0.8999999761581421,
501
- "rewards/chosen": 2.5267233848571777,
502
- "rewards/margins": 5.2202558517456055,
503
- "rewards/rejected": -2.6935324668884277,
504
  "step": 320
505
  },
506
  {
507
- "epoch": 0.8445297504798465,
508
- "grad_norm": 354.9475645655247,
509
  "learning_rate": 2.8153049487556634e-08,
510
- "logits/chosen": -2.6436188220977783,
511
- "logits/rejected": -2.585820436477661,
512
- "logps/chosen": -263.09808349609375,
513
- "logps/rejected": -217.03341674804688,
514
- "loss": 0.2695,
515
- "rewards/accuracies": 0.8999999761581421,
516
- "rewards/chosen": 3.3003220558166504,
517
- "rewards/margins": 5.452363014221191,
518
- "rewards/rejected": -2.1520419120788574,
519
  "step": 330
520
  },
521
  {
522
- "epoch": 0.8701215611004478,
523
- "grad_norm": 298.79503920742496,
524
  "learning_rate": 1.9695418439836796e-08,
525
- "logits/chosen": -2.6284804344177246,
526
- "logits/rejected": -2.5717878341674805,
527
- "logps/chosen": -258.5690002441406,
528
- "logps/rejected": -210.4982147216797,
529
- "loss": 0.2354,
530
- "rewards/accuracies": 0.9312499761581421,
531
- "rewards/chosen": 3.1211013793945312,
532
- "rewards/margins": 5.934812545776367,
533
- "rewards/rejected": -2.813711643218994,
534
  "step": 340
535
  },
536
  {
537
- "epoch": 0.8957133717210493,
538
- "grad_norm": 256.56016530768545,
539
  "learning_rate": 1.2681239331945692e-08,
540
- "logits/chosen": -2.6408398151397705,
541
- "logits/rejected": -2.5925440788269043,
542
- "logps/chosen": -268.15728759765625,
543
- "logps/rejected": -226.1197967529297,
544
- "loss": 0.2666,
545
- "rewards/accuracies": 0.8812500238418579,
546
- "rewards/chosen": 3.046782970428467,
547
- "rewards/margins": 5.363730430603027,
548
- "rewards/rejected": -2.3169472217559814,
549
  "step": 350
550
  },
551
  {
552
- "epoch": 0.9213051823416507,
553
- "grad_norm": 316.87066881593194,
554
  "learning_rate": 7.166665093287538e-09,
555
- "logits/chosen": -2.6406874656677246,
556
- "logits/rejected": -2.5731348991394043,
557
- "logps/chosen": -273.30157470703125,
558
- "logps/rejected": -218.8077392578125,
559
- "loss": 0.3057,
560
- "rewards/accuracies": 0.90625,
561
- "rewards/chosen": 2.9576709270477295,
562
- "rewards/margins": 5.49710750579834,
563
- "rewards/rejected": -2.539436101913452,
564
  "step": 360
565
  },
566
  {
567
- "epoch": 0.946896992962252,
568
- "grad_norm": 569.0448969619218,
569
  "learning_rate": 3.1958433701019694e-09,
570
- "logits/chosen": -2.6003308296203613,
571
- "logits/rejected": -2.5643393993377686,
572
- "logps/chosen": -272.14678955078125,
573
- "logps/rejected": -228.38943481445312,
574
- "loss": 0.3023,
575
- "rewards/accuracies": 0.856249988079071,
576
- "rewards/chosen": 3.5620276927948,
577
- "rewards/margins": 5.470287322998047,
578
- "rewards/rejected": -1.908259391784668,
579
  "step": 370
580
  },
581
  {
582
- "epoch": 0.9724888035828535,
583
- "grad_norm": 206.4373391747023,
584
  "learning_rate": 8.005630957010012e-10,
585
- "logits/chosen": -2.6491971015930176,
586
- "logits/rejected": -2.6083197593688965,
587
- "logps/chosen": -261.6040344238281,
588
- "logps/rejected": -229.88302612304688,
589
- "loss": 0.273,
590
- "rewards/accuracies": 0.8812500238418579,
591
- "rewards/chosen": 3.3906753063201904,
592
- "rewards/margins": 5.430436611175537,
593
- "rewards/rejected": -2.039761543273926,
594
  "step": 380
595
  },
596
  {
597
- "epoch": 0.9980806142034548,
598
- "grad_norm": 529.0802276796763,
599
  "learning_rate": 0.0,
600
- "logits/chosen": -2.6596872806549072,
601
- "logits/rejected": -2.598179578781128,
602
- "logps/chosen": -249.18356323242188,
603
- "logps/rejected": -209.78622436523438,
604
- "loss": 0.2803,
605
- "rewards/accuracies": 0.893750011920929,
606
- "rewards/chosen": 2.9054455757141113,
607
- "rewards/margins": 5.3939642906188965,
608
- "rewards/rejected": -2.488518476486206,
609
  "step": 390
610
  },
611
  {
612
- "epoch": 0.9980806142034548,
613
  "step": 390,
614
  "total_flos": 0.0,
615
- "train_loss": 0.3119094243416419,
616
- "train_runtime": 6443.086,
617
- "train_samples_per_second": 7.76,
618
- "train_steps_per_second": 0.061
619
  }
620
  ],
621
  "logging_steps": 10,
@@ -623,18 +623,6 @@
623
  "num_input_tokens_seen": 0,
624
  "num_train_epochs": 1,
625
  "save_steps": 100,
626
- "stateful_callbacks": {
627
- "TrainerControl": {
628
- "args": {
629
- "should_epoch_stop": false,
630
- "should_evaluate": false,
631
- "should_log": false,
632
- "should_save": true,
633
- "should_training_stop": false
634
- },
635
- "attributes": {}
636
- }
637
- },
638
  "total_flos": 0.0,
639
  "train_batch_size": 4,
640
  "trial_name": null,
 
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.0,
13
+ "grad_norm": 746.9239950470109,
14
  "learning_rate": 1.0256410256410256e-08,
15
+ "logits/chosen": -2.5617921352386475,
16
+ "logits/rejected": -2.415619373321533,
17
  "logps/chosen": -258.1644592285156,
18
+ "logps/rejected": -191.65736389160156,
19
  "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
 
24
  "step": 1
25
  },
26
  {
27
+ "epoch": 0.03,
28
+ "grad_norm": 712.2130131356953,
29
  "learning_rate": 1.0256410256410255e-07,
30
+ "logits/chosen": -2.611403465270996,
31
+ "logits/rejected": -2.524329900741577,
32
+ "logps/chosen": -267.30902099609375,
33
+ "logps/rejected": -198.22096252441406,
34
+ "loss": 0.6986,
35
+ "rewards/accuracies": 0.4652777910232544,
36
+ "rewards/chosen": -0.010162555612623692,
37
+ "rewards/margins": 0.005689369514584541,
38
+ "rewards/rejected": -0.015851924195885658,
39
  "step": 10
40
  },
41
  {
42
+ "epoch": 0.05,
43
+ "grad_norm": 343.2600415646243,
44
  "learning_rate": 2.051282051282051e-07,
45
+ "logits/chosen": -2.631699800491333,
46
+ "logits/rejected": -2.5292506217956543,
47
+ "logps/chosen": -260.6354064941406,
48
+ "logps/rejected": -198.5509796142578,
49
+ "loss": 0.5075,
50
+ "rewards/accuracies": 0.793749988079071,
51
+ "rewards/chosen": 0.3450052738189697,
52
+ "rewards/margins": 0.5796785354614258,
53
+ "rewards/rejected": -0.23467323184013367,
54
  "step": 20
55
  },
56
  {
57
+ "epoch": 0.08,
58
+ "grad_norm": 460.51807031992456,
59
  "learning_rate": 3.076923076923077e-07,
60
+ "logits/chosen": -2.6552329063415527,
61
+ "logits/rejected": -2.5521411895751953,
62
+ "logps/chosen": -251.4678497314453,
63
+ "logps/rejected": -195.8263702392578,
64
+ "loss": 0.2598,
65
+ "rewards/accuracies": 0.918749988079071,
66
+ "rewards/chosen": 1.8457540273666382,
67
+ "rewards/margins": 2.781405210494995,
68
+ "rewards/rejected": -0.9356509447097778,
69
  "step": 30
70
  },
71
  {
72
+ "epoch": 0.1,
73
+ "grad_norm": 386.50821557830585,
74
  "learning_rate": 3.999919890759704e-07,
75
+ "logits/chosen": -2.6647181510925293,
76
+ "logits/rejected": -2.5375823974609375,
77
+ "logps/chosen": -243.2847442626953,
78
+ "logps/rejected": -199.77682495117188,
79
+ "loss": 0.1941,
80
+ "rewards/accuracies": 0.90625,
81
+ "rewards/chosen": 3.5360469818115234,
82
+ "rewards/margins": 4.843508243560791,
83
+ "rewards/rejected": -1.3074613809585571,
84
  "step": 40
85
  },
86
  {
87
+ "epoch": 0.13,
88
+ "grad_norm": 288.39653383863197,
89
  "learning_rate": 3.9903145446619833e-07,
90
+ "logits/chosen": -2.6978812217712402,
91
+ "logits/rejected": -2.5775580406188965,
92
+ "logps/chosen": -256.521240234375,
93
+ "logps/rejected": -205.7974853515625,
94
+ "loss": 0.2479,
95
+ "rewards/accuracies": 0.8999999761581421,
96
+ "rewards/chosen": 5.355835914611816,
97
+ "rewards/margins": 6.771894931793213,
98
+ "rewards/rejected": -1.416058897972107,
99
  "step": 50
100
  },
101
  {
102
+ "epoch": 0.15,
103
+ "grad_norm": 288.38654209122495,
104
  "learning_rate": 3.9647754748313294e-07,
105
+ "logits/chosen": -2.67028546333313,
106
+ "logits/rejected": -2.5481607913970947,
107
+ "logps/chosen": -258.8858337402344,
108
+ "logps/rejected": -193.81582641601562,
109
+ "loss": 0.2103,
110
+ "rewards/accuracies": 0.925000011920929,
111
+ "rewards/chosen": 4.908812522888184,
112
+ "rewards/margins": 7.172672271728516,
113
+ "rewards/rejected": -2.263859748840332,
114
  "step": 60
115
  },
116
  {
117
+ "epoch": 0.18,
118
+ "grad_norm": 271.6769920625721,
119
  "learning_rate": 3.9235071376357917e-07,
120
+ "logits/chosen": -2.653860569000244,
121
+ "logits/rejected": -2.506782293319702,
122
+ "logps/chosen": -288.6905517578125,
123
+ "logps/rejected": -225.65853881835938,
124
+ "loss": 0.1949,
125
+ "rewards/accuracies": 0.9312499761581421,
126
+ "rewards/chosen": 4.613911151885986,
127
+ "rewards/margins": 8.780401229858398,
128
+ "rewards/rejected": -4.166489601135254,
129
  "step": 70
130
  },
131
  {
132
+ "epoch": 0.2,
133
+ "grad_norm": 381.16800166622966,
134
  "learning_rate": 3.8668399121531677e-07,
135
+ "logits/chosen": -2.605896472930908,
136
+ "logits/rejected": -2.4767518043518066,
137
+ "logps/chosen": -260.63116455078125,
138
+ "logps/rejected": -215.01742553710938,
139
+ "loss": 0.183,
140
+ "rewards/accuracies": 0.925000011920929,
141
+ "rewards/chosen": 3.8556149005889893,
142
+ "rewards/margins": 8.602324485778809,
143
+ "rewards/rejected": -4.74670934677124,
144
  "step": 80
145
  },
146
  {
147
+ "epoch": 0.23,
148
+ "grad_norm": 609.4338656498198,
149
  "learning_rate": 3.795227455278028e-07,
150
+ "logits/chosen": -2.646833896636963,
151
+ "logits/rejected": -2.5277552604675293,
152
+ "logps/chosen": -252.8790740966797,
153
+ "logps/rejected": -193.67758178710938,
154
+ "loss": 0.2079,
155
+ "rewards/accuracies": 0.893750011920929,
156
+ "rewards/chosen": 3.7215206623077393,
157
+ "rewards/margins": 7.463395118713379,
158
+ "rewards/rejected": -3.7418742179870605,
159
  "step": 90
160
  },
161
  {
162
+ "epoch": 0.26,
163
+ "grad_norm": 259.36924157384516,
164
  "learning_rate": 3.7092430699120403e-07,
165
+ "logits/chosen": -2.680565357208252,
166
+ "logits/rejected": -2.561267375946045,
167
+ "logps/chosen": -233.5777587890625,
168
+ "logps/rejected": -198.25820922851562,
169
+ "loss": 0.1876,
170
+ "rewards/accuracies": 0.918749988079071,
171
+ "rewards/chosen": 4.603877067565918,
172
+ "rewards/margins": 7.88943338394165,
173
+ "rewards/rejected": -3.285557270050049,
174
  "step": 100
175
  },
176
  {
177
+ "epoch": 0.28,
178
+ "grad_norm": 344.20413939741326,
179
  "learning_rate": 3.6095751153125107e-07,
180
+ "logits/chosen": -2.6716485023498535,
181
+ "logits/rejected": -2.5542380809783936,
182
+ "logps/chosen": -252.8291778564453,
183
+ "logps/rejected": -193.85076904296875,
184
+ "loss": 0.1768,
185
+ "rewards/accuracies": 0.9437500238418579,
186
+ "rewards/chosen": 5.924564361572266,
187
+ "rewards/margins": 8.784211158752441,
188
+ "rewards/rejected": -2.8596463203430176,
189
  "step": 110
190
  },
191
  {
192
+ "epoch": 0.31,
193
+ "grad_norm": 298.19415193137047,
194
  "learning_rate": 3.497021496342202e-07,
195
+ "logits/chosen": -2.648298501968384,
196
+ "logits/rejected": -2.5618278980255127,
197
+ "logps/chosen": -257.97100830078125,
198
+ "logps/rejected": -214.90670776367188,
199
+ "loss": 0.2066,
200
+ "rewards/accuracies": 0.925000011920929,
201
+ "rewards/chosen": 4.884228229522705,
202
+ "rewards/margins": 9.177003860473633,
203
+ "rewards/rejected": -4.292778015136719,
204
  "step": 120
205
  },
206
  {
207
+ "epoch": 0.33,
208
+ "grad_norm": 143.10334660096893,
209
  "learning_rate": 3.372483275737467e-07,
210
+ "logits/chosen": -2.6251893043518066,
211
+ "logits/rejected": -2.5034987926483154,
212
+ "logps/chosen": -250.27548217773438,
213
+ "logps/rejected": -191.64712524414062,
214
+ "loss": 0.1938,
215
+ "rewards/accuracies": 0.9125000238418579,
216
+ "rewards/chosen": 3.4552054405212402,
217
+ "rewards/margins": 9.01502799987793,
218
+ "rewards/rejected": -5.559823513031006,
219
  "step": 130
220
  },
221
  {
222
+ "epoch": 0.36,
223
+ "grad_norm": 396.95461669932075,
224
  "learning_rate": 3.2369574605325094e-07,
225
+ "logits/chosen": -2.6585159301757812,
226
+ "logits/rejected": -2.5590972900390625,
227
+ "logps/chosen": -270.13775634765625,
228
+ "logps/rejected": -210.39706420898438,
229
+ "loss": 0.2585,
230
+ "rewards/accuracies": 0.9312499761581421,
231
+ "rewards/chosen": 3.534144639968872,
232
+ "rewards/margins": 8.91390609741211,
233
+ "rewards/rejected": -5.379761695861816,
234
  "step": 140
235
  },
236
  {
237
+ "epoch": 0.38,
238
+ "grad_norm": 248.0981758080623,
239
  "learning_rate": 3.091529020389009e-07,
240
+ "logits/chosen": -2.6475913524627686,
241
+ "logits/rejected": -2.5541744232177734,
242
+ "logps/chosen": -262.02142333984375,
243
+ "logps/rejected": -214.84573364257812,
244
+ "loss": 0.2768,
245
+ "rewards/accuracies": 0.918749988079071,
246
+ "rewards/chosen": 4.376931190490723,
247
+ "rewards/margins": 9.941673278808594,
248
+ "rewards/rejected": -5.564742088317871,
249
  "step": 150
250
  },
251
  {
252
+ "epoch": 0.41,
253
+ "grad_norm": 531.5498022382194,
254
  "learning_rate": 2.937362201729407e-07,
255
+ "logits/chosen": -2.6315155029296875,
256
+ "logits/rejected": -2.5306897163391113,
257
+ "logps/chosen": -240.9332275390625,
258
+ "logps/rejected": -186.1042022705078,
259
+ "loss": 0.2307,
260
+ "rewards/accuracies": 0.9437500238418579,
261
+ "rewards/chosen": 4.558727741241455,
262
+ "rewards/margins": 8.91313362121582,
263
+ "rewards/rejected": -4.354406356811523,
264
  "step": 160
265
  },
266
  {
267
+ "epoch": 0.44,
268
+ "grad_norm": 204.85526693169226,
269
  "learning_rate": 2.77569120720971e-07,
270
+ "logits/chosen": -2.6248703002929688,
271
+ "logits/rejected": -2.520388126373291,
272
+ "logps/chosen": -259.86029052734375,
273
+ "logps/rejected": -202.1363067626953,
274
+ "loss": 0.1656,
275
+ "rewards/accuracies": 0.956250011920929,
276
+ "rewards/chosen": 4.666792869567871,
277
+ "rewards/margins": 9.15965747833252,
278
+ "rewards/rejected": -4.492865085601807,
279
  "step": 170
280
  },
281
  {
282
+ "epoch": 0.46,
283
+ "grad_norm": 346.922389452496,
284
  "learning_rate": 2.6078103151484935e-07,
285
+ "logits/chosen": -2.6694693565368652,
286
+ "logits/rejected": -2.5681331157684326,
287
+ "logps/chosen": -235.467041015625,
288
+ "logps/rejected": -193.00096130371094,
289
+ "loss": 0.2092,
290
+ "rewards/accuracies": 0.9312499761581421,
291
+ "rewards/chosen": 3.1894354820251465,
292
+ "rewards/margins": 8.24107551574707,
293
+ "rewards/rejected": -5.051640033721924,
294
  "step": 180
295
  },
296
  {
297
+ "epoch": 0.49,
298
+ "grad_norm": 469.7545752914211,
299
  "learning_rate": 2.435063518012335e-07,
300
+ "logits/chosen": -2.627861499786377,
301
+ "logits/rejected": -2.537619113922119,
302
+ "logps/chosen": -257.1391906738281,
303
+ "logps/rejected": -208.8784637451172,
304
+ "loss": 0.2104,
305
+ "rewards/accuracies": 0.918749988079071,
306
+ "rewards/chosen": 3.447594404220581,
307
+ "rewards/margins": 8.870077133178711,
308
+ "rewards/rejected": -5.422482013702393,
309
  "step": 190
310
  },
311
  {
312
+ "epoch": 0.51,
313
+ "grad_norm": 532.7452067278298,
314
  "learning_rate": 2.2588337629081105e-07,
315
+ "logits/chosen": -2.669545888900757,
316
+ "logits/rejected": -2.5410807132720947,
317
+ "logps/chosen": -270.0424499511719,
318
+ "logps/rejected": -204.46751403808594,
319
+ "loss": 0.1802,
320
+ "rewards/accuracies": 0.925000011920929,
321
+ "rewards/chosen": 5.255965709686279,
322
+ "rewards/margins": 10.433439254760742,
323
+ "rewards/rejected": -5.177473068237305,
324
  "step": 200
325
  },
326
  {
327
+ "epoch": 0.54,
328
+ "grad_norm": 489.8003677533757,
329
  "learning_rate": 2.0805318802188303e-07,
330
+ "logits/chosen": -2.656721353530884,
331
+ "logits/rejected": -2.548938035964966,
332
+ "logps/chosen": -262.646728515625,
333
+ "logps/rejected": -213.17138671875,
334
+ "loss": 0.1767,
335
+ "rewards/accuracies": 0.925000011920929,
336
+ "rewards/chosen": 4.205015659332275,
337
+ "rewards/margins": 9.936458587646484,
338
+ "rewards/rejected": -5.731442451477051,
339
  "step": 210
340
  },
341
  {
342
+ "epoch": 0.56,
343
+ "grad_norm": 398.2303862264868,
344
  "learning_rate": 1.9015852890162436e-07,
345
+ "logits/chosen": -2.65899658203125,
346
+ "logits/rejected": -2.564746856689453,
347
+ "logps/chosen": -255.7983856201172,
348
+ "logps/rejected": -209.6800079345703,
349
+ "loss": 0.2859,
350
+ "rewards/accuracies": 0.918749988079071,
351
+ "rewards/chosen": 3.3606581687927246,
352
+ "rewards/margins": 9.43185806274414,
353
+ "rewards/rejected": -6.071199417114258,
354
  "step": 220
355
  },
356
  {
357
+ "epoch": 0.59,
358
+ "grad_norm": 345.5121551134584,
359
  "learning_rate": 1.723426569670534e-07,
360
+ "logits/chosen": -2.685997247695923,
361
+ "logits/rejected": -2.5591225624084473,
362
+ "logps/chosen": -264.55902099609375,
363
+ "logps/rejected": -209.5423126220703,
364
+ "loss": 0.1839,
365
+ "rewards/accuracies": 0.893750011920929,
366
+ "rewards/chosen": 4.420236587524414,
367
+ "rewards/margins": 9.53612232208252,
368
+ "rewards/rejected": -5.1158857345581055,
369
  "step": 230
370
  },
371
  {
372
+ "epoch": 0.61,
373
+ "grad_norm": 366.69298678156184,
374
  "learning_rate": 1.547481995140556e-07,
375
+ "logits/chosen": -2.6587154865264893,
376
+ "logits/rejected": -2.5532352924346924,
377
+ "logps/chosen": -258.416259765625,
378
+ "logps/rejected": -213.5823974609375,
379
+ "loss": 0.1755,
380
+ "rewards/accuracies": 0.9312499761581421,
381
+ "rewards/chosen": 4.108377456665039,
382
+ "rewards/margins": 10.194208145141602,
383
+ "rewards/rejected": -6.085830211639404,
384
  "step": 240
385
  },
386
  {
387
+ "epoch": 0.64,
388
+ "grad_norm": 355.57895419726833,
389
  "learning_rate": 1.375160112758885e-07,
390
+ "logits/chosen": -2.6688976287841797,
391
+ "logits/rejected": -2.559256076812744,
392
+ "logps/chosen": -264.78363037109375,
393
+ "logps/rejected": -219.9192352294922,
394
+ "loss": 0.1867,
395
+ "rewards/accuracies": 0.956250011920929,
396
+ "rewards/chosen": 3.6294102668762207,
397
+ "rewards/margins": 10.07470417022705,
398
+ "rewards/rejected": -6.445294380187988,
399
  "step": 250
400
  },
401
  {
402
+ "epoch": 0.67,
403
+ "grad_norm": 284.33502896066835,
404
  "learning_rate": 1.2078404679216862e-07,
405
+ "logits/chosen": -2.6680378913879395,
406
+ "logits/rejected": -2.5718884468078613,
407
+ "logps/chosen": -258.0495910644531,
408
+ "logps/rejected": -212.5093231201172,
409
+ "loss": 0.1465,
410
+ "rewards/accuracies": 0.949999988079071,
411
+ "rewards/chosen": 3.9134533405303955,
412
+ "rewards/margins": 9.667370796203613,
413
+ "rewards/rejected": -5.753917694091797,
414
  "step": 260
415
  },
416
  {
417
+ "epoch": 0.69,
418
+ "grad_norm": 281.9194802132945,
419
  "learning_rate": 1.0468625599573841e-07,
420
+ "logits/chosen": -2.6398463249206543,
421
+ "logits/rejected": -2.5165765285491943,
422
+ "logps/chosen": -268.0271911621094,
423
+ "logps/rejected": -219.8000946044922,
424
+ "loss": 0.1901,
425
+ "rewards/accuracies": 0.9437500238418579,
426
+ "rewards/chosen": 4.4066267013549805,
427
+ "rewards/margins": 10.626806259155273,
428
+ "rewards/rejected": -6.220180511474609,
429
  "step": 270
430
  },
431
  {
432
+ "epoch": 0.72,
433
+ "grad_norm": 262.1937325706126,
434
  "learning_rate": 8.935151185893727e-08,
435
+ "logits/chosen": -2.6787869930267334,
436
+ "logits/rejected": -2.56150484085083,
437
+ "logps/chosen": -266.0087585449219,
438
+ "logps/rejected": -210.2588348388672,
439
+ "loss": 0.1777,
440
+ "rewards/accuracies": 0.918749988079071,
441
+ "rewards/chosen": 3.8459548950195312,
442
+ "rewards/margins": 10.500468254089355,
443
+ "rewards/rejected": -6.654513359069824,
444
  "step": 280
445
  },
446
  {
447
+ "epoch": 0.74,
448
+ "grad_norm": 267.1170614305438,
449
  "learning_rate": 7.490257868414449e-08,
450
+ "logits/chosen": -2.6337618827819824,
451
+ "logits/rejected": -2.5607006549835205,
452
+ "logps/chosen": -266.64654541015625,
453
+ "logps/rejected": -227.7572479248047,
454
+ "loss": 0.1881,
455
+ "rewards/accuracies": 0.9375,
456
+ "rewards/chosen": 3.585491180419922,
457
+ "rewards/margins": 9.039448738098145,
458
+ "rewards/rejected": -5.453957557678223,
459
  "step": 290
460
  },
461
  {
462
+ "epoch": 0.77,
463
+ "grad_norm": 337.4409958446907,
464
  "learning_rate": 6.145512929808013e-08,
465
+ "logits/chosen": -2.6632189750671387,
466
+ "logits/rejected": -2.548957109451294,
467
+ "logps/chosen": -270.69012451171875,
468
+ "logps/rejected": -219.18807983398438,
469
+ "loss": 0.1755,
470
+ "rewards/accuracies": 0.956250011920929,
471
+ "rewards/chosen": 3.8013317584991455,
472
+ "rewards/margins": 10.349567413330078,
473
+ "rewards/rejected": -6.5482354164123535,
474
  "step": 300
475
  },
476
  {
477
+ "epoch": 0.79,
478
+ "grad_norm": 161.29540411078162,
479
  "learning_rate": 4.911681901784197e-08,
480
+ "logits/chosen": -2.620042324066162,
481
+ "logits/rejected": -2.5278143882751465,
482
+ "logps/chosen": -275.6562805175781,
483
+ "logps/rejected": -213.7178192138672,
484
+ "loss": 0.188,
485
+ "rewards/accuracies": 0.9312499761581421,
486
+ "rewards/chosen": 3.946884870529175,
487
+ "rewards/margins": 10.046659469604492,
488
+ "rewards/rejected": -6.0997748374938965,
489
  "step": 310
490
  },
491
  {
492
+ "epoch": 0.82,
493
+ "grad_norm": 394.51415258074036,
494
  "learning_rate": 3.79864238021667e-08,
495
+ "logits/chosen": -2.6458163261413574,
496
+ "logits/rejected": -2.5520365238189697,
497
+ "logps/chosen": -258.85113525390625,
498
+ "logps/rejected": -212.78414916992188,
499
+ "loss": 0.2281,
500
+ "rewards/accuracies": 0.887499988079071,
501
+ "rewards/chosen": 3.033263683319092,
502
+ "rewards/margins": 9.521443367004395,
503
+ "rewards/rejected": -6.4881792068481445,
504
  "step": 320
505
  },
506
  {
507
+ "epoch": 0.84,
508
+ "grad_norm": 235.27856577918993,
509
  "learning_rate": 2.8153049487556634e-08,
510
+ "logits/chosen": -2.6645383834838867,
511
+ "logits/rejected": -2.5732433795928955,
512
+ "logps/chosen": -260.6364440917969,
513
+ "logps/rejected": -212.11898803710938,
514
+ "loss": 0.1828,
515
+ "rewards/accuracies": 0.9750000238418579,
516
+ "rewards/chosen": 4.755698204040527,
517
+ "rewards/margins": 10.375238418579102,
518
+ "rewards/rejected": -5.619540214538574,
519
  "step": 330
520
  },
521
  {
522
+ "epoch": 0.87,
523
+ "grad_norm": 326.32410341900527,
524
  "learning_rate": 1.9695418439836796e-08,
525
+ "logits/chosen": -2.6518936157226562,
526
+ "logits/rejected": -2.555978298187256,
527
+ "logps/chosen": -256.3904724121094,
528
+ "logps/rejected": -200.47169494628906,
529
+ "loss": 0.2192,
530
+ "rewards/accuracies": 0.9437500238418579,
531
+ "rewards/chosen": 4.210465908050537,
532
+ "rewards/margins": 10.564642906188965,
533
+ "rewards/rejected": -6.3541765213012695,
534
  "step": 340
535
  },
536
  {
537
+ "epoch": 0.9,
538
+ "grad_norm": 356.14147259437647,
539
  "learning_rate": 1.2681239331945692e-08,
540
+ "logits/chosen": -2.666987895965576,
541
+ "logits/rejected": -2.55633544921875,
542
+ "logps/chosen": -265.26556396484375,
543
+ "logps/rejected": -217.6700897216797,
544
+ "loss": 0.2137,
545
+ "rewards/accuracies": 0.918749988079071,
546
+ "rewards/chosen": 3.833866596221924,
547
+ "rewards/margins": 9.782751083374023,
548
+ "rewards/rejected": -5.9488844871521,
549
  "step": 350
550
  },
551
  {
552
+ "epoch": 0.92,
553
+ "grad_norm": 330.15606711737206,
554
  "learning_rate": 7.166665093287538e-09,
555
+ "logits/chosen": -2.6728262901306152,
556
+ "logits/rejected": -2.573294162750244,
557
+ "logps/chosen": -271.0104675292969,
558
+ "logps/rejected": -214.70834350585938,
559
+ "loss": 0.1861,
560
+ "rewards/accuracies": 0.949999988079071,
561
+ "rewards/chosen": 4.187949180603027,
562
+ "rewards/margins": 10.108842849731445,
563
+ "rewards/rejected": -5.920893669128418,
564
  "step": 360
565
  },
566
  {
567
+ "epoch": 0.95,
568
+ "grad_norm": 244.55642297419294,
569
  "learning_rate": 3.1958433701019694e-09,
570
+ "logits/chosen": -2.6283528804779053,
571
+ "logits/rejected": -2.5266776084899902,
572
+ "logps/chosen": -269.41925048828125,
573
+ "logps/rejected": -217.0562744140625,
574
+ "loss": 0.1859,
575
+ "rewards/accuracies": 0.9125000238418579,
576
+ "rewards/chosen": 4.10398006439209,
577
+ "rewards/margins": 9.084988594055176,
578
+ "rewards/rejected": -4.981008052825928,
579
  "step": 370
580
  },
581
  {
582
+ "epoch": 0.97,
583
+ "grad_norm": 197.28407297465554,
584
  "learning_rate": 8.005630957010012e-10,
585
+ "logits/chosen": -2.682586193084717,
586
+ "logits/rejected": -2.5935988426208496,
587
+ "logps/chosen": -260.6213073730469,
588
+ "logps/rejected": -221.34423828125,
589
+ "loss": 0.1775,
590
+ "rewards/accuracies": 0.918749988079071,
591
+ "rewards/chosen": 4.256922245025635,
592
+ "rewards/margins": 9.350181579589844,
593
+ "rewards/rejected": -5.093258857727051,
594
  "step": 380
595
  },
596
  {
597
+ "epoch": 1.0,
598
+ "grad_norm": 504.7364263980203,
599
  "learning_rate": 0.0,
600
+ "logits/chosen": -2.686406135559082,
601
+ "logits/rejected": -2.582782745361328,
602
+ "logps/chosen": -248.7636260986328,
603
+ "logps/rejected": -209.47628784179688,
604
+ "loss": 0.1791,
605
+ "rewards/accuracies": 0.9312499761581421,
606
+ "rewards/chosen": 3.9559149742126465,
607
+ "rewards/margins": 10.19153118133545,
608
+ "rewards/rejected": -6.235617160797119,
609
  "step": 390
610
  },
611
  {
612
+ "epoch": 1.0,
613
  "step": 390,
614
  "total_flos": 0.0,
615
+ "train_loss": 0.22171047773116676,
616
+ "train_runtime": 5890.091,
617
+ "train_samples_per_second": 8.489,
618
+ "train_steps_per_second": 0.066
619
  }
620
  ],
621
  "logging_steps": 10,
 
623
  "num_input_tokens_seen": 0,
624
  "num_train_epochs": 1,
625
  "save_steps": 100,
626
  "total_flos": 0.0,
627
  "train_batch_size": 4,
628
  "trial_name": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0a0444b55e899bda2b049d57e26db7ba9934f12da1827bec93bef3d39b6e277a
+ oid sha256:aa0e00d3aa1ab0a0ef1dd07b8b8bd52c64ef5747a57df47646eee4b22e879a38
- size 6264
+ size 6328
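
On the trainer_state.json log above: the per-step metric names (rewards/chosen, rewards/rejected, rewards/margins, logps, logits) match those emitted by TRL's DPOTrainer, which presumably produced this run. There, rewards/chosen and rewards/rejected are beta-scaled log-probability ratios against the reference model and rewards/margins is simply their difference (the beta value itself is not recorded in this diff). A quick check against the final step-390 entry of the new log:

# Values copied from the step-390 entry of the updated trainer_state.json.
rewards_chosen = 3.9559149742126465
rewards_rejected = -6.235617160797119
logged_margin = 10.19153118133545

# In TRL's DPOTrainer, rewards/margins = rewards/chosen - rewards/rejected,
# so the three logged columns should agree up to floating-point noise.
assert abs((rewards_chosen - rewards_rejected) - logged_margin) < 1e-4
print("margin check:", rewards_chosen - rewards_rejected)
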