RikkiXu committed on
Commit 5ab60fa
1 Parent(s): 93e9573

Model save

README.md CHANGED
@@ -15,15 +15,15 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model was trained from scratch on the None dataset.
 It achieves the following results on the evaluation set:
- - Loss: 1.1843
- - Rewards/chosen: -5.6098
- - Rewards/rejected: -5.9639
- - Rewards/accuracies: 0.5117
- - Rewards/margins: 0.3541
- - Logps/rejected: -1114.7808
- - Logps/chosen: -951.4574
- - Logits/rejected: -7.9900
- - Logits/chosen: -7.4446
+ - Loss: 6.9230
+ - Rewards/chosen: -4.5175
+ - Rewards/rejected: 0.4288
+ - Rewards/accuracies: 0.3164
+ - Rewards/margins: -4.9464
+ - Logps/rejected: -517.5300
+ - Logps/chosen: -399.5095
+ - Logits/rejected: -4.8908
+ - Logits/chosen: -4.6604
 
 ## Model description
 
@@ -58,16 +58,16 @@ The following hyperparameters were used during training:
 
 ### Training results
 
- | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
- |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.4015 | 0.26 | 100 | 0.9856 | -4.3603 | -4.6945 | 0.5273 | 0.3341 | -987.8358 | -826.5081 | -6.7933 | -6.4109 |
- | 0.3649 | 0.53 | 200 | 1.1239 | -4.8760 | -5.1429 | 0.4883 | 0.2669 | -1032.6809 | -878.0756 | -7.6378 | -7.1525 |
- | 0.3506 | 0.79 | 300 | 1.1843 | -5.6098 | -5.9639 | 0.5117 | 0.3541 | -1114.7808 | -951.4574 | -7.9900 | -7.4446 |
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.5909 | 0.2635 | 100 | 6.6534 | -3.0090 | 2.8904 | 0.2773 | -5.8994 | -512.6068 | -396.4924 | -4.8508 | -4.6121 |
+ | 0.7239 | 0.5270 | 200 | 8.0720 | -2.8191 | 3.5065 | 0.2734 | -6.3256 | -511.3747 | -396.1127 | -4.9896 | -4.7715 |
+ | 0.5556 | 0.7905 | 300 | 6.9230 | -4.5175 | 0.4288 | 0.3164 | -4.9464 | -517.5300 | -399.5095 | -4.8908 | -4.6604 |
 
 
 ### Framework versions
 
- - Transformers 4.38.2
+ - Transformers 4.41.1
 - Pytorch 2.1.2+cu118
 - Datasets 2.16.1
- - Tokenizers 0.15.2
+ - Tokenizers 0.19.1
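
The reward columns in the updated card follow the usual preference-optimization bookkeeping (they read like TRL DPO-style logs, although the card itself does not say so): `Rewards/margins` is `Rewards/chosen` minus `Rewards/rejected`, and `Rewards/accuracies` is the fraction of eval pairs whose chosen response scores above the rejected one. A minimal sanity check against the final eval row above, as a sketch:

```python
# Values copied from the step-300 eval row in the updated README.
rewards_chosen = -4.5175
rewards_rejected = 0.4288
reported_margin = -4.9464

# Margin is chosen minus rejected (standard preference-reward bookkeeping).
computed_margin = rewards_chosen - rewards_rejected
assert abs(computed_margin - reported_margin) < 1e-3

print(f"margin = {computed_margin:.4f}")  # -4.9463, matching -4.9464 up to rounding
```

A negative margin with an accuracy of 0.3164 means the rejected responses now receive higher implicit rewards than the chosen ones on the eval set, consistent with the much higher validation loss in this revision (6.92 vs 1.18 previously).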
all_results.json CHANGED
@@ -1,8 +1,9 @@
 {
-    "epoch": 1.0,
-    "train_loss": 0.39733357479830217,
-    "train_runtime": 5845.1464,
+    "epoch": 0.9986824769433466,
+    "total_flos": 0.0,
+    "train_loss": 0.5517442987587962,
+    "train_runtime": 6181.8185,
     "train_samples": 48530,
-    "train_samples_per_second": 8.303,
-    "train_steps_per_second": 0.065
+    "train_samples_per_second": 7.85,
+    "train_steps_per_second": 0.061
 }
generation_config.json CHANGED
@@ -2,5 +2,5 @@
     "_from_model_config": true,
     "bos_token_id": 1,
     "eos_token_id": 32000,
-    "transformers_version": "4.38.2"
+    "transformers_version": "4.41.1"
 }
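
Only `transformers_version` changes here; the generation defaults themselves are untouched. For reference, a minimal sketch of reading these defaults back with the `transformers` `GenerationConfig` API (the local `./checkpoint` path is a placeholder for wherever this repo is downloaded):

```python
from transformers import GenerationConfig

# "./checkpoint" is a hypothetical local copy of this repository.
gen_config = GenerationConfig.from_pretrained("./checkpoint")
print(gen_config.bos_token_id, gen_config.eos_token_id)  # 1 32000

# Equivalent construction from the values in the diff above.
manual = GenerationConfig(bos_token_id=1, eos_token_id=32000)
```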
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:f0448d3619348a37bb87330d8a16f03f2db67a5e69f6d423351f4de49ba701b4
+ oid sha256:fad6798eb6bb6153f6b5aff19d3bc5cffa1aeb40bf26f57d5a587788a415d08c
 size 4943178720
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:0afb427ab7948f6418557c415a2087f26e4a2bcf506844122d917521dfff33e5
+ oid sha256:20ca669573e568702fa6d40967a3d29846dc5ecf876326abd24dbc1904e69161
 size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:a88ed491b35c8995c182b2bbbff2fb9fb8eab8e0c1d499eee5dfc7d7e795619f
+ oid sha256:ddf6a93711a98049fe876e8b35960fb84be7446726bf529c7236bf8c1e7f0d9d
 size 4540532728
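
The three `.safetensors` entries above are Git LFS pointer files rather than the weights themselves: each pointer records the SHA-256 (`oid`) and byte size of the real shard, and re-uploading a shard only rewrites its pointer. A minimal sketch for checking a downloaded shard against its pointer, assuming the shard sits in the current directory:

```python
import hashlib
import os


def verify_lfs_pointer(path: str, expected_sha256: str, expected_size: int) -> bool:
    """Compare a downloaded file against the oid/size recorded in its LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256


# Values copied from the new pointer for the first shard above.
print(verify_lfs_pointer(
    "model-00001-of-00003.safetensors",
    "fad6798eb6bb6153f6b5aff19d3bc5cffa1aeb40bf26f57d5a587788a415d08c",
    4943178720,
))
```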
runs/May29_01-45-07_n136-082-130/events.out.tfevents.1716918651.n136-082-130.2665532.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:3ddf24f553333828786988c3971d9d1022e8757f1230b262d1d8425c990e02ee
- size 28505
+ oid sha256:2856f09a0f9bb2d9198cc98a815d742f99ea9ee604c8ae1f64256658c1973601
+ size 33675
train_results.json CHANGED
@@ -1,8 +1,9 @@
 {
-    "epoch": 1.0,
-    "train_loss": 0.39733357479830217,
-    "train_runtime": 5845.1464,
+    "epoch": 0.9986824769433466,
+    "total_flos": 0.0,
+    "train_loss": 0.5517442987587962,
+    "train_runtime": 6181.8185,
     "train_samples": 48530,
-    "train_samples_per_second": 8.303,
-    "train_steps_per_second": 0.065
+    "train_samples_per_second": 7.85,
+    "train_steps_per_second": 0.061
 }
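
`train_results.json` carries the same aggregate statistics as `all_results.json` above, and the throughput figures are internally consistent with the 379 optimizer steps recorded in `trainer_state.json` below: samples per second works out to roughly `train_samples / train_runtime`, and steps per second to `379 / train_runtime`. A quick check, as a sketch:

```python
# Aggregate stats from the new train_results.json / all_results.json.
train_runtime = 6181.8185
train_samples = 48530
total_steps = 379  # final step count reported in trainer_state.json

print(round(train_samples / train_runtime, 2))  # 7.85  (reported: 7.85)
print(round(total_steps / train_runtime, 3))    # 0.061 (reported: 0.061)
```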
trainer_state.json CHANGED
@@ -9,8 +9,8 @@
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 0.0,
13
- "grad_norm": 34.01362102288599,
14
  "learning_rate": 1.3157894736842104e-08,
15
  "logits/chosen": -4.685327529907227,
16
  "logits/rejected": -4.87608528137207,
@@ -24,616 +24,616 @@
24
  "step": 1
25
  },
26
  {
27
- "epoch": 0.03,
28
- "grad_norm": 33.835137410082986,
29
  "learning_rate": 1.3157894736842104e-07,
30
- "logits/chosen": -4.499408721923828,
31
- "logits/rejected": -4.84108829498291,
32
- "logps/chosen": -223.5843048095703,
33
- "logps/rejected": -160.73016357421875,
34
- "loss": 0.6929,
35
- "rewards/accuracies": 0.4375,
36
- "rewards/chosen": 0.0008713232818990946,
37
- "rewards/margins": 3.9665635995334014e-05,
38
- "rewards/rejected": 0.000831657787784934,
39
  "step": 10
40
  },
41
  {
42
- "epoch": 0.05,
43
- "grad_norm": 32.9122864599857,
44
  "learning_rate": 2.631578947368421e-07,
45
- "logits/chosen": -4.521907806396484,
46
- "logits/rejected": -4.8204779624938965,
47
- "logps/chosen": -220.248779296875,
48
- "logps/rejected": -173.30508422851562,
49
- "loss": 0.6818,
50
- "rewards/accuracies": 0.768750011920929,
51
- "rewards/chosen": 0.01710951328277588,
52
- "rewards/margins": 0.023774703964591026,
53
- "rewards/rejected": -0.006665193475782871,
54
  "step": 20
55
  },
56
  {
57
- "epoch": 0.08,
58
- "grad_norm": 32.91997500077958,
59
  "learning_rate": 3.9473684210526315e-07,
60
- "logits/chosen": -4.713895320892334,
61
- "logits/rejected": -5.012445449829102,
62
- "logps/chosen": -223.3787841796875,
63
- "logps/rejected": -201.9126434326172,
64
- "loss": 0.6294,
65
- "rewards/accuracies": 0.8125,
66
- "rewards/chosen": -0.047253355383872986,
67
- "rewards/margins": 0.1270204484462738,
68
- "rewards/rejected": -0.1742737889289856,
69
  "step": 30
70
  },
71
  {
72
- "epoch": 0.11,
73
- "grad_norm": 44.205273980146465,
74
  "learning_rate": 4.999575626062319e-07,
75
- "logits/chosen": -4.862967491149902,
76
- "logits/rejected": -5.199351787567139,
77
- "logps/chosen": -284.15850830078125,
78
- "logps/rejected": -260.1568298339844,
79
- "loss": 0.5878,
80
- "rewards/accuracies": 0.768750011920929,
81
- "rewards/chosen": -0.5039564371109009,
82
- "rewards/margins": 0.3390721082687378,
83
- "rewards/rejected": -0.8430284261703491,
84
  "step": 40
85
  },
86
  {
87
- "epoch": 0.13,
88
- "grad_norm": 33.79573625552293,
89
  "learning_rate": 4.984737660598186e-07,
90
- "logits/chosen": -4.860326766967773,
91
- "logits/rejected": -5.1770920753479,
92
- "logps/chosen": -287.49212646484375,
93
- "logps/rejected": -275.1466979980469,
94
- "loss": 0.544,
95
- "rewards/accuracies": 0.7749999761581421,
96
- "rewards/chosen": -0.6591841578483582,
97
- "rewards/margins": 0.3825286030769348,
98
- "rewards/rejected": -1.0417125225067139,
99
  "step": 50
100
  },
101
  {
102
- "epoch": 0.16,
103
- "grad_norm": 37.5548220875479,
104
  "learning_rate": 4.948824853131236e-07,
105
- "logits/chosen": -5.250467777252197,
106
- "logits/rejected": -5.646960258483887,
107
- "logps/chosen": -355.8065185546875,
108
- "logps/rejected": -370.16064453125,
109
- "loss": 0.4873,
110
- "rewards/accuracies": 0.768750011920929,
111
- "rewards/chosen": -1.3389923572540283,
112
- "rewards/margins": 0.6164921522140503,
113
- "rewards/rejected": -1.955484390258789,
114
  "step": 60
115
  },
116
  {
117
- "epoch": 0.18,
118
- "grad_norm": 40.80845104625175,
119
  "learning_rate": 4.892141805936084e-07,
120
- "logits/chosen": -5.506089687347412,
121
- "logits/rejected": -5.9388532638549805,
122
- "logps/chosen": -373.0067443847656,
123
- "logps/rejected": -409.2164001464844,
124
- "loss": 0.4619,
125
- "rewards/accuracies": 0.831250011920929,
126
- "rewards/chosen": -1.4274970293045044,
127
- "rewards/margins": 0.8630696535110474,
128
- "rewards/rejected": -2.290566921234131,
129
  "step": 70
130
  },
131
  {
132
- "epoch": 0.21,
133
- "grad_norm": 62.51614299706751,
134
  "learning_rate": 4.81516928858564e-07,
135
- "logits/chosen": -5.957489967346191,
136
- "logits/rejected": -6.362034797668457,
137
- "logps/chosen": -431.8949279785156,
138
- "logps/rejected": -484.7469177246094,
139
- "loss": 0.4382,
140
- "rewards/accuracies": 0.800000011920929,
141
- "rewards/chosen": -2.0529427528381348,
142
- "rewards/margins": 1.0657070875167847,
143
- "rewards/rejected": -3.118650197982788,
144
  "step": 80
145
  },
146
  {
147
- "epoch": 0.24,
148
- "grad_norm": 42.64563417225672,
149
  "learning_rate": 4.7185601601995784e-07,
150
- "logits/chosen": -5.974350929260254,
151
- "logits/rejected": -6.67104959487915,
152
- "logps/chosen": -387.2274475097656,
153
- "logps/rejected": -464.65826416015625,
154
- "loss": 0.4236,
155
  "rewards/accuracies": 0.856249988079071,
156
- "rewards/chosen": -1.6722873449325562,
157
- "rewards/margins": 1.3039813041687012,
158
- "rewards/rejected": -2.976268768310547,
159
  "step": 90
160
  },
161
  {
162
- "epoch": 0.26,
163
- "grad_norm": 49.657859383445725,
164
  "learning_rate": 4.603133832077953e-07,
165
- "logits/chosen": -6.59436559677124,
166
- "logits/rejected": -7.055686950683594,
167
- "logps/chosen": -443.9141540527344,
168
- "logps/rejected": -517.8334350585938,
169
- "loss": 0.4015,
170
- "rewards/accuracies": 0.831250011920929,
171
- "rewards/chosen": -2.304152011871338,
172
- "rewards/margins": 1.163777232170105,
173
- "rewards/rejected": -3.4679291248321533,
174
  "step": 100
175
  },
176
  {
177
- "epoch": 0.26,
178
- "eval_logits/chosen": -6.410887718200684,
179
- "eval_logits/rejected": -6.793334007263184,
180
- "eval_logps/chosen": -826.5081176757812,
181
- "eval_logps/rejected": -987.8357543945312,
182
- "eval_loss": 0.9856035113334656,
183
- "eval_rewards/accuracies": 0.52734375,
184
- "eval_rewards/chosen": -4.360336780548096,
185
- "eval_rewards/margins": 0.3341439962387085,
186
- "eval_rewards/rejected": -4.694480895996094,
187
- "eval_runtime": 97.5721,
188
- "eval_samples_per_second": 20.498,
189
- "eval_steps_per_second": 0.328,
190
  "step": 100
191
  },
192
  {
193
- "epoch": 0.29,
194
- "grad_norm": 59.660602657641974,
195
  "learning_rate": 4.4698693176863316e-07,
196
- "logits/chosen": -6.508014678955078,
197
- "logits/rejected": -6.995993137359619,
198
- "logps/chosen": -467.217041015625,
199
- "logps/rejected": -571.2991943359375,
200
- "loss": 0.3652,
201
- "rewards/accuracies": 0.824999988079071,
202
- "rewards/chosen": -2.5293219089508057,
203
- "rewards/margins": 1.5391861200332642,
204
- "rewards/rejected": -4.068508148193359,
205
  "step": 110
206
  },
207
  {
208
- "epoch": 0.32,
209
- "grad_norm": 48.65436374701769,
210
  "learning_rate": 4.319896928940505e-07,
211
- "logits/chosen": -6.631407737731934,
212
- "logits/rejected": -7.224958896636963,
213
- "logps/chosen": -449.9879455566406,
214
- "logps/rejected": -551.552734375,
215
- "loss": 0.3856,
216
- "rewards/accuracies": 0.7875000238418579,
217
- "rewards/chosen": -2.370181083679199,
218
- "rewards/margins": 1.3927323818206787,
219
- "rewards/rejected": -3.762913465499878,
220
  "step": 120
221
  },
222
  {
223
- "epoch": 0.34,
224
- "grad_norm": 41.65961698696447,
225
  "learning_rate": 4.1544886892205354e-07,
226
- "logits/chosen": -6.516014099121094,
227
- "logits/rejected": -7.032981872558594,
228
- "logps/chosen": -490.3934020996094,
229
- "logps/rejected": -610.3671264648438,
230
- "loss": 0.3736,
231
- "rewards/accuracies": 0.78125,
232
- "rewards/chosen": -2.659696578979492,
233
- "rewards/margins": 1.6181414127349854,
234
- "rewards/rejected": -4.277838230133057,
235
  "step": 130
236
  },
237
  {
238
- "epoch": 0.37,
239
- "grad_norm": 61.060769075088814,
240
  "learning_rate": 3.975047544428254e-07,
241
- "logits/chosen": -6.8892974853515625,
242
- "logits/rejected": -7.428150177001953,
243
- "logps/chosen": -479.0992736816406,
244
- "logps/rejected": -578.5732421875,
245
- "loss": 0.3918,
246
- "rewards/accuracies": 0.7875000238418579,
247
- "rewards/chosen": -2.728147506713867,
248
- "rewards/margins": 1.3954027891159058,
249
- "rewards/rejected": -4.123549938201904,
250
  "step": 140
251
  },
252
  {
253
- "epoch": 0.4,
254
- "grad_norm": 40.0921041292044,
255
  "learning_rate": 3.78309546359696e-07,
256
- "logits/chosen": -7.085695743560791,
257
- "logits/rejected": -7.593710422515869,
258
- "logps/chosen": -485.8174743652344,
259
- "logps/rejected": -611.1568603515625,
260
- "loss": 0.3821,
261
- "rewards/accuracies": 0.8125,
262
- "rewards/chosen": -2.8462297916412354,
263
- "rewards/margins": 1.527190089225769,
264
- "rewards/rejected": -4.373419761657715,
265
  "step": 150
266
  },
267
  {
268
- "epoch": 0.42,
269
- "grad_norm": 45.94809301945095,
270
  "learning_rate": 3.580260529980584e-07,
271
- "logits/chosen": -6.79840087890625,
272
- "logits/rejected": -7.57172155380249,
273
- "logps/chosen": -476.08197021484375,
274
- "logps/rejected": -613.65576171875,
275
- "loss": 0.3533,
276
- "rewards/accuracies": 0.8062499761581421,
277
- "rewards/chosen": -2.6100144386291504,
278
- "rewards/margins": 1.7850373983383179,
279
- "rewards/rejected": -4.3950514793396,
280
  "step": 160
281
  },
282
  {
283
- "epoch": 0.45,
284
- "grad_norm": 49.925985141071024,
285
  "learning_rate": 3.36826313211205e-07,
286
- "logits/chosen": -7.27915096282959,
287
- "logits/rejected": -7.999810695648193,
288
- "logps/chosen": -464.27606201171875,
289
- "logps/rejected": -618.3897705078125,
290
- "loss": 0.3586,
291
- "rewards/accuracies": 0.8125,
292
- "rewards/chosen": -2.638946056365967,
293
- "rewards/margins": 1.9340379238128662,
294
- "rewards/rejected": -4.572983741760254,
295
  "step": 170
296
  },
297
  {
298
- "epoch": 0.47,
299
- "grad_norm": 49.77263247276722,
300
  "learning_rate": 3.14890137195437e-07,
301
- "logits/chosen": -7.062252998352051,
302
- "logits/rejected": -7.757845878601074,
303
- "logps/chosen": -499.0604553222656,
304
- "logps/rejected": -631.4205322265625,
305
- "loss": 0.35,
306
- "rewards/accuracies": 0.8187500238418579,
307
- "rewards/chosen": -2.841132640838623,
308
- "rewards/margins": 1.6729885339736938,
309
- "rewards/rejected": -4.514122009277344,
310
  "step": 180
311
  },
312
  {
313
- "epoch": 0.5,
314
- "grad_norm": 47.32333832777734,
315
  "learning_rate": 2.9240358139084013e-07,
316
- "logits/chosen": -7.432755947113037,
317
- "logits/rejected": -8.177480697631836,
318
- "logps/chosen": -571.7742919921875,
319
- "logps/rejected": -753.3114013671875,
320
- "loss": 0.3584,
321
- "rewards/accuracies": 0.824999988079071,
322
- "rewards/chosen": -3.5975048542022705,
323
- "rewards/margins": 2.1865601539611816,
324
- "rewards/rejected": -5.784065246582031,
325
  "step": 190
326
  },
327
  {
328
- "epoch": 0.53,
329
- "grad_norm": 47.64189353399637,
330
  "learning_rate": 2.695573704031885e-07,
331
- "logits/chosen": -6.939781188964844,
332
- "logits/rejected": -7.71111536026001,
333
- "logps/chosen": -500.54449462890625,
334
- "logps/rejected": -651.9711303710938,
335
- "loss": 0.3649,
336
- "rewards/accuracies": 0.8500000238418579,
337
- "rewards/chosen": -2.752312421798706,
338
- "rewards/margins": 1.9267911911010742,
339
- "rewards/rejected": -4.679104328155518,
340
  "step": 200
341
  },
342
  {
343
- "epoch": 0.53,
344
- "eval_logits/chosen": -7.152472496032715,
345
- "eval_logits/rejected": -7.637792110443115,
346
- "eval_logps/chosen": -878.0755615234375,
347
- "eval_logps/rejected": -1032.680908203125,
348
- "eval_loss": 1.1239182949066162,
349
- "eval_rewards/accuracies": 0.48828125,
350
- "eval_rewards/chosen": -4.876009941101074,
351
- "eval_rewards/margins": 0.26692283153533936,
352
- "eval_rewards/rejected": -5.142932891845703,
353
- "eval_runtime": 97.9349,
354
  "eval_samples_per_second": 20.422,
355
  "eval_steps_per_second": 0.327,
356
  "step": 200
357
  },
358
  {
359
- "epoch": 0.55,
360
- "grad_norm": 54.30113996467665,
361
  "learning_rate": 2.465452793317865e-07,
362
- "logits/chosen": -6.918679237365723,
363
- "logits/rejected": -7.81919002532959,
364
- "logps/chosen": -490.4891662597656,
365
- "logps/rejected": -645.8287353515625,
366
- "loss": 0.351,
367
- "rewards/accuracies": 0.8500000238418579,
368
- "rewards/chosen": -2.56154727935791,
369
- "rewards/margins": 1.9791193008422852,
370
- "rewards/rejected": -4.5406670570373535,
371
  "step": 210
372
  },
373
  {
374
- "epoch": 0.58,
375
- "grad_norm": 48.87773515687126,
376
  "learning_rate": 2.2356249022388789e-07,
377
- "logits/chosen": -7.1236982345581055,
378
- "logits/rejected": -8.037015914916992,
379
- "logps/chosen": -493.3589782714844,
380
- "logps/rejected": -644.3180541992188,
381
- "loss": 0.3503,
382
- "rewards/accuracies": 0.8812500238418579,
383
- "rewards/chosen": -2.7893471717834473,
384
- "rewards/margins": 2.016085386276245,
385
- "rewards/rejected": -4.805432319641113,
386
  "step": 220
387
  },
388
  {
389
- "epoch": 0.61,
390
- "grad_norm": 42.654084351174774,
391
  "learning_rate": 2.0080393659578038e-07,
392
- "logits/chosen": -7.341279029846191,
393
- "logits/rejected": -8.349458694458008,
394
- "logps/chosen": -546.9940795898438,
395
- "logps/rejected": -735.5299072265625,
396
- "loss": 0.3336,
397
- "rewards/accuracies": 0.875,
398
- "rewards/chosen": -3.2596652507781982,
399
- "rewards/margins": 2.3632309436798096,
400
- "rewards/rejected": -5.622895240783691,
401
  "step": 230
402
  },
403
  {
404
- "epoch": 0.63,
405
- "grad_norm": 48.836991831948275,
406
  "learning_rate": 1.7846265006183976e-07,
407
- "logits/chosen": -7.382364749908447,
408
- "logits/rejected": -8.096385955810547,
409
- "logps/chosen": -559.1055908203125,
410
- "logps/rejected": -690.3636474609375,
411
- "loss": 0.3239,
412
- "rewards/accuracies": 0.78125,
413
- "rewards/chosen": -3.478092670440674,
414
- "rewards/margins": 1.7500022649765015,
415
- "rewards/rejected": -5.228094577789307,
416
  "step": 240
417
  },
418
  {
419
- "epoch": 0.66,
420
- "grad_norm": 41.88272757196894,
421
  "learning_rate": 1.5672812309497722e-07,
422
- "logits/chosen": -7.3642730712890625,
423
- "logits/rejected": -8.137662887573242,
424
- "logps/chosen": -481.7484436035156,
425
- "logps/rejected": -658.2447509765625,
426
- "loss": 0.3435,
427
- "rewards/accuracies": 0.831250011920929,
428
- "rewards/chosen": -2.742130756378174,
429
- "rewards/margins": 2.204786777496338,
430
- "rewards/rejected": -4.946917533874512,
431
  "step": 250
432
  },
433
  {
434
- "epoch": 0.69,
435
- "grad_norm": 42.22161557256122,
436
  "learning_rate": 1.357847018050843e-07,
437
- "logits/chosen": -7.285035133361816,
438
- "logits/rejected": -8.04423999786377,
439
- "logps/chosen": -495.21929931640625,
440
- "logps/rejected": -637.4512939453125,
441
- "loss": 0.3575,
442
- "rewards/accuracies": 0.856249988079071,
443
- "rewards/chosen": -2.5677530765533447,
444
- "rewards/margins": 1.859423041343689,
445
- "rewards/rejected": -4.427175521850586,
446
  "step": 260
447
  },
448
  {
449
- "epoch": 0.71,
450
- "grad_norm": 36.62148112990462,
451
  "learning_rate": 1.1581002236747328e-07,
452
- "logits/chosen": -7.350560188293457,
453
- "logits/rejected": -8.334383964538574,
454
- "logps/chosen": -470.9756774902344,
455
- "logps/rejected": -644.9085083007812,
456
- "loss": 0.3419,
457
- "rewards/accuracies": 0.831250011920929,
458
- "rewards/chosen": -2.7616236209869385,
459
- "rewards/margins": 2.1581368446350098,
460
- "rewards/rejected": -4.919760704040527,
461
  "step": 270
462
  },
463
  {
464
- "epoch": 0.74,
465
- "grad_norm": 42.46981787888043,
466
  "learning_rate": 9.697350436308427e-08,
467
- "logits/chosen": -7.353733062744141,
468
- "logits/rejected": -8.123218536376953,
469
- "logps/chosen": -547.2511596679688,
470
- "logps/rejected": -669.4581298828125,
471
- "loss": 0.3277,
472
- "rewards/accuracies": 0.8500000238418579,
473
- "rewards/chosen": -3.1215298175811768,
474
- "rewards/margins": 1.7408149242401123,
475
- "rewards/rejected": -4.862344264984131,
476
  "step": 280
477
  },
478
  {
479
- "epoch": 0.76,
480
- "grad_norm": 45.06982020026593,
481
  "learning_rate": 7.943491380952188e-08,
482
- "logits/chosen": -7.6978254318237305,
483
- "logits/rejected": -8.468889236450195,
484
- "logps/chosen": -488.8667907714844,
485
- "logps/rejected": -652.3041381835938,
486
- "loss": 0.3357,
487
  "rewards/accuracies": 0.862500011920929,
488
- "rewards/chosen": -2.835703134536743,
489
- "rewards/margins": 2.056410789489746,
490
- "rewards/rejected": -4.89211368560791,
491
  "step": 290
492
  },
493
  {
494
- "epoch": 0.79,
495
- "grad_norm": 50.46942762667779,
496
  "learning_rate": 6.334300807088508e-08,
497
- "logits/chosen": -7.3240485191345215,
498
- "logits/rejected": -8.329621315002441,
499
- "logps/chosen": -509.5638122558594,
500
- "logps/rejected": -673.7567749023438,
501
- "loss": 0.3506,
502
- "rewards/accuracies": 0.8500000238418579,
503
- "rewards/chosen": -3.0604350566864014,
504
- "rewards/margins": 2.1173126697540283,
505
- "rewards/rejected": -5.1777472496032715,
506
  "step": 300
507
  },
508
  {
509
- "epoch": 0.79,
510
- "eval_logits/chosen": -7.444587707519531,
511
- "eval_logits/rejected": -7.9900360107421875,
512
- "eval_logps/chosen": -951.4573974609375,
513
- "eval_logps/rejected": -1114.78076171875,
514
- "eval_loss": 1.1842519044876099,
515
- "eval_rewards/accuracies": 0.51171875,
516
- "eval_rewards/chosen": -5.609828948974609,
517
- "eval_rewards/margins": 0.35410135984420776,
518
- "eval_rewards/rejected": -5.963930606842041,
519
- "eval_runtime": 97.9073,
520
- "eval_samples_per_second": 20.427,
521
- "eval_steps_per_second": 0.327,
522
  "step": 300
523
  },
524
  {
525
- "epoch": 0.82,
526
- "grad_norm": 45.51913834484837,
527
  "learning_rate": 4.8834274139883084e-08,
528
- "logits/chosen": -7.380696773529053,
529
- "logits/rejected": -8.350247383117676,
530
- "logps/chosen": -504.37054443359375,
531
- "logps/rejected": -678.6265869140625,
532
- "loss": 0.3248,
533
- "rewards/accuracies": 0.84375,
534
- "rewards/chosen": -2.946779727935791,
535
- "rewards/margins": 2.2168285846710205,
536
- "rewards/rejected": -5.163609027862549,
537
  "step": 310
538
  },
539
  {
540
- "epoch": 0.84,
541
- "grad_norm": 53.031113784661194,
542
  "learning_rate": 3.60317709937693e-08,
543
- "logits/chosen": -7.483295440673828,
544
- "logits/rejected": -8.33633804321289,
545
- "logps/chosen": -519.6754150390625,
546
- "logps/rejected": -667.0064697265625,
547
- "loss": 0.334,
548
- "rewards/accuracies": 0.824999988079071,
549
- "rewards/chosen": -2.902660369873047,
550
- "rewards/margins": 2.0172414779663086,
551
- "rewards/rejected": -4.919901371002197,
552
  "step": 320
553
  },
554
  {
555
- "epoch": 0.87,
556
- "grad_norm": 41.68995945520798,
557
  "learning_rate": 2.5044085842905683e-08,
558
- "logits/chosen": -7.596086025238037,
559
- "logits/rejected": -8.42108154296875,
560
- "logps/chosen": -542.1593627929688,
561
- "logps/rejected": -761.1915893554688,
562
- "loss": 0.3439,
563
- "rewards/accuracies": 0.9312499761581421,
564
- "rewards/chosen": -3.237351894378662,
565
- "rewards/margins": 2.630371332168579,
566
- "rewards/rejected": -5.867722988128662,
567
  "step": 330
568
  },
569
  {
570
- "epoch": 0.9,
571
- "grad_norm": 44.38600213815557,
572
  "learning_rate": 1.5964413124758493e-08,
573
- "logits/chosen": -7.379315376281738,
574
- "logits/rejected": -8.234747886657715,
575
- "logps/chosen": -516.7742919921875,
576
- "logps/rejected": -690.0197143554688,
577
- "loss": 0.3394,
578
  "rewards/accuracies": 0.875,
579
- "rewards/chosen": -2.9615204334259033,
580
- "rewards/margins": 2.160275936126709,
581
- "rewards/rejected": -5.121796607971191,
582
  "step": 340
583
  },
584
  {
585
- "epoch": 0.92,
586
- "grad_norm": 49.890826040889124,
587
  "learning_rate": 8.869764055041501e-09,
588
- "logits/chosen": -7.525488376617432,
589
- "logits/rejected": -8.198974609375,
590
- "logps/chosen": -535.8231201171875,
591
- "logps/rejected": -737.1505737304688,
592
- "loss": 0.3403,
593
- "rewards/accuracies": 0.8374999761581421,
594
- "rewards/chosen": -3.1437525749206543,
595
- "rewards/margins": 2.3052563667297363,
596
- "rewards/rejected": -5.449008941650391,
597
  "step": 350
598
  },
599
  {
600
- "epoch": 0.95,
601
- "grad_norm": 41.28866479778979,
602
  "learning_rate": 3.82031344036729e-09,
603
- "logits/chosen": -7.295458793640137,
604
- "logits/rejected": -8.147361755371094,
605
- "logps/chosen": -541.268798828125,
606
- "logps/rejected": -692.4953002929688,
607
- "loss": 0.3191,
608
  "rewards/accuracies": 0.8187500238418579,
609
- "rewards/chosen": -3.197694778442383,
610
- "rewards/margins": 1.9286365509033203,
611
- "rewards/rejected": -5.126331329345703,
612
  "step": 360
613
  },
614
  {
615
- "epoch": 0.97,
616
- "grad_norm": 56.54843526627352,
617
  "learning_rate": 8.588892925590063e-10,
618
- "logits/chosen": -7.414445400238037,
619
- "logits/rejected": -8.447690963745117,
620
- "logps/chosen": -499.84197998046875,
621
- "logps/rejected": -670.2615966796875,
622
- "loss": 0.3342,
623
  "rewards/accuracies": 0.8500000238418579,
624
- "rewards/chosen": -2.8063206672668457,
625
- "rewards/margins": 2.2892918586730957,
626
- "rewards/rejected": -5.0956130027771,
627
  "step": 370
628
  },
629
  {
630
- "epoch": 1.0,
631
  "step": 379,
632
  "total_flos": 0.0,
633
- "train_loss": 0.39733357479830217,
634
- "train_runtime": 5845.1464,
635
- "train_samples_per_second": 8.303,
636
- "train_steps_per_second": 0.065
637
  }
638
  ],
639
  "logging_steps": 10,
@@ -641,6 +641,18 @@
641
  "num_input_tokens_seen": 0,
642
  "num_train_epochs": 1,
643
  "save_steps": 100,
 
 
 
 
 
 
 
 
 
 
 
 
644
  "total_flos": 0.0,
645
  "train_batch_size": 8,
646
  "trial_name": null,
 
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.002635046113306983,
13
+ "grad_norm": 1701.3284378032004,
14
  "learning_rate": 1.3157894736842104e-08,
15
  "logits/chosen": -4.685327529907227,
16
  "logits/rejected": -4.87608528137207,
 
24
  "step": 1
25
  },
26
  {
27
+ "epoch": 0.026350461133069828,
28
+ "grad_norm": 1584.9120962726856,
29
  "learning_rate": 1.3157894736842104e-07,
30
+ "logits/chosen": -4.499300479888916,
31
+ "logits/rejected": -4.840802192687988,
32
+ "logps/chosen": -223.6631317138672,
33
+ "logps/rejected": -160.81097412109375,
34
+ "loss": 0.7136,
35
+ "rewards/accuracies": 0.4444444477558136,
36
+ "rewards/chosen": 0.004151582717895508,
37
+ "rewards/margins": 0.002958830911666155,
38
+ "rewards/rejected": 0.001192751806229353,
39
  "step": 10
40
  },
41
  {
42
+ "epoch": 0.052700922266139656,
43
+ "grad_norm": 955.29292445595,
44
  "learning_rate": 2.631578947368421e-07,
45
+ "logits/chosen": -4.518028259277344,
46
+ "logits/rejected": -4.817793846130371,
47
+ "logps/chosen": -220.1512451171875,
48
+ "logps/rejected": -172.69322204589844,
49
+ "loss": 0.4939,
50
+ "rewards/accuracies": 0.7562500238418579,
51
+ "rewards/chosen": 0.9042474627494812,
52
+ "rewards/margins": 0.9315685033798218,
53
+ "rewards/rejected": -0.02732105180621147,
54
  "step": 20
55
  },
56
  {
57
+ "epoch": 0.07905138339920949,
58
+ "grad_norm": 1078.6380127502835,
59
  "learning_rate": 3.9473684210526315e-07,
60
+ "logits/chosen": -4.58280086517334,
61
+ "logits/rejected": -4.878857612609863,
62
+ "logps/chosen": -212.6325225830078,
63
+ "logps/rejected": -183.79238891601562,
64
+ "loss": 0.3906,
65
+ "rewards/accuracies": 0.8374999761581421,
66
+ "rewards/chosen": 3.010448932647705,
67
+ "rewards/margins": 2.6640021800994873,
68
+ "rewards/rejected": 0.346446692943573,
69
  "step": 30
70
  },
71
  {
72
+ "epoch": 0.10540184453227931,
73
+ "grad_norm": 1034.6942446334156,
74
  "learning_rate": 4.999575626062319e-07,
75
+ "logits/chosen": -4.496463298797607,
76
+ "logits/rejected": -4.832797050476074,
77
+ "logps/chosen": -225.2804412841797,
78
+ "logps/rejected": -177.3509063720703,
79
+ "loss": 0.4804,
80
+ "rewards/accuracies": 0.862500011920929,
81
+ "rewards/chosen": 4.241217136383057,
82
+ "rewards/margins": 4.989673614501953,
83
+ "rewards/rejected": -0.7484563589096069,
84
  "step": 40
85
  },
86
  {
87
+ "epoch": 0.13175230566534915,
88
+ "grad_norm": 1292.8962171873402,
89
  "learning_rate": 4.984737660598186e-07,
90
+ "logits/chosen": -4.517908573150635,
91
+ "logits/rejected": -4.786294937133789,
92
+ "logps/chosen": -214.01718139648438,
93
+ "logps/rejected": -174.09678649902344,
94
+ "loss": 0.5163,
95
+ "rewards/accuracies": 0.84375,
96
+ "rewards/chosen": 3.7782645225524902,
97
+ "rewards/margins": 5.338944435119629,
98
+ "rewards/rejected": -1.5606796741485596,
99
  "step": 50
100
  },
101
  {
102
+ "epoch": 0.15810276679841898,
103
+ "grad_norm": 1081.6576652859876,
104
  "learning_rate": 4.948824853131236e-07,
105
+ "logits/chosen": -4.719171047210693,
106
+ "logits/rejected": -4.9818878173828125,
107
+ "logps/chosen": -215.00869750976562,
108
+ "logps/rejected": -180.1785430908203,
109
+ "loss": 0.4821,
110
+ "rewards/accuracies": 0.862500011920929,
111
+ "rewards/chosen": 3.4492969512939453,
112
+ "rewards/margins": 6.232473373413086,
113
+ "rewards/rejected": -2.7831759452819824,
114
  "step": 60
115
  },
116
  {
117
+ "epoch": 0.1844532279314888,
118
+ "grad_norm": 780.7579097796144,
119
  "learning_rate": 4.892141805936084e-07,
120
+ "logits/chosen": -4.691411018371582,
121
+ "logits/rejected": -4.9814910888671875,
122
+ "logps/chosen": -222.72189331054688,
123
+ "logps/rejected": -186.29788208007812,
124
+ "loss": 0.5005,
125
+ "rewards/accuracies": 0.8500000238418579,
126
+ "rewards/chosen": 3.7675652503967285,
127
+ "rewards/margins": 6.836648464202881,
128
+ "rewards/rejected": -3.0690836906433105,
129
  "step": 70
130
  },
131
  {
132
+ "epoch": 0.21080368906455862,
133
+ "grad_norm": 1751.5015463679365,
134
  "learning_rate": 4.81516928858564e-07,
135
+ "logits/chosen": -4.691437721252441,
136
+ "logits/rejected": -4.943267822265625,
137
+ "logps/chosen": -219.7614288330078,
138
+ "logps/rejected": -179.60899353027344,
139
+ "loss": 0.5615,
140
+ "rewards/accuracies": 0.831250011920929,
141
+ "rewards/chosen": 3.4195969104766846,
142
+ "rewards/margins": 6.783135890960693,
143
+ "rewards/rejected": -3.363539457321167,
144
  "step": 80
145
  },
146
  {
147
+ "epoch": 0.23715415019762845,
148
+ "grad_norm": 1233.3333710128843,
149
  "learning_rate": 4.7185601601995784e-07,
150
+ "logits/chosen": -4.569981575012207,
151
+ "logits/rejected": -4.954944610595703,
152
+ "logps/chosen": -212.4117431640625,
153
+ "logps/rejected": -173.86911010742188,
154
+ "loss": 0.4989,
155
  "rewards/accuracies": 0.856249988079071,
156
+ "rewards/chosen": 3.7934730052948,
157
+ "rewards/margins": 7.21230936050415,
158
+ "rewards/rejected": -3.4188361167907715,
159
  "step": 90
160
  },
161
  {
162
+ "epoch": 0.2635046113306983,
163
+ "grad_norm": 1244.999056136522,
164
  "learning_rate": 4.603133832077953e-07,
165
+ "logits/chosen": -4.672577381134033,
166
+ "logits/rejected": -4.9231157302856445,
167
+ "logps/chosen": -206.11221313476562,
168
+ "logps/rejected": -176.262451171875,
169
+ "loss": 0.5909,
170
+ "rewards/accuracies": 0.8374999761581421,
171
+ "rewards/chosen": 3.6933536529541016,
172
+ "rewards/margins": 6.304307460784912,
173
+ "rewards/rejected": -2.6109542846679688,
174
  "step": 100
175
  },
176
  {
177
+ "epoch": 0.2635046113306983,
178
+ "eval_logits/chosen": -4.612120628356934,
179
+ "eval_logits/rejected": -4.85081672668457,
180
+ "eval_logps/chosen": -396.4924011230469,
181
+ "eval_logps/rejected": -512.6068115234375,
182
+ "eval_loss": 6.653407096862793,
183
+ "eval_rewards/accuracies": 0.27734375,
184
+ "eval_rewards/chosen": -3.0089728832244873,
185
+ "eval_rewards/margins": -5.8993988037109375,
186
+ "eval_rewards/rejected": 2.890425682067871,
187
+ "eval_runtime": 98.318,
188
+ "eval_samples_per_second": 20.342,
189
+ "eval_steps_per_second": 0.325,
190
  "step": 100
191
  },
192
  {
193
+ "epoch": 0.2898550724637681,
194
+ "grad_norm": 870.1595065644425,
195
  "learning_rate": 4.4698693176863316e-07,
196
+ "logits/chosen": -4.71740198135376,
197
+ "logits/rejected": -4.952963829040527,
198
+ "logps/chosen": -208.0568084716797,
199
+ "logps/rejected": -172.92605590820312,
200
+ "loss": 0.4837,
201
+ "rewards/accuracies": 0.8374999761581421,
202
+ "rewards/chosen": 3.114009380340576,
203
+ "rewards/margins": 7.352834224700928,
204
+ "rewards/rejected": -4.238823890686035,
205
  "step": 110
206
  },
207
  {
208
+ "epoch": 0.31620553359683795,
209
+ "grad_norm": 1230.5660125025618,
210
  "learning_rate": 4.319896928940505e-07,
211
+ "logits/chosen": -4.8249382972717285,
212
+ "logits/rejected": -5.139795780181885,
213
+ "logps/chosen": -204.9242401123047,
214
+ "logps/rejected": -181.16221618652344,
215
+ "loss": 0.6657,
216
+ "rewards/accuracies": 0.8125,
217
+ "rewards/chosen": 4.0227861404418945,
218
+ "rewards/margins": 6.973240852355957,
219
+ "rewards/rejected": -2.950455904006958,
220
  "step": 120
221
  },
222
  {
223
+ "epoch": 0.3425559947299078,
224
+ "grad_norm": 1400.2090737238004,
225
  "learning_rate": 4.1544886892205354e-07,
226
+ "logits/chosen": -4.680369853973389,
227
+ "logits/rejected": -4.979363441467285,
228
+ "logps/chosen": -218.03897094726562,
229
+ "logps/rejected": -192.44757080078125,
230
+ "loss": 0.4732,
231
+ "rewards/accuracies": 0.862500011920929,
232
+ "rewards/chosen": 3.1923911571502686,
233
+ "rewards/margins": 8.124491691589355,
234
+ "rewards/rejected": -4.932101249694824,
235
  "step": 130
236
  },
237
  {
238
+ "epoch": 0.3689064558629776,
239
+ "grad_norm": 1284.6303869923338,
240
  "learning_rate": 3.975047544428254e-07,
241
+ "logits/chosen": -4.738985061645508,
242
+ "logits/rejected": -4.995828628540039,
243
+ "logps/chosen": -200.0480499267578,
244
+ "logps/rejected": -173.74069213867188,
245
+ "loss": 0.7249,
246
+ "rewards/accuracies": 0.8687499761581421,
247
+ "rewards/chosen": 3.118265151977539,
248
+ "rewards/margins": 6.879487037658691,
249
+ "rewards/rejected": -3.7612221240997314,
250
  "step": 140
251
  },
252
  {
253
+ "epoch": 0.3952569169960474,
254
+ "grad_norm": 1087.9280552765,
255
  "learning_rate": 3.78309546359696e-07,
256
+ "logits/chosen": -4.816591739654541,
257
+ "logits/rejected": -5.046236991882324,
258
+ "logps/chosen": -197.75784301757812,
259
+ "logps/rejected": -187.12631225585938,
260
+ "loss": 0.6191,
261
+ "rewards/accuracies": 0.918749988079071,
262
+ "rewards/chosen": 1.7183030843734741,
263
+ "rewards/margins": 8.374044418334961,
264
+ "rewards/rejected": -6.655740261077881,
265
  "step": 150
266
  },
267
  {
268
+ "epoch": 0.42160737812911725,
269
+ "grad_norm": 870.0436658351372,
270
  "learning_rate": 3.580260529980584e-07,
271
+ "logits/chosen": -4.622679233551025,
272
+ "logits/rejected": -4.90102481842041,
273
+ "logps/chosen": -210.0752410888672,
274
+ "logps/rejected": -182.36378479003906,
275
+ "loss": 0.5743,
276
+ "rewards/accuracies": 0.8374999761581421,
277
+ "rewards/chosen": 2.5026469230651855,
278
+ "rewards/margins": 6.609257698059082,
279
+ "rewards/rejected": -4.106611251831055,
280
  "step": 160
281
  },
282
  {
283
+ "epoch": 0.4479578392621871,
284
+ "grad_norm": 1109.7545725706777,
285
  "learning_rate": 3.36826313211205e-07,
286
+ "logits/chosen": -4.862623691558838,
287
+ "logits/rejected": -5.1108198165893555,
288
+ "logps/chosen": -195.3279266357422,
289
+ "logps/rejected": -170.62802124023438,
290
+ "loss": 0.6483,
291
+ "rewards/accuracies": 0.8187500238418579,
292
+ "rewards/chosen": 2.5267701148986816,
293
+ "rewards/margins": 7.295065402984619,
294
+ "rewards/rejected": -4.768294811248779,
295
  "step": 170
296
  },
297
  {
298
+ "epoch": 0.4743083003952569,
299
+ "grad_norm": 1174.4955529247104,
300
  "learning_rate": 3.14890137195437e-07,
301
+ "logits/chosen": -4.831478595733643,
302
+ "logits/rejected": -5.037031173706055,
303
+ "logps/chosen": -211.2603302001953,
304
+ "logps/rejected": -190.32931518554688,
305
+ "loss": 0.5829,
306
+ "rewards/accuracies": 0.84375,
307
+ "rewards/chosen": 1.8433873653411865,
308
+ "rewards/margins": 7.003817558288574,
309
+ "rewards/rejected": -5.160429954528809,
310
  "step": 180
311
  },
312
  {
313
+ "epoch": 0.5006587615283268,
314
+ "grad_norm": 1405.2874598551575,
315
  "learning_rate": 2.9240358139084013e-07,
316
+ "logits/chosen": -4.9419050216674805,
317
+ "logits/rejected": -5.200289726257324,
318
+ "logps/chosen": -209.5860595703125,
319
+ "logps/rejected": -188.95069885253906,
320
+ "loss": 0.5485,
321
+ "rewards/accuracies": 0.856249988079071,
322
+ "rewards/chosen": 1.218836784362793,
323
+ "rewards/margins": 8.241748809814453,
324
+ "rewards/rejected": -7.022912502288818,
325
  "step": 190
326
  },
327
  {
328
+ "epoch": 0.5270092226613966,
329
+ "grad_norm": 1006.939051019727,
330
  "learning_rate": 2.695573704031885e-07,
331
+ "logits/chosen": -4.7473955154418945,
332
+ "logits/rejected": -5.000621795654297,
333
+ "logps/chosen": -217.6959686279297,
334
+ "logps/rejected": -190.3670196533203,
335
+ "loss": 0.7239,
336
+ "rewards/accuracies": 0.8374999761581421,
337
+ "rewards/chosen": 3.8086190223693848,
338
+ "rewards/margins": 6.961747646331787,
339
+ "rewards/rejected": -3.1531288623809814,
340
  "step": 200
341
  },
342
  {
343
+ "epoch": 0.5270092226613966,
344
+ "eval_logits/chosen": -4.771502494812012,
345
+ "eval_logits/rejected": -4.989596843719482,
346
+ "eval_logps/chosen": -396.1126708984375,
347
+ "eval_logps/rejected": -511.37469482421875,
348
+ "eval_loss": 8.072031021118164,
349
+ "eval_rewards/accuracies": 0.2734375,
350
+ "eval_rewards/chosen": -2.8191022872924805,
351
+ "eval_rewards/margins": -6.325590133666992,
352
+ "eval_rewards/rejected": 3.5064878463745117,
353
+ "eval_runtime": 97.9327,
354
  "eval_samples_per_second": 20.422,
355
  "eval_steps_per_second": 0.327,
356
  "step": 200
357
  },
358
  {
359
+ "epoch": 0.5533596837944664,
360
+ "grad_norm": 1103.8948285514384,
361
  "learning_rate": 2.465452793317865e-07,
362
+ "logits/chosen": -4.7866034507751465,
363
+ "logits/rejected": -5.081458568572998,
364
+ "logps/chosen": -228.73922729492188,
365
+ "logps/rejected": -201.91409301757812,
366
+ "loss": 0.5681,
367
+ "rewards/accuracies": 0.8687499761581421,
368
+ "rewards/chosen": 2.7976126670837402,
369
+ "rewards/margins": 7.8736419677734375,
370
+ "rewards/rejected": -5.076028823852539,
371
  "step": 210
372
  },
373
  {
374
+ "epoch": 0.5797101449275363,
375
+ "grad_norm": 715.6592810091468,
376
  "learning_rate": 2.2356249022388789e-07,
377
+ "logits/chosen": -4.758913516998291,
378
+ "logits/rejected": -5.04398250579834,
379
+ "logps/chosen": -205.92568969726562,
380
+ "logps/rejected": -170.3586883544922,
381
+ "loss": 0.4895,
382
+ "rewards/accuracies": 0.856249988079071,
383
+ "rewards/chosen": 4.249292373657227,
384
+ "rewards/margins": 7.541260719299316,
385
+ "rewards/rejected": -3.291968822479248,
386
  "step": 220
387
  },
388
  {
389
+ "epoch": 0.6060606060606061,
390
+ "grad_norm": 944.8747719910025,
391
  "learning_rate": 2.0080393659578038e-07,
392
+ "logits/chosen": -4.764594078063965,
393
+ "logits/rejected": -5.100175857543945,
394
+ "logps/chosen": -210.0486297607422,
395
+ "logps/rejected": -179.91928100585938,
396
+ "loss": 0.5197,
397
+ "rewards/accuracies": 0.8687499761581421,
398
+ "rewards/chosen": 5.489472389221191,
399
+ "rewards/margins": 8.828967094421387,
400
+ "rewards/rejected": -3.3394947052001953,
401
  "step": 230
402
  },
403
  {
404
+ "epoch": 0.6324110671936759,
405
+ "grad_norm": 1267.6123006349214,
406
  "learning_rate": 1.7846265006183976e-07,
407
+ "logits/chosen": -4.821089744567871,
408
+ "logits/rejected": -5.0871124267578125,
409
+ "logps/chosen": -211.7589874267578,
410
+ "logps/rejected": -181.33819580078125,
411
+ "loss": 0.5444,
412
+ "rewards/accuracies": 0.824999988079071,
413
+ "rewards/chosen": -0.23131565749645233,
414
+ "rewards/margins": 6.660643577575684,
415
+ "rewards/rejected": -6.891959190368652,
416
  "step": 240
417
  },
418
  {
419
+ "epoch": 0.6587615283267457,
420
+ "grad_norm": 911.8592996107284,
421
  "learning_rate": 1.5672812309497722e-07,
422
+ "logits/chosen": -4.781493663787842,
423
+ "logits/rejected": -5.052074432373047,
424
+ "logps/chosen": -201.82290649414062,
425
+ "logps/rejected": -175.06719970703125,
426
+ "loss": 0.5887,
427
+ "rewards/accuracies": 0.856249988079071,
428
+ "rewards/chosen": 2.856235980987549,
429
+ "rewards/margins": 8.613394737243652,
430
+ "rewards/rejected": -5.757159233093262,
431
  "step": 250
432
  },
433
  {
434
+ "epoch": 0.6851119894598156,
435
+ "grad_norm": 840.8020636445922,
436
  "learning_rate": 1.357847018050843e-07,
437
+ "logits/chosen": -4.674568176269531,
438
+ "logits/rejected": -4.961749076843262,
439
+ "logps/chosen": -232.63119506835938,
440
+ "logps/rejected": -204.2981719970703,
441
+ "loss": 0.6421,
442
+ "rewards/accuracies": 0.8062499761581421,
443
+ "rewards/chosen": 2.906367778778076,
444
+ "rewards/margins": 7.688606262207031,
445
+ "rewards/rejected": -4.782238483428955,
446
  "step": 260
447
  },
448
  {
449
+ "epoch": 0.7114624505928854,
450
+ "grad_norm": 787.2425305884327,
451
  "learning_rate": 1.1581002236747328e-07,
452
+ "logits/chosen": -4.637770652770996,
453
+ "logits/rejected": -4.975251197814941,
454
+ "logps/chosen": -190.36253356933594,
455
+ "logps/rejected": -166.21788024902344,
456
+ "loss": 0.5113,
457
+ "rewards/accuracies": 0.856249988079071,
458
+ "rewards/chosen": 2.2253878116607666,
459
+ "rewards/margins": 8.868097305297852,
460
+ "rewards/rejected": -6.6427106857299805,
461
  "step": 270
462
  },
463
  {
464
+ "epoch": 0.7378129117259552,
465
+ "grad_norm": 1064.0700290859438,
466
  "learning_rate": 9.697350436308427e-08,
467
+ "logits/chosen": -4.585692405700684,
468
+ "logits/rejected": -4.871885776519775,
469
+ "logps/chosen": -229.0143585205078,
470
+ "logps/rejected": -194.08761596679688,
471
+ "loss": 0.5252,
472
+ "rewards/accuracies": 0.8374999761581421,
473
+ "rewards/chosen": 3.0419421195983887,
474
+ "rewards/margins": 8.473891258239746,
475
+ "rewards/rejected": -5.431949138641357,
476
  "step": 280
477
  },
478
  {
479
+ "epoch": 0.764163372859025,
480
+ "grad_norm": 839.1911415542294,
481
  "learning_rate": 7.943491380952188e-08,
482
+ "logits/chosen": -4.828024864196777,
483
+ "logits/rejected": -5.060397148132324,
484
+ "logps/chosen": -196.42611694335938,
485
+ "logps/rejected": -171.01446533203125,
486
+ "loss": 0.4822,
487
  "rewards/accuracies": 0.862500011920929,
488
+ "rewards/chosen": 4.435191631317139,
489
+ "rewards/margins": 8.396050453186035,
490
+ "rewards/rejected": -3.9608588218688965,
491
  "step": 290
492
  },
493
  {
494
+ "epoch": 0.7905138339920948,
495
+ "grad_norm": 887.4606387846492,
496
  "learning_rate": 6.334300807088508e-08,
497
+ "logits/chosen": -4.608688831329346,
498
+ "logits/rejected": -4.9593987464904785,
499
+ "logps/chosen": -195.76962280273438,
500
+ "logps/rejected": -164.97048950195312,
501
+ "loss": 0.5556,
502
+ "rewards/accuracies": 0.893750011920929,
503
+ "rewards/chosen": 3.875316619873047,
504
+ "rewards/margins": 8.369561195373535,
505
+ "rewards/rejected": -4.4942450523376465,
506
  "step": 300
507
  },
508
  {
509
+ "epoch": 0.7905138339920948,
510
+ "eval_logits/chosen": -4.660388469696045,
511
+ "eval_logits/rejected": -4.890798091888428,
512
+ "eval_logps/chosen": -399.5094909667969,
513
+ "eval_logps/rejected": -517.530029296875,
514
+ "eval_loss": 6.923000335693359,
515
+ "eval_rewards/accuracies": 0.31640625,
516
+ "eval_rewards/chosen": -4.517519474029541,
517
+ "eval_rewards/margins": -4.946353435516357,
518
+ "eval_rewards/rejected": 0.42883408069610596,
519
+ "eval_runtime": 99.1917,
520
+ "eval_samples_per_second": 20.163,
521
+ "eval_steps_per_second": 0.323,
522
  "step": 300
523
  },
524
  {
525
+ "epoch": 0.8168642951251647,
526
+ "grad_norm": 1030.5057638496492,
527
  "learning_rate": 4.8834274139883084e-08,
528
+ "logits/chosen": -4.663365840911865,
529
+ "logits/rejected": -5.023074150085449,
530
+ "logps/chosen": -201.54336547851562,
531
+ "logps/rejected": -171.71324157714844,
532
+ "loss": 0.533,
533
+ "rewards/accuracies": 0.8812500238418579,
534
+ "rewards/chosen": 4.074584007263184,
535
+ "rewards/margins": 8.798391342163086,
536
+ "rewards/rejected": -4.723808765411377,
537
  "step": 310
538
  },
539
  {
540
+ "epoch": 0.8432147562582345,
541
+ "grad_norm": 1279.040936993484,
542
  "learning_rate": 3.60317709937693e-08,
543
+ "logits/chosen": -4.707262992858887,
544
+ "logits/rejected": -5.011012077331543,
545
+ "logps/chosen": -223.0199432373047,
546
+ "logps/rejected": -183.51393127441406,
547
+ "loss": 0.5239,
548
+ "rewards/accuracies": 0.8187500238418579,
549
+ "rewards/chosen": 3.1946961879730225,
550
+ "rewards/margins": 7.443505764007568,
551
+ "rewards/rejected": -4.248808860778809,
552
  "step": 320
553
  },
554
  {
555
+ "epoch": 0.8695652173913043,
556
+ "grad_norm": 790.9511672313881,
557
  "learning_rate": 2.5044085842905683e-08,
558
+ "logits/chosen": -4.707150459289551,
559
+ "logits/rejected": -4.967694282531738,
560
+ "logps/chosen": -208.9776153564453,
561
+ "logps/rejected": -184.1087188720703,
562
+ "loss": 0.5674,
563
+ "rewards/accuracies": 0.918749988079071,
564
+ "rewards/chosen": 4.723270893096924,
565
+ "rewards/margins": 9.567978858947754,
566
+ "rewards/rejected": -4.844708442687988,
567
  "step": 330
568
  },
569
  {
570
+ "epoch": 0.8959156785243741,
571
+ "grad_norm": 1075.929120672392,
572
  "learning_rate": 1.5964413124758493e-08,
573
+ "logits/chosen": -4.645999431610107,
574
+ "logits/rejected": -4.947402477264404,
575
+ "logps/chosen": -212.19015502929688,
576
+ "logps/rejected": -185.76678466796875,
577
+ "loss": 0.5578,
578
  "rewards/accuracies": 0.875,
579
+ "rewards/chosen": 4.216076374053955,
580
+ "rewards/margins": 8.17945671081543,
581
+ "rewards/rejected": -3.9633796215057373,
582
  "step": 340
583
  },
584
  {
585
+ "epoch": 0.922266139657444,
586
+ "grad_norm": 1055.0113319220882,
587
  "learning_rate": 8.869764055041501e-09,
588
+ "logits/chosen": -4.714905738830566,
589
+ "logits/rejected": -4.937991619110107,
590
+ "logps/chosen": -215.5680389404297,
591
+ "logps/rejected": -200.6318359375,
592
+ "loss": 0.5113,
593
+ "rewards/accuracies": 0.793749988079071,
594
+ "rewards/chosen": 2.9399425983428955,
595
+ "rewards/margins": 7.131015777587891,
596
+ "rewards/rejected": -4.191073417663574,
597
  "step": 350
598
  },
599
  {
600
+ "epoch": 0.9486166007905138,
601
+ "grad_norm": 1003.3923870155849,
602
  "learning_rate": 3.82031344036729e-09,
603
+ "logits/chosen": -4.614500999450684,
604
+ "logits/rejected": -4.9195661544799805,
605
+ "logps/chosen": -216.197998046875,
606
+ "logps/rejected": -186.8596649169922,
607
+ "loss": 0.4994,
608
  "rewards/accuracies": 0.8187500238418579,
609
+ "rewards/chosen": 2.650672197341919,
610
+ "rewards/margins": 6.149393081665039,
611
+ "rewards/rejected": -3.49872088432312,
612
  "step": 360
613
  },
614
  {
615
+ "epoch": 0.9749670619235836,
616
+ "grad_norm": 1021.842139855702,
617
  "learning_rate": 8.588892925590063e-10,
618
+ "logits/chosen": -4.69917631149292,
619
+ "logits/rejected": -5.054296016693115,
620
+ "logps/chosen": -212.79714965820312,
621
+ "logps/rejected": -171.7442169189453,
622
+ "loss": 0.5535,
623
  "rewards/accuracies": 0.8500000238418579,
624
+ "rewards/chosen": 3.2063796520233154,
625
+ "rewards/margins": 8.728368759155273,
626
+ "rewards/rejected": -5.521987438201904,
627
  "step": 370
628
  },
629
  {
630
+ "epoch": 0.9986824769433466,
631
  "step": 379,
632
  "total_flos": 0.0,
633
+ "train_loss": 0.5517442987587962,
634
+ "train_runtime": 6181.8185,
635
+ "train_samples_per_second": 7.85,
636
+ "train_steps_per_second": 0.061
637
  }
638
  ],
639
  "logging_steps": 10,
 
641
  "num_input_tokens_seen": 0,
642
  "num_train_epochs": 1,
643
  "save_steps": 100,
644
+ "stateful_callbacks": {
645
+ "TrainerControl": {
646
+ "args": {
647
+ "should_epoch_stop": false,
648
+ "should_evaluate": false,
649
+ "should_log": false,
650
+ "should_save": true,
651
+ "should_training_stop": false
652
+ },
653
+ "attributes": {}
654
+ }
655
+ },
656
  "total_flos": 0.0,
657
  "train_batch_size": 8,
658
  "trial_name": null,