RikkiXu committed
Commit
49f13d1
1 Parent(s): 259dde6

Model save

README.md CHANGED
@@ -16,14 +16,14 @@ should probably proofread and complete it, then remove this comment. -->
 This model was trained from scratch on the None dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.6931
-- Rewards/chosen: -1.7904
-- Rewards/rejected: -1.7904
+- Rewards/chosen: -1.9201
+- Rewards/rejected: -1.9201
 - Rewards/accuracies: 0.0
 - Rewards/margins: 0.0
-- Logps/rejected: -157.3674
-- Logps/chosen: -157.3674
-- Logits/rejected: -3.2202
-- Logits/chosen: -3.2202
+- Logps/rejected: -159.4172
+- Logps/chosen: -159.4172
+- Logits/rejected: -3.2283
+- Logits/chosen: -3.2283
 
 ## Model description
 
@@ -53,15 +53,16 @@ The following hyperparameters were used during training:
 - total_eval_batch_size: 64
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
+- lr_scheduler_warmup_ratio: 0.1
 - num_epochs: 1
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
 |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
-| 0.3573 | 0.29 | 100 | 0.6931 | -2.2646 | -2.2646 | 0.0 | 0.0 | -157.8416 | -157.8416 | -3.2259 | -3.2259 |
-| 0.3184 | 0.57 | 200 | 0.6931 | -1.8023 | -1.8023 | 0.0 | 0.0 | -157.3793 | -157.3793 | -3.2195 | -3.2195 |
-| 0.3594 | 0.86 | 300 | 0.6931 | -1.7904 | -1.7904 | 0.0 | 0.0 | -157.3674 | -157.3674 | -3.2202 | -3.2202 |
+| 0.3536 | 0.29 | 100 | 0.6931 | -1.3869 | -1.3869 | 0.0 | 0.0 | -158.3509 | -158.3509 | -3.2370 | -3.2370 |
+| 0.3091 | 0.57 | 200 | 0.6931 | -1.8814 | -1.8814 | 0.0 | 0.0 | -159.3398 | -159.3398 | -3.2304 | -3.2304 |
+| 0.3363 | 0.86 | 300 | 0.6931 | -1.9201 | -1.9201 | 0.0 | 0.0 | -159.4172 | -159.4172 | -3.2283 | -3.2283 |
 
 
 ### Framework versions
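The updated evaluation numbers follow the same pattern as before: the validation loss stays pinned at 0.6931 while Rewards/chosen equals Rewards/rejected and the margin is 0.0. These metric names match TRL's DPO-style preference trainers; assuming that objective, a zero reward margin makes the loss collapse to ln 2 ≈ 0.6931, which can be checked with a few lines of arithmetic (a sketch only, not code from this repository):

```python
import math

# Sketch: a DPO-style objective is -log(sigmoid(margin)), where
# margin = rewards/chosen - rewards/rejected. With the margin at 0.0
# (chosen and rejected scored identically), the loss is exactly ln(2).
def preference_loss(margin: float) -> float:
    return -math.log(1.0 / (1.0 + math.exp(-margin)))

print(preference_loss(0.0))  # 0.6931... -> matches the reported eval loss
print(math.log(2.0))         # 0.6931471805599453
```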
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
   "epoch": 1.0,
-  "train_loss": 0.36299856867109026,
-  "train_runtime": 5294.123,
+  "train_loss": 0.37015721593584333,
+  "train_runtime": 5302.4484,
   "train_samples": 44755,
-  "train_samples_per_second": 8.454,
+  "train_samples_per_second": 8.44,
   "train_steps_per_second": 0.066
 }
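The runtime and throughput figures in this file are mutually consistent, which is worth sanity-checking whenever a rerun changes them. A minimal check, using the new values from this commit and the final step count of 350 recorded in trainer_state.json further down:

```python
# Quick consistency check of the reported throughput figures.
train_samples = 44755
train_runtime = 5302.4484      # seconds, new value in this commit
total_steps = 350              # last step logged in trainer_state.json

print(round(train_samples / train_runtime, 2))  # ~8.44 samples per second
print(round(total_steps / train_runtime, 3))    # ~0.066 steps per second
```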
config.json CHANGED
@@ -20,7 +20,7 @@
   "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.41.1",
+  "transformers_version": "4.38.2",
   "use_cache": false,
   "vocab_size": 32002
 }
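The only change here is the `transformers_version` stamp, i.e. the library version that serialized the config (it moves backwards, from 4.41.1 to 4.38.2). A small sketch for inspecting that field in any checkpoint, using plain `json` so it makes no assumptions about the installed library:

```python
import json

# Read the version stamp straight from the checkpoint's config.json.
with open("config.json") as f:
    config = json.load(f)

print(config["transformers_version"])               # "4.38.2" after this commit
print(config["torch_dtype"], config["vocab_size"])  # "bfloat16", 32002
```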
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3d36163549571106621ebac26e38ec0b50cd9e0d81a1c0f2736d167eeed2d940
+oid sha256:1f8493cd3d8633107f37f766933768b972a71580f90b096571fcc9f7f95e4a96
 size 4943178720
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:60f039e2e1484a47443bf956b71679bc1af07034027486d7035fb8ea530cd16f
+oid sha256:8f5824ec54fb5bda3f68132a93217d0b800db90a78fb0515b4b723ed929035a3
 size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:552efc8fed558e148b25650586e753393bf9251331d7541a0c3aa3f226b236d6
+oid sha256:d89f85d62fe83d0e63f9373321633097e7aab9bbb8ae37954505ffd93eb3af47
 size 4540532728
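Each of these entries is a Git LFS pointer file (spec version, sha256 oid, byte size), so the weight shards themselves changed while their sizes stayed identical. A hedged sketch for verifying a downloaded shard against the oid and size recorded above (the local file path is illustrative):

```python
import hashlib
import os

def verify_lfs_object(path: str, expected_sha256: str, expected_size: int) -> bool:
    """Compare a local file against the oid/size from a Git LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256

# Values copied from the model-00001-of-00003.safetensors pointer above.
print(verify_lfs_object(
    "model-00001-of-00003.safetensors",
    "1f8493cd3d8633107f37f766933768b972a71580f90b096571fcc9f7f95e4a96",
    4943178720,
))
```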
runs/May31_01-11-22_n136-129-074/events.out.tfevents.1717089214.n136-129-074.2749411.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:275796e5bd929ae6c4f759e110bd7ec0875146360faacbd21a3dd6c334629515
-size 28347
+oid sha256:083d5da614984a5ada392826e0cad02ab77a8ccea77830bc83c44603b6d47169
+size 32141
tokenizer.json CHANGED
@@ -152,7 +152,6 @@
 "end_of_word_suffix": null,
 "fuse_unk": true,
 "byte_fallback": true,
-"ignore_merges": false,
 "vocab": {
 "<unk>": 0,
 "<s>": 1,
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
   "epoch": 1.0,
-  "train_loss": 0.36299856867109026,
-  "train_runtime": 5294.123,
+  "train_loss": 0.37015721593584333,
+  "train_runtime": 5302.4484,
   "train_samples": 44755,
-  "train_samples_per_second": 8.454,
+  "train_samples_per_second": 8.44,
   "train_steps_per_second": 0.066
 }
trainer_state.json CHANGED
@@ -10,8 +10,8 @@
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
- "grad_norm": 3067.8628948133914,
14
- "learning_rate": 4.9998992904271775e-08,
15
  "logits/chosen": -4.185730934143066,
16
  "logits/rejected": -4.509836196899414,
17
  "logps/chosen": -274.000732421875,
@@ -25,584 +25,584 @@
25
  },
26
  {
27
  "epoch": 0.03,
28
- "grad_norm": 3330.3974170986107,
29
- "learning_rate": 4.9899357349880975e-08,
30
- "logits/chosen": -4.211880207061768,
31
- "logits/rejected": -4.48573637008667,
32
- "logps/chosen": -318.31072998046875,
33
- "logps/rejected": -257.18267822265625,
34
- "loss": 0.7459,
35
- "rewards/accuracies": 0.5625,
36
- "rewards/chosen": 0.200405091047287,
37
- "rewards/margins": 0.10155472159385681,
38
- "rewards/rejected": 0.09885036945343018,
39
  "step": 10
40
  },
41
  {
42
  "epoch": 0.06,
43
- "grad_norm": 2932.727170813642,
44
- "learning_rate": 4.959823971496574e-08,
45
- "logits/chosen": -4.2464704513549805,
46
- "logits/rejected": -4.50115966796875,
47
- "logps/chosen": -304.53350830078125,
48
- "logps/rejected": -244.1282501220703,
49
- "loss": 0.6293,
50
- "rewards/accuracies": 0.731249988079071,
51
- "rewards/chosen": 0.7030802965164185,
52
- "rewards/margins": 0.6052380800247192,
53
- "rewards/rejected": 0.09784229844808578,
54
  "step": 20
55
  },
56
  {
57
  "epoch": 0.09,
58
- "grad_norm": 2159.097276891197,
59
- "learning_rate": 4.9099071517396326e-08,
60
- "logits/chosen": -4.3018364906311035,
61
- "logits/rejected": -4.5636820793151855,
62
- "logps/chosen": -305.11822509765625,
63
- "logps/rejected": -258.89215087890625,
64
- "loss": 0.5093,
65
- "rewards/accuracies": 0.71875,
66
- "rewards/chosen": 1.3964869976043701,
67
- "rewards/margins": 0.9537334442138672,
68
- "rewards/rejected": 0.44275355339050293,
69
  "step": 30
70
  },
71
  {
72
  "epoch": 0.11,
73
- "grad_norm": 2233.10446662558,
74
- "learning_rate": 4.8405871765993426e-08,
75
- "logits/chosen": -4.304145812988281,
76
- "logits/rejected": -4.571420192718506,
77
- "logps/chosen": -293.4151916503906,
78
- "logps/rejected": -234.4054412841797,
79
- "loss": 0.4371,
80
- "rewards/accuracies": 0.7437499761581421,
81
- "rewards/chosen": 2.119215488433838,
82
- "rewards/margins": 1.3193193674087524,
83
- "rewards/rejected": 0.7998961806297302,
84
  "step": 40
85
  },
86
  {
87
  "epoch": 0.14,
88
- "grad_norm": 1863.9092640792912,
89
- "learning_rate": 4.7524221697560474e-08,
90
- "logits/chosen": -4.298985481262207,
91
- "logits/rejected": -4.545313835144043,
92
- "logps/chosen": -299.71026611328125,
93
- "logps/rejected": -252.57339477539062,
94
- "loss": 0.4054,
95
- "rewards/accuracies": 0.793749988079071,
96
- "rewards/chosen": 3.015381336212158,
97
- "rewards/margins": 1.8283360004425049,
98
- "rewards/rejected": 1.1870452165603638,
99
  "step": 50
100
  },
101
  {
102
  "epoch": 0.17,
103
- "grad_norm": 1861.0742759245438,
104
- "learning_rate": 4.646121984004665e-08,
105
- "logits/chosen": -4.3018717765808105,
106
- "logits/rejected": -4.5299859046936035,
107
- "logps/chosen": -308.25457763671875,
108
- "logps/rejected": -261.1996154785156,
109
- "loss": 0.3815,
110
- "rewards/accuracies": 0.793749988079071,
111
- "rewards/chosen": 3.097055673599243,
112
- "rewards/margins": 1.6846046447753906,
113
- "rewards/rejected": 1.412451148033142,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.2,
118
- "grad_norm": 2083.1341477087894,
119
- "learning_rate": 4.522542485937369e-08,
120
- "logits/chosen": -4.417206764221191,
121
- "logits/rejected": -4.548245429992676,
122
- "logps/chosen": -285.4747009277344,
123
- "logps/rejected": -236.24136352539062,
124
- "loss": 0.3773,
125
- "rewards/accuracies": 0.8062499761581421,
126
- "rewards/chosen": 3.4294419288635254,
127
- "rewards/margins": 2.4485509395599365,
128
- "rewards/rejected": 0.9808910489082336,
129
  "step": 70
130
  },
131
  {
132
  "epoch": 0.23,
133
- "grad_norm": 1999.1118673285923,
134
- "learning_rate": 4.3826786650090273e-08,
135
- "logits/chosen": -4.271725177764893,
136
- "logits/rejected": -4.525103569030762,
137
- "logps/chosen": -292.2157897949219,
138
- "logps/rejected": -239.5623321533203,
139
- "loss": 0.3663,
140
- "rewards/accuracies": 0.856249988079071,
141
- "rewards/chosen": 3.471898317337036,
142
- "rewards/margins": 2.5827386379241943,
143
- "rewards/rejected": 0.8891592025756836,
144
  "step": 80
145
  },
146
  {
147
  "epoch": 0.26,
148
- "grad_norm": 1543.0151245523064,
149
- "learning_rate": 4.2276566224671614e-08,
150
- "logits/chosen": -4.196888446807861,
151
- "logits/rejected": -4.430451393127441,
152
- "logps/chosen": -303.9364929199219,
153
- "logps/rejected": -258.19708251953125,
154
- "loss": 0.37,
155
- "rewards/accuracies": 0.762499988079071,
156
- "rewards/chosen": 3.6267776489257812,
157
- "rewards/margins": 2.5005435943603516,
158
- "rewards/rejected": 1.1262344121932983,
159
  "step": 90
160
  },
161
  {
162
  "epoch": 0.29,
163
- "grad_norm": 2558.2358091969077,
164
- "learning_rate": 4.058724504646834e-08,
165
- "logits/chosen": -4.298203468322754,
166
- "logits/rejected": -4.51765251159668,
167
- "logps/chosen": -291.99151611328125,
168
- "logps/rejected": -240.97909545898438,
169
- "loss": 0.3573,
170
- "rewards/accuracies": 0.824999988079071,
171
- "rewards/chosen": 3.8364264965057373,
172
- "rewards/margins": 2.6143250465393066,
173
- "rewards/rejected": 1.2221016883850098,
174
  "step": 100
175
  },
176
  {
177
  "epoch": 0.29,
178
- "eval_logits/chosen": -3.2259409427642822,
179
- "eval_logits/rejected": -3.2259409427642822,
180
- "eval_logps/chosen": -157.8415985107422,
181
- "eval_logps/rejected": -157.8415985107422,
182
  "eval_loss": 0.6931471824645996,
183
  "eval_rewards/accuracies": 0.0,
184
- "eval_rewards/chosen": -2.2645912170410156,
185
  "eval_rewards/margins": 0.0,
186
- "eval_rewards/rejected": -2.2645912170410156,
187
- "eval_runtime": 1.5044,
188
- "eval_samples_per_second": 0.665,
189
- "eval_steps_per_second": 0.665,
190
  "step": 100
191
  },
192
  {
193
  "epoch": 0.31,
194
- "grad_norm": 2075.8470964199623,
195
- "learning_rate": 3.8772424536302564e-08,
196
- "logits/chosen": -4.3160247802734375,
197
- "logits/rejected": -4.557186126708984,
198
- "logps/chosen": -299.556640625,
199
- "logps/rejected": -250.2120361328125,
200
- "loss": 0.3653,
201
- "rewards/accuracies": 0.8687499761581421,
202
- "rewards/chosen": 3.8744053840637207,
203
- "rewards/margins": 2.781764268875122,
204
- "rewards/rejected": 1.0926413536071777,
205
  "step": 110
206
  },
207
  {
208
  "epoch": 0.34,
209
- "grad_norm": 2129.2578794603846,
210
- "learning_rate": 3.6846716561824964e-08,
211
- "logits/chosen": -4.358242988586426,
212
- "logits/rejected": -4.6036834716796875,
213
- "logps/chosen": -288.9602966308594,
214
- "logps/rejected": -237.98257446289062,
215
- "loss": 0.346,
216
- "rewards/accuracies": 0.8687499761581421,
217
- "rewards/chosen": 3.973881959915161,
218
- "rewards/margins": 2.8389506340026855,
219
- "rewards/rejected": 1.1349313259124756,
220
  "step": 120
221
  },
222
  {
223
  "epoch": 0.37,
224
- "grad_norm": 1374.3088736284383,
225
- "learning_rate": 3.482562579134809e-08,
226
- "logits/chosen": -4.360684871673584,
227
- "logits/rejected": -4.608490467071533,
228
- "logps/chosen": -278.861572265625,
229
- "logps/rejected": -218.7367706298828,
230
- "loss": 0.3426,
231
  "rewards/accuracies": 0.8374999761581421,
232
- "rewards/chosen": 3.8384926319122314,
233
- "rewards/margins": 2.634833812713623,
234
- "rewards/rejected": 1.2036586999893188,
235
  "step": 130
236
  },
237
  {
238
  "epoch": 0.4,
239
- "grad_norm": 1741.7465783603645,
240
- "learning_rate": 3.272542485937369e-08,
241
- "logits/chosen": -4.276978969573975,
242
- "logits/rejected": -4.593733787536621,
243
- "logps/chosen": -296.0984191894531,
244
- "logps/rejected": -240.01248168945312,
245
- "loss": 0.3729,
246
- "rewards/accuracies": 0.856249988079071,
247
- "rewards/chosen": 3.785256862640381,
248
- "rewards/margins": 2.9941701889038086,
249
- "rewards/rejected": 0.79108726978302,
250
  "step": 140
251
  },
252
  {
253
  "epoch": 0.43,
254
- "grad_norm": 1837.7137132104272,
255
- "learning_rate": 3.056302334890786e-08,
256
- "logits/chosen": -4.245262622833252,
257
- "logits/rejected": -4.510401725769043,
258
- "logps/chosen": -295.3984680175781,
259
- "logps/rejected": -250.73580932617188,
260
- "loss": 0.3235,
261
- "rewards/accuracies": 0.84375,
262
- "rewards/chosen": 4.011710166931152,
263
- "rewards/margins": 3.0462794303894043,
264
- "rewards/rejected": 0.9654304385185242,
265
  "step": 150
266
  },
267
  {
268
  "epoch": 0.46,
269
- "grad_norm": 1744.335126050233,
270
- "learning_rate": 2.8355831645441387e-08,
271
- "logits/chosen": -4.277425765991211,
272
- "logits/rejected": -4.570274829864502,
273
- "logps/chosen": -296.66839599609375,
274
- "logps/rejected": -235.6475372314453,
275
- "loss": 0.36,
276
  "rewards/accuracies": 0.8500000238418579,
277
- "rewards/chosen": 4.259499549865723,
278
- "rewards/margins": 3.127065658569336,
279
- "rewards/rejected": 1.1324341297149658,
280
  "step": 160
281
  },
282
  {
283
  "epoch": 0.49,
284
- "grad_norm": 1875.319827037545,
285
- "learning_rate": 2.6121620758762875e-08,
286
- "logits/chosen": -4.229983329772949,
287
- "logits/rejected": -4.467092990875244,
288
- "logps/chosen": -296.31683349609375,
289
- "logps/rejected": -241.3401336669922,
290
- "loss": 0.3474,
291
  "rewards/accuracies": 0.8687499761581421,
292
- "rewards/chosen": 4.343829154968262,
293
- "rewards/margins": 3.233609437942505,
294
- "rewards/rejected": 1.1102204322814941,
295
  "step": 170
296
  },
297
  {
298
  "epoch": 0.51,
299
- "grad_norm": 2082.5003671787076,
300
- "learning_rate": 2.3878379241237133e-08,
301
- "logits/chosen": -4.364750862121582,
302
- "logits/rejected": -4.597868919372559,
303
- "logps/chosen": -285.72869873046875,
304
- "logps/rejected": -241.40652465820312,
305
- "loss": 0.3417,
306
- "rewards/accuracies": 0.8374999761581421,
307
- "rewards/chosen": 4.1484293937683105,
308
- "rewards/margins": 3.0738511085510254,
309
- "rewards/rejected": 1.074578046798706,
310
  "step": 180
311
  },
312
  {
313
  "epoch": 0.54,
314
- "grad_norm": 1597.9774938638957,
315
- "learning_rate": 2.164416835455862e-08,
316
- "logits/chosen": -4.3281121253967285,
317
- "logits/rejected": -4.498069763183594,
318
- "logps/chosen": -308.14776611328125,
319
- "logps/rejected": -257.7415466308594,
320
- "loss": 0.2852,
321
- "rewards/accuracies": 0.875,
322
- "rewards/chosen": 4.146700859069824,
323
- "rewards/margins": 3.202249526977539,
324
- "rewards/rejected": 0.9444509744644165,
325
  "step": 190
326
  },
327
  {
328
  "epoch": 0.57,
329
- "grad_norm": 1601.8580723204816,
330
- "learning_rate": 1.943697665109214e-08,
331
- "logits/chosen": -4.358348846435547,
332
- "logits/rejected": -4.601215839385986,
333
- "logps/chosen": -292.93658447265625,
334
- "logps/rejected": -249.59469604492188,
335
- "loss": 0.3184,
336
- "rewards/accuracies": 0.824999988079071,
337
- "rewards/chosen": 4.194998741149902,
338
- "rewards/margins": 2.974621534347534,
339
- "rewards/rejected": 1.2203772068023682,
340
  "step": 200
341
  },
342
  {
343
  "epoch": 0.57,
344
- "eval_logits/chosen": -3.2195205688476562,
345
- "eval_logits/rejected": -3.2195205688476562,
346
- "eval_logps/chosen": -157.37933349609375,
347
- "eval_logps/rejected": -157.37933349609375,
348
  "eval_loss": 0.6931471824645996,
349
  "eval_rewards/accuracies": 0.0,
350
- "eval_rewards/chosen": -1.8023262023925781,
351
  "eval_rewards/margins": 0.0,
352
- "eval_rewards/rejected": -1.8023262023925781,
353
- "eval_runtime": 1.4741,
354
- "eval_samples_per_second": 0.678,
355
- "eval_steps_per_second": 0.678,
356
  "step": 200
357
  },
358
  {
359
  "epoch": 0.6,
360
- "grad_norm": 1818.1510653253358,
361
- "learning_rate": 1.7274575140626317e-08,
362
- "logits/chosen": -4.293700218200684,
363
- "logits/rejected": -4.587708473205566,
364
- "logps/chosen": -306.94647216796875,
365
- "logps/rejected": -254.83981323242188,
366
- "loss": 0.3169,
367
- "rewards/accuracies": 0.8374999761581421,
368
- "rewards/chosen": 4.274092674255371,
369
- "rewards/margins": 3.556690216064453,
370
- "rewards/rejected": 0.7174022793769836,
371
  "step": 210
372
  },
373
  {
374
  "epoch": 0.63,
375
- "grad_norm": 2084.9707047014217,
376
- "learning_rate": 1.517437420865191e-08,
377
- "logits/chosen": -4.2438554763793945,
378
- "logits/rejected": -4.590119361877441,
379
- "logps/chosen": -297.3277587890625,
380
- "logps/rejected": -225.09414672851562,
381
  "loss": 0.3117,
382
- "rewards/accuracies": 0.862500011920929,
383
- "rewards/chosen": 4.186089515686035,
384
- "rewards/margins": 3.6873459815979004,
385
- "rewards/rejected": 0.4987434446811676,
386
  "step": 220
387
  },
388
  {
389
  "epoch": 0.66,
390
- "grad_norm": 1793.5243127965375,
391
- "learning_rate": 1.3153283438175034e-08,
392
- "logits/chosen": -4.3719801902771,
393
- "logits/rejected": -4.563234806060791,
394
- "logps/chosen": -281.373779296875,
395
- "logps/rejected": -226.25576782226562,
396
- "loss": 0.2879,
397
- "rewards/accuracies": 0.831250011920929,
398
- "rewards/chosen": 3.584429979324341,
399
- "rewards/margins": 2.8807406425476074,
400
- "rewards/rejected": 0.7036892771720886,
401
  "step": 230
402
  },
403
  {
404
  "epoch": 0.69,
405
- "grad_norm": 1621.528952660571,
406
- "learning_rate": 1.1227575463697438e-08,
407
- "logits/chosen": -4.3936567306518555,
408
- "logits/rejected": -4.714280128479004,
409
- "logps/chosen": -258.6517639160156,
410
- "logps/rejected": -215.28759765625,
411
- "loss": 0.3042,
412
- "rewards/accuracies": 0.831250011920929,
413
- "rewards/chosen": 3.862626552581787,
414
- "rewards/margins": 3.0625431537628174,
415
- "rewards/rejected": 0.8000835180282593,
416
  "step": 240
417
  },
418
  {
419
  "epoch": 0.71,
420
- "grad_norm": 2231.5682374793205,
421
- "learning_rate": 9.412754953531663e-09,
422
- "logits/chosen": -4.34213924407959,
423
- "logits/rejected": -4.6162428855896,
424
- "logps/chosen": -278.9085388183594,
425
- "logps/rejected": -232.6056365966797,
426
- "loss": 0.3109,
427
- "rewards/accuracies": 0.831250011920929,
428
- "rewards/chosen": 3.6670002937316895,
429
- "rewards/margins": 2.8657121658325195,
430
- "rewards/rejected": 0.8012881278991699,
431
  "step": 250
432
  },
433
  {
434
  "epoch": 0.74,
435
- "grad_norm": 1668.5476234310504,
436
- "learning_rate": 7.723433775328384e-09,
437
- "logits/chosen": -4.386145114898682,
438
- "logits/rejected": -4.632050037384033,
439
- "logps/chosen": -271.8704833984375,
440
- "logps/rejected": -240.48257446289062,
441
- "loss": 0.3039,
442
- "rewards/accuracies": 0.862500011920929,
443
- "rewards/chosen": 3.5878806114196777,
444
- "rewards/margins": 2.9320101737976074,
445
- "rewards/rejected": 0.6558703184127808,
446
  "step": 260
447
  },
448
  {
449
  "epoch": 0.77,
450
- "grad_norm": 1726.7631750123023,
451
- "learning_rate": 6.173213349909728e-09,
452
- "logits/chosen": -4.517698764801025,
453
- "logits/rejected": -4.687317848205566,
454
- "logps/chosen": -273.4754943847656,
455
- "logps/rejected": -228.2833709716797,
456
- "loss": 0.3356,
457
- "rewards/accuracies": 0.793749988079071,
458
- "rewards/chosen": 3.6022744178771973,
459
- "rewards/margins": 2.6589503288269043,
460
- "rewards/rejected": 0.9433239698410034,
461
  "step": 270
462
  },
463
  {
464
  "epoch": 0.8,
465
- "grad_norm": 1197.1122441391342,
466
- "learning_rate": 4.7745751406263165e-09,
467
- "logits/chosen": -4.299304008483887,
468
- "logits/rejected": -4.589285850524902,
469
- "logps/chosen": -274.9901123046875,
470
- "logps/rejected": -229.76449584960938,
471
- "loss": 0.2791,
472
- "rewards/accuracies": 0.893750011920929,
473
- "rewards/chosen": 4.094995021820068,
474
- "rewards/margins": 3.0975327491760254,
475
- "rewards/rejected": 0.997462272644043,
476
  "step": 280
477
  },
478
  {
479
  "epoch": 0.83,
480
- "grad_norm": 2356.4193384705377,
481
- "learning_rate": 3.5387801599533474e-09,
482
- "logits/chosen": -4.320891857147217,
483
- "logits/rejected": -4.508334636688232,
484
- "logps/chosen": -282.45013427734375,
485
- "logps/rejected": -236.50424194335938,
486
- "loss": 0.3316,
487
- "rewards/accuracies": 0.8812500238418579,
488
- "rewards/chosen": 4.115664958953857,
489
- "rewards/margins": 3.2731566429138184,
490
- "rewards/rejected": 0.8425084948539734,
491
  "step": 290
492
  },
493
  {
494
  "epoch": 0.86,
495
- "grad_norm": 1485.14332328563,
496
- "learning_rate": 2.475778302439524e-09,
497
- "logits/chosen": -4.295617580413818,
498
- "logits/rejected": -4.5400543212890625,
499
- "logps/chosen": -298.4153137207031,
500
- "logps/rejected": -240.1478271484375,
501
- "loss": 0.3594,
502
- "rewards/accuracies": 0.831250011920929,
503
- "rewards/chosen": 4.416214942932129,
504
- "rewards/margins": 3.1984994411468506,
505
- "rewards/rejected": 1.2177152633666992,
506
  "step": 300
507
  },
508
  {
509
  "epoch": 0.86,
510
- "eval_logits/chosen": -3.220174551010132,
511
- "eval_logits/rejected": -3.220174551010132,
512
- "eval_logps/chosen": -157.367431640625,
513
- "eval_logps/rejected": -157.367431640625,
514
  "eval_loss": 0.6931471824645996,
515
  "eval_rewards/accuracies": 0.0,
516
- "eval_rewards/chosen": -1.7904319763183594,
517
  "eval_rewards/margins": 0.0,
518
- "eval_rewards/rejected": -1.7904319763183594,
519
- "eval_runtime": 1.47,
520
- "eval_samples_per_second": 0.68,
521
- "eval_steps_per_second": 0.68,
522
  "step": 300
523
  },
524
  {
525
  "epoch": 0.89,
526
- "grad_norm": 2625.0873445651387,
527
- "learning_rate": 1.5941282340065698e-09,
528
- "logits/chosen": -4.43851900100708,
529
- "logits/rejected": -4.580752372741699,
530
- "logps/chosen": -262.37445068359375,
531
- "logps/rejected": -226.46572875976562,
532
- "loss": 0.3007,
533
- "rewards/accuracies": 0.856249988079071,
534
- "rewards/chosen": 3.741738796234131,
535
- "rewards/margins": 2.9144444465637207,
536
- "rewards/rejected": 0.8272944688796997,
537
  "step": 310
538
  },
539
  {
540
  "epoch": 0.91,
541
- "grad_norm": 1589.6112135444553,
542
- "learning_rate": 9.009284826036689e-10,
543
- "logits/chosen": -4.277141094207764,
544
- "logits/rejected": -4.5314412117004395,
545
- "logps/chosen": -292.65875244140625,
546
- "logps/rejected": -243.8509063720703,
547
- "loss": 0.3277,
548
- "rewards/accuracies": 0.8687499761581421,
549
- "rewards/chosen": 4.2250542640686035,
550
- "rewards/margins": 3.1320207118988037,
551
- "rewards/rejected": 1.093034029006958,
552
  "step": 320
553
  },
554
  {
555
  "epoch": 0.94,
556
- "grad_norm": 2192.855370501752,
557
- "learning_rate": 4.017602850342583e-10,
558
- "logits/chosen": -4.330888271331787,
559
- "logits/rejected": -4.536975383758545,
560
- "logps/chosen": -305.5764465332031,
561
- "logps/rejected": -252.0467529296875,
562
- "loss": 0.3203,
563
- "rewards/accuracies": 0.8687499761581421,
564
- "rewards/chosen": 4.009243488311768,
565
- "rewards/margins": 3.056270122528076,
566
- "rewards/rejected": 0.9529730677604675,
567
  "step": 330
568
  },
569
  {
570
  "epoch": 0.97,
571
- "grad_norm": 2158.7231383937637,
572
- "learning_rate": 1.0064265011902328e-10,
573
- "logits/chosen": -4.319821357727051,
574
- "logits/rejected": -4.614516735076904,
575
- "logps/chosen": -285.194091796875,
576
- "logps/rejected": -227.5124053955078,
577
- "loss": 0.3239,
578
- "rewards/accuracies": 0.862500011920929,
579
- "rewards/chosen": 3.751185178756714,
580
- "rewards/margins": 2.758882522583008,
581
- "rewards/rejected": 0.9923027753829956,
582
  "step": 340
583
  },
584
  {
585
  "epoch": 1.0,
586
- "grad_norm": 1350.3403367664616,
587
  "learning_rate": 0.0,
588
- "logits/chosen": -4.290497779846191,
589
- "logits/rejected": -4.4949870109558105,
590
- "logps/chosen": -291.93768310546875,
591
- "logps/rejected": -244.3520965576172,
592
- "loss": 0.3142,
593
- "rewards/accuracies": 0.8187500238418579,
594
- "rewards/chosen": 4.083470344543457,
595
- "rewards/margins": 3.2036800384521484,
596
- "rewards/rejected": 0.8797903060913086,
597
  "step": 350
598
  },
599
  {
600
  "epoch": 1.0,
601
  "step": 350,
602
  "total_flos": 0.0,
603
- "train_loss": 0.36299856867109026,
604
- "train_runtime": 5294.123,
605
- "train_samples_per_second": 8.454,
606
  "train_steps_per_second": 0.066
607
  }
608
  ],
 
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
+ "grad_norm": 1533.9529579439338,
14
+ "learning_rate": 1.4285714285714284e-09,
15
  "logits/chosen": -4.185730934143066,
16
  "logits/rejected": -4.509836196899414,
17
  "logps/chosen": -274.000732421875,
 
25
  },
26
  {
27
  "epoch": 0.03,
28
+ "grad_norm": 1798.989663851012,
29
+ "learning_rate": 1.4285714285714284e-08,
30
+ "logits/chosen": -4.211680889129639,
31
+ "logits/rejected": -4.4850640296936035,
32
+ "logps/chosen": -318.5819091796875,
33
+ "logps/rejected": -257.2215270996094,
34
+ "loss": 0.7173,
35
+ "rewards/accuracies": 0.3888888955116272,
36
+ "rewards/chosen": -0.03539733216166496,
37
+ "rewards/margins": -0.0653969869017601,
38
+ "rewards/rejected": 0.02999965287744999,
39
  "step": 10
40
  },
41
  {
42
  "epoch": 0.06,
43
+ "grad_norm": 1668.4002920396833,
44
+ "learning_rate": 2.857142857142857e-08,
45
+ "logits/chosen": -4.237612724304199,
46
+ "logits/rejected": -4.492175102233887,
47
+ "logps/chosen": -305.21356201171875,
48
+ "logps/rejected": -244.23391723632812,
49
+ "loss": 0.7172,
50
+ "rewards/accuracies": 0.5249999761581421,
51
+ "rewards/chosen": 0.011527794413268566,
52
+ "rewards/margins": 0.015434956178069115,
53
+ "rewards/rejected": -0.003907163627445698,
54
  "step": 20
55
  },
56
  {
57
  "epoch": 0.09,
58
+ "grad_norm": 1590.2675307754077,
59
+ "learning_rate": 4.285714285714285e-08,
60
+ "logits/chosen": -4.289905548095703,
61
+ "logits/rejected": -4.550080299377441,
62
+ "logps/chosen": -306.20733642578125,
63
+ "logps/rejected": -259.1973571777344,
64
+ "loss": 0.6742,
65
+ "rewards/accuracies": 0.581250011920929,
66
+ "rewards/chosen": 0.15369151532649994,
67
+ "rewards/margins": 0.08493399620056152,
68
+ "rewards/rejected": 0.06875751912593842,
69
  "step": 30
70
  },
71
  {
72
  "epoch": 0.11,
73
+ "grad_norm": 1234.2347053418307,
74
+ "learning_rate": 4.996892303047306e-08,
75
+ "logits/chosen": -4.294736862182617,
76
+ "logits/rejected": -4.560345649719238,
77
+ "logps/chosen": -294.7554931640625,
78
+ "logps/rejected": -234.93032836914062,
79
+ "loss": 0.5901,
80
+ "rewards/accuracies": 0.6312500238418579,
81
+ "rewards/chosen": 0.3894590735435486,
82
+ "rewards/margins": 0.2519429624080658,
83
+ "rewards/rejected": 0.1375161111354828,
84
  "step": 40
85
  },
86
  {
87
  "epoch": 0.14,
88
+ "grad_norm": 1024.357051064495,
89
+ "learning_rate": 4.972077065562821e-08,
90
+ "logits/chosen": -4.300290584564209,
91
+ "logits/rejected": -4.545838832855225,
92
+ "logps/chosen": -300.6907043457031,
93
+ "logps/rejected": -252.9388427734375,
94
+ "loss": 0.4845,
95
+ "rewards/accuracies": 0.78125,
96
+ "rewards/chosen": 1.0174586772918701,
97
+ "rewards/margins": 0.6066709160804749,
98
+ "rewards/rejected": 0.41078776121139526,
99
  "step": 50
100
  },
101
  {
102
  "epoch": 0.17,
103
+ "grad_norm": 993.3434930666515,
104
+ "learning_rate": 4.922693215572695e-08,
105
+ "logits/chosen": -4.309796333312988,
106
+ "logits/rejected": -4.538843154907227,
107
+ "logps/chosen": -308.38818359375,
108
+ "logps/rejected": -261.2379150390625,
109
+ "loss": 0.4421,
110
+ "rewards/accuracies": 0.7437499761581421,
111
+ "rewards/chosen": 1.4816973209381104,
112
+ "rewards/margins": 0.7946016192436218,
113
+ "rewards/rejected": 0.6870955228805542,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.2,
118
+ "grad_norm": 1067.0305947544678,
119
+ "learning_rate": 4.849231551964771e-08,
120
+ "logits/chosen": -4.422641277313232,
121
+ "logits/rejected": -4.553921699523926,
122
+ "logps/chosen": -285.1670227050781,
123
+ "logps/rejected": -236.08352661132812,
124
+ "loss": 0.4036,
125
+ "rewards/accuracies": 0.8187500238418579,
126
+ "rewards/chosen": 1.8685623407363892,
127
+ "rewards/margins": 1.2992055416107178,
128
+ "rewards/rejected": 0.5693566203117371,
129
  "step": 70
130
  },
131
  {
132
  "epoch": 0.23,
133
+ "grad_norm": 1032.775358630634,
134
+ "learning_rate": 4.7524221697560474e-08,
135
+ "logits/chosen": -4.270499229431152,
136
+ "logits/rejected": -4.5226149559021,
137
+ "logps/chosen": -291.38165283203125,
138
+ "logps/rejected": -239.2751922607422,
139
+ "loss": 0.379,
140
+ "rewards/accuracies": 0.8687499761581421,
141
+ "rewards/chosen": 2.1530368328094482,
142
+ "rewards/margins": 1.5649009943008423,
143
+ "rewards/rejected": 0.5881360769271851,
144
  "step": 80
145
  },
146
  {
147
  "epoch": 0.26,
148
+ "grad_norm": 872.5516189664596,
149
+ "learning_rate": 4.633227204080389e-08,
150
+ "logits/chosen": -4.196683406829834,
151
+ "logits/rejected": -4.429889678955078,
152
+ "logps/chosen": -302.6337890625,
153
+ "logps/rejected": -257.6473388671875,
154
+ "loss": 0.3672,
155
+ "rewards/accuracies": 0.78125,
156
+ "rewards/chosen": 2.4647421836853027,
157
+ "rewards/margins": 1.6267496347427368,
158
+ "rewards/rejected": 0.8379926681518555,
159
  "step": 90
160
  },
161
  {
162
  "epoch": 0.29,
163
+ "grad_norm": 1367.4563539236096,
164
+ "learning_rate": 4.4928312680573064e-08,
165
+ "logits/chosen": -4.303212642669678,
166
+ "logits/rejected": -4.523682594299316,
167
+ "logps/chosen": -290.59271240234375,
168
+ "logps/rejected": -240.2918701171875,
169
+ "loss": 0.3536,
170
+ "rewards/accuracies": 0.8374999761581421,
171
+ "rewards/chosen": 2.6176071166992188,
172
+ "rewards/margins": 1.6629337072372437,
173
+ "rewards/rejected": 0.9546731114387512,
174
  "step": 100
175
  },
176
  {
177
  "epoch": 0.29,
178
+ "eval_logits/chosen": -3.2369751930236816,
179
+ "eval_logits/rejected": -3.2369751930236816,
180
+ "eval_logps/chosen": -158.35089111328125,
181
+ "eval_logps/rejected": -158.35089111328125,
182
  "eval_loss": 0.6931471824645996,
183
  "eval_rewards/accuracies": 0.0,
184
+ "eval_rewards/chosen": -1.3869400024414062,
185
  "eval_rewards/margins": 0.0,
186
+ "eval_rewards/rejected": -1.3869400024414062,
187
+ "eval_runtime": 1.4807,
188
+ "eval_samples_per_second": 0.675,
189
+ "eval_steps_per_second": 0.675,
190
  "step": 100
191
  },
192
  {
193
  "epoch": 0.31,
194
+ "grad_norm": 934.0407259676991,
195
+ "learning_rate": 4.3326296795745654e-08,
196
+ "logits/chosen": -4.324947357177734,
197
+ "logits/rejected": -4.566166400909424,
198
+ "logps/chosen": -297.800537109375,
199
+ "logps/rejected": -249.71450805664062,
200
+ "loss": 0.3471,
201
+ "rewards/accuracies": 0.8812500238418579,
202
+ "rewards/chosen": 2.8152554035186768,
203
+ "rewards/margins": 2.0201711654663086,
204
+ "rewards/rejected": 0.7950841784477234,
205
  "step": 110
206
  },
207
  {
208
  "epoch": 0.34,
209
+ "grad_norm": 1096.418461846408,
210
+ "learning_rate": 4.1542145939921484e-08,
211
+ "logits/chosen": -4.368184566497803,
212
+ "logits/rejected": -4.613701820373535,
213
+ "logps/chosen": -287.2909851074219,
214
+ "logps/rejected": -237.5634002685547,
215
+ "loss": 0.3344,
216
+ "rewards/accuracies": 0.862500011920929,
217
+ "rewards/chosen": 2.821599245071411,
218
+ "rewards/margins": 2.0445477962493896,
219
+ "rewards/rejected": 0.7770514488220215,
220
  "step": 120
221
  },
222
  {
223
  "epoch": 0.37,
224
+ "grad_norm": 722.1338343165834,
225
+ "learning_rate": 3.959359180586975e-08,
226
+ "logits/chosen": -4.369457244873047,
227
+ "logits/rejected": -4.6166582107543945,
228
+ "logps/chosen": -277.1622314453125,
229
+ "logps/rejected": -218.28323364257812,
230
+ "loss": 0.3351,
231
  "rewards/accuracies": 0.8374999761581421,
232
+ "rewards/chosen": 2.7689156532287598,
233
+ "rewards/margins": 1.9403083324432373,
234
+ "rewards/rejected": 0.8286076784133911,
235
  "step": 130
236
  },
237
  {
238
  "epoch": 0.4,
239
+ "grad_norm": 854.554160074848,
240
+ "learning_rate": 3.75e-08,
241
+ "logits/chosen": -4.2873382568359375,
242
+ "logits/rejected": -4.604461669921875,
243
+ "logps/chosen": -294.3111267089844,
244
+ "logps/rejected": -239.65628051757812,
245
+ "loss": 0.3544,
246
+ "rewards/accuracies": 0.893750011920929,
247
+ "rewards/chosen": 2.786259174346924,
248
+ "rewards/margins": 2.21262526512146,
249
+ "rewards/rejected": 0.5736337900161743,
250
  "step": 140
251
  },
252
  {
253
  "epoch": 0.43,
254
+ "grad_norm": 1024.227354138342,
255
+ "learning_rate": 3.5282177578265295e-08,
256
+ "logits/chosen": -4.251392364501953,
257
+ "logits/rejected": -4.515843391418457,
258
+ "logps/chosen": -293.588623046875,
259
+ "logps/rejected": -250.3025360107422,
260
+ "loss": 0.3157,
261
+ "rewards/accuracies": 0.862500011920929,
262
+ "rewards/chosen": 2.910778045654297,
263
+ "rewards/margins": 2.2114222049713135,
264
+ "rewards/rejected": 0.6993557810783386,
265
  "step": 150
266
  },
267
  {
268
  "epoch": 0.46,
269
+ "grad_norm": 958.7773617526785,
270
+ "learning_rate": 3.296216625629211e-08,
271
+ "logits/chosen": -4.2807087898254395,
272
+ "logits/rejected": -4.571825981140137,
273
+ "logps/chosen": -294.7529296875,
274
+ "logps/rejected": -235.1978759765625,
275
+ "loss": 0.3382,
276
  "rewards/accuracies": 0.8500000238418579,
277
+ "rewards/chosen": 3.0874907970428467,
278
+ "rewards/margins": 2.296449899673462,
279
+ "rewards/rejected": 0.7910411953926086,
280
  "step": 160
281
  },
282
  {
283
  "epoch": 0.49,
284
+ "grad_norm": 844.9738579712869,
285
+ "learning_rate": 3.056302334890786e-08,
286
+ "logits/chosen": -4.230380058288574,
287
+ "logits/rejected": -4.466236114501953,
288
+ "logps/chosen": -294.3287658691406,
289
+ "logps/rejected": -241.07357788085938,
290
+ "loss": 0.3284,
291
  "rewards/accuracies": 0.8687499761581421,
292
+ "rewards/chosen": 3.165980815887451,
293
+ "rewards/margins": 2.4775989055633545,
294
+ "rewards/rejected": 0.6883817911148071,
295
  "step": 170
296
  },
297
  {
298
  "epoch": 0.51,
299
+ "grad_norm": 878.602697816289,
300
+ "learning_rate": 2.8108592616187133e-08,
301
+ "logits/chosen": -4.364335060119629,
302
+ "logits/rejected": -4.596997261047363,
303
+ "logps/chosen": -283.4359130859375,
304
+ "logps/rejected": -240.91796875,
305
+ "loss": 0.3163,
306
+ "rewards/accuracies": 0.875,
307
+ "rewards/chosen": 3.220620632171631,
308
+ "rewards/margins": 2.4390523433685303,
309
+ "rewards/rejected": 0.7815683484077454,
310
  "step": 180
311
  },
312
  {
313
  "epoch": 0.54,
314
+ "grad_norm": 831.8981359537083,
315
+ "learning_rate": 2.562326729345182e-08,
316
+ "logits/chosen": -4.325879096984863,
317
+ "logits/rejected": -4.494770526885986,
318
+ "logps/chosen": -306.01171875,
319
+ "logps/rejected": -257.24298095703125,
320
+ "loss": 0.2864,
321
+ "rewards/accuracies": 0.887499988079071,
322
+ "rewards/chosen": 3.1413779258728027,
323
+ "rewards/margins": 2.4198684692382812,
324
+ "rewards/rejected": 0.7215089797973633,
325
  "step": 190
326
  },
327
  {
328
  "epoch": 0.57,
329
+ "grad_norm": 752.1347907356251,
330
+ "learning_rate": 2.3131747660339392e-08,
331
+ "logits/chosen": -4.358603000640869,
332
+ "logits/rejected": -4.599839687347412,
333
+ "logps/chosen": -290.7633972167969,
334
+ "logps/rejected": -249.0517120361328,
335
+ "loss": 0.3091,
336
+ "rewards/accuracies": 0.856249988079071,
337
+ "rewards/chosen": 3.1841001510620117,
338
+ "rewards/margins": 2.3024184703826904,
339
+ "rewards/rejected": 0.8816817998886108,
340
  "step": 200
341
  },
342
  {
343
  "epoch": 0.57,
344
+ "eval_logits/chosen": -3.230353832244873,
345
+ "eval_logits/rejected": -3.230353832244873,
346
+ "eval_logps/chosen": -159.3397674560547,
347
+ "eval_logps/rejected": -159.3397674560547,
348
  "eval_loss": 0.6931471824645996,
349
  "eval_rewards/accuracies": 0.0,
350
+ "eval_rewards/chosen": -1.8813800811767578,
351
  "eval_rewards/margins": 0.0,
352
+ "eval_rewards/rejected": -1.8813800811767578,
353
+ "eval_runtime": 1.4678,
354
+ "eval_samples_per_second": 0.681,
355
+ "eval_steps_per_second": 0.681,
356
  "step": 200
357
  },
358
  {
359
  "epoch": 0.6,
360
+ "grad_norm": 873.0977005391466,
361
+ "learning_rate": 2.065879555832674e-08,
362
+ "logits/chosen": -4.293581962585449,
363
+ "logits/rejected": -4.587343692779541,
364
+ "logps/chosen": -304.852294921875,
365
+ "logps/rejected": -254.5345916748047,
366
+ "loss": 0.3125,
367
+ "rewards/accuracies": 0.887499988079071,
368
+ "rewards/chosen": 3.1841378211975098,
369
+ "rewards/margins": 2.6728169918060303,
370
+ "rewards/rejected": 0.5113206505775452,
371
  "step": 210
372
  },
373
  {
374
  "epoch": 0.63,
375
+ "grad_norm": 896.6576085052702,
376
+ "learning_rate": 1.8228988296424874e-08,
377
+ "logits/chosen": -4.245147705078125,
378
+ "logits/rejected": -4.591282367706299,
379
+ "logps/chosen": -295.0483703613281,
380
+ "logps/rejected": -224.8583984375,
381
  "loss": 0.3117,
382
+ "rewards/accuracies": 0.875,
383
+ "rewards/chosen": 3.232748031616211,
384
+ "rewards/margins": 2.8655002117156982,
385
+ "rewards/rejected": 0.367247611284256,
386
  "step": 220
387
  },
388
  {
389
  "epoch": 0.66,
390
+ "grad_norm": 896.8048647139675,
391
+ "learning_rate": 1.5866474390840123e-08,
392
+ "logits/chosen": -4.371578216552734,
393
+ "logits/rejected": -4.561336994171143,
394
+ "logps/chosen": -279.5164489746094,
395
+ "logps/rejected": -225.96630859375,
396
+ "loss": 0.2801,
397
+ "rewards/accuracies": 0.824999988079071,
398
+ "rewards/chosen": 2.720884323120117,
399
+ "rewards/margins": 2.2243082523345947,
400
+ "rewards/rejected": 0.4965757727622986,
401
  "step": 230
402
  },
403
  {
404
  "epoch": 0.69,
405
+ "grad_norm": 794.1572537064102,
406
+ "learning_rate": 1.3594733566170923e-08,
407
+ "logits/chosen": -4.393925666809082,
408
+ "logits/rejected": -4.711283206939697,
409
+ "logps/chosen": -256.6236877441406,
410
+ "logps/rejected": -215.0515594482422,
411
+ "loss": 0.3001,
412
+ "rewards/accuracies": 0.8374999761581421,
413
+ "rewards/chosen": 2.945354700088501,
414
+ "rewards/margins": 2.4272878170013428,
415
+ "rewards/rejected": 0.5180668830871582,
416
  "step": 240
417
  },
418
  {
419
  "epoch": 0.71,
420
+ "grad_norm": 1028.7241893624246,
421
+ "learning_rate": 1.1436343403356017e-08,
422
+ "logits/chosen": -4.338345527648926,
423
+ "logits/rejected": -4.6125383377075195,
424
+ "logps/chosen": -276.8526611328125,
425
+ "logps/rejected": -232.35494995117188,
426
+ "loss": 0.296,
427
+ "rewards/accuracies": 0.856249988079071,
428
+ "rewards/chosen": 2.86142897605896,
429
+ "rewards/margins": 2.335439443588257,
430
+ "rewards/rejected": 0.5259896516799927,
431
  "step": 250
432
  },
433
  {
434
  "epoch": 0.74,
435
+ "grad_norm": 955.6521893892625,
436
+ "learning_rate": 9.412754953531663e-09,
437
+ "logits/chosen": -4.384321212768555,
438
+ "logits/rejected": -4.630730628967285,
439
+ "logps/chosen": -269.96063232421875,
440
+ "logps/rejected": -240.08322143554688,
441
+ "loss": 0.2927,
442
+ "rewards/accuracies": 0.8500000238418579,
443
+ "rewards/chosen": 2.748866558074951,
444
+ "rewards/margins": 2.221247434616089,
445
+ "rewards/rejected": 0.5276187658309937,
446
  "step": 260
447
  },
448
  {
449
  "epoch": 0.77,
450
+ "grad_norm": 861.1823956058561,
451
+ "learning_rate": 7.544079547848182e-09,
452
+ "logits/chosen": -4.517904281616211,
453
+ "logits/rejected": -4.6855998039245605,
454
+ "logps/chosen": -271.70074462890625,
455
+ "logps/rejected": -227.885009765625,
456
+ "loss": 0.3225,
457
+ "rewards/accuracies": 0.78125,
458
+ "rewards/chosen": 2.6885054111480713,
459
+ "rewards/margins": 2.017667293548584,
460
+ "rewards/rejected": 0.670837938785553,
461
  "step": 270
462
  },
463
  {
464
  "epoch": 0.8,
465
+ "grad_norm": 732.1581791035536,
466
+ "learning_rate": 5.8488889220255525e-09,
467
+ "logits/chosen": -4.298913955688477,
468
+ "logits/rejected": -4.587487697601318,
469
+ "logps/chosen": -272.83935546875,
470
+ "logps/rejected": -229.5067596435547,
471
+ "loss": 0.2873,
472
+ "rewards/accuracies": 0.90625,
473
+ "rewards/chosen": 3.1228713989257812,
474
+ "rewards/margins": 2.495274066925049,
475
+ "rewards/rejected": 0.6275972723960876,
476
  "step": 280
477
  },
478
  {
479
  "epoch": 0.83,
480
+ "grad_norm": 1125.5972329270946,
481
+ "learning_rate": 4.344030642100133e-09,
482
+ "logits/chosen": -4.320634365081787,
483
+ "logits/rejected": -4.5083842277526855,
484
+ "logps/chosen": -280.2475280761719,
485
+ "logps/rejected": -236.018798828125,
486
+ "loss": 0.3136,
487
+ "rewards/accuracies": 0.8374999761581421,
488
+ "rewards/chosen": 3.1591415405273438,
489
+ "rewards/margins": 2.4951727390289307,
490
+ "rewards/rejected": 0.6639689207077026,
491
  "step": 290
492
  },
493
  {
494
  "epoch": 0.86,
495
+ "grad_norm": 788.6539907080908,
496
+ "learning_rate": 3.0444606657442835e-09,
497
+ "logits/chosen": -4.298077583312988,
498
+ "logits/rejected": -4.541121959686279,
499
+ "logps/chosen": -296.12176513671875,
500
+ "logps/rejected": -239.7339324951172,
501
+ "loss": 0.3363,
502
+ "rewards/accuracies": 0.856249988079071,
503
+ "rewards/chosen": 3.3548855781555176,
504
+ "rewards/margins": 2.5390803813934326,
505
+ "rewards/rejected": 0.8158050775527954,
506
  "step": 300
507
  },
508
  {
509
  "epoch": 0.86,
510
+ "eval_logits/chosen": -3.2283477783203125,
511
+ "eval_logits/rejected": -3.2283477783203125,
512
+ "eval_logps/chosen": -159.417236328125,
513
+ "eval_logps/rejected": -159.417236328125,
514
  "eval_loss": 0.6931471824645996,
515
  "eval_rewards/accuracies": 0.0,
516
+ "eval_rewards/chosen": -1.9201126098632812,
517
  "eval_rewards/margins": 0.0,
518
+ "eval_rewards/rejected": -1.9201126098632812,
519
+ "eval_runtime": 1.4723,
520
+ "eval_samples_per_second": 0.679,
521
+ "eval_steps_per_second": 0.679,
522
  "step": 300
523
  },
524
  {
525
  "epoch": 0.89,
526
+ "grad_norm": 1232.483796740804,
527
+ "learning_rate": 1.9630947032398067e-09,
528
+ "logits/chosen": -4.439385414123535,
529
+ "logits/rejected": -4.579739093780518,
530
+ "logps/chosen": -260.4383850097656,
531
+ "logps/rejected": -226.1852264404297,
532
+ "loss": 0.2944,
533
+ "rewards/accuracies": 0.8374999761581421,
534
+ "rewards/chosen": 2.838869571685791,
535
+ "rewards/margins": 2.284980297088623,
536
+ "rewards/rejected": 0.5538893342018127,
537
  "step": 310
538
  },
539
  {
540
  "epoch": 0.91,
541
+ "grad_norm": 734.9918892690486,
542
+ "learning_rate": 1.1106798553464803e-09,
543
+ "logits/chosen": -4.274772644042969,
544
+ "logits/rejected": -4.528196334838867,
545
+ "logps/chosen": -290.4882507324219,
546
+ "logps/rejected": -243.47128295898438,
547
+ "loss": 0.3081,
548
+ "rewards/accuracies": 0.862500011920929,
549
+ "rewards/chosen": 3.197766065597534,
550
+ "rewards/margins": 2.4614479541778564,
551
+ "rewards/rejected": 0.7363181114196777,
552
  "step": 320
553
  },
554
  {
555
  "epoch": 0.94,
556
+ "grad_norm": 1013.5360319244875,
557
+ "learning_rate": 4.956878037864043e-10,
558
+ "logits/chosen": -4.329029083251953,
559
+ "logits/rejected": -4.53527307510376,
560
+ "logps/chosen": -303.6526794433594,
561
+ "logps/rejected": -251.791259765625,
562
+ "loss": 0.3026,
563
+ "rewards/accuracies": 0.862500011920929,
564
+ "rewards/chosen": 2.9665093421936035,
565
+ "rewards/margins": 2.3622777462005615,
566
+ "rewards/rejected": 0.6042317152023315,
567
  "step": 330
568
  },
569
  {
570
  "epoch": 0.97,
571
+ "grad_norm": 1029.0631247014248,
572
+ "learning_rate": 1.2423061586496474e-10,
573
+ "logits/chosen": -4.317473888397217,
574
+ "logits/rejected": -4.609925270080566,
575
+ "logps/chosen": -283.091552734375,
576
+ "logps/rejected": -227.1355438232422,
577
+ "loss": 0.3064,
578
+ "rewards/accuracies": 0.8500000238418579,
579
+ "rewards/chosen": 2.9268617630004883,
580
+ "rewards/margins": 2.242302417755127,
581
+ "rewards/rejected": 0.6845596432685852,
582
  "step": 340
583
  },
584
  {
585
  "epoch": 1.0,
586
+ "grad_norm": 657.4804286362323,
587
  "learning_rate": 0.0,
588
+ "logits/chosen": -4.288041114807129,
589
+ "logits/rejected": -4.490693092346191,
590
+ "logps/chosen": -289.8371887207031,
591
+ "logps/rejected": -243.97164916992188,
592
+ "loss": 0.2999,
593
+ "rewards/accuracies": 0.8500000238418579,
594
+ "rewards/chosen": 3.091984987258911,
595
+ "rewards/margins": 2.4618594646453857,
596
+ "rewards/rejected": 0.6301255226135254,
597
  "step": 350
598
  },
599
  {
600
  "epoch": 1.0,
601
  "step": 350,
602
  "total_flos": 0.0,
603
+ "train_loss": 0.37015721593584333,
604
+ "train_runtime": 5302.4484,
605
+ "train_samples_per_second": 8.44,
606
  "train_steps_per_second": 0.066
607
  }
608
  ],
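trainer_state.json keeps the full `log_history` shown above, so the per-step training loss and the repeated 0.6931 evaluation loss can be extracted programmatically rather than read out of the diff. A small sketch, assuming the file is read from this repository:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry "loss"; evaluation entries carry "eval_loss".
train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(train_points[-1])  # (350, 0.2999) for the run in this commit
print(eval_points)       # [(100, 0.6931...), (200, 0.6931...), (300, 0.6931...)]
```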
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4cfcdc187e96bc5f72f21240b097691d6a8cdf8d0fcc5ad2fe20f43a43287471
-size 6520
+oid sha256:749665d380846ffdd47f0b7ab06064e45e0a2bb2e78deb6e356454a33e729001
+size 6328
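training_args.bin is the pickled `TrainingArguments` object saved by the Trainer, which is why its size shifts slightly when the arguments change. A sketch for inspecting it, assuming a recent `torch` and a `transformers` install compatible with the one that wrote the file:

```python
import torch

# training_args.bin is a pickled TrainingArguments object; unpickling it
# needs transformers importable in the current environment.
args = torch.load("training_args.bin", weights_only=False)
print(args.lr_scheduler_type)   # expected: cosine (per the README diff above)
print(args.warmup_ratio)        # expected: 0.1
print(args.num_train_epochs)    # expected: 1
```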