RikkiXu committed
Commit 00dc39f
1 Parent(s): b2dd4a0

Model save

README.md CHANGED
@@ -16,14 +16,14 @@ should probably proofread and complete it, then remove this comment. -->
  This model was trained from scratch on the None dataset.
  It achieves the following results on the evaluation set:
  - Loss: 0.6931
- - Rewards/chosen: -2.1301
- - Rewards/rejected: -2.1301
+ - Rewards/chosen: -1.7904
+ - Rewards/rejected: -1.7904
  - Rewards/accuracies: 0.0
  - Rewards/margins: 0.0
- - Logps/rejected: -159.8372
- - Logps/chosen: -159.8372
- - Logits/rejected: -3.1995
- - Logits/chosen: -3.1995
+ - Logps/rejected: -157.3674
+ - Logps/chosen: -157.3674
+ - Logits/rejected: -3.2202
+ - Logits/chosen: -3.2202

  ## Model description

@@ -42,7 +42,7 @@ More information needed
  ### Training hyperparameters

  The following hyperparameters were used during training:
- - learning_rate: 2e-07
+ - learning_rate: 5e-08
  - train_batch_size: 8
  - eval_batch_size: 8
  - seed: 42
@@ -53,16 +53,15 @@ The following hyperparameters were used during training:
  - total_eval_batch_size: 64
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: cosine
- - lr_scheduler_warmup_ratio: 0.1
  - num_epochs: 1

  ### Training results

  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.3114 | 0.29 | 100 | 0.6931 | -2.8588 | -2.8588 | 0.0 | 0.0 | -161.2947 | -161.2947 | -3.2262 | -3.2262 |
- | 0.2741 | 0.57 | 200 | 0.6931 | -2.6760 | -2.6760 | 0.0 | 0.0 | -160.9289 | -160.9289 | -3.2033 | -3.2033 |
- | 0.2695 | 0.86 | 300 | 0.6931 | -2.1301 | -2.1301 | 0.0 | 0.0 | -159.8372 | -159.8372 | -3.1995 | -3.1995 |
+ | 0.3573 | 0.29 | 100 | 0.6931 | -2.2646 | -2.2646 | 0.0 | 0.0 | -157.8416 | -157.8416 | -3.2259 | -3.2259 |
+ | 0.3184 | 0.57 | 200 | 0.6931 | -1.8023 | -1.8023 | 0.0 | 0.0 | -157.3793 | -157.3793 | -3.2195 | -3.2195 |
+ | 0.3594 | 0.86 | 300 | 0.6931 | -1.7904 | -1.7904 | 0.0 | 0.0 | -157.3674 | -157.3674 | -3.2202 | -3.2202 |


  ### Framework versions
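
The Rewards/Logps/Logits columns in the card above are the metrics that TRL's `DPOTrainer` logs during preference tuning, so this was most likely a DPO-style run, although the card does not say so explicitly. As a rough sketch only, here is how the listed values would map onto `transformers.TrainingArguments`; the output path and the TRL pairing are assumptions, not taken from this repository:

```python
# Sketch under assumptions: mirrors the hyperparameters listed in the card;
# the choice of trainer (e.g. trl.DPOTrainer) is inferred, not documented here.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./dpo-run",            # hypothetical output path
    learning_rate=5e-08,               # value after this commit (was 2e-07)
    per_device_train_batch_size=8,     # train_batch_size: 8
    per_device_eval_batch_size=8,      # eval_batch_size: 8
    seed=42,
    lr_scheduler_type="cosine",        # the warmup_ratio line was dropped in this commit
    num_train_epochs=1,
    logging_steps=10,                  # matches trainer_state.json
    evaluation_strategy="steps",
    eval_steps=100,
    save_steps=100,
    bf16=True,                         # config.json: "torch_dtype": "bfloat16"
    report_to="tensorboard",           # a tfevents file is part of this commit
)
```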
all_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 1.0,
- "train_loss": 0.30806411576476,
- "train_runtime": 5299.0292,
- "train_samples": 44682,
- "train_samples_per_second": 8.432,
+ "train_loss": 0.36299856867109026,
+ "train_runtime": 5294.123,
+ "train_samples": 44755,
+ "train_samples_per_second": 8.454,
  "train_steps_per_second": 0.066
  }
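
`all_results.json` and `train_results.json` (further down in this commit) carry the same aggregate training metrics. A minimal sketch for reading them back:

```python
# Sketch: read the aggregate metrics this commit rewrites in all_results.json
# (train_results.json below contains the same fields).
import json

with open("all_results.json") as f:
    results = json.load(f)

# After this commit: train_loss ~ 0.363, train_runtime ~ 5294 s, 44755 samples.
print(results["train_loss"], results["train_runtime"], results["train_samples"])
```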
config.json CHANGED
@@ -20,7 +20,7 @@
  "sliding_window": 4096,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
- "transformers_version": "4.41.1",
+ "transformers_version": "4.38.2",
  "use_cache": false,
  "vocab_size": 32002
  }
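
`config.json` records bfloat16 weights and a 32002-token vocabulary (the sliding-window field suggests a Mistral-style architecture). A hedged sketch of loading the checkpoint accordingly; the local path is a placeholder, since the Hub repo id is not shown in this diff:

```python
# Sketch: load the checkpoint with the dtype recorded in config.json.
# "path/to/checkpoint" is a placeholder, not the actual repo id.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "path/to/checkpoint",
    torch_dtype=torch.bfloat16,   # config.json: "torch_dtype": "bfloat16"
)
tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint")
print(model.config.vocab_size)    # 32002 in this commit's config.json
```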
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ff3d0ac00c3304811f4f56db0c3a49ea7bf825902fea45c8a9791a2e4f7431ab
+ oid sha256:3d36163549571106621ebac26e38ec0b50cd9e0d81a1c0f2736d167eeed2d940
  size 4943178720
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1122a41f096dacbe12e70bff78c3951321a0a6f46db2c7995a726fd8f909bf84
+ oid sha256:60f039e2e1484a47443bf956b71679bc1af07034027486d7035fb8ea530cd16f
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4c18a2acaf553956d0cddde18a5d52d992dfb5723fc496a27bcde59c9bb62529
+ oid sha256:552efc8fed558e148b25650586e753393bf9251331d7541a0c3aa3f226b236d6
  size 4540532728
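
The three `.safetensors` entries above are Git LFS pointers (version, sha256 oid, size), not the weights themselves; only the oids change in this commit while the shard sizes stay identical. A small sketch for checking a downloaded shard against its pointer:

```python
# Sketch: verify a downloaded shard against the sha256 recorded in its
# Git LFS pointer; the expected value is the new oid of the first shard.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "3d36163549571106621ebac26e38ec0b50cd9e0d81a1c0f2736d167eeed2d940"
print(sha256_of("model-00001-of-00003.safetensors") == expected)
```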
runs/May30_23-06-15_n136-129-074/events.out.tfevents.1717081710.n136-129-074.2680508.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:900e43821138ab2f4858e5dd2280d3934b7a82a306dcb7660db2a9221551344a
- size 28313
+ oid sha256:2eb9c3ef9c137948b60c969d7d5ff4259f0fd72becfb1520727015878e012f26
+ size 32107
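
The `runs/...` entry is the TensorBoard event file for this training run (also an LFS pointer). A sketch for inspecting it once downloaded; the scalar tag names are assumptions, so list the available tags first:

```python
# Sketch: inspect the committed TensorBoard event file. The tag "train/loss"
# is a guess; print the available scalar tags before relying on any of them.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/May30_23-06-15_n136-129-074")
ea.Reload()
print(ea.Tags()["scalars"])             # discover which scalars were logged
for event in ea.Scalars("train/loss"):  # hypothetical tag name
    print(event.step, event.value)
```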
tokenizer.json CHANGED
@@ -152,7 +152,6 @@
  "end_of_word_suffix": null,
  "fuse_unk": true,
  "byte_fallback": true,
- "ignore_merges": false,
  "vocab": {
  "<unk>": 0,
  "<s>": 1,
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 1.0,
- "train_loss": 0.30806411576476,
- "train_runtime": 5299.0292,
- "train_samples": 44682,
- "train_samples_per_second": 8.432,
+ "train_loss": 0.36299856867109026,
+ "train_runtime": 5294.123,
+ "train_samples": 44755,
+ "train_samples_per_second": 8.454,
  "train_steps_per_second": 0.066
  }
trainer_state.json CHANGED
@@ -1,21 +1,21 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.9985693848354793,
5
  "eval_steps": 100,
6
- "global_step": 349,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
- "grad_norm": 1483.079147156977,
14
- "learning_rate": 5.7142857142857136e-09,
15
- "logits/chosen": -4.490396976470947,
16
- "logits/rejected": -4.787891387939453,
17
- "logps/chosen": -300.56573486328125,
18
- "logps/rejected": -263.39849853515625,
19
  "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
@@ -25,574 +25,589 @@
25
  },
26
  {
27
  "epoch": 0.03,
28
- "grad_norm": 1678.9009350663757,
29
- "learning_rate": 5.714285714285714e-08,
30
- "logits/chosen": -4.292892932891846,
31
- "logits/rejected": -4.4704790115356445,
32
- "logps/chosen": -285.7367858886719,
33
- "logps/rejected": -241.40879821777344,
34
- "loss": 0.7232,
35
- "rewards/accuracies": 0.4375,
36
- "rewards/chosen": 0.03021715022623539,
37
- "rewards/margins": 0.018014244735240936,
38
- "rewards/rejected": 0.012202905490994453,
39
  "step": 10
40
  },
41
  {
42
  "epoch": 0.06,
43
- "grad_norm": 1307.5912774162387,
44
- "learning_rate": 1.1428571428571427e-07,
45
- "logits/chosen": -4.212913990020752,
46
- "logits/rejected": -4.504936695098877,
47
- "logps/chosen": -312.25714111328125,
48
- "logps/rejected": -247.5605926513672,
49
- "loss": 0.6348,
50
- "rewards/accuracies": 0.6187499761581421,
51
- "rewards/chosen": 0.25936827063560486,
52
- "rewards/margins": 0.15551437437534332,
53
- "rewards/rejected": 0.10385391861200333,
54
  "step": 20
55
  },
56
  {
57
  "epoch": 0.09,
58
- "grad_norm": 877.5830541670848,
59
- "learning_rate": 1.714285714285714e-07,
60
- "logits/chosen": -4.158999919891357,
61
- "logits/rejected": -4.378296852111816,
62
- "logps/chosen": -320.1895446777344,
63
- "logps/rejected": -273.6168518066406,
64
- "loss": 0.4394,
65
- "rewards/accuracies": 0.793749988079071,
66
- "rewards/chosen": 1.6762861013412476,
67
- "rewards/margins": 1.1370527744293213,
68
- "rewards/rejected": 0.5392333269119263,
69
  "step": 30
70
  },
71
  {
72
  "epoch": 0.11,
73
- "grad_norm": 1094.7268353859254,
74
- "learning_rate": 1.9987489925657699e-07,
75
- "logits/chosen": -4.327412128448486,
76
- "logits/rejected": -4.590438365936279,
77
- "logps/chosen": -272.52813720703125,
78
- "logps/rejected": -226.4479522705078,
79
- "loss": 0.3519,
80
- "rewards/accuracies": 0.8374999761581421,
81
- "rewards/chosen": 3.0370898246765137,
82
- "rewards/margins": 2.1227898597717285,
83
- "rewards/rejected": 0.9143003225326538,
84
  "step": 40
85
  },
86
  {
87
  "epoch": 0.14,
88
- "grad_norm": 1142.545986046605,
89
- "learning_rate": 1.98875970549573e-07,
90
- "logits/chosen": -4.374251365661621,
91
- "logits/rejected": -4.627996921539307,
92
- "logps/chosen": -264.49615478515625,
93
- "logps/rejected": -226.27197265625,
94
- "loss": 0.3689,
95
- "rewards/accuracies": 0.831250011920929,
96
- "rewards/chosen": 4.1793413162231445,
97
- "rewards/margins": 2.736725091934204,
98
- "rewards/rejected": 1.4426158666610718,
99
  "step": 50
100
  },
101
  {
102
  "epoch": 0.17,
103
- "grad_norm": 635.1242404757879,
104
- "learning_rate": 1.968881042201029e-07,
105
- "logits/chosen": -4.35978889465332,
106
- "logits/rejected": -4.594507694244385,
107
- "logps/chosen": -296.86907958984375,
108
- "logps/rejected": -251.1016845703125,
109
- "loss": 0.3246,
110
- "rewards/accuracies": 0.8062499761581421,
111
- "rewards/chosen": 4.9698944091796875,
112
- "rewards/margins": 3.8340022563934326,
113
- "rewards/rejected": 1.1358922719955444,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.2,
118
- "grad_norm": 871.5895225513464,
119
- "learning_rate": 1.9393118250841894e-07,
120
- "logits/chosen": -4.349625587463379,
121
- "logits/rejected": -4.586968421936035,
122
- "logps/chosen": -285.0860595703125,
123
- "logps/rejected": -241.4550018310547,
124
- "loss": 0.3259,
125
- "rewards/accuracies": 0.8812500238418579,
126
- "rewards/chosen": 4.21413516998291,
127
- "rewards/margins": 3.7849984169006348,
128
- "rewards/rejected": 0.42913690209388733,
129
  "step": 70
130
  },
131
  {
132
  "epoch": 0.23,
133
- "grad_norm": 869.2800598030351,
134
- "learning_rate": 1.900347799523094e-07,
135
- "logits/chosen": -4.277868747711182,
136
- "logits/rejected": -4.535857677459717,
137
- "logps/chosen": -318.2683410644531,
138
- "logps/rejected": -272.18206787109375,
139
- "loss": 0.2501,
140
- "rewards/accuracies": 0.9375,
141
- "rewards/chosen": 4.3253655433654785,
142
- "rewards/margins": 4.170114040374756,
143
- "rewards/rejected": 0.15525080263614655,
144
  "step": 80
145
  },
146
  {
147
  "epoch": 0.26,
148
- "grad_norm": 616.9171514391553,
149
- "learning_rate": 1.8523786758850436e-07,
150
- "logits/chosen": -4.401219844818115,
151
- "logits/rejected": -4.6477251052856445,
152
- "logps/chosen": -280.7421569824219,
153
- "logps/rejected": -236.2926025390625,
154
- "loss": 0.2859,
155
- "rewards/accuracies": 0.856249988079071,
156
- "rewards/chosen": 3.923023223876953,
157
- "rewards/margins": 4.055254936218262,
158
- "rewards/rejected": -0.13223108649253845,
159
  "step": 90
160
  },
161
  {
162
  "epoch": 0.29,
163
- "grad_norm": 735.4670744748103,
164
- "learning_rate": 1.795884231721841e-07,
165
- "logits/chosen": -4.318561553955078,
166
- "logits/rejected": -4.602439880371094,
167
- "logps/chosen": -267.45794677734375,
168
- "logps/rejected": -232.0417938232422,
169
- "loss": 0.3114,
170
- "rewards/accuracies": 0.875,
171
- "rewards/chosen": 4.1319708824157715,
172
- "rewards/margins": 4.702456951141357,
173
- "rewards/rejected": -0.5704857110977173,
174
  "step": 100
175
  },
176
  {
177
  "epoch": 0.29,
178
- "eval_logits/chosen": -3.226163148880005,
179
- "eval_logits/rejected": -3.226163148880005,
180
- "eval_logps/chosen": -161.294677734375,
181
- "eval_logps/rejected": -161.294677734375,
182
  "eval_loss": 0.6931471824645996,
183
  "eval_rewards/accuracies": 0.0,
184
- "eval_rewards/chosen": -2.8588294982910156,
185
  "eval_rewards/margins": 0.0,
186
- "eval_rewards/rejected": -2.8588294982910156,
187
- "eval_runtime": 1.5114,
188
- "eval_samples_per_second": 0.662,
189
- "eval_steps_per_second": 0.662,
190
  "step": 100
191
  },
192
  {
193
  "epoch": 0.31,
194
- "grad_norm": 817.8641381018862,
195
- "learning_rate": 1.7314295131309637e-07,
196
- "logits/chosen": -4.324714660644531,
197
- "logits/rejected": -4.614729404449463,
198
- "logps/chosen": -293.42364501953125,
199
- "logps/rejected": -245.01620483398438,
200
- "loss": 0.3079,
201
- "rewards/accuracies": 0.8812500238418579,
202
- "rewards/chosen": 4.0577192306518555,
203
- "rewards/margins": 4.518521308898926,
204
- "rewards/rejected": -0.4608024060726166,
205
  "step": 110
206
  },
207
  {
208
  "epoch": 0.34,
209
- "grad_norm": 926.642960527248,
210
- "learning_rate": 1.6596591832778466e-07,
211
- "logits/chosen": -4.334780216217041,
212
- "logits/rejected": -4.567566394805908,
213
- "logps/chosen": -298.56976318359375,
214
- "logps/rejected": -266.8869323730469,
215
- "loss": 0.2848,
216
- "rewards/accuracies": 0.84375,
217
- "rewards/chosen": 3.83539080619812,
218
- "rewards/margins": 4.040234088897705,
219
- "rewards/rejected": -0.20484237372875214,
220
  "step": 120
221
  },
222
  {
223
  "epoch": 0.37,
224
- "grad_norm": 675.9811999128359,
225
- "learning_rate": 1.5812910746042256e-07,
226
- "logits/chosen": -4.3064866065979,
227
- "logits/rejected": -4.553826332092285,
228
- "logps/chosen": -286.5328063964844,
229
- "logps/rejected": -242.8630828857422,
230
- "loss": 0.2782,
231
- "rewards/accuracies": 0.893750011920929,
232
- "rewards/chosen": 3.7822985649108887,
233
- "rewards/margins": 4.371214389801025,
234
- "rewards/rejected": -0.5889158248901367,
235
  "step": 130
236
  },
237
  {
238
  "epoch": 0.4,
239
- "grad_norm": 1048.4484509272795,
240
- "learning_rate": 1.4971090092120542e-07,
241
- "logits/chosen": -4.304565906524658,
242
- "logits/rejected": -4.545400142669678,
243
- "logps/chosen": -294.17095947265625,
244
- "logps/rejected": -243.83935546875,
245
- "loss": 0.269,
246
- "rewards/accuracies": 0.9125000238418579,
247
- "rewards/chosen": 3.8339390754699707,
248
- "rewards/margins": 4.558647155761719,
249
- "rewards/rejected": -0.7247086763381958,
250
  "step": 140
251
  },
252
  {
253
  "epoch": 0.43,
254
- "grad_norm": 844.0558457976633,
255
- "learning_rate": 1.4079549592320782e-07,
256
- "logits/chosen": -4.313709735870361,
257
- "logits/rejected": -4.621119499206543,
258
- "logps/chosen": -292.96661376953125,
259
- "logps/rejected": -248.0418701171875,
260
- "loss": 0.2727,
261
- "rewards/accuracies": 0.856249988079071,
262
- "rewards/chosen": 3.8533847332000732,
263
- "rewards/margins": 4.262558937072754,
264
- "rewards/rejected": -0.4091736674308777,
265
  "step": 150
266
  },
267
  {
268
  "epoch": 0.46,
269
- "grad_norm": 684.2161612155076,
270
- "learning_rate": 1.3147206255874882e-07,
271
- "logits/chosen": -4.248518943786621,
272
- "logits/rejected": -4.44361686706543,
273
- "logps/chosen": -309.7388610839844,
274
- "logps/rejected": -272.90252685546875,
275
- "loss": 0.2961,
276
- "rewards/accuracies": 0.90625,
277
- "rewards/chosen": 4.352166652679443,
278
- "rewards/margins": 4.611546516418457,
279
- "rewards/rejected": -0.2593800127506256,
280
  "step": 160
281
  },
282
  {
283
  "epoch": 0.49,
284
- "grad_norm": 795.319436926025,
285
- "learning_rate": 1.2183385193801653e-07,
286
- "logits/chosen": -4.289888381958008,
287
- "logits/rejected": -4.489100933074951,
288
- "logps/chosen": -272.3153991699219,
289
- "logps/rejected": -237.64114379882812,
290
- "loss": 0.2547,
291
- "rewards/accuracies": 0.918749988079071,
292
- "rewards/chosen": 4.135977745056152,
293
- "rewards/margins": 4.544943809509277,
294
- "rewards/rejected": -0.40896692872047424,
295
  "step": 170
296
  },
297
  {
298
- "epoch": 0.52,
299
- "grad_norm": 917.2913745554154,
300
- "learning_rate": 1.1197726351017051e-07,
301
- "logits/chosen": -4.37355375289917,
302
- "logits/rejected": -4.5346221923828125,
303
- "logps/chosen": -261.6694641113281,
304
- "logps/rejected": -230.89599609375,
305
- "loss": 0.3104,
306
- "rewards/accuracies": 0.862500011920929,
307
- "rewards/chosen": 3.4763107299804688,
308
- "rewards/margins": 3.877528429031372,
309
- "rewards/rejected": -0.4012181758880615,
310
  "step": 180
311
  },
312
  {
313
  "epoch": 0.54,
314
- "grad_norm": 880.6071936953809,
315
- "learning_rate": 1.0200088089538943e-07,
316
- "logits/chosen": -4.33815860748291,
317
- "logits/rejected": -4.5543646812438965,
318
- "logps/chosen": -288.26873779296875,
319
- "logps/rejected": -253.7147216796875,
320
- "loss": 0.3129,
321
- "rewards/accuracies": 0.856249988079071,
322
- "rewards/chosen": 4.4660868644714355,
323
- "rewards/margins": 4.566544055938721,
324
- "rewards/rejected": -0.10045762360095978,
325
  "step": 190
326
  },
327
  {
328
  "epoch": 0.57,
329
- "grad_norm": 781.3066688232569,
330
- "learning_rate": 9.20044858712785e-08,
331
- "logits/chosen": -4.292983055114746,
332
- "logits/rejected": -4.566291332244873,
333
- "logps/chosen": -315.58251953125,
334
- "logps/rejected": -270.0286560058594,
335
- "loss": 0.2741,
336
- "rewards/accuracies": 0.862500011920929,
337
- "rewards/chosen": 4.502341270446777,
338
- "rewards/margins": 4.4218339920043945,
339
- "rewards/rejected": 0.08050797879695892,
340
  "step": 200
341
  },
342
  {
343
  "epoch": 0.57,
344
- "eval_logits/chosen": -3.2033309936523438,
345
- "eval_logits/rejected": -3.2033309936523438,
346
- "eval_logps/chosen": -160.92892456054688,
347
- "eval_logps/rejected": -160.92892456054688,
348
  "eval_loss": 0.6931471824645996,
349
  "eval_rewards/accuracies": 0.0,
350
- "eval_rewards/chosen": -2.675952911376953,
351
  "eval_rewards/margins": 0.0,
352
- "eval_rewards/rejected": -2.675952911376953,
353
- "eval_runtime": 1.48,
354
- "eval_samples_per_second": 0.676,
355
- "eval_steps_per_second": 0.676,
356
  "step": 200
357
  },
358
  {
359
  "epoch": 0.6,
360
- "grad_norm": 1021.5696401893487,
361
- "learning_rate": 8.208806037554645e-08,
362
- "logits/chosen": -4.327147483825684,
363
- "logits/rejected": -4.6312479972839355,
364
- "logps/chosen": -292.85919189453125,
365
- "logps/rejected": -236.7397918701172,
366
- "loss": 0.2345,
367
- "rewards/accuracies": 0.875,
368
- "rewards/chosen": 4.086556434631348,
369
- "rewards/margins": 4.589372634887695,
370
- "rewards/rejected": -0.5028160810470581,
371
  "step": 210
372
  },
373
  {
374
  "epoch": 0.63,
375
- "grad_norm": 720.6830161967047,
376
- "learning_rate": 7.23507865067214e-08,
377
- "logits/chosen": -4.357525825500488,
378
- "logits/rejected": -4.7082133293151855,
379
- "logps/chosen": -298.2110900878906,
380
- "logps/rejected": -237.28903198242188,
381
- "loss": 0.241,
382
- "rewards/accuracies": 0.9125000238418579,
383
- "rewards/chosen": 3.84515118598938,
384
- "rewards/margins": 4.655531406402588,
385
- "rewards/rejected": -0.8103805780410767,
386
  "step": 220
387
  },
388
  {
389
  "epoch": 0.66,
390
- "grad_norm": 784.9312094778309,
391
- "learning_rate": 6.289005452469778e-08,
392
- "logits/chosen": -4.388636112213135,
393
- "logits/rejected": -4.748753070831299,
394
- "logps/chosen": -288.0061340332031,
395
- "logps/rejected": -236.6077880859375,
396
- "loss": 0.268,
397
- "rewards/accuracies": 0.9125000238418579,
398
- "rewards/chosen": 3.5908846855163574,
399
- "rewards/margins": 4.1128339767456055,
400
- "rewards/rejected": -0.5219489932060242,
401
  "step": 230
402
  },
403
  {
404
  "epoch": 0.69,
405
- "grad_norm": 765.61870702253,
406
- "learning_rate": 5.38004887728938e-08,
407
- "logits/chosen": -4.234927177429199,
408
- "logits/rejected": -4.550938606262207,
409
- "logps/chosen": -283.8558654785156,
410
- "logps/rejected": -242.86538696289062,
411
- "loss": 0.2908,
412
- "rewards/accuracies": 0.887499988079071,
413
- "rewards/chosen": 3.562377452850342,
414
- "rewards/margins": 4.074954032897949,
415
- "rewards/rejected": -0.5125768780708313,
416
  "step": 240
417
  },
418
  {
419
- "epoch": 0.72,
420
- "grad_norm": 787.5249084258048,
421
- "learning_rate": 4.517300126455066e-08,
422
- "logits/chosen": -4.290322303771973,
423
- "logits/rejected": -4.522026538848877,
424
- "logps/chosen": -292.47442626953125,
425
- "logps/rejected": -257.0675354003906,
426
- "loss": 0.2635,
427
- "rewards/accuracies": 0.8812500238418579,
428
- "rewards/chosen": 3.95862078666687,
429
- "rewards/margins": 4.515480995178223,
430
- "rewards/rejected": -0.5568601489067078,
431
  "step": 250
432
  },
433
  {
434
  "epoch": 0.74,
435
- "grad_norm": 894.7487355536125,
436
- "learning_rate": 3.70938823990135e-08,
437
- "logits/chosen": -4.367636680603027,
438
- "logits/rejected": -4.524864196777344,
439
- "logps/chosen": -293.8681640625,
440
- "logps/rejected": -266.19622802734375,
441
- "loss": 0.2999,
442
- "rewards/accuracies": 0.887499988079071,
443
- "rewards/chosen": 3.311814069747925,
444
- "rewards/margins": 3.883411407470703,
445
- "rewards/rejected": -0.5715969800949097,
446
  "step": 260
447
  },
448
  {
449
  "epoch": 0.77,
450
- "grad_norm": 582.5138549963486,
451
- "learning_rate": 2.9643937902467277e-08,
452
- "logits/chosen": -4.23429012298584,
453
- "logits/rejected": -4.521827697753906,
454
- "logps/chosen": -280.5379333496094,
455
- "logps/rejected": -241.7247314453125,
456
- "loss": 0.2388,
457
- "rewards/accuracies": 0.862500011920929,
458
- "rewards/chosen": 3.911522626876831,
459
- "rewards/margins": 4.728894233703613,
460
- "rewards/rejected": -0.8173705339431763,
461
  "step": 270
462
  },
463
  {
464
  "epoch": 0.8,
465
- "grad_norm": 810.3954826895692,
466
- "learning_rate": 2.289768062527362e-08,
467
- "logits/chosen": -4.273584842681885,
468
- "logits/rejected": -4.6050519943237305,
469
- "logps/chosen": -283.619384765625,
470
- "logps/rejected": -233.9824981689453,
471
- "loss": 0.2895,
472
- "rewards/accuracies": 0.8812500238418579,
473
- "rewards/chosen": 3.576643466949463,
474
- "rewards/margins": 3.991179943084717,
475
- "rewards/rejected": -0.41453617811203003,
476
  "step": 280
477
  },
478
  {
479
  "epoch": 0.83,
480
- "grad_norm": 730.6997584190797,
481
- "learning_rate": 1.6922585279389035e-08,
482
- "logits/chosen": -4.406073093414307,
483
- "logits/rejected": -4.6901140213012695,
484
- "logps/chosen": -294.35699462890625,
485
- "logps/rejected": -233.74594116210938,
486
- "loss": 0.2669,
487
- "rewards/accuracies": 0.90625,
488
- "rewards/chosen": 3.295900344848633,
489
- "rewards/margins": 3.856853485107422,
490
- "rewards/rejected": -0.5609533786773682,
491
  "step": 290
492
  },
493
  {
494
  "epoch": 0.86,
495
- "grad_norm": 1254.3257141110294,
496
- "learning_rate": 1.1778413569831725e-08,
497
- "logits/chosen": -4.323153495788574,
498
- "logits/rejected": -4.481599807739258,
499
- "logps/chosen": -291.29742431640625,
500
- "logps/rejected": -254.1814422607422,
501
- "loss": 0.2695,
502
- "rewards/accuracies": 0.856249988079071,
503
- "rewards/chosen": 3.8485710620880127,
504
- "rewards/margins": 3.8490569591522217,
505
- "rewards/rejected": -0.0004864335060119629,
506
  "step": 300
507
  },
508
  {
509
  "epoch": 0.86,
510
- "eval_logits/chosen": -3.199521064758301,
511
- "eval_logits/rejected": -3.199521064758301,
512
- "eval_logps/chosen": -159.83721923828125,
513
- "eval_logps/rejected": -159.83721923828125,
514
  "eval_loss": 0.6931471824645996,
515
  "eval_rewards/accuracies": 0.0,
516
- "eval_rewards/chosen": -2.1301021575927734,
517
  "eval_rewards/margins": 0.0,
518
- "eval_rewards/rejected": -2.1301021575927734,
519
- "eval_runtime": 1.4681,
520
- "eval_samples_per_second": 0.681,
521
- "eval_steps_per_second": 0.681,
522
  "step": 300
523
  },
524
  {
525
  "epoch": 0.89,
526
- "grad_norm": 1302.095010227008,
527
- "learning_rate": 7.516616470096315e-09,
528
- "logits/chosen": -4.3807525634765625,
529
- "logits/rejected": -4.600883483886719,
530
- "logps/chosen": -299.8600158691406,
531
- "logps/rejected": -248.4993438720703,
532
- "loss": 0.2545,
533
- "rewards/accuracies": 0.875,
534
- "rewards/chosen": 4.215323448181152,
535
- "rewards/margins": 4.587640762329102,
536
- "rewards/rejected": -0.3723169267177582,
537
  "step": 310
538
  },
539
  {
540
- "epoch": 0.92,
541
- "grad_norm": 875.4939401121867,
542
- "learning_rate": 4.179819619838454e-09,
543
- "logits/chosen": -4.2180280685424805,
544
- "logits/rejected": -4.570239067077637,
545
- "logps/chosen": -286.0614013671875,
546
- "logps/rejected": -234.3032684326172,
547
- "loss": 0.269,
548
- "rewards/accuracies": 0.8374999761581421,
549
- "rewards/chosen": 3.9898929595947266,
550
- "rewards/margins": 4.13907527923584,
551
- "rewards/rejected": -0.1491830050945282,
552
  "step": 320
553
  },
554
  {
555
  "epoch": 0.94,
556
- "grad_norm": 1010.1848642033616,
557
- "learning_rate": 1.8013969917777483e-09,
558
- "logits/chosen": -4.380679130554199,
559
- "logits/rejected": -4.608093738555908,
560
- "logps/chosen": -278.8153381347656,
561
- "logps/rejected": -235.4047088623047,
562
- "loss": 0.247,
563
- "rewards/accuracies": 0.887499988079071,
564
- "rewards/chosen": 3.914651870727539,
565
- "rewards/margins": 4.33533239364624,
566
- "rewards/rejected": -0.42068085074424744,
567
  "step": 330
568
  },
569
  {
570
  "epoch": 0.97,
571
- "grad_norm": 702.698454631269,
572
- "learning_rate": 4.051370919176289e-10,
573
- "logits/chosen": -4.314477920532227,
574
- "logits/rejected": -4.502932548522949,
575
- "logps/chosen": -282.89666748046875,
576
- "logps/rejected": -246.3631134033203,
577
- "loss": 0.243,
578
- "rewards/accuracies": 0.90625,
579
- "rewards/chosen": 3.8934428691864014,
580
- "rewards/margins": 4.308337688446045,
581
- "rewards/rejected": -0.41489481925964355,
582
  "step": 340
583
  },
584
  {
585
  "epoch": 1.0,
586
- "step": 349,
587
  "total_flos": 0.0,
588
- "train_loss": 0.30806411576476,
589
- "train_runtime": 5299.0292,
590
- "train_samples_per_second": 8.432,
591
  "train_steps_per_second": 0.066
592
  }
593
  ],
594
  "logging_steps": 10,
595
- "max_steps": 349,
596
  "num_input_tokens_seen": 0,
597
  "num_train_epochs": 1,
598
  "save_steps": 100,
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 1.0,
5
  "eval_steps": 100,
6
+ "global_step": 350,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
+ "grad_norm": 3067.8628948133914,
14
+ "learning_rate": 4.9998992904271775e-08,
15
+ "logits/chosen": -4.185730934143066,
16
+ "logits/rejected": -4.509836196899414,
17
+ "logps/chosen": -274.000732421875,
18
+ "logps/rejected": -205.8054962158203,
19
  "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
 
25
  },
26
  {
27
  "epoch": 0.03,
28
+ "grad_norm": 3330.3974170986107,
29
+ "learning_rate": 4.9899357349880975e-08,
30
+ "logits/chosen": -4.211880207061768,
31
+ "logits/rejected": -4.48573637008667,
32
+ "logps/chosen": -318.31072998046875,
33
+ "logps/rejected": -257.18267822265625,
34
+ "loss": 0.7459,
35
+ "rewards/accuracies": 0.5625,
36
+ "rewards/chosen": 0.200405091047287,
37
+ "rewards/margins": 0.10155472159385681,
38
+ "rewards/rejected": 0.09885036945343018,
39
  "step": 10
40
  },
41
  {
42
  "epoch": 0.06,
43
+ "grad_norm": 2932.727170813642,
44
+ "learning_rate": 4.959823971496574e-08,
45
+ "logits/chosen": -4.2464704513549805,
46
+ "logits/rejected": -4.50115966796875,
47
+ "logps/chosen": -304.53350830078125,
48
+ "logps/rejected": -244.1282501220703,
49
+ "loss": 0.6293,
50
+ "rewards/accuracies": 0.731249988079071,
51
+ "rewards/chosen": 0.7030802965164185,
52
+ "rewards/margins": 0.6052380800247192,
53
+ "rewards/rejected": 0.09784229844808578,
54
  "step": 20
55
  },
56
  {
57
  "epoch": 0.09,
58
+ "grad_norm": 2159.097276891197,
59
+ "learning_rate": 4.9099071517396326e-08,
60
+ "logits/chosen": -4.3018364906311035,
61
+ "logits/rejected": -4.5636820793151855,
62
+ "logps/chosen": -305.11822509765625,
63
+ "logps/rejected": -258.89215087890625,
64
+ "loss": 0.5093,
65
+ "rewards/accuracies": 0.71875,
66
+ "rewards/chosen": 1.3964869976043701,
67
+ "rewards/margins": 0.9537334442138672,
68
+ "rewards/rejected": 0.44275355339050293,
69
  "step": 30
70
  },
71
  {
72
  "epoch": 0.11,
73
+ "grad_norm": 2233.10446662558,
74
+ "learning_rate": 4.8405871765993426e-08,
75
+ "logits/chosen": -4.304145812988281,
76
+ "logits/rejected": -4.571420192718506,
77
+ "logps/chosen": -293.4151916503906,
78
+ "logps/rejected": -234.4054412841797,
79
+ "loss": 0.4371,
80
+ "rewards/accuracies": 0.7437499761581421,
81
+ "rewards/chosen": 2.119215488433838,
82
+ "rewards/margins": 1.3193193674087524,
83
+ "rewards/rejected": 0.7998961806297302,
84
  "step": 40
85
  },
86
  {
87
  "epoch": 0.14,
88
+ "grad_norm": 1863.9092640792912,
89
+ "learning_rate": 4.7524221697560474e-08,
90
+ "logits/chosen": -4.298985481262207,
91
+ "logits/rejected": -4.545313835144043,
92
+ "logps/chosen": -299.71026611328125,
93
+ "logps/rejected": -252.57339477539062,
94
+ "loss": 0.4054,
95
+ "rewards/accuracies": 0.793749988079071,
96
+ "rewards/chosen": 3.015381336212158,
97
+ "rewards/margins": 1.8283360004425049,
98
+ "rewards/rejected": 1.1870452165603638,
99
  "step": 50
100
  },
101
  {
102
  "epoch": 0.17,
103
+ "grad_norm": 1861.0742759245438,
104
+ "learning_rate": 4.646121984004665e-08,
105
+ "logits/chosen": -4.3018717765808105,
106
+ "logits/rejected": -4.5299859046936035,
107
+ "logps/chosen": -308.25457763671875,
108
+ "logps/rejected": -261.1996154785156,
109
+ "loss": 0.3815,
110
+ "rewards/accuracies": 0.793749988079071,
111
+ "rewards/chosen": 3.097055673599243,
112
+ "rewards/margins": 1.6846046447753906,
113
+ "rewards/rejected": 1.412451148033142,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.2,
118
+ "grad_norm": 2083.1341477087894,
119
+ "learning_rate": 4.522542485937369e-08,
120
+ "logits/chosen": -4.417206764221191,
121
+ "logits/rejected": -4.548245429992676,
122
+ "logps/chosen": -285.4747009277344,
123
+ "logps/rejected": -236.24136352539062,
124
+ "loss": 0.3773,
125
+ "rewards/accuracies": 0.8062499761581421,
126
+ "rewards/chosen": 3.4294419288635254,
127
+ "rewards/margins": 2.4485509395599365,
128
+ "rewards/rejected": 0.9808910489082336,
129
  "step": 70
130
  },
131
  {
132
  "epoch": 0.23,
133
+ "grad_norm": 1999.1118673285923,
134
+ "learning_rate": 4.3826786650090273e-08,
135
+ "logits/chosen": -4.271725177764893,
136
+ "logits/rejected": -4.525103569030762,
137
+ "logps/chosen": -292.2157897949219,
138
+ "logps/rejected": -239.5623321533203,
139
+ "loss": 0.3663,
140
+ "rewards/accuracies": 0.856249988079071,
141
+ "rewards/chosen": 3.471898317337036,
142
+ "rewards/margins": 2.5827386379241943,
143
+ "rewards/rejected": 0.8891592025756836,
144
  "step": 80
145
  },
146
  {
147
  "epoch": 0.26,
148
+ "grad_norm": 1543.0151245523064,
149
+ "learning_rate": 4.2276566224671614e-08,
150
+ "logits/chosen": -4.196888446807861,
151
+ "logits/rejected": -4.430451393127441,
152
+ "logps/chosen": -303.9364929199219,
153
+ "logps/rejected": -258.19708251953125,
154
+ "loss": 0.37,
155
+ "rewards/accuracies": 0.762499988079071,
156
+ "rewards/chosen": 3.6267776489257812,
157
+ "rewards/margins": 2.5005435943603516,
158
+ "rewards/rejected": 1.1262344121932983,
159
  "step": 90
160
  },
161
  {
162
  "epoch": 0.29,
163
+ "grad_norm": 2558.2358091969077,
164
+ "learning_rate": 4.058724504646834e-08,
165
+ "logits/chosen": -4.298203468322754,
166
+ "logits/rejected": -4.51765251159668,
167
+ "logps/chosen": -291.99151611328125,
168
+ "logps/rejected": -240.97909545898438,
169
+ "loss": 0.3573,
170
+ "rewards/accuracies": 0.824999988079071,
171
+ "rewards/chosen": 3.8364264965057373,
172
+ "rewards/margins": 2.6143250465393066,
173
+ "rewards/rejected": 1.2221016883850098,
174
  "step": 100
175
  },
176
  {
177
  "epoch": 0.29,
178
+ "eval_logits/chosen": -3.2259409427642822,
179
+ "eval_logits/rejected": -3.2259409427642822,
180
+ "eval_logps/chosen": -157.8415985107422,
181
+ "eval_logps/rejected": -157.8415985107422,
182
  "eval_loss": 0.6931471824645996,
183
  "eval_rewards/accuracies": 0.0,
184
+ "eval_rewards/chosen": -2.2645912170410156,
185
  "eval_rewards/margins": 0.0,
186
+ "eval_rewards/rejected": -2.2645912170410156,
187
+ "eval_runtime": 1.5044,
188
+ "eval_samples_per_second": 0.665,
189
+ "eval_steps_per_second": 0.665,
190
  "step": 100
191
  },
192
  {
193
  "epoch": 0.31,
194
+ "grad_norm": 2075.8470964199623,
195
+ "learning_rate": 3.8772424536302564e-08,
196
+ "logits/chosen": -4.3160247802734375,
197
+ "logits/rejected": -4.557186126708984,
198
+ "logps/chosen": -299.556640625,
199
+ "logps/rejected": -250.2120361328125,
200
+ "loss": 0.3653,
201
+ "rewards/accuracies": 0.8687499761581421,
202
+ "rewards/chosen": 3.8744053840637207,
203
+ "rewards/margins": 2.781764268875122,
204
+ "rewards/rejected": 1.0926413536071777,
205
  "step": 110
206
  },
207
  {
208
  "epoch": 0.34,
209
+ "grad_norm": 2129.2578794603846,
210
+ "learning_rate": 3.6846716561824964e-08,
211
+ "logits/chosen": -4.358242988586426,
212
+ "logits/rejected": -4.6036834716796875,
213
+ "logps/chosen": -288.9602966308594,
214
+ "logps/rejected": -237.98257446289062,
215
+ "loss": 0.346,
216
+ "rewards/accuracies": 0.8687499761581421,
217
+ "rewards/chosen": 3.973881959915161,
218
+ "rewards/margins": 2.8389506340026855,
219
+ "rewards/rejected": 1.1349313259124756,
220
  "step": 120
221
  },
222
  {
223
  "epoch": 0.37,
224
+ "grad_norm": 1374.3088736284383,
225
+ "learning_rate": 3.482562579134809e-08,
226
+ "logits/chosen": -4.360684871673584,
227
+ "logits/rejected": -4.608490467071533,
228
+ "logps/chosen": -278.861572265625,
229
+ "logps/rejected": -218.7367706298828,
230
+ "loss": 0.3426,
231
+ "rewards/accuracies": 0.8374999761581421,
232
+ "rewards/chosen": 3.8384926319122314,
233
+ "rewards/margins": 2.634833812713623,
234
+ "rewards/rejected": 1.2036586999893188,
235
  "step": 130
236
  },
237
  {
238
  "epoch": 0.4,
239
+ "grad_norm": 1741.7465783603645,
240
+ "learning_rate": 3.272542485937369e-08,
241
+ "logits/chosen": -4.276978969573975,
242
+ "logits/rejected": -4.593733787536621,
243
+ "logps/chosen": -296.0984191894531,
244
+ "logps/rejected": -240.01248168945312,
245
+ "loss": 0.3729,
246
+ "rewards/accuracies": 0.856249988079071,
247
+ "rewards/chosen": 3.785256862640381,
248
+ "rewards/margins": 2.9941701889038086,
249
+ "rewards/rejected": 0.79108726978302,
250
  "step": 140
251
  },
252
  {
253
  "epoch": 0.43,
254
+ "grad_norm": 1837.7137132104272,
255
+ "learning_rate": 3.056302334890786e-08,
256
+ "logits/chosen": -4.245262622833252,
257
+ "logits/rejected": -4.510401725769043,
258
+ "logps/chosen": -295.3984680175781,
259
+ "logps/rejected": -250.73580932617188,
260
+ "loss": 0.3235,
261
+ "rewards/accuracies": 0.84375,
262
+ "rewards/chosen": 4.011710166931152,
263
+ "rewards/margins": 3.0462794303894043,
264
+ "rewards/rejected": 0.9654304385185242,
265
  "step": 150
266
  },
267
  {
268
  "epoch": 0.46,
269
+ "grad_norm": 1744.335126050233,
270
+ "learning_rate": 2.8355831645441387e-08,
271
+ "logits/chosen": -4.277425765991211,
272
+ "logits/rejected": -4.570274829864502,
273
+ "logps/chosen": -296.66839599609375,
274
+ "logps/rejected": -235.6475372314453,
275
+ "loss": 0.36,
276
+ "rewards/accuracies": 0.8500000238418579,
277
+ "rewards/chosen": 4.259499549865723,
278
+ "rewards/margins": 3.127065658569336,
279
+ "rewards/rejected": 1.1324341297149658,
280
  "step": 160
281
  },
282
  {
283
  "epoch": 0.49,
284
+ "grad_norm": 1875.319827037545,
285
+ "learning_rate": 2.6121620758762875e-08,
286
+ "logits/chosen": -4.229983329772949,
287
+ "logits/rejected": -4.467092990875244,
288
+ "logps/chosen": -296.31683349609375,
289
+ "logps/rejected": -241.3401336669922,
290
+ "loss": 0.3474,
291
+ "rewards/accuracies": 0.8687499761581421,
292
+ "rewards/chosen": 4.343829154968262,
293
+ "rewards/margins": 3.233609437942505,
294
+ "rewards/rejected": 1.1102204322814941,
295
  "step": 170
296
  },
297
  {
298
+ "epoch": 0.51,
299
+ "grad_norm": 2082.5003671787076,
300
+ "learning_rate": 2.3878379241237133e-08,
301
+ "logits/chosen": -4.364750862121582,
302
+ "logits/rejected": -4.597868919372559,
303
+ "logps/chosen": -285.72869873046875,
304
+ "logps/rejected": -241.40652465820312,
305
+ "loss": 0.3417,
306
+ "rewards/accuracies": 0.8374999761581421,
307
+ "rewards/chosen": 4.1484293937683105,
308
+ "rewards/margins": 3.0738511085510254,
309
+ "rewards/rejected": 1.074578046798706,
310
  "step": 180
311
  },
312
  {
313
  "epoch": 0.54,
314
+ "grad_norm": 1597.9774938638957,
315
+ "learning_rate": 2.164416835455862e-08,
316
+ "logits/chosen": -4.3281121253967285,
317
+ "logits/rejected": -4.498069763183594,
318
+ "logps/chosen": -308.14776611328125,
319
+ "logps/rejected": -257.7415466308594,
320
+ "loss": 0.2852,
321
+ "rewards/accuracies": 0.875,
322
+ "rewards/chosen": 4.146700859069824,
323
+ "rewards/margins": 3.202249526977539,
324
+ "rewards/rejected": 0.9444509744644165,
325
  "step": 190
326
  },
327
  {
328
  "epoch": 0.57,
329
+ "grad_norm": 1601.8580723204816,
330
+ "learning_rate": 1.943697665109214e-08,
331
+ "logits/chosen": -4.358348846435547,
332
+ "logits/rejected": -4.601215839385986,
333
+ "logps/chosen": -292.93658447265625,
334
+ "logps/rejected": -249.59469604492188,
335
+ "loss": 0.3184,
336
+ "rewards/accuracies": 0.824999988079071,
337
+ "rewards/chosen": 4.194998741149902,
338
+ "rewards/margins": 2.974621534347534,
339
+ "rewards/rejected": 1.2203772068023682,
340
  "step": 200
341
  },
342
  {
343
  "epoch": 0.57,
344
+ "eval_logits/chosen": -3.2195205688476562,
345
+ "eval_logits/rejected": -3.2195205688476562,
346
+ "eval_logps/chosen": -157.37933349609375,
347
+ "eval_logps/rejected": -157.37933349609375,
348
  "eval_loss": 0.6931471824645996,
349
  "eval_rewards/accuracies": 0.0,
350
+ "eval_rewards/chosen": -1.8023262023925781,
351
  "eval_rewards/margins": 0.0,
352
+ "eval_rewards/rejected": -1.8023262023925781,
353
+ "eval_runtime": 1.4741,
354
+ "eval_samples_per_second": 0.678,
355
+ "eval_steps_per_second": 0.678,
356
  "step": 200
357
  },
358
  {
359
  "epoch": 0.6,
360
+ "grad_norm": 1818.1510653253358,
361
+ "learning_rate": 1.7274575140626317e-08,
362
+ "logits/chosen": -4.293700218200684,
363
+ "logits/rejected": -4.587708473205566,
364
+ "logps/chosen": -306.94647216796875,
365
+ "logps/rejected": -254.83981323242188,
366
+ "loss": 0.3169,
367
+ "rewards/accuracies": 0.8374999761581421,
368
+ "rewards/chosen": 4.274092674255371,
369
+ "rewards/margins": 3.556690216064453,
370
+ "rewards/rejected": 0.7174022793769836,
371
  "step": 210
372
  },
373
  {
374
  "epoch": 0.63,
375
+ "grad_norm": 2084.9707047014217,
376
+ "learning_rate": 1.517437420865191e-08,
377
+ "logits/chosen": -4.2438554763793945,
378
+ "logits/rejected": -4.590119361877441,
379
+ "logps/chosen": -297.3277587890625,
380
+ "logps/rejected": -225.09414672851562,
381
+ "loss": 0.3117,
382
+ "rewards/accuracies": 0.862500011920929,
383
+ "rewards/chosen": 4.186089515686035,
384
+ "rewards/margins": 3.6873459815979004,
385
+ "rewards/rejected": 0.4987434446811676,
386
  "step": 220
387
  },
388
  {
389
  "epoch": 0.66,
390
+ "grad_norm": 1793.5243127965375,
391
+ "learning_rate": 1.3153283438175034e-08,
392
+ "logits/chosen": -4.3719801902771,
393
+ "logits/rejected": -4.563234806060791,
394
+ "logps/chosen": -281.373779296875,
395
+ "logps/rejected": -226.25576782226562,
396
+ "loss": 0.2879,
397
+ "rewards/accuracies": 0.831250011920929,
398
+ "rewards/chosen": 3.584429979324341,
399
+ "rewards/margins": 2.8807406425476074,
400
+ "rewards/rejected": 0.7036892771720886,
401
  "step": 230
402
  },
403
  {
404
  "epoch": 0.69,
405
+ "grad_norm": 1621.528952660571,
406
+ "learning_rate": 1.1227575463697438e-08,
407
+ "logits/chosen": -4.3936567306518555,
408
+ "logits/rejected": -4.714280128479004,
409
+ "logps/chosen": -258.6517639160156,
410
+ "logps/rejected": -215.28759765625,
411
+ "loss": 0.3042,
412
+ "rewards/accuracies": 0.831250011920929,
413
+ "rewards/chosen": 3.862626552581787,
414
+ "rewards/margins": 3.0625431537628174,
415
+ "rewards/rejected": 0.8000835180282593,
416
  "step": 240
417
  },
418
  {
419
+ "epoch": 0.71,
420
+ "grad_norm": 2231.5682374793205,
421
+ "learning_rate": 9.412754953531663e-09,
422
+ "logits/chosen": -4.34213924407959,
423
+ "logits/rejected": -4.6162428855896,
424
+ "logps/chosen": -278.9085388183594,
425
+ "logps/rejected": -232.6056365966797,
426
+ "loss": 0.3109,
427
+ "rewards/accuracies": 0.831250011920929,
428
+ "rewards/chosen": 3.6670002937316895,
429
+ "rewards/margins": 2.8657121658325195,
430
+ "rewards/rejected": 0.8012881278991699,
431
  "step": 250
432
  },
433
  {
434
  "epoch": 0.74,
435
+ "grad_norm": 1668.5476234310504,
436
+ "learning_rate": 7.723433775328384e-09,
437
+ "logits/chosen": -4.386145114898682,
438
+ "logits/rejected": -4.632050037384033,
439
+ "logps/chosen": -271.8704833984375,
440
+ "logps/rejected": -240.48257446289062,
441
+ "loss": 0.3039,
442
+ "rewards/accuracies": 0.862500011920929,
443
+ "rewards/chosen": 3.5878806114196777,
444
+ "rewards/margins": 2.9320101737976074,
445
+ "rewards/rejected": 0.6558703184127808,
446
  "step": 260
447
  },
448
  {
449
  "epoch": 0.77,
450
+ "grad_norm": 1726.7631750123023,
451
+ "learning_rate": 6.173213349909728e-09,
452
+ "logits/chosen": -4.517698764801025,
453
+ "logits/rejected": -4.687317848205566,
454
+ "logps/chosen": -273.4754943847656,
455
+ "logps/rejected": -228.2833709716797,
456
+ "loss": 0.3356,
457
+ "rewards/accuracies": 0.793749988079071,
458
+ "rewards/chosen": 3.6022744178771973,
459
+ "rewards/margins": 2.6589503288269043,
460
+ "rewards/rejected": 0.9433239698410034,
461
  "step": 270
462
  },
463
  {
464
  "epoch": 0.8,
465
+ "grad_norm": 1197.1122441391342,
466
+ "learning_rate": 4.7745751406263165e-09,
467
+ "logits/chosen": -4.299304008483887,
468
+ "logits/rejected": -4.589285850524902,
469
+ "logps/chosen": -274.9901123046875,
470
+ "logps/rejected": -229.76449584960938,
471
+ "loss": 0.2791,
472
+ "rewards/accuracies": 0.893750011920929,
473
+ "rewards/chosen": 4.094995021820068,
474
+ "rewards/margins": 3.0975327491760254,
475
+ "rewards/rejected": 0.997462272644043,
476
  "step": 280
477
  },
478
  {
479
  "epoch": 0.83,
480
+ "grad_norm": 2356.4193384705377,
481
+ "learning_rate": 3.5387801599533474e-09,
482
+ "logits/chosen": -4.320891857147217,
483
+ "logits/rejected": -4.508334636688232,
484
+ "logps/chosen": -282.45013427734375,
485
+ "logps/rejected": -236.50424194335938,
486
+ "loss": 0.3316,
487
+ "rewards/accuracies": 0.8812500238418579,
488
+ "rewards/chosen": 4.115664958953857,
489
+ "rewards/margins": 3.2731566429138184,
490
+ "rewards/rejected": 0.8425084948539734,
491
  "step": 290
492
  },
493
  {
494
  "epoch": 0.86,
495
+ "grad_norm": 1485.14332328563,
496
+ "learning_rate": 2.475778302439524e-09,
497
+ "logits/chosen": -4.295617580413818,
498
+ "logits/rejected": -4.5400543212890625,
499
+ "logps/chosen": -298.4153137207031,
500
+ "logps/rejected": -240.1478271484375,
501
+ "loss": 0.3594,
502
+ "rewards/accuracies": 0.831250011920929,
503
+ "rewards/chosen": 4.416214942932129,
504
+ "rewards/margins": 3.1984994411468506,
505
+ "rewards/rejected": 1.2177152633666992,
506
  "step": 300
507
  },
508
  {
509
  "epoch": 0.86,
510
+ "eval_logits/chosen": -3.220174551010132,
511
+ "eval_logits/rejected": -3.220174551010132,
512
+ "eval_logps/chosen": -157.367431640625,
513
+ "eval_logps/rejected": -157.367431640625,
514
  "eval_loss": 0.6931471824645996,
515
  "eval_rewards/accuracies": 0.0,
516
+ "eval_rewards/chosen": -1.7904319763183594,
517
  "eval_rewards/margins": 0.0,
518
+ "eval_rewards/rejected": -1.7904319763183594,
519
+ "eval_runtime": 1.47,
520
+ "eval_samples_per_second": 0.68,
521
+ "eval_steps_per_second": 0.68,
522
  "step": 300
523
  },
524
  {
525
  "epoch": 0.89,
526
+ "grad_norm": 2625.0873445651387,
527
+ "learning_rate": 1.5941282340065698e-09,
528
+ "logits/chosen": -4.43851900100708,
529
+ "logits/rejected": -4.580752372741699,
530
+ "logps/chosen": -262.37445068359375,
531
+ "logps/rejected": -226.46572875976562,
532
+ "loss": 0.3007,
533
+ "rewards/accuracies": 0.856249988079071,
534
+ "rewards/chosen": 3.741738796234131,
535
+ "rewards/margins": 2.9144444465637207,
536
+ "rewards/rejected": 0.8272944688796997,
537
  "step": 310
538
  },
539
  {
540
+ "epoch": 0.91,
541
+ "grad_norm": 1589.6112135444553,
542
+ "learning_rate": 9.009284826036689e-10,
543
+ "logits/chosen": -4.277141094207764,
544
+ "logits/rejected": -4.5314412117004395,
545
+ "logps/chosen": -292.65875244140625,
546
+ "logps/rejected": -243.8509063720703,
547
+ "loss": 0.3277,
548
+ "rewards/accuracies": 0.8687499761581421,
549
+ "rewards/chosen": 4.2250542640686035,
550
+ "rewards/margins": 3.1320207118988037,
551
+ "rewards/rejected": 1.093034029006958,
552
  "step": 320
553
  },
554
  {
555
  "epoch": 0.94,
556
+ "grad_norm": 2192.855370501752,
557
+ "learning_rate": 4.017602850342583e-10,
558
+ "logits/chosen": -4.330888271331787,
559
+ "logits/rejected": -4.536975383758545,
560
+ "logps/chosen": -305.5764465332031,
561
+ "logps/rejected": -252.0467529296875,
562
+ "loss": 0.3203,
563
+ "rewards/accuracies": 0.8687499761581421,
564
+ "rewards/chosen": 4.009243488311768,
565
+ "rewards/margins": 3.056270122528076,
566
+ "rewards/rejected": 0.9529730677604675,
567
  "step": 330
568
  },
569
  {
570
  "epoch": 0.97,
571
+ "grad_norm": 2158.7231383937637,
572
+ "learning_rate": 1.0064265011902328e-10,
573
+ "logits/chosen": -4.319821357727051,
574
+ "logits/rejected": -4.614516735076904,
575
+ "logps/chosen": -285.194091796875,
576
+ "logps/rejected": -227.5124053955078,
577
+ "loss": 0.3239,
578
+ "rewards/accuracies": 0.862500011920929,
579
+ "rewards/chosen": 3.751185178756714,
580
+ "rewards/margins": 2.758882522583008,
581
+ "rewards/rejected": 0.9923027753829956,
582
  "step": 340
583
  },
584
  {
585
  "epoch": 1.0,
586
+ "grad_norm": 1350.3403367664616,
587
+ "learning_rate": 0.0,
588
+ "logits/chosen": -4.290497779846191,
589
+ "logits/rejected": -4.4949870109558105,
590
+ "logps/chosen": -291.93768310546875,
591
+ "logps/rejected": -244.3520965576172,
592
+ "loss": 0.3142,
593
+ "rewards/accuracies": 0.8187500238418579,
594
+ "rewards/chosen": 4.083470344543457,
595
+ "rewards/margins": 3.2036800384521484,
596
+ "rewards/rejected": 0.8797903060913086,
597
+ "step": 350
598
+ },
599
+ {
600
+ "epoch": 1.0,
601
+ "step": 350,
602
  "total_flos": 0.0,
603
+ "train_loss": 0.36299856867109026,
604
+ "train_runtime": 5294.123,
605
+ "train_samples_per_second": 8.454,
606
  "train_steps_per_second": 0.066
607
  }
608
  ],
609
  "logging_steps": 10,
610
+ "max_steps": 350,
611
  "num_input_tokens_seen": 0,
612
  "num_train_epochs": 1,
613
  "save_steps": 100,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:64b9ce726680b0e48b6ed9b9cf02bd9886d9c4dc43b9488efca75e67b2fe6cd7
- size 6520
+ oid sha256:22e6e8fd2db42b5bcee0f6513108dbd03df91bb18e83b0bf1c0a9a1f8f523058
+ size 6264
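
`training_args.bin` is the pickled `TrainingArguments` object that the `Trainer` saves alongside a run. A hedged sketch for inspecting it:

```python
# Sketch: training_args.bin is a pickled TrainingArguments; torch.load can
# inspect it. weights_only=False is required on recent torch, since this is
# an arbitrary pickle (only load files you trust).
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.lr_scheduler_type, args.num_train_epochs)
```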