RikkiXu committed
Commit
5ae4bb7
1 Parent(s): a03da79

Model save

README.md CHANGED
@@ -16,14 +16,14 @@ should probably proofread and complete it, then remove this comment. -->
16
  This model was trained from scratch on the None dataset.
17
  It achieves the following results on the evaluation set:
18
  - Loss: 0.6931
19
- - Rewards/chosen: -1.9201
20
- - Rewards/rejected: -1.9201
21
  - Rewards/accuracies: 0.0
22
  - Rewards/margins: 0.0
23
- - Logps/rejected: -159.4172
24
- - Logps/chosen: -159.4172
25
- - Logits/rejected: -3.2283
26
- - Logits/chosen: -3.2283
27
 
28
  ## Model description
29
 
@@ -53,21 +53,20 @@ The following hyperparameters were used during training:
53
  - total_eval_batch_size: 64
54
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
55
  - lr_scheduler_type: cosine
56
- - lr_scheduler_warmup_ratio: 0.1
57
  - num_epochs: 1
58
 
59
  ### Training results
60
 
61
- | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
62
- |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
63
- | 0.3536 | 0.29 | 100 | 0.6931 | -1.3869 | -1.3869 | 0.0 | 0.0 | -158.3509 | -158.3509 | -3.2370 | -3.2370 |
64
- | 0.3091 | 0.57 | 200 | 0.6931 | -1.8814 | -1.8814 | 0.0 | 0.0 | -159.3398 | -159.3398 | -3.2304 | -3.2304 |
65
- | 0.3363 | 0.86 | 300 | 0.6931 | -1.9201 | -1.9201 | 0.0 | 0.0 | -159.4172 | -159.4172 | -3.2283 | -3.2283 |
66
 
67
 
68
  ### Framework versions
69
 
70
- - Transformers 4.38.2
71
  - Pytorch 2.1.2+cu118
72
  - Datasets 2.16.1
73
- - Tokenizers 0.15.2
 
16
  This model was trained from scratch on the None dataset.
17
  It achieves the following results on the evaluation set:
18
  - Loss: 0.6931
19
+ - Rewards/chosen: -1.6342
20
+ - Rewards/rejected: -1.6342
21
  - Rewards/accuracies: 0.0
22
  - Rewards/margins: 0.0
23
+ - Logps/rejected: -158.8454
24
+ - Logps/chosen: -158.8454
25
+ - Logits/rejected: -3.2278
26
+ - Logits/chosen: -3.2278
27
 
28
  ## Model description
29
 
 
53
  - total_eval_batch_size: 64
54
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
55
  - lr_scheduler_type: cosine
 
56
  - num_epochs: 1
57
 
58
  ### Training results
59
 
60
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
61
+ |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
62
+ | 0.3253 | 0.2857 | 100 | 0.6931 | -1.6970 | -1.6970 | 0.0 | 0.0 | -158.9711 | -158.9711 | -3.2366 | -3.2366 |
63
+ | 0.3071 | 0.5714 | 200 | 0.6931 | -2.0914 | -2.0914 | 0.0 | 0.0 | -159.7597 | -159.7597 | -3.2287 | -3.2287 |
64
+ | 0.3336 | 0.8571 | 300 | 0.6931 | -1.6342 | -1.6342 | 0.0 | 0.0 | -158.8454 | -158.8454 | -3.2278 | -3.2278 |
65
 
66
 
67
  ### Framework versions
68
 
69
+ - Transformers 4.41.1
70
  - Pytorch 2.1.2+cu118
71
  - Datasets 2.16.1
72
+ - Tokenizers 0.19.1
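The framework bump (Transformers 4.38.2 → 4.41.1, Tokenizers 0.15.2 → 0.19.1) is the main substance of this commit, so a quick load-and-generate check against the new versions is the natural follow-up. Below is a minimal sketch; the repo id is a placeholder, while the bfloat16 dtype and eos_token_id 32000 come from the config.json and generation_config.json diffs further down.

```python
# Smoke test for the re-saved checkpoint under the updated framework versions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "RikkiXu/example-dpo-checkpoint"  # placeholder, not the actual repo name

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
)

inputs = tokenizer("Hello", return_tensors="pt")
# generation_config.json sets eos_token_id 32000; generate() picks it up automatically.
out = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```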
all_results.json CHANGED
@@ -1,8 +1,9 @@
1
  {
2
  "epoch": 1.0,
3
- "train_loss": 0.37015721593584333,
4
- "train_runtime": 5302.4484,
 
5
  "train_samples": 44755,
6
- "train_samples_per_second": 8.44,
7
- "train_steps_per_second": 0.066
8
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.34604193687438967,
5
+ "train_runtime": 5793.0735,
6
  "train_samples": 44755,
7
+ "train_samples_per_second": 7.726,
8
+ "train_steps_per_second": 0.06
9
  }
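The updated throughput figures follow directly from the new runtime; a short arithmetic check, assuming the 350 total optimizer steps reported in trainer_state.json below:

```python
# Sanity-check the reported throughput against the raw runtime.
train_samples = 44755
train_runtime = 5793.0735  # seconds
total_steps = 350          # "step": 350 at epoch 1.0 in trainer_state.json

print(train_samples / train_runtime)  # ~7.726  -> train_samples_per_second
print(total_steps / train_runtime)    # ~0.0604 -> train_steps_per_second (reported as 0.06)
```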
config.json CHANGED
@@ -20,7 +20,7 @@
20
  "sliding_window": 4096,
21
  "tie_word_embeddings": false,
22
  "torch_dtype": "bfloat16",
23
- "transformers_version": "4.38.2",
24
  "use_cache": false,
25
  "vocab_size": 32002
26
  }
 
20
  "sliding_window": 4096,
21
  "tie_word_embeddings": false,
22
  "torch_dtype": "bfloat16",
23
+ "transformers_version": "4.41.1",
24
  "use_cache": false,
25
  "vocab_size": 32002
26
  }
generation_config.json CHANGED
@@ -2,5 +2,5 @@
2
  "_from_model_config": true,
3
  "bos_token_id": 1,
4
  "eos_token_id": 32000,
5
- "transformers_version": "4.38.2"
6
  }
 
2
  "_from_model_config": true,
3
  "bos_token_id": 1,
4
  "eos_token_id": 32000,
5
+ "transformers_version": "4.41.1"
6
  }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1f8493cd3d8633107f37f766933768b972a71580f90b096571fcc9f7f95e4a96
3
  size 4943178720
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5620686540e3b69a8dff87d7dbb09524fd94d7f2126dc65a769fe4d670bd4a90
3
  size 4943178720
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8f5824ec54fb5bda3f68132a93217d0b800db90a78fb0515b4b723ed929035a3
3
  size 4999819336
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e57f83d8c347274f3c77cf169473468937aa6cee843f09e0709b1c3248e0fb8
3
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d89f85d62fe83d0e63f9373321633097e7aab9bbb8ae37954505ffd93eb3af47
3
  size 4540532728
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:639c1f7e701b304b9eb6019aefbf2f2172c8a61237b358c0f6da984843c038be
3
  size 4540532728
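The three model-0000x-of-00003.safetensors entries are Git LFS pointer files: the repository itself stores only the spec version, a sha256 oid, and the byte size, while the shard contents live in LFS storage. A small sketch of how a downloaded shard could be verified against the oid recorded in this commit; the local path is assumed, not taken from the diff:

```python
# Verify a downloaded shard against the sha256 oid from its LFS pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid for model-00001-of-00003.safetensors after this commit:
expected = "5620686540e3b69a8dff87d7dbb09524fd94d7f2126dc65a769fe4d670bd4a90"
local_path = "model-00001-of-00003.safetensors"  # hypothetical local copy
assert sha256_of(local_path) == expected, "shard does not match its LFS pointer"
```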
runs/May31_01-13-18_n136-082-130/events.out.tfevents.1717089327.n136-082-130.3730659.0 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a615e1890f482937e0071599f0ffc9c51798e79a0ce8a2f8ca23e8b4e559918f
3
- size 28579
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d7ceb3534c06a6da90d2635eb77b149335443213d3e7abafd6753ad70d80c9f
3
+ size 32373
tokenizer.json CHANGED
@@ -152,6 +152,7 @@
152
  "end_of_word_suffix": null,
153
  "fuse_unk": true,
154
  "byte_fallback": true,
 
155
  "vocab": {
156
  "<unk>": 0,
157
  "<s>": 1,
 
152
  "end_of_word_suffix": null,
153
  "fuse_unk": true,
154
  "byte_fallback": true,
155
+ "ignore_merges": false,
156
  "vocab": {
157
  "<unk>": 0,
158
  "<s>": 1,
train_results.json CHANGED
@@ -1,8 +1,9 @@
1
  {
2
  "epoch": 1.0,
3
- "train_loss": 0.37015721593584333,
4
- "train_runtime": 5302.4484,
 
5
  "train_samples": 44755,
6
- "train_samples_per_second": 8.44,
7
- "train_steps_per_second": 0.066
8
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.34604193687438967,
5
+ "train_runtime": 5793.0735,
6
  "train_samples": 44755,
7
+ "train_samples_per_second": 7.726,
8
+ "train_steps_per_second": 0.06
9
  }
trainer_state.json CHANGED
@@ -9,9 +9,9 @@
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 0.0,
13
- "grad_norm": 1533.9529579439338,
14
- "learning_rate": 1.4285714285714284e-09,
15
  "logits/chosen": -4.185730934143066,
16
  "logits/rejected": -4.509836196899414,
17
  "logps/chosen": -274.000732421875,
@@ -24,586 +24,586 @@
24
  "step": 1
25
  },
26
  {
27
- "epoch": 0.03,
28
- "grad_norm": 1798.989663851012,
29
- "learning_rate": 1.4285714285714284e-08,
30
- "logits/chosen": -4.211680889129639,
31
- "logits/rejected": -4.4850640296936035,
32
- "logps/chosen": -318.5819091796875,
33
- "logps/rejected": -257.2215270996094,
34
- "loss": 0.7173,
35
- "rewards/accuracies": 0.3888888955116272,
36
- "rewards/chosen": -0.03539733216166496,
37
- "rewards/margins": -0.0653969869017601,
38
- "rewards/rejected": 0.02999965287744999,
39
  "step": 10
40
  },
41
  {
42
- "epoch": 0.06,
43
- "grad_norm": 1668.4002920396833,
44
- "learning_rate": 2.857142857142857e-08,
45
- "logits/chosen": -4.237612724304199,
46
- "logits/rejected": -4.492175102233887,
47
- "logps/chosen": -305.21356201171875,
48
- "logps/rejected": -244.23391723632812,
49
- "loss": 0.7172,
50
- "rewards/accuracies": 0.5249999761581421,
51
- "rewards/chosen": 0.011527794413268566,
52
- "rewards/margins": 0.015434956178069115,
53
- "rewards/rejected": -0.003907163627445698,
54
  "step": 20
55
  },
56
  {
57
- "epoch": 0.09,
58
- "grad_norm": 1590.2675307754077,
59
- "learning_rate": 4.285714285714285e-08,
60
- "logits/chosen": -4.289905548095703,
61
- "logits/rejected": -4.550080299377441,
62
- "logps/chosen": -306.20733642578125,
63
- "logps/rejected": -259.1973571777344,
64
- "loss": 0.6742,
65
- "rewards/accuracies": 0.581250011920929,
66
- "rewards/chosen": 0.15369151532649994,
67
- "rewards/margins": 0.08493399620056152,
68
- "rewards/rejected": 0.06875751912593842,
69
  "step": 30
70
  },
71
  {
72
- "epoch": 0.11,
73
- "grad_norm": 1234.2347053418307,
74
- "learning_rate": 4.996892303047306e-08,
75
- "logits/chosen": -4.294736862182617,
76
- "logits/rejected": -4.560345649719238,
77
- "logps/chosen": -294.7554931640625,
78
- "logps/rejected": -234.93032836914062,
79
- "loss": 0.5901,
80
- "rewards/accuracies": 0.6312500238418579,
81
- "rewards/chosen": 0.3894590735435486,
82
- "rewards/margins": 0.2519429624080658,
83
- "rewards/rejected": 0.1375161111354828,
84
  "step": 40
85
  },
86
  {
87
- "epoch": 0.14,
88
- "grad_norm": 1024.357051064495,
89
- "learning_rate": 4.972077065562821e-08,
90
- "logits/chosen": -4.300290584564209,
91
- "logits/rejected": -4.545838832855225,
92
- "logps/chosen": -300.6907043457031,
93
- "logps/rejected": -252.9388427734375,
94
- "loss": 0.4845,
95
- "rewards/accuracies": 0.78125,
96
- "rewards/chosen": 1.0174586772918701,
97
- "rewards/margins": 0.6066709160804749,
98
- "rewards/rejected": 0.41078776121139526,
99
  "step": 50
100
  },
101
  {
102
- "epoch": 0.17,
103
- "grad_norm": 993.3434930666515,
104
- "learning_rate": 4.922693215572695e-08,
105
- "logits/chosen": -4.309796333312988,
106
- "logits/rejected": -4.538843154907227,
107
- "logps/chosen": -308.38818359375,
108
- "logps/rejected": -261.2379150390625,
109
- "loss": 0.4421,
110
- "rewards/accuracies": 0.7437499761581421,
111
- "rewards/chosen": 1.4816973209381104,
112
- "rewards/margins": 0.7946016192436218,
113
- "rewards/rejected": 0.6870955228805542,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.2,
118
- "grad_norm": 1067.0305947544678,
119
- "learning_rate": 4.849231551964771e-08,
120
- "logits/chosen": -4.422641277313232,
121
- "logits/rejected": -4.553921699523926,
122
- "logps/chosen": -285.1670227050781,
123
- "logps/rejected": -236.08352661132812,
124
- "loss": 0.4036,
125
  "rewards/accuracies": 0.8187500238418579,
126
- "rewards/chosen": 1.8685623407363892,
127
- "rewards/margins": 1.2992055416107178,
128
- "rewards/rejected": 0.5693566203117371,
129
  "step": 70
130
  },
131
  {
132
- "epoch": 0.23,
133
- "grad_norm": 1032.775358630634,
134
- "learning_rate": 4.7524221697560474e-08,
135
- "logits/chosen": -4.270499229431152,
136
- "logits/rejected": -4.5226149559021,
137
- "logps/chosen": -291.38165283203125,
138
- "logps/rejected": -239.2751922607422,
139
- "loss": 0.379,
140
  "rewards/accuracies": 0.8687499761581421,
141
- "rewards/chosen": 2.1530368328094482,
142
- "rewards/margins": 1.5649009943008423,
143
- "rewards/rejected": 0.5881360769271851,
144
  "step": 80
145
  },
146
  {
147
- "epoch": 0.26,
148
- "grad_norm": 872.5516189664596,
149
- "learning_rate": 4.633227204080389e-08,
150
- "logits/chosen": -4.196683406829834,
151
- "logits/rejected": -4.429889678955078,
152
- "logps/chosen": -302.6337890625,
153
- "logps/rejected": -257.6473388671875,
154
- "loss": 0.3672,
155
- "rewards/accuracies": 0.78125,
156
- "rewards/chosen": 2.4647421836853027,
157
- "rewards/margins": 1.6267496347427368,
158
- "rewards/rejected": 0.8379926681518555,
159
  "step": 90
160
  },
161
  {
162
- "epoch": 0.29,
163
- "grad_norm": 1367.4563539236096,
164
- "learning_rate": 4.4928312680573064e-08,
165
- "logits/chosen": -4.303212642669678,
166
- "logits/rejected": -4.523682594299316,
167
- "logps/chosen": -290.59271240234375,
168
- "logps/rejected": -240.2918701171875,
169
- "loss": 0.3536,
170
- "rewards/accuracies": 0.8374999761581421,
171
- "rewards/chosen": 2.6176071166992188,
172
- "rewards/margins": 1.6629337072372437,
173
- "rewards/rejected": 0.9546731114387512,
174
  "step": 100
175
  },
176
  {
177
- "epoch": 0.29,
178
- "eval_logits/chosen": -3.2369751930236816,
179
- "eval_logits/rejected": -3.2369751930236816,
180
- "eval_logps/chosen": -158.35089111328125,
181
- "eval_logps/rejected": -158.35089111328125,
182
  "eval_loss": 0.6931471824645996,
183
  "eval_rewards/accuracies": 0.0,
184
- "eval_rewards/chosen": -1.3869400024414062,
185
  "eval_rewards/margins": 0.0,
186
- "eval_rewards/rejected": -1.3869400024414062,
187
- "eval_runtime": 1.4807,
188
- "eval_samples_per_second": 0.675,
189
- "eval_steps_per_second": 0.675,
190
  "step": 100
191
  },
192
  {
193
- "epoch": 0.31,
194
- "grad_norm": 934.0407259676991,
195
- "learning_rate": 4.3326296795745654e-08,
196
- "logits/chosen": -4.324947357177734,
197
- "logits/rejected": -4.566166400909424,
198
- "logps/chosen": -297.800537109375,
199
- "logps/rejected": -249.71450805664062,
200
- "loss": 0.3471,
201
- "rewards/accuracies": 0.8812500238418579,
202
- "rewards/chosen": 2.8152554035186768,
203
- "rewards/margins": 2.0201711654663086,
204
- "rewards/rejected": 0.7950841784477234,
205
  "step": 110
206
  },
207
  {
208
- "epoch": 0.34,
209
- "grad_norm": 1096.418461846408,
210
- "learning_rate": 4.1542145939921484e-08,
211
- "logits/chosen": -4.368184566497803,
212
- "logits/rejected": -4.613701820373535,
213
- "logps/chosen": -287.2909851074219,
214
- "logps/rejected": -237.5634002685547,
215
- "loss": 0.3344,
216
- "rewards/accuracies": 0.862500011920929,
217
- "rewards/chosen": 2.821599245071411,
218
- "rewards/margins": 2.0445477962493896,
219
- "rewards/rejected": 0.7770514488220215,
220
  "step": 120
221
  },
222
  {
223
- "epoch": 0.37,
224
- "grad_norm": 722.1338343165834,
225
- "learning_rate": 3.959359180586975e-08,
226
- "logits/chosen": -4.369457244873047,
227
- "logits/rejected": -4.6166582107543945,
228
- "logps/chosen": -277.1622314453125,
229
- "logps/rejected": -218.28323364257812,
230
- "loss": 0.3351,
231
- "rewards/accuracies": 0.8374999761581421,
232
- "rewards/chosen": 2.7689156532287598,
233
- "rewards/margins": 1.9403083324432373,
234
- "rewards/rejected": 0.8286076784133911,
235
  "step": 130
236
  },
237
  {
238
  "epoch": 0.4,
239
- "grad_norm": 854.554160074848,
240
- "learning_rate": 3.75e-08,
241
- "logits/chosen": -4.2873382568359375,
242
- "logits/rejected": -4.604461669921875,
243
- "logps/chosen": -294.3111267089844,
244
- "logps/rejected": -239.65628051757812,
245
- "loss": 0.3544,
246
- "rewards/accuracies": 0.893750011920929,
247
- "rewards/chosen": 2.786259174346924,
248
- "rewards/margins": 2.21262526512146,
249
- "rewards/rejected": 0.5736337900161743,
250
  "step": 140
251
  },
252
  {
253
- "epoch": 0.43,
254
- "grad_norm": 1024.227354138342,
255
- "learning_rate": 3.5282177578265295e-08,
256
- "logits/chosen": -4.251392364501953,
257
- "logits/rejected": -4.515843391418457,
258
- "logps/chosen": -293.588623046875,
259
- "logps/rejected": -250.3025360107422,
260
- "loss": 0.3157,
261
- "rewards/accuracies": 0.862500011920929,
262
- "rewards/chosen": 2.910778045654297,
263
- "rewards/margins": 2.2114222049713135,
264
- "rewards/rejected": 0.6993557810783386,
265
  "step": 150
266
  },
267
  {
268
- "epoch": 0.46,
269
- "grad_norm": 958.7773617526785,
270
- "learning_rate": 3.296216625629211e-08,
271
- "logits/chosen": -4.2807087898254395,
272
- "logits/rejected": -4.571825981140137,
273
- "logps/chosen": -294.7529296875,
274
- "logps/rejected": -235.1978759765625,
275
- "loss": 0.3382,
276
- "rewards/accuracies": 0.8500000238418579,
277
- "rewards/chosen": 3.0874907970428467,
278
- "rewards/margins": 2.296449899673462,
279
- "rewards/rejected": 0.7910411953926086,
280
  "step": 160
281
  },
282
  {
283
- "epoch": 0.49,
284
- "grad_norm": 844.9738579712869,
285
- "learning_rate": 3.056302334890786e-08,
286
- "logits/chosen": -4.230380058288574,
287
- "logits/rejected": -4.466236114501953,
288
- "logps/chosen": -294.3287658691406,
289
- "logps/rejected": -241.07357788085938,
290
- "loss": 0.3284,
291
- "rewards/accuracies": 0.8687499761581421,
292
- "rewards/chosen": 3.165980815887451,
293
- "rewards/margins": 2.4775989055633545,
294
- "rewards/rejected": 0.6883817911148071,
295
  "step": 170
296
  },
297
  {
298
- "epoch": 0.51,
299
- "grad_norm": 878.602697816289,
300
- "learning_rate": 2.8108592616187133e-08,
301
- "logits/chosen": -4.364335060119629,
302
- "logits/rejected": -4.596997261047363,
303
- "logps/chosen": -283.4359130859375,
304
- "logps/rejected": -240.91796875,
305
- "loss": 0.3163,
306
- "rewards/accuracies": 0.875,
307
- "rewards/chosen": 3.220620632171631,
308
- "rewards/margins": 2.4390523433685303,
309
- "rewards/rejected": 0.7815683484077454,
310
  "step": 180
311
  },
312
  {
313
- "epoch": 0.54,
314
- "grad_norm": 831.8981359537083,
315
- "learning_rate": 2.562326729345182e-08,
316
- "logits/chosen": -4.325879096984863,
317
- "logits/rejected": -4.494770526885986,
318
- "logps/chosen": -306.01171875,
319
- "logps/rejected": -257.24298095703125,
320
- "loss": 0.2864,
321
- "rewards/accuracies": 0.887499988079071,
322
- "rewards/chosen": 3.1413779258728027,
323
- "rewards/margins": 2.4198684692382812,
324
- "rewards/rejected": 0.7215089797973633,
325
  "step": 190
326
  },
327
  {
328
- "epoch": 0.57,
329
- "grad_norm": 752.1347907356251,
330
- "learning_rate": 2.3131747660339392e-08,
331
- "logits/chosen": -4.358603000640869,
332
- "logits/rejected": -4.599839687347412,
333
- "logps/chosen": -290.7633972167969,
334
- "logps/rejected": -249.0517120361328,
335
- "loss": 0.3091,
336
  "rewards/accuracies": 0.856249988079071,
337
- "rewards/chosen": 3.1841001510620117,
338
- "rewards/margins": 2.3024184703826904,
339
- "rewards/rejected": 0.8816817998886108,
340
  "step": 200
341
  },
342
  {
343
- "epoch": 0.57,
344
- "eval_logits/chosen": -3.230353832244873,
345
- "eval_logits/rejected": -3.230353832244873,
346
- "eval_logps/chosen": -159.3397674560547,
347
- "eval_logps/rejected": -159.3397674560547,
348
  "eval_loss": 0.6931471824645996,
349
  "eval_rewards/accuracies": 0.0,
350
- "eval_rewards/chosen": -1.8813800811767578,
351
  "eval_rewards/margins": 0.0,
352
- "eval_rewards/rejected": -1.8813800811767578,
353
- "eval_runtime": 1.4678,
354
- "eval_samples_per_second": 0.681,
355
- "eval_steps_per_second": 0.681,
356
  "step": 200
357
  },
358
  {
359
  "epoch": 0.6,
360
- "grad_norm": 873.0977005391466,
361
- "learning_rate": 2.065879555832674e-08,
362
- "logits/chosen": -4.293581962585449,
363
- "logits/rejected": -4.587343692779541,
364
- "logps/chosen": -304.852294921875,
365
- "logps/rejected": -254.5345916748047,
366
- "loss": 0.3125,
367
- "rewards/accuracies": 0.887499988079071,
368
- "rewards/chosen": 3.1841378211975098,
369
- "rewards/margins": 2.6728169918060303,
370
- "rewards/rejected": 0.5113206505775452,
371
  "step": 210
372
  },
373
  {
374
- "epoch": 0.63,
375
- "grad_norm": 896.6576085052702,
376
- "learning_rate": 1.8228988296424874e-08,
377
- "logits/chosen": -4.245147705078125,
378
- "logits/rejected": -4.591282367706299,
379
- "logps/chosen": -295.0483703613281,
380
- "logps/rejected": -224.8583984375,
381
- "loss": 0.3117,
382
- "rewards/accuracies": 0.875,
383
- "rewards/chosen": 3.232748031616211,
384
- "rewards/margins": 2.8655002117156982,
385
- "rewards/rejected": 0.367247611284256,
386
  "step": 220
387
  },
388
  {
389
- "epoch": 0.66,
390
- "grad_norm": 896.8048647139675,
391
- "learning_rate": 1.5866474390840123e-08,
392
- "logits/chosen": -4.371578216552734,
393
- "logits/rejected": -4.561336994171143,
394
- "logps/chosen": -279.5164489746094,
395
- "logps/rejected": -225.96630859375,
396
- "loss": 0.2801,
397
- "rewards/accuracies": 0.824999988079071,
398
- "rewards/chosen": 2.720884323120117,
399
- "rewards/margins": 2.2243082523345947,
400
- "rewards/rejected": 0.4965757727622986,
401
  "step": 230
402
  },
403
  {
404
- "epoch": 0.69,
405
- "grad_norm": 794.1572537064102,
406
- "learning_rate": 1.3594733566170923e-08,
407
- "logits/chosen": -4.393925666809082,
408
- "logits/rejected": -4.711283206939697,
409
- "logps/chosen": -256.6236877441406,
410
- "logps/rejected": -215.0515594482422,
411
- "loss": 0.3001,
412
- "rewards/accuracies": 0.8374999761581421,
413
- "rewards/chosen": 2.945354700088501,
414
- "rewards/margins": 2.4272878170013428,
415
- "rewards/rejected": 0.5180668830871582,
416
  "step": 240
417
  },
418
  {
419
- "epoch": 0.71,
420
- "grad_norm": 1028.7241893624246,
421
- "learning_rate": 1.1436343403356017e-08,
422
- "logits/chosen": -4.338345527648926,
423
- "logits/rejected": -4.6125383377075195,
424
- "logps/chosen": -276.8526611328125,
425
- "logps/rejected": -232.35494995117188,
426
- "loss": 0.296,
427
- "rewards/accuracies": 0.856249988079071,
428
- "rewards/chosen": 2.86142897605896,
429
- "rewards/margins": 2.335439443588257,
430
- "rewards/rejected": 0.5259896516799927,
431
  "step": 250
432
  },
433
  {
434
- "epoch": 0.74,
435
- "grad_norm": 955.6521893892625,
436
- "learning_rate": 9.412754953531663e-09,
437
- "logits/chosen": -4.384321212768555,
438
- "logits/rejected": -4.630730628967285,
439
- "logps/chosen": -269.96063232421875,
440
- "logps/rejected": -240.08322143554688,
441
- "loss": 0.2927,
442
- "rewards/accuracies": 0.8500000238418579,
443
- "rewards/chosen": 2.748866558074951,
444
- "rewards/margins": 2.221247434616089,
445
- "rewards/rejected": 0.5276187658309937,
446
  "step": 260
447
  },
448
  {
449
- "epoch": 0.77,
450
- "grad_norm": 861.1823956058561,
451
- "learning_rate": 7.544079547848182e-09,
452
- "logits/chosen": -4.517904281616211,
453
- "logits/rejected": -4.6855998039245605,
454
- "logps/chosen": -271.70074462890625,
455
- "logps/rejected": -227.885009765625,
456
- "loss": 0.3225,
457
- "rewards/accuracies": 0.78125,
458
- "rewards/chosen": 2.6885054111480713,
459
- "rewards/margins": 2.017667293548584,
460
- "rewards/rejected": 0.670837938785553,
461
  "step": 270
462
  },
463
  {
464
  "epoch": 0.8,
465
- "grad_norm": 732.1581791035536,
466
- "learning_rate": 5.8488889220255525e-09,
467
- "logits/chosen": -4.298913955688477,
468
- "logits/rejected": -4.587487697601318,
469
- "logps/chosen": -272.83935546875,
470
- "logps/rejected": -229.5067596435547,
471
- "loss": 0.2873,
472
- "rewards/accuracies": 0.90625,
473
- "rewards/chosen": 3.1228713989257812,
474
- "rewards/margins": 2.495274066925049,
475
- "rewards/rejected": 0.6275972723960876,
476
  "step": 280
477
  },
478
  {
479
- "epoch": 0.83,
480
- "grad_norm": 1125.5972329270946,
481
- "learning_rate": 4.344030642100133e-09,
482
- "logits/chosen": -4.320634365081787,
483
- "logits/rejected": -4.5083842277526855,
484
- "logps/chosen": -280.2475280761719,
485
- "logps/rejected": -236.018798828125,
486
- "loss": 0.3136,
487
- "rewards/accuracies": 0.8374999761581421,
488
- "rewards/chosen": 3.1591415405273438,
489
- "rewards/margins": 2.4951727390289307,
490
- "rewards/rejected": 0.6639689207077026,
491
  "step": 290
492
  },
493
  {
494
- "epoch": 0.86,
495
- "grad_norm": 788.6539907080908,
496
- "learning_rate": 3.0444606657442835e-09,
497
- "logits/chosen": -4.298077583312988,
498
- "logits/rejected": -4.541121959686279,
499
- "logps/chosen": -296.12176513671875,
500
- "logps/rejected": -239.7339324951172,
501
- "loss": 0.3363,
502
- "rewards/accuracies": 0.856249988079071,
503
- "rewards/chosen": 3.3548855781555176,
504
- "rewards/margins": 2.5390803813934326,
505
- "rewards/rejected": 0.8158050775527954,
506
  "step": 300
507
  },
508
  {
509
- "epoch": 0.86,
510
- "eval_logits/chosen": -3.2283477783203125,
511
- "eval_logits/rejected": -3.2283477783203125,
512
- "eval_logps/chosen": -159.417236328125,
513
- "eval_logps/rejected": -159.417236328125,
514
  "eval_loss": 0.6931471824645996,
515
  "eval_rewards/accuracies": 0.0,
516
- "eval_rewards/chosen": -1.9201126098632812,
517
  "eval_rewards/margins": 0.0,
518
- "eval_rewards/rejected": -1.9201126098632812,
519
- "eval_runtime": 1.4723,
520
- "eval_samples_per_second": 0.679,
521
- "eval_steps_per_second": 0.679,
522
  "step": 300
523
  },
524
  {
525
- "epoch": 0.89,
526
- "grad_norm": 1232.483796740804,
527
- "learning_rate": 1.9630947032398067e-09,
528
- "logits/chosen": -4.439385414123535,
529
- "logits/rejected": -4.579739093780518,
530
- "logps/chosen": -260.4383850097656,
531
- "logps/rejected": -226.1852264404297,
532
- "loss": 0.2944,
533
- "rewards/accuracies": 0.8374999761581421,
534
- "rewards/chosen": 2.838869571685791,
535
- "rewards/margins": 2.284980297088623,
536
- "rewards/rejected": 0.5538893342018127,
537
  "step": 310
538
  },
539
  {
540
- "epoch": 0.91,
541
- "grad_norm": 734.9918892690486,
542
- "learning_rate": 1.1106798553464803e-09,
543
- "logits/chosen": -4.274772644042969,
544
- "logits/rejected": -4.528196334838867,
545
- "logps/chosen": -290.4882507324219,
546
- "logps/rejected": -243.47128295898438,
547
- "loss": 0.3081,
548
- "rewards/accuracies": 0.862500011920929,
549
- "rewards/chosen": 3.197766065597534,
550
- "rewards/margins": 2.4614479541778564,
551
- "rewards/rejected": 0.7363181114196777,
552
  "step": 320
553
  },
554
  {
555
- "epoch": 0.94,
556
- "grad_norm": 1013.5360319244875,
557
- "learning_rate": 4.956878037864043e-10,
558
- "logits/chosen": -4.329029083251953,
559
- "logits/rejected": -4.53527307510376,
560
- "logps/chosen": -303.6526794433594,
561
- "logps/rejected": -251.791259765625,
562
- "loss": 0.3026,
563
- "rewards/accuracies": 0.862500011920929,
564
- "rewards/chosen": 2.9665093421936035,
565
- "rewards/margins": 2.3622777462005615,
566
- "rewards/rejected": 0.6042317152023315,
567
  "step": 330
568
  },
569
  {
570
- "epoch": 0.97,
571
- "grad_norm": 1029.0631247014248,
572
- "learning_rate": 1.2423061586496474e-10,
573
- "logits/chosen": -4.317473888397217,
574
- "logits/rejected": -4.609925270080566,
575
- "logps/chosen": -283.091552734375,
576
- "logps/rejected": -227.1355438232422,
577
- "loss": 0.3064,
578
- "rewards/accuracies": 0.8500000238418579,
579
- "rewards/chosen": 2.9268617630004883,
580
- "rewards/margins": 2.242302417755127,
581
- "rewards/rejected": 0.6845596432685852,
582
  "step": 340
583
  },
584
  {
585
  "epoch": 1.0,
586
- "grad_norm": 657.4804286362323,
587
  "learning_rate": 0.0,
588
- "logits/chosen": -4.288041114807129,
589
- "logits/rejected": -4.490693092346191,
590
- "logps/chosen": -289.8371887207031,
591
- "logps/rejected": -243.97164916992188,
592
- "loss": 0.2999,
593
- "rewards/accuracies": 0.8500000238418579,
594
- "rewards/chosen": 3.091984987258911,
595
- "rewards/margins": 2.4618594646453857,
596
- "rewards/rejected": 0.6301255226135254,
597
  "step": 350
598
  },
599
  {
600
  "epoch": 1.0,
601
  "step": 350,
602
  "total_flos": 0.0,
603
- "train_loss": 0.37015721593584333,
604
- "train_runtime": 5302.4484,
605
- "train_samples_per_second": 8.44,
606
- "train_steps_per_second": 0.066
607
  }
608
  ],
609
  "logging_steps": 10,
@@ -611,6 +611,18 @@
611
  "num_input_tokens_seen": 0,
612
  "num_train_epochs": 1,
613
  "save_steps": 100,
 
 
 
 
 
 
 
 
 
 
 
 
614
  "total_flos": 0.0,
615
  "train_batch_size": 8,
616
  "trial_name": null,
 
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.002857142857142857,
13
+ "grad_norm": 1533.819121390321,
14
+ "learning_rate": 4.9998992904271775e-08,
15
  "logits/chosen": -4.185730934143066,
16
  "logits/rejected": -4.509836196899414,
17
  "logps/chosen": -274.000732421875,
 
24
  "step": 1
25
  },
26
  {
27
+ "epoch": 0.02857142857142857,
28
+ "grad_norm": 1668.215158817105,
29
+ "learning_rate": 4.9899357349880975e-08,
30
+ "logits/chosen": -4.21472692489624,
31
+ "logits/rejected": -4.488520622253418,
32
+ "logps/chosen": -318.4333801269531,
33
+ "logps/rejected": -257.2440185546875,
34
+ "loss": 0.6858,
35
+ "rewards/accuracies": 0.4930555522441864,
36
+ "rewards/chosen": 0.03888694569468498,
37
+ "rewards/margins": 0.020132217556238174,
38
+ "rewards/rejected": 0.018754728138446808,
39
  "step": 10
40
  },
41
  {
42
+ "epoch": 0.05714285714285714,
43
+ "grad_norm": 1345.3140295690048,
44
+ "learning_rate": 4.959823971496574e-08,
45
+ "logits/chosen": -4.247828006744385,
46
+ "logits/rejected": -4.502226829528809,
47
+ "logps/chosen": -304.4950866699219,
48
+ "logps/rejected": -244.0281219482422,
49
+ "loss": 0.6107,
50
+ "rewards/accuracies": 0.7250000238418579,
51
+ "rewards/chosen": 0.37075769901275635,
52
+ "rewards/margins": 0.27177533507347107,
53
+ "rewards/rejected": 0.09898237138986588,
54
  "step": 20
55
  },
56
  {
57
+ "epoch": 0.08571428571428572,
58
+ "grad_norm": 1125.2250514399805,
59
+ "learning_rate": 4.9099071517396326e-08,
60
+ "logits/chosen": -4.306983947753906,
61
+ "logits/rejected": -4.5679826736450195,
62
+ "logps/chosen": -304.89337158203125,
63
+ "logps/rejected": -258.8996887207031,
64
+ "loss": 0.5148,
65
+ "rewards/accuracies": 0.78125,
66
+ "rewards/chosen": 0.8106807470321655,
67
+ "rewards/margins": 0.5930811762809753,
68
+ "rewards/rejected": 0.21759963035583496,
69
  "step": 30
70
  },
71
  {
72
+ "epoch": 0.11428571428571428,
73
+ "grad_norm": 1094.2742476604449,
74
+ "learning_rate": 4.8405871765993426e-08,
75
+ "logits/chosen": -4.310123443603516,
76
+ "logits/rejected": -4.577700614929199,
77
+ "logps/chosen": -292.9500427246094,
78
+ "logps/rejected": -234.384765625,
79
+ "loss": 0.4433,
80
+ "rewards/accuracies": 0.7749999761581421,
81
+ "rewards/chosen": 1.2921748161315918,
82
+ "rewards/margins": 0.8818836212158203,
83
+ "rewards/rejected": 0.41029104590415955,
84
  "step": 40
85
  },
86
  {
87
+ "epoch": 0.14285714285714285,
88
+ "grad_norm": 866.0178113474443,
89
+ "learning_rate": 4.7524221697560474e-08,
90
+ "logits/chosen": -4.307633876800537,
91
+ "logits/rejected": -4.553541660308838,
92
+ "logps/chosen": -298.9430847167969,
93
+ "logps/rejected": -252.4582977294922,
94
+ "loss": 0.3906,
95
+ "rewards/accuracies": 0.8374999761581421,
96
+ "rewards/chosen": 1.8912757635116577,
97
+ "rewards/margins": 1.240208387374878,
98
+ "rewards/rejected": 0.6510674357414246,
99
  "step": 50
100
  },
101
  {
102
+ "epoch": 0.17142857142857143,
103
+ "grad_norm": 837.0640990054532,
104
+ "learning_rate": 4.646121984004665e-08,
105
+ "logits/chosen": -4.309387683868408,
106
+ "logits/rejected": -4.5390214920043945,
107
+ "logps/chosen": -307.09820556640625,
108
+ "logps/rejected": -260.90191650390625,
109
+ "loss": 0.3814,
110
+ "rewards/accuracies": 0.8125,
111
+ "rewards/chosen": 2.126704216003418,
112
+ "rewards/margins": 1.2716383934020996,
113
+ "rewards/rejected": 0.8550659418106079,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.2,
118
+ "grad_norm": 985.5848045316892,
119
+ "learning_rate": 4.522542485937369e-08,
120
+ "logits/chosen": -4.4262213706970215,
121
+ "logits/rejected": -4.558186054229736,
122
+ "logps/chosen": -284.1617736816406,
123
+ "logps/rejected": -235.8411102294922,
124
+ "loss": 0.3695,
125
  "rewards/accuracies": 0.8187500238418579,
126
+ "rewards/chosen": 2.3711953163146973,
127
+ "rewards/margins": 1.6806213855743408,
128
+ "rewards/rejected": 0.690574049949646,
129
  "step": 70
130
  },
131
  {
132
+ "epoch": 0.22857142857142856,
133
+ "grad_norm": 951.7016793785169,
134
+ "learning_rate": 4.3826786650090273e-08,
135
+ "logits/chosen": -4.280414581298828,
136
+ "logits/rejected": -4.534539222717285,
137
+ "logps/chosen": -290.68597412109375,
138
+ "logps/rejected": -239.3174591064453,
139
+ "loss": 0.3466,
140
  "rewards/accuracies": 0.8687499761581421,
141
+ "rewards/chosen": 2.500866413116455,
142
+ "rewards/margins": 1.933850646018982,
143
+ "rewards/rejected": 0.5670154094696045,
144
  "step": 80
145
  },
146
  {
147
+ "epoch": 0.2571428571428571,
148
+ "grad_norm": 868.4695107443264,
149
+ "learning_rate": 4.2276566224671614e-08,
150
+ "logits/chosen": -4.205895900726318,
151
+ "logits/rejected": -4.440802574157715,
152
+ "logps/chosen": -301.97674560546875,
153
+ "logps/rejected": -257.63153076171875,
154
+ "loss": 0.3457,
155
+ "rewards/accuracies": 0.7875000238418579,
156
+ "rewards/chosen": 2.7932560443878174,
157
+ "rewards/margins": 1.9473575353622437,
158
+ "rewards/rejected": 0.8458986282348633,
159
  "step": 90
160
  },
161
  {
162
+ "epoch": 0.2857142857142857,
163
+ "grad_norm": 1324.6980610698608,
164
+ "learning_rate": 4.058724504646834e-08,
165
+ "logits/chosen": -4.310187816619873,
166
+ "logits/rejected": -4.531655311584473,
167
+ "logps/chosen": -290.22113037109375,
168
+ "logps/rejected": -240.4936981201172,
169
+ "loss": 0.3253,
170
+ "rewards/accuracies": 0.856249988079071,
171
+ "rewards/chosen": 2.803382396697998,
172
+ "rewards/margins": 1.9496362209320068,
173
+ "rewards/rejected": 0.8537459373474121,
174
  "step": 100
175
  },
176
  {
177
+ "epoch": 0.2857142857142857,
178
+ "eval_logits/chosen": -3.2365729808807373,
179
+ "eval_logits/rejected": -3.2365729808807373,
180
+ "eval_logps/chosen": -158.97109985351562,
181
+ "eval_logps/rejected": -158.97109985351562,
182
  "eval_loss": 0.6931471824645996,
183
  "eval_rewards/accuracies": 0.0,
184
+ "eval_rewards/chosen": -1.697042465209961,
185
  "eval_rewards/margins": 0.0,
186
+ "eval_rewards/rejected": -1.697042465209961,
187
+ "eval_runtime": 1.5274,
188
+ "eval_samples_per_second": 0.655,
189
+ "eval_steps_per_second": 0.655,
190
  "step": 100
191
  },
192
  {
193
+ "epoch": 0.3142857142857143,
194
+ "grad_norm": 904.1535779728154,
195
+ "learning_rate": 3.8772424536302564e-08,
196
+ "logits/chosen": -4.3289313316345215,
197
+ "logits/rejected": -4.571288108825684,
198
+ "logps/chosen": -297.5599060058594,
199
+ "logps/rejected": -249.8257293701172,
200
+ "loss": 0.345,
201
+ "rewards/accuracies": 0.893750011920929,
202
+ "rewards/chosen": 2.935572385787964,
203
+ "rewards/margins": 2.1961002349853516,
204
+ "rewards/rejected": 0.7394723892211914,
205
  "step": 110
206
  },
207
  {
208
+ "epoch": 0.34285714285714286,
209
+ "grad_norm": 1086.0405455439977,
210
+ "learning_rate": 3.6846716561824964e-08,
211
+ "logits/chosen": -4.373476982116699,
212
+ "logits/rejected": -4.619527816772461,
213
+ "logps/chosen": -287.13385009765625,
214
+ "logps/rejected": -237.6899871826172,
215
+ "loss": 0.3263,
216
+ "rewards/accuracies": 0.824999988079071,
217
+ "rewards/chosen": 2.900177478790283,
218
+ "rewards/margins": 2.18641996383667,
219
+ "rewards/rejected": 0.7137576937675476,
220
  "step": 120
221
  },
222
  {
223
+ "epoch": 0.37142857142857144,
224
+ "grad_norm": 753.3076911412429,
225
+ "learning_rate": 3.482562579134809e-08,
226
+ "logits/chosen": -4.375483512878418,
227
+ "logits/rejected": -4.624612331390381,
228
+ "logps/chosen": -276.9853515625,
229
+ "logps/rejected": -218.39633178710938,
230
+ "loss": 0.3228,
231
+ "rewards/accuracies": 0.8500000238418579,
232
+ "rewards/chosen": 2.8573689460754395,
233
+ "rewards/margins": 2.0853238105773926,
234
+ "rewards/rejected": 0.7720457315444946,
235
  "step": 130
236
  },
237
  {
238
  "epoch": 0.4,
239
+ "grad_norm": 840.6908677183937,
240
+ "learning_rate": 3.272542485937369e-08,
241
+ "logits/chosen": -4.290375232696533,
242
+ "logits/rejected": -4.607089996337891,
243
+ "logps/chosen": -294.1933288574219,
244
+ "logps/rejected": -239.8271942138672,
245
+ "loss": 0.3428,
246
+ "rewards/accuracies": 0.875,
247
+ "rewards/chosen": 2.845167875289917,
248
+ "rewards/margins": 2.3569939136505127,
249
+ "rewards/rejected": 0.48817411065101624,
250
  "step": 140
251
  },
252
  {
253
+ "epoch": 0.42857142857142855,
254
+ "grad_norm": 1010.0880176318159,
255
+ "learning_rate": 3.056302334890786e-08,
256
+ "logits/chosen": -4.25304651260376,
257
+ "logits/rejected": -4.517129421234131,
258
+ "logps/chosen": -293.35797119140625,
259
+ "logps/rejected": -250.48641967773438,
260
+ "loss": 0.313,
261
+ "rewards/accuracies": 0.8374999761581421,
262
+ "rewards/chosen": 3.026089906692505,
263
+ "rewards/margins": 2.4186887741088867,
264
+ "rewards/rejected": 0.6074013113975525,
265
  "step": 150
266
  },
267
  {
268
+ "epoch": 0.45714285714285713,
269
+ "grad_norm": 881.3705397776472,
270
+ "learning_rate": 2.8355831645441387e-08,
271
+ "logits/chosen": -4.279029369354248,
272
+ "logits/rejected": -4.569698810577393,
273
+ "logps/chosen": -294.6650695800781,
274
+ "logps/rejected": -235.29067993164062,
275
+ "loss": 0.338,
276
+ "rewards/accuracies": 0.8374999761581421,
277
+ "rewards/chosen": 3.13142728805542,
278
+ "rewards/margins": 2.386794090270996,
279
+ "rewards/rejected": 0.7446335554122925,
280
  "step": 160
281
  },
282
  {
283
+ "epoch": 0.4857142857142857,
284
+ "grad_norm": 857.1741181734847,
285
+ "learning_rate": 2.6121620758762875e-08,
286
+ "logits/chosen": -4.228262901306152,
287
+ "logits/rejected": -4.463376522064209,
288
+ "logps/chosen": -294.2881774902344,
289
+ "logps/rejected": -241.1619415283203,
290
+ "loss": 0.3264,
291
+ "rewards/accuracies": 0.875,
292
+ "rewards/chosen": 3.1862380504608154,
293
+ "rewards/margins": 2.542022228240967,
294
+ "rewards/rejected": 0.6442161202430725,
295
  "step": 170
296
  },
297
  {
298
+ "epoch": 0.5142857142857142,
299
+ "grad_norm": 923.726950031985,
300
+ "learning_rate": 2.3878379241237133e-08,
301
+ "logits/chosen": -4.36321496963501,
302
+ "logits/rejected": -4.595080375671387,
303
+ "logps/chosen": -283.4067687988281,
304
+ "logps/rejected": -241.1226043701172,
305
+ "loss": 0.3098,
306
+ "rewards/accuracies": 0.8374999761581421,
307
+ "rewards/chosen": 3.2351880073547363,
308
+ "rewards/margins": 2.5559356212615967,
309
+ "rewards/rejected": 0.679252028465271,
310
  "step": 180
311
  },
312
  {
313
+ "epoch": 0.5428571428571428,
314
+ "grad_norm": 792.8076625116224,
315
+ "learning_rate": 2.164416835455862e-08,
316
+ "logits/chosen": -4.323281288146973,
317
+ "logits/rejected": -4.492599964141846,
318
+ "logps/chosen": -305.88592529296875,
319
+ "logps/rejected": -257.3919982910156,
320
+ "loss": 0.2759,
321
+ "rewards/accuracies": 0.893750011920929,
322
+ "rewards/chosen": 3.2042744159698486,
323
+ "rewards/margins": 2.557298421859741,
324
+ "rewards/rejected": 0.6469758152961731,
325
  "step": 190
326
  },
327
  {
328
+ "epoch": 0.5714285714285714,
329
+ "grad_norm": 757.2657183486399,
330
+ "learning_rate": 1.943697665109214e-08,
331
+ "logits/chosen": -4.354622840881348,
332
+ "logits/rejected": -4.594660758972168,
333
+ "logps/chosen": -290.7183532714844,
334
+ "logps/rejected": -249.0703582763672,
335
+ "loss": 0.3071,
336
  "rewards/accuracies": 0.856249988079071,
337
+ "rewards/chosen": 3.2066097259521484,
338
+ "rewards/margins": 2.334256649017334,
339
+ "rewards/rejected": 0.8723530769348145,
340
  "step": 200
341
  },
342
  {
343
+ "epoch": 0.5714285714285714,
344
+ "eval_logits/chosen": -3.2287425994873047,
345
+ "eval_logits/rejected": -3.2287425994873047,
346
+ "eval_logps/chosen": -159.75973510742188,
347
+ "eval_logps/rejected": -159.75973510742188,
348
  "eval_loss": 0.6931471824645996,
349
  "eval_rewards/accuracies": 0.0,
350
+ "eval_rewards/chosen": -2.0913619995117188,
351
  "eval_rewards/margins": 0.0,
352
+ "eval_rewards/rejected": -2.0913619995117188,
353
+ "eval_runtime": 1.503,
354
+ "eval_samples_per_second": 0.665,
355
+ "eval_steps_per_second": 0.665,
356
  "step": 200
357
  },
358
  {
359
  "epoch": 0.6,
360
+ "grad_norm": 891.2229023091579,
361
+ "learning_rate": 1.7274575140626317e-08,
362
+ "logits/chosen": -4.2898783683776855,
363
+ "logits/rejected": -4.5833001136779785,
364
+ "logps/chosen": -304.8259582519531,
365
+ "logps/rejected": -254.60757446289062,
366
+ "loss": 0.3024,
367
+ "rewards/accuracies": 0.8687499761581421,
368
+ "rewards/chosen": 3.1973044872283936,
369
+ "rewards/margins": 2.722479820251465,
370
+ "rewards/rejected": 0.47482460737228394,
371
  "step": 210
372
  },
373
  {
374
+ "epoch": 0.6285714285714286,
375
+ "grad_norm": 938.9689090425716,
376
+ "learning_rate": 1.517437420865191e-08,
377
+ "logits/chosen": -4.243287086486816,
378
+ "logits/rejected": -4.58882999420166,
379
+ "logps/chosen": -294.97943115234375,
380
+ "logps/rejected": -225.01730346679688,
381
+ "loss": 0.2916,
382
+ "rewards/accuracies": 0.856249988079071,
383
+ "rewards/chosen": 3.2672336101531982,
384
+ "rewards/margins": 2.979445695877075,
385
+ "rewards/rejected": 0.2877880036830902,
386
  "step": 220
387
  },
388
  {
389
+ "epoch": 0.6571428571428571,
390
+ "grad_norm": 889.4233945739674,
391
+ "learning_rate": 1.3153283438175034e-08,
392
+ "logits/chosen": -4.367541313171387,
393
+ "logits/rejected": -4.557114601135254,
394
+ "logps/chosen": -279.52362060546875,
395
+ "logps/rejected": -226.0491180419922,
396
+ "loss": 0.2804,
397
+ "rewards/accuracies": 0.862500011920929,
398
+ "rewards/chosen": 2.717285633087158,
399
+ "rewards/margins": 2.2621235847473145,
400
+ "rewards/rejected": 0.45516151189804077,
401
  "step": 230
402
  },
403
  {
404
+ "epoch": 0.6857142857142857,
405
+ "grad_norm": 764.4075642697645,
406
+ "learning_rate": 1.1227575463697438e-08,
407
+ "logits/chosen": -4.391533851623535,
408
+ "logits/rejected": -4.709025859832764,
409
+ "logps/chosen": -256.52532958984375,
410
+ "logps/rejected": -215.2234649658203,
411
+ "loss": 0.287,
412
+ "rewards/accuracies": 0.8500000238418579,
413
+ "rewards/chosen": 2.9945430755615234,
414
+ "rewards/margins": 2.5624358654022217,
415
+ "rewards/rejected": 0.4321066439151764,
416
  "step": 240
417
  },
418
  {
419
+ "epoch": 0.7142857142857143,
420
+ "grad_norm": 1078.8910476393369,
421
+ "learning_rate": 9.412754953531663e-09,
422
+ "logits/chosen": -4.338193416595459,
423
+ "logits/rejected": -4.612923622131348,
424
+ "logps/chosen": -276.8057861328125,
425
+ "logps/rejected": -232.43807983398438,
426
+ "loss": 0.2923,
427
+ "rewards/accuracies": 0.875,
428
+ "rewards/chosen": 2.8848717212677,
429
+ "rewards/margins": 2.400455951690674,
430
+ "rewards/rejected": 0.4844156801700592,
431
  "step": 250
432
  },
433
  {
434
+ "epoch": 0.7428571428571429,
435
+ "grad_norm": 888.6230207618648,
436
+ "learning_rate": 7.723433775328384e-09,
437
+ "logits/chosen": -4.384097099304199,
438
+ "logits/rejected": -4.630164623260498,
439
+ "logps/chosen": -269.947265625,
440
+ "logps/rejected": -240.2582244873047,
441
+ "loss": 0.2917,
442
+ "rewards/accuracies": 0.8687499761581421,
443
+ "rewards/chosen": 2.7555510997772217,
444
+ "rewards/margins": 2.3154397010803223,
445
+ "rewards/rejected": 0.4401116967201233,
446
  "step": 260
447
  },
448
  {
449
+ "epoch": 0.7714285714285715,
450
+ "grad_norm": 839.6114448931011,
451
+ "learning_rate": 6.173213349909728e-09,
452
+ "logits/chosen": -4.516074180603027,
453
+ "logits/rejected": -4.6834282875061035,
454
+ "logps/chosen": -271.71820068359375,
455
+ "logps/rejected": -227.9601287841797,
456
+ "loss": 0.3199,
457
+ "rewards/accuracies": 0.793749988079071,
458
+ "rewards/chosen": 2.6797890663146973,
459
+ "rewards/margins": 2.0464982986450195,
460
+ "rewards/rejected": 0.6332908868789673,
461
  "step": 270
462
  },
463
  {
464
  "epoch": 0.8,
465
+ "grad_norm": 794.9828383467956,
466
+ "learning_rate": 4.7745751406263165e-09,
467
+ "logits/chosen": -4.297934532165527,
468
+ "logits/rejected": -4.586764335632324,
469
+ "logps/chosen": -272.8429870605469,
470
+ "logps/rejected": -229.56021118164062,
471
+ "loss": 0.2842,
472
+ "rewards/accuracies": 0.893750011920929,
473
+ "rewards/chosen": 3.1210646629333496,
474
+ "rewards/margins": 2.520183801651001,
475
+ "rewards/rejected": 0.6008811593055725,
476
  "step": 280
477
  },
478
  {
479
+ "epoch": 0.8285714285714286,
480
+ "grad_norm": 1122.8170349728516,
481
+ "learning_rate": 3.5387801599533474e-09,
482
+ "logits/chosen": -4.3172149658203125,
483
+ "logits/rejected": -4.5055928230285645,
484
+ "logps/chosen": -280.2183837890625,
485
+ "logps/rejected": -236.1828155517578,
486
+ "loss": 0.3083,
487
+ "rewards/accuracies": 0.8500000238418579,
488
+ "rewards/chosen": 3.173701286315918,
489
+ "rewards/margins": 2.5917415618896484,
490
+ "rewards/rejected": 0.5819598436355591,
491
  "step": 290
492
  },
493
  {
494
+ "epoch": 0.8571428571428571,
495
+ "grad_norm": 746.1081306618772,
496
+ "learning_rate": 2.475778302439524e-09,
497
+ "logits/chosen": -4.29483699798584,
498
+ "logits/rejected": -4.537248134613037,
499
+ "logps/chosen": -296.06756591796875,
500
+ "logps/rejected": -239.85092163085938,
501
+ "loss": 0.3336,
502
+ "rewards/accuracies": 0.875,
503
+ "rewards/chosen": 3.3819689750671387,
504
+ "rewards/margins": 2.6246695518493652,
505
+ "rewards/rejected": 0.7572996020317078,
506
  "step": 300
507
  },
508
  {
509
+ "epoch": 0.8571428571428571,
510
+ "eval_logits/chosen": -3.2277979850769043,
511
+ "eval_logits/rejected": -3.2277979850769043,
512
+ "eval_logps/chosen": -158.84542846679688,
513
+ "eval_logps/rejected": -158.84542846679688,
514
  "eval_loss": 0.6931471824645996,
515
  "eval_rewards/accuracies": 0.0,
516
+ "eval_rewards/chosen": -1.6342105865478516,
517
  "eval_rewards/margins": 0.0,
518
+ "eval_rewards/rejected": -1.6342105865478516,
519
+ "eval_runtime": 1.5181,
520
+ "eval_samples_per_second": 0.659,
521
+ "eval_steps_per_second": 0.659,
522
  "step": 300
523
  },
524
  {
525
+ "epoch": 0.8857142857142857,
526
+ "grad_norm": 1352.5130710151727,
527
+ "learning_rate": 1.5941282340065698e-09,
528
+ "logits/chosen": -4.436648368835449,
529
+ "logits/rejected": -4.576234817504883,
530
+ "logps/chosen": -260.4552001953125,
531
+ "logps/rejected": -226.2211456298828,
532
+ "loss": 0.2894,
533
+ "rewards/accuracies": 0.856249988079071,
534
+ "rewards/chosen": 2.8304812908172607,
535
+ "rewards/margins": 2.2945332527160645,
536
+ "rewards/rejected": 0.535947859287262,
537
  "step": 310
538
  },
539
  {
540
+ "epoch": 0.9142857142857143,
541
+ "grad_norm": 759.0330474241811,
542
+ "learning_rate": 9.009284826036689e-10,
543
+ "logits/chosen": -4.273808002471924,
544
+ "logits/rejected": -4.527020454406738,
545
+ "logps/chosen": -290.4542541503906,
546
+ "logps/rejected": -243.5283966064453,
547
+ "loss": 0.314,
548
+ "rewards/accuracies": 0.8687499761581421,
549
+ "rewards/chosen": 3.2147815227508545,
550
+ "rewards/margins": 2.507025718688965,
551
+ "rewards/rejected": 0.7077558040618896,
552
  "step": 320
553
  },
554
  {
555
+ "epoch": 0.9428571428571428,
556
+ "grad_norm": 1012.631208038228,
557
+ "learning_rate": 4.017602850342583e-10,
558
+ "logits/chosen": -4.329623222351074,
559
+ "logits/rejected": -4.535677433013916,
560
+ "logps/chosen": -303.55926513671875,
561
+ "logps/rejected": -251.89437866210938,
562
+ "loss": 0.3017,
563
+ "rewards/accuracies": 0.875,
564
+ "rewards/chosen": 3.013214588165283,
565
+ "rewards/margins": 2.4605515003204346,
566
+ "rewards/rejected": 0.5526631474494934,
567
  "step": 330
568
  },
569
  {
570
+ "epoch": 0.9714285714285714,
571
+ "grad_norm": 1047.7085022489064,
572
+ "learning_rate": 1.0064265011902328e-10,
573
+ "logits/chosen": -4.3172688484191895,
574
+ "logits/rejected": -4.609848976135254,
575
+ "logps/chosen": -283.23883056640625,
576
+ "logps/rejected": -227.2130126953125,
577
+ "loss": 0.3029,
578
+ "rewards/accuracies": 0.856249988079071,
579
+ "rewards/chosen": 2.853224277496338,
580
+ "rewards/margins": 2.2073826789855957,
581
+ "rewards/rejected": 0.6458419561386108,
582
  "step": 340
583
  },
584
  {
585
  "epoch": 1.0,
586
+ "grad_norm": 570.6961447414922,
587
  "learning_rate": 0.0,
588
+ "logits/chosen": -4.2885661125183105,
589
+ "logits/rejected": -4.4927144050598145,
590
+ "logps/chosen": -289.79022216796875,
591
+ "logps/rejected": -244.1138916015625,
592
+ "loss": 0.2904,
593
+ "rewards/accuracies": 0.84375,
594
+ "rewards/chosen": 3.115464448928833,
595
+ "rewards/margins": 2.5564632415771484,
596
+ "rewards/rejected": 0.5590011477470398,
597
  "step": 350
598
  },
599
  {
600
  "epoch": 1.0,
601
  "step": 350,
602
  "total_flos": 0.0,
603
+ "train_loss": 0.34604193687438967,
604
+ "train_runtime": 5793.0735,
605
+ "train_samples_per_second": 7.726,
606
+ "train_steps_per_second": 0.06
607
  }
608
  ],
609
  "logging_steps": 10,
 
611
  "num_input_tokens_seen": 0,
612
  "num_train_epochs": 1,
613
  "save_steps": 100,
614
+ "stateful_callbacks": {
615
+ "TrainerControl": {
616
+ "args": {
617
+ "should_epoch_stop": false,
618
+ "should_evaluate": false,
619
+ "should_log": false,
620
+ "should_save": true,
621
+ "should_training_stop": false
622
+ },
623
+ "attributes": {}
624
+ }
625
+ },
626
  "total_flos": 0.0,
627
  "train_batch_size": 8,
628
  "trial_name": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:25686a2a94e778bd06e64992e121362fb8e396ce160a2ce4cb0004cc6efb88ff
3
- size 6264
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4cfcdc187e96bc5f72f21240b097691d6a8cdf8d0fcc5ad2fe20f43a43287471
3
+ size 6520