RikkiXu committed
Commit f335070
1 Parent(s): 6a7d5b8

Model save

README.md CHANGED
@@ -32,7 +32,7 @@ More information needed
  ### Training hyperparameters

  The following hyperparameters were used during training:
- - learning_rate: 5e-07
  - train_batch_size: 4
  - eval_batch_size: 4
  - seed: 42
@@ -52,7 +52,7 @@ The following hyperparameters were used during training:

  ### Framework versions

- - Transformers 4.39.3
  - Pytorch 2.1.2+cu118
  - Datasets 2.16.1
- - Tokenizers 0.15.2

  ### Training hyperparameters

  The following hyperparameters were used during training:
+ - learning_rate: 2e-07
  - train_batch_size: 4
  - eval_batch_size: 4
  - seed: 42

  ### Framework versions

+ - Transformers 4.41.1
  - Pytorch 2.1.2+cu118
  - Datasets 2.16.1
+ - Tokenizers 0.19.1
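The only substantive README change is the learning rate (5e-07 to 2e-07) plus the framework-version bumps. As a rough illustration only, the sketch below maps these hyperparameters onto transformers.TrainingArguments; the output directory name and the bf16 flag are assumptions (the commit itself only records the values shown in the diff and in trainer_state.json/config.json), and the trainer class used for the run is not stated in this commit.

```python
# Hypothetical sketch: the README's hyperparameters expressed as TrainingArguments.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="model-save-run",     # hypothetical name, not part of the commit
    learning_rate=2e-07,             # was 5e-07 before this commit
    per_device_train_batch_size=4,   # train_batch_size: 4
    per_device_eval_batch_size=4,    # eval_batch_size: 4
    seed=42,
    num_train_epochs=1,              # from trainer_state.json
    logging_steps=10,                # from trainer_state.json
    save_steps=100,                  # from trainer_state.json
    bf16=True,                       # assumption, based on torch_dtype "bfloat16" in config.json
)
```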
all_results.json CHANGED
@@ -1,8 +1,9 @@
  {
- "epoch": 1.0,
- "train_loss": 0.39275855483152927,
- "train_runtime": 5850.2913,
  "train_samples": 50000,
- "train_samples_per_second": 8.547,
- "train_steps_per_second": 0.067
  }

  {
+ "epoch": 0.9980806142034548,
+ "total_flos": 0.0,
+ "train_loss": 0.2947830175742125,
+ "train_runtime": 6391.2384,
  "train_samples": 50000,
+ "train_samples_per_second": 7.823,
+ "train_steps_per_second": 0.061
  }
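The new throughput figures are consistent with the other numbers in this commit: 50000 samples over a 6391.2384 s runtime and 390 optimizer steps (the final "step" in trainer_state.json). A minimal sanity check:

```python
# Sanity-check the reported throughput in the new all_results.json.
train_samples = 50000
train_runtime = 6391.2384   # seconds
train_steps = 390           # final "step" in trainer_state.json

print(round(train_samples / train_runtime, 3))  # ~7.823  -> train_samples_per_second
print(round(train_steps / train_runtime, 3))    # ~0.061  -> train_steps_per_second
```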
config.json CHANGED
@@ -20,7 +20,7 @@
  "sliding_window": 4096,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
- "transformers_version": "4.39.3",
  "use_cache": false,
  "vocab_size": 32000
  }

  "sliding_window": 4096,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
+ "transformers_version": "4.41.1",
  "use_cache": false,
  "vocab_size": 32000
  }
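The only config.json change is the recorded transformers_version. A quick, hedged check that the saved config still loads under the bumped version (the local path is an assumption; substitute the repo id or checkout directory):

```python
# Sketch: load the committed config.json with transformers 4.41.1.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained(".")   # directory containing config.json
print(cfg.torch_dtype, cfg.vocab_size, cfg.sliding_window)
```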
generation_config.json CHANGED
@@ -2,5 +2,5 @@
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
- "transformers_version": "4.39.3"
  }

  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
+ "transformers_version": "4.41.1"
  }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1c0dc0c292d489e5548e3b75999fe6cd030919ab96386b961a7432dab2fccb25
  size 4943162336

  version https://git-lfs.github.com/spec/v1
+ oid sha256:c879c438925d1255e6bb62e95443c7b9be4c54b554c5661246d16917a1112924
  size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:78055b666c3b09cd419d126d46142b7e4cbc261fd5db79d249138904dfc1a57d
  size 4999819336

  version https://git-lfs.github.com/spec/v1
+ oid sha256:0c2dd72f78dcf556e6528d803c589c6105e3a837532325d354a805e21db0d419
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:04307748a9391cbe41e028618c312d319041f81f61a38ff22b6d44f0630d8343
  size 4540516344

  version https://git-lfs.github.com/spec/v1
+ oid sha256:0a55211455915c4c6cc8aa0486c54d0d029dd088874b044c923db3b11eb37773
  size 4540516344
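Each weight shard is stored as a Git LFS pointer (version, sha256 oid, size); only the hashes change here, the shard sizes are identical. A standard-library sketch for verifying a downloaded shard against its pointer, using the third shard's new oid and size from the diff above (the local filename assumes the file was pulled via LFS):

```python
# Sketch: verify a downloaded shard against the sha256 and size in its LFS pointer.
import hashlib
import os

EXPECTED_OID = "0a55211455915c4c6cc8aa0486c54d0d029dd088874b044c923db3b11eb37773"
EXPECTED_SIZE = 4540516344
PATH = "model-00003-of-00003.safetensors"

h = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size mismatch"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("shard matches its LFS pointer")
```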
runs/Jun22_03-45-12_n136-082-130/events.out.tfevents.1718999150.n136-082-130.2429577.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:28126c3869c0fe579ef32582aed3225c779b7e3bdb966a58d323f9c2b91f62be
- size 26299

  version https://git-lfs.github.com/spec/v1
+ oid sha256:76c76e9e221e4f8cc6ab660e0755399876a9ef2ac84345577d63bbe730e2236f
+ size 32845
tokenizer.json CHANGED
@@ -134,6 +134,7 @@
  "end_of_word_suffix": null,
  "fuse_unk": true,
  "byte_fallback": true,
  "vocab": {
  "<unk>": 0,
  "<s>": 1,

  "end_of_word_suffix": null,
  "fuse_unk": true,
  "byte_fallback": true,
+ "ignore_merges": false,
  "vocab": {
  "<unk>": 0,
  "<s>": 1,
train_results.json CHANGED
@@ -1,8 +1,9 @@
  {
- "epoch": 1.0,
- "train_loss": 0.39275855483152927,
- "train_runtime": 5850.2913,
  "train_samples": 50000,
- "train_samples_per_second": 8.547,
- "train_steps_per_second": 0.067
  }

  {
+ "epoch": 0.9980806142034548,
+ "total_flos": 0.0,
+ "train_loss": 0.2947830175742125,
+ "train_runtime": 6391.2384,
  "train_samples": 50000,
+ "train_samples_per_second": 7.823,
+ "train_steps_per_second": 0.061
  }
trainer_state.json CHANGED
@@ -9,14 +9,14 @@
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 0.0,
- "grad_norm": 992.8142742850837,
- "learning_rate": 1.282051282051282e-08,
  "logits/chosen": -2.5583817958831787,
  "logits/rejected": -2.4487552642822266,
  "logps/chosen": -258.1644592285156,
  "logps/rejected": -216.25729370117188,
- "loss": 0.6965,
  "rewards/accuracies": 0.0,
  "rewards/chosen": 0.0,
  "rewards/margins": 0.0,
@@ -24,598 +24,598 @@
24
  "step": 1
25
  },
26
  {
27
- "epoch": 0.03,
28
- "grad_norm": 972.5242632724868,
29
- "learning_rate": 1.2820512820512818e-07,
30
- "logits/chosen": -2.606093168258667,
31
- "logits/rejected": -2.5532331466674805,
32
- "logps/chosen": -267.5519714355469,
33
- "logps/rejected": -217.63278198242188,
34
- "loss": 0.7101,
35
- "rewards/accuracies": 0.4375,
36
- "rewards/chosen": 0.008765195496380329,
37
- "rewards/margins": 0.004809141159057617,
38
- "rewards/rejected": 0.0039560552686452866,
39
  "step": 10
40
  },
41
  {
42
- "epoch": 0.05,
43
- "grad_norm": 659.3539113664132,
44
- "learning_rate": 2.5641025641025636e-07,
45
- "logits/chosen": -2.6304450035095215,
46
- "logits/rejected": -2.5677618980407715,
47
- "logps/chosen": -260.57421875,
48
- "logps/rejected": -207.02157592773438,
49
- "loss": 0.5261,
50
- "rewards/accuracies": 0.7250000238418579,
51
- "rewards/chosen": 0.5109978318214417,
52
- "rewards/margins": 0.48820367455482483,
53
- "rewards/rejected": 0.022794198244810104,
54
  "step": 20
55
  },
56
  {
57
- "epoch": 0.08,
58
- "grad_norm": 779.0248633950893,
59
- "learning_rate": 3.8461538461538463e-07,
60
- "logits/chosen": -2.648696184158325,
61
- "logits/rejected": -2.573786973953247,
62
- "logps/chosen": -250.76904296875,
63
- "logps/rejected": -198.43540954589844,
64
- "loss": 0.3369,
65
- "rewards/accuracies": 0.856249988079071,
66
- "rewards/chosen": 3.0647289752960205,
67
- "rewards/margins": 2.7615580558776855,
68
- "rewards/rejected": 0.3031708300113678,
69
  "step": 30
70
  },
71
  {
72
- "epoch": 0.1,
73
- "grad_norm": 384.3988310437092,
74
- "learning_rate": 4.99989986344963e-07,
75
- "logits/chosen": -2.6418910026550293,
76
- "logits/rejected": -2.5625271797180176,
77
- "logps/chosen": -243.13668823242188,
78
- "logps/rejected": -192.8317108154297,
79
- "loss": 0.3186,
80
- "rewards/accuracies": 0.824999988079071,
81
- "rewards/chosen": 5.048842430114746,
82
- "rewards/margins": 4.450641632080078,
83
- "rewards/rejected": 0.5982006192207336,
84
  "step": 40
85
  },
86
  {
87
- "epoch": 0.13,
88
- "grad_norm": 557.6508821014772,
89
- "learning_rate": 4.987893180827479e-07,
90
- "logits/chosen": -2.651369333267212,
91
- "logits/rejected": -2.5784316062927246,
92
- "logps/chosen": -257.6221923828125,
93
- "logps/rejected": -203.21920776367188,
94
- "loss": 0.3491,
95
- "rewards/accuracies": 0.862500011920929,
96
- "rewards/chosen": 7.43048620223999,
97
- "rewards/margins": 6.079649448394775,
98
- "rewards/rejected": 1.3508365154266357,
99
  "step": 50
100
  },
101
  {
102
- "epoch": 0.15,
103
- "grad_norm": 595.0405769986861,
104
- "learning_rate": 4.955969343539162e-07,
105
- "logits/chosen": -2.606095552444458,
106
- "logits/rejected": -2.5313313007354736,
107
- "logps/chosen": -261.7010192871094,
108
- "logps/rejected": -209.05029296875,
109
- "loss": 0.3678,
110
- "rewards/accuracies": 0.8687499761581421,
111
- "rewards/chosen": 4.988667011260986,
112
- "rewards/margins": 5.682937145233154,
113
- "rewards/rejected": -0.6942700147628784,
114
  "step": 60
115
  },
116
  {
117
- "epoch": 0.18,
118
- "grad_norm": 406.24830016448993,
119
- "learning_rate": 4.90438392204474e-07,
120
- "logits/chosen": -2.57716703414917,
121
- "logits/rejected": -2.5019829273223877,
122
- "logps/chosen": -291.1893615722656,
123
- "logps/rejected": -227.8859100341797,
124
- "loss": 0.3366,
125
- "rewards/accuracies": 0.9125000238418579,
126
- "rewards/chosen": 4.704905033111572,
127
- "rewards/margins": 6.591601371765137,
128
- "rewards/rejected": -1.8866965770721436,
129
  "step": 70
130
  },
131
  {
132
- "epoch": 0.2,
133
- "grad_norm": 771.1936928093826,
134
- "learning_rate": 4.83354989019146e-07,
135
- "logits/chosen": -2.534698247909546,
136
- "logits/rejected": -2.4588425159454346,
137
- "logps/chosen": -259.4831848144531,
138
- "logps/rejected": -204.6412353515625,
139
- "loss": 0.3162,
140
- "rewards/accuracies": 0.887499988079071,
141
- "rewards/chosen": 5.652478218078613,
142
- "rewards/margins": 6.747942924499512,
143
- "rewards/rejected": -1.0954645872116089,
144
  "step": 80
145
  },
146
  {
147
- "epoch": 0.23,
148
- "grad_norm": 746.9475535161084,
149
- "learning_rate": 4.7440343190975353e-07,
150
- "logits/chosen": -2.5645816326141357,
151
- "logits/rejected": -2.5068752765655518,
152
- "logps/chosen": -257.1772155761719,
153
- "logps/rejected": -217.83139038085938,
154
- "loss": 0.31,
155
- "rewards/accuracies": 0.84375,
156
- "rewards/chosen": 3.197655200958252,
157
- "rewards/margins": 5.532810211181641,
158
- "rewards/rejected": -2.3351550102233887,
159
  "step": 90
160
  },
161
  {
162
- "epoch": 0.26,
163
- "grad_norm": 823.7911810864297,
164
- "learning_rate": 4.6365538373900506e-07,
165
- "logits/chosen": -2.6224541664123535,
166
- "logits/rejected": -2.5483615398406982,
167
- "logps/chosen": -236.20339965820312,
168
- "logps/rejected": -201.47940063476562,
169
- "loss": 0.5348,
170
  "rewards/accuracies": 0.8500000238418579,
171
- "rewards/chosen": 4.17586088180542,
172
- "rewards/margins": 6.1418962478637695,
173
- "rewards/rejected": -1.9660362005233765,
174
  "step": 100
175
  },
176
  {
177
- "epoch": 0.28,
178
- "grad_norm": 579.991739437595,
179
- "learning_rate": 4.5119688941406386e-07,
180
- "logits/chosen": -2.6264939308166504,
181
- "logits/rejected": -2.5470776557922363,
182
- "logps/chosen": -256.7718200683594,
183
- "logps/rejected": -209.46231079101562,
184
- "loss": 0.4091,
185
- "rewards/accuracies": 0.856249988079071,
186
- "rewards/chosen": 6.23184061050415,
187
- "rewards/margins": 6.958665370941162,
188
- "rewards/rejected": -0.7268255352973938,
189
  "step": 110
190
  },
191
  {
192
- "epoch": 0.31,
193
- "grad_norm": 690.1539082640518,
194
- "learning_rate": 4.3712768704277524e-07,
195
- "logits/chosen": -2.6043858528137207,
196
- "logits/rejected": -2.5360493659973145,
197
- "logps/chosen": -261.69561767578125,
198
- "logps/rejected": -208.3325653076172,
199
- "loss": 0.4048,
200
- "rewards/accuracies": 0.862500011920929,
201
- "rewards/chosen": 5.436749458312988,
202
- "rewards/margins": 6.8130621910095215,
203
- "rewards/rejected": -1.3763134479522705,
204
  "step": 120
205
  },
206
  {
207
- "epoch": 0.33,
208
- "grad_norm": 451.1743521072461,
209
- "learning_rate": 4.2156040946718343e-07,
210
- "logits/chosen": -2.578007698059082,
211
- "logits/rejected": -2.5135886669158936,
212
- "logps/chosen": -250.7688446044922,
213
- "logps/rejected": -196.73779296875,
214
- "loss": 0.3698,
215
- "rewards/accuracies": 0.8374999761581421,
216
- "rewards/chosen": 4.574068069458008,
217
- "rewards/margins": 6.677270412445068,
218
- "rewards/rejected": -2.1032023429870605,
219
  "step": 130
220
  },
221
  {
222
- "epoch": 0.36,
223
- "grad_norm": 465.41383196085104,
224
- "learning_rate": 4.046196825665637e-07,
225
- "logits/chosen": -2.600147247314453,
226
- "logits/rejected": -2.534235715866089,
227
- "logps/chosen": -269.448974609375,
228
- "logps/rejected": -216.6135711669922,
229
- "loss": 0.4171,
230
- "rewards/accuracies": 0.862500011920929,
231
- "rewards/chosen": 4.009121417999268,
232
- "rewards/margins": 6.148682594299316,
233
- "rewards/rejected": -2.139561176300049,
234
  "step": 140
235
  },
236
  {
237
- "epoch": 0.38,
238
- "grad_norm": 485.4989695160128,
239
- "learning_rate": 3.864411275486261e-07,
240
- "logits/chosen": -2.583752155303955,
241
- "logits/rejected": -2.51660418510437,
242
- "logps/chosen": -262.35479736328125,
243
- "logps/rejected": -212.38021850585938,
244
- "loss": 0.429,
245
- "rewards/accuracies": 0.856249988079071,
246
- "rewards/chosen": 5.765866756439209,
247
- "rewards/margins": 7.155278205871582,
248
- "rewards/rejected": -1.3894122838974,
249
  "step": 150
250
  },
251
  {
252
- "epoch": 0.41,
253
- "grad_norm": 546.3479101912284,
254
- "learning_rate": 3.671702752161759e-07,
255
- "logits/chosen": -2.5821540355682373,
256
- "logits/rejected": -2.514200210571289,
257
- "logps/chosen": -242.9073944091797,
258
- "logps/rejected": -197.12026977539062,
259
- "loss": 0.4562,
260
- "rewards/accuracies": 0.8812500238418579,
261
- "rewards/chosen": 4.221369743347168,
262
- "rewards/margins": 7.090517520904541,
263
- "rewards/rejected": -2.869147777557373,
264
  "step": 160
265
  },
266
  {
267
- "epoch": 0.44,
268
- "grad_norm": 655.017465367729,
269
- "learning_rate": 3.4696140090121375e-07,
270
- "logits/chosen": -2.582387685775757,
271
- "logits/rejected": -2.517115831375122,
272
- "logps/chosen": -264.36541748046875,
273
- "logps/rejected": -210.4468231201172,
274
- "loss": 0.3517,
275
  "rewards/accuracies": 0.875,
276
- "rewards/chosen": 4.428440570831299,
277
- "rewards/margins": 7.276960849761963,
278
- "rewards/rejected": -2.8485207557678223,
279
  "step": 170
280
  },
281
  {
282
- "epoch": 0.46,
283
- "grad_norm": 492.2183630043871,
284
- "learning_rate": 3.259762893935617e-07,
285
- "logits/chosen": -2.6398727893829346,
286
- "logits/rejected": -2.5528438091278076,
287
- "logps/chosen": -235.0208282470703,
288
- "logps/rejected": -185.29156494140625,
289
- "loss": 0.4065,
290
- "rewards/accuracies": 0.856249988079071,
291
- "rewards/chosen": 4.908794403076172,
292
- "rewards/margins": 6.33758544921875,
293
- "rewards/rejected": -1.4287910461425781,
294
  "step": 180
295
  },
296
  {
297
- "epoch": 0.49,
298
- "grad_norm": 502.0957454157663,
299
- "learning_rate": 3.0438293975154184e-07,
300
- "logits/chosen": -2.5951266288757324,
301
- "logits/rejected": -2.519169330596924,
302
- "logps/chosen": -258.04022216796875,
303
- "logps/rejected": -202.89727783203125,
304
- "loss": 0.3426,
305
- "rewards/accuracies": 0.8812500238418579,
306
- "rewards/chosen": 4.746270179748535,
307
- "rewards/margins": 7.24340295791626,
308
- "rewards/rejected": -2.4971330165863037,
309
  "step": 190
310
  },
311
  {
312
- "epoch": 0.51,
313
- "grad_norm": 536.1992163678008,
314
- "learning_rate": 2.823542203635138e-07,
315
- "logits/chosen": -2.6266205310821533,
316
- "logits/rejected": -2.5365912914276123,
317
- "logps/chosen": -273.0335388183594,
318
- "logps/rejected": -217.910888671875,
319
- "loss": 0.3772,
320
- "rewards/accuracies": 0.875,
321
- "rewards/chosen": 5.242685794830322,
322
- "rewards/margins": 8.247489929199219,
323
- "rewards/rejected": -3.004803419113159,
324
  "step": 200
325
  },
326
  {
327
- "epoch": 0.54,
328
- "grad_norm": 550.4099704660905,
329
- "learning_rate": 2.600664850273538e-07,
330
- "logits/chosen": -2.6228280067443848,
331
- "logits/rejected": -2.550497531890869,
332
- "logps/chosen": -265.55755615234375,
333
- "logps/rejected": -210.61660766601562,
334
- "loss": 0.5254,
335
  "rewards/accuracies": 0.856249988079071,
336
- "rewards/chosen": 3.847407102584839,
337
- "rewards/margins": 6.826489448547363,
338
- "rewards/rejected": -2.979081869125366,
339
  "step": 210
340
  },
341
  {
342
- "epoch": 0.56,
343
- "grad_norm": 581.5523533407168,
344
- "learning_rate": 2.3769816112703045e-07,
345
- "logits/chosen": -2.6491611003875732,
346
- "logits/rejected": -2.5865907669067383,
347
- "logps/chosen": -255.453857421875,
348
- "logps/rejected": -212.5009765625,
349
- "loss": 0.4229,
350
- "rewards/accuracies": 0.8687499761581421,
351
- "rewards/chosen": 4.356105327606201,
352
- "rewards/margins": 6.26999568939209,
353
- "rewards/rejected": -1.9138901233673096,
354
  "step": 220
355
  },
356
  {
357
- "epoch": 0.59,
358
- "grad_norm": 486.64295333011063,
359
- "learning_rate": 2.1542832120881677e-07,
360
- "logits/chosen": -2.6853079795837402,
361
- "logits/rejected": -2.600752830505371,
362
- "logps/chosen": -264.80615234375,
363
- "logps/rejected": -214.556396484375,
364
- "loss": 0.3669,
365
- "rewards/accuracies": 0.862500011920929,
366
- "rewards/chosen": 6.017881393432617,
367
- "rewards/margins": 7.167391300201416,
368
- "rewards/rejected": -1.1495091915130615,
369
  "step": 230
370
  },
371
  {
372
- "epoch": 0.61,
373
- "grad_norm": 831.6224733555287,
374
- "learning_rate": 1.934352493925695e-07,
375
- "logits/chosen": -2.667158603668213,
376
- "logits/rejected": -2.620408058166504,
377
- "logps/chosen": -261.4100036621094,
378
- "logps/rejected": -219.61032104492188,
379
- "loss": 0.3738,
380
- "rewards/accuracies": 0.893750011920929,
381
- "rewards/chosen": 4.424870014190674,
382
- "rewards/margins": 8.652185440063477,
383
- "rewards/rejected": -4.227315425872803,
384
  "step": 240
385
  },
386
  {
387
- "epoch": 0.64,
388
- "grad_norm": 563.6526543418182,
389
- "learning_rate": 1.7189501409486059e-07,
390
- "logits/chosen": -2.674989938735962,
391
- "logits/rejected": -2.606071949005127,
392
- "logps/chosen": -266.0426940917969,
393
- "logps/rejected": -221.0687713623047,
394
- "loss": 0.367,
395
- "rewards/accuracies": 0.84375,
396
- "rewards/chosen": 4.015201091766357,
397
- "rewards/margins": 7.296302795410156,
398
- "rewards/rejected": -3.281102418899536,
399
  "step": 250
400
  },
401
  {
402
- "epoch": 0.67,
403
- "grad_norm": 457.0558678943042,
404
- "learning_rate": 1.5098005849021078e-07,
405
- "logits/chosen": -2.6647772789001465,
406
- "logits/rejected": -2.6081786155700684,
407
- "logps/chosen": -260.12384033203125,
408
- "logps/rejected": -207.2174835205078,
409
- "loss": 0.3387,
410
- "rewards/accuracies": 0.875,
411
- "rewards/chosen": 4.0153021812438965,
412
- "rewards/margins": 6.823422908782959,
413
- "rewards/rejected": -2.8081207275390625,
414
  "step": 260
415
  },
416
  {
417
- "epoch": 0.69,
418
- "grad_norm": 478.6275517216598,
419
- "learning_rate": 1.30857819994673e-07,
420
- "logits/chosen": -2.641366481781006,
421
- "logits/rejected": -2.558147668838501,
422
- "logps/chosen": -272.80377197265625,
423
- "logps/rejected": -228.5403594970703,
424
- "loss": 0.4885,
425
- "rewards/accuracies": 0.90625,
426
- "rewards/chosen": 2.646123170852661,
427
- "rewards/margins": 8.172276496887207,
428
- "rewards/rejected": -5.526152610778809,
429
  "step": 270
430
  },
431
  {
432
- "epoch": 0.72,
433
- "grad_norm": 369.20521183996,
434
- "learning_rate": 1.116893898236716e-07,
435
- "logits/chosen": -2.6526267528533936,
436
- "logits/rejected": -2.597562789916992,
437
- "logps/chosen": -268.1092224121094,
438
- "logps/rejected": -217.9169464111328,
439
- "loss": 0.3441,
440
- "rewards/accuracies": 0.875,
441
- "rewards/chosen": 3.9179558753967285,
442
- "rewards/margins": 7.3985185623168945,
443
- "rewards/rejected": -3.480562686920166,
444
  "step": 280
445
  },
446
  {
447
- "epoch": 0.74,
448
- "grad_norm": 498.11211487860106,
449
- "learning_rate": 9.362822335518062e-08,
450
- "logits/chosen": -2.6078855991363525,
451
- "logits/rejected": -2.5603675842285156,
452
- "logps/chosen": -265.8270263671875,
453
- "logps/rejected": -215.421142578125,
454
- "loss": 0.3216,
455
- "rewards/accuracies": 0.8687499761581421,
456
- "rewards/chosen": 4.283304691314697,
457
- "rewards/margins": 7.295420169830322,
458
- "rewards/rejected": -3.012115478515625,
459
  "step": 290
460
  },
461
  {
462
- "epoch": 0.77,
463
- "grad_norm": 433.49223304725587,
464
- "learning_rate": 7.681891162260015e-08,
465
- "logits/chosen": -2.6258552074432373,
466
- "logits/rejected": -2.5720603466033936,
467
- "logps/chosen": -272.7791442871094,
468
- "logps/rejected": -219.44140625,
469
- "loss": 0.3717,
470
- "rewards/accuracies": 0.862500011920929,
471
- "rewards/chosen": 4.056910514831543,
472
- "rewards/margins": 7.150809288024902,
473
- "rewards/rejected": -3.093899726867676,
474
  "step": 300
475
  },
476
  {
477
- "epoch": 0.79,
478
- "grad_norm": 369.6165987845038,
479
- "learning_rate": 6.139602377230247e-08,
480
- "logits/chosen": -2.5929980278015137,
481
- "logits/rejected": -2.524878978729248,
482
- "logps/chosen": -276.72418212890625,
483
- "logps/rejected": -214.73757934570312,
484
- "loss": 0.3974,
485
- "rewards/accuracies": 0.887499988079071,
486
- "rewards/chosen": 3.884862184524536,
487
- "rewards/margins": 7.488539695739746,
488
- "rewards/rejected": -3.6036782264709473,
489
  "step": 310
490
  },
491
  {
492
- "epoch": 0.82,
493
- "grad_norm": 542.2853213894015,
494
- "learning_rate": 4.748302975270837e-08,
495
- "logits/chosen": -2.618070125579834,
496
- "logits/rejected": -2.5723397731781006,
497
- "logps/chosen": -259.38433837890625,
498
- "logps/rejected": -202.79501342773438,
499
- "loss": 0.3593,
500
- "rewards/accuracies": 0.8812500238418579,
501
- "rewards/chosen": 3.869190216064453,
502
- "rewards/margins": 6.644041538238525,
503
- "rewards/rejected": -2.7748515605926514,
504
  "step": 320
505
  },
506
  {
507
- "epoch": 0.84,
508
- "grad_norm": 1179.920803140471,
509
- "learning_rate": 3.5191311859445795e-08,
510
- "logits/chosen": -2.639923572540283,
511
- "logits/rejected": -2.582639455795288,
512
- "logps/chosen": -262.33123779296875,
513
- "logps/rejected": -215.505615234375,
514
- "loss": 0.3667,
515
- "rewards/accuracies": 0.8999999761581421,
516
- "rewards/chosen": 5.157271385192871,
517
- "rewards/margins": 7.100647926330566,
518
- "rewards/rejected": -1.9433765411376953,
519
  "step": 330
520
  },
521
  {
522
- "epoch": 0.87,
523
- "grad_norm": 467.7658799716397,
524
- "learning_rate": 2.4619273049795996e-08,
525
- "logits/chosen": -2.6265780925750732,
526
- "logits/rejected": -2.5702362060546875,
527
- "logps/chosen": -257.75689697265625,
528
- "logps/rejected": -209.0670623779297,
529
- "loss": 0.324,
530
- "rewards/accuracies": 0.9125000238418579,
531
- "rewards/chosen": 4.93801212310791,
532
- "rewards/margins": 7.8753981590271,
533
- "rewards/rejected": -2.9373855590820312,
534
  "step": 340
535
  },
536
  {
537
- "epoch": 0.9,
538
- "grad_norm": 345.87107109384476,
539
- "learning_rate": 1.5851549164932115e-08,
540
- "logits/chosen": -2.637718677520752,
541
- "logits/rejected": -2.589808940887451,
542
- "logps/chosen": -267.7754821777344,
543
- "logps/rejected": -225.0698699951172,
544
- "loss": 0.3399,
545
- "rewards/accuracies": 0.893750011920929,
546
- "rewards/chosen": 4.532751560211182,
547
- "rewards/margins": 7.0415191650390625,
548
- "rewards/rejected": -2.508768320083618,
549
  "step": 350
550
  },
551
  {
552
- "epoch": 0.92,
553
- "grad_norm": 491.63656420568554,
554
- "learning_rate": 8.958331366609423e-09,
555
- "logits/chosen": -2.641171932220459,
556
- "logits/rejected": -2.574852705001831,
557
- "logps/chosen": -272.898193359375,
558
- "logps/rejected": -217.81997680664062,
559
- "loss": 0.3962,
560
  "rewards/accuracies": 0.893750011920929,
561
- "rewards/chosen": 4.423092842102051,
562
- "rewards/margins": 7.286849021911621,
563
- "rewards/rejected": -2.863755464553833,
564
  "step": 360
565
  },
566
  {
567
- "epoch": 0.95,
568
- "grad_norm": 863.5117187901297,
569
- "learning_rate": 3.994804212627461e-09,
570
- "logits/chosen": -2.600586175918579,
571
- "logits/rejected": -2.565150737762451,
572
- "logps/chosen": -271.3792419433594,
573
- "logps/rejected": -227.0088348388672,
574
- "loss": 0.4043,
575
  "rewards/accuracies": 0.862500011920929,
576
- "rewards/chosen": 5.524130821228027,
577
- "rewards/margins": 7.229277610778809,
578
- "rewards/rejected": -1.7051467895507812,
579
  "step": 370
580
  },
581
  {
582
- "epoch": 0.97,
583
- "grad_norm": 386.7539067890418,
584
- "learning_rate": 1.0007038696262516e-09,
585
- "logits/chosen": -2.649324893951416,
586
- "logits/rejected": -2.608961820602417,
587
- "logps/chosen": -261.44415283203125,
588
- "logps/rejected": -229.10617065429688,
589
- "loss": 0.3514,
590
- "rewards/accuracies": 0.862500011920929,
591
- "rewards/chosen": 4.858848571777344,
592
- "rewards/margins": 7.17071008682251,
593
- "rewards/rejected": -2.311861276626587,
594
  "step": 380
595
  },
596
  {
597
- "epoch": 1.0,
598
- "grad_norm": 658.7609084129675,
599
  "learning_rate": 0.0,
600
- "logits/chosen": -2.657970428466797,
601
- "logits/rejected": -2.5974597930908203,
602
- "logps/chosen": -249.08193969726562,
603
- "logps/rejected": -208.86935424804688,
604
- "loss": 0.389,
605
- "rewards/accuracies": 0.8687499761581421,
606
- "rewards/chosen": 4.138771057128906,
607
- "rewards/margins": 6.9808807373046875,
608
- "rewards/rejected": -2.8421101570129395,
609
  "step": 390
610
  },
611
  {
612
- "epoch": 1.0,
613
  "step": 390,
614
  "total_flos": 0.0,
615
- "train_loss": 0.39275855483152927,
616
- "train_runtime": 5850.2913,
617
- "train_samples_per_second": 8.547,
618
- "train_steps_per_second": 0.067
619
  }
620
  ],
621
  "logging_steps": 10,
@@ -623,6 +623,18 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,

  "is_world_process_zero": true,
  "log_history": [
  {
+ "epoch": 0.0025591810620601407,
+ "grad_norm": 709.6319834867974,
+ "learning_rate": 5.128205128205128e-09,
  "logits/chosen": -2.5583817958831787,
  "logits/rejected": -2.4487552642822266,
  "logps/chosen": -258.1644592285156,
  "logps/rejected": -216.25729370117188,
+ "loss": 0.6931,
  "rewards/accuracies": 0.0,
  "rewards/chosen": 0.0,
  "rewards/margins": 0.0,
 
24
  "step": 1
25
  },
26
  {
27
+ "epoch": 0.025591810620601407,
28
+ "grad_norm": 670.9725237886468,
29
+ "learning_rate": 5.128205128205127e-08,
30
+ "logits/chosen": -2.606004238128662,
31
+ "logits/rejected": -2.553126573562622,
32
+ "logps/chosen": -267.5910339355469,
33
+ "logps/rejected": -217.67140197753906,
34
+ "loss": 0.6976,
35
+ "rewards/accuracies": 0.4513888955116272,
36
+ "rewards/chosen": -0.013270225375890732,
37
+ "rewards/margins": 0.0032119054812937975,
38
+ "rewards/rejected": -0.01648213155567646,
39
  "step": 10
40
  },
41
  {
42
+ "epoch": 0.05118362124120281,
43
+ "grad_norm": 590.5735450897452,
44
+ "learning_rate": 1.0256410256410255e-07,
45
+ "logits/chosen": -2.6245453357696533,
46
+ "logits/rejected": -2.562206745147705,
47
+ "logps/chosen": -261.142333984375,
48
+ "logps/rejected": -207.0738067626953,
49
+ "loss": 0.6676,
50
+ "rewards/accuracies": 0.6312500238418579,
51
+ "rewards/chosen": 0.08092932403087616,
52
+ "rewards/margins": 0.09077399969100952,
53
+ "rewards/rejected": -0.009844672866165638,
54
  "step": 20
55
  },
56
  {
57
+ "epoch": 0.07677543186180422,
58
+ "grad_norm": 644.2505273786248,
59
+ "learning_rate": 1.5384615384615385e-07,
60
+ "logits/chosen": -2.6289420127868652,
61
+ "logits/rejected": -2.5566790103912354,
62
+ "logps/chosen": -253.9628143310547,
63
+ "logps/rejected": -198.890380859375,
64
+ "loss": 0.5156,
65
+ "rewards/accuracies": 0.800000011920929,
66
+ "rewards/chosen": 0.5922147035598755,
67
+ "rewards/margins": 0.6031575798988342,
68
+ "rewards/rejected": -0.010942881926894188,
69
  "step": 30
70
  },
71
  {
72
+ "epoch": 0.10236724248240563,
73
+ "grad_norm": 428.1589759637874,
74
+ "learning_rate": 1.999959945379852e-07,
75
+ "logits/chosen": -2.637456178665161,
76
+ "logits/rejected": -2.5609130859375,
77
+ "logps/chosen": -247.48666381835938,
78
+ "logps/rejected": -193.59280395507812,
79
+ "loss": 0.3866,
80
+ "rewards/accuracies": 0.8062499761581421,
81
+ "rewards/chosen": 1.4313427209854126,
82
+ "rewards/margins": 1.3846019506454468,
83
+ "rewards/rejected": 0.04674070328474045,
84
  "step": 40
85
  },
86
  {
87
+ "epoch": 0.12795905310300704,
88
+ "grad_norm": 366.0325785868742,
89
+ "learning_rate": 1.9951572723309917e-07,
90
+ "logits/chosen": -2.684983730316162,
91
+ "logits/rejected": -2.618076801300049,
92
+ "logps/chosen": -262.3794250488281,
93
+ "logps/rejected": -204.0382843017578,
94
+ "loss": 0.3488,
95
+ "rewards/accuracies": 0.8500000238418579,
96
+ "rewards/chosen": 2.9288697242736816,
97
+ "rewards/margins": 2.3735203742980957,
98
+ "rewards/rejected": 0.5553494095802307,
99
  "step": 50
100
  },
101
  {
102
+ "epoch": 0.15355086372360843,
103
+ "grad_norm": 375.0458504104559,
104
+ "learning_rate": 1.9823877374156647e-07,
105
+ "logits/chosen": -2.669403553009033,
106
+ "logits/rejected": -2.6018548011779785,
107
+ "logps/chosen": -262.41534423828125,
108
+ "logps/rejected": -206.5387420654297,
109
+ "loss": 0.311,
110
+ "rewards/accuracies": 0.824999988079071,
111
+ "rewards/chosen": 3.2061855792999268,
112
+ "rewards/margins": 2.446310520172119,
113
+ "rewards/rejected": 0.7598745822906494,
114
  "step": 60
115
  },
116
  {
117
+ "epoch": 0.17914267434420986,
118
+ "grad_norm": 371.9136724850062,
119
+ "learning_rate": 1.9617535688178958e-07,
120
+ "logits/chosen": -2.6535415649414062,
121
+ "logits/rejected": -2.590367078781128,
122
+ "logps/chosen": -290.74871826171875,
123
+ "logps/rejected": -224.1929168701172,
124
+ "loss": 0.3051,
125
+ "rewards/accuracies": 0.875,
126
+ "rewards/chosen": 3.5809733867645264,
127
+ "rewards/margins": 3.0821049213409424,
128
+ "rewards/rejected": 0.4988683760166168,
129
  "step": 70
130
  },
131
  {
132
+ "epoch": 0.20473448496481125,
133
+ "grad_norm": 381.95411975681657,
134
+ "learning_rate": 1.9334199560765839e-07,
135
+ "logits/chosen": -2.6085634231567383,
136
+ "logits/rejected": -2.5423429012298584,
137
+ "logps/chosen": -260.9337463378906,
138
+ "logps/rejected": -203.12725830078125,
139
+ "loss": 0.2569,
140
+ "rewards/accuracies": 0.8687499761581421,
141
+ "rewards/chosen": 3.3122222423553467,
142
+ "rewards/margins": 3.3377063274383545,
143
+ "rewards/rejected": -0.025484371930360794,
144
  "step": 80
145
  },
146
  {
147
+ "epoch": 0.23032629558541268,
148
+ "grad_norm": 331.9831865016541,
149
+ "learning_rate": 1.897613727639014e-07,
150
+ "logits/chosen": -2.610903739929199,
151
+ "logits/rejected": -2.556959867477417,
152
+ "logps/chosen": -256.61138916015625,
153
+ "logps/rejected": -215.09701538085938,
154
+ "loss": 0.2658,
155
+ "rewards/accuracies": 0.8687499761581421,
156
+ "rewards/chosen": 2.5669326782226562,
157
+ "rewards/margins": 2.867710590362549,
158
+ "rewards/rejected": -0.3007778227329254,
159
  "step": 90
160
  },
161
  {
162
+ "epoch": 0.2559181062060141,
163
+ "grad_norm": 634.1345493995642,
164
+ "learning_rate": 1.8546215349560202e-07,
165
+ "logits/chosen": -2.6384997367858887,
166
+ "logits/rejected": -2.563520908355713,
167
+ "logps/chosen": -236.5784912109375,
168
+ "logps/rejected": -199.6844940185547,
169
+ "loss": 0.3117,
170
  "rewards/accuracies": 0.8500000238418579,
171
+ "rewards/chosen": 2.7952113151550293,
172
+ "rewards/margins": 3.302074909210205,
173
+ "rewards/rejected": -0.5068637132644653,
174
  "step": 100
175
  },
176
  {
177
+ "epoch": 0.28150991682661547,
178
+ "grad_norm": 319.85079714910466,
179
+ "learning_rate": 1.8047875576562553e-07,
180
+ "logits/chosen": -2.630192518234253,
181
+ "logits/rejected": -2.549891710281372,
182
+ "logps/chosen": -258.26971435546875,
183
+ "logps/rejected": -208.4285430908203,
184
+ "loss": 0.282,
185
+ "rewards/accuracies": 0.862500011920929,
186
+ "rewards/chosen": 3.702361583709717,
187
+ "rewards/margins": 3.704643726348877,
188
+ "rewards/rejected": -0.002281466033309698,
189
  "step": 110
190
  },
191
  {
192
+ "epoch": 0.30710172744721687,
193
+ "grad_norm": 376.18943633320504,
194
+ "learning_rate": 1.748510748171101e-07,
195
+ "logits/chosen": -2.6130287647247314,
196
+ "logits/rejected": -2.543454170227051,
197
+ "logps/chosen": -262.194580078125,
198
+ "logps/rejected": -206.72488403320312,
199
+ "loss": 0.2802,
200
+ "rewards/accuracies": 0.8812500238418579,
201
+ "rewards/chosen": 3.633894443511963,
202
+ "rewards/margins": 3.8131375312805176,
203
+ "rewards/rejected": -0.17924347519874573,
204
  "step": 120
205
  },
206
  {
207
+ "epoch": 0.3326935380678183,
208
+ "grad_norm": 258.08224292776333,
209
+ "learning_rate": 1.6862416378687336e-07,
210
+ "logits/chosen": -2.587902069091797,
211
+ "logits/rejected": -2.5218377113342285,
212
+ "logps/chosen": -250.2627410888672,
213
+ "logps/rejected": -193.98731994628906,
214
+ "loss": 0.2644,
215
+ "rewards/accuracies": 0.856249988079071,
216
+ "rewards/chosen": 3.5202510356903076,
217
+ "rewards/margins": 3.647292375564575,
218
+ "rewards/rejected": -0.12704159319400787,
219
  "step": 130
220
  },
221
  {
222
+ "epoch": 0.3582853486884197,
223
+ "grad_norm": 312.9816016514414,
224
+ "learning_rate": 1.6184787302662547e-07,
225
+ "logits/chosen": -2.6076598167419434,
226
+ "logits/rejected": -2.539602279663086,
227
+ "logps/chosen": -267.7660217285156,
228
+ "logps/rejected": -213.330078125,
229
+ "loss": 0.3055,
230
+ "rewards/accuracies": 0.875,
231
+ "rewards/chosen": 3.705143690109253,
232
+ "rewards/margins": 3.5916450023651123,
233
+ "rewards/rejected": 0.11349865049123764,
234
  "step": 140
235
  },
236
  {
237
+ "epoch": 0.3838771593090211,
238
+ "grad_norm": 298.63492430036257,
239
+ "learning_rate": 1.5457645101945046e-07,
240
+ "logits/chosen": -2.588223457336426,
241
+ "logits/rejected": -2.521327495574951,
242
+ "logps/chosen": -262.39776611328125,
243
+ "logps/rejected": -210.2900848388672,
244
+ "loss": 0.2675,
245
+ "rewards/accuracies": 0.887499988079071,
246
+ "rewards/chosen": 4.096975803375244,
247
+ "rewards/margins": 4.044339656829834,
248
+ "rewards/rejected": 0.05263558775186539,
249
  "step": 150
250
  },
251
  {
252
+ "epoch": 0.4094689699296225,
253
+ "grad_norm": 277.3876418755778,
254
+ "learning_rate": 1.4686811008647035e-07,
255
+ "logits/chosen": -2.5860092639923096,
256
+ "logits/rejected": -2.5171661376953125,
257
+ "logps/chosen": -241.29501342773438,
258
+ "logps/rejected": -192.94955444335938,
259
+ "loss": 0.2544,
260
+ "rewards/accuracies": 0.887499988079071,
261
+ "rewards/chosen": 3.8214523792266846,
262
+ "rewards/margins": 3.785486936569214,
263
+ "rewards/rejected": 0.0359656922519207,
264
  "step": 160
265
  },
266
  {
267
+ "epoch": 0.4350607805502239,
268
+ "grad_norm": 339.61682629601955,
269
+ "learning_rate": 1.387845603604855e-07,
270
+ "logits/chosen": -2.5826542377471924,
271
+ "logits/rejected": -2.51751446723938,
272
+ "logps/chosen": -262.99298095703125,
273
+ "logps/rejected": -206.5457000732422,
274
+ "loss": 0.2334,
275
  "rewards/accuracies": 0.875,
276
+ "rewards/chosen": 3.8493824005126953,
277
+ "rewards/margins": 3.933476209640503,
278
+ "rewards/rejected": -0.08409398794174194,
279
  "step": 170
280
  },
281
  {
282
+ "epoch": 0.46065259117082535,
283
+ "grad_norm": 293.5783730044579,
284
+ "learning_rate": 1.3039051575742468e-07,
285
+ "logits/chosen": -2.6330299377441406,
286
+ "logits/rejected": -2.547426700592041,
287
+ "logps/chosen": -235.4442596435547,
288
+ "logps/rejected": -183.63687133789062,
289
+ "loss": 0.2871,
290
+ "rewards/accuracies": 0.862500011920929,
291
+ "rewards/chosen": 3.294564723968506,
292
+ "rewards/margins": 3.4877796173095703,
293
+ "rewards/rejected": -0.193215012550354,
294
  "step": 180
295
  },
296
  {
297
+ "epoch": 0.48624440179142675,
298
+ "grad_norm": 298.0737945063753,
299
+ "learning_rate": 1.2175317590061675e-07,
300
+ "logits/chosen": -2.5894675254821777,
301
+ "logits/rejected": -2.51374888420105,
302
+ "logps/chosen": -257.42132568359375,
303
+ "logps/rejected": -199.62106323242188,
304
+ "loss": 0.239,
305
+ "rewards/accuracies": 0.887499988079071,
306
+ "rewards/chosen": 3.699636936187744,
307
+ "rewards/margins": 3.845207929611206,
308
+ "rewards/rejected": -0.1455712914466858,
309
  "step": 190
310
  },
311
  {
312
+ "epoch": 0.5118362124120281,
313
+ "grad_norm": 326.3771289852159,
314
+ "learning_rate": 1.1294168814540553e-07,
315
+ "logits/chosen": -2.625087261199951,
316
+ "logits/rejected": -2.5350747108459473,
317
+ "logps/chosen": -271.9561462402344,
318
+ "logps/rejected": -214.1556854248047,
319
+ "loss": 0.2317,
320
+ "rewards/accuracies": 0.90625,
321
+ "rewards/chosen": 4.283473014831543,
322
+ "rewards/margins": 4.552158832550049,
323
+ "rewards/rejected": -0.2686860263347626,
324
  "step": 200
325
  },
326
  {
327
+ "epoch": 0.5374280230326296,
328
+ "grad_norm": 510.1343443372893,
329
+ "learning_rate": 1.0402659401094151e-07,
330
+ "logits/chosen": -2.602785587310791,
331
+ "logits/rejected": -2.5288333892822266,
332
+ "logps/chosen": -264.21099853515625,
333
+ "logps/rejected": -207.3443145751953,
334
+ "loss": 0.3153,
335
  "rewards/accuracies": 0.856249988079071,
336
+ "rewards/chosen": 3.421428680419922,
337
+ "rewards/margins": 3.9131996631622314,
338
+ "rewards/rejected": -0.4917708933353424,
339
  "step": 210
340
  },
341
  {
342
+ "epoch": 0.5630198336532309,
343
+ "grad_norm": 362.61099626331173,
344
+ "learning_rate": 9.507926445081218e-08,
345
+ "logits/chosen": -2.607333183288574,
346
+ "logits/rejected": -2.5410573482513428,
347
+ "logps/chosen": -255.5386199951172,
348
+ "logps/rejected": -210.58261108398438,
349
+ "loss": 0.2657,
350
+ "rewards/accuracies": 0.893750011920929,
351
+ "rewards/chosen": 3.069124698638916,
352
+ "rewards/margins": 3.4770140647888184,
353
+ "rewards/rejected": -0.4078896641731262,
354
  "step": 220
355
  },
356
  {
357
+ "epoch": 0.5886116442738324,
358
+ "grad_norm": 270.86067735708986,
359
+ "learning_rate": 8.61713284835267e-08,
360
+ "logits/chosen": -2.6318671703338623,
361
+ "logits/rejected": -2.5431408882141113,
362
+ "logps/chosen": -265.3260803222656,
363
+ "logps/rejected": -212.6390380859375,
364
+ "loss": 0.249,
365
+ "rewards/accuracies": 0.8500000238418579,
366
+ "rewards/chosen": 4.038520812988281,
367
+ "rewards/margins": 3.9009087085723877,
368
+ "rewards/rejected": 0.1376120150089264,
369
  "step": 230
370
  },
371
  {
372
+ "epoch": 0.6142034548944337,
373
+ "grad_norm": 496.26361322510223,
374
+ "learning_rate": 7.73740997570278e-08,
375
+ "logits/chosen": -2.6172757148742676,
376
+ "logits/rejected": -2.566117763519287,
377
+ "logps/chosen": -259.39569091796875,
378
+ "logps/rejected": -214.13717651367188,
379
+ "loss": 0.2362,
380
+ "rewards/accuracies": 0.8999999761581421,
381
+ "rewards/chosen": 4.167778968811035,
382
+ "rewards/margins": 4.450715065002441,
383
+ "rewards/rejected": -0.28293582797050476,
384
  "step": 240
385
  },
386
  {
387
+ "epoch": 0.6397952655150352,
388
+ "grad_norm": 385.39306248287744,
389
+ "learning_rate": 6.875800563794424e-08,
390
+ "logits/chosen": -2.6301958560943604,
391
+ "logits/rejected": -2.5562362670898438,
392
+ "logps/chosen": -264.5022277832031,
393
+ "logps/rejected": -217.06472778320312,
394
+ "loss": 0.2541,
395
+ "rewards/accuracies": 0.862500011920929,
396
+ "rewards/chosen": 3.638233184814453,
397
+ "rewards/margins": 3.9798378944396973,
398
+ "rewards/rejected": -0.34160494804382324,
399
  "step": 250
400
  },
401
  {
402
+ "epoch": 0.6653870761356366,
403
+ "grad_norm": 246.0647695237683,
404
+ "learning_rate": 6.039202339608431e-08,
405
+ "logits/chosen": -2.6222901344299316,
406
+ "logits/rejected": -2.5577120780944824,
407
+ "logps/chosen": -258.84637451171875,
408
+ "logps/rejected": -203.7962188720703,
409
+ "loss": 0.2317,
410
+ "rewards/accuracies": 0.893750011920929,
411
+ "rewards/chosen": 3.5067996978759766,
412
+ "rewards/margins": 3.801957607269287,
413
+ "rewards/rejected": -0.295158326625824,
414
  "step": 260
415
  },
416
  {
417
+ "epoch": 0.690978886756238,
418
+ "grad_norm": 382.6551210350461,
419
+ "learning_rate": 5.2343127997869205e-08,
420
+ "logits/chosen": -2.602179765701294,
421
+ "logits/rejected": -2.5085558891296387,
422
+ "logps/chosen": -270.6271057128906,
423
+ "logps/rejected": -222.81655883789062,
424
+ "loss": 0.2926,
425
+ "rewards/accuracies": 0.8999999761581421,
426
+ "rewards/chosen": 2.9784107208251953,
427
+ "rewards/margins": 4.063758850097656,
428
+ "rewards/rejected": -1.085348129272461,
429
  "step": 270
430
  },
431
  {
432
+ "epoch": 0.7165706973768394,
433
+ "grad_norm": 291.15530026271324,
434
+ "learning_rate": 4.4675755929468636e-08,
435
+ "logits/chosen": -2.627225160598755,
436
+ "logits/rejected": -2.565744638442993,
437
+ "logps/chosen": -266.7916259765625,
438
+ "logps/rejected": -214.30709838867188,
439
+ "loss": 0.2375,
440
+ "rewards/accuracies": 0.90625,
441
+ "rewards/chosen": 3.4573512077331543,
442
+ "rewards/margins": 4.138546943664551,
443
+ "rewards/rejected": -0.6811951398849487,
444
  "step": 280
445
  },
446
  {
447
+ "epoch": 0.7421625079974408,
448
+ "grad_norm": 302.9770657235692,
449
+ "learning_rate": 3.745128934207224e-08,
450
+ "logits/chosen": -2.590620279312134,
451
+ "logits/rejected": -2.5388922691345215,
452
+ "logps/chosen": -265.51971435546875,
453
+ "logps/rejected": -212.6427764892578,
454
+ "loss": 0.2286,
455
+ "rewards/accuracies": 0.893750011920929,
456
+ "rewards/chosen": 3.2131683826446533,
457
+ "rewards/margins": 3.975499391555786,
458
+ "rewards/rejected": -0.7623313665390015,
459
  "step": 290
460
  },
461
  {
462
+ "epoch": 0.7677543186180422,
463
+ "grad_norm": 339.1306333767976,
464
+ "learning_rate": 3.0727564649040063e-08,
465
+ "logits/chosen": -2.6111063957214355,
466
+ "logits/rejected": -2.5518836975097656,
467
+ "logps/chosen": -271.9001159667969,
468
+ "logps/rejected": -216.2710723876953,
469
+ "loss": 0.2446,
470
+ "rewards/accuracies": 0.9125000238418579,
471
+ "rewards/chosen": 3.3372998237609863,
472
+ "rewards/margins": 3.9620718955993652,
473
+ "rewards/rejected": -0.6247718930244446,
474
  "step": 300
475
  },
476
  {
477
+ "epoch": 0.7933461292386437,
478
+ "grad_norm": 235.34557136252968,
479
+ "learning_rate": 2.4558409508920985e-08,
480
+ "logits/chosen": -2.5742857456207275,
481
+ "logits/rejected": -2.499851703643799,
482
+ "logps/chosen": -275.28167724609375,
483
+ "logps/rejected": -211.0914764404297,
484
+ "loss": 0.2598,
485
+ "rewards/accuracies": 0.862500011920929,
486
+ "rewards/chosen": 3.4961440563201904,
487
+ "rewards/margins": 4.247152805328369,
488
+ "rewards/rejected": -0.7510083913803101,
489
  "step": 310
490
  },
491
  {
492
+ "epoch": 0.818937939859245,
493
+ "grad_norm": 368.2414347720992,
494
+ "learning_rate": 1.899321190108335e-08,
495
+ "logits/chosen": -2.59656023979187,
496
+ "logits/rejected": -2.545151472091675,
497
+ "logps/chosen": -258.7973327636719,
498
+ "logps/rejected": -200.56219482421875,
499
+ "loss": 0.2329,
500
+ "rewards/accuracies": 0.8999999761581421,
501
+ "rewards/chosen": 3.0572078227996826,
502
+ "rewards/margins": 3.9228408336639404,
503
+ "rewards/rejected": -0.8656327128410339,
504
  "step": 320
505
  },
506
  {
507
+ "epoch": 0.8445297504798465,
508
+ "grad_norm": 412.2800478150093,
509
+ "learning_rate": 1.4076524743778317e-08,
510
+ "logits/chosen": -2.6195101737976074,
511
+ "logits/rejected": -2.55584716796875,
512
+ "logps/chosen": -262.2277526855469,
513
+ "logps/rejected": -213.35403442382812,
514
+ "loss": 0.231,
515
+ "rewards/accuracies": 0.925000011920929,
516
+ "rewards/chosen": 3.7354748249053955,
517
+ "rewards/margins": 4.0478105545043945,
518
+ "rewards/rejected": -0.31233564019203186,
519
  "step": 330
520
  },
521
  {
522
+ "epoch": 0.8701215611004478,
523
+ "grad_norm": 291.1936087568274,
524
+ "learning_rate": 9.847709219918398e-09,
525
+ "logits/chosen": -2.6071531772613525,
526
+ "logits/rejected": -2.5460381507873535,
527
+ "logps/chosen": -257.75164794921875,
528
+ "logps/rejected": -206.60986328125,
529
+ "loss": 0.221,
530
+ "rewards/accuracies": 0.949999988079071,
531
+ "rewards/chosen": 3.5297904014587402,
532
+ "rewards/margins": 4.399306297302246,
533
+ "rewards/rejected": -0.8695155382156372,
534
  "step": 340
535
  },
536
  {
537
+ "epoch": 0.8957133717210493,
538
+ "grad_norm": 301.99507828681374,
539
+ "learning_rate": 6.340619665972846e-09,
540
+ "logits/chosen": -2.617502212524414,
541
+ "logits/rejected": -2.5671114921569824,
542
+ "logps/chosen": -267.4635009765625,
543
+ "logps/rejected": -222.5786895751953,
544
+ "loss": 0.2579,
545
+ "rewards/accuracies": 0.8687499761581421,
546
+ "rewards/chosen": 3.393681287765503,
547
+ "rewards/margins": 3.9400668144226074,
548
+ "rewards/rejected": -0.546385645866394,
549
  "step": 350
550
  },
551
  {
552
+ "epoch": 0.9213051823416507,
553
+ "grad_norm": 348.99526280716225,
554
+ "learning_rate": 3.583332546643769e-09,
555
+ "logits/chosen": -2.6180646419525146,
556
+ "logits/rejected": -2.5445971488952637,
557
+ "logps/chosen": -272.35406494140625,
558
+ "logps/rejected": -214.8975067138672,
559
+ "loss": 0.2825,
560
  "rewards/accuracies": 0.893750011920929,
561
+ "rewards/chosen": 3.431410551071167,
562
+ "rewards/margins": 4.0157270431518555,
563
+ "rewards/rejected": -0.584316074848175,
564
  "step": 360
565
  },
566
  {
567
+ "epoch": 0.946896992962252,
568
+ "grad_norm": 251.0460885716357,
569
+ "learning_rate": 1.5979216850509847e-09,
570
+ "logits/chosen": -2.5794007778167725,
571
+ "logits/rejected": -2.5391972064971924,
572
+ "logps/chosen": -271.82513427734375,
573
+ "logps/rejected": -225.1968231201172,
574
+ "loss": 0.2529,
575
  "rewards/accuracies": 0.862500011920929,
576
+ "rewards/chosen": 3.722885847091675,
577
+ "rewards/margins": 4.0348358154296875,
578
+ "rewards/rejected": -0.3119499981403351,
579
  "step": 370
580
  },
581
  {
582
+ "epoch": 0.9724888035828535,
583
+ "grad_norm": 201.79043052952954,
584
+ "learning_rate": 4.002815478505006e-10,
585
+ "logits/chosen": -2.6285715103149414,
586
+ "logits/rejected": -2.585494041442871,
587
+ "logps/chosen": -261.1199951171875,
588
+ "logps/rejected": -226.61105346679688,
589
+ "loss": 0.255,
590
+ "rewards/accuracies": 0.8500000238418579,
591
+ "rewards/chosen": 3.6326937675476074,
592
+ "rewards/margins": 4.036477088928223,
593
+ "rewards/rejected": -0.40378379821777344,
594
  "step": 380
595
  },
596
  {
597
+ "epoch": 0.9980806142034548,
598
+ "grad_norm": 458.24518941114127,
599
  "learning_rate": 0.0,
600
+ "logits/chosen": -2.641213893890381,
601
+ "logits/rejected": -2.575150966644287,
602
+ "logps/chosen": -248.17904663085938,
603
+ "logps/rejected": -206.1395721435547,
604
+ "loss": 0.2365,
605
+ "rewards/accuracies": 0.918749988079071,
606
+ "rewards/chosen": 3.4077067375183105,
607
+ "rewards/margins": 4.0728960037231445,
608
+ "rewards/rejected": -0.6651893854141235,
609
  "step": 390
610
  },
611
  {
612
+ "epoch": 0.9980806142034548,
613
  "step": 390,
614
  "total_flos": 0.0,
615
+ "train_loss": 0.2947830175742125,
616
+ "train_runtime": 6391.2384,
617
+ "train_samples_per_second": 7.823,
618
+ "train_steps_per_second": 0.061
619
  }
620
  ],
621
  "logging_steps": 10,
 
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:69755ef6c720cdfd7d14b231fbf0ad5a8fc29c369a6e770d1d3031286dc30b37
- size 6264

  version https://git-lfs.github.com/spec/v1
+ oid sha256:4794be63c2fe75e163cfc9de1b0a4ccd9a5f02340e7eb2b99ae28feb9b6d4609
+ size 6520
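training_args.bin is the pickled TrainingArguments object the Trainer saves alongside the checkpoint; the size change here is presumably due to the argument schema differing between Transformers 4.39.3 and 4.41.1. A hedged sketch for inspecting it (requires a transformers install compatible with the version that wrote the file):

```python
# Sketch: inspect the committed training_args.bin (a pickled TrainingArguments).
import torch

# Note: on newer torch releases you may need torch.load(..., weights_only=False);
# the Pytorch 2.1.2 listed in the README loads pickled objects by default.
args = torch.load("training_args.bin")
print(args.learning_rate, args.seed, args.per_device_train_batch_size)
```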