RikkiXu committed
Commit 29d6b1c
Parent: b4f4d12

Model save

README.md CHANGED
@@ -32,7 +32,7 @@ More information needed
  ### Training hyperparameters
 
  The following hyperparameters were used during training:
- - learning_rate: 5e-07
+ - learning_rate: 4e-07
  - train_batch_size: 4
  - eval_batch_size: 4
  - seed: 42
@@ -52,7 +52,7 @@ The following hyperparameters were used during training:
 
  ### Framework versions
 
- - Transformers 4.39.3
+ - Transformers 4.41.1
  - Pytorch 2.1.2+cu118
  - Datasets 2.16.1
- - Tokenizers 0.15.2
+ - Tokenizers 0.19.1
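
For orientation only: the hyperparameters listed in the README diff map onto a standard `transformers.TrainingArguments` setup. The sketch below is an illustration, not the training script from this repository; the output directory, `bf16` flag, and use of the Hugging Face `Trainer` stack are assumptions inferred from `config.json` and `trainer_state.json` elsewhere in this commit.

```python
# Illustrative sketch only: expressing the README hyperparameters with
# transformers.TrainingArguments. Values marked "assumed" are not taken
# verbatim from this commit.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="outputs",           # hypothetical path (assumed)
    learning_rate=4e-7,             # updated in this commit (was 5e-7)
    per_device_train_batch_size=4,  # "train_batch_size: 4"
    per_device_eval_batch_size=4,   # "eval_batch_size: 4"
    seed=42,                        # "seed: 42"
    num_train_epochs=1,             # trainer_state.json logs a single epoch
    bf16=True,                      # config.json uses bfloat16 weights (assumed training dtype)
    logging_steps=10,               # from trainer_state.json
    save_steps=100,                 # from trainer_state.json
)
```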
all_results.json CHANGED
@@ -1,8 +1,9 @@
  {
- "epoch": 1.0,
- "train_loss": 0.4513903107398596,
- "train_runtime": 5854.463,
+ "epoch": 0.9980806142034548,
+ "total_flos": 0.0,
+ "train_loss": 0.3119094243416419,
+ "train_runtime": 6443.086,
  "train_samples": 50000,
- "train_samples_per_second": 8.54,
- "train_steps_per_second": 0.067
+ "train_samples_per_second": 7.76,
+ "train_steps_per_second": 0.061
  }
config.json CHANGED
@@ -20,7 +20,7 @@
  "sliding_window": 4096,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
- "transformers_version": "4.39.3",
+ "transformers_version": "4.41.1",
  "use_cache": false,
  "vocab_size": 32000
  }
generation_config.json CHANGED
@@ -2,5 +2,5 @@
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
- "transformers_version": "4.39.3"
+ "transformers_version": "4.41.1"
  }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:04f2436f631684bd85d9ee0f015c2a6d18e79bd1c5a33dd8503659b12524b028
+ oid sha256:5bc88b0288a40faf21a1e24a96384ccf683d38e8246d3b0cab22d31c505d8b9f
  size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:70de4abb0b18d600edfa17cb8203a64d82cb5cd93011b4c04bcdbe4e85b25295
+ oid sha256:c430b926ebd01f65609ab319292fa842772b87fc92486924b50fdd7065be6155
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3f381addd443a7edabe9d8200e1356682fb04f61fecc2810526dc1a4c51d7868
+ oid sha256:4d5049e6472b9a1956a056ed3a8a66408f55efa3d2ea2bd03073317eac4eec19
  size 4540516344
runs/Jun22_08-58-28_n136-082-130/events.out.tfevents.1719017941.n136-082-130.2587327.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:322059a7357c1ed3b22a6955d1cc16bf4648a1464ced2249bb3c51a70d6dfacf
- size 26299
+ oid sha256:0ae7f710e69ad55192397299709b08f7cf7e04bccf24313d743eb75ad7f03d42
+ size 32845
tokenizer.json CHANGED
@@ -134,6 +134,7 @@
  "end_of_word_suffix": null,
  "fuse_unk": true,
  "byte_fallback": true,
+ "ignore_merges": false,
  "vocab": {
  "<unk>": 0,
  "<s>": 1,
train_results.json CHANGED
@@ -1,8 +1,9 @@
  {
- "epoch": 1.0,
- "train_loss": 0.4513903107398596,
- "train_runtime": 5854.463,
+ "epoch": 0.9980806142034548,
+ "total_flos": 0.0,
+ "train_loss": 0.3119094243416419,
+ "train_runtime": 6443.086,
  "train_samples": 50000,
- "train_samples_per_second": 8.54,
- "train_steps_per_second": 0.067
+ "train_samples_per_second": 7.76,
+ "train_steps_per_second": 0.061
  }
trainer_state.json CHANGED
@@ -9,14 +9,14 @@
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 0.0,
13
- "grad_norm": 1292.2103490533002,
14
- "learning_rate": 1.282051282051282e-08,
15
  "logits/chosen": -2.5583817958831787,
16
  "logits/rejected": -2.4487552642822266,
17
  "logps/chosen": -258.1644592285156,
18
  "logps/rejected": -216.25729370117188,
19
- "loss": 0.6964,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
@@ -24,598 +24,598 @@
24
  "step": 1
25
  },
26
  {
27
- "epoch": 0.03,
28
- "grad_norm": 1261.1198733310573,
29
- "learning_rate": 1.2820512820512818e-07,
30
- "logits/chosen": -2.6061007976531982,
31
- "logits/rejected": -2.553147315979004,
32
- "logps/chosen": -267.5506591796875,
33
- "logps/rejected": -217.63583374023438,
34
- "loss": 0.7045,
35
- "rewards/accuracies": 0.4375,
36
- "rewards/chosen": 0.012439604848623276,
37
- "rewards/margins": 0.01009546872228384,
38
- "rewards/rejected": 0.002344133099541068,
39
  "step": 10
40
  },
41
  {
42
- "epoch": 0.05,
43
- "grad_norm": 853.2707411908161,
44
- "learning_rate": 2.5641025641025636e-07,
45
- "logits/chosen": -2.629751443862915,
46
- "logits/rejected": -2.5669989585876465,
47
- "logps/chosen": -260.5412292480469,
48
- "logps/rejected": -207.0039825439453,
49
- "loss": 0.5192,
50
- "rewards/accuracies": 0.7250000238418579,
51
- "rewards/chosen": 0.6866833567619324,
52
- "rewards/margins": 0.6415502429008484,
53
- "rewards/rejected": 0.045133065432310104,
54
  "step": 20
55
  },
56
  {
57
- "epoch": 0.08,
58
- "grad_norm": 1142.6320871817904,
59
- "learning_rate": 3.8461538461538463e-07,
60
- "logits/chosen": -2.6441783905029297,
61
- "logits/rejected": -2.5700392723083496,
62
- "logps/chosen": -251.12313842773438,
63
- "logps/rejected": -198.34071350097656,
64
- "loss": 0.3383,
65
- "rewards/accuracies": 0.856249988079071,
66
- "rewards/chosen": 3.621673583984375,
67
- "rewards/margins": 3.1466643810272217,
68
- "rewards/rejected": 0.47500935196876526,
69
  "step": 30
70
  },
71
  {
72
- "epoch": 0.1,
73
- "grad_norm": 498.4795422948633,
74
- "learning_rate": 4.99989986344963e-07,
75
- "logits/chosen": -2.6392924785614014,
76
- "logits/rejected": -2.561156749725342,
77
- "logps/chosen": -243.8309783935547,
78
- "logps/rejected": -192.75845336914062,
79
- "loss": 0.3207,
80
- "rewards/accuracies": 0.8500000238418579,
81
- "rewards/chosen": 5.866530418395996,
82
- "rewards/margins": 5.031473636627197,
83
- "rewards/rejected": 0.8350569009780884,
84
  "step": 40
85
  },
86
  {
87
- "epoch": 0.13,
88
- "grad_norm": 687.258084457276,
89
- "learning_rate": 4.987893180827479e-07,
90
- "logits/chosen": -2.6534440517425537,
91
- "logits/rejected": -2.5842742919921875,
92
- "logps/chosen": -258.9037170410156,
93
- "logps/rejected": -203.41665649414062,
94
- "loss": 0.3666,
95
- "rewards/accuracies": 0.8687499761581421,
96
- "rewards/chosen": 8.400110244750977,
97
- "rewards/margins": 6.8410444259643555,
98
- "rewards/rejected": 1.559065818786621,
99
  "step": 50
100
  },
101
  {
102
- "epoch": 0.15,
103
- "grad_norm": 759.1516730607283,
104
- "learning_rate": 4.955969343539162e-07,
105
- "logits/chosen": -2.616115093231201,
106
- "logits/rejected": -2.5453152656555176,
107
- "logps/chosen": -262.9014892578125,
108
- "logps/rejected": -209.2264404296875,
109
- "loss": 0.3916,
110
- "rewards/accuracies": 0.862500011920929,
111
- "rewards/chosen": 5.333600044250488,
112
- "rewards/margins": 6.3847455978393555,
113
- "rewards/rejected": -1.0511456727981567,
114
  "step": 60
115
  },
116
  {
117
- "epoch": 0.18,
118
- "grad_norm": 560.0502144214485,
119
- "learning_rate": 4.90438392204474e-07,
120
- "logits/chosen": -2.590355634689331,
121
- "logits/rejected": -2.5212607383728027,
122
- "logps/chosen": -292.03955078125,
123
- "logps/rejected": -227.56900024414062,
124
- "loss": 0.366,
125
- "rewards/accuracies": 0.918749988079071,
126
- "rewards/chosen": 5.28397798538208,
127
- "rewards/margins": 7.424477577209473,
128
- "rewards/rejected": -2.1404995918273926,
129
  "step": 70
130
  },
131
  {
132
- "epoch": 0.2,
133
- "grad_norm": 1037.6404299504059,
134
- "learning_rate": 4.83354989019146e-07,
135
- "logits/chosen": -2.5414998531341553,
136
- "logits/rejected": -2.468327283859253,
137
- "logps/chosen": -260.0003356933594,
138
- "logps/rejected": -203.8599090576172,
139
- "loss": 0.3627,
140
  "rewards/accuracies": 0.875,
141
- "rewards/chosen": 6.802065372467041,
142
- "rewards/margins": 7.507330894470215,
143
- "rewards/rejected": -0.705264687538147,
144
  "step": 80
145
  },
146
  {
147
- "epoch": 0.23,
148
- "grad_norm": 1095.0164975833522,
149
- "learning_rate": 4.7440343190975353e-07,
150
- "logits/chosen": -2.5701346397399902,
151
- "logits/rejected": -2.513249397277832,
152
- "logps/chosen": -257.8319396972656,
153
- "logps/rejected": -217.47695922851562,
154
- "loss": 0.3529,
155
- "rewards/accuracies": 0.84375,
156
- "rewards/chosen": 3.5220108032226562,
157
- "rewards/margins": 6.205338478088379,
158
- "rewards/rejected": -2.683328151702881,
159
  "step": 90
160
  },
161
  {
162
- "epoch": 0.26,
163
- "grad_norm": 493.8561719761038,
164
- "learning_rate": 4.6365538373900506e-07,
165
- "logits/chosen": -2.624537944793701,
166
- "logits/rejected": -2.5495262145996094,
167
- "logps/chosen": -237.0422821044922,
168
- "logps/rejected": -201.1056671142578,
169
- "loss": 0.6296,
170
- "rewards/accuracies": 0.8687499761581421,
171
- "rewards/chosen": 4.613960266113281,
172
- "rewards/margins": 6.805362701416016,
173
- "rewards/rejected": -2.191401958465576,
174
  "step": 100
175
  },
176
  {
177
- "epoch": 0.28,
178
- "grad_norm": 783.3339585792174,
179
- "learning_rate": 4.5119688941406386e-07,
180
- "logits/chosen": -2.6185028553009033,
181
- "logits/rejected": -2.5373358726501465,
182
- "logps/chosen": -258.2229919433594,
183
- "logps/rejected": -209.95010375976562,
184
- "loss": 0.5085,
185
  "rewards/accuracies": 0.862500011920929,
186
- "rewards/chosen": 6.706292629241943,
187
- "rewards/margins": 8.07982349395752,
188
- "rewards/rejected": -1.3735301494598389,
189
  "step": 110
190
  },
191
  {
192
- "epoch": 0.31,
193
- "grad_norm": 1013.1425815966796,
194
- "learning_rate": 4.3712768704277524e-07,
195
- "logits/chosen": -2.5965781211853027,
196
- "logits/rejected": -2.5263915061950684,
197
- "logps/chosen": -263.1861572265625,
198
- "logps/rejected": -208.8050079345703,
199
- "loss": 0.4699,
200
- "rewards/accuracies": 0.8812500238418579,
201
- "rewards/chosen": 5.6486382484436035,
202
- "rewards/margins": 7.843411445617676,
203
- "rewards/rejected": -2.194772720336914,
204
  "step": 120
205
  },
206
  {
207
- "epoch": 0.33,
208
- "grad_norm": 659.1716861853262,
209
- "learning_rate": 4.2156040946718343e-07,
210
- "logits/chosen": -2.571542263031006,
211
- "logits/rejected": -2.502825975418091,
212
- "logps/chosen": -252.36941528320312,
213
- "logps/rejected": -197.37109375,
214
- "loss": 0.4289,
215
  "rewards/accuracies": 0.8374999761581421,
216
- "rewards/chosen": 4.440427303314209,
217
- "rewards/margins": 7.714503288269043,
218
- "rewards/rejected": -3.274075746536255,
219
  "step": 130
220
  },
221
  {
222
- "epoch": 0.36,
223
- "grad_norm": 706.862930333198,
224
- "learning_rate": 4.046196825665637e-07,
225
- "logits/chosen": -2.5883898735046387,
226
- "logits/rejected": -2.5185062885284424,
227
- "logps/chosen": -271.88494873046875,
228
- "logps/rejected": -218.22323608398438,
229
- "loss": 0.4943,
230
- "rewards/accuracies": 0.84375,
231
- "rewards/chosen": 2.9622278213500977,
232
- "rewards/margins": 7.161763668060303,
233
- "rewards/rejected": -4.199534893035889,
234
  "step": 140
235
  },
236
  {
237
- "epoch": 0.38,
238
- "grad_norm": 715.990249793818,
239
- "learning_rate": 3.864411275486261e-07,
240
- "logits/chosen": -2.5699923038482666,
241
- "logits/rejected": -2.502592086791992,
242
- "logps/chosen": -264.418212890625,
243
- "logps/rejected": -213.1693115234375,
244
- "loss": 0.5111,
245
- "rewards/accuracies": 0.875,
246
- "rewards/chosen": 5.556182384490967,
247
- "rewards/margins": 8.052742004394531,
248
- "rewards/rejected": -2.4965591430664062,
249
  "step": 150
250
  },
251
  {
252
- "epoch": 0.41,
253
- "grad_norm": 710.6520782907305,
254
- "learning_rate": 3.671702752161759e-07,
255
- "logits/chosen": -2.559586763381958,
256
- "logits/rejected": -2.4897525310516357,
257
- "logps/chosen": -245.1999969482422,
258
- "logps/rejected": -198.19442749023438,
259
- "loss": 0.4696,
260
- "rewards/accuracies": 0.875,
261
- "rewards/chosen": 3.364131212234497,
262
- "rewards/margins": 8.019769668579102,
263
- "rewards/rejected": -4.655638694763184,
264
  "step": 160
265
  },
266
  {
267
- "epoch": 0.44,
268
- "grad_norm": 865.8215089865342,
269
- "learning_rate": 3.4696140090121375e-07,
270
- "logits/chosen": -2.57694935798645,
271
- "logits/rejected": -2.5136027336120605,
272
- "logps/chosen": -266.91412353515625,
273
- "logps/rejected": -211.6914825439453,
274
- "loss": 0.397,
275
- "rewards/accuracies": 0.862500011920929,
276
- "rewards/chosen": 3.399867296218872,
277
- "rewards/margins": 8.182429313659668,
278
- "rewards/rejected": -4.782561302185059,
279
  "step": 170
280
  },
281
  {
282
- "epoch": 0.46,
283
- "grad_norm": 658.5054936178984,
284
- "learning_rate": 3.259762893935617e-07,
285
- "logits/chosen": -2.6607277393341064,
286
- "logits/rejected": -2.581068515777588,
287
- "logps/chosen": -239.2282257080078,
288
- "logps/rejected": -188.63772583007812,
289
- "loss": 0.4691,
290
- "rewards/accuracies": 0.8687499761581421,
291
- "rewards/chosen": 2.5246458053588867,
292
- "rewards/margins": 7.373200416564941,
293
- "rewards/rejected": -4.848554611206055,
294
  "step": 180
295
  },
296
  {
297
- "epoch": 0.49,
298
- "grad_norm": 746.921617266345,
299
- "learning_rate": 3.0438293975154184e-07,
300
- "logits/chosen": -2.6273646354675293,
301
- "logits/rejected": -2.5594921112060547,
302
- "logps/chosen": -263.73443603515625,
303
- "logps/rejected": -207.8045654296875,
304
- "loss": 0.4399,
305
- "rewards/accuracies": 0.8687499761581421,
306
- "rewards/chosen": 0.9775351285934448,
307
- "rewards/margins": 8.604721069335938,
308
- "rewards/rejected": -7.6271867752075195,
309
  "step": 190
310
  },
311
  {
312
- "epoch": 0.51,
313
- "grad_norm": 859.0725144089504,
314
- "learning_rate": 2.823542203635138e-07,
315
- "logits/chosen": -2.6617209911346436,
316
- "logits/rejected": -2.5823440551757812,
317
- "logps/chosen": -278.6440734863281,
318
- "logps/rejected": -222.3423309326172,
319
- "loss": 0.493,
320
- "rewards/accuracies": 0.887499988079071,
321
- "rewards/chosen": 1.6911423206329346,
322
- "rewards/margins": 9.542765617370605,
323
- "rewards/rejected": -7.851624488830566,
324
  "step": 200
325
  },
326
  {
327
- "epoch": 0.54,
328
- "grad_norm": 1073.7778122145994,
329
- "learning_rate": 2.600664850273538e-07,
330
- "logits/chosen": -2.6473941802978516,
331
- "logits/rejected": -2.581458568572998,
332
- "logps/chosen": -270.93621826171875,
333
- "logps/rejected": -214.9030303955078,
334
- "loss": 0.7378,
335
- "rewards/accuracies": 0.8374999761581421,
336
- "rewards/chosen": 0.10583686828613281,
337
- "rewards/margins": 7.7938642501831055,
338
- "rewards/rejected": -7.688027858734131,
339
  "step": 210
340
  },
341
  {
342
- "epoch": 0.56,
343
- "grad_norm": 783.9779969138841,
344
- "learning_rate": 2.3769816112703045e-07,
345
- "logits/chosen": -2.6640350818634033,
346
- "logits/rejected": -2.606898307800293,
347
- "logps/chosen": -260.6977233886719,
348
- "logps/rejected": -216.708740234375,
349
- "loss": 0.5298,
350
- "rewards/accuracies": 0.8687499761581421,
351
- "rewards/chosen": 0.881191611289978,
352
- "rewards/margins": 7.128912448883057,
353
- "rewards/rejected": -6.247720241546631,
354
  "step": 220
355
  },
356
  {
357
- "epoch": 0.59,
358
- "grad_norm": 656.1996441950056,
359
- "learning_rate": 2.1542832120881677e-07,
360
- "logits/chosen": -2.6948089599609375,
361
- "logits/rejected": -2.615889072418213,
362
- "logps/chosen": -270.77557373046875,
363
- "logps/rejected": -219.38320922851562,
364
- "loss": 0.4547,
365
- "rewards/accuracies": 0.856249988079071,
366
- "rewards/chosen": 2.3647713661193848,
367
- "rewards/margins": 8.186816215515137,
368
- "rewards/rejected": -5.82204532623291,
369
  "step": 230
370
  },
371
  {
372
- "epoch": 0.61,
373
- "grad_norm": 1015.3762946599339,
374
- "learning_rate": 1.934352493925695e-07,
375
- "logits/chosen": -2.6751513481140137,
376
- "logits/rejected": -2.6343884468078613,
377
- "logps/chosen": -266.14263916015625,
378
- "logps/rejected": -223.5273895263672,
379
- "loss": 0.429,
380
- "rewards/accuracies": 0.8999999761581421,
381
- "rewards/chosen": 1.4297797679901123,
382
- "rewards/margins": 10.390259742736816,
383
- "rewards/rejected": -8.960479736328125,
384
  "step": 240
385
  },
386
  {
387
- "epoch": 0.64,
388
- "grad_norm": 586.0366917195352,
389
- "learning_rate": 1.7189501409486059e-07,
390
- "logits/chosen": -2.6815237998962402,
391
- "logits/rejected": -2.6186068058013916,
392
- "logps/chosen": -271.32818603515625,
393
- "logps/rejected": -225.2965545654297,
394
- "loss": 0.4243,
395
- "rewards/accuracies": 0.8374999761581421,
396
- "rewards/chosen": 0.4054955542087555,
397
- "rewards/margins": 8.429037094116211,
398
- "rewards/rejected": -8.023542404174805,
399
  "step": 250
400
  },
401
  {
402
- "epoch": 0.67,
403
- "grad_norm": 567.2324686905487,
404
- "learning_rate": 1.5098005849021078e-07,
405
- "logits/chosen": -2.6706247329711914,
406
- "logits/rejected": -2.6183619499206543,
407
- "logps/chosen": -265.3566589355469,
408
- "logps/rejected": -211.55789184570312,
409
- "loss": 0.4026,
410
- "rewards/accuracies": 0.856249988079071,
411
- "rewards/chosen": 0.452970027923584,
412
- "rewards/margins": 7.969753265380859,
413
- "rewards/rejected": -7.516783237457275,
414
  "step": 260
415
  },
416
  {
417
- "epoch": 0.69,
418
- "grad_norm": 651.6922692823155,
419
- "learning_rate": 1.30857819994673e-07,
420
- "logits/chosen": -2.648026943206787,
421
- "logits/rejected": -2.5697438716888428,
422
- "logps/chosen": -276.09661865234375,
423
- "logps/rejected": -233.24966430664062,
424
- "loss": 0.4615,
425
- "rewards/accuracies": 0.875,
426
- "rewards/chosen": 0.4385630190372467,
427
- "rewards/margins": 11.782003402709961,
428
- "rewards/rejected": -11.343441009521484,
429
  "step": 270
430
  },
431
  {
432
- "epoch": 0.72,
433
- "grad_norm": 492.898374979646,
434
- "learning_rate": 1.116893898236716e-07,
435
- "logits/chosen": -2.6764636039733887,
436
- "logits/rejected": -2.6275370121002197,
437
- "logps/chosen": -273.55120849609375,
438
- "logps/rejected": -222.28256225585938,
439
- "loss": 0.431,
440
- "rewards/accuracies": 0.8687499761581421,
441
- "rewards/chosen": 0.1396121382713318,
442
- "rewards/margins": 8.543681144714355,
443
- "rewards/rejected": -8.404068946838379,
444
  "step": 280
445
  },
446
  {
447
- "epoch": 0.74,
448
- "grad_norm": 601.4691313440442,
449
- "learning_rate": 9.362822335518062e-08,
450
- "logits/chosen": -2.639714241027832,
451
- "logits/rejected": -2.6006019115448,
452
- "logps/chosen": -271.51104736328125,
453
- "logps/rejected": -219.78646850585938,
454
- "loss": 0.3785,
455
- "rewards/accuracies": 0.887499988079071,
456
- "rewards/chosen": 0.39154329895973206,
457
- "rewards/margins": 8.193072319030762,
458
- "rewards/rejected": -7.8015289306640625,
459
  "step": 290
460
  },
461
  {
462
- "epoch": 0.77,
463
- "grad_norm": 876.6568683843423,
464
- "learning_rate": 7.681891162260015e-08,
465
- "logits/chosen": -2.6577625274658203,
466
- "logits/rejected": -2.610130786895752,
467
- "logps/chosen": -278.3086853027344,
468
- "logps/rejected": -223.9765625,
469
- "loss": 0.4175,
470
- "rewards/accuracies": 0.862500011920929,
471
- "rewards/chosen": 0.23941349983215332,
472
- "rewards/margins": 8.298933029174805,
473
- "rewards/rejected": -8.059518814086914,
474
  "step": 300
475
  },
476
  {
477
- "epoch": 0.79,
478
- "grad_norm": 491.2362210232499,
479
- "learning_rate": 6.139602377230247e-08,
480
- "logits/chosen": -2.623286724090576,
481
- "logits/rejected": -2.5633485317230225,
482
- "logps/chosen": -281.3511047363281,
483
- "logps/rejected": -218.5331268310547,
484
- "loss": 0.4291,
485
- "rewards/accuracies": 0.875,
486
- "rewards/chosen": 0.8305476903915405,
487
- "rewards/margins": 8.879867553710938,
488
- "rewards/rejected": -8.049318313598633,
489
  "step": 310
490
  },
491
  {
492
- "epoch": 0.82,
493
- "grad_norm": 762.3598954431114,
494
- "learning_rate": 4.748302975270837e-08,
495
- "logits/chosen": -2.6497387886047363,
496
- "logits/rejected": -2.610168933868408,
497
- "logps/chosen": -264.21588134765625,
498
- "logps/rejected": -206.8611602783203,
499
- "loss": 0.4476,
500
- "rewards/accuracies": 0.875,
501
- "rewards/chosen": 0.6263083219528198,
502
- "rewards/margins": 7.853513240814209,
503
- "rewards/rejected": -7.227204322814941,
504
  "step": 320
505
  },
506
  {
507
- "epoch": 0.84,
508
- "grad_norm": 668.8344256472935,
509
- "learning_rate": 3.5191311859445795e-08,
510
- "logits/chosen": -2.6693060398101807,
511
- "logits/rejected": -2.6197760105133057,
512
- "logps/chosen": -268.38543701171875,
513
- "logps/rejected": -220.74081420898438,
514
- "loss": 0.4113,
515
- "rewards/accuracies": 0.90625,
516
- "rewards/chosen": 1.1819716691970825,
517
- "rewards/margins": 8.392292976379395,
518
- "rewards/rejected": -7.210320949554443,
519
  "step": 330
520
  },
521
  {
522
- "epoch": 0.87,
523
- "grad_norm": 600.9418350776771,
524
- "learning_rate": 2.4619273049795996e-08,
525
- "logits/chosen": -2.653104066848755,
526
- "logits/rejected": -2.6034810543060303,
527
- "logps/chosen": -263.5737609863281,
528
- "logps/rejected": -213.79544067382812,
529
- "loss": 0.3933,
530
- "rewards/accuracies": 0.9125000238418579,
531
- "rewards/chosen": 1.1136972904205322,
532
- "rewards/margins": 9.145872116088867,
533
- "rewards/rejected": -8.032175064086914,
534
  "step": 340
535
  },
536
  {
537
- "epoch": 0.9,
538
- "grad_norm": 448.6960620183454,
539
- "learning_rate": 1.5851549164932115e-08,
540
- "logits/chosen": -2.663357734680176,
541
- "logits/rejected": -2.6194939613342285,
542
- "logps/chosen": -272.70233154296875,
543
- "logps/rejected": -229.30697631835938,
544
- "loss": 0.4249,
545
- "rewards/accuracies": 0.887499988079071,
546
- "rewards/chosen": 1.3936760425567627,
547
- "rewards/margins": 8.432661056518555,
548
- "rewards/rejected": -7.0389862060546875,
549
  "step": 350
550
  },
551
  {
552
- "epoch": 0.92,
553
- "grad_norm": 606.8337189375926,
554
- "learning_rate": 8.958331366609423e-09,
555
- "logits/chosen": -2.666874885559082,
556
- "logits/rejected": -2.6079227924346924,
557
- "logps/chosen": -278.23712158203125,
558
- "logps/rejected": -222.1784210205078,
559
- "loss": 0.4777,
560
- "rewards/accuracies": 0.893750011920929,
561
- "rewards/chosen": 0.8818317651748657,
562
- "rewards/margins": 8.486412048339844,
563
- "rewards/rejected": -7.604579925537109,
564
  "step": 360
565
  },
566
  {
567
- "epoch": 0.95,
568
- "grad_norm": 565.1329564450134,
569
- "learning_rate": 3.994804212627461e-09,
570
- "logits/chosen": -2.626528739929199,
571
- "logits/rejected": -2.5960590839385986,
572
- "logps/chosen": -277.90789794921875,
573
- "logps/rejected": -232.6277313232422,
574
- "loss": 0.4164,
575
- "rewards/accuracies": 0.84375,
576
- "rewards/chosen": 1.226670503616333,
577
- "rewards/margins": 8.475992202758789,
578
- "rewards/rejected": -7.249321937561035,
579
  "step": 370
580
  },
581
  {
582
- "epoch": 0.97,
583
- "grad_norm": 494.78405010061283,
584
- "learning_rate": 1.0007038696262516e-09,
585
- "logits/chosen": -2.672760486602783,
586
- "logits/rejected": -2.6364920139312744,
587
- "logps/chosen": -266.59918212890625,
588
- "logps/rejected": -233.4165496826172,
589
- "loss": 0.4064,
590
- "rewards/accuracies": 0.862500011920929,
591
- "rewards/chosen": 1.6075836420059204,
592
- "rewards/margins": 8.459333419799805,
593
- "rewards/rejected": -6.851749420166016,
594
  "step": 380
595
  },
596
  {
597
- "epoch": 1.0,
598
- "grad_norm": 917.0969156966863,
599
  "learning_rate": 0.0,
600
- "logits/chosen": -2.6849241256713867,
601
- "logits/rejected": -2.630876302719116,
602
- "logps/chosen": -254.27664184570312,
603
- "logps/rejected": -213.14102172851562,
604
- "loss": 0.4983,
605
- "rewards/accuracies": 0.862500011920929,
606
- "rewards/chosen": 0.6460372805595398,
607
- "rewards/margins": 8.144643783569336,
608
- "rewards/rejected": -7.498606204986572,
609
  "step": 390
610
  },
611
  {
612
- "epoch": 1.0,
613
  "step": 390,
614
  "total_flos": 0.0,
615
- "train_loss": 0.4513903107398596,
616
- "train_runtime": 5854.463,
617
- "train_samples_per_second": 8.54,
618
- "train_steps_per_second": 0.067
619
  }
620
  ],
621
  "logging_steps": 10,
@@ -623,6 +623,18 @@
623
  "num_input_tokens_seen": 0,
624
  "num_train_epochs": 1,
625
  "save_steps": 100,
 
 
 
 
 
 
 
 
 
 
 
 
626
  "total_flos": 0.0,
627
  "train_batch_size": 4,
628
  "trial_name": null,
 
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.0025591810620601407,
13
+ "grad_norm": 709.5459154500938,
14
+ "learning_rate": 1.0256410256410256e-08,
15
  "logits/chosen": -2.5583817958831787,
16
  "logits/rejected": -2.4487552642822266,
17
  "logps/chosen": -258.1644592285156,
18
  "logps/rejected": -216.25729370117188,
19
+ "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
 
24
  "step": 1
25
  },
26
  {
27
+ "epoch": 0.025591810620601407,
28
+ "grad_norm": 684.196668594934,
29
+ "learning_rate": 1.0256410256410255e-07,
30
+ "logits/chosen": -2.605860471725464,
31
+ "logits/rejected": -2.5528433322906494,
32
+ "logps/chosen": -267.56048583984375,
33
+ "logps/rejected": -217.59442138671875,
34
+ "loss": 0.7027,
35
+ "rewards/accuracies": 0.4166666567325592,
36
+ "rewards/chosen": 0.0020057493820786476,
37
+ "rewards/margins": -0.020002255216240883,
38
+ "rewards/rejected": 0.022008005529642105,
39
  "step": 10
40
  },
41
  {
42
+ "epoch": 0.05118362124120281,
43
+ "grad_norm": 515.3907221989738,
44
+ "learning_rate": 2.051282051282051e-07,
45
+ "logits/chosen": -2.6285552978515625,
46
+ "logits/rejected": -2.566141128540039,
47
+ "logps/chosen": -260.791259765625,
48
+ "logps/rejected": -207.073486328125,
49
+ "loss": 0.5875,
50
+ "rewards/accuracies": 0.71875,
51
+ "rewards/chosen": 0.2564736604690552,
52
+ "rewards/margins": 0.2661496698856354,
53
+ "rewards/rejected": -0.00967598520219326,
54
  "step": 20
55
  },
56
  {
57
+ "epoch": 0.07677543186180422,
58
+ "grad_norm": 608.3322300561331,
59
+ "learning_rate": 3.076923076923077e-07,
60
+ "logits/chosen": -2.6465039253234863,
61
+ "logits/rejected": -2.5724854469299316,
62
+ "logps/chosen": -251.90835571289062,
63
+ "logps/rejected": -198.9151611328125,
64
+ "loss": 0.3737,
65
+ "rewards/accuracies": 0.8500000238418579,
66
+ "rewards/chosen": 1.6194490194320679,
67
+ "rewards/margins": 1.6427723169326782,
68
+ "rewards/rejected": -0.02332335151731968,
69
  "step": 30
70
  },
71
  {
72
+ "epoch": 0.10236724248240563,
73
+ "grad_norm": 286.23056215212245,
74
+ "learning_rate": 3.999919890759704e-07,
75
+ "logits/chosen": -2.650782823562622,
76
+ "logits/rejected": -2.571556329727173,
77
+ "logps/chosen": -244.2300567626953,
78
+ "logps/rejected": -193.5239715576172,
79
+ "loss": 0.3167,
80
+ "rewards/accuracies": 0.793749988079071,
81
+ "rewards/chosen": 3.0596303939819336,
82
+ "rewards/margins": 2.9784700870513916,
83
+ "rewards/rejected": 0.08116073161363602,
84
  "step": 40
85
  },
86
  {
87
+ "epoch": 0.12795905310300704,
88
+ "grad_norm": 355.8436814450708,
89
+ "learning_rate": 3.9903145446619833e-07,
90
+ "logits/chosen": -2.6738078594207764,
91
+ "logits/rejected": -2.601217031478882,
92
+ "logps/chosen": -257.42144775390625,
93
+ "logps/rejected": -202.82069396972656,
94
+ "loss": 0.3214,
95
+ "rewards/accuracies": 0.875,
96
+ "rewards/chosen": 5.407862663269043,
97
+ "rewards/margins": 4.2437334060668945,
98
+ "rewards/rejected": 1.164129376411438,
99
  "step": 50
100
  },
101
  {
102
+ "epoch": 0.15355086372360843,
103
+ "grad_norm": 419.99201519842893,
104
+ "learning_rate": 3.9647754748313294e-07,
105
+ "logits/chosen": -2.6383204460144043,
106
+ "logits/rejected": -2.5659520626068115,
107
+ "logps/chosen": -259.7900695800781,
108
+ "logps/rejected": -207.2904510498047,
109
+ "loss": 0.3176,
110
+ "rewards/accuracies": 0.8500000238418579,
111
+ "rewards/chosen": 4.518810272216797,
112
+ "rewards/margins": 4.13478422164917,
113
+ "rewards/rejected": 0.3840256929397583,
114
  "step": 60
115
  },
116
  {
117
+ "epoch": 0.17914267434420986,
118
+ "grad_norm": 305.786716914146,
119
+ "learning_rate": 3.9235071376357917e-07,
120
+ "logits/chosen": -2.6047496795654297,
121
+ "logits/rejected": -2.5317835807800293,
122
+ "logps/chosen": -290.00616455078125,
123
+ "logps/rejected": -227.17697143554688,
124
+ "loss": 0.301,
125
+ "rewards/accuracies": 0.90625,
126
+ "rewards/chosen": 3.9522290229797363,
127
+ "rewards/margins": 4.945390701293945,
128
+ "rewards/rejected": -0.9931615591049194,
129
  "step": 70
130
  },
131
  {
132
+ "epoch": 0.20473448496481125,
133
+ "grad_norm": 512.2762241729247,
134
+ "learning_rate": 3.8668399121531677e-07,
135
+ "logits/chosen": -2.552280902862549,
136
+ "logits/rejected": -2.476268768310547,
137
+ "logps/chosen": -260.35906982421875,
138
+ "logps/rejected": -206.0688018798828,
139
+ "loss": 0.2615,
140
  "rewards/accuracies": 0.875,
141
+ "rewards/chosen": 3.5995476245880127,
142
+ "rewards/margins": 5.095798969268799,
143
+ "rewards/rejected": -1.4962517023086548,
144
  "step": 80
145
  },
146
  {
147
+ "epoch": 0.23032629558541268,
148
+ "grad_norm": 515.1909309248075,
149
+ "learning_rate": 3.795227455278028e-07,
150
+ "logits/chosen": -2.5692882537841797,
151
+ "logits/rejected": -2.510436534881592,
152
+ "logps/chosen": -256.6405334472656,
153
+ "logps/rejected": -217.69192504882812,
154
+ "loss": 0.264,
155
+ "rewards/accuracies": 0.862500011920929,
156
+ "rewards/chosen": 2.5523829460144043,
157
+ "rewards/margins": 4.150609016418457,
158
+ "rewards/rejected": -1.5982261896133423,
159
  "step": 90
160
  },
161
  {
162
+ "epoch": 0.2559181062060141,
163
+ "grad_norm": 587.7494413414474,
164
+ "learning_rate": 3.7092430699120403e-07,
165
+ "logits/chosen": -2.608997344970703,
166
+ "logits/rejected": -2.530529260635376,
167
+ "logps/chosen": -235.7121124267578,
168
+ "logps/rejected": -201.58700561523438,
169
+ "loss": 0.3994,
170
+ "rewards/accuracies": 0.84375,
171
+ "rewards/chosen": 3.2284069061279297,
172
+ "rewards/margins": 4.6865153312683105,
173
+ "rewards/rejected": -1.4581090211868286,
174
  "step": 100
175
  },
176
  {
177
+ "epoch": 0.28150991682661547,
178
+ "grad_norm": 383.9666636386822,
179
+ "learning_rate": 3.6095751153125107e-07,
180
+ "logits/chosen": -2.605412483215332,
181
+ "logits/rejected": -2.5209498405456543,
182
+ "logps/chosen": -257.01318359375,
183
+ "logps/rejected": -210.5634002685547,
184
+ "loss": 0.3274,
185
  "rewards/accuracies": 0.862500011920929,
186
+ "rewards/chosen": 4.330618381500244,
187
+ "rewards/margins": 5.40033483505249,
188
+ "rewards/rejected": -1.069716453552246,
189
  "step": 110
190
  },
191
  {
192
+ "epoch": 0.30710172744721687,
193
+ "grad_norm": 452.32780946070125,
194
+ "learning_rate": 3.497021496342202e-07,
195
+ "logits/chosen": -2.5948054790496826,
196
+ "logits/rejected": -2.5218663215637207,
197
+ "logps/chosen": -261.63739013671875,
198
+ "logps/rejected": -209.2183074951172,
199
+ "loss": 0.3015,
200
+ "rewards/accuracies": 0.887499988079071,
201
+ "rewards/chosen": 3.912487030029297,
202
+ "rewards/margins": 5.338441371917725,
203
+ "rewards/rejected": -1.4259542226791382,
204
  "step": 120
205
  },
206
  {
207
+ "epoch": 0.3326935380678183,
208
+ "grad_norm": 325.92030560656247,
209
+ "learning_rate": 3.372483275737467e-07,
210
+ "logits/chosen": -2.574514389038086,
211
+ "logits/rejected": -2.505225896835327,
212
+ "logps/chosen": -250.41543579101562,
213
+ "logps/rejected": -196.6962127685547,
214
+ "loss": 0.2826,
215
  "rewards/accuracies": 0.8374999761581421,
216
+ "rewards/chosen": 3.443896532058716,
217
+ "rewards/margins": 4.925398826599121,
218
+ "rewards/rejected": -1.4815022945404053,
219
  "step": 130
220
  },
221
  {
222
+ "epoch": 0.3582853486884197,
223
+ "grad_norm": 286.3934639339848,
224
+ "learning_rate": 3.2369574605325094e-07,
225
+ "logits/chosen": -2.598271369934082,
226
+ "logits/rejected": -2.5281975269317627,
227
+ "logps/chosen": -268.22943115234375,
228
+ "logps/rejected": -216.2914581298828,
229
+ "loss": 0.3028,
230
+ "rewards/accuracies": 0.856249988079071,
231
+ "rewards/chosen": 3.4734299182891846,
232
+ "rewards/margins": 4.84061336517334,
233
+ "rewards/rejected": -1.3671828508377075,
234
  "step": 140
235
  },
236
  {
237
+ "epoch": 0.3838771593090211,
238
+ "grad_norm": 326.69658537939597,
239
+ "learning_rate": 3.091529020389009e-07,
240
+ "logits/chosen": -2.5794036388397217,
241
+ "logits/rejected": -2.5112969875335693,
242
+ "logps/chosen": -262.27850341796875,
243
+ "logps/rejected": -213.2576141357422,
244
+ "loss": 0.3047,
245
+ "rewards/accuracies": 0.887499988079071,
246
+ "rewards/chosen": 4.1566081047058105,
247
+ "rewards/margins": 5.587738990783691,
248
+ "rewards/rejected": -1.4311310052871704,
249
  "step": 150
250
  },
251
  {
252
+ "epoch": 0.4094689699296225,
253
+ "grad_norm": 334.0349111221283,
254
+ "learning_rate": 2.937362201729407e-07,
255
+ "logits/chosen": -2.569304943084717,
256
+ "logits/rejected": -2.4969332218170166,
257
+ "logps/chosen": -241.8642120361328,
258
+ "logps/rejected": -196.44754028320312,
259
+ "loss": 0.2947,
260
+ "rewards/accuracies": 0.893750011920929,
261
+ "rewards/chosen": 3.53686785697937,
262
+ "rewards/margins": 5.24989128112793,
263
+ "rewards/rejected": -1.7130241394042969,
264
  "step": 160
265
  },
266
  {
267
+ "epoch": 0.4350607805502239,
268
+ "grad_norm": 385.1324260357516,
269
+ "learning_rate": 2.77569120720971e-07,
270
+ "logits/chosen": -2.5677335262298584,
271
+ "logits/rejected": -2.499415874481201,
272
+ "logps/chosen": -263.1123046875,
273
+ "logps/rejected": -209.7564239501953,
274
+ "loss": 0.2546,
275
+ "rewards/accuracies": 0.875,
276
+ "rewards/chosen": 3.789729595184326,
277
+ "rewards/margins": 5.479173183441162,
278
+ "rewards/rejected": -1.6894439458847046,
279
  "step": 170
280
  },
281
  {
282
+ "epoch": 0.46065259117082535,
283
+ "grad_norm": 298.5381373642981,
284
+ "learning_rate": 2.6078103151484935e-07,
285
+ "logits/chosen": -2.632197856903076,
286
+ "logits/rejected": -2.5449578762054443,
287
+ "logps/chosen": -234.98526000976562,
288
+ "logps/rejected": -185.88897705078125,
289
+ "loss": 0.3076,
290
+ "rewards/accuracies": 0.8812500238418579,
291
+ "rewards/chosen": 3.5240702629089355,
292
+ "rewards/margins": 4.843334674835205,
293
+ "rewards/rejected": -1.3192641735076904,
294
  "step": 180
295
  },
296
  {
297
+ "epoch": 0.48624440179142675,
298
+ "grad_norm": 275.15385748139875,
299
+ "learning_rate": 2.435063518012335e-07,
300
+ "logits/chosen": -2.592048168182373,
301
+ "logits/rejected": -2.5161328315734863,
302
+ "logps/chosen": -257.56207275390625,
303
+ "logps/rejected": -202.87477111816406,
304
+ "loss": 0.2628,
305
+ "rewards/accuracies": 0.8812500238418579,
306
+ "rewards/chosen": 3.6292481422424316,
307
+ "rewards/margins": 5.401675224304199,
308
+ "rewards/rejected": -1.7724273204803467,
309
  "step": 190
310
  },
311
  {
312
+ "epoch": 0.5118362124120281,
313
+ "grad_norm": 441.7914810102142,
314
+ "learning_rate": 2.2588337629081105e-07,
315
+ "logits/chosen": -2.6288845539093018,
316
+ "logits/rejected": -2.539705753326416,
317
+ "logps/chosen": -272.2264709472656,
318
+ "logps/rejected": -217.74020385742188,
319
+ "loss": 0.2691,
320
+ "rewards/accuracies": 0.8812500238418579,
321
+ "rewards/chosen": 4.148322105407715,
322
+ "rewards/margins": 6.209266662597656,
323
+ "rewards/rejected": -2.0609450340270996,
324
  "step": 200
325
  },
326
  {
327
+ "epoch": 0.5374280230326296,
328
+ "grad_norm": 578.0076877712855,
329
+ "learning_rate": 2.0805318802188303e-07,
330
+ "logits/chosen": -2.6119117736816406,
331
+ "logits/rejected": -2.5380859375,
332
+ "logps/chosen": -265.3163146972656,
333
+ "logps/rejected": -211.01132202148438,
334
+ "loss": 0.4145,
335
+ "rewards/accuracies": 0.824999988079071,
336
+ "rewards/chosen": 2.868762969970703,
337
+ "rewards/margins": 5.194035530090332,
338
+ "rewards/rejected": -2.32527232170105,
339
  "step": 210
340
  },
341
  {
342
+ "epoch": 0.5630198336532309,
343
+ "grad_norm": 435.59772463261663,
344
+ "learning_rate": 1.9015852890162436e-07,
345
+ "logits/chosen": -2.628199577331543,
346
+ "logits/rejected": -2.5644900798797607,
347
+ "logps/chosen": -256.01708984375,
348
+ "logps/rejected": -213.6505126953125,
349
+ "loss": 0.302,
350
+ "rewards/accuracies": 0.8812500238418579,
351
+ "rewards/chosen": 2.8298840522766113,
352
+ "rewards/margins": 4.771709442138672,
353
+ "rewards/rejected": -1.9418258666992188,
354
  "step": 220
355
  },
356
  {
357
+ "epoch": 0.5886116442738324,
358
+ "grad_norm": 341.5067091186977,
359
+ "learning_rate": 1.723426569670534e-07,
360
+ "logits/chosen": -2.660271644592285,
361
+ "logits/rejected": -2.5745387077331543,
362
+ "logps/chosen": -265.7137145996094,
363
+ "logps/rejected": -216.05184936523438,
364
+ "loss": 0.2709,
365
+ "rewards/accuracies": 0.862500011920929,
366
+ "rewards/chosen": 3.8446857929229736,
367
+ "rewards/margins": 5.413482666015625,
368
+ "rewards/rejected": -1.5687971115112305,
369
  "step": 230
370
  },
371
  {
372
+ "epoch": 0.6142034548944337,
373
+ "grad_norm": 518.1474749065682,
374
+ "learning_rate": 1.547481995140556e-07,
375
+ "logits/chosen": -2.6460232734680176,
376
+ "logits/rejected": -2.5979976654052734,
377
+ "logps/chosen": -260.61566162109375,
378
+ "logps/rejected": -219.18740844726562,
379
+ "loss": 0.2732,
380
+ "rewards/accuracies": 0.8812500238418579,
381
+ "rewards/chosen": 3.5578017234802246,
382
+ "rewards/margins": 6.3658447265625,
383
+ "rewards/rejected": -2.808042287826538,
384
  "step": 240
385
  },
386
  {
387
+ "epoch": 0.6397952655150352,
388
+ "grad_norm": 488.31668996973804,
389
+ "learning_rate": 1.375160112758885e-07,
390
+ "logits/chosen": -2.657061815261841,
391
+ "logits/rejected": -2.5865418910980225,
392
+ "logps/chosen": -265.78448486328125,
393
+ "logps/rejected": -221.42001342773438,
394
+ "loss": 0.2763,
395
+ "rewards/accuracies": 0.875,
396
+ "rewards/chosen": 2.9970998764038086,
397
+ "rewards/margins": 5.516369819641113,
398
+ "rewards/rejected": -2.519270420074463,
399
  "step": 250
400
  },
401
  {
402
+ "epoch": 0.6653870761356366,
403
+ "grad_norm": 266.72245551152963,
404
+ "learning_rate": 1.2078404679216862e-07,
405
+ "logits/chosen": -2.648825168609619,
406
+ "logits/rejected": -2.589503526687622,
407
+ "logps/chosen": -260.22369384765625,
408
+ "logps/rejected": -208.0442352294922,
409
+ "loss": 0.2613,
410
+ "rewards/accuracies": 0.875,
411
+ "rewards/chosen": 2.818129301071167,
412
+ "rewards/margins": 5.237301826477051,
413
+ "rewards/rejected": -2.419172525405884,
414
  "step": 260
415
  },
416
  {
417
+ "epoch": 0.690978886756238,
418
+ "grad_norm": 410.4072035856202,
419
+ "learning_rate": 1.0468625599573841e-07,
420
+ "logits/chosen": -2.6281862258911133,
421
+ "logits/rejected": -2.5414164066314697,
422
+ "logps/chosen": -269.1915283203125,
423
+ "logps/rejected": -228.3280792236328,
424
+ "loss": 0.3168,
425
+ "rewards/accuracies": 0.893750011920929,
426
+ "rewards/chosen": 3.6961987018585205,
427
+ "rewards/margins": 7.5373077392578125,
428
+ "rewards/rejected": -3.841109037399292,
429
  "step": 270
430
  },
431
  {
432
+ "epoch": 0.7165706973768394,
433
+ "grad_norm": 230.87089413037836,
434
+ "learning_rate": 8.935151185893727e-08,
435
+ "logits/chosen": -2.653529644012451,
436
+ "logits/rejected": -2.5968880653381348,
437
+ "logps/chosen": -268.2784729003906,
438
+ "logps/rejected": -218.708984375,
439
+ "loss": 0.2585,
440
+ "rewards/accuracies": 0.893750011920929,
441
+ "rewards/chosen": 2.713934898376465,
442
+ "rewards/margins": 5.596070289611816,
443
+ "rewards/rejected": -2.8821353912353516,
444
  "step": 280
445
  },
446
  {
447
+ "epoch": 0.7421625079974408,
448
+ "grad_norm": 355.41024702928314,
449
+ "learning_rate": 7.490257868414449e-08,
450
+ "logits/chosen": -2.613981246948242,
451
+ "logits/rejected": -2.5670533180236816,
452
+ "logps/chosen": -266.4203186035156,
453
+ "logps/rejected": -216.7047119140625,
454
+ "loss": 0.2561,
455
+ "rewards/accuracies": 0.875,
456
+ "rewards/chosen": 2.7628657817840576,
457
+ "rewards/margins": 5.556154727935791,
458
+ "rewards/rejected": -2.793288469314575,
459
  "step": 290
460
  },
461
  {
462
+ "epoch": 0.7677543186180422,
463
+ "grad_norm": 334.3998771127154,
464
+ "learning_rate": 6.145512929808013e-08,
465
+ "logits/chosen": -2.6336982250213623,
466
+ "logits/rejected": -2.579585552215576,
467
+ "logps/chosen": -273.02044677734375,
468
+ "logps/rejected": -220.3231964111328,
469
+ "loss": 0.274,
470
+ "rewards/accuracies": 0.893750011920929,
471
+ "rewards/chosen": 2.7771525382995605,
472
+ "rewards/margins": 5.427972316741943,
473
+ "rewards/rejected": -2.650820016860962,
474
  "step": 300
475
  },
476
  {
477
+ "epoch": 0.7933461292386437,
478
+ "grad_norm": 248.8225361148432,
479
+ "learning_rate": 4.911681901784197e-08,
480
+ "logits/chosen": -2.5988125801086426,
481
+ "logits/rejected": -2.5305018424987793,
482
+ "logps/chosen": -276.5069885253906,
483
+ "logps/rejected": -215.4237518310547,
484
+ "loss": 0.3069,
485
+ "rewards/accuracies": 0.893750011920929,
486
+ "rewards/chosen": 2.883484125137329,
487
+ "rewards/margins": 5.80063533782959,
488
+ "rewards/rejected": -2.917151927947998,
489
  "step": 310
490
  },
491
  {
492
+ "epoch": 0.818937939859245,
493
+ "grad_norm": 378.21338988259265,
494
+ "learning_rate": 3.79864238021667e-08,
495
+ "logits/chosen": -2.621037006378174,
496
+ "logits/rejected": -2.5753254890441895,
497
+ "logps/chosen": -259.85833740234375,
498
+ "logps/rejected": -204.21798706054688,
499
+ "loss": 0.2691,
500
+ "rewards/accuracies": 0.8999999761581421,
501
+ "rewards/chosen": 2.5267233848571777,
502
+ "rewards/margins": 5.2202558517456055,
503
+ "rewards/rejected": -2.6935324668884277,
504
  "step": 320
505
  },
506
  {
507
+ "epoch": 0.8445297504798465,
508
+ "grad_norm": 354.9475645655247,
509
+ "learning_rate": 2.8153049487556634e-08,
510
+ "logits/chosen": -2.6436188220977783,
511
+ "logits/rejected": -2.585820436477661,
512
+ "logps/chosen": -263.09808349609375,
513
+ "logps/rejected": -217.03341674804688,
514
+ "loss": 0.2695,
515
+ "rewards/accuracies": 0.8999999761581421,
516
+ "rewards/chosen": 3.3003220558166504,
517
+ "rewards/margins": 5.452363014221191,
518
+ "rewards/rejected": -2.1520419120788574,
519
  "step": 330
520
  },
521
  {
522
+ "epoch": 0.8701215611004478,
523
+ "grad_norm": 298.79503920742496,
524
+ "learning_rate": 1.9695418439836796e-08,
525
+ "logits/chosen": -2.6284804344177246,
526
+ "logits/rejected": -2.5717878341674805,
527
+ "logps/chosen": -258.5690002441406,
528
+ "logps/rejected": -210.4982147216797,
529
+ "loss": 0.2354,
530
+ "rewards/accuracies": 0.9312499761581421,
531
+ "rewards/chosen": 3.1211013793945312,
532
+ "rewards/margins": 5.934812545776367,
533
+ "rewards/rejected": -2.813711643218994,
534
  "step": 340
535
  },
536
  {
537
+ "epoch": 0.8957133717210493,
538
+ "grad_norm": 256.56016530768545,
539
+ "learning_rate": 1.2681239331945692e-08,
540
+ "logits/chosen": -2.6408398151397705,
541
+ "logits/rejected": -2.5925440788269043,
542
+ "logps/chosen": -268.15728759765625,
543
+ "logps/rejected": -226.1197967529297,
544
+ "loss": 0.2666,
545
+ "rewards/accuracies": 0.8812500238418579,
546
+ "rewards/chosen": 3.046782970428467,
547
+ "rewards/margins": 5.363730430603027,
548
+ "rewards/rejected": -2.3169472217559814,
549
  "step": 350
550
  },
551
  {
552
+ "epoch": 0.9213051823416507,
553
+ "grad_norm": 316.87066881593194,
554
+ "learning_rate": 7.166665093287538e-09,
555
+ "logits/chosen": -2.6406874656677246,
556
+ "logits/rejected": -2.5731348991394043,
557
+ "logps/chosen": -273.30157470703125,
558
+ "logps/rejected": -218.8077392578125,
559
+ "loss": 0.3057,
560
+ "rewards/accuracies": 0.90625,
561
+ "rewards/chosen": 2.9576709270477295,
562
+ "rewards/margins": 5.49710750579834,
563
+ "rewards/rejected": -2.539436101913452,
564
  "step": 360
565
  },
566
  {
567
+ "epoch": 0.946896992962252,
568
+ "grad_norm": 569.0448969619218,
569
+ "learning_rate": 3.1958433701019694e-09,
570
+ "logits/chosen": -2.6003308296203613,
571
+ "logits/rejected": -2.5643393993377686,
572
+ "logps/chosen": -272.14678955078125,
573
+ "logps/rejected": -228.38943481445312,
574
+ "loss": 0.3023,
575
+ "rewards/accuracies": 0.856249988079071,
576
+ "rewards/chosen": 3.5620276927948,
577
+ "rewards/margins": 5.470287322998047,
578
+ "rewards/rejected": -1.908259391784668,
579
  "step": 370
580
  },
581
  {
582
+ "epoch": 0.9724888035828535,
583
+ "grad_norm": 206.4373391747023,
584
+ "learning_rate": 8.005630957010012e-10,
585
+ "logits/chosen": -2.6491971015930176,
586
+ "logits/rejected": -2.6083197593688965,
587
+ "logps/chosen": -261.6040344238281,
588
+ "logps/rejected": -229.88302612304688,
589
+ "loss": 0.273,
590
+ "rewards/accuracies": 0.8812500238418579,
591
+ "rewards/chosen": 3.3906753063201904,
592
+ "rewards/margins": 5.430436611175537,
593
+ "rewards/rejected": -2.039761543273926,
594
  "step": 380
595
  },
596
  {
597
+ "epoch": 0.9980806142034548,
598
+ "grad_norm": 529.0802276796763,
599
  "learning_rate": 0.0,
600
+ "logits/chosen": -2.6596872806549072,
601
+ "logits/rejected": -2.598179578781128,
602
+ "logps/chosen": -249.18356323242188,
603
+ "logps/rejected": -209.78622436523438,
604
+ "loss": 0.2803,
605
+ "rewards/accuracies": 0.893750011920929,
606
+ "rewards/chosen": 2.9054455757141113,
607
+ "rewards/margins": 5.3939642906188965,
608
+ "rewards/rejected": -2.488518476486206,
609
  "step": 390
610
  },
611
  {
612
+ "epoch": 0.9980806142034548,
613
  "step": 390,
614
  "total_flos": 0.0,
615
+ "train_loss": 0.3119094243416419,
616
+ "train_runtime": 6443.086,
617
+ "train_samples_per_second": 7.76,
618
+ "train_steps_per_second": 0.061
619
  }
620
  ],
621
  "logging_steps": 10,
 
623
  "num_input_tokens_seen": 0,
624
  "num_train_epochs": 1,
625
  "save_steps": 100,
626
+ "stateful_callbacks": {
627
+ "TrainerControl": {
628
+ "args": {
629
+ "should_epoch_stop": false,
630
+ "should_evaluate": false,
631
+ "should_log": false,
632
+ "should_save": true,
633
+ "should_training_stop": false
634
+ },
635
+ "attributes": {}
636
+ }
637
+ },
638
  "total_flos": 0.0,
639
  "train_batch_size": 4,
640
  "trial_name": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:aa0e00d3aa1ab0a0ef1dd07b8b8bd52c64ef5747a57df47646eee4b22e879a38
- size 6328
+ oid sha256:6819c1dd4414cf3e1008e039028d4a09c333504f352311e2138049a4cec39cd4
+ size 6520