RikkiXu committed
Commit e9d12bf
1 Parent(s): a58261f

Model save

README.md CHANGED
@@ -32,7 +32,7 @@ More information needed
  ### Training hyperparameters
 
  The following hyperparameters were used during training:
- - learning_rate: 1e-08
+ - learning_rate: 5e-08
  - train_batch_size: 8
  - eval_batch_size: 8
  - seed: 42
@@ -44,7 +44,7 @@ The following hyperparameters were used during training:
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: cosine
  - lr_scheduler_warmup_ratio: 0.1
- - num_epochs: 2
+ - num_epochs: 1
 
  ### Training results
 
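Only the learning rate (1e-08 → 5e-08) and the epoch count (2 → 1) change in the model card. A minimal sketch of how these card entries map onto `transformers.TrainingArguments`, assuming the run goes through the standard `Trainer` stack; `output_dir` and anything not listed in the card are placeholders, not taken from this commit:

```python
# Sketch only: card hyperparameters of the new run as TrainingArguments fields.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="out",                 # placeholder
    learning_rate=5e-08,              # was 1e-08 before this commit
    per_device_train_batch_size=8,    # card: train_batch_size
    per_device_eval_batch_size=8,     # card: eval_batch_size
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-08,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    num_train_epochs=1,               # was 2 before this commit
    logging_steps=10,                 # from trainer_state.json
    save_steps=100,                   # from trainer_state.json
)
```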
all_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 2.0,
- "train_loss": 0.6411590957641602,
- "train_runtime": 8942.072,
- "train_samples": 38445,
- "train_samples_per_second": 8.599,
+ "epoch": 1.0,
+ "train_loss": 0.49109079287602353,
+ "train_runtime": 5330.839,
+ "train_samples": 46672,
+ "train_samples_per_second": 8.755,
  "train_steps_per_second": 0.034
  }
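The new run covers one epoch over 46,672 samples in roughly 89 minutes. The throughput figures are consistent with the runtime and with the step count recorded in trainer_state.json; a quick arithmetic check, assuming throughput is samples (or steps) divided by wall-clock runtime:

```python
# Consistency check on the new all_results.json values (1 epoch).
train_samples = 46672
train_runtime = 5330.839          # seconds
global_step = 182                 # from trainer_state.json

print(round(train_samples / train_runtime, 3))   # 8.755  -> train_samples_per_second
print(round(global_step / train_runtime, 3))     # 0.034  -> train_steps_per_second
```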
generation_config.json CHANGED
@@ -1,6 +1,6 @@
  {
  "_from_model_config": true,
  "bos_token_id": 1,
- "eos_token_id": 32000,
+ "eos_token_id": 2,
  "transformers_version": "4.39.3"
  }
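The eos_token_id moves from 32000 back to 2. On a 32k-vocabulary Llama/Mistral-style tokenizer, id 2 is normally the base `</s>` token, while 32000 would be the first added token (for example a chat-template terminator); the tokenizer itself is not part of this commit, so that cannot be confirmed here. A minimal sketch for checking what the new config resolves to, with a placeholder model id:

```python
# Sketch: inspect the eos token used at generation time.
from transformers import AutoTokenizer, GenerationConfig

repo_id = "RikkiXu/<this-repo>"                 # placeholder

gen_cfg = GenerationConfig.from_pretrained(repo_id)
print(gen_cfg.eos_token_id)                     # 2 after this commit (was 32000)

tok = AutoTokenizer.from_pretrained(repo_id)
print(tok.convert_ids_to_tokens([2]))           # typically ['</s>'] for a 32k vocab
```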
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e95be6f8c0aa9a00e2dec7409b07bb3971685d185f076a462b8f78d03a1941fd
- size 4943178720
+ oid sha256:80c511d14471ab18870f10f923c8787de845cdc70bd963e129a608dd18f3fc60
+ size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:65d756c6023aac10ba06a637406ed4d2e2f4752c360d7d38f9d57ab38052466e
+ oid sha256:c8c474648050e838c831aabb3699a61ac3756493e671f2a842828d65675367af
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a4dee0a340ae6ecce1b95adc398b7196eb2db45a4ad1ef0dc042944445a81bb6
- size 4540532728
+ oid sha256:2335066937ff02bdedb3b53830a851b36f4126b5264e844e5f7337e06e1c6efd
+ size 4540516344
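All three weight shards are replaced (the first and third also shrink slightly). When the model is loaded, transformers resolves the shards through the index file shown below; a minimal loading sketch with a placeholder model id and an assumed dtype — the ~14.5 GB of tensor data is consistent with a roughly 7B-parameter model in 16-bit precision:

```python
# Sketch: the sharded checkpoint is resolved via model.safetensors.index.json.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "RikkiXu/<this-repo>",            # placeholder
    torch_dtype=torch.bfloat16,       # assumption; not recorded in this commit
)
print(sum(p.numel() for p in model.parameters()))   # ~7.2e9 expected
```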
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
  {
  "metadata": {
- "total_size": 14483496960
+ "total_size": 14483464192
  },
  "weight_map": {
  "lm_head.weight": "model-00003-of-00003.safetensors",
runs/Jun18_02-31-46_n136-129-074/events.out.tfevents.1718649387.n136-129-074.1961484.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:86f860b0abf43d7f8965a8620682abac5fc820caea5ba6269048d57fe900dc28
- size 12347
+ oid sha256:154d3e21a3f1857573cbd485ca2aab1bc0ca7fa6c9015161948c9c3cf1ee365f
+ size 18181
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 2.0,
- "train_loss": 0.6411590957641602,
- "train_runtime": 8942.072,
- "train_samples": 38445,
- "train_samples_per_second": 8.599,
+ "epoch": 1.0,
+ "train_loss": 0.49109079287602353,
+ "train_runtime": 5330.839,
+ "train_samples": 46672,
+ "train_samples_per_second": 8.755,
  "train_steps_per_second": 0.034
  }
trainer_state.json CHANGED
@@ -1,21 +1,21 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 1.9966722129783694,
+ "epoch": 0.9972602739726028,
  "eval_steps": 500,
- "global_step": 300,
+ "global_step": 182,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
  "epoch": 0.01,
- "grad_norm": 1151.6184415193795,
- "learning_rate": 3.333333333333333e-10,
- "logits/chosen": -4.106247425079346,
- "logits/rejected": -4.200438499450684,
- "logps/chosen": -382.81439208984375,
- "logps/rejected": -357.65960693359375,
+ "grad_norm": 450.1434249789045,
+ "learning_rate": 2.6315789473684206e-09,
+ "logits/chosen": -0.7030794620513916,
+ "logits/rejected": -0.3951629400253296,
+ "logps/chosen": -341.73382568359375,
+ "logps/rejected": -292.9862060546875,
  "loss": 0.6931,
  "rewards/accuracies": 0.0,
  "rewards/chosen": 0.0,
@@ -24,469 +24,289 @@
  "step": 1
  },
  {
- "epoch": 0.07,
- "grad_norm": 1153.8530059959064,
- "learning_rate": 3.3333333333333334e-09,
- "logits/chosen": -4.217168807983398,
- "logits/rejected": -4.321505069732666,
- "logps/chosen": -334.6739501953125,
- "logps/rejected": -313.41986083984375,
- "loss": 0.7234,
- "rewards/accuracies": 0.3784722089767456,
- "rewards/chosen": -0.04607396200299263,
- "rewards/margins": -0.04357295483350754,
- "rewards/rejected": -0.0025010076351463795,
+ "epoch": 0.05,
+ "grad_norm": 427.11862533612515,
+ "learning_rate": 2.6315789473684208e-08,
+ "logits/chosen": -0.8544511795043945,
+ "logits/rejected": -0.5010538101196289,
+ "logps/chosen": -343.0840148925781,
+ "logps/rejected": -297.6950378417969,
+ "loss": 0.6989,
+ "rewards/accuracies": 0.4201388955116272,
+ "rewards/chosen": 0.006387188099324703,
+ "rewards/margins": -0.002455425448715687,
+ "rewards/rejected": 0.008842612616717815,
  "step": 10
  },
  {
- "epoch": 0.13,
- "grad_norm": 1270.1091311718026,
- "learning_rate": 6.666666666666667e-09,
- "logits/chosen": -4.26615571975708,
- "logits/rejected": -4.41886043548584,
- "logps/chosen": -313.93829345703125,
- "logps/rejected": -288.78863525390625,
- "loss": 0.7299,
- "rewards/accuracies": 0.5062500238418579,
- "rewards/chosen": -0.012899210676550865,
- "rewards/margins": -0.021626513451337814,
- "rewards/rejected": 0.00872730277478695,
+ "epoch": 0.11,
+ "grad_norm": 420.6890953363653,
+ "learning_rate": 4.9995356760283366e-08,
+ "logits/chosen": -0.8882249593734741,
+ "logits/rejected": -0.5170028805732727,
+ "logps/chosen": -333.65777587890625,
+ "logps/rejected": -287.80816650390625,
+ "loss": 0.6881,
+ "rewards/accuracies": 0.5375000238418579,
+ "rewards/chosen": -0.012563005089759827,
+ "rewards/margins": 0.03462809696793556,
+ "rewards/rejected": -0.04719109833240509,
  "step": 20
  },
  {
- "epoch": 0.2,
- "grad_norm": 1195.4358872902142,
- "learning_rate": 1e-08,
- "logits/chosen": -4.3016462326049805,
- "logits/rejected": -4.365716457366943,
- "logps/chosen": -308.2979431152344,
- "logps/rejected": -285.63018798828125,
- "loss": 0.7311,
- "rewards/accuracies": 0.49687498807907104,
- "rewards/chosen": -0.010719490237534046,
- "rewards/margins": -0.010031750425696373,
- "rewards/rejected": -0.000687739229761064,
+ "epoch": 0.16,
+ "grad_norm": 398.42327340439533,
+ "learning_rate": 4.944025194969586e-08,
+ "logits/chosen": -0.9182589650154114,
+ "logits/rejected": -0.5222188234329224,
+ "logps/chosen": -332.426025390625,
+ "logps/rejected": -290.18304443359375,
+ "loss": 0.6356,
+ "rewards/accuracies": 0.640625,
+ "rewards/chosen": -0.035035934299230576,
+ "rewards/margins": 0.15518921613693237,
+ "rewards/rejected": -0.19022515416145325,
  "step": 30
  },
  {
- "epoch": 0.27,
- "grad_norm": 1144.0076561795222,
- "learning_rate": 9.966191788709716e-09,
- "logits/chosen": -4.187338829040527,
- "logits/rejected": -4.271176338195801,
- "logps/chosen": -332.39453125,
- "logps/rejected": -307.4621276855469,
- "loss": 0.7199,
- "rewards/accuracies": 0.5375000238418579,
- "rewards/chosen": 0.04900529980659485,
- "rewards/margins": 0.05206792429089546,
- "rewards/rejected": -0.0030626237858086824,
+ "epoch": 0.22,
+ "grad_norm": 361.5856784181511,
+ "learning_rate": 4.798007040388211e-08,
+ "logits/chosen": -0.8608843684196472,
+ "logits/rejected": -0.5015160441398621,
+ "logps/chosen": -337.7128601074219,
+ "logps/rejected": -307.75726318359375,
+ "loss": 0.5811,
+ "rewards/accuracies": 0.7406250238418579,
+ "rewards/chosen": 0.006648591253906488,
+ "rewards/margins": 0.3918178081512451,
+ "rewards/rejected": -0.38516920804977417,
  "step": 40
  },
  {
- "epoch": 0.33,
- "grad_norm": 1135.1845096891816,
- "learning_rate": 9.86522435289912e-09,
- "logits/chosen": -4.197329044342041,
- "logits/rejected": -4.366620063781738,
- "logps/chosen": -333.64678955078125,
- "logps/rejected": -309.0525817871094,
- "loss": 0.7189,
- "rewards/accuracies": 0.515625,
- "rewards/chosen": 0.04922889173030853,
- "rewards/margins": 0.008506924845278263,
- "rewards/rejected": 0.040721967816352844,
+ "epoch": 0.27,
+ "grad_norm": 334.47329593106986,
+ "learning_rate": 4.5668885883990063e-08,
+ "logits/chosen": -0.8879793286323547,
+ "logits/rejected": -0.4423498511314392,
+ "logps/chosen": -332.4192199707031,
+ "logps/rejected": -296.57025146484375,
+ "loss": 0.5272,
+ "rewards/accuracies": 0.78125,
+ "rewards/chosen": 0.24052493274211884,
+ "rewards/margins": 0.5968745946884155,
+ "rewards/rejected": -0.3563496172428131,
  "step": 50
  },
  {
- "epoch": 0.4,
- "grad_norm": 1124.4015217482226,
- "learning_rate": 9.698463103929542e-09,
- "logits/chosen": -4.186924457550049,
- "logits/rejected": -4.269418239593506,
- "logps/chosen": -323.7723693847656,
- "logps/rejected": -308.26971435546875,
- "loss": 0.7082,
- "rewards/accuracies": 0.5249999761581421,
- "rewards/chosen": 0.11511299759149551,
- "rewards/margins": 0.052407026290893555,
- "rewards/rejected": 0.06270597130060196,
+ "epoch": 0.33,
+ "grad_norm": 339.682806652983,
+ "learning_rate": 4.2592286678719626e-08,
+ "logits/chosen": -0.8047763109207153,
+ "logits/rejected": -0.4120696485042572,
+ "logps/chosen": -330.0352478027344,
+ "logps/rejected": -295.6581726074219,
+ "loss": 0.5016,
+ "rewards/accuracies": 0.7718750238418579,
+ "rewards/chosen": 0.5128915905952454,
+ "rewards/margins": 0.7231279015541077,
+ "rewards/rejected": -0.21023626625537872,
  "step": 60
  },
  {
- "epoch": 0.47,
- "grad_norm": 1133.1247984062034,
- "learning_rate": 9.468163201617062e-09,
- "logits/chosen": -4.147147178649902,
- "logits/rejected": -4.3019304275512695,
- "logps/chosen": -344.58563232421875,
- "logps/rejected": -314.4212951660156,
- "loss": 0.7005,
- "rewards/accuracies": 0.590624988079071,
- "rewards/chosen": 0.23254117369651794,
- "rewards/margins": 0.13268980383872986,
- "rewards/rejected": 0.09985135495662689,
+ "epoch": 0.38,
+ "grad_norm": 294.8745864628234,
+ "learning_rate": 3.886420608016766e-08,
+ "logits/chosen": -1.0426691770553589,
+ "logits/rejected": -0.600997805595398,
+ "logps/chosen": -348.37408447265625,
+ "logps/rejected": -304.3432922363281,
+ "loss": 0.4851,
+ "rewards/accuracies": 0.753125011920929,
+ "rewards/chosen": 0.823379397392273,
+ "rewards/margins": 0.7953158617019653,
+ "rewards/rejected": 0.02806355059146881,
  "step": 70
  },
  {
- "epoch": 0.53,
- "grad_norm": 1057.1503651106625,
- "learning_rate": 9.177439057064682e-09,
- "logits/chosen": -4.223555564880371,
- "logits/rejected": -4.389444828033447,
- "logps/chosen": -334.3599853515625,
- "logps/rejected": -305.4422607421875,
- "loss": 0.6844,
- "rewards/accuracies": 0.59375,
- "rewards/chosen": 0.23962649703025818,
- "rewards/margins": 0.09747296571731567,
- "rewards/rejected": 0.1421535313129425,
+ "epoch": 0.44,
+ "grad_norm": 272.6474819647033,
+ "learning_rate": 3.462270318133136e-08,
+ "logits/chosen": -0.9253625869750977,
+ "logits/rejected": -0.3548746705055237,
+ "logps/chosen": -341.5060119628906,
+ "logps/rejected": -294.5276794433594,
+ "loss": 0.4535,
+ "rewards/accuracies": 0.815625011920929,
+ "rewards/chosen": 0.7518247961997986,
+ "rewards/margins": 0.94850093126297,
+ "rewards/rejected": -0.19667614996433258,
  "step": 80
  },
  {
- "epoch": 0.6,
- "grad_norm": 1055.57995971272,
- "learning_rate": 8.830222215594889e-09,
- "logits/chosen": -4.2292633056640625,
- "logits/rejected": -4.349828243255615,
- "logps/chosen": -327.07330322265625,
- "logps/rejected": -307.81707763671875,
- "loss": 0.6772,
- "rewards/accuracies": 0.5562499761581421,
- "rewards/chosen": 0.2870681881904602,
- "rewards/margins": 0.06986425817012787,
- "rewards/rejected": 0.21720390021800995,
+ "epoch": 0.49,
+ "grad_norm": 281.1147370449449,
+ "learning_rate": 3.0024850241696124e-08,
+ "logits/chosen": -1.0233888626098633,
+ "logits/rejected": -0.6329795122146606,
+ "logps/chosen": -337.8768310546875,
+ "logps/rejected": -307.80621337890625,
+ "loss": 0.4505,
+ "rewards/accuracies": 0.796875,
+ "rewards/chosen": 0.9118101000785828,
+ "rewards/margins": 1.0233700275421143,
+ "rewards/rejected": -0.1115599274635315,
  "step": 90
  },
  {
- "epoch": 0.67,
- "grad_norm": 1124.231150209405,
- "learning_rate": 8.431208189343668e-09,
- "logits/chosen": -4.214221000671387,
- "logits/rejected": -4.424824237823486,
- "logps/chosen": -320.45928955078125,
- "logps/rejected": -289.71466064453125,
- "loss": 0.6738,
- "rewards/accuracies": 0.581250011920929,
- "rewards/chosen": 0.3042110502719879,
- "rewards/margins": 0.12749677896499634,
- "rewards/rejected": 0.1767142415046692,
+ "epoch": 0.55,
+ "grad_norm": 270.95326246127075,
+ "learning_rate": 2.5240915953099518e-08,
+ "logits/chosen": -0.8435468673706055,
+ "logits/rejected": -0.528495192527771,
+ "logps/chosen": -316.45147705078125,
+ "logps/rejected": -284.6639099121094,
+ "loss": 0.434,
+ "rewards/accuracies": 0.7875000238418579,
+ "rewards/chosen": 0.6949013471603394,
+ "rewards/margins": 0.9432865381240845,
+ "rewards/rejected": -0.24838528037071228,
  "step": 100
  },
  {
- "epoch": 0.73,
- "grad_norm": 1045.9935050945592,
- "learning_rate": 7.98579295851393e-09,
- "logits/chosen": -4.233684062957764,
- "logits/rejected": -4.286379814147949,
- "logps/chosen": -314.39251708984375,
- "logps/rejected": -303.2556457519531,
- "loss": 0.6623,
- "rewards/accuracies": 0.5687500238418579,
- "rewards/chosen": 0.3848657011985779,
- "rewards/margins": 0.10032075643539429,
- "rewards/rejected": 0.2845449447631836,
+ "epoch": 0.6,
+ "grad_norm": 246.8924504601372,
+ "learning_rate": 2.0448060012437142e-08,
+ "logits/chosen": -0.7631230354309082,
+ "logits/rejected": -0.46078410744667053,
+ "logps/chosen": -332.81842041015625,
+ "logps/rejected": -302.547119140625,
+ "loss": 0.4323,
+ "rewards/accuracies": 0.8125,
+ "rewards/chosen": 0.88267582654953,
+ "rewards/margins": 1.0535722970962524,
+ "rewards/rejected": -0.17089635133743286,
  "step": 110
  },
  {
- "epoch": 0.8,
- "grad_norm": 1107.1293654591734,
- "learning_rate": 7.500000000000001e-09,
- "logits/chosen": -4.151052951812744,
- "logits/rejected": -4.307383060455322,
- "logps/chosen": -338.46759033203125,
- "logps/rejected": -313.0832214355469,
- "loss": 0.6634,
- "rewards/accuracies": 0.625,
- "rewards/chosen": 0.49397316575050354,
- "rewards/margins": 0.20816774666309357,
- "rewards/rejected": 0.2858053743839264,
+ "epoch": 0.66,
+ "grad_norm": 249.2911347992777,
+ "learning_rate": 1.5823772505167697e-08,
+ "logits/chosen": -0.8469772338867188,
+ "logits/rejected": -0.47505950927734375,
+ "logps/chosen": -345.8771667480469,
+ "logps/rejected": -308.70025634765625,
+ "loss": 0.421,
+ "rewards/accuracies": 0.8343750238418579,
+ "rewards/chosen": 1.0309433937072754,
+ "rewards/margins": 1.2240039110183716,
+ "rewards/rejected": -0.19306042790412903,
  "step": 120
  },
  {
- "epoch": 0.87,
- "grad_norm": 1113.1665283582252,
- "learning_rate": 6.980398830195784e-09,
- "logits/chosen": -4.174288272857666,
- "logits/rejected": -4.36756706237793,
- "logps/chosen": -323.51800537109375,
- "logps/rejected": -304.7021484375,
- "loss": 0.6392,
- "rewards/accuracies": 0.637499988079071,
- "rewards/chosen": 0.5511430501937866,
- "rewards/margins": 0.23519937694072723,
- "rewards/rejected": 0.31594371795654297,
+ "epoch": 0.71,
+ "grad_norm": 236.63634286693804,
+ "learning_rate": 1.1539301053796948e-08,
+ "logits/chosen": -0.8259572982788086,
+ "logits/rejected": -0.48908624053001404,
+ "logps/chosen": -326.006591796875,
+ "logps/rejected": -298.5427551269531,
+ "loss": 0.4317,
+ "rewards/accuracies": 0.7406250238418579,
+ "rewards/chosen": 0.7692808508872986,
+ "rewards/margins": 0.9658697843551636,
+ "rewards/rejected": -0.1965889036655426,
  "step": 130
  },
  {
- "epoch": 0.93,
- "grad_norm": 1022.5201031232197,
- "learning_rate": 6.434016163555451e-09,
- "logits/chosen": -4.292388916015625,
- "logits/rejected": -4.358359336853027,
- "logps/chosen": -307.1917419433594,
- "logps/rejected": -296.78192138671875,
- "loss": 0.6402,
- "rewards/accuracies": 0.6000000238418579,
- "rewards/chosen": 0.5292906761169434,
- "rewards/margins": 0.16237936913967133,
- "rewards/rejected": 0.36691129207611084,
+ "epoch": 0.77,
+ "grad_norm": 277.9055914170381,
+ "learning_rate": 7.753309138609703e-09,
+ "logits/chosen": -1.0100657939910889,
+ "logits/rejected": -0.518832266330719,
+ "logps/chosen": -335.1830749511719,
+ "logps/rejected": -293.55389404296875,
+ "loss": 0.4203,
+ "rewards/accuracies": 0.7906249761581421,
+ "rewards/chosen": 0.938341498374939,
+ "rewards/margins": 1.1349132061004639,
+ "rewards/rejected": -0.19657166302204132,
  "step": 140
  },
  {
- "epoch": 1.0,
- "grad_norm": 1051.3521300860295,
- "learning_rate": 5.868240888334653e-09,
- "logits/chosen": -4.260380268096924,
- "logits/rejected": -4.333888053894043,
- "logps/chosen": -311.844482421875,
- "logps/rejected": -299.74261474609375,
- "loss": 0.6387,
- "rewards/accuracies": 0.6187499761581421,
- "rewards/chosen": 0.5666605830192566,
- "rewards/margins": 0.1995116025209427,
- "rewards/rejected": 0.3671489655971527,
+ "epoch": 0.82,
+ "grad_norm": 280.40678633901416,
+ "learning_rate": 4.606000437098476e-09,
+ "logits/chosen": -0.9020172953605652,
+ "logits/rejected": -0.5801359415054321,
+ "logps/chosen": -328.5782775878906,
+ "logps/rejected": -298.58416748046875,
+ "loss": 0.417,
+ "rewards/accuracies": 0.768750011920929,
+ "rewards/chosen": 0.8313944935798645,
+ "rewards/margins": 1.0033814907073975,
+ "rewards/rejected": -0.1719868779182434,
  "step": 150
  },
  {
- "epoch": 1.06,
- "grad_norm": 1029.1548288848146,
- "learning_rate": 5.290724144552379e-09,
- "logits/chosen": -4.191515922546387,
- "logits/rejected": -4.348960876464844,
- "logps/chosen": -326.90582275390625,
- "logps/rejected": -305.37689208984375,
- "loss": 0.6032,
- "rewards/accuracies": 0.6937500238418579,
- "rewards/chosen": 0.6744669079780579,
- "rewards/margins": 0.37094420194625854,
- "rewards/rejected": 0.3035227358341217,
+ "epoch": 0.88,
+ "grad_norm": 281.95412461514485,
+ "learning_rate": 2.2139267708310455e-09,
+ "logits/chosen": -0.941261887550354,
+ "logits/rejected": -0.5286608934402466,
+ "logps/chosen": -337.65704345703125,
+ "logps/rejected": -302.15557861328125,
+ "loss": 0.4195,
+ "rewards/accuracies": 0.831250011920929,
+ "rewards/chosen": 1.0939249992370605,
+ "rewards/margins": 1.298884391784668,
+ "rewards/rejected": -0.20495939254760742,
  "step": 160
  },
  {
- "epoch": 1.13,
- "grad_norm": 991.2604083787021,
- "learning_rate": 4.709275855447621e-09,
- "logits/chosen": -4.241927623748779,
- "logits/rejected": -4.339847564697266,
- "logps/chosen": -314.4874572753906,
- "logps/rejected": -295.49151611328125,
- "loss": 0.6151,
- "rewards/accuracies": 0.6937500238418579,
- "rewards/chosen": 0.6504887938499451,
- "rewards/margins": 0.3018999397754669,
- "rewards/rejected": 0.3485889136791229,
+ "epoch": 0.93,
+ "grad_norm": 257.0608061838707,
+ "learning_rate": 6.656719329999699e-10,
+ "logits/chosen": -0.6665180921554565,
+ "logits/rejected": -0.36475566029548645,
+ "logps/chosen": -323.7983703613281,
+ "logps/rejected": -292.5258483886719,
+ "loss": 0.4248,
+ "rewards/accuracies": 0.8343750238418579,
+ "rewards/chosen": 1.0974645614624023,
+ "rewards/margins": 1.273900032043457,
+ "rewards/rejected": -0.1764354258775711,
  "step": 170
  },
  {
- "epoch": 1.2,
- "grad_norm": 976.0494957890444,
- "learning_rate": 4.131759111665349e-09,
- "logits/chosen": -4.174123287200928,
- "logits/rejected": -4.313396453857422,
- "logps/chosen": -325.78045654296875,
- "logps/rejected": -304.82989501953125,
- "loss": 0.6147,
- "rewards/accuracies": 0.6656249761581421,
- "rewards/chosen": 0.6252425909042358,
- "rewards/margins": 0.27976280450820923,
- "rewards/rejected": 0.3454797863960266,
+ "epoch": 0.99,
+ "grad_norm": 245.05015884181918,
+ "learning_rate": 1.8571234092507048e-11,
+ "logits/chosen": -0.8604642152786255,
+ "logits/rejected": -0.526264488697052,
+ "logps/chosen": -320.47540283203125,
+ "logps/rejected": -290.77569580078125,
+ "loss": 0.4234,
+ "rewards/accuracies": 0.856249988079071,
+ "rewards/chosen": 0.9940341711044312,
+ "rewards/margins": 1.222551941871643,
+ "rewards/rejected": -0.22851786017417908,
  "step": 180
  },
  {
- "epoch": 1.26,
- "grad_norm": 949.77837430784,
- "learning_rate": 3.56598383644455e-09,
- "logits/chosen": -4.231461524963379,
- "logits/rejected": -4.367356777191162,
- "logps/chosen": -326.8230895996094,
- "logps/rejected": -299.7027587890625,
- "loss": 0.5969,
- "rewards/accuracies": 0.6968749761581421,
- "rewards/chosen": 0.7501948475837708,
- "rewards/margins": 0.35921385884284973,
- "rewards/rejected": 0.39098095893859863,
- "step": 190
- },
- {
- "epoch": 1.33,
- "grad_norm": 963.9694294192556,
- "learning_rate": 3.0196011698042157e-09,
- "logits/chosen": -4.249307155609131,
- "logits/rejected": -4.317980766296387,
- "logps/chosen": -313.9615783691406,
- "logps/rejected": -293.1615905761719,
- "loss": 0.5922,
- "rewards/accuracies": 0.6781250238418579,
- "rewards/chosen": 0.7425140142440796,
- "rewards/margins": 0.3114904761314392,
- "rewards/rejected": 0.4310235381126404,
- "step": 200
- },
- {
- "epoch": 1.4,
- "grad_norm": 1038.4899227109647,
- "learning_rate": 2.5000000000000013e-09,
- "logits/chosen": -4.174517631530762,
- "logits/rejected": -4.288783073425293,
- "logps/chosen": -327.461181640625,
- "logps/rejected": -304.9690246582031,
- "loss": 0.5952,
- "rewards/accuracies": 0.699999988079071,
- "rewards/chosen": 0.7787834405899048,
- "rewards/margins": 0.3926675319671631,
- "rewards/rejected": 0.3861159086227417,
- "step": 210
- },
- {
- "epoch": 1.46,
- "grad_norm": 992.6867793357102,
- "learning_rate": 2.0142070414860704e-09,
- "logits/chosen": -4.165514945983887,
- "logits/rejected": -4.316833019256592,
- "logps/chosen": -358.1416931152344,
- "logps/rejected": -323.22998046875,
- "loss": 0.5994,
- "rewards/accuracies": 0.6781250238418579,
- "rewards/chosen": 0.8851108551025391,
- "rewards/margins": 0.35754603147506714,
- "rewards/rejected": 0.5275647640228271,
- "step": 220
- },
- {
- "epoch": 1.53,
- "grad_norm": 918.0076197376386,
- "learning_rate": 1.5687918106563326e-09,
- "logits/chosen": -4.234222412109375,
- "logits/rejected": -4.369565486907959,
- "logps/chosen": -328.3675537109375,
- "logps/rejected": -303.726806640625,
- "loss": 0.5919,
- "rewards/accuracies": 0.65625,
- "rewards/chosen": 0.8034540414810181,
- "rewards/margins": 0.2974655032157898,
- "rewards/rejected": 0.5059884786605835,
- "step": 230
- },
- {
- "epoch": 1.6,
- "grad_norm": 944.3107996293463,
- "learning_rate": 1.1697777844051105e-09,
- "logits/chosen": -4.202380180358887,
- "logits/rejected": -4.353602409362793,
- "logps/chosen": -326.75604248046875,
- "logps/rejected": -297.03619384765625,
- "loss": 0.588,
- "rewards/accuracies": 0.659375011920929,
- "rewards/chosen": 0.8500032424926758,
- "rewards/margins": 0.3546527922153473,
- "rewards/rejected": 0.4953504502773285,
- "step": 240
- },
- {
- "epoch": 1.66,
- "grad_norm": 944.2690060176395,
- "learning_rate": 8.225609429353187e-10,
- "logits/chosen": -4.173297882080078,
- "logits/rejected": -4.368635654449463,
- "logps/chosen": -321.2724914550781,
- "logps/rejected": -293.50311279296875,
- "loss": 0.5856,
- "rewards/accuracies": 0.731249988079071,
- "rewards/chosen": 0.8207529783248901,
- "rewards/margins": 0.40889453887939453,
- "rewards/rejected": 0.4118584990501404,
- "step": 250
- },
- {
- "epoch": 1.73,
- "grad_norm": 957.8767493880096,
- "learning_rate": 5.318367983829391e-10,
- "logits/chosen": -4.18589448928833,
- "logits/rejected": -4.284361839294434,
- "logps/chosen": -348.6470031738281,
- "logps/rejected": -326.4360046386719,
- "loss": 0.5895,
- "rewards/accuracies": 0.690625011920929,
- "rewards/chosen": 0.8914071917533875,
- "rewards/margins": 0.37524908781051636,
- "rewards/rejected": 0.5161582231521606,
- "step": 260
- },
- {
- "epoch": 1.8,
- "grad_norm": 949.249423709826,
- "learning_rate": 3.015368960704584e-10,
- "logits/chosen": -4.271857738494873,
- "logits/rejected": -4.4089555740356445,
- "logps/chosen": -325.19049072265625,
- "logps/rejected": -302.7436828613281,
- "loss": 0.5938,
- "rewards/accuracies": 0.6781250238418579,
- "rewards/chosen": 0.9044697880744934,
- "rewards/margins": 0.38229116797447205,
- "rewards/rejected": 0.522178590297699,
- "step": 270
- },
- {
- "epoch": 1.86,
- "grad_norm": 932.1311904270524,
- "learning_rate": 1.3477564710088098e-10,
- "logits/chosen": -4.163296222686768,
- "logits/rejected": -4.306557655334473,
- "logps/chosen": -327.57257080078125,
- "logps/rejected": -306.2264099121094,
- "loss": 0.5865,
- "rewards/accuracies": 0.715624988079071,
- "rewards/chosen": 0.8713304400444031,
- "rewards/margins": 0.4181024134159088,
- "rewards/rejected": 0.45322805643081665,
- "step": 280
- },
- {
- "epoch": 1.93,
- "grad_norm": 884.8124763448137,
- "learning_rate": 3.380821129028488e-11,
- "logits/chosen": -4.1040120124816895,
- "logits/rejected": -4.260913372039795,
- "logps/chosen": -334.97686767578125,
- "logps/rejected": -312.01617431640625,
- "loss": 0.5924,
- "rewards/accuracies": 0.6812499761581421,
- "rewards/chosen": 0.9016163945198059,
- "rewards/margins": 0.4117993414402008,
- "rewards/rejected": 0.48981720209121704,
- "step": 290
- },
- {
- "epoch": 2.0,
- "grad_norm": 959.7419745329076,
- "learning_rate": 0.0,
- "logits/chosen": -4.31270170211792,
- "logits/rejected": -4.468858242034912,
- "logps/chosen": -314.4400939941406,
- "logps/rejected": -284.8276672363281,
- "loss": 0.5825,
- "rewards/accuracies": 0.6875,
- "rewards/chosen": 0.82757169008255,
- "rewards/margins": 0.36657077074050903,
- "rewards/rejected": 0.4610009789466858,
- "step": 300
- },
- {
- "epoch": 2.0,
- "step": 300,
+ "epoch": 1.0,
+ "step": 182,
  "total_flos": 0.0,
- "train_loss": 0.6411590957641602,
- "train_runtime": 8942.072,
- "train_samples_per_second": 8.599,
+ "train_loss": 0.49109079287602353,
+ "train_runtime": 5330.839,
+ "train_samples_per_second": 8.755,
  "train_steps_per_second": 0.034
  }
  ],
  "logging_steps": 10,
- "max_steps": 300,
+ "max_steps": 182,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 2,
+ "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fe7ad1d1edbbc30b461a2666bb026f02d74c0c127226e41e03c561f18f18d157
+ oid sha256:511f849ca93a0f3688a329d8a84efe863ebada7f409c9ad22d389e79b9db4345
  size 6328
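Only the hash of training_args.bin changes (the size is identical); the file stores the pickled TrainingArguments. A sketch for inspecting a locally downloaded copy — unpickling executes arbitrary code, so only do this for a repository you trust:

```python
# Sketch: read back the saved TrainingArguments from training_args.bin.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate)       # expected 5e-08 after this commit
print(args.num_train_epochs)    # expected 1
print(args.lr_scheduler_type)   # cosine scheduler
```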