kobzaond committed
Commit 33a5194 · 1 Parent(s): 0818fcc

Initial upload of alquistcoder_F1_MAIN_DPO model files

model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81ace2f1dc017ba41ec9c842af014cea1a4317a7bcbf07a3c57cbb5ff62fe02f
+ size 4975120816
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4173f1d5bc73b6d7b9291defdd8112d3526c7697c029fa3845b9bc4b742277e4
+ size 4832141488
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7c87ffc81fa842dbfcd00f2951470b38abcaca8ab55d54b62840fdaac00c126
+ size 4832141512
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e221988b20ec2da09324c678a57f4f7b8631ca2f6348c952ebd524d5de3c5ef
+ size 3163092328
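
The four entries above are Git LFS pointer files rather than the weights themselves: each records the LFS spec version, the SHA-256 oid of the actual shard, and its size in bytes. A minimal sketch of checking a downloaded shard against its pointer fields (the local path and the helper name are assumptions for illustration, not part of this repo):

import hashlib
from pathlib import Path

def verify_lfs_pointer(blob_path, expected_sha256, expected_size):
    # Compare a downloaded file against the oid/size recorded in its LFS pointer.
    path = Path(blob_path)
    if path.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256

# First shard from this commit; the local filename is assumed to match the repo path.
print(verify_lfs_pointer(
    "model-00001-of-00004.safetensors",
    "81ace2f1dc017ba41ec9c842af014cea1a4317a7bcbf07a3c57cbb5ff62fe02f",
    4975120816,
))
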
trainer_state.json CHANGED
@@ -2,543 +2,288 @@
2
  "best_global_step": null,
3
  "best_metric": null,
4
  "best_model_checkpoint": null,
5
- "epoch": 2.0,
6
  "eval_steps": 500,
7
- "global_step": 356,
8
  "is_hyper_param_search": false,
9
  "is_local_process_zero": true,
10
  "is_world_process_zero": true,
11
  "log_history": [
12
  {
13
- "epoch": 0.056338028169014086,
14
- "grad_norm": 574.27978515625,
15
- "learning_rate": 6.25e-07,
16
- "logits/chosen": 2.945640802383423,
17
- "logits/rejected": 3.0515265464782715,
18
- "logps/chosen": -246.39541625976562,
19
- "logps/rejected": -195.46109008789062,
20
- "loss": 0.9304,
21
- "rewards/accuracies": 0.5218750238418579,
22
- "rewards/chosen": 0.03325582295656204,
23
- "rewards/margins": 0.014068150892853737,
24
- "rewards/rejected": 0.019187677651643753,
25
  "step": 10
26
  },
27
  {
28
- "epoch": 0.11267605633802817,
29
- "grad_norm": 267.4449462890625,
30
- "learning_rate": 1.3194444444444446e-06,
31
- "logits/chosen": 3.0365357398986816,
32
- "logits/rejected": 3.0756676197052,
33
- "logps/chosen": -231.8340606689453,
34
- "logps/rejected": -196.01499938964844,
35
- "loss": 0.5293,
36
- "rewards/accuracies": 0.737500011920929,
37
- "rewards/chosen": 1.023498773574829,
38
- "rewards/margins": 1.6332218647003174,
39
- "rewards/rejected": -0.6097229719161987,
40
  "step": 20
41
  },
42
  {
43
- "epoch": 0.16901408450704225,
44
- "grad_norm": 168.65689086914062,
45
- "learning_rate": 2.0138888888888893e-06,
46
- "logits/chosen": 3.038414478302002,
47
- "logits/rejected": 3.0314807891845703,
48
- "logps/chosen": -265.27825927734375,
49
- "logps/rejected": -199.1730499267578,
50
- "loss": 0.1929,
51
- "rewards/accuracies": 0.925000011920929,
52
- "rewards/chosen": 4.1361799240112305,
53
- "rewards/margins": 6.971987724304199,
54
- "rewards/rejected": -2.835808277130127,
55
  "step": 30
56
  },
57
  {
58
- "epoch": 0.22535211267605634,
59
- "grad_norm": 37.93267822265625,
60
- "learning_rate": 2.7083333333333334e-06,
61
- "logits/chosen": 3.0692856311798096,
62
- "logits/rejected": 3.1041903495788574,
63
- "logps/chosen": -245.3774871826172,
64
- "logps/rejected": -207.87789916992188,
65
- "loss": 0.1002,
66
- "rewards/accuracies": 0.9671875238418579,
67
- "rewards/chosen": 6.395417213439941,
68
- "rewards/margins": 13.0038480758667,
69
- "rewards/rejected": -6.608429908752441,
70
  "step": 40
71
  },
72
  {
73
- "epoch": 0.28169014084507044,
74
- "grad_norm": 129.71878051757812,
75
- "learning_rate": 3.4027777777777783e-06,
76
- "logits/chosen": 2.943250894546509,
77
- "logits/rejected": 2.9869542121887207,
78
- "logps/chosen": -245.9439239501953,
79
- "logps/rejected": -202.8123321533203,
80
- "loss": 0.1375,
81
- "rewards/accuracies": 0.973437488079071,
82
- "rewards/chosen": 7.993535041809082,
83
- "rewards/margins": 19.0964412689209,
84
- "rewards/rejected": -11.102909088134766,
85
  "step": 50
86
  },
87
  {
88
- "epoch": 0.3380281690140845,
89
- "grad_norm": 57.64502716064453,
90
- "learning_rate": 4.097222222222222e-06,
91
- "logits/chosen": 3.0049643516540527,
92
- "logits/rejected": 3.0770554542541504,
93
- "logps/chosen": -236.73410034179688,
94
- "logps/rejected": -210.151611328125,
95
- "loss": 0.1722,
96
- "rewards/accuracies": 0.965624988079071,
97
- "rewards/chosen": 9.21152114868164,
98
- "rewards/margins": 22.748985290527344,
99
- "rewards/rejected": -13.53746509552002,
100
  "step": 60
101
  },
102
  {
103
- "epoch": 0.39436619718309857,
104
- "grad_norm": 105.868408203125,
105
- "learning_rate": 4.791666666666668e-06,
106
- "logits/chosen": 3.0406033992767334,
107
- "logits/rejected": 3.0739083290100098,
108
- "logps/chosen": -256.1812744140625,
109
- "logps/rejected": -204.35446166992188,
110
- "loss": 0.1129,
111
- "rewards/accuracies": 0.979687511920929,
112
- "rewards/chosen": 8.951081275939941,
113
- "rewards/margins": 21.80655860900879,
114
- "rewards/rejected": -12.85547924041748,
115
  "step": 70
116
  },
117
  {
118
- "epoch": 0.4507042253521127,
119
- "grad_norm": 168.75357055664062,
120
- "learning_rate": 4.998524282731094e-06,
121
- "logits/chosen": 3.0504136085510254,
122
- "logits/rejected": 3.0491514205932617,
123
- "logps/chosen": -255.90316772460938,
124
- "logps/rejected": -206.5497283935547,
125
- "loss": 0.3368,
126
- "rewards/accuracies": 0.96875,
127
- "rewards/chosen": 8.498771667480469,
128
- "rewards/margins": 22.880569458007812,
129
- "rewards/rejected": -14.381797790527344,
130
  "step": 80
131
  },
132
  {
133
- "epoch": 0.5070422535211268,
134
- "grad_norm": 109.43980407714844,
135
- "learning_rate": 4.991300473502437e-06,
136
- "logits/chosen": 3.0595602989196777,
137
- "logits/rejected": 3.1263351440429688,
138
- "logps/chosen": -228.88070678710938,
139
- "logps/rejected": -218.99783325195312,
140
- "loss": 0.3032,
141
- "rewards/accuracies": 0.9624999761581421,
142
- "rewards/chosen": 6.379992485046387,
143
- "rewards/margins": 28.722070693969727,
144
- "rewards/rejected": -22.34208106994629,
145
  "step": 90
146
  },
147
  {
148
- "epoch": 0.5633802816901409,
149
- "grad_norm": 128.98760986328125,
150
- "learning_rate": 4.978074903220964e-06,
151
- "logits/chosen": 3.1774604320526123,
152
- "logits/rejected": 3.181511402130127,
153
- "logps/chosen": -228.3056640625,
154
- "logps/rejected": -222.18038940429688,
155
- "loss": 0.2029,
156
- "rewards/accuracies": 0.9781249761581421,
157
- "rewards/chosen": 5.856657028198242,
158
- "rewards/margins": 34.18266677856445,
159
- "rewards/rejected": -28.326007843017578,
160
  "step": 100
161
  },
162
  {
163
- "epoch": 0.6197183098591549,
164
- "grad_norm": 144.1073455810547,
165
- "learning_rate": 4.958879433443904e-06,
166
- "logits/chosen": 3.218785047531128,
167
- "logits/rejected": 3.1809394359588623,
168
- "logps/chosen": -243.4155731201172,
169
- "logps/rejected": -236.49868774414062,
170
- "loss": 0.2134,
171
  "rewards/accuracies": 0.9765625,
172
- "rewards/chosen": 6.719731330871582,
173
- "rewards/margins": 35.10533905029297,
174
- "rewards/rejected": -28.385604858398438,
175
  "step": 110
176
  },
177
  {
178
- "epoch": 0.676056338028169,
179
- "grad_norm": 149.68939208984375,
180
- "learning_rate": 4.933760307739277e-06,
181
- "logits/chosen": 3.1437926292419434,
182
- "logits/rejected": 3.1433277130126953,
183
- "logps/chosen": -255.87796020507812,
184
- "logps/rejected": -231.52182006835938,
185
- "loss": 0.2745,
186
- "rewards/accuracies": 0.981249988079071,
187
- "rewards/chosen": 6.636951446533203,
188
- "rewards/margins": 37.604583740234375,
189
- "rewards/rejected": -30.967632293701172,
190
  "step": 120
191
  },
192
  {
193
- "epoch": 0.7323943661971831,
194
- "grad_norm": 171.0680389404297,
195
- "learning_rate": 4.90277804028108e-06,
196
- "logits/chosen": 3.206712245941162,
197
- "logits/rejected": 3.2256839275360107,
198
- "logps/chosen": -261.3406982421875,
199
- "logps/rejected": -228.3511505126953,
200
- "loss": 0.5352,
201
- "rewards/accuracies": 0.964062511920929,
202
- "rewards/chosen": 4.778416633605957,
203
- "rewards/margins": 33.36115646362305,
204
- "rewards/rejected": -28.58274269104004,
205
  "step": 130
206
  },
207
  {
208
- "epoch": 0.7887323943661971,
209
- "grad_norm": 118.76382446289062,
210
- "learning_rate": 4.866007270065345e-06,
211
- "logits/chosen": 3.2129406929016113,
212
- "logits/rejected": 3.227287769317627,
213
- "logps/chosen": -246.7953643798828,
214
- "logps/rejected": -230.45651245117188,
215
- "loss": 0.2311,
216
- "rewards/accuracies": 0.9765625,
217
- "rewards/chosen": 0.6871539354324341,
218
- "rewards/margins": 36.90045166015625,
219
- "rewards/rejected": -36.213294982910156,
220
  "step": 140
221
  },
222
  {
223
- "epoch": 0.8450704225352113,
224
- "grad_norm": 198.268310546875,
225
- "learning_rate": 4.823536581098262e-06,
226
- "logits/chosen": 3.1713156700134277,
227
- "logits/rejected": 3.152228355407715,
228
- "logps/chosen": -238.29159545898438,
229
- "logps/rejected": -238.2996826171875,
230
- "loss": 0.2612,
231
- "rewards/accuracies": 0.96875,
232
- "rewards/chosen": 1.5464673042297363,
233
- "rewards/margins": 41.4525146484375,
234
- "rewards/rejected": -39.90605163574219,
235
  "step": 150
236
  },
237
  {
238
- "epoch": 0.9014084507042254,
239
- "grad_norm": 191.44915771484375,
240
- "learning_rate": 4.775468288989545e-06,
241
- "logits/chosen": 3.2888169288635254,
242
- "logits/rejected": 3.2170681953430176,
243
- "logps/chosen": -251.0401153564453,
244
- "logps/rejected": -236.00479125976562,
245
- "loss": 0.51,
246
- "rewards/accuracies": 0.971875011920929,
247
- "rewards/chosen": 2.7297756671905518,
248
- "rewards/margins": 42.41936492919922,
249
- "rewards/rejected": -39.68959426879883,
250
  "step": 160
251
  },
252
  {
253
- "epoch": 0.9577464788732394,
254
- "grad_norm": 73.10994720458984,
255
- "learning_rate": 4.7219181944651695e-06,
256
- "logits/chosen": 3.2599377632141113,
257
- "logits/rejected": 3.2560737133026123,
258
- "logps/chosen": -245.1952667236328,
259
- "logps/rejected": -241.4735565185547,
260
- "loss": 0.313,
261
- "rewards/accuracies": 0.973437488079071,
262
- "rewards/chosen": -0.8346878290176392,
263
- "rewards/margins": 44.581295013427734,
264
- "rewards/rejected": -45.415985107421875,
265
  "step": 170
266
  },
267
  {
268
- "epoch": 1.0112676056338028,
269
- "grad_norm": 71.17866516113281,
270
- "learning_rate": 4.663015304393279e-06,
271
- "logits/chosen": 3.3155758380889893,
272
- "logits/rejected": 3.3333241939544678,
273
- "logps/chosen": -270.6006774902344,
274
- "logps/rejected": -241.6360321044922,
275
- "loss": 0.338,
276
- "rewards/accuracies": 0.9736841917037964,
277
- "rewards/chosen": 0.171695277094841,
278
- "rewards/margins": 45.20180130004883,
279
- "rewards/rejected": -45.03010177612305,
280
  "step": 180
281
- },
282
- {
283
- "epoch": 1.0676056338028168,
284
- "grad_norm": 113.73174285888672,
285
- "learning_rate": 4.59890152099534e-06,
286
- "logits/chosen": 3.508065700531006,
287
- "logits/rejected": 3.5083301067352295,
288
- "logps/chosen": -266.78387451171875,
289
- "logps/rejected": -252.5698699951172,
290
- "loss": 0.8076,
291
- "rewards/accuracies": 0.964062511920929,
292
- "rewards/chosen": 10.96754264831543,
293
- "rewards/margins": 63.106483459472656,
294
- "rewards/rejected": -52.138938903808594,
295
- "step": 190
296
- },
297
- {
298
- "epoch": 1.123943661971831,
299
- "grad_norm": 0.890201985836029,
300
- "learning_rate": 4.5297312999912625e-06,
301
- "logits/chosen": 3.5750420093536377,
302
- "logits/rejected": 3.5598015785217285,
303
- "logps/chosen": -244.08615112304688,
304
- "logps/rejected": -249.61984252929688,
305
- "loss": 0.4625,
306
- "rewards/accuracies": 0.9765625,
307
- "rewards/chosen": 8.597976684570312,
308
- "rewards/margins": 65.22010803222656,
309
- "rewards/rejected": -56.62213897705078,
310
- "step": 200
311
- },
312
- {
313
- "epoch": 1.180281690140845,
314
- "grad_norm": 204.70803833007812,
315
- "learning_rate": 4.455671278502042e-06,
316
- "logits/chosen": 3.6519992351531982,
317
- "logits/rejected": 3.6381404399871826,
318
- "logps/chosen": -240.0857696533203,
319
- "logps/rejected": -238.0441131591797,
320
- "loss": 0.653,
321
- "rewards/accuracies": 0.9546874761581421,
322
- "rewards/chosen": 6.305617332458496,
323
- "rewards/margins": 53.15886688232422,
324
- "rewards/rejected": -46.8532600402832,
325
- "step": 210
326
- },
327
- {
328
- "epoch": 1.236619718309859,
329
- "grad_norm": 139.86141967773438,
330
- "learning_rate": 4.376899873606336e-06,
331
- "logits/chosen": 3.622997283935547,
332
- "logits/rejected": 3.6359076499938965,
333
- "logps/chosen": -259.12725830078125,
334
- "logps/rejected": -250.89501953125,
335
- "loss": 0.9057,
336
- "rewards/accuracies": 0.9453125,
337
- "rewards/chosen": 4.471914291381836,
338
- "rewards/margins": 51.15581130981445,
339
- "rewards/rejected": -46.683895111083984,
340
- "step": 220
341
- },
342
- {
343
- "epoch": 1.2929577464788733,
344
- "grad_norm": 94.85035705566406,
345
- "learning_rate": 4.293606852518101e-06,
346
- "logits/chosen": 3.6598830223083496,
347
- "logits/rejected": 3.594897508621216,
348
- "logps/chosen": -263.00079345703125,
349
- "logps/rejected": -248.59524536132812,
350
- "loss": 0.3918,
351
- "rewards/accuracies": 0.9750000238418579,
352
- "rewards/chosen": 2.2544591426849365,
353
- "rewards/margins": 56.7413444519043,
354
- "rewards/rejected": -54.48688507080078,
355
- "step": 230
356
- },
357
- {
358
- "epoch": 1.3492957746478873,
359
- "grad_norm": 37.788360595703125,
360
- "learning_rate": 4.205992875420742e-06,
361
- "logits/chosen": 3.57995343208313,
362
- "logits/rejected": 3.5664329528808594,
363
- "logps/chosen": -243.8499755859375,
364
- "logps/rejected": -252.65353393554688,
365
- "loss": 0.4873,
366
- "rewards/accuracies": 0.9781249761581421,
367
- "rewards/chosen": 7.1952104568481445,
368
- "rewards/margins": 64.06478118896484,
369
- "rewards/rejected": -56.86956787109375,
370
- "step": 240
371
- },
372
- {
373
- "epoch": 1.4056338028169013,
374
- "grad_norm": 159.25875854492188,
375
- "learning_rate": 4.114269012059169e-06,
376
- "logits/chosen": 3.5491080284118652,
377
- "logits/rejected": 3.510200023651123,
378
- "logps/chosen": -253.5936279296875,
379
- "logps/rejected": -254.4651336669922,
380
- "loss": 0.3683,
381
- "rewards/accuracies": 0.9624999761581421,
382
- "rewards/chosen": 4.560586452484131,
383
- "rewards/margins": 59.49503707885742,
384
- "rewards/rejected": -54.9344482421875,
385
- "step": 250
386
- },
387
- {
388
- "epoch": 1.4619718309859155,
389
- "grad_norm": 113.43865966796875,
390
- "learning_rate": 4.018656233254278e-06,
391
- "logits/chosen": 3.4692349433898926,
392
- "logits/rejected": 3.3723576068878174,
393
- "logps/chosen": -253.46249389648438,
394
- "logps/rejected": -268.2179870605469,
395
- "loss": 0.3905,
396
- "rewards/accuracies": 0.984375,
397
- "rewards/chosen": 3.9269375801086426,
398
- "rewards/margins": 74.34013366699219,
399
- "rewards/rejected": -70.41320037841797,
400
- "step": 260
401
- },
402
- {
403
- "epoch": 1.5183098591549296,
404
- "grad_norm": 36.17473602294922,
405
- "learning_rate": 3.919384878564902e-06,
406
- "logits/chosen": 3.488018035888672,
407
- "logits/rejected": 3.4679126739501953,
408
- "logps/chosen": -248.9741668701172,
409
- "logps/rejected": -282.92840576171875,
410
- "loss": 0.5658,
411
- "rewards/accuracies": 0.9781249761581421,
412
- "rewards/chosen": 4.083906650543213,
413
- "rewards/margins": 77.73112487792969,
414
- "rewards/rejected": -73.647216796875,
415
- "step": 270
416
- },
417
- {
418
- "epoch": 1.5746478873239438,
419
- "grad_norm": 177.54251098632812,
420
- "learning_rate": 3.816694101379631e-06,
421
- "logits/chosen": 3.6568634510040283,
422
- "logits/rejected": 3.6167445182800293,
423
- "logps/chosen": -240.26895141601562,
424
- "logps/rejected": -263.4219665527344,
425
- "loss": 0.3476,
426
- "rewards/accuracies": 0.979687511920929,
427
- "rewards/chosen": 4.189328193664551,
428
- "rewards/margins": 66.97871398925781,
429
- "rewards/rejected": -62.78938674926758,
430
- "step": 280
431
- },
432
- {
433
- "epoch": 1.6309859154929578,
434
- "grad_norm": 76.29850006103516,
435
- "learning_rate": 3.7108312927753533e-06,
436
- "logits/chosen": 3.5761330127716064,
437
- "logits/rejected": 3.5349583625793457,
438
- "logps/chosen": -256.83697509765625,
439
- "logps/rejected": -257.34063720703125,
440
- "loss": 0.2007,
441
- "rewards/accuracies": 0.987500011920929,
442
- "rewards/chosen": 3.5510315895080566,
443
- "rewards/margins": 67.4579849243164,
444
- "rewards/rejected": -63.906944274902344,
445
- "step": 290
446
- },
447
- {
448
- "epoch": 1.6873239436619718,
449
- "grad_norm": 88.87206268310547,
450
- "learning_rate": 3.6020514855304856e-06,
451
- "logits/chosen": 3.4625442028045654,
452
- "logits/rejected": 3.4353599548339844,
453
- "logps/chosen": -246.8657684326172,
454
- "logps/rejected": -262.365966796875,
455
- "loss": 0.3359,
456
- "rewards/accuracies": 0.9765625,
457
- "rewards/chosen": -1.413503885269165,
458
- "rewards/margins": 65.82698059082031,
459
- "rewards/rejected": -67.240478515625,
460
- "step": 300
461
- },
462
- {
463
- "epoch": 1.7436619718309858,
464
- "grad_norm": 109.3917007446289,
465
- "learning_rate": 3.4906167397286643e-06,
466
- "logits/chosen": 3.4818034172058105,
467
- "logits/rejected": 3.438699722290039,
468
- "logps/chosen": -248.93112182617188,
469
- "logps/rejected": -257.42694091796875,
470
- "loss": 0.4265,
471
- "rewards/accuracies": 0.9828125238418579,
472
- "rewards/chosen": 0.5807734131813049,
473
- "rewards/margins": 62.88755416870117,
474
- "rewards/rejected": -62.306793212890625,
475
- "step": 310
476
- },
477
- {
478
- "epoch": 1.8,
479
- "grad_norm": 88.23355102539062,
480
- "learning_rate": 3.3767955114330586e-06,
481
- "logits/chosen": 3.3789494037628174,
482
- "logits/rejected": 3.329315662384033,
483
- "logps/chosen": -258.67291259765625,
484
- "logps/rejected": -259.5810546875,
485
- "loss": 0.4489,
486
- "rewards/accuracies": 0.9624999761581421,
487
- "rewards/chosen": -2.7831504344940186,
488
- "rewards/margins": 59.0428466796875,
489
- "rewards/rejected": -61.82598876953125,
490
- "step": 320
491
- },
492
- {
493
- "epoch": 1.856338028169014,
494
- "grad_norm": 1.0168672800064087,
495
- "learning_rate": 3.2608620059521935e-06,
496
- "logits/chosen": 3.38688325881958,
497
- "logits/rejected": 3.349194288253784,
498
- "logps/chosen": -233.11666870117188,
499
- "logps/rejected": -270.48590087890625,
500
- "loss": 0.5576,
501
- "rewards/accuracies": 0.979687511920929,
502
- "rewards/chosen": -4.500494480133057,
503
- "rewards/margins": 67.32182312011719,
504
- "rewards/rejected": -71.82231140136719,
505
- "step": 330
506
- },
507
- {
508
- "epoch": 1.9126760563380283,
509
- "grad_norm": 106.62237548828125,
510
- "learning_rate": 3.14309551725535e-06,
511
- "logits/chosen": 3.401437759399414,
512
- "logits/rejected": 3.3626091480255127,
513
- "logps/chosen": -247.9010009765625,
514
- "logps/rejected": -256.19097900390625,
515
- "loss": 0.4024,
516
- "rewards/accuracies": 0.979687511920929,
517
- "rewards/chosen": 0.5352304577827454,
518
- "rewards/margins": 63.979896545410156,
519
- "rewards/rejected": -63.444664001464844,
520
- "step": 340
521
- },
522
- {
523
- "epoch": 1.9690140845070423,
524
- "grad_norm": 159.5343475341797,
525
- "learning_rate": 3.0237797551289228e-06,
526
- "logits/chosen": 3.4421494007110596,
527
- "logits/rejected": 3.4059250354766846,
528
- "logps/chosen": -229.91134643554688,
529
- "logps/rejected": -252.29019165039062,
530
- "loss": 0.6959,
531
- "rewards/accuracies": 0.971875011920929,
532
- "rewards/chosen": 0.6608916521072388,
533
- "rewards/margins": 60.5278205871582,
534
- "rewards/rejected": -59.86692428588867,
535
- "step": 350
536
  }
537
  ],
538
  "logging_steps": 10,
539
- "max_steps": 712,
540
  "num_input_tokens_seen": 0,
541
- "num_train_epochs": 4,
542
  "save_steps": 500,
543
  "stateful_callbacks": {
544
  "TrainerControl": {
 
2
  "best_global_step": null,
3
  "best_metric": null,
4
  "best_model_checkpoint": null,
5
+ "epoch": 1.0,
6
  "eval_steps": 500,
7
+ "global_step": 183,
8
  "is_hyper_param_search": false,
9
  "is_local_process_zero": true,
10
  "is_world_process_zero": true,
11
  "log_history": [
12
  {
13
+ "epoch": 0.0547945205479452,
14
+ "grad_norm": 513.996826171875,
15
+ "learning_rate": 4.090909090909091e-07,
16
+ "logits/chosen": 2.990995407104492,
17
+ "logits/rejected": 3.0081257820129395,
18
+ "logps/chosen": -298.52886962890625,
19
+ "logps/rejected": -202.96295166015625,
20
+ "loss": 0.8843,
21
+ "rewards/accuracies": 0.526562511920929,
22
+ "rewards/chosen": 0.010423189960420132,
23
+ "rewards/margins": 0.05534166842699051,
24
+ "rewards/rejected": -0.0449184887111187,
25
  "step": 10
26
  },
27
  {
28
+ "epoch": 0.1095890410958904,
29
+ "grad_norm": 461.9533996582031,
30
+ "learning_rate": 8.636363636363637e-07,
31
+ "logits/chosen": 2.934217929840088,
32
+ "logits/rejected": 2.919574737548828,
33
+ "logps/chosen": -276.70391845703125,
34
+ "logps/rejected": -200.52728271484375,
35
+ "loss": 0.6858,
36
+ "rewards/accuracies": 0.65625,
37
+ "rewards/chosen": 0.7733574509620667,
38
+ "rewards/margins": 0.9247980117797852,
39
+ "rewards/rejected": -0.15144045650959015,
40
  "step": 20
41
  },
42
  {
43
+ "epoch": 0.1643835616438356,
44
+ "grad_norm": 150.57086181640625,
45
+ "learning_rate": 1.3181818181818182e-06,
46
+ "logits/chosen": 3.0546913146972656,
47
+ "logits/rejected": 3.0510308742523193,
48
+ "logps/chosen": -289.57977294921875,
49
+ "logps/rejected": -218.24765014648438,
50
+ "loss": 0.2839,
51
+ "rewards/accuracies": 0.8890625238418579,
52
+ "rewards/chosen": 2.8807666301727295,
53
+ "rewards/margins": 4.351069450378418,
54
+ "rewards/rejected": -1.4703023433685303,
55
  "step": 30
56
  },
57
  {
58
+ "epoch": 0.2191780821917808,
59
+ "grad_norm": 151.46690368652344,
60
+ "learning_rate": 1.7727272727272729e-06,
61
+ "logits/chosen": 3.088348150253296,
62
+ "logits/rejected": 3.1125292778015137,
63
+ "logps/chosen": -275.5274963378906,
64
+ "logps/rejected": -214.15737915039062,
65
+ "loss": 0.1502,
66
+ "rewards/accuracies": 0.9390624761581421,
67
+ "rewards/chosen": 5.216189384460449,
68
+ "rewards/margins": 8.827147483825684,
69
+ "rewards/rejected": -3.610957384109497,
70
  "step": 40
71
  },
72
  {
73
+ "epoch": 0.273972602739726,
74
+ "grad_norm": 130.53453063964844,
75
+ "learning_rate": 2.2272727272727274e-06,
76
+ "logits/chosen": 2.9992308616638184,
77
+ "logits/rejected": 3.075270175933838,
78
+ "logps/chosen": -259.50836181640625,
79
+ "logps/rejected": -203.08811950683594,
80
+ "loss": 0.1281,
81
+ "rewards/accuracies": 0.957812488079071,
82
+ "rewards/chosen": 6.97296667098999,
83
+ "rewards/margins": 12.8120698928833,
84
+ "rewards/rejected": -5.839103698730469,
85
  "step": 50
86
  },
87
  {
88
+ "epoch": 0.3287671232876712,
89
+ "grad_norm": 110.53746032714844,
90
+ "learning_rate": 2.6818181818181822e-06,
91
+ "logits/chosen": 3.088064670562744,
92
+ "logits/rejected": 2.986386775970459,
93
+ "logps/chosen": -325.0431213378906,
94
+ "logps/rejected": -224.979736328125,
95
+ "loss": 0.1444,
96
+ "rewards/accuracies": 0.964062511920929,
97
+ "rewards/chosen": 10.117586135864258,
98
+ "rewards/margins": 18.425701141357422,
99
+ "rewards/rejected": -8.30811595916748,
100
  "step": 60
101
  },
102
  {
103
+ "epoch": 0.3835616438356164,
104
+ "grad_norm": 95.7762222290039,
105
+ "learning_rate": 3.1363636363636367e-06,
106
+ "logits/chosen": 3.040494918823242,
107
+ "logits/rejected": 3.022307872772217,
108
+ "logps/chosen": -273.9972839355469,
109
+ "logps/rejected": -211.2712860107422,
110
+ "loss": 0.1443,
111
+ "rewards/accuracies": 0.9671875238418579,
112
+ "rewards/chosen": 9.627126693725586,
113
+ "rewards/margins": 20.590023040771484,
114
+ "rewards/rejected": -10.962896347045898,
115
  "step": 70
116
  },
117
  {
118
+ "epoch": 0.4383561643835616,
119
+ "grad_norm": 179.24440002441406,
120
+ "learning_rate": 3.590909090909091e-06,
121
+ "logits/chosen": 3.0298266410827637,
122
+ "logits/rejected": 3.0730605125427246,
123
+ "logps/chosen": -280.2432861328125,
124
+ "logps/rejected": -214.884033203125,
125
+ "loss": 0.142,
126
+ "rewards/accuracies": 0.979687511920929,
127
+ "rewards/chosen": 10.719534873962402,
128
+ "rewards/margins": 23.82217788696289,
129
+ "rewards/rejected": -13.102640151977539,
130
  "step": 80
131
  },
132
  {
133
+ "epoch": 0.4931506849315068,
134
+ "grad_norm": 2.0804860591888428,
135
+ "learning_rate": 4.045454545454546e-06,
136
+ "logits/chosen": 3.1072518825531006,
137
+ "logits/rejected": 3.067288875579834,
138
+ "logps/chosen": -294.1097106933594,
139
+ "logps/rejected": -219.38949584960938,
140
+ "loss": 0.1262,
141
+ "rewards/accuracies": 0.981249988079071,
142
+ "rewards/chosen": 12.069157600402832,
143
+ "rewards/margins": 29.454524993896484,
144
+ "rewards/rejected": -17.385366439819336,
145
  "step": 90
146
  },
147
  {
148
+ "epoch": 0.547945205479452,
149
+ "grad_norm": 134.90240478515625,
150
+ "learning_rate": 4.5e-06,
151
+ "logits/chosen": 2.9695353507995605,
152
+ "logits/rejected": 2.9900407791137695,
153
+ "logps/chosen": -270.2259826660156,
154
+ "logps/rejected": -210.32302856445312,
155
+ "loss": 0.1125,
156
+ "rewards/accuracies": 0.984375,
157
+ "rewards/chosen": 8.642024040222168,
158
+ "rewards/margins": 26.856210708618164,
159
+ "rewards/rejected": -18.21418571472168,
160
  "step": 100
161
  },
162
  {
163
+ "epoch": 0.6027397260273972,
164
+ "grad_norm": 237.83163452148438,
165
+ "learning_rate": 4.954545454545455e-06,
166
+ "logits/chosen": 3.001239538192749,
167
+ "logits/rejected": 2.9165444374084473,
168
+ "logps/chosen": -261.63848876953125,
169
+ "logps/rejected": -217.56314086914062,
170
+ "loss": 0.1337,
171
  "rewards/accuracies": 0.9765625,
172
+ "rewards/chosen": 4.362582206726074,
173
+ "rewards/margins": 21.842912673950195,
174
+ "rewards/rejected": -17.480329513549805,
175
  "step": 110
176
  },
177
  {
178
+ "epoch": 0.6575342465753424,
179
+ "grad_norm": 124.83686065673828,
180
+ "learning_rate": 4.998976350571773e-06,
181
+ "logits/chosen": 3.0631394386291504,
182
+ "logits/rejected": 3.0034124851226807,
183
+ "logps/chosen": -296.0355224609375,
184
+ "logps/rejected": -219.4881134033203,
185
+ "loss": 0.176,
186
+ "rewards/accuracies": 0.96875,
187
+ "rewards/chosen": 3.6324734687805176,
188
+ "rewards/margins": 20.854042053222656,
189
+ "rewards/rejected": -17.221569061279297,
190
  "step": 120
191
  },
192
  {
193
+ "epoch": 0.7123287671232876,
194
+ "grad_norm": 110.8822250366211,
195
+ "learning_rate": 4.995438885558294e-06,
196
+ "logits/chosen": 3.0476179122924805,
197
+ "logits/rejected": 2.9690792560577393,
198
+ "logps/chosen": -292.52276611328125,
199
+ "logps/rejected": -210.3925018310547,
200
+ "loss": 0.2762,
201
+ "rewards/accuracies": 0.9781249761581421,
202
+ "rewards/chosen": 5.373471736907959,
203
+ "rewards/margins": 26.076580047607422,
204
+ "rewards/rejected": -20.703105926513672,
205
  "step": 130
206
  },
207
  {
208
+ "epoch": 0.7671232876712328,
209
+ "grad_norm": 72.18496704101562,
210
+ "learning_rate": 4.989378542821969e-06,
211
+ "logits/chosen": 3.0710926055908203,
212
+ "logits/rejected": 3.0577285289764404,
213
+ "logps/chosen": -284.55230712890625,
214
+ "logps/rejected": -230.9425506591797,
215
+ "loss": 0.2372,
216
+ "rewards/accuracies": 0.973437488079071,
217
+ "rewards/chosen": 5.454714775085449,
218
+ "rewards/margins": 30.868602752685547,
219
+ "rewards/rejected": -25.413890838623047,
220
  "step": 140
221
  },
222
  {
223
+ "epoch": 0.821917808219178,
224
+ "grad_norm": 104.61406707763672,
225
+ "learning_rate": 4.9808014493426124e-06,
226
+ "logits/chosen": 3.053307294845581,
227
+ "logits/rejected": 3.0027899742126465,
228
+ "logps/chosen": -282.54864501953125,
229
+ "logps/rejected": -236.41592407226562,
230
+ "loss": 0.6492,
231
+ "rewards/accuracies": 0.9546874761581421,
232
+ "rewards/chosen": 0.29179587960243225,
233
+ "rewards/margins": 30.52816390991211,
234
+ "rewards/rejected": -30.23636817932129,
235
  "step": 150
236
  },
237
  {
238
+ "epoch": 0.8767123287671232,
239
+ "grad_norm": 114.0179443359375,
240
+ "learning_rate": 4.9697162765239595e-06,
241
+ "logits/chosen": 3.0813591480255127,
242
+ "logits/rejected": 3.093292713165283,
243
+ "logps/chosen": -265.5400390625,
244
+ "logps/rejected": -233.4171142578125,
245
+ "loss": 0.1703,
246
+ "rewards/accuracies": 0.981249988079071,
247
+ "rewards/chosen": 2.905339002609253,
248
+ "rewards/margins": 36.841583251953125,
249
+ "rewards/rejected": -33.936241149902344,
250
  "step": 160
251
  },
252
  {
253
+ "epoch": 0.9315068493150684,
254
+ "grad_norm": 132.4547576904297,
255
+ "learning_rate": 4.9561342314269055e-06,
256
+ "logits/chosen": 3.124277114868164,
257
+ "logits/rejected": 3.048166036605835,
258
+ "logps/chosen": -262.30194091796875,
259
+ "logps/rejected": -232.39297485351562,
260
+ "loss": 0.4256,
261
+ "rewards/accuracies": 0.9515625238418579,
262
+ "rewards/chosen": -1.3073980808258057,
263
+ "rewards/margins": 30.139415740966797,
264
+ "rewards/rejected": -31.446813583374023,
265
  "step": 170
266
  },
267
  {
268
+ "epoch": 0.9863013698630136,
269
+ "grad_norm": 1.0581492185592651,
270
+ "learning_rate": 4.940069045439226e-06,
271
+ "logits/chosen": 3.164407968521118,
272
+ "logits/rejected": 3.164742946624756,
273
+ "logps/chosen": -274.58673095703125,
274
+ "logps/rejected": -232.9495849609375,
275
+ "loss": 0.3806,
276
+ "rewards/accuracies": 0.9703124761581421,
277
+ "rewards/chosen": 0.4807693064212799,
278
+ "rewards/margins": 33.424041748046875,
279
+ "rewards/rejected": -32.943275451660156,
280
  "step": 180
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
281
  }
282
  ],
283
  "logging_steps": 10,
284
+ "max_steps": 1098,
285
  "num_input_tokens_seen": 0,
286
+ "num_train_epochs": 6,
287
  "save_steps": 500,
288
  "stateful_callbacks": {
289
  "TrainerControl": {
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7789fd19e3b22d6de6b1b2cd69ff66551be72d745bf4062a2a77caaab0386055
+ size 6520
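
Once the shards are pulled through LFS, they load like any sharded safetensors checkpoint. A minimal sketch, assuming the repo id combines the committer name with the model name from the commit message, and that the config and tokenizer files live alongside these shards (neither assumption is confirmed by this commit):

from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "kobzaond/alquistcoder_F1_MAIN_DPO"  # assumed repo id; adjust to the actual one
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype="auto",  # keep the dtype stored in the safetensors shards
    device_map="auto",   # requires accelerate; spreads the roughly 18 GB of shards across devices
)
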