{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.988593155893536,
  "eval_steps": 50,
  "global_step": 393,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07604562737642585,
      "grad_norm": 8.750013078952279,
      "learning_rate": 1.25e-07,
      "logits/chosen": -2.757716655731201,
      "logits/rejected": -2.75109601020813,
      "logps/chosen": -260.8774719238281,
      "logps/rejected": -271.0572814941406,
      "loss": 0.6931,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": 5.311033601174131e-05,
      "rewards/margins": -0.00021072864183224738,
      "rewards/rejected": 0.0002638388832565397,
      "step": 10
    },
    {
      "epoch": 0.1520912547528517,
      "grad_norm": 7.9922636295097265,
      "learning_rate": 2.5e-07,
      "logits/chosen": -2.7602779865264893,
      "logits/rejected": -2.7576041221618652,
      "logps/chosen": -286.5116882324219,
      "logps/rejected": -283.8603820800781,
      "loss": 0.6926,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": 0.0009672940941527486,
      "rewards/margins": 0.0008663859916850924,
      "rewards/rejected": 0.00010090797150041908,
      "step": 20
    },
    {
      "epoch": 0.22813688212927757,
      "grad_norm": 8.25949567460557,
      "learning_rate": 3.75e-07,
      "logits/chosen": -2.7091896533966064,
      "logits/rejected": -2.707089900970459,
      "logps/chosen": -268.6849670410156,
      "logps/rejected": -269.9923400878906,
      "loss": 0.6901,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.01067989133298397,
      "rewards/margins": 0.006726422347128391,
      "rewards/rejected": 0.003953468985855579,
      "step": 30
    },
    {
      "epoch": 0.3041825095057034,
      "grad_norm": 7.8540741959718,
      "learning_rate": 5e-07,
      "logits/chosen": -2.7155396938323975,
      "logits/rejected": -2.716829776763916,
      "logps/chosen": -292.66705322265625,
      "logps/rejected": -287.9569396972656,
      "loss": 0.6838,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": 0.03730246424674988,
      "rewards/margins": 0.019583439454436302,
      "rewards/rejected": 0.017719022929668427,
      "step": 40
    },
    {
      "epoch": 0.38022813688212925,
      "grad_norm": 8.712893470844076,
      "learning_rate": 4.990105959637203e-07,
      "logits/chosen": -2.688120126724243,
      "logits/rejected": -2.6780331134796143,
      "logps/chosen": -275.4263610839844,
      "logps/rejected": -280.61688232421875,
      "loss": 0.6701,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.011463982984423637,
      "rewards/margins": 0.03627394884824753,
      "rewards/rejected": -0.024809962138533592,
      "step": 50
    },
    {
      "epoch": 0.38022813688212925,
      "eval_logits/chosen": -2.5790276527404785,
      "eval_logits/rejected": -2.5429863929748535,
      "eval_logps/chosen": -266.01251220703125,
      "eval_logps/rejected": -260.623779296875,
      "eval_loss": 0.6608449220657349,
      "eval_rewards/accuracies": 0.6379310488700867,
      "eval_rewards/chosen": -0.06051206588745117,
      "eval_rewards/margins": 0.06295385211706161,
      "eval_rewards/rejected": -0.12346591800451279,
      "eval_runtime": 93.5809,
      "eval_samples_per_second": 19.566,
      "eval_steps_per_second": 0.31,
      "step": 50
    },
    {
      "epoch": 0.45627376425855515,
      "grad_norm": 9.48204753541487,
      "learning_rate": 4.960502152176573e-07,
      "logits/chosen": -2.6288909912109375,
      "logits/rejected": -2.6260056495666504,
      "logps/chosen": -305.30902099609375,
      "logps/rejected": -323.2539367675781,
      "loss": 0.6562,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.08925994485616684,
      "rewards/margins": 0.11333100497722626,
      "rewards/rejected": -0.2025909423828125,
      "step": 60
    },
    {
      "epoch": 0.532319391634981,
      "grad_norm": 10.474383395672094,
      "learning_rate": 4.911422898630837e-07,
      "logits/chosen": -2.5303704738616943,
      "logits/rejected": -2.5113658905029297,
      "logps/chosen": -289.95233154296875,
      "logps/rejected": -313.9202880859375,
      "loss": 0.6516,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.24351218342781067,
      "rewards/margins": 0.08410634845495224,
      "rewards/rejected": -0.3276185095310211,
      "step": 70
    },
    {
      "epoch": 0.6083650190114068,
      "grad_norm": 13.987853639442921,
      "learning_rate": 4.84325667269244e-07,
      "logits/chosen": -2.4371495246887207,
      "logits/rejected": -2.4378981590270996,
      "logps/chosen": -334.60919189453125,
      "logps/rejected": -343.8853454589844,
      "loss": 0.6459,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.37069252133369446,
      "rewards/margins": 0.14995035529136658,
      "rewards/rejected": -0.520642876625061,
      "step": 80
    },
    {
      "epoch": 0.6844106463878327,
      "grad_norm": 13.701357719917418,
      "learning_rate": 4.7565430258740336e-07,
      "logits/chosen": -2.43092942237854,
      "logits/rejected": -2.398475170135498,
      "logps/chosen": -332.1542053222656,
      "logps/rejected": -322.47540283203125,
      "loss": 0.6262,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.4322943687438965,
      "rewards/margins": 0.17964716255664825,
      "rewards/rejected": -0.6119415163993835,
      "step": 90
    },
    {
      "epoch": 0.7604562737642585,
      "grad_norm": 14.672531746982555,
      "learning_rate": 4.6519683168329195e-07,
      "logits/chosen": -2.4015495777130127,
      "logits/rejected": -2.371605396270752,
      "logps/chosen": -324.8261413574219,
      "logps/rejected": -323.9524841308594,
      "loss": 0.6369,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.5212644338607788,
      "rewards/margins": 0.19606027007102966,
      "rewards/rejected": -0.7173247933387756,
      "step": 100
    },
    {
      "epoch": 0.7604562737642585,
      "eval_logits/chosen": -2.3145029544830322,
      "eval_logits/rejected": -2.25056529045105,
      "eval_logps/chosen": -305.69305419921875,
      "eval_logps/rejected": -315.26226806640625,
      "eval_loss": 0.6255849003791809,
      "eval_rewards/accuracies": 0.6379310488700867,
      "eval_rewards/chosen": -0.457317590713501,
      "eval_rewards/margins": 0.21253280341625214,
      "eval_rewards/rejected": -0.6698502898216248,
      "eval_runtime": 94.9787,
      "eval_samples_per_second": 19.278,
      "eval_steps_per_second": 0.305,
      "step": 100
    },
    {
      "epoch": 0.8365019011406845,
      "grad_norm": 14.036217300902836,
      "learning_rate": 4.530360278682841e-07,
      "logits/chosen": -2.405102252960205,
      "logits/rejected": -2.3873496055603027,
      "logps/chosen": -297.6337890625,
      "logps/rejected": -314.28936767578125,
      "loss": 0.6236,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.2411026507616043,
      "rewards/margins": 0.2816271483898163,
      "rewards/rejected": -0.522729754447937,
      "step": 110
    },
    {
      "epoch": 0.9125475285171103,
      "grad_norm": 18.73190288761437,
      "learning_rate": 4.3926814672941166e-07,
      "logits/chosen": -2.415482759475708,
      "logits/rejected": -2.409000873565674,
      "logps/chosen": -335.9554138183594,
      "logps/rejected": -366.6188049316406,
      "loss": 0.6232,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.41517263650894165,
      "rewards/margins": 0.29164889454841614,
      "rewards/rejected": -0.7068215012550354,
      "step": 120
    },
    {
      "epoch": 0.9885931558935361,
      "grad_norm": 14.277531847256165,
      "learning_rate": 4.240021642440332e-07,
      "logits/chosen": -2.3934361934661865,
      "logits/rejected": -2.3797760009765625,
      "logps/chosen": -326.6287841796875,
      "logps/rejected": -334.6332092285156,
      "loss": 0.6044,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.5393946170806885,
      "rewards/margins": 0.20507295429706573,
      "rewards/rejected": -0.7444676160812378,
      "step": 130
    },
    {
      "epoch": 1.064638783269962,
      "grad_norm": 12.399932098202758,
      "learning_rate": 4.073589142096592e-07,
      "logits/chosen": -2.3695178031921387,
      "logits/rejected": -2.362006425857544,
      "logps/chosen": -363.754150390625,
      "logps/rejected": -403.7615661621094,
      "loss": 0.5188,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.4886489808559418,
      "rewards/margins": 0.4631797671318054,
      "rewards/rejected": -0.9518287777900696,
      "step": 140
    },
    {
      "epoch": 1.1406844106463878,
      "grad_norm": 14.307776858677078,
      "learning_rate": 3.8947013181637624e-07,
      "logits/chosen": -2.2751259803771973,
      "logits/rejected": -2.2668895721435547,
      "logps/chosen": -347.95452880859375,
      "logps/rejected": -414.0625915527344,
      "loss": 0.4762,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.5590722560882568,
      "rewards/margins": 0.6314972043037415,
      "rewards/rejected": -1.1905694007873535,
      "step": 150
    },
    {
      "epoch": 1.1406844106463878,
      "eval_logits/chosen": -2.146411180496216,
      "eval_logits/rejected": -2.071317434310913,
      "eval_logps/chosen": -362.7339782714844,
      "eval_logps/rejected": -387.7436218261719,
      "eval_loss": 0.6095313429832458,
      "eval_rewards/accuracies": 0.6637930870056152,
      "eval_rewards/chosen": -1.027726411819458,
      "eval_rewards/margins": 0.3669382333755493,
      "eval_rewards/rejected": -1.3946646451950073,
      "eval_runtime": 92.9404,
      "eval_samples_per_second": 19.701,
      "eval_steps_per_second": 0.312,
      "step": 150
    },
    {
      "epoch": 1.2167300380228137,
      "grad_norm": 17.154986114362913,
      "learning_rate": 3.7047741093221656e-07,
      "logits/chosen": -2.259256601333618,
      "logits/rejected": -2.2275631427764893,
      "logps/chosen": -399.64996337890625,
      "logps/rejected": -466.52056884765625,
      "loss": 0.4675,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.9498268961906433,
      "rewards/margins": 0.8195298910140991,
      "rewards/rejected": -1.7693569660186768,
      "step": 160
    },
    {
      "epoch": 1.2927756653992395,
      "grad_norm": 17.94492731274679,
      "learning_rate": 3.5053108335480205e-07,
      "logits/chosen": -2.110236406326294,
      "logits/rejected": -2.0996217727661133,
      "logps/chosen": -406.27825927734375,
      "logps/rejected": -483.40478515625,
      "loss": 0.4492,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -1.0264767408370972,
      "rewards/margins": 0.876954197883606,
      "rewards/rejected": -1.903430700302124,
      "step": 170
    },
    {
      "epoch": 1.3688212927756653,
      "grad_norm": 22.10036946673983,
      "learning_rate": 3.29789028900245e-07,
      "logits/chosen": -2.075085163116455,
      "logits/rejected": -2.0645432472229004,
      "logps/chosen": -446.86572265625,
      "logps/rejected": -529.196044921875,
      "loss": 0.4381,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -1.4222126007080078,
      "rewards/margins": 0.9729466438293457,
      "rewards/rejected": -2.3951592445373535,
      "step": 180
    },
    {
      "epoch": 1.4448669201520912,
      "grad_norm": 22.48679557744493,
      "learning_rate": 3.084154257477301e-07,
      "logits/chosen": -2.159637928009033,
      "logits/rejected": -2.144123077392578,
      "logps/chosen": -454.2430114746094,
      "logps/rejected": -546.5646362304688,
      "loss": 0.4343,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -1.5697638988494873,
      "rewards/margins": 0.8866235017776489,
      "rewards/rejected": -2.456387519836426,
      "step": 190
    },
    {
      "epoch": 1.5209125475285172,
      "grad_norm": 19.33120207165494,
      "learning_rate": 2.865794509310888e-07,
      "logits/chosen": -1.9821497201919556,
      "logits/rejected": -1.9777543544769287,
      "logps/chosen": -427.1321716308594,
      "logps/rejected": -510.4873046875,
      "loss": 0.4416,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -1.4423638582229614,
      "rewards/margins": 0.8751496076583862,
      "rewards/rejected": -2.3175134658813477,
      "step": 200
    },
    {
      "epoch": 1.5209125475285172,
      "eval_logits/chosen": -1.8830668926239014,
      "eval_logits/rejected": -1.8054617643356323,
      "eval_logps/chosen": -412.5243835449219,
      "eval_logps/rejected": -451.2822570800781,
      "eval_loss": 0.6302618384361267,
      "eval_rewards/accuracies": 0.6896551847457886,
      "eval_rewards/chosen": -1.525631070137024,
      "eval_rewards/margins": 0.5044201612472534,
      "eval_rewards/rejected": -2.0300514698028564,
      "eval_runtime": 94.2551,
      "eval_samples_per_second": 19.426,
      "eval_steps_per_second": 0.308,
      "step": 200
    },
    {
      "epoch": 1.5969581749049429,
      "grad_norm": 21.341696689301088,
      "learning_rate": 2.644539412632786e-07,
      "logits/chosen": -2.0418550968170166,
      "logits/rejected": -1.9922387599945068,
      "logps/chosen": -456.74951171875,
      "logps/rejected": -536.8170166015625,
      "loss": 0.4216,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -1.3003852367401123,
      "rewards/margins": 1.0898964405059814,
      "rewards/rejected": -2.390282154083252,
      "step": 210
    },
    {
      "epoch": 1.673003802281369,
      "grad_norm": 24.250284409575745,
      "learning_rate": 2.422140252928601e-07,
      "logits/chosen": -1.964264154434204,
      "logits/rejected": -1.914783239364624,
      "logps/chosen": -446.67950439453125,
      "logps/rejected": -542.49169921875,
      "loss": 0.4058,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -1.5265235900878906,
      "rewards/margins": 1.0695818662643433,
      "rewards/rejected": -2.5961055755615234,
      "step": 220
    },
    {
      "epoch": 1.7490494296577945,
      "grad_norm": 19.00774265939697,
      "learning_rate": 2.2003573712085455e-07,
      "logits/chosen": -1.8126968145370483,
      "logits/rejected": -1.7982728481292725,
      "logps/chosen": -407.3929748535156,
      "logps/rejected": -507.03704833984375,
      "loss": 0.4308,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -1.3489201068878174,
      "rewards/margins": 0.9808903932571411,
      "rewards/rejected": -2.329810619354248,
      "step": 230
    },
    {
      "epoch": 1.8250950570342206,
      "grad_norm": 22.60307202010589,
      "learning_rate": 1.980946230499431e-07,
      "logits/chosen": -1.6764023303985596,
      "logits/rejected": -1.666276216506958,
      "logps/chosen": -441.1431579589844,
      "logps/rejected": -543.8973388671875,
      "loss": 0.4012,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -1.6486518383026123,
      "rewards/margins": 1.0365753173828125,
      "rewards/rejected": -2.685227155685425,
      "step": 240
    },
    {
      "epoch": 1.9011406844106464,
      "grad_norm": 21.996162773488756,
      "learning_rate": 1.7656435209470376e-07,
      "logits/chosen": -1.4262855052947998,
      "logits/rejected": -1.4329298734664917,
      "logps/chosen": -472.7010803222656,
      "logps/rejected": -587.4404296875,
      "loss": 0.4058,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -1.8240299224853516,
      "rewards/margins": 1.0630748271942139,
      "rewards/rejected": -2.8871047496795654,
      "step": 250
    },
    {
      "epoch": 1.9011406844106464,
      "eval_logits/chosen": -0.976457953453064,
      "eval_logits/rejected": -0.8045514225959778,
      "eval_logps/chosen": -474.0944519042969,
      "eval_logps/rejected": -521.2467041015625,
      "eval_loss": 0.6470248103141785,
      "eval_rewards/accuracies": 0.6724137663841248,
      "eval_rewards/chosen": -2.141331672668457,
      "eval_rewards/margins": 0.5883632898330688,
      "eval_rewards/rejected": -2.7296955585479736,
      "eval_runtime": 93.8915,
      "eval_samples_per_second": 19.501,
      "eval_steps_per_second": 0.309,
      "step": 250
    },
    {
      "epoch": 1.9771863117870723,
      "grad_norm": 26.930679059566526,
      "learning_rate": 1.5561534135101884e-07,
      "logits/chosen": -1.0850579738616943,
      "logits/rejected": -0.9344271421432495,
      "logps/chosen": -493.2483825683594,
      "logps/rejected": -594.302734375,
      "loss": 0.4126,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": -1.9806413650512695,
      "rewards/margins": 1.1219998598098755,
      "rewards/rejected": -3.1026411056518555,
      "step": 260
    },
    {
      "epoch": 2.053231939163498,
      "grad_norm": 31.653928530377552,
      "learning_rate": 1.3541340710517546e-07,
      "logits/chosen": -0.8464574813842773,
      "logits/rejected": -0.7507003545761108,
      "logps/chosen": -435.60260009765625,
      "logps/rejected": -586.5030517578125,
      "loss": 0.3086,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -1.743119239807129,
      "rewards/margins": 1.482635259628296,
      "rewards/rejected": -3.225754499435425,
      "step": 270
    },
    {
      "epoch": 2.129277566539924,
      "grad_norm": 19.6643061541321,
      "learning_rate": 1.1611845235944143e-07,
      "logits/chosen": -0.4659014642238617,
      "logits/rejected": -0.35695117712020874,
      "logps/chosen": -510.74456787109375,
      "logps/rejected": -693.2734985351562,
      "loss": 0.2494,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -2.120767116546631,
      "rewards/margins": 1.895451307296753,
      "rewards/rejected": -4.016218662261963,
      "step": 280
    },
    {
      "epoch": 2.20532319391635,
      "grad_norm": 24.595513209110543,
      "learning_rate": 9.788320116265892e-08,
      "logits/chosen": -0.13620242476463318,
      "logits/rejected": 0.04458779841661453,
      "logps/chosen": -530.4153442382812,
      "logps/rejected": -696.87109375,
      "loss": 0.2381,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": -2.3323657512664795,
      "rewards/margins": 1.861975073814392,
      "rewards/rejected": -4.194340705871582,
      "step": 290
    },
    {
      "epoch": 2.2813688212927756,
      "grad_norm": 21.633771054738194,
      "learning_rate": 8.085198976392124e-08,
      "logits/chosen": 0.2940121591091156,
      "logits/rejected": 0.4243450164794922,
      "logps/chosen": -531.2306518554688,
      "logps/rejected": -713.4442138671875,
      "loss": 0.2288,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": -2.508195400238037,
      "rewards/margins": 2.0444493293762207,
      "rewards/rejected": -4.552644729614258,
      "step": 300
    },
    {
      "epoch": 2.2813688212927756,
      "eval_logits/chosen": 0.7331591844558716,
      "eval_logits/rejected": 1.1516082286834717,
      "eval_logps/chosen": -602.3347778320312,
      "eval_logps/rejected": -678.4207763671875,
      "eval_loss": 0.7265122532844543,
      "eval_rewards/accuracies": 0.6724137663841248,
      "eval_rewards/chosen": -3.4237349033355713,
      "eval_rewards/margins": 0.877700686454773,
      "eval_rewards/rejected": -4.301435470581055,
      "eval_runtime": 94.112,
      "eval_samples_per_second": 19.456,
      "eval_steps_per_second": 0.308,
      "step": 300
    },
    {
      "epoch": 2.3574144486692017,
      "grad_norm": 25.495425543277612,
      "learning_rate": 6.515962415763369e-08,
      "logits/chosen": 0.6170581579208374,
      "logits/rejected": 0.7346482276916504,
      "logps/chosen": -559.1423950195312,
      "logps/rejected": -797.1380615234375,
      "loss": 0.2308,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -2.808534860610962,
      "rewards/margins": 2.294363021850586,
      "rewards/rejected": -5.102897644042969,
      "step": 310
    },
    {
      "epoch": 2.4334600760456273,
      "grad_norm": 26.06035826594137,
      "learning_rate": 5.093031306275308e-08,
      "logits/chosen": 0.7691527009010315,
      "logits/rejected": 1.0421297550201416,
      "logps/chosen": -611.4459228515625,
      "logps/rejected": -806.7598876953125,
      "loss": 0.2207,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": -3.107365369796753,
      "rewards/margins": 2.2186014652252197,
      "rewards/rejected": -5.325966835021973,
      "step": 320
    },
    {
      "epoch": 2.5095057034220534,
      "grad_norm": 22.38621742753542,
      "learning_rate": 3.827668478192578e-08,
      "logits/chosen": 0.8044630289077759,
      "logits/rejected": 0.9414040446281433,
      "logps/chosen": -576.51708984375,
      "logps/rejected": -825.3946533203125,
      "loss": 0.2206,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -3.0663881301879883,
      "rewards/margins": 2.4700827598571777,
      "rewards/rejected": -5.536470890045166,
      "step": 330
    },
    {
      "epoch": 2.585551330798479,
      "grad_norm": 24.44232863700222,
      "learning_rate": 2.729889572230856e-08,
      "logits/chosen": 0.9481533765792847,
      "logits/rejected": 1.2326308488845825,
      "logps/chosen": -563.4014282226562,
      "logps/rejected": -779.8926391601562,
      "loss": 0.2173,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": -2.942201614379883,
      "rewards/margins": 2.315051555633545,
      "rewards/rejected": -5.257253170013428,
      "step": 340
    },
    {
      "epoch": 2.661596958174905,
      "grad_norm": 25.605389790591452,
      "learning_rate": 1.8083837634341766e-08,
      "logits/chosen": 0.5146993398666382,
      "logits/rejected": 0.7431297302246094,
      "logps/chosen": -602.69482421875,
      "logps/rejected": -831.6427001953125,
      "loss": 0.21,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": -3.007021427154541,
      "rewards/margins": 2.4290623664855957,
      "rewards/rejected": -5.436083793640137,
      "step": 350
    },
    {
      "epoch": 2.661596958174905,
      "eval_logits/chosen": 1.3830486536026,
      "eval_logits/rejected": 1.804565668106079,
      "eval_logps/chosen": -649.2000122070312,
      "eval_logps/rejected": -731.8800659179688,
      "eval_loss": 0.7540475726127625,
      "eval_rewards/accuracies": 0.681034505367279,
      "eval_rewards/chosen": -3.8923871517181396,
      "eval_rewards/margins": 0.9436419010162354,
      "eval_rewards/rejected": -4.836028575897217,
      "eval_runtime": 92.8895,
      "eval_samples_per_second": 19.712,
      "eval_steps_per_second": 0.312,
      "step": 350
    },
    {
      "epoch": 2.7376425855513307,
      "grad_norm": 33.51553576619823,
      "learning_rate": 1.0704449843359498e-08,
      "logits/chosen": 0.9970355033874512,
      "logits/rejected": 1.2489699125289917,
      "logps/chosen": -617.2589721679688,
      "logps/rejected": -876.3328857421875,
      "loss": 0.2347,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": -3.3343372344970703,
      "rewards/margins": 2.5600481033325195,
      "rewards/rejected": -5.894384860992432,
      "step": 360
    },
    {
      "epoch": 2.8136882129277567,
      "grad_norm": 28.342143525311826,
      "learning_rate": 5.2191419178871935e-09,
      "logits/chosen": 0.9573332071304321,
      "logits/rejected": 0.9704620242118835,
      "logps/chosen": -602.1446533203125,
      "logps/rejected": -826.4749145507812,
      "loss": 0.2285,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -3.1017754077911377,
      "rewards/margins": 2.2921016216278076,
      "rewards/rejected": -5.3938775062561035,
      "step": 370
    },
    {
      "epoch": 2.8897338403041823,
      "grad_norm": 25.89340425260963,
      "learning_rate": 1.6713313443384724e-09,
      "logits/chosen": 1.0670582056045532,
      "logits/rejected": 1.103432536125183,
      "logps/chosen": -586.4422607421875,
      "logps/rejected": -816.7635498046875,
      "loss": 0.2255,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": -3.018024444580078,
      "rewards/margins": 2.285562753677368,
      "rewards/rejected": -5.303586959838867,
      "step": 380
    },
    {
      "epoch": 2.9657794676806084,
      "grad_norm": 24.29095712697487,
      "learning_rate": 8.909986752470012e-11,
      "logits/chosen": 0.6790753602981567,
      "logits/rejected": 0.9370797872543335,
      "logps/chosen": -650.6864624023438,
      "logps/rejected": -848.3294067382812,
      "loss": 0.2245,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -3.326509475708008,
      "rewards/margins": 2.2717742919921875,
      "rewards/rejected": -5.5982842445373535,
      "step": 390
    },
    {
      "epoch": 2.988593155893536,
      "step": 393,
      "total_flos": 0.0,
      "train_loss": 0.440452757075846,
      "train_runtime": 10425.4174,
      "train_samples_per_second": 4.834,
      "train_steps_per_second": 0.038
    }
  ],
  "logging_steps": 10,
  "max_steps": 393,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}