{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3229061553985873,
  "eval_steps": 50,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008072653884964682,
      "grad_norm": 0.04926518723368645,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.542106628417969,
      "logits/rejected": 14.864250183105469,
      "logps/chosen": -0.2809702754020691,
      "logps/rejected": -0.3013763725757599,
      "loss": 0.9221,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.421455442905426,
      "rewards/margins": 0.03060910664498806,
      "rewards/rejected": -0.452064573764801,
      "step": 10
    },
    {
      "epoch": 0.016145307769929364,
      "grad_norm": 0.056249432265758514,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.614748001098633,
      "logits/rejected": 15.259109497070312,
      "logps/chosen": -0.2828002870082855,
      "logps/rejected": -0.3477819561958313,
      "loss": 0.916,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.4242004454135895,
      "rewards/margins": 0.09747247397899628,
      "rewards/rejected": -0.5216729044914246,
      "step": 20
    },
    {
      "epoch": 0.024217961654894045,
      "grad_norm": 0.0664869099855423,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.391204833984375,
      "logits/rejected": 14.82734203338623,
      "logps/chosen": -0.287629634141922,
      "logps/rejected": -0.3329126834869385,
      "loss": 0.914,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.4314444661140442,
      "rewards/margins": 0.06792456656694412,
      "rewards/rejected": -0.4993689954280853,
      "step": 30
    },
    {
      "epoch": 0.03229061553985873,
      "grad_norm": 0.055584829300642014,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 13.93278980255127,
      "logits/rejected": 14.886846542358398,
      "logps/chosen": -0.28155946731567383,
      "logps/rejected": -0.3678051829338074,
      "loss": 0.9273,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.42233920097351074,
      "rewards/margins": 0.12936851382255554,
      "rewards/rejected": -0.5517078042030334,
      "step": 40
    },
    {
      "epoch": 0.04036326942482341,
      "grad_norm": 0.07997103035449982,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.172693252563477,
      "logits/rejected": 14.742494583129883,
      "logps/chosen": -0.28225988149642944,
      "logps/rejected": -0.34329456090927124,
      "loss": 0.9081,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.42338982224464417,
      "rewards/margins": 0.0915520042181015,
      "rewards/rejected": -0.5149418115615845,
      "step": 50
    },
    {
      "epoch": 0.04036326942482341,
      "eval_logits/chosen": 14.200166702270508,
      "eval_logits/rejected": 14.817726135253906,
      "eval_logps/chosen": -0.2625390887260437,
      "eval_logps/rejected": -0.3458769917488098,
      "eval_loss": 0.9080610275268555,
      "eval_rewards/accuracies": 0.5544554591178894,
      "eval_rewards/chosen": -0.39380866289138794,
      "eval_rewards/margins": 0.1250067949295044,
      "eval_rewards/rejected": -0.5188154578208923,
      "eval_runtime": 29.8098,
      "eval_samples_per_second": 26.87,
      "eval_steps_per_second": 3.388,
      "step": 50
    },
    {
      "epoch": 0.04843592330978809,
      "grad_norm": 0.06322095543146133,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.008010864257812,
      "logits/rejected": 14.975939750671387,
      "logps/chosen": -0.2697351574897766,
      "logps/rejected": -0.3445274233818054,
      "loss": 0.9195,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.40460270643234253,
      "rewards/margins": 0.1121884360909462,
      "rewards/rejected": -0.5167912244796753,
      "step": 60
    },
    {
      "epoch": 0.056508577194752774,
      "grad_norm": 0.12975476682186127,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 14.213134765625,
      "logits/rejected": 14.889978408813477,
      "logps/chosen": -0.2839818596839905,
      "logps/rejected": -0.3382417559623718,
      "loss": 0.9241,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4259727895259857,
      "rewards/margins": 0.08138985931873322,
      "rewards/rejected": -0.5073626637458801,
      "step": 70
    },
    {
      "epoch": 0.06458123107971746,
      "grad_norm": 0.08352109789848328,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 14.395146369934082,
      "logits/rejected": 14.900177001953125,
      "logps/chosen": -0.27067264914512634,
      "logps/rejected": -0.3516673743724823,
      "loss": 0.9251,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.4060089588165283,
      "rewards/margins": 0.12149210274219513,
      "rewards/rejected": -0.5275009870529175,
      "step": 80
    },
    {
      "epoch": 0.07265388496468214,
      "grad_norm": 0.0793827548623085,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 13.905145645141602,
      "logits/rejected": 14.897878646850586,
      "logps/chosen": -0.26814645528793335,
      "logps/rejected": -0.38061466813087463,
      "loss": 0.9055,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.4022197127342224,
      "rewards/margins": 0.16870227456092834,
      "rewards/rejected": -0.5709219574928284,
      "step": 90
    },
    {
      "epoch": 0.08072653884964683,
      "grad_norm": 0.09212008118629456,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 14.11627197265625,
      "logits/rejected": 14.571977615356445,
      "logps/chosen": -0.3130624294281006,
      "logps/rejected": -0.35073375701904297,
      "loss": 0.911,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.46959367394447327,
      "rewards/margins": 0.05650699883699417,
      "rewards/rejected": -0.5261006951332092,
      "step": 100
    },
    {
      "epoch": 0.08072653884964683,
      "eval_logits/chosen": 13.845876693725586,
      "eval_logits/rejected": 14.490789413452148,
      "eval_logps/chosen": -0.2531408667564392,
      "eval_logps/rejected": -0.3464036285877228,
      "eval_loss": 0.8986235857009888,
      "eval_rewards/accuracies": 0.5544554591178894,
      "eval_rewards/chosen": -0.3797112703323364,
      "eval_rewards/margins": 0.13989417254924774,
      "eval_rewards/rejected": -0.5196054577827454,
      "eval_runtime": 29.0914,
      "eval_samples_per_second": 27.534,
      "eval_steps_per_second": 3.472,
      "step": 100
    },
    {
      "epoch": 0.08879919273461151,
      "grad_norm": 0.08554862439632416,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 13.416229248046875,
      "logits/rejected": 14.582674026489258,
      "logps/chosen": -0.2523443102836609,
      "logps/rejected": -0.38751405477523804,
      "loss": 0.8993,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.3785164952278137,
      "rewards/margins": 0.20275457203388214,
      "rewards/rejected": -0.5812710523605347,
      "step": 110
    },
    {
      "epoch": 0.09687184661957618,
      "grad_norm": 0.29209578037261963,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 13.874654769897461,
      "logits/rejected": 14.423624992370605,
      "logps/chosen": -0.27300480008125305,
      "logps/rejected": -0.3526575267314911,
      "loss": 0.8942,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.40950721502304077,
      "rewards/margins": 0.11947910487651825,
      "rewards/rejected": -0.5289863348007202,
      "step": 120
    },
    {
      "epoch": 0.10494450050454086,
      "grad_norm": 0.1028478816151619,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 14.030723571777344,
      "logits/rejected": 14.711235046386719,
      "logps/chosen": -0.27410784363746643,
      "logps/rejected": -0.3665519058704376,
      "loss": 0.8922,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.41116175055503845,
      "rewards/margins": 0.1386660784482956,
      "rewards/rejected": -0.5498278737068176,
      "step": 130
    },
    {
      "epoch": 0.11301715438950555,
      "grad_norm": 0.08459590375423431,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 13.477182388305664,
      "logits/rejected": 13.919464111328125,
      "logps/chosen": -0.2495063841342926,
      "logps/rejected": -0.3594816029071808,
      "loss": 0.8957,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.3742595613002777,
      "rewards/margins": 0.16496284306049347,
      "rewards/rejected": -0.5392224192619324,
      "step": 140
    },
    {
      "epoch": 0.12108980827447023,
      "grad_norm": 0.12467797845602036,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 12.801656723022461,
      "logits/rejected": 13.564155578613281,
      "logps/chosen": -0.2779986262321472,
      "logps/rejected": -0.33566445112228394,
      "loss": 0.892,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.4169979691505432,
      "rewards/margins": 0.08649872243404388,
      "rewards/rejected": -0.5034967064857483,
      "step": 150
    },
    {
      "epoch": 0.12108980827447023,
      "eval_logits/chosen": 12.169118881225586,
      "eval_logits/rejected": 13.019683837890625,
      "eval_logps/chosen": -0.25547870993614197,
      "eval_logps/rejected": -0.37277930974960327,
      "eval_loss": 0.8789658546447754,
      "eval_rewards/accuracies": 0.5742574334144592,
      "eval_rewards/chosen": -0.38321802020072937,
      "eval_rewards/margins": 0.17595094442367554,
      "eval_rewards/rejected": -0.5591689944267273,
      "eval_runtime": 29.0961,
      "eval_samples_per_second": 27.529,
      "eval_steps_per_second": 3.471,
      "step": 150
    },
    {
      "epoch": 0.12916246215943492,
      "grad_norm": 0.1712370663881302,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 11.972528457641602,
      "logits/rejected": 12.684088706970215,
      "logps/chosen": -0.28340521454811096,
      "logps/rejected": -0.3864063024520874,
      "loss": 0.8789,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.42510780692100525,
      "rewards/margins": 0.15450166165828705,
      "rewards/rejected": -0.5796095132827759,
      "step": 160
    },
    {
      "epoch": 0.13723511604439959,
      "grad_norm": 0.21750673651695251,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 11.93881607055664,
      "logits/rejected": 12.458230972290039,
      "logps/chosen": -0.274599552154541,
      "logps/rejected": -0.3882916271686554,
      "loss": 0.8857,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.4118993282318115,
      "rewards/margins": 0.1705380380153656,
      "rewards/rejected": -0.5824374556541443,
      "step": 170
    },
    {
      "epoch": 0.14530776992936428,
      "grad_norm": 0.16000741720199585,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 9.631464958190918,
      "logits/rejected": 10.772969245910645,
      "logps/chosen": -0.287865549325943,
      "logps/rejected": -0.4817379415035248,
      "loss": 0.8699,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.4317983090877533,
      "rewards/margins": 0.29080861806869507,
      "rewards/rejected": -0.722606897354126,
      "step": 180
    },
    {
      "epoch": 0.15338042381432895,
      "grad_norm": 0.46293890476226807,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 9.081937789916992,
      "logits/rejected": 10.024572372436523,
      "logps/chosen": -0.2941994071006775,
      "logps/rejected": -0.4772109389305115,
      "loss": 0.8565,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.44129911065101624,
      "rewards/margins": 0.2745172679424286,
      "rewards/rejected": -0.7158163785934448,
      "step": 190
    },
    {
      "epoch": 0.16145307769929365,
      "grad_norm": 0.24658434092998505,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 7.767125606536865,
      "logits/rejected": 8.009145736694336,
      "logps/chosen": -0.32877305150032043,
      "logps/rejected": -0.47733697295188904,
      "loss": 0.8482,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.49315959215164185,
      "rewards/margins": 0.2228458821773529,
      "rewards/rejected": -0.7160054445266724,
      "step": 200
    },
    {
      "epoch": 0.16145307769929365,
      "eval_logits/chosen": 6.716187953948975,
      "eval_logits/rejected": 7.248146057128906,
      "eval_logps/chosen": -0.30652713775634766,
      "eval_logps/rejected": -0.5056277513504028,
      "eval_loss": 0.8243693113327026,
      "eval_rewards/accuracies": 0.603960394859314,
      "eval_rewards/chosen": -0.4597907066345215,
      "eval_rewards/margins": 0.2986510097980499,
      "eval_rewards/rejected": -0.7584417462348938,
      "eval_runtime": 29.0989,
      "eval_samples_per_second": 27.527,
      "eval_steps_per_second": 3.471,
      "step": 200
    },
    {
      "epoch": 0.16952573158425832,
      "grad_norm": 0.40796443819999695,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 5.932644844055176,
      "logits/rejected": 6.521953582763672,
      "logps/chosen": -0.32976508140563965,
      "logps/rejected": -0.5628186464309692,
      "loss": 0.7988,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.4946475923061371,
      "rewards/margins": 0.3495803475379944,
      "rewards/rejected": -0.8442279696464539,
      "step": 210
    },
    {
      "epoch": 0.17759838546922302,
      "grad_norm": 0.42468318343162537,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 6.069305896759033,
      "logits/rejected": 5.8950395584106445,
      "logps/chosen": -0.37205421924591064,
      "logps/rejected": -0.6190425753593445,
      "loss": 0.796,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.5580812692642212,
      "rewards/margins": 0.3704826235771179,
      "rewards/rejected": -0.9285639524459839,
      "step": 220
    },
    {
      "epoch": 0.1856710393541877,
      "grad_norm": 0.4138280153274536,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 3.5271706581115723,
      "logits/rejected": 3.363534927368164,
      "logps/chosen": -0.41677650809288025,
      "logps/rejected": -0.7121980786323547,
      "loss": 0.7457,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.6251648664474487,
      "rewards/margins": 0.4431324005126953,
      "rewards/rejected": -1.0682971477508545,
      "step": 230
    },
    {
      "epoch": 0.19374369323915236,
      "grad_norm": 1.4327284097671509,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 2.296924114227295,
      "logits/rejected": 1.6135867834091187,
      "logps/chosen": -0.506227970123291,
      "logps/rejected": -0.8080593943595886,
      "loss": 0.7453,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.7593418955802917,
      "rewards/margins": 0.4527471661567688,
      "rewards/rejected": -1.21208918094635,
      "step": 240
    },
    {
      "epoch": 0.20181634712411706,
      "grad_norm": 0.4413074553012848,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 1.4450469017028809,
      "logits/rejected": 0.40727120637893677,
      "logps/chosen": -0.4749869406223297,
      "logps/rejected": -0.9967275857925415,
      "loss": 0.704,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.712480366230011,
      "rewards/margins": 0.7826108932495117,
      "rewards/rejected": -1.4950913190841675,
      "step": 250
    },
    {
      "epoch": 0.20181634712411706,
      "eval_logits/chosen": 1.4614256620407104,
      "eval_logits/rejected": 0.6895493865013123,
      "eval_logps/chosen": -0.5038881301879883,
      "eval_logps/rejected": -0.973581850528717,
      "eval_loss": 0.6982013583183289,
      "eval_rewards/accuracies": 0.6138613820075989,
      "eval_rewards/chosen": -0.7558321952819824,
      "eval_rewards/margins": 0.7045406699180603,
      "eval_rewards/rejected": -1.460372805595398,
      "eval_runtime": 29.0993,
      "eval_samples_per_second": 27.526,
      "eval_steps_per_second": 3.471,
      "step": 250
    },
    {
      "epoch": 0.20988900100908173,
      "grad_norm": 0.734251856803894,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 2.4274039268493652,
      "logits/rejected": 1.498230218887329,
      "logps/chosen": -0.5549699068069458,
      "logps/rejected": -0.9348627328872681,
      "loss": 0.7149,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.8324548602104187,
      "rewards/margins": 0.5698392987251282,
      "rewards/rejected": -1.4022941589355469,
      "step": 260
    },
    {
      "epoch": 0.21796165489404642,
      "grad_norm": 0.4445085823535919,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 2.042858600616455,
      "logits/rejected": 1.1695036888122559,
      "logps/chosen": -0.5573975443840027,
      "logps/rejected": -1.15065598487854,
      "loss": 0.6811,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.8360962867736816,
      "rewards/margins": 0.889887809753418,
      "rewards/rejected": -1.72598397731781,
      "step": 270
    },
    {
      "epoch": 0.2260343087790111,
      "grad_norm": 0.5875476598739624,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 1.3029518127441406,
      "logits/rejected": 0.3808741271495819,
      "logps/chosen": -0.6656169891357422,
      "logps/rejected": -1.5305824279785156,
      "loss": 0.637,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.9984253644943237,
      "rewards/margins": 1.2974482774734497,
      "rewards/rejected": -2.2958736419677734,
      "step": 280
    },
    {
      "epoch": 0.2341069626639758,
      "grad_norm": 0.3814420998096466,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 2.0200212001800537,
      "logits/rejected": 1.2681838274002075,
      "logps/chosen": -0.6776013970375061,
      "logps/rejected": -1.3369777202606201,
      "loss": 0.63,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -1.016402006149292,
      "rewards/margins": 0.9890643358230591,
      "rewards/rejected": -2.0054664611816406,
      "step": 290
    },
    {
      "epoch": 0.24217961654894046,
      "grad_norm": 0.6442322731018066,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 1.6268196105957031,
      "logits/rejected": 0.4954712390899658,
      "logps/chosen": -0.8117648363113403,
      "logps/rejected": -1.7705228328704834,
      "loss": 0.6104,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.2176473140716553,
      "rewards/margins": 1.4381370544433594,
      "rewards/rejected": -2.6557843685150146,
      "step": 300
    },
    {
      "epoch": 0.24217961654894046,
      "eval_logits/chosen": 1.4003607034683228,
      "eval_logits/rejected": 0.46628010272979736,
      "eval_logps/chosen": -0.7612115740776062,
      "eval_logps/rejected": -1.6895866394042969,
      "eval_loss": 0.5781419277191162,
      "eval_rewards/accuracies": 0.6534653306007385,
      "eval_rewards/chosen": -1.141817331314087,
      "eval_rewards/margins": 1.392562747001648,
      "eval_rewards/rejected": -2.5343799591064453,
      "eval_runtime": 29.102,
      "eval_samples_per_second": 27.524,
      "eval_steps_per_second": 3.471,
      "step": 300
    },
    {
      "epoch": 0.25025227043390513,
      "grad_norm": 0.7889758944511414,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": 1.1702089309692383,
      "logits/rejected": 0.354276180267334,
      "logps/chosen": -0.8091352581977844,
      "logps/rejected": -1.944819450378418,
      "loss": 0.5807,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.2137027978897095,
      "rewards/margins": 1.7035261392593384,
      "rewards/rejected": -2.9172286987304688,
      "step": 310
    },
    {
      "epoch": 0.25832492431886983,
      "grad_norm": 1.410145878791809,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": 2.0981459617614746,
      "logits/rejected": 1.2289329767227173,
      "logps/chosen": -0.8151634931564331,
      "logps/rejected": -1.977259874343872,
      "loss": 0.5007,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.222745418548584,
      "rewards/margins": 1.7431443929672241,
      "rewards/rejected": -2.9658896923065186,
      "step": 320
    },
    {
      "epoch": 0.26639757820383453,
      "grad_norm": 0.5564689040184021,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": 1.7921768426895142,
      "logits/rejected": 0.7705962061882019,
      "logps/chosen": -0.9244564771652222,
      "logps/rejected": -2.5274672508239746,
      "loss": 0.4757,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.3866846561431885,
      "rewards/margins": 2.4045166969299316,
      "rewards/rejected": -3.791201114654541,
      "step": 330
    },
    {
      "epoch": 0.27447023208879917,
      "grad_norm": 0.7554243803024292,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": 1.947997808456421,
      "logits/rejected": 0.9661592245101929,
      "logps/chosen": -0.9154227375984192,
      "logps/rejected": -3.0766491889953613,
      "loss": 0.4581,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.3731342554092407,
      "rewards/margins": 3.24183988571167,
      "rewards/rejected": -4.614974021911621,
      "step": 340
    },
    {
      "epoch": 0.28254288597376387,
      "grad_norm": 2.936426877975464,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": 1.2846364974975586,
      "logits/rejected": 0.9968118667602539,
      "logps/chosen": -1.0272481441497803,
      "logps/rejected": -2.6980624198913574,
      "loss": 0.4989,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.54087233543396,
      "rewards/margins": 2.506221294403076,
      "rewards/rejected": -4.047093868255615,
      "step": 350
    },
    {
      "epoch": 0.28254288597376387,
      "eval_logits/chosen": 1.841333031654358,
      "eval_logits/rejected": 1.1651691198349,
      "eval_logps/chosen": -0.9644113183021545,
      "eval_logps/rejected": -2.631535053253174,
      "eval_loss": 0.44154587388038635,
      "eval_rewards/accuracies": 0.6732673048973083,
      "eval_rewards/chosen": -1.4466170072555542,
      "eval_rewards/margins": 2.500684976577759,
      "eval_rewards/rejected": -3.9473025798797607,
      "eval_runtime": 29.1062,
      "eval_samples_per_second": 27.52,
      "eval_steps_per_second": 3.47,
      "step": 350
    },
    {
      "epoch": 0.29061553985872857,
      "grad_norm": 0.49919044971466064,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": 1.9874728918075562,
      "logits/rejected": 1.464611291885376,
      "logps/chosen": -0.9963932037353516,
      "logps/rejected": -2.8804218769073486,
      "loss": 0.4373,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.4945898056030273,
      "rewards/margins": 2.8260433673858643,
      "rewards/rejected": -4.320633411407471,
      "step": 360
    },
    {
      "epoch": 0.29868819374369326,
      "grad_norm": 1.4991668462753296,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": 2.5421957969665527,
      "logits/rejected": 1.941922903060913,
      "logps/chosen": -1.0214306116104126,
      "logps/rejected": -3.2893283367156982,
      "loss": 0.438,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.5321458578109741,
      "rewards/margins": 3.4018466472625732,
      "rewards/rejected": -4.933992862701416,
      "step": 370
    },
    {
      "epoch": 0.3067608476286579,
      "grad_norm": 1.933100938796997,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": 1.80439031124115,
      "logits/rejected": 1.4421275854110718,
      "logps/chosen": -1.1270357370376587,
      "logps/rejected": -3.17500901222229,
      "loss": 0.4542,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.6905533075332642,
      "rewards/margins": 3.071959972381592,
      "rewards/rejected": -4.762513160705566,
      "step": 380
    },
    {
      "epoch": 0.3148335015136226,
      "grad_norm": 3.017254590988159,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": 3.4090209007263184,
      "logits/rejected": 2.784639835357666,
      "logps/chosen": -1.1713608503341675,
      "logps/rejected": -3.326244831085205,
      "loss": 0.4816,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.757041335105896,
      "rewards/margins": 3.232325792312622,
      "rewards/rejected": -4.9893670082092285,
      "step": 390
    },
    {
      "epoch": 0.3229061553985873,
      "grad_norm": 0.5897337198257446,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": 3.6071503162384033,
      "logits/rejected": 2.738395929336548,
      "logps/chosen": -1.133429765701294,
      "logps/rejected": -3.509474277496338,
      "loss": 0.4431,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.7001447677612305,
      "rewards/margins": 3.5640671253204346,
      "rewards/rejected": -5.264212131500244,
      "step": 400
    },
    {
      "epoch": 0.3229061553985873,
      "eval_logits/chosen": 2.893691062927246,
      "eval_logits/rejected": 2.259718656539917,
      "eval_logps/chosen": -1.0705492496490479,
      "eval_logps/rejected": -2.935904026031494,
      "eval_loss": 0.4156961143016815,
      "eval_rewards/accuracies": 0.6732673048973083,
      "eval_rewards/chosen": -1.6058237552642822,
      "eval_rewards/margins": 2.798032283782959,
      "eval_rewards/rejected": -4.403855800628662,
      "eval_runtime": 29.186,
      "eval_samples_per_second": 27.445,
      "eval_steps_per_second": 3.461,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.309474350044283e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}