{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7104795737122558,
  "eval_steps": 50,
  "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008880994671403197,
      "grad_norm": 0.04571289196610451,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.56671142578125,
      "logits/rejected": 15.112574577331543,
      "logps/chosen": -0.26506316661834717,
      "logps/rejected": -0.3439488410949707,
      "loss": 0.9267,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.39759472012519836,
      "rewards/margins": 0.11832849681377411,
      "rewards/rejected": -0.5159232020378113,
      "step": 10
    },
    {
      "epoch": 0.017761989342806393,
      "grad_norm": 0.0512714721262455,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.376543045043945,
      "logits/rejected": 14.862703323364258,
      "logps/chosen": -0.2708089351654053,
      "logps/rejected": -0.32412824034690857,
      "loss": 0.936,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.4062133729457855,
      "rewards/margins": 0.07997899502515793,
      "rewards/rejected": -0.4861923158168793,
      "step": 20
    },
    {
      "epoch": 0.02664298401420959,
      "grad_norm": 0.058383647352457047,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.208717346191406,
      "logits/rejected": 15.370651245117188,
      "logps/chosen": -0.28206294775009155,
      "logps/rejected": -0.38387423753738403,
      "loss": 0.9215,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.42309442162513733,
      "rewards/margins": 0.15271687507629395,
      "rewards/rejected": -0.5758112668991089,
      "step": 30
    },
    {
      "epoch": 0.035523978685612786,
      "grad_norm": 0.06262075155973434,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.768765449523926,
      "logits/rejected": 15.169331550598145,
      "logps/chosen": -0.27857059240341187,
      "logps/rejected": -0.3388269543647766,
      "loss": 0.9386,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.4178559184074402,
      "rewards/margins": 0.09038447588682175,
      "rewards/rejected": -0.5082404017448425,
      "step": 40
    },
    {
      "epoch": 0.04440497335701599,
      "grad_norm": 0.06259036809206009,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.950456619262695,
      "logits/rejected": 15.232122421264648,
      "logps/chosen": -0.2961367070674896,
      "logps/rejected": -0.3322262465953827,
      "loss": 0.9317,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.44420504570007324,
      "rewards/margins": 0.054134320467710495,
      "rewards/rejected": -0.4983394145965576,
      "step": 50
    },
    {
      "epoch": 0.04440497335701599,
      "eval_logits/chosen": 14.56529426574707,
      "eval_logits/rejected": 14.895020484924316,
      "eval_logps/chosen": -0.2806546986103058,
      "eval_logps/rejected": -0.3486972451210022,
      "eval_loss": 0.9381324052810669,
      "eval_rewards/accuracies": 0.5274725556373596,
      "eval_rewards/chosen": -0.4209820330142975,
      "eval_rewards/margins": 0.10206379741430283,
      "eval_rewards/rejected": -0.5230458974838257,
      "eval_runtime": 25.2574,
      "eval_samples_per_second": 28.823,
      "eval_steps_per_second": 3.603,
      "step": 50
    },
    {
      "epoch": 0.05328596802841918,
      "grad_norm": 0.07301533967256546,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.195574760437012,
      "logits/rejected": 15.173194885253906,
      "logps/chosen": -0.2693648636341095,
      "logps/rejected": -0.33997970819473267,
      "loss": 0.9319,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.40404725074768066,
      "rewards/margins": 0.10592226684093475,
      "rewards/rejected": -0.5099694728851318,
      "step": 60
    },
    {
      "epoch": 0.06216696269982238,
      "grad_norm": 0.0659889206290245,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 14.910173416137695,
      "logits/rejected": 15.361429214477539,
      "logps/chosen": -0.28456225991249084,
      "logps/rejected": -0.3702812194824219,
      "loss": 0.9185,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.42684346437454224,
      "rewards/margins": 0.12857840955257416,
      "rewards/rejected": -0.5554218292236328,
      "step": 70
    },
    {
      "epoch": 0.07104795737122557,
      "grad_norm": 0.05815625935792923,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 14.407182693481445,
      "logits/rejected": 14.948204040527344,
      "logps/chosen": -0.292889267206192,
      "logps/rejected": -0.3381648063659668,
      "loss": 0.9388,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.43933385610580444,
      "rewards/margins": 0.06791339069604874,
      "rewards/rejected": -0.5072472095489502,
      "step": 80
    },
    {
      "epoch": 0.07992895204262877,
      "grad_norm": 0.06627190113067627,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 14.622471809387207,
      "logits/rejected": 15.167770385742188,
      "logps/chosen": -0.28155821561813354,
      "logps/rejected": -0.33633899688720703,
      "loss": 0.9256,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4223373532295227,
      "rewards/margins": 0.08217118680477142,
      "rewards/rejected": -0.5045084953308105,
      "step": 90
    },
    {
      "epoch": 0.08880994671403197,
      "grad_norm": 0.0724545568227768,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 14.289724349975586,
      "logits/rejected": 14.882037162780762,
      "logps/chosen": -0.2791440486907959,
      "logps/rejected": -0.35329627990722656,
      "loss": 0.9374,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.41871610283851624,
      "rewards/margins": 0.11122839152812958,
      "rewards/rejected": -0.5299445390701294,
      "step": 100
    },
    {
      "epoch": 0.08880994671403197,
      "eval_logits/chosen": 14.337930679321289,
      "eval_logits/rejected": 14.689269065856934,
      "eval_logps/chosen": -0.2726942300796509,
      "eval_logps/rejected": -0.34668418765068054,
      "eval_loss": 0.9302808046340942,
      "eval_rewards/accuracies": 0.5384615659713745,
      "eval_rewards/chosen": -0.40904131531715393,
      "eval_rewards/margins": 0.11098497360944748,
      "eval_rewards/rejected": -0.5200263261795044,
      "eval_runtime": 25.2585,
      "eval_samples_per_second": 28.822,
      "eval_steps_per_second": 3.603,
      "step": 100
    },
    {
      "epoch": 0.09769094138543517,
      "grad_norm": 0.08156246691942215,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 14.499124526977539,
      "logits/rejected": 14.916313171386719,
      "logps/chosen": -0.2798352837562561,
      "logps/rejected": -0.3477734327316284,
      "loss": 0.9243,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4197530150413513,
      "rewards/margins": 0.10190720856189728,
      "rewards/rejected": -0.5216602087020874,
      "step": 110
    },
    {
      "epoch": 0.10657193605683836,
      "grad_norm": 0.08161844313144684,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 13.595013618469238,
      "logits/rejected": 14.390353202819824,
      "logps/chosen": -0.26682502031326294,
      "logps/rejected": -0.3336995542049408,
      "loss": 0.9123,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.400237500667572,
      "rewards/margins": 0.10031183809041977,
      "rewards/rejected": -0.5005493760108948,
      "step": 120
    },
    {
      "epoch": 0.11545293072824156,
      "grad_norm": 0.28624778985977173,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 13.711044311523438,
      "logits/rejected": 14.558542251586914,
      "logps/chosen": -0.27874043583869934,
      "logps/rejected": -0.3582325279712677,
      "loss": 0.9163,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.41811060905456543,
      "rewards/margins": 0.11923813819885254,
      "rewards/rejected": -0.537348747253418,
      "step": 130
    },
    {
      "epoch": 0.12433392539964476,
      "grad_norm": 0.10971464216709137,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 14.18798828125,
      "logits/rejected": 14.993026733398438,
      "logps/chosen": -0.2750400900840759,
      "logps/rejected": -0.39451608061790466,
      "loss": 0.9098,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.4125601351261139,
      "rewards/margins": 0.17921395599842072,
      "rewards/rejected": -0.5917741060256958,
      "step": 140
    },
    {
      "epoch": 0.13321492007104796,
      "grad_norm": 0.09321591258049011,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 12.775139808654785,
      "logits/rejected": 13.751996994018555,
      "logps/chosen": -0.28446996212005615,
      "logps/rejected": -0.36404967308044434,
      "loss": 0.9104,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.42670494318008423,
      "rewards/margins": 0.11936960369348526,
      "rewards/rejected": -0.5460745096206665,
      "step": 150
    },
    {
      "epoch": 0.13321492007104796,
      "eval_logits/chosen": 12.97266960144043,
      "eval_logits/rejected": 13.47339916229248,
      "eval_logps/chosen": -0.27297571301460266,
      "eval_logps/rejected": -0.36854612827301025,
      "eval_loss": 0.9143257737159729,
      "eval_rewards/accuracies": 0.5824176073074341,
      "eval_rewards/chosen": -0.4094635546207428,
      "eval_rewards/margins": 0.14335563778877258,
      "eval_rewards/rejected": -0.5528191924095154,
      "eval_runtime": 25.2406,
      "eval_samples_per_second": 28.842,
      "eval_steps_per_second": 3.605,
      "step": 150
    },
    {
      "epoch": 0.14209591474245115,
      "grad_norm": 0.11029861867427826,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 12.677947998046875,
      "logits/rejected": 13.396716117858887,
      "logps/chosen": -0.2631794512271881,
      "logps/rejected": -0.37102141976356506,
      "loss": 0.9051,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.3947691321372986,
      "rewards/margins": 0.161762997508049,
      "rewards/rejected": -0.5565321445465088,
      "step": 160
    },
    {
      "epoch": 0.15097690941385436,
      "grad_norm": 0.15728294849395752,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 12.620219230651855,
      "logits/rejected": 13.189640998840332,
      "logps/chosen": -0.2947639524936676,
      "logps/rejected": -0.3843482732772827,
      "loss": 0.8906,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.442145973443985,
      "rewards/margins": 0.13437646627426147,
      "rewards/rejected": -0.5765224099159241,
      "step": 170
    },
    {
      "epoch": 0.15985790408525755,
      "grad_norm": 0.31504154205322266,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 11.487619400024414,
      "logits/rejected": 12.33470344543457,
      "logps/chosen": -0.271095871925354,
      "logps/rejected": -0.4252637028694153,
      "loss": 0.8766,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.406643807888031,
      "rewards/margins": 0.2312517911195755,
      "rewards/rejected": -0.6378955245018005,
      "step": 180
    },
    {
      "epoch": 0.16873889875666073,
      "grad_norm": 0.19222252070903778,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 10.093737602233887,
      "logits/rejected": 10.851752281188965,
      "logps/chosen": -0.2679918110370636,
      "logps/rejected": -0.437336266040802,
      "loss": 0.884,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.4019877314567566,
      "rewards/margins": 0.2540166974067688,
      "rewards/rejected": -0.6560044288635254,
      "step": 190
    },
    {
      "epoch": 0.17761989342806395,
      "grad_norm": 0.2275688648223877,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 9.483477592468262,
      "logits/rejected": 10.106366157531738,
      "logps/chosen": -0.2957404553890228,
      "logps/rejected": -0.40739065408706665,
      "loss": 0.8767,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.4436107575893402,
      "rewards/margins": 0.16747523844242096,
      "rewards/rejected": -0.6110859513282776,
      "step": 200
    },
    {
      "epoch": 0.17761989342806395,
      "eval_logits/chosen": 8.491498947143555,
      "eval_logits/rejected": 8.999146461486816,
      "eval_logps/chosen": -0.3135836124420166,
      "eval_logps/rejected": -0.4829566180706024,
      "eval_loss": 0.8664290904998779,
      "eval_rewards/accuracies": 0.6263736486434937,
      "eval_rewards/chosen": -0.4703753888607025,
      "eval_rewards/margins": 0.2540595233440399,
      "eval_rewards/rejected": -0.7244349122047424,
      "eval_runtime": 25.2553,
      "eval_samples_per_second": 28.826,
      "eval_steps_per_second": 3.603,
      "step": 200
    },
    {
      "epoch": 0.18650088809946713,
      "grad_norm": 0.27885496616363525,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 7.234966278076172,
      "logits/rejected": 8.313450813293457,
      "logps/chosen": -0.29102542996406555,
      "logps/rejected": -0.49241799116134644,
      "loss": 0.8556,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.43653813004493713,
      "rewards/margins": 0.3020888566970825,
      "rewards/rejected": -0.7386269569396973,
      "step": 210
    },
    {
      "epoch": 0.19538188277087035,
      "grad_norm": 0.29907363653182983,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 7.907521724700928,
      "logits/rejected": 8.253190994262695,
      "logps/chosen": -0.33691853284835815,
      "logps/rejected": -0.4829257130622864,
      "loss": 0.8236,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.5053777694702148,
      "rewards/margins": 0.21901080012321472,
      "rewards/rejected": -0.724388599395752,
      "step": 220
    },
    {
      "epoch": 0.20426287744227353,
      "grad_norm": 0.282474547624588,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 6.367492198944092,
      "logits/rejected": 6.273728370666504,
      "logps/chosen": -0.3519875705242157,
      "logps/rejected": -0.5284813642501831,
      "loss": 0.8027,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.5279813408851624,
      "rewards/margins": 0.2647407650947571,
      "rewards/rejected": -0.7927221059799194,
      "step": 230
    },
    {
      "epoch": 0.21314387211367672,
      "grad_norm": 0.327765554189682,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 5.090893268585205,
      "logits/rejected": 4.768380165100098,
      "logps/chosen": -0.3851698040962219,
      "logps/rejected": -0.6464222073554993,
      "loss": 0.7898,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.5777546167373657,
      "rewards/margins": 0.391878604888916,
      "rewards/rejected": -0.9696332812309265,
      "step": 240
    },
    {
      "epoch": 0.22202486678507993,
      "grad_norm": 0.4895865321159363,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 4.056812286376953,
      "logits/rejected": 3.723601818084717,
      "logps/chosen": -0.4400455951690674,
      "logps/rejected": -0.7731422781944275,
      "loss": 0.7626,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.6600683927536011,
      "rewards/margins": 0.49964505434036255,
      "rewards/rejected": -1.1597135066986084,
      "step": 250
    },
    {
      "epoch": 0.22202486678507993,
      "eval_logits/chosen": 2.420060396194458,
      "eval_logits/rejected": 2.1626052856445312,
      "eval_logps/chosen": -0.4724067151546478,
      "eval_logps/rejected": -0.8418064117431641,
      "eval_loss": 0.7631083130836487,
      "eval_rewards/accuracies": 0.6483516693115234,
      "eval_rewards/chosen": -0.7086100578308105,
      "eval_rewards/margins": 0.5540997385978699,
      "eval_rewards/rejected": -1.2627097368240356,
      "eval_runtime": 25.2418,
      "eval_samples_per_second": 28.841,
      "eval_steps_per_second": 3.605,
      "step": 250
    },
    {
      "epoch": 0.23090586145648312,
      "grad_norm": 0.46291017532348633,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 1.5297390222549438,
      "logits/rejected": 1.1381648778915405,
      "logps/chosen": -0.4418027997016907,
      "logps/rejected": -1.0542564392089844,
      "loss": 0.7026,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.6627041697502136,
      "rewards/margins": 0.9186803698539734,
      "rewards/rejected": -1.5813844203948975,
      "step": 260
    },
    {
      "epoch": 0.23978685612788633,
      "grad_norm": 0.9783313870429993,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 2.8311033248901367,
      "logits/rejected": 1.9742711782455444,
      "logps/chosen": -0.5430587530136108,
      "logps/rejected": -0.9841039776802063,
      "loss": 0.7317,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.814588189125061,
      "rewards/margins": 0.6615679860115051,
      "rewards/rejected": -1.4761559963226318,
      "step": 270
    },
    {
      "epoch": 0.24866785079928952,
      "grad_norm": 2.102562189102173,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 1.8241952657699585,
      "logits/rejected": 0.8777934312820435,
      "logps/chosen": -0.5624039769172668,
      "logps/rejected": -1.1460126638412476,
      "loss": 0.6887,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.8436058163642883,
      "rewards/margins": 0.8754131197929382,
      "rewards/rejected": -1.7190189361572266,
      "step": 280
    },
    {
      "epoch": 0.25754884547069273,
      "grad_norm": 0.9813026189804077,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 2.370732069015503,
      "logits/rejected": 1.4697134494781494,
      "logps/chosen": -0.6739786863327026,
      "logps/rejected": -1.6581566333770752,
      "loss": 0.5695,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.0109679698944092,
      "rewards/margins": 1.476266622543335,
      "rewards/rejected": -2.487234592437744,
      "step": 290
    },
    {
      "epoch": 0.2664298401420959,
      "grad_norm": 2.187314510345459,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 1.6230781078338623,
      "logits/rejected": 0.5460122227668762,
      "logps/chosen": -0.6433733701705933,
      "logps/rejected": -2.1001811027526855,
      "loss": 0.5366,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.9650601148605347,
      "rewards/margins": 2.185211658477783,
      "rewards/rejected": -3.1502718925476074,
      "step": 300
    },
    {
      "epoch": 0.2664298401420959,
      "eval_logits/chosen": 1.4087599515914917,
      "eval_logits/rejected": 0.7888947129249573,
      "eval_logps/chosen": -0.7579545974731445,
      "eval_logps/rejected": -2.0049116611480713,
      "eval_loss": 0.551510214805603,
      "eval_rewards/accuracies": 0.6813187003135681,
      "eval_rewards/chosen": -1.1369318962097168,
      "eval_rewards/margins": 1.8704355955123901,
      "eval_rewards/rejected": -3.0073673725128174,
      "eval_runtime": 25.2647,
      "eval_samples_per_second": 28.815,
      "eval_steps_per_second": 3.602,
      "step": 300
    },
    {
      "epoch": 0.2753108348134991,
      "grad_norm": 0.7035408616065979,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": 1.5831315517425537,
      "logits/rejected": 0.46730250120162964,
      "logps/chosen": -0.7262418866157532,
      "logps/rejected": -2.1209158897399902,
      "loss": 0.5524,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.0893628597259521,
      "rewards/margins": 2.092010974884033,
      "rewards/rejected": -3.1813735961914062,
      "step": 310
    },
    {
      "epoch": 0.2841918294849023,
      "grad_norm": 0.5678634643554688,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": 1.265734076499939,
      "logits/rejected": 0.7576489448547363,
      "logps/chosen": -0.7938942313194275,
      "logps/rejected": -2.3495612144470215,
      "loss": 0.5233,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.1908413171768188,
      "rewards/margins": 2.333500385284424,
      "rewards/rejected": -3.5243420600891113,
      "step": 320
    },
    {
      "epoch": 0.29307282415630553,
      "grad_norm": 1.1373224258422852,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": 2.977414846420288,
      "logits/rejected": 2.1573710441589355,
      "logps/chosen": -0.8513160943984985,
      "logps/rejected": -2.4125566482543945,
      "loss": 0.556,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.2769742012023926,
      "rewards/margins": 2.341860771179199,
      "rewards/rejected": -3.6188347339630127,
      "step": 330
    },
    {
      "epoch": 0.3019538188277087,
      "grad_norm": 4.7876176834106445,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": 2.4026589393615723,
      "logits/rejected": 1.207395315170288,
      "logps/chosen": -0.8529679179191589,
      "logps/rejected": -2.456879138946533,
      "loss": 0.5592,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.279451847076416,
      "rewards/margins": 2.4058666229248047,
      "rewards/rejected": -3.6853187084198,
      "step": 340
    },
    {
      "epoch": 0.3108348134991119,
      "grad_norm": 0.5053763389587402,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": 2.434265375137329,
      "logits/rejected": 1.2504141330718994,
      "logps/chosen": -0.9489291310310364,
      "logps/rejected": -2.8521530628204346,
      "loss": 0.4737,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.423393726348877,
      "rewards/margins": 2.8548355102539062,
      "rewards/rejected": -4.278229236602783,
      "step": 350
    },
    {
      "epoch": 0.3108348134991119,
      "eval_logits/chosen": 1.6632592678070068,
      "eval_logits/rejected": 1.235045075416565,
      "eval_logps/chosen": -1.0692518949508667,
      "eval_logps/rejected": -2.7428486347198486,
      "eval_loss": 0.5021397471427917,
      "eval_rewards/accuracies": 0.692307710647583,
      "eval_rewards/chosen": -1.6038777828216553,
      "eval_rewards/margins": 2.510395050048828,
      "eval_rewards/rejected": -4.1142730712890625,
      "eval_runtime": 25.2582,
      "eval_samples_per_second": 28.822,
      "eval_steps_per_second": 3.603,
      "step": 350
    },
    {
      "epoch": 0.3197158081705151,
      "grad_norm": 0.8040274381637573,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": 1.187036395072937,
      "logits/rejected": 0.4290788769721985,
      "logps/chosen": -1.1015206575393677,
      "logps/rejected": -2.919748544692993,
      "loss": 0.489,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.6522810459136963,
      "rewards/margins": 2.727341651916504,
      "rewards/rejected": -4.379622459411621,
      "step": 360
    },
    {
      "epoch": 0.3285968028419183,
      "grad_norm": 0.9299562573432922,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": 2.5852127075195312,
      "logits/rejected": 2.0419259071350098,
      "logps/chosen": -1.1498607397079468,
      "logps/rejected": -3.0336194038391113,
      "loss": 0.4812,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.724791169166565,
      "rewards/margins": 2.8256375789642334,
      "rewards/rejected": -4.55042839050293,
      "step": 370
    },
    {
      "epoch": 0.33747779751332146,
      "grad_norm": 1.7739671468734741,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": 2.246245861053467,
      "logits/rejected": 1.5551975965499878,
      "logps/chosen": -1.254900574684143,
      "logps/rejected": -3.206178665161133,
      "loss": 0.4651,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.8823509216308594,
      "rewards/margins": 2.926917552947998,
      "rewards/rejected": -4.809267997741699,
      "step": 380
    },
    {
      "epoch": 0.3463587921847247,
      "grad_norm": 4.380665302276611,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": 3.0754549503326416,
      "logits/rejected": 2.622124433517456,
      "logps/chosen": -1.9250037670135498,
      "logps/rejected": -3.69482421875,
      "loss": 0.4292,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -2.8875060081481934,
      "rewards/margins": 2.6547305583953857,
      "rewards/rejected": -5.542236328125,
      "step": 390
    },
    {
      "epoch": 0.3552397868561279,
      "grad_norm": 1.5087212324142456,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": 2.2718021869659424,
      "logits/rejected": 1.8861210346221924,
      "logps/chosen": -2.4473955631256104,
      "logps/rejected": -4.387387752532959,
      "loss": 0.3902,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.671093702316284,
      "rewards/margins": 2.9099888801574707,
      "rewards/rejected": -6.581082344055176,
      "step": 400
    },
    {
      "epoch": 0.3552397868561279,
      "eval_logits/chosen": 1.759078860282898,
      "eval_logits/rejected": 1.5246928930282593,
      "eval_logps/chosen": -2.720665454864502,
      "eval_logps/rejected": -4.613493919372559,
      "eval_loss": 0.4054907560348511,
      "eval_rewards/accuracies": 0.8791208863258362,
      "eval_rewards/chosen": -4.080998420715332,
      "eval_rewards/margins": 2.839242696762085,
      "eval_rewards/rejected": -6.920241355895996,
      "eval_runtime": 25.2363,
      "eval_samples_per_second": 28.847,
      "eval_steps_per_second": 3.606,
      "step": 400
    },
    {
      "epoch": 0.3641207815275311,
      "grad_norm": 6.079421043395996,
      "learning_rate": 4.133551509975264e-06,
      "logits/chosen": 1.8841949701309204,
      "logits/rejected": 1.3479797840118408,
      "logps/chosen": -2.517265796661377,
      "logps/rejected": -4.453648567199707,
      "loss": 0.3977,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -3.7758986949920654,
      "rewards/margins": 2.9045748710632324,
      "rewards/rejected": -6.680473327636719,
      "step": 410
    },
    {
      "epoch": 0.37300177619893427,
      "grad_norm": 3.0998194217681885,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": 1.6409276723861694,
      "logits/rejected": 1.2141990661621094,
      "logps/chosen": -2.2561168670654297,
      "logps/rejected": -4.470211029052734,
      "loss": 0.3527,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -3.3841750621795654,
      "rewards/margins": 3.321141004562378,
      "rewards/rejected": -6.70531702041626,
      "step": 420
    },
    {
      "epoch": 0.38188277087033745,
      "grad_norm": 6.982161045074463,
      "learning_rate": 4.052869450695776e-06,
      "logits/chosen": 2.835188388824463,
      "logits/rejected": 2.3657329082489014,
      "logps/chosen": -2.8557300567626953,
      "logps/rejected": -5.075521469116211,
      "loss": 0.387,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -4.283595085144043,
      "rewards/margins": 3.3296875953674316,
      "rewards/rejected": -7.613282680511475,
      "step": 430
    },
    {
      "epoch": 0.3907637655417407,
      "grad_norm": 2.139338970184326,
      "learning_rate": 4.011497787155938e-06,
      "logits/chosen": 2.126509189605713,
      "logits/rejected": 1.459567904472351,
      "logps/chosen": -3.1412863731384277,
      "logps/rejected": -5.423466682434082,
      "loss": 0.3611,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -4.711928844451904,
      "rewards/margins": 3.4232699871063232,
      "rewards/rejected": -8.135198593139648,
      "step": 440
    },
    {
      "epoch": 0.3996447602131439,
      "grad_norm": 1.7899377346038818,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": 2.4551379680633545,
      "logits/rejected": 2.0784289836883545,
      "logps/chosen": -3.098043203353882,
      "logps/rejected": -5.300747871398926,
      "loss": 0.354,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -4.647065162658691,
      "rewards/margins": 3.3040566444396973,
      "rewards/rejected": -7.951122283935547,
      "step": 450
    },
    {
      "epoch": 0.3996447602131439,
      "eval_logits/chosen": 1.9761625528335571,
      "eval_logits/rejected": 1.6654667854309082,
      "eval_logps/chosen": -2.8789772987365723,
      "eval_logps/rejected": -5.1105055809021,
      "eval_loss": 0.36211252212524414,
      "eval_rewards/accuracies": 0.8791208863258362,
      "eval_rewards/chosen": -4.3184661865234375,
      "eval_rewards/margins": 3.347292423248291,
      "eval_rewards/rejected": -7.6657586097717285,
      "eval_runtime": 25.2549,
      "eval_samples_per_second": 28.826,
      "eval_steps_per_second": 3.603,
      "step": 450
    },
    {
      "epoch": 0.40852575488454707,
      "grad_norm": 1.9171936511993408,
      "learning_rate": 3.92678391921108e-06,
      "logits/chosen": 2.1672446727752686,
      "logits/rejected": 1.6228408813476562,
      "logps/chosen": -2.688931703567505,
      "logps/rejected": -5.2408246994018555,
      "loss": 0.3266,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -4.033397197723389,
      "rewards/margins": 3.8278393745422363,
      "rewards/rejected": -7.861237525939941,
      "step": 460
    },
    {
      "epoch": 0.41740674955595025,
      "grad_norm": 1.702635407447815,
      "learning_rate": 3.88347887310836e-06,
      "logits/chosen": 2.3164448738098145,
      "logits/rejected": 2.047529697418213,
      "logps/chosen": -2.6861701011657715,
      "logps/rejected": -5.629918098449707,
      "loss": 0.3339,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -4.02925443649292,
      "rewards/margins": 4.415622711181641,
      "rewards/rejected": -8.444877624511719,
      "step": 470
    },
    {
      "epoch": 0.42628774422735344,
      "grad_norm": 2.48634934425354,
      "learning_rate": 3.839566987447492e-06,
      "logits/chosen": 2.5225472450256348,
      "logits/rejected": 2.0870003700256348,
      "logps/chosen": -3.041111946105957,
      "logps/rejected": -5.3499016761779785,
      "loss": 0.3226,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -4.561667442321777,
      "rewards/margins": 3.4631850719451904,
      "rewards/rejected": -8.024852752685547,
      "step": 480
    },
    {
      "epoch": 0.4351687388987567,
      "grad_norm": 4.728499412536621,
      "learning_rate": 3.795067523432826e-06,
      "logits/chosen": 2.33893084526062,
      "logits/rejected": 1.7909936904907227,
      "logps/chosen": -2.7356209754943848,
      "logps/rejected": -5.33417272567749,
      "loss": 0.322,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -4.103431224822998,
      "rewards/margins": 3.8978283405303955,
      "rewards/rejected": -8.001258850097656,
      "step": 490
    },
    {
      "epoch": 0.44404973357015987,
      "grad_norm": 8.412679672241211,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": 2.788668632507324,
      "logits/rejected": 2.439873695373535,
      "logps/chosen": -3.3219153881073,
      "logps/rejected": -5.992051124572754,
      "loss": 0.3075,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -4.98287296295166,
      "rewards/margins": 4.005204200744629,
      "rewards/rejected": -8.988077163696289,
      "step": 500
    },
    {
      "epoch": 0.44404973357015987,
      "eval_logits/chosen": 2.165436029434204,
      "eval_logits/rejected": 1.8186790943145752,
      "eval_logps/chosen": -3.4299349784851074,
      "eval_logps/rejected": -6.0660552978515625,
      "eval_loss": 0.3319137990474701,
      "eval_rewards/accuracies": 0.8901098966598511,
      "eval_rewards/chosen": -5.14490270614624,
      "eval_rewards/margins": 3.954181671142578,
      "eval_rewards/rejected": -9.09908390045166,
      "eval_runtime": 25.2602,
      "eval_samples_per_second": 28.82,
      "eval_steps_per_second": 3.603,
      "step": 500
    },
    {
      "epoch": 0.45293072824156305,
      "grad_norm": 2.8339133262634277,
      "learning_rate": 3.7043841852542884e-06,
      "logits/chosen": 2.109018325805664,
      "logits/rejected": 1.6996265649795532,
      "logps/chosen": -3.288560390472412,
      "logps/rejected": -5.986764430999756,
      "loss": 0.3116,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -4.932840824127197,
      "rewards/margins": 4.047306060791016,
      "rewards/rejected": -8.980146408081055,
      "step": 510
    },
    {
      "epoch": 0.46181172291296624,
      "grad_norm": 3.8578269481658936,
      "learning_rate": 3.658240087799655e-06,
      "logits/chosen": 1.7659969329833984,
      "logits/rejected": 1.4596515893936157,
      "logps/chosen": -3.0301425457000732,
      "logps/rejected": -6.252682209014893,
      "loss": 0.3015,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -4.5452141761779785,
      "rewards/margins": 4.833809852600098,
      "rewards/rejected": -9.379022598266602,
      "step": 520
    },
    {
      "epoch": 0.4706927175843694,
      "grad_norm": 2.7795143127441406,
      "learning_rate": 3.611587947962319e-06,
      "logits/chosen": 2.472006320953369,
      "logits/rejected": 1.9400993585586548,
      "logps/chosen": -3.2479186058044434,
      "logps/rejected": -6.475512504577637,
      "loss": 0.3006,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -4.871877193450928,
      "rewards/margins": 4.841391086578369,
      "rewards/rejected": -9.713269233703613,
      "step": 530
    },
    {
      "epoch": 0.47957371225577267,
      "grad_norm": 3.5200746059417725,
      "learning_rate": 3.564448228912682e-06,
      "logits/chosen": 2.911531925201416,
      "logits/rejected": 2.2947440147399902,
      "logps/chosen": -3.387556791305542,
      "logps/rejected": -6.559035301208496,
      "loss": 0.2684,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -5.081335544586182,
      "rewards/margins": 4.757218360900879,
      "rewards/rejected": -9.838552474975586,
      "step": 540
    },
    {
      "epoch": 0.48845470692717585,
      "grad_norm": 2.368495225906372,
      "learning_rate": 3.516841607689501e-06,
      "logits/chosen": 1.276886224746704,
      "logits/rejected": 1.3811718225479126,
      "logps/chosen": -2.9689180850982666,
      "logps/rejected": -6.619606971740723,
      "loss": 0.3159,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -4.4533772468566895,
      "rewards/margins": 5.4760332107543945,
      "rewards/rejected": -9.929410934448242,
      "step": 550
    },
    {
      "epoch": 0.48845470692717585,
      "eval_logits/chosen": 2.2735824584960938,
      "eval_logits/rejected": 1.9788992404937744,
      "eval_logps/chosen": -3.698131799697876,
      "eval_logps/rejected": -6.64966344833374,
      "eval_loss": 0.30747923254966736,
      "eval_rewards/accuracies": 0.9230769276618958,
      "eval_rewards/chosen": -5.5471978187561035,
      "eval_rewards/margins": 4.427298545837402,
      "eval_rewards/rejected": -9.974496841430664,
      "eval_runtime": 25.2322,
      "eval_samples_per_second": 28.852,
      "eval_steps_per_second": 3.606,
      "step": 550
    },
    {
      "epoch": 0.49733570159857904,
      "grad_norm": 1.901207685470581,
      "learning_rate": 3.4687889661302577e-06,
      "logits/chosen": 1.9734981060028076,
      "logits/rejected": 1.8617655038833618,
      "logps/chosen": -3.464953899383545,
      "logps/rejected": -6.746106147766113,
      "loss": 0.2912,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -5.1974310874938965,
      "rewards/margins": 4.921727180480957,
      "rewards/rejected": -10.119158744812012,
      "step": 560
    },
    {
      "epoch": 0.5062166962699822,
      "grad_norm": 3.526299238204956,
      "learning_rate": 3.4203113817116955e-06,
      "logits/chosen": 3.0836069583892822,
      "logits/rejected": 2.75875186920166,
      "logps/chosen": -3.5981743335723877,
      "logps/rejected": -6.405775547027588,
      "loss": 0.3284,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -5.397261142730713,
      "rewards/margins": 4.211403846740723,
      "rewards/rejected": -9.608665466308594,
      "step": 570
    },
    {
      "epoch": 0.5150976909413855,
      "grad_norm": 2.816272497177124,
      "learning_rate": 3.3714301183045382e-06,
      "logits/chosen": 3.0068678855895996,
      "logits/rejected": 2.466287136077881,
      "logps/chosen": -3.7227580547332764,
      "logps/rejected": -6.9311113357543945,
      "loss": 0.2728,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -5.584136962890625,
      "rewards/margins": 4.812530040740967,
      "rewards/rejected": -10.39666748046875,
      "step": 580
    },
    {
      "epoch": 0.5239786856127886,
      "grad_norm": 2.433389902114868,
      "learning_rate": 3.3221666168464584e-06,
      "logits/chosen": 2.9992904663085938,
      "logits/rejected": 2.678699254989624,
      "logps/chosen": -3.540968418121338,
      "logps/rejected": -7.228091239929199,
      "loss": 0.2568,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -5.3114519119262695,
      "rewards/margins": 5.530684947967529,
      "rewards/rejected": -10.842137336730957,
      "step": 590
    },
    {
      "epoch": 0.5328596802841918,
      "grad_norm": 2.7557125091552734,
      "learning_rate": 3.272542485937369e-06,
      "logits/chosen": 2.557410478591919,
      "logits/rejected": 2.331958770751953,
      "logps/chosen": -3.9404635429382324,
      "logps/rejected": -7.266766548156738,
      "loss": 0.2639,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -5.9106950759887695,
      "rewards/margins": 4.9894537925720215,
      "rewards/rejected": -10.900148391723633,
      "step": 600
    },
    {
      "epoch": 0.5328596802841918,
      "eval_logits/chosen": 2.561415910720825,
      "eval_logits/rejected": 2.2971484661102295,
      "eval_logps/chosen": -4.015191555023193,
      "eval_logps/rejected": -7.222255229949951,
      "eval_loss": 0.28853774070739746,
      "eval_rewards/accuracies": 0.9340659379959106,
      "eval_rewards/chosen": -6.022787570953369,
      "eval_rewards/margins": 4.810595989227295,
      "eval_rewards/rejected": -10.833383560180664,
      "eval_runtime": 25.2577,
      "eval_samples_per_second": 28.823,
      "eval_steps_per_second": 3.603,
      "step": 600
    },
    {
      "epoch": 0.5417406749555951,
      "grad_norm": 2.582770824432373,
      "learning_rate": 3.222579492361179e-06,
      "logits/chosen": 2.7404181957244873,
      "logits/rejected": 2.540113687515259,
      "logps/chosen": -3.9154396057128906,
      "logps/rejected": -7.631985664367676,
      "loss": 0.2816,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -5.873159408569336,
      "rewards/margins": 5.574820041656494,
      "rewards/rejected": -11.447979927062988,
      "step": 610
    },
    {
      "epoch": 0.5506216696269982,
      "grad_norm": 3.8167436122894287,
      "learning_rate": 3.1722995515381644e-06,
      "logits/chosen": 2.445218563079834,
      "logits/rejected": 2.288620710372925,
      "logps/chosen": -3.7501556873321533,
      "logps/rejected": -7.918539524078369,
      "loss": 0.2891,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -5.625233173370361,
      "rewards/margins": 6.252577304840088,
      "rewards/rejected": -11.877809524536133,
      "step": 620
    },
    {
      "epoch": 0.5595026642984015,
      "grad_norm": 3.57536244392395,
      "learning_rate": 3.121724717912138e-06,
      "logits/chosen": 2.8337388038635254,
      "logits/rejected": 2.1241557598114014,
      "logps/chosen": -3.7040035724639893,
      "logps/rejected": -7.53197717666626,
      "loss": 0.2702,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -5.556005477905273,
      "rewards/margins": 5.741961479187012,
      "rewards/rejected": -11.297966003417969,
      "step": 630
    },
    {
      "epoch": 0.5683836589698046,
      "grad_norm": 3.0520713329315186,
      "learning_rate": 3.0708771752766397e-06,
      "logits/chosen": 2.5255160331726074,
      "logits/rejected": 2.0742428302764893,
      "logps/chosen": -3.908573865890503,
      "logps/rejected": -7.60653829574585,
      "loss": 0.2598,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -5.862860679626465,
      "rewards/margins": 5.546946048736572,
      "rewards/rejected": -11.409807205200195,
      "step": 640
    },
    {
      "epoch": 0.5772646536412078,
      "grad_norm": 8.067182540893555,
      "learning_rate": 3.019779227044398e-06,
      "logits/chosen": 1.6965067386627197,
      "logits/rejected": 1.644667625427246,
      "logps/chosen": -3.6588027477264404,
      "logps/rejected": -7.457572937011719,
      "loss": 0.2453,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -5.488204002380371,
      "rewards/margins": 5.698155403137207,
      "rewards/rejected": -11.186359405517578,
      "step": 650
    },
    {
      "epoch": 0.5772646536412078,
      "eval_logits/chosen": 2.577754020690918,
      "eval_logits/rejected": 2.265626907348633,
      "eval_logps/chosen": -3.906606435775757,
      "eval_logps/rejected": -7.3099446296691895,
      "eval_loss": 0.27302286028862,
      "eval_rewards/accuracies": 0.9560439586639404,
      "eval_rewards/chosen": -5.859910011291504,
      "eval_rewards/margins": 5.105007171630859,
      "eval_rewards/rejected": -10.964917182922363,
      "eval_runtime": 25.2446,
      "eval_samples_per_second": 28.838,
      "eval_steps_per_second": 3.605,
      "step": 650
    },
    {
      "epoch": 0.5861456483126111,
      "grad_norm": 2.9298255443573,
      "learning_rate": 2.9684532864643123e-06,
      "logits/chosen": 2.901702404022217,
      "logits/rejected": 2.595918655395508,
      "logps/chosen": -3.9949543476104736,
      "logps/rejected": -7.430356502532959,
      "loss": 0.2726,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -5.992431163787842,
      "rewards/margins": 5.153104782104492,
      "rewards/rejected": -11.145535469055176,
      "step": 660
    },
    {
      "epoch": 0.5950266429840142,
      "grad_norm": 3.085571050643921,
      "learning_rate": 2.9169218667902562e-06,
      "logits/chosen": 2.8471388816833496,
      "logits/rejected": 2.5500330924987793,
      "logps/chosen": -4.014147758483887,
      "logps/rejected": -7.298794746398926,
      "loss": 0.2484,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -6.02122163772583,
      "rewards/margins": 4.926970481872559,
      "rewards/rejected": -10.94819164276123,
      "step": 670
    },
    {
      "epoch": 0.6039076376554174,
      "grad_norm": 2.3615477085113525,
      "learning_rate": 2.8652075714060296e-06,
      "logits/chosen": 2.495004177093506,
      "logits/rejected": 1.9873936176300049,
      "logps/chosen": -4.2625041007995605,
      "logps/rejected": -8.186586380004883,
      "loss": 0.2144,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -6.393756866455078,
      "rewards/margins": 5.886124610900879,
      "rewards/rejected": -12.279881477355957,
      "step": 680
    },
    {
      "epoch": 0.6127886323268206,
      "grad_norm": 3.497316837310791,
      "learning_rate": 2.813333083910761e-06,
      "logits/chosen": 1.540621042251587,
      "logits/rejected": 1.2002273797988892,
      "logps/chosen": -3.71490740776062,
      "logps/rejected": -8.136199951171875,
      "loss": 0.2861,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -5.572361469268799,
      "rewards/margins": 6.631939888000488,
      "rewards/rejected": -12.204300880432129,
      "step": 690
    },
    {
      "epoch": 0.6216696269982238,
      "grad_norm": 3.2540247440338135,
      "learning_rate": 2.761321158169134e-06,
      "logits/chosen": 2.776721477508545,
      "logits/rejected": 2.475888729095459,
      "logps/chosen": -4.3385329246521,
      "logps/rejected": -7.8828301429748535,
      "loss": 0.2885,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -6.507800102233887,
      "rewards/margins": 5.316445350646973,
      "rewards/rejected": -11.824244499206543,
      "step": 700
    },
    {
      "epoch": 0.6216696269982238,
      "eval_logits/chosen": 2.4267516136169434,
      "eval_logits/rejected": 2.1245739459991455,
      "eval_logps/chosen": -3.9584598541259766,
      "eval_logps/rejected": -7.546706199645996,
      "eval_loss": 0.26512712240219116,
      "eval_rewards/accuracies": 0.9340659379959106,
      "eval_rewards/chosen": -5.937689781188965,
      "eval_rewards/margins": 5.382368564605713,
      "eval_rewards/rejected": -11.320058822631836,
      "eval_runtime": 25.2589,
      "eval_samples_per_second": 28.822,
      "eval_steps_per_second": 3.603,
      "step": 700
    },
    {
      "epoch": 0.6305506216696269,
      "grad_norm": 2.582273483276367,
      "learning_rate": 2.70919460833079e-06,
      "logits/chosen": 2.2102553844451904,
      "logits/rejected": 1.9504516124725342,
      "logps/chosen": -3.7470879554748535,
      "logps/rejected": -7.761659145355225,
      "loss": 0.2078,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -5.620632171630859,
      "rewards/margins": 6.02185583114624,
      "rewards/rejected": -11.642488479614258,
      "step": 710
    },
    {
      "epoch": 0.6394316163410302,
      "grad_norm": 2.2855663299560547,
      "learning_rate": 2.6569762988232838e-06,
      "logits/chosen": 2.4659409523010254,
      "logits/rejected": 2.0434811115264893,
      "logps/chosen": -3.4346442222595215,
      "logps/rejected": -7.470824241638184,
      "loss": 0.2346,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -5.151965618133545,
      "rewards/margins": 6.054270267486572,
      "rewards/rejected": -11.206236839294434,
      "step": 720
    },
    {
      "epoch": 0.6483126110124334,
      "grad_norm": 2.038733959197998,
      "learning_rate": 2.604689134322999e-06,
      "logits/chosen": 2.270310878753662,
      "logits/rejected": 1.9651508331298828,
      "logps/chosen": -3.721379518508911,
      "logps/rejected": -7.776650905609131,
      "loss": 0.2354,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -5.582068920135498,
      "rewards/margins": 6.0829057693481445,
      "rewards/rejected": -11.6649751663208,
      "step": 730
    },
    {
      "epoch": 0.6571936056838366,
      "grad_norm": 2.948915481567383,
      "learning_rate": 2.5523560497083927e-06,
      "logits/chosen": 2.358057737350464,
      "logits/rejected": 2.063586711883545,
      "logps/chosen": -3.6883864402770996,
      "logps/rejected": -7.352984428405762,
      "loss": 0.231,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -5.5325798988342285,
      "rewards/margins": 5.496896266937256,
      "rewards/rejected": -11.0294771194458,
      "step": 740
    },
    {
      "epoch": 0.6660746003552398,
      "grad_norm": 3.661870002746582,
      "learning_rate": 2.5e-06,
      "logits/chosen": 2.6712796688079834,
      "logits/rejected": 2.393817901611328,
      "logps/chosen": -4.065141201019287,
      "logps/rejected": -7.9232306480407715,
      "loss": 0.2332,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -6.097712516784668,
      "rewards/margins": 5.787134170532227,
      "rewards/rejected": -11.884846687316895,
      "step": 750
    },
    {
      "epoch": 0.6660746003552398,
      "eval_logits/chosen": 2.4871790409088135,
      "eval_logits/rejected": 2.2864174842834473,
      "eval_logps/chosen": -4.176412105560303,
      "eval_logps/rejected": -8.009041786193848,
      "eval_loss": 0.2521994411945343,
      "eval_rewards/accuracies": 0.9340659379959106,
      "eval_rewards/chosen": -6.264617443084717,
      "eval_rewards/margins": 5.7489447593688965,
      "eval_rewards/rejected": -12.01356315612793,
      "eval_runtime": 25.2624,
      "eval_samples_per_second": 28.818,
      "eval_steps_per_second": 3.602,
      "step": 750
    },
    {
      "epoch": 0.6749555950266429,
      "grad_norm": 1.992043375968933,
      "learning_rate": 2.447643950291608e-06,
      "logits/chosen": 1.8225914239883423,
      "logits/rejected": 1.842559814453125,
      "logps/chosen": -4.158253192901611,
      "logps/rejected": -8.65810775756836,
      "loss": 0.2104,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -6.237379550933838,
      "rewards/margins": 6.749783515930176,
      "rewards/rejected": -12.987161636352539,
      "step": 760
    },
    {
      "epoch": 0.6838365896980462,
      "grad_norm": 1.9012709856033325,
      "learning_rate": 2.3953108656770018e-06,
      "logits/chosen": 2.491041660308838,
      "logits/rejected": 1.9490203857421875,
      "logps/chosen": -4.141987323760986,
      "logps/rejected": -8.790396690368652,
      "loss": 0.2236,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -6.212981224060059,
      "rewards/margins": 6.9726152420043945,
      "rewards/rejected": -13.185595512390137,
      "step": 770
    },
    {
      "epoch": 0.6927175843694494,
      "grad_norm": 3.4377241134643555,
      "learning_rate": 2.3430237011767166e-06,
      "logits/chosen": 2.54376220703125,
      "logits/rejected": 2.2269883155822754,
      "logps/chosen": -3.7832961082458496,
      "logps/rejected": -8.058969497680664,
      "loss": 0.2449,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -5.674944877624512,
      "rewards/margins": 6.413510322570801,
      "rewards/rejected": -12.08845329284668,
      "step": 780
    },
    {
      "epoch": 0.7015985790408525,
      "grad_norm": 1.365478277206421,
      "learning_rate": 2.290805391669212e-06,
      "logits/chosen": 3.386944532394409,
      "logits/rejected": 2.942134141921997,
      "logps/chosen": -3.8051178455352783,
      "logps/rejected": -8.201702117919922,
      "loss": 0.1764,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -5.707675933837891,
      "rewards/margins": 6.594876289367676,
      "rewards/rejected": -12.302552223205566,
      "step": 790
    },
    {
      "epoch": 0.7104795737122558,
      "grad_norm": 3.3714189529418945,
      "learning_rate": 2.238678841830867e-06,
      "logits/chosen": 2.690781593322754,
      "logits/rejected": 2.2246782779693604,
      "logps/chosen": -4.251287937164307,
      "logps/rejected": -8.268930435180664,
      "loss": 0.2429,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -6.376931667327881,
      "rewards/margins": 6.026463031768799,
      "rewards/rejected": -12.40339469909668,
      "step": 800
    },
    {
      "epoch": 0.7104795737122558,
      "eval_logits/chosen": 2.453395128250122,
      "eval_logits/rejected": 2.2390329837799072,
      "eval_logps/chosen": -4.038145065307617,
      "eval_logps/rejected": -8.012212753295898,
      "eval_loss": 0.2468602955341339,
      "eval_rewards/accuracies": 0.9340659379959106,
      "eval_rewards/chosen": -6.057217597961426,
      "eval_rewards/margins": 5.961101055145264,
      "eval_rewards/rejected": -12.018318176269531,
      "eval_runtime": 25.2622,
      "eval_samples_per_second": 28.818,
      "eval_steps_per_second": 3.602,
      "step": 800
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.8865429979783496e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}