{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 478,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.0416666666666666e-08,
      "logits/chosen": -0.3017902672290802,
      "logits/rejected": -0.4429064989089966,
      "logps/chosen": -319.77001953125,
      "logps/rejected": -256.397705078125,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.0416666666666667e-07,
      "logits/chosen": -0.6185635924339294,
      "logits/rejected": -0.6267194747924805,
      "logps/chosen": -266.4049072265625,
      "logps/rejected": -226.4547576904297,
      "loss": 0.6938,
      "rewards/accuracies": 0.5347222089767456,
      "rewards/chosen": -0.0314086489379406,
      "rewards/margins": 0.004176241811364889,
      "rewards/rejected": -0.035584889352321625,
      "step": 10
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.0833333333333333e-07,
      "logits/chosen": -0.5040268301963806,
      "logits/rejected": -0.5066824555397034,
      "logps/chosen": -294.06231689453125,
      "logps/rejected": -256.36572265625,
      "loss": 0.6919,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.0255455132573843,
      "rewards/margins": 0.0022588702850043774,
      "rewards/rejected": 0.023286638781428337,
      "step": 20
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -0.47493982315063477,
      "logits/rejected": -0.4304034113883972,
      "logps/chosen": -294.8653869628906,
      "logps/rejected": -278.9495544433594,
      "loss": 0.6909,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": 0.011051720939576626,
      "rewards/margins": 0.003937236033380032,
      "rewards/rejected": 0.007114484906196594,
      "step": 30
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -0.6350118517875671,
      "logits/rejected": -0.6409525871276855,
      "logps/chosen": -302.514404296875,
      "logps/rejected": -264.9754943847656,
      "loss": 0.6886,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": -0.00148511934094131,
      "rewards/margins": 0.005857478827238083,
      "rewards/rejected": -0.007342599332332611,
      "step": 40
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.999733114418725e-07,
      "logits/chosen": -0.6464229226112366,
      "logits/rejected": -0.6577852964401245,
      "logps/chosen": -318.97540283203125,
      "logps/rejected": -295.6136169433594,
      "loss": 0.6858,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.002440870273858309,
      "rewards/margins": 0.01247528288513422,
      "rewards/rejected": -0.010034412145614624,
      "step": 50
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.990398100856366e-07,
      "logits/chosen": -0.7159032821655273,
      "logits/rejected": -0.6997270584106445,
      "logps/chosen": -257.1394958496094,
      "logps/rejected": -223.77249145507812,
      "loss": 0.6834,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": 0.008302798494696617,
      "rewards/margins": 0.01499713771045208,
      "rewards/rejected": -0.006694340147078037,
      "step": 60
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.967775735898179e-07,
      "logits/chosen": -0.6443796157836914,
      "logits/rejected": -0.6453703045845032,
      "logps/chosen": -287.23919677734375,
      "logps/rejected": -253.98779296875,
      "loss": 0.6809,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.012265635654330254,
      "rewards/margins": 0.02614836022257805,
      "rewards/rejected": -0.013882724568247795,
      "step": 70
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.931986719649298e-07,
      "logits/chosen": -0.6774781346321106,
      "logits/rejected": -0.6471685171127319,
      "logps/chosen": -281.8182067871094,
      "logps/rejected": -281.8425598144531,
      "loss": 0.6743,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.0038391489069908857,
      "rewards/margins": 0.02816380001604557,
      "rewards/rejected": -0.032002948224544525,
      "step": 80
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.883222001996351e-07,
      "logits/chosen": -0.7065152525901794,
      "logits/rejected": -0.692989706993103,
      "logps/chosen": -276.53753662109375,
      "logps/rejected": -264.0567932128906,
      "loss": 0.6657,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.04013616591691971,
      "rewards/margins": 0.060574889183044434,
      "rewards/rejected": -0.10071107000112534,
      "step": 90
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.821741763807186e-07,
      "logits/chosen": -0.6360586881637573,
      "logits/rejected": -0.6389707326889038,
      "logps/chosen": -301.81329345703125,
      "logps/rejected": -287.9957580566406,
      "loss": 0.6548,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.07365767657756805,
      "rewards/margins": 0.08726193010807037,
      "rewards/rejected": -0.16091960668563843,
      "step": 100
    },
    {
      "epoch": 0.21,
      "eval_logits/chosen": -0.7018380165100098,
      "eval_logits/rejected": -0.6576583981513977,
      "eval_logps/chosen": -290.2217712402344,
      "eval_logps/rejected": -294.29364013671875,
      "eval_loss": 0.6548193693161011,
      "eval_rewards/accuracies": 0.66796875,
      "eval_rewards/chosen": -0.1060016080737114,
      "eval_rewards/margins": 0.10316330194473267,
      "eval_rewards/rejected": -0.20916491746902466,
      "eval_runtime": 117.0477,
      "eval_samples_per_second": 17.087,
      "eval_steps_per_second": 0.273,
      "step": 100
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.747874028753375e-07,
      "logits/chosen": -0.6454014778137207,
      "logits/rejected": -0.5620900988578796,
      "logps/chosen": -347.7331237792969,
      "logps/rejected": -300.71112060546875,
      "loss": 0.651,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.11785294860601425,
      "rewards/margins": 0.12140049040317535,
      "rewards/rejected": -0.23925340175628662,
      "step": 110
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.662012913161997e-07,
      "logits/chosen": -0.70166015625,
      "logits/rejected": -0.727249801158905,
      "logps/chosen": -303.4729919433594,
      "logps/rejected": -263.29583740234375,
      "loss": 0.6384,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.18253406882286072,
      "rewards/margins": 0.14989741146564484,
      "rewards/rejected": -0.33243149518966675,
      "step": 120
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.5646165232345103e-07,
      "logits/chosen": -0.6562562584877014,
      "logits/rejected": -0.5979722142219543,
      "logps/chosen": -306.23516845703125,
      "logps/rejected": -346.08197021484375,
      "loss": 0.6379,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.28600436449050903,
      "rewards/margins": 0.16408249735832214,
      "rewards/rejected": -0.45008689165115356,
      "step": 130
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.456204510851956e-07,
      "logits/chosen": -0.5730563402175903,
      "logits/rejected": -0.5727633237838745,
      "logps/chosen": -335.1769714355469,
      "logps/rejected": -309.3208312988281,
      "loss": 0.6268,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.16110344231128693,
      "rewards/margins": 0.21982815861701965,
      "rewards/rejected": -0.38093167543411255,
      "step": 140
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.337355301007335e-07,
      "logits/chosen": -0.6809996366500854,
      "logits/rejected": -0.6591669321060181,
      "logps/chosen": -328.6540832519531,
      "logps/rejected": -292.2736511230469,
      "loss": 0.6047,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.33402004837989807,
      "rewards/margins": 0.2206876277923584,
      "rewards/rejected": -0.5547076463699341,
      "step": 150
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.2087030056579986e-07,
      "logits/chosen": -0.6660237312316895,
      "logits/rejected": -0.6344098448753357,
      "logps/chosen": -324.8284912109375,
      "logps/rejected": -335.0122985839844,
      "loss": 0.6017,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.30958321690559387,
      "rewards/margins": 0.312285840511322,
      "rewards/rejected": -0.6218689680099487,
      "step": 160
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.070934040463998e-07,
      "logits/chosen": -0.6780328154563904,
      "logits/rejected": -0.6576133370399475,
      "logps/chosen": -300.7780456542969,
      "logps/rejected": -299.3976745605469,
      "loss": 0.6237,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.4301220774650574,
      "rewards/margins": 0.2145705670118332,
      "rewards/rejected": -0.6446926593780518,
      "step": 170
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.9247834624635404e-07,
      "logits/chosen": -0.6824392080307007,
      "logits/rejected": -0.6258381009101868,
      "logps/chosen": -354.97503662109375,
      "logps/rejected": -370.0382385253906,
      "loss": 0.599,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.4686453938484192,
      "rewards/margins": 0.343492329120636,
      "rewards/rejected": -0.8121377825737,
      "step": 180
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.7710310482256523e-07,
      "logits/chosen": -0.6199353933334351,
      "logits/rejected": -0.6015241146087646,
      "logps/chosen": -320.41021728515625,
      "logps/rejected": -331.60284423828125,
      "loss": 0.5902,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.27603933215141296,
      "rewards/margins": 0.31284210085868835,
      "rewards/rejected": -0.5888813734054565,
      "step": 190
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.610497133404795e-07,
      "logits/chosen": -0.7054450511932373,
      "logits/rejected": -0.6953166723251343,
      "logps/chosen": -338.31829833984375,
      "logps/rejected": -323.37115478515625,
      "loss": 0.5881,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.5281119346618652,
      "rewards/margins": 0.2860097885131836,
      "rewards/rejected": -0.8141217231750488,
      "step": 200
    },
    {
      "epoch": 0.42,
      "eval_logits/chosen": -0.8023231625556946,
      "eval_logits/rejected": -0.7415486574172974,
      "eval_logps/chosen": -310.1252136230469,
      "eval_logps/rejected": -339.1258850097656,
      "eval_loss": 0.5968395471572876,
      "eval_rewards/accuracies": 0.6875,
      "eval_rewards/chosen": -0.3050362467765808,
      "eval_rewards/margins": 0.3524515628814697,
      "eval_rewards/rejected": -0.657487690448761,
      "eval_runtime": 121.2666,
      "eval_samples_per_second": 16.493,
      "eval_steps_per_second": 0.264,
      "step": 200
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4440382358952115e-07,
      "logits/chosen": -0.7312219738960266,
      "logits/rejected": -0.730713427066803,
      "logps/chosen": -314.7323303222656,
      "logps/rejected": -301.01312255859375,
      "loss": 0.5938,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.23817189037799835,
      "rewards/margins": 0.30586400628089905,
      "rewards/rejected": -0.5440359115600586,
      "step": 210
    },
    {
      "epoch": 0.46,
      "learning_rate": 3.272542485937368e-07,
      "logits/chosen": -0.7227334380149841,
      "logits/rejected": -0.6831812858581543,
      "logps/chosen": -315.691162109375,
      "logps/rejected": -317.9109802246094,
      "loss": 0.5961,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.27428802847862244,
      "rewards/margins": 0.35738009214401245,
      "rewards/rejected": -0.6316681504249573,
      "step": 220
    },
    {
      "epoch": 0.48,
      "learning_rate": 3.096924887558854e-07,
      "logits/chosen": -0.6957461833953857,
      "logits/rejected": -0.6545354723930359,
      "logps/chosen": -313.1252746582031,
      "logps/rejected": -355.5252685546875,
      "loss": 0.5924,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.4048747420310974,
      "rewards/margins": 0.39421346783638,
      "rewards/rejected": -0.7990882992744446,
      "step": 230
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.9181224366319943e-07,
      "logits/chosen": -0.8229473829269409,
      "logits/rejected": -0.8242266774177551,
      "logps/chosen": -316.42327880859375,
      "logps/rejected": -329.6378173828125,
      "loss": 0.5973,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -0.5616710186004639,
      "rewards/margins": 0.3975863456726074,
      "rewards/rejected": -0.9592572450637817,
      "step": 240
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.7370891215954565e-07,
      "logits/chosen": -0.7482638955116272,
      "logits/rejected": -0.7271997332572937,
      "logps/chosen": -325.5633850097656,
      "logps/rejected": -347.3030700683594,
      "loss": 0.5769,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.5051977038383484,
      "rewards/margins": 0.35222890973091125,
      "rewards/rejected": -0.857426643371582,
      "step": 250
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.55479083351317e-07,
      "logits/chosen": -0.8334507942199707,
      "logits/rejected": -0.8130620718002319,
      "logps/chosen": -333.03924560546875,
      "logps/rejected": -325.97979736328125,
      "loss": 0.5666,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.482491672039032,
      "rewards/margins": 0.45697951316833496,
      "rewards/rejected": -0.9394710659980774,
      "step": 260
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.3722002126275822e-07,
      "logits/chosen": -0.8548744320869446,
      "logits/rejected": -0.839653491973877,
      "logps/chosen": -324.4539794921875,
      "logps/rejected": -357.67169189453125,
      "loss": 0.5678,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.45763128995895386,
      "rewards/margins": 0.48610082268714905,
      "rewards/rejected": -0.9437320828437805,
      "step": 270
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.19029145890313e-07,
      "logits/chosen": -0.814782440662384,
      "logits/rejected": -0.7766333222389221,
      "logps/chosen": -330.60662841796875,
      "logps/rejected": -326.7635192871094,
      "loss": 0.5829,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.46811041235923767,
      "rewards/margins": 0.3988150358200073,
      "rewards/rejected": -0.8669255375862122,
      "step": 280
    },
    {
      "epoch": 0.61,
      "learning_rate": 2.0100351342479216e-07,
      "logits/chosen": -0.714547336101532,
      "logits/rejected": -0.656903862953186,
      "logps/chosen": -347.14349365234375,
      "logps/rejected": -363.637451171875,
      "loss": 0.5845,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.4874221384525299,
      "rewards/margins": 0.36368709802627563,
      "rewards/rejected": -0.8511092066764832,
      "step": 290
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8323929841460178e-07,
      "logits/chosen": -0.7814041376113892,
      "logits/rejected": -0.7600786089897156,
      "logps/chosen": -350.5426025390625,
      "logps/rejected": -373.58282470703125,
      "loss": 0.5753,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.4952825903892517,
      "rewards/margins": 0.3200310170650482,
      "rewards/rejected": -0.8153136372566223,
      "step": 300
    },
    {
      "epoch": 0.63,
      "eval_logits/chosen": -0.8262501955032349,
      "eval_logits/rejected": -0.7578529119491577,
      "eval_logps/chosen": -328.7913513183594,
      "eval_logps/rejected": -371.0563659667969,
      "eval_loss": 0.5733634233474731,
      "eval_rewards/accuracies": 0.72265625,
      "eval_rewards/chosen": -0.4916972815990448,
      "eval_rewards/margins": 0.48509520292282104,
      "eval_rewards/rejected": -0.9767925143241882,
      "eval_runtime": 130.1337,
      "eval_samples_per_second": 15.369,
      "eval_steps_per_second": 0.246,
      "step": 300
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.6583128063291573e-07,
      "logits/chosen": -0.8206486701965332,
      "logits/rejected": -0.8467684984207153,
      "logps/chosen": -335.5056457519531,
      "logps/rejected": -316.3816833496094,
      "loss": 0.5633,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.5007157325744629,
      "rewards/margins": 0.4249514937400818,
      "rewards/rejected": -0.9256671667098999,
      "step": 310
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.488723393865766e-07,
      "logits/chosen": -0.9328675270080566,
      "logits/rejected": -0.8487465977668762,
      "logps/chosen": -306.1993103027344,
      "logps/rejected": -342.8832092285156,
      "loss": 0.5693,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.5110325813293457,
      "rewards/margins": 0.5352452993392944,
      "rewards/rejected": -1.0462777614593506,
      "step": 320
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.3245295796480788e-07,
      "logits/chosen": -0.6851174831390381,
      "logits/rejected": -0.6687122583389282,
      "logps/chosen": -352.14984130859375,
      "logps/rejected": -357.9172668457031,
      "loss": 0.57,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.570861279964447,
      "rewards/margins": 0.4501311779022217,
      "rewards/rejected": -1.0209925174713135,
      "step": 330
    },
    {
      "epoch": 0.71,
      "learning_rate": 1.1666074087171627e-07,
      "logits/chosen": -0.7861064672470093,
      "logits/rejected": -0.7692282795906067,
      "logps/chosen": -313.29083251953125,
      "logps/rejected": -344.58062744140625,
      "loss": 0.5673,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.48139747977256775,
      "rewards/margins": 0.413718044757843,
      "rewards/rejected": -0.8951154947280884,
      "step": 340
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.0157994641835734e-07,
      "logits/chosen": -0.8523145914077759,
      "logits/rejected": -0.8958388566970825,
      "logps/chosen": -305.7944641113281,
      "logps/rejected": -335.16778564453125,
      "loss": 0.5533,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.4743496775627136,
      "rewards/margins": 0.49316510558128357,
      "rewards/rejected": -0.9675148129463196,
      "step": 350
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.729103716819111e-08,
      "logits/chosen": -0.7652881741523743,
      "logits/rejected": -0.8117391467094421,
      "logps/chosen": -384.6844177246094,
      "logps/rejected": -361.26385498046875,
      "loss": 0.5719,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.47557467222213745,
      "rewards/margins": 0.4706428647041321,
      "rewards/rejected": -0.9462175369262695,
      "step": 360
    },
    {
      "epoch": 0.77,
      "learning_rate": 7.387025063449081e-08,
      "logits/chosen": -0.8511560559272766,
      "logits/rejected": -0.8533377647399902,
      "logps/chosen": -373.3354187011719,
      "logps/rejected": -363.8319396972656,
      "loss": 0.5718,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.5117169618606567,
      "rewards/margins": 0.5143001675605774,
      "rewards/rejected": -1.026017189025879,
      "step": 370
    },
    {
      "epoch": 0.79,
      "learning_rate": 6.138919252022435e-08,
      "logits/chosen": -0.8471282124519348,
      "logits/rejected": -0.8318672180175781,
      "logps/chosen": -351.12689208984375,
      "logps/rejected": -389.46929931640625,
      "loss": 0.5572,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.6237812042236328,
      "rewards/margins": 0.5021282434463501,
      "rewards/rejected": -1.1259095668792725,
      "step": 380
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.991445467064689e-08,
      "logits/chosen": -0.8562029600143433,
      "logits/rejected": -0.8402010798454285,
      "logps/chosen": -342.40411376953125,
      "logps/rejected": -366.63250732421875,
      "loss": 0.5666,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -0.5931364297866821,
      "rewards/margins": 0.336722195148468,
      "rewards/rejected": -0.9298585653305054,
      "step": 390
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.9507259776993954e-08,
      "logits/chosen": -0.8323208093643188,
      "logits/rejected": -0.8248344659805298,
      "logps/chosen": -379.9523620605469,
      "logps/rejected": -394.05401611328125,
      "loss": 0.5602,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.5521629452705383,
      "rewards/margins": 0.45100274682044983,
      "rewards/rejected": -1.003165602684021,
      "step": 400
    },
    {
      "epoch": 0.84,
      "eval_logits/chosen": -0.92626953125,
      "eval_logits/rejected": -0.8578699231147766,
      "eval_logps/chosen": -329.260498046875,
      "eval_logps/rejected": -374.94903564453125,
      "eval_loss": 0.5689293742179871,
      "eval_rewards/accuracies": 0.734375,
      "eval_rewards/chosen": -0.49638885259628296,
      "eval_rewards/margins": 0.5193302035331726,
      "eval_rewards/rejected": -1.0157190561294556,
      "eval_runtime": 136.1974,
      "eval_samples_per_second": 14.685,
      "eval_steps_per_second": 0.235,
      "step": 400
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.022313472693447e-08,
      "logits/chosen": -0.9494141340255737,
      "logits/rejected": -0.9114382863044739,
      "logps/chosen": -325.3351135253906,
      "logps/rejected": -346.55963134765625,
      "loss": 0.5736,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.538487434387207,
      "rewards/margins": 0.4442841410636902,
      "rewards/rejected": -0.9827715754508972,
      "step": 410
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.2111614344599684e-08,
      "logits/chosen": -0.9237302541732788,
      "logits/rejected": -0.8802440762519836,
      "logps/chosen": -330.2814025878906,
      "logps/rejected": -381.2386169433594,
      "loss": 0.5607,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.5165634155273438,
      "rewards/margins": 0.437481164932251,
      "rewards/rejected": -0.9540446400642395,
      "step": 420
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.521597710086439e-08,
      "logits/chosen": -0.8156334757804871,
      "logits/rejected": -0.8552393913269043,
      "logps/chosen": -364.59478759765625,
      "logps/rejected": -368.82489013671875,
      "loss": 0.5777,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.5483056306838989,
      "rewards/margins": 0.5216010808944702,
      "rewards/rejected": -1.0699067115783691,
      "step": 430
    },
    {
      "epoch": 0.92,
      "learning_rate": 9.57301420397924e-09,
      "logits/chosen": -0.9248915910720825,
      "logits/rejected": -0.8963875770568848,
      "logps/chosen": -329.3937683105469,
      "logps/rejected": -360.4046325683594,
      "loss": 0.5606,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.5845829248428345,
      "rewards/margins": 0.42287713289260864,
      "rewards/rejected": -1.007460117340088,
      "step": 440
    },
    {
      "epoch": 0.94,
      "learning_rate": 5.212833302556258e-09,
      "logits/chosen": -1.0620511770248413,
      "logits/rejected": -1.0440218448638916,
      "logps/chosen": -308.4063720703125,
      "logps/rejected": -327.61328125,
      "loss": 0.5911,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.5545769333839417,
      "rewards/margins": 0.38095879554748535,
      "rewards/rejected": -0.935535728931427,
      "step": 450
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.158697848236607e-09,
      "logits/chosen": -0.9425494074821472,
      "logits/rejected": -0.9231334924697876,
      "logps/chosen": -308.18743896484375,
      "logps/rejected": -333.3728942871094,
      "loss": 0.5588,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.6228442192077637,
      "rewards/margins": 0.42768868803977966,
      "rewards/rejected": -1.0505329370498657,
      "step": 460
    },
    {
      "epoch": 0.98,
      "learning_rate": 4.269029751107489e-10,
      "logits/chosen": -0.9223429560661316,
      "logits/rejected": -0.9098516702651978,
      "logps/chosen": -363.28765869140625,
      "logps/rejected": -387.8728942871094,
      "loss": 0.5479,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.480919748544693,
      "rewards/margins": 0.6743713617324829,
      "rewards/rejected": -1.155290961265564,
      "step": 470
    },
    {
      "epoch": 1.0,
      "step": 478,
      "total_flos": 0.0,
      "train_loss": 0.6029723678173879,
      "train_runtime": 7448.3301,
      "train_samples_per_second": 8.208,
      "train_steps_per_second": 0.064
    }
  ],
  "logging_steps": 10,
  "max_steps": 478,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10000,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}