lole25 committed
Commit
4bd8d55
1 Parent(s): b725940

Model save

README.md ADDED
@@ -0,0 +1,83 @@
+ ---
+ license: mit
+ library_name: peft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ base_model: microsoft/phi-2
+ model-index:
+ - name: phi-2-ipo-ultrafeedback-lora
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # phi-2-ipo-ultrafeedback-lora
+
+ This model is a fine-tuned version of [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 2156.2256
+ - Rewards/chosen: -0.1105
+ - Rewards/rejected: -0.1771
+ - Rewards/accuracies: 0.6940
+ - Rewards/margins: 0.0666
+ - Logps/rejected: -249.1476
+ - Logps/chosen: -271.2955
+ - Logits/rejected: 0.7668
+ - Logits/chosen: 0.6624
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 2
+
62
+ ### Training results
63
+
64
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
65
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
66
+ | 2494.2439 | 0.21 | 100 | 2494.1194 | -0.0001 | -0.0010 | 0.5480 | 0.0009 | -231.5405 | -260.2577 | 0.9164 | 0.8142 |
67
+ | 2425.7957 | 0.42 | 200 | 2420.3296 | -0.0052 | -0.0154 | 0.6560 | 0.0101 | -232.9728 | -260.7673 | 0.9218 | 0.8183 |
68
+ | 2310.102 | 0.63 | 300 | 2309.9451 | -0.0300 | -0.0576 | 0.6680 | 0.0276 | -237.1959 | -263.2440 | 0.9088 | 0.8041 |
69
+ | 2159.0707 | 0.84 | 400 | 2236.2759 | -0.0634 | -0.1085 | 0.6840 | 0.0451 | -242.2857 | -266.5839 | 0.8637 | 0.7578 |
70
+ | 2176.8641 | 1.05 | 500 | 2197.5420 | -0.0903 | -0.1463 | 0.6980 | 0.0560 | -246.0634 | -269.2716 | 0.8180 | 0.7125 |
71
+ | 2066.3285 | 1.26 | 600 | 2177.3389 | -0.1014 | -0.1628 | 0.6960 | 0.0614 | -247.7128 | -270.3855 | 0.7927 | 0.6879 |
72
+ | 2119.5369 | 1.47 | 700 | 2166.3855 | -0.1054 | -0.1702 | 0.6960 | 0.0648 | -248.4533 | -270.7824 | 0.7771 | 0.6726 |
73
+ | 2096.7854 | 1.67 | 800 | 2159.7104 | -0.1091 | -0.1756 | 0.6960 | 0.0665 | -248.9965 | -271.1501 | 0.7684 | 0.6641 |
74
+ | 2094.5041 | 1.88 | 900 | 2158.6299 | -0.1103 | -0.1768 | 0.6980 | 0.0665 | -249.1140 | -271.2745 | 0.7690 | 0.6646 |
75
+
76
+
77
+ ### Framework versions
78
+
79
+ - PEFT 0.7.1
80
+ - Transformers 4.36.2
81
+ - Pytorch 2.1.2+cu118
82
+ - Datasets 2.14.6
83
+ - Tokenizers 0.15.2
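To use the adapter published by this commit (adapter_model.safetensors, ~42 MB), it can be attached to the base model with PEFT at the versions listed above. A minimal sketch, assuming the adapter is published under the committing account:

```python
# Hedged sketch: attach the saved LoRA adapter to microsoft/phi-2 for inference.
# The hub id is an assumption based on the commit author and the model name in this card.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "microsoft/phi-2"
adapter_id = "lole25/phi-2-ipo-ultrafeedback-lora"  # assumed hub id

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16, device_map="auto")
model = PeftModel.from_pretrained(model, adapter_id)  # loads adapter_model.safetensors
model.eval()

prompt = "Instruct: What does direct preference optimization train a model to do?\nOutput:"
device = next(model.parameters()).device
inputs = tokenizer(prompt, return_tensors="pt").to(device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```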
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:45830533e4b34ff6b3628c452511d1795e7636c6af514cd5106f412932e14a7b
+ oid sha256:7c347685b3aa2c6e5c6146f0068dba658ca7ef760eaba19404437e2030bd5e74
  size 41977616
all_results.json ADDED
@@ -0,0 +1,21 @@
+ {
+     "epoch": 2.0,
+     "eval_logits/chosen": 0.6623885631561279,
+     "eval_logits/rejected": 0.7668179869651794,
+     "eval_logps/chosen": -271.2955017089844,
+     "eval_logps/rejected": -249.1475830078125,
+     "eval_loss": 2156.2255859375,
+     "eval_rewards/accuracies": 0.6940000057220459,
+     "eval_rewards/chosen": -0.11051338165998459,
+     "eval_rewards/margins": 0.06660113483667374,
+     "eval_rewards/rejected": -0.17711451649665833,
+     "eval_runtime": 325.4702,
+     "eval_samples": 2000,
+     "eval_samples_per_second": 6.145,
+     "eval_steps_per_second": 0.384,
+     "train_loss": 2246.599348344143,
+     "train_runtime": 18130.6033,
+     "train_samples": 30567,
+     "train_samples_per_second": 3.372,
+     "train_steps_per_second": 0.053
+ }
eval_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+     "epoch": 2.0,
+     "eval_logits/chosen": 0.6623885631561279,
+     "eval_logits/rejected": 0.7668179869651794,
+     "eval_logps/chosen": -271.2955017089844,
+     "eval_logps/rejected": -249.1475830078125,
+     "eval_loss": 2156.2255859375,
+     "eval_rewards/accuracies": 0.6940000057220459,
+     "eval_rewards/chosen": -0.11051338165998459,
+     "eval_rewards/margins": 0.06660113483667374,
+     "eval_rewards/rejected": -0.17711451649665833,
+     "eval_runtime": 325.4702,
+     "eval_samples": 2000,
+     "eval_samples_per_second": 6.145,
+     "eval_steps_per_second": 0.384
+ }
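For readers unfamiliar with these fields: in TRL's DPO/IPO trainer, `eval_rewards/chosen` and `eval_rewards/rejected` are typically the mean implicit rewards beta * (logp_policy - logp_ref) for the chosen and rejected responses, `eval_rewards/margins` is their difference, and `eval_rewards/accuracies` is the fraction of pairs where the chosen reward is higher; that reading is an assumption based on TRL's conventions, not something stated in this repository. The derived fields follow mechanically from per-pair rewards, as in this illustrative sketch with made-up numbers:

```python
# Illustrative only -- made-up per-pair implicit rewards, showing how the derived
# eval fields relate to each other.
chosen_rewards = [-0.10, -0.05, -0.20]    # beta * (logp_policy - logp_ref) for chosen responses
rejected_rewards = [-0.18, -0.09, -0.15]  # the same quantity for rejected responses

margins = [c - r for c, r in zip(chosen_rewards, rejected_rewards)]
mean_margin = sum(margins) / len(margins)              # -> rewards/margins
accuracy = sum(m > 0 for m in margins) / len(margins)  # -> rewards/accuracies

print(mean_margin, accuracy)
```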
runs/Mar04_17-16-13_gpu4-119-4/events.out.tfevents.1709533136.gpu4-119-4.2455345.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f34ebe5d2eca840775feadca225c94a46de7359c05136edca6023ff418583584
- size 69040
+ oid sha256:8d578ba49f82a9a74c3ecb2c570c53bda71d61dfc2f2e569b6e9b527ebba9405
+ size 72564
runs/Mar04_17-16-13_gpu4-119-4/events.out.tfevents.1709551592.gpu4-119-4.2455345.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f74a1e91a3f246c1bbd25dbbddcdae32374db31ca667b89032988cdb626434bd
+ size 828
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 2.0,
+     "train_loss": 2246.599348344143,
+     "train_runtime": 18130.6033,
+     "train_samples": 30567,
+     "train_samples_per_second": 3.372,
+     "train_steps_per_second": 0.053
+ }
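The throughput figures are consistent with the sample count, epoch count, and runtime reported above; a quick arithmetic check (values copied from train_results.json, total batch size 64 taken from the model card):

```python
# Consistency check of the reported throughput.
train_samples = 30567
num_epochs = 2
train_runtime_s = 18130.6033
total_train_batch_size = 64

samples_per_second = train_samples * num_epochs / train_runtime_s
steps_per_second = samples_per_second / total_train_batch_size

print(round(samples_per_second, 3))  # 3.372, matching train_samples_per_second
print(round(steps_per_second, 3))    # 0.053, matching train_steps_per_second
```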
trainer_state.json ADDED
@@ -0,0 +1,1518 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.9968602825745683,
5
+ "eval_steps": 100,
6
+ "global_step": 954,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 5.208333333333333e-08,
14
+ "logits/chosen": 0.952304482460022,
15
+ "logits/rejected": 0.5888463854789734,
16
+ "logps/chosen": -223.79486083984375,
17
+ "logps/rejected": -209.482666015625,
18
+ "loss": 2500.0,
19
+ "rewards/accuracies": 0.0,
20
+ "rewards/chosen": 0.0,
21
+ "rewards/margins": 0.0,
22
+ "rewards/rejected": 0.0,
23
+ "step": 1
24
+ },
25
+ {
26
+ "epoch": 0.02,
27
+ "learning_rate": 5.208333333333334e-07,
28
+ "logits/chosen": 0.8362942934036255,
29
+ "logits/rejected": 0.8542055487632751,
30
+ "logps/chosen": -236.253662109375,
31
+ "logps/rejected": -221.88853454589844,
32
+ "loss": 2503.2357,
33
+ "rewards/accuracies": 0.3958333432674408,
34
+ "rewards/chosen": 8.874866762198508e-05,
35
+ "rewards/margins": -0.00026307348161935806,
36
+ "rewards/rejected": 0.0003518221783451736,
37
+ "step": 10
38
+ },
39
+ {
40
+ "epoch": 0.04,
41
+ "learning_rate": 1.0416666666666667e-06,
42
+ "logits/chosen": 0.8335070610046387,
43
+ "logits/rejected": 0.9283286929130554,
44
+ "logps/chosen": -254.7803192138672,
45
+ "logps/rejected": -247.91357421875,
46
+ "loss": 2498.9305,
47
+ "rewards/accuracies": 0.512499988079071,
48
+ "rewards/chosen": -0.0006878537242300808,
49
+ "rewards/margins": 0.00017734414723236114,
50
+ "rewards/rejected": -0.0008651980315335095,
51
+ "step": 20
52
+ },
53
+ {
54
+ "epoch": 0.06,
55
+ "learning_rate": 1.5625e-06,
56
+ "logits/chosen": 0.8601231575012207,
57
+ "logits/rejected": 0.9173057675361633,
58
+ "logps/chosen": -260.49664306640625,
59
+ "logps/rejected": -232.03378295898438,
60
+ "loss": 2500.6068,
61
+ "rewards/accuracies": 0.5062500238418579,
62
+ "rewards/chosen": 0.0006941998144611716,
63
+ "rewards/margins": 0.0007489144918508828,
64
+ "rewards/rejected": -5.471452095662244e-05,
65
+ "step": 30
66
+ },
67
+ {
68
+ "epoch": 0.08,
69
+ "learning_rate": 2.0833333333333334e-06,
70
+ "logits/chosen": 0.811628520488739,
71
+ "logits/rejected": 0.9033697843551636,
72
+ "logps/chosen": -280.28118896484375,
73
+ "logps/rejected": -228.5680694580078,
74
+ "loss": 2502.0561,
75
+ "rewards/accuracies": 0.4312500059604645,
76
+ "rewards/chosen": -0.000692486937623471,
77
+ "rewards/margins": -0.0004873524303548038,
78
+ "rewards/rejected": -0.00020513453637249768,
79
+ "step": 40
80
+ },
81
+ {
82
+ "epoch": 0.1,
83
+ "learning_rate": 2.604166666666667e-06,
84
+ "logits/chosen": 0.8561393022537231,
85
+ "logits/rejected": 0.9358364343643188,
86
+ "logps/chosen": -257.61163330078125,
87
+ "logps/rejected": -219.1778106689453,
88
+ "loss": 2492.4258,
89
+ "rewards/accuracies": 0.59375,
90
+ "rewards/chosen": 0.0008641455206088722,
91
+ "rewards/margins": 0.0018391588237136602,
92
+ "rewards/rejected": -0.000975013361312449,
93
+ "step": 50
94
+ },
95
+ {
96
+ "epoch": 0.13,
97
+ "learning_rate": 3.125e-06,
98
+ "logits/chosen": 0.8744305372238159,
99
+ "logits/rejected": 0.9009464383125305,
100
+ "logps/chosen": -237.00228881835938,
101
+ "logps/rejected": -237.4504852294922,
102
+ "loss": 2497.3361,
103
+ "rewards/accuracies": 0.5249999761581421,
104
+ "rewards/chosen": -9.825383131101262e-06,
105
+ "rewards/margins": 0.00019281035929452628,
106
+ "rewards/rejected": -0.0002026357251452282,
107
+ "step": 60
108
+ },
109
+ {
110
+ "epoch": 0.15,
111
+ "learning_rate": 3.6458333333333333e-06,
112
+ "logits/chosen": 0.8502357602119446,
113
+ "logits/rejected": 0.8783925771713257,
114
+ "logps/chosen": -260.8014221191406,
115
+ "logps/rejected": -227.61328125,
116
+ "loss": 2500.9129,
117
+ "rewards/accuracies": 0.4437499940395355,
118
+ "rewards/chosen": -0.0007933862507343292,
119
+ "rewards/margins": -0.00043974880827590823,
120
+ "rewards/rejected": -0.000353637442458421,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 0.17,
125
+ "learning_rate": 4.166666666666667e-06,
126
+ "logits/chosen": 0.8865741491317749,
127
+ "logits/rejected": 0.9179280996322632,
128
+ "logps/chosen": -251.12197875976562,
129
+ "logps/rejected": -231.300048828125,
130
+ "loss": 2494.3678,
131
+ "rewards/accuracies": 0.518750011920929,
132
+ "rewards/chosen": -0.00037043695920147,
133
+ "rewards/margins": 0.0005658747395500541,
134
+ "rewards/rejected": -0.0009363117860630155,
135
+ "step": 80
136
+ },
137
+ {
138
+ "epoch": 0.19,
139
+ "learning_rate": 4.6875000000000004e-06,
140
+ "logits/chosen": 0.8638327717781067,
141
+ "logits/rejected": 0.9173502922058105,
142
+ "logps/chosen": -225.3396453857422,
143
+ "logps/rejected": -241.39352416992188,
144
+ "loss": 2497.2648,
145
+ "rewards/accuracies": 0.543749988079071,
146
+ "rewards/chosen": -0.0007204846478998661,
147
+ "rewards/margins": 0.0005254354909993708,
148
+ "rewards/rejected": -0.001245920080691576,
149
+ "step": 90
150
+ },
151
+ {
152
+ "epoch": 0.21,
153
+ "learning_rate": 4.999731868769027e-06,
154
+ "logits/chosen": 0.9247162938117981,
155
+ "logits/rejected": 0.9241034388542175,
156
+ "logps/chosen": -242.1609344482422,
157
+ "logps/rejected": -221.4512939453125,
158
+ "loss": 2494.2439,
159
+ "rewards/accuracies": 0.5375000238418579,
160
+ "rewards/chosen": -0.00025123285013251007,
161
+ "rewards/margins": 0.0006884234608151019,
162
+ "rewards/rejected": -0.0009396563400514424,
163
+ "step": 100
164
+ },
165
+ {
166
+ "epoch": 0.21,
167
+ "eval_logits/chosen": 0.8141916990280151,
168
+ "eval_logits/rejected": 0.9164313673973083,
169
+ "eval_logps/chosen": -260.2576599121094,
170
+ "eval_logps/rejected": -231.54052734375,
171
+ "eval_loss": 2494.119384765625,
172
+ "eval_rewards/accuracies": 0.5479999780654907,
173
+ "eval_rewards/chosen": -0.00013494741870090365,
174
+ "eval_rewards/margins": 0.0009091641986742616,
175
+ "eval_rewards/rejected": -0.0010441114427521825,
176
+ "eval_runtime": 325.9254,
177
+ "eval_samples_per_second": 6.136,
178
+ "eval_steps_per_second": 0.384,
179
+ "step": 100
180
+ },
181
+ {
182
+ "epoch": 0.23,
183
+ "learning_rate": 4.996716052911017e-06,
184
+ "logits/chosen": 0.8349224328994751,
185
+ "logits/rejected": 0.8758266568183899,
186
+ "logps/chosen": -264.0060119628906,
187
+ "logps/rejected": -219.6302490234375,
188
+ "loss": 2491.0754,
189
+ "rewards/accuracies": 0.606249988079071,
190
+ "rewards/chosen": 0.00016143513494171202,
191
+ "rewards/margins": 0.0016861247131600976,
192
+ "rewards/rejected": -0.001524689607322216,
193
+ "step": 110
194
+ },
195
+ {
196
+ "epoch": 0.25,
197
+ "learning_rate": 4.9903533134293035e-06,
198
+ "logits/chosen": 0.8607719540596008,
199
+ "logits/rejected": 0.9709636569023132,
200
+ "logps/chosen": -254.9365234375,
201
+ "logps/rejected": -219.2154541015625,
202
+ "loss": 2483.0162,
203
+ "rewards/accuracies": 0.612500011920929,
204
+ "rewards/chosen": -4.470603380468674e-05,
205
+ "rewards/margins": 0.0026740250177681446,
206
+ "rewards/rejected": -0.002718730829656124,
207
+ "step": 120
208
+ },
209
+ {
210
+ "epoch": 0.27,
211
+ "learning_rate": 4.9806521797692184e-06,
212
+ "logits/chosen": 0.8791080713272095,
213
+ "logits/rejected": 0.8794806599617004,
214
+ "logps/chosen": -264.69219970703125,
215
+ "logps/rejected": -247.05224609375,
216
+ "loss": 2478.3258,
217
+ "rewards/accuracies": 0.5375000238418579,
218
+ "rewards/chosen": -0.000567199953366071,
219
+ "rewards/margins": 0.0015755310887470841,
220
+ "rewards/rejected": -0.002142731100320816,
221
+ "step": 130
222
+ },
223
+ {
224
+ "epoch": 0.29,
225
+ "learning_rate": 4.967625656594782e-06,
226
+ "logits/chosen": 0.8544095754623413,
227
+ "logits/rejected": 0.9317782521247864,
228
+ "logps/chosen": -222.06851196289062,
229
+ "logps/rejected": -232.3370819091797,
230
+ "loss": 2474.4896,
231
+ "rewards/accuracies": 0.574999988079071,
232
+ "rewards/chosen": -0.0013934863964095712,
233
+ "rewards/margins": 0.0016968228155747056,
234
+ "rewards/rejected": -0.0030903094448149204,
235
+ "step": 140
236
+ },
237
+ {
238
+ "epoch": 0.31,
239
+ "learning_rate": 4.95129120635556e-06,
240
+ "logits/chosen": 0.8754276037216187,
241
+ "logits/rejected": 0.905910849571228,
242
+ "logps/chosen": -258.3515625,
243
+ "logps/rejected": -215.86328125,
244
+ "loss": 2470.2957,
245
+ "rewards/accuracies": 0.5562499761581421,
246
+ "rewards/chosen": -0.0012219983618706465,
247
+ "rewards/margins": 0.0023179189302027225,
248
+ "rewards/rejected": -0.003539917292073369,
249
+ "step": 150
250
+ },
251
+ {
252
+ "epoch": 0.33,
253
+ "learning_rate": 4.93167072587771e-06,
254
+ "logits/chosen": 0.7798808813095093,
255
+ "logits/rejected": 0.8481999635696411,
256
+ "logps/chosen": -257.81036376953125,
257
+ "logps/rejected": -250.8020477294922,
258
+ "loss": 2466.5924,
259
+ "rewards/accuracies": 0.581250011920929,
260
+ "rewards/chosen": -0.0015566629590466619,
261
+ "rewards/margins": 0.002945856424048543,
262
+ "rewards/rejected": -0.004502518568187952,
263
+ "step": 160
264
+ },
265
+ {
266
+ "epoch": 0.36,
267
+ "learning_rate": 4.908790517010637e-06,
268
+ "logits/chosen": 0.9313274621963501,
269
+ "logits/rejected": 0.9725171327590942,
270
+ "logps/chosen": -239.09017944335938,
271
+ "logps/rejected": -253.09255981445312,
272
+ "loss": 2458.4684,
273
+ "rewards/accuracies": 0.5874999761581421,
274
+ "rewards/chosen": -0.0023754839785397053,
275
+ "rewards/margins": 0.003910133149474859,
276
+ "rewards/rejected": -0.006285616662353277,
277
+ "step": 170
278
+ },
279
+ {
280
+ "epoch": 0.38,
281
+ "learning_rate": 4.882681251368549e-06,
282
+ "logits/chosen": 0.8546341061592102,
283
+ "logits/rejected": 0.8491582870483398,
284
+ "logps/chosen": -270.9264831542969,
285
+ "logps/rejected": -257.13836669921875,
286
+ "loss": 2442.4984,
287
+ "rewards/accuracies": 0.581250011920929,
288
+ "rewards/chosen": -0.0015862795989960432,
289
+ "rewards/margins": 0.005088582634925842,
290
+ "rewards/rejected": -0.006674862466752529,
291
+ "step": 180
292
+ },
293
+ {
294
+ "epoch": 0.4,
295
+ "learning_rate": 4.853377929214243e-06,
296
+ "logits/chosen": 0.7889066934585571,
297
+ "logits/rejected": 0.8611849546432495,
298
+ "logps/chosen": -252.2519989013672,
299
+ "logps/rejected": -245.2388458251953,
300
+ "loss": 2436.8207,
301
+ "rewards/accuracies": 0.6000000238418579,
302
+ "rewards/chosen": -0.0018671129364520311,
303
+ "rewards/margins": 0.0070212846621870995,
304
+ "rewards/rejected": -0.008888397365808487,
305
+ "step": 190
306
+ },
307
+ {
308
+ "epoch": 0.42,
309
+ "learning_rate": 4.8209198325401815e-06,
310
+ "logits/chosen": 0.8786072731018066,
311
+ "logits/rejected": 0.8954092264175415,
312
+ "logps/chosen": -237.77017211914062,
313
+ "logps/rejected": -233.86325073242188,
314
+ "loss": 2425.7957,
315
+ "rewards/accuracies": 0.6499999761581421,
316
+ "rewards/chosen": -0.0038148313760757446,
317
+ "rewards/margins": 0.00766246672719717,
318
+ "rewards/rejected": -0.01147729717195034,
319
+ "step": 200
320
+ },
321
+ {
322
+ "epoch": 0.42,
323
+ "eval_logits/chosen": 0.8182709813117981,
324
+ "eval_logits/rejected": 0.9217536449432373,
325
+ "eval_logps/chosen": -260.7673034667969,
326
+ "eval_logps/rejected": -232.97280883789062,
327
+ "eval_loss": 2420.32958984375,
328
+ "eval_rewards/accuracies": 0.656000018119812,
329
+ "eval_rewards/chosen": -0.00523131899535656,
330
+ "eval_rewards/margins": 0.010135524906218052,
331
+ "eval_rewards/rejected": -0.015366843901574612,
332
+ "eval_runtime": 326.0274,
333
+ "eval_samples_per_second": 6.134,
334
+ "eval_steps_per_second": 0.383,
335
+ "step": 200
336
+ },
337
+ {
338
+ "epoch": 0.44,
339
+ "learning_rate": 4.785350472409792e-06,
340
+ "logits/chosen": 0.9068363904953003,
341
+ "logits/rejected": 0.911398708820343,
342
+ "logps/chosen": -233.5305633544922,
343
+ "logps/rejected": -229.12158203125,
344
+ "loss": 2425.1516,
345
+ "rewards/accuracies": 0.59375,
346
+ "rewards/chosen": -0.009300420060753822,
347
+ "rewards/margins": 0.007756868842989206,
348
+ "rewards/rejected": -0.01705729030072689,
349
+ "step": 210
350
+ },
351
+ {
352
+ "epoch": 0.46,
353
+ "learning_rate": 4.746717530629565e-06,
354
+ "logits/chosen": 0.8564063906669617,
355
+ "logits/rejected": 0.8920175433158875,
356
+ "logps/chosen": -260.2020263671875,
357
+ "logps/rejected": -240.0993194580078,
358
+ "loss": 2418.3254,
359
+ "rewards/accuracies": 0.625,
360
+ "rewards/chosen": -0.008474646136164665,
361
+ "rewards/margins": 0.012274968437850475,
362
+ "rewards/rejected": -0.020749617367982864,
363
+ "step": 220
364
+ },
365
+ {
366
+ "epoch": 0.48,
367
+ "learning_rate": 4.7050727958301505e-06,
368
+ "logits/chosen": 0.8939176797866821,
369
+ "logits/rejected": 0.8774939775466919,
370
+ "logps/chosen": -245.0527801513672,
371
+ "logps/rejected": -230.6669921875,
372
+ "loss": 2386.8547,
373
+ "rewards/accuracies": 0.6499999761581421,
374
+ "rewards/chosen": -0.011173086240887642,
375
+ "rewards/margins": 0.013905773870646954,
376
+ "rewards/rejected": -0.02507885918021202,
377
+ "step": 230
378
+ },
379
+ {
380
+ "epoch": 0.5,
381
+ "learning_rate": 4.660472094042121e-06,
382
+ "logits/chosen": 0.8556815385818481,
383
+ "logits/rejected": 0.863630473613739,
384
+ "logps/chosen": -282.67669677734375,
385
+ "logps/rejected": -236.6685028076172,
386
+ "loss": 2355.6057,
387
+ "rewards/accuracies": 0.7124999761581421,
388
+ "rewards/chosen": -0.013796107843518257,
389
+ "rewards/margins": 0.017949409782886505,
390
+ "rewards/rejected": -0.03174551948904991,
391
+ "step": 240
392
+ },
393
+ {
394
+ "epoch": 0.52,
395
+ "learning_rate": 4.612975213859487e-06,
396
+ "logits/chosen": 0.8427609205245972,
397
+ "logits/rejected": 0.915958046913147,
398
+ "logps/chosen": -269.03228759765625,
399
+ "logps/rejected": -244.0561981201172,
400
+ "loss": 2353.825,
401
+ "rewards/accuracies": 0.675000011920929,
402
+ "rewards/chosen": -0.014950485900044441,
403
+ "rewards/margins": 0.016987096518278122,
404
+ "rewards/rejected": -0.031937580555677414,
405
+ "step": 250
406
+ },
407
+ {
408
+ "epoch": 0.54,
409
+ "learning_rate": 4.5626458262912745e-06,
410
+ "logits/chosen": 0.8542389869689941,
411
+ "logits/rejected": 0.8951870203018188,
412
+ "logps/chosen": -275.2603454589844,
413
+ "logps/rejected": -262.59820556640625,
414
+ "loss": 2344.6988,
415
+ "rewards/accuracies": 0.6812499761581421,
416
+ "rewards/chosen": -0.010937584564089775,
417
+ "rewards/margins": 0.02299124002456665,
418
+ "rewards/rejected": -0.033928822726011276,
419
+ "step": 260
420
+ },
421
+ {
422
+ "epoch": 0.57,
423
+ "learning_rate": 4.509551399408598e-06,
424
+ "logits/chosen": 0.9320127367973328,
425
+ "logits/rejected": 0.9399789571762085,
426
+ "logps/chosen": -254.00363159179688,
427
+ "logps/rejected": -211.70822143554688,
428
+ "loss": 2326.323,
429
+ "rewards/accuracies": 0.6937500238418579,
430
+ "rewards/chosen": -0.017307719215750694,
431
+ "rewards/margins": 0.020856201648712158,
432
+ "rewards/rejected": -0.038163922727108,
433
+ "step": 270
434
+ },
435
+ {
436
+ "epoch": 0.59,
437
+ "learning_rate": 4.453763107901676e-06,
438
+ "logits/chosen": 0.9306808710098267,
439
+ "logits/rejected": 0.8955329060554504,
440
+ "logps/chosen": -248.00350952148438,
441
+ "logps/rejected": -256.06072998046875,
442
+ "loss": 2353.0586,
443
+ "rewards/accuracies": 0.606249988079071,
444
+ "rewards/chosen": -0.0263187438249588,
445
+ "rewards/margins": 0.0119154192507267,
446
+ "rewards/rejected": -0.0382341630756855,
447
+ "step": 280
448
+ },
449
+ {
450
+ "epoch": 0.61,
451
+ "learning_rate": 4.3953557376679856e-06,
452
+ "logits/chosen": 0.8539811372756958,
453
+ "logits/rejected": 0.860480785369873,
454
+ "logps/chosen": -262.2434387207031,
455
+ "logps/rejected": -258.9552917480469,
456
+ "loss": 2328.1365,
457
+ "rewards/accuracies": 0.5874999761581421,
458
+ "rewards/chosen": -0.026760926470160484,
459
+ "rewards/margins": 0.01638859696686268,
460
+ "rewards/rejected": -0.04314952343702316,
461
+ "step": 290
462
+ },
463
+ {
464
+ "epoch": 0.63,
465
+ "learning_rate": 4.33440758555951e-06,
466
+ "logits/chosen": 0.8304749727249146,
467
+ "logits/rejected": 0.9085506200790405,
468
+ "logps/chosen": -251.0150604248047,
469
+ "logps/rejected": -247.31289672851562,
470
+ "loss": 2310.102,
471
+ "rewards/accuracies": 0.6937500238418579,
472
+ "rewards/chosen": -0.019448721781373024,
473
+ "rewards/margins": 0.029306888580322266,
474
+ "rewards/rejected": -0.04875560849905014,
475
+ "step": 300
476
+ },
477
+ {
478
+ "epoch": 0.63,
479
+ "eval_logits/chosen": 0.8040502071380615,
480
+ "eval_logits/rejected": 0.9088209271430969,
481
+ "eval_logps/chosen": -263.2439880371094,
482
+ "eval_logps/rejected": -237.19593811035156,
483
+ "eval_loss": 2309.945068359375,
484
+ "eval_rewards/accuracies": 0.6679999828338623,
485
+ "eval_rewards/chosen": -0.02999839559197426,
486
+ "eval_rewards/margins": 0.02759976126253605,
487
+ "eval_rewards/rejected": -0.05759815126657486,
488
+ "eval_runtime": 325.793,
489
+ "eval_samples_per_second": 6.139,
490
+ "eval_steps_per_second": 0.384,
491
+ "step": 300
492
+ },
493
+ {
494
+ "epoch": 0.65,
495
+ "learning_rate": 4.2710003544234255e-06,
496
+ "logits/chosen": 0.8506165742874146,
497
+ "logits/rejected": 0.8639786839485168,
498
+ "logps/chosen": -238.32284545898438,
499
+ "logps/rejected": -230.3314971923828,
500
+ "loss": 2280.0254,
501
+ "rewards/accuracies": 0.6312500238418579,
502
+ "rewards/chosen": -0.034138236194849014,
503
+ "rewards/margins": 0.023389272391796112,
504
+ "rewards/rejected": -0.05752750486135483,
505
+ "step": 310
506
+ },
507
+ {
508
+ "epoch": 0.67,
509
+ "learning_rate": 4.205219043576955e-06,
510
+ "logits/chosen": 0.8560878038406372,
511
+ "logits/rejected": 0.878685474395752,
512
+ "logps/chosen": -226.87521362304688,
513
+ "logps/rejected": -220.47201538085938,
514
+ "loss": 2272.9057,
515
+ "rewards/accuracies": 0.668749988079071,
516
+ "rewards/chosen": -0.03677995875477791,
517
+ "rewards/margins": 0.026556292548775673,
518
+ "rewards/rejected": -0.06333625316619873,
519
+ "step": 320
520
+ },
521
+ {
522
+ "epoch": 0.69,
523
+ "learning_rate": 4.137151834863213e-06,
524
+ "logits/chosen": 0.8369635343551636,
525
+ "logits/rejected": 0.8179060220718384,
526
+ "logps/chosen": -255.9115753173828,
527
+ "logps/rejected": -263.99334716796875,
528
+ "loss": 2288.366,
529
+ "rewards/accuracies": 0.65625,
530
+ "rewards/chosen": -0.03619007021188736,
531
+ "rewards/margins": 0.036657560616731644,
532
+ "rewards/rejected": -0.07284761965274811,
533
+ "step": 330
534
+ },
535
+ {
536
+ "epoch": 0.71,
537
+ "learning_rate": 4.066889974440757e-06,
538
+ "logits/chosen": 0.8635396957397461,
539
+ "logits/rejected": 0.8681753873825073,
540
+ "logps/chosen": -268.9547119140625,
541
+ "logps/rejected": -254.25814819335938,
542
+ "loss": 2217.676,
543
+ "rewards/accuracies": 0.643750011920929,
544
+ "rewards/chosen": -0.03367992490530014,
545
+ "rewards/margins": 0.03633836284279823,
546
+ "rewards/rejected": -0.07001828402280807,
547
+ "step": 340
548
+ },
549
+ {
550
+ "epoch": 0.73,
551
+ "learning_rate": 3.994527650465352e-06,
552
+ "logits/chosen": 0.8572274446487427,
553
+ "logits/rejected": 0.8776391744613647,
554
+ "logps/chosen": -226.3207550048828,
555
+ "logps/rejected": -211.4627685546875,
556
+ "loss": 2251.0934,
557
+ "rewards/accuracies": 0.668749988079071,
558
+ "rewards/chosen": -0.046974435448646545,
559
+ "rewards/margins": 0.03458600863814354,
560
+ "rewards/rejected": -0.08156044781208038,
561
+ "step": 350
562
+ },
563
+ {
564
+ "epoch": 0.75,
565
+ "learning_rate": 3.92016186682789e-06,
566
+ "logits/chosen": 0.7831335067749023,
567
+ "logits/rejected": 0.7933910489082336,
568
+ "logps/chosen": -213.00491333007812,
569
+ "logps/rejected": -242.6522979736328,
570
+ "loss": 2245.7613,
571
+ "rewards/accuracies": 0.65625,
572
+ "rewards/chosen": -0.05574915558099747,
573
+ "rewards/margins": 0.030930276960134506,
574
+ "rewards/rejected": -0.08667943626642227,
575
+ "step": 360
576
+ },
577
+ {
578
+ "epoch": 0.77,
579
+ "learning_rate": 3.843892313117724e-06,
580
+ "logits/chosen": 0.8648300170898438,
581
+ "logits/rejected": 0.895352840423584,
582
+ "logps/chosen": -272.79400634765625,
583
+ "logps/rejected": -249.0972900390625,
584
+ "loss": 2275.2465,
585
+ "rewards/accuracies": 0.668749988079071,
586
+ "rewards/chosen": -0.05149676650762558,
587
+ "rewards/margins": 0.0362628772854805,
588
+ "rewards/rejected": -0.08775965869426727,
589
+ "step": 370
590
+ },
591
+ {
592
+ "epoch": 0.8,
593
+ "learning_rate": 3.7658212309857576e-06,
594
+ "logits/chosen": 0.8409647941589355,
595
+ "logits/rejected": 0.8629050254821777,
596
+ "logps/chosen": -243.03182983398438,
597
+ "logps/rejected": -222.12338256835938,
598
+ "loss": 2199.2453,
599
+ "rewards/accuracies": 0.6812499761581421,
600
+ "rewards/chosen": -0.053347665816545486,
601
+ "rewards/margins": 0.0404227040708065,
602
+ "rewards/rejected": -0.09377036988735199,
603
+ "step": 380
604
+ },
605
+ {
606
+ "epoch": 0.82,
607
+ "learning_rate": 3.686053277086401e-06,
608
+ "logits/chosen": 0.7878540754318237,
609
+ "logits/rejected": 0.875022292137146,
610
+ "logps/chosen": -266.7389831542969,
611
+ "logps/rejected": -244.7388916015625,
612
+ "loss": 2180.4859,
613
+ "rewards/accuracies": 0.6812499761581421,
614
+ "rewards/chosen": -0.05637942627072334,
615
+ "rewards/margins": 0.04192977398633957,
616
+ "rewards/rejected": -0.09830919653177261,
617
+ "step": 390
618
+ },
619
+ {
620
+ "epoch": 0.84,
621
+ "learning_rate": 3.604695382782159e-06,
622
+ "logits/chosen": 0.7989987134933472,
623
+ "logits/rejected": 0.8174804449081421,
624
+ "logps/chosen": -282.3369445800781,
625
+ "logps/rejected": -260.7422790527344,
626
+ "loss": 2159.0707,
627
+ "rewards/accuracies": 0.625,
628
+ "rewards/chosen": -0.05871356651186943,
629
+ "rewards/margins": 0.03764244168996811,
630
+ "rewards/rejected": -0.09635601192712784,
631
+ "step": 400
632
+ },
633
+ {
634
+ "epoch": 0.84,
635
+ "eval_logits/chosen": 0.7577926516532898,
636
+ "eval_logits/rejected": 0.8636941313743591,
637
+ "eval_logps/chosen": -266.5838623046875,
638
+ "eval_logps/rejected": -242.28573608398438,
639
+ "eval_loss": 2236.27587890625,
640
+ "eval_rewards/accuracies": 0.6840000152587891,
641
+ "eval_rewards/chosen": -0.06339714676141739,
642
+ "eval_rewards/margins": 0.04509904235601425,
643
+ "eval_rewards/rejected": -0.10849618166685104,
644
+ "eval_runtime": 325.6827,
645
+ "eval_samples_per_second": 6.141,
646
+ "eval_steps_per_second": 0.384,
647
+ "step": 400
648
+ },
649
+ {
650
+ "epoch": 0.86,
651
+ "learning_rate": 3.5218566107988872e-06,
652
+ "logits/chosen": 0.8149998784065247,
653
+ "logits/rejected": 0.8398739695549011,
654
+ "logps/chosen": -274.44647216796875,
655
+ "logps/rejected": -244.11441040039062,
656
+ "loss": 2235.4445,
657
+ "rewards/accuracies": 0.706250011920929,
658
+ "rewards/chosen": -0.060248058289289474,
659
+ "rewards/margins": 0.0490226149559021,
660
+ "rewards/rejected": -0.10927066951990128,
661
+ "step": 410
662
+ },
663
+ {
664
+ "epoch": 0.88,
665
+ "learning_rate": 3.437648009023905e-06,
666
+ "logits/chosen": 0.7635517716407776,
667
+ "logits/rejected": 0.8307437896728516,
668
+ "logps/chosen": -219.5519256591797,
669
+ "logps/rejected": -215.0430145263672,
670
+ "loss": 2210.1293,
671
+ "rewards/accuracies": 0.637499988079071,
672
+ "rewards/chosen": -0.07285571843385696,
673
+ "rewards/margins": 0.03309701010584831,
674
+ "rewards/rejected": -0.10595273971557617,
675
+ "step": 420
676
+ },
677
+ {
678
+ "epoch": 0.9,
679
+ "learning_rate": 3.352182461642929e-06,
680
+ "logits/chosen": 0.7868200540542603,
681
+ "logits/rejected": 0.8455543518066406,
682
+ "logps/chosen": -240.498779296875,
683
+ "logps/rejected": -228.08535766601562,
684
+ "loss": 2132.6711,
685
+ "rewards/accuracies": 0.699999988079071,
686
+ "rewards/chosen": -0.06197371333837509,
687
+ "rewards/margins": 0.048869095742702484,
688
+ "rewards/rejected": -0.11084280163049698,
689
+ "step": 430
690
+ },
691
+ {
692
+ "epoch": 0.92,
693
+ "learning_rate": 3.265574537815398e-06,
694
+ "logits/chosen": 0.7955938577651978,
695
+ "logits/rejected": 0.8357489705085754,
696
+ "logps/chosen": -285.87493896484375,
697
+ "logps/rejected": -252.0729522705078,
698
+ "loss": 2188.6035,
699
+ "rewards/accuracies": 0.6937500238418579,
700
+ "rewards/chosen": -0.05718296021223068,
701
+ "rewards/margins": 0.05849381536245346,
702
+ "rewards/rejected": -0.11567678302526474,
703
+ "step": 440
704
+ },
705
+ {
706
+ "epoch": 0.94,
707
+ "learning_rate": 3.177940338091043e-06,
708
+ "logits/chosen": 0.8076552152633667,
709
+ "logits/rejected": 0.8619295954704285,
710
+ "logps/chosen": -259.4091796875,
711
+ "logps/rejected": -231.5826873779297,
712
+ "loss": 2190.8027,
713
+ "rewards/accuracies": 0.6937500238418579,
714
+ "rewards/chosen": -0.07417537271976471,
715
+ "rewards/margins": 0.042368099093437195,
716
+ "rewards/rejected": -0.1165434867143631,
717
+ "step": 450
718
+ },
719
+ {
720
+ "epoch": 0.96,
721
+ "learning_rate": 3.089397338773569e-06,
722
+ "logits/chosen": 0.787534236907959,
723
+ "logits/rejected": 0.8202370405197144,
724
+ "logps/chosen": -265.3134460449219,
725
+ "logps/rejected": -237.35116577148438,
726
+ "loss": 2146.2295,
727
+ "rewards/accuracies": 0.706250011920929,
728
+ "rewards/chosen": -0.0778832882642746,
729
+ "rewards/margins": 0.05344442278146744,
730
+ "rewards/rejected": -0.13132771849632263,
731
+ "step": 460
732
+ },
733
+ {
734
+ "epoch": 0.98,
735
+ "learning_rate": 3.0000642344401115e-06,
736
+ "logits/chosen": 0.7577365040779114,
737
+ "logits/rejected": 0.803280234336853,
738
+ "logps/chosen": -246.31301879882812,
739
+ "logps/rejected": -235.45675659179688,
740
+ "loss": 2136.8623,
741
+ "rewards/accuracies": 0.65625,
742
+ "rewards/chosen": -0.0812118723988533,
743
+ "rewards/margins": 0.04804684966802597,
744
+ "rewards/rejected": -0.12925872206687927,
745
+ "step": 470
746
+ },
747
+ {
748
+ "epoch": 1.0,
749
+ "learning_rate": 2.9100607788275547e-06,
750
+ "logits/chosen": 0.7916151881217957,
751
+ "logits/rejected": 0.8478500247001648,
752
+ "logps/chosen": -251.5865020751953,
753
+ "logps/rejected": -246.8653564453125,
754
+ "loss": 2219.0154,
755
+ "rewards/accuracies": 0.6812499761581421,
756
+ "rewards/chosen": -0.08595123142004013,
757
+ "rewards/margins": 0.04087045416235924,
758
+ "rewards/rejected": -0.12682169675827026,
759
+ "step": 480
760
+ },
761
+ {
762
+ "epoch": 1.03,
763
+ "learning_rate": 2.8195076242990124e-06,
764
+ "logits/chosen": 0.7650834321975708,
765
+ "logits/rejected": 0.8132265210151672,
766
+ "logps/chosen": -246.82644653320312,
767
+ "logps/rejected": -227.9211883544922,
768
+ "loss": 2162.0559,
769
+ "rewards/accuracies": 0.6499999761581421,
770
+ "rewards/chosen": -0.0883156806230545,
771
+ "rewards/margins": 0.050779860466718674,
772
+ "rewards/rejected": -0.13909552991390228,
773
+ "step": 490
774
+ },
775
+ {
776
+ "epoch": 1.05,
777
+ "learning_rate": 2.72852616010567e-06,
778
+ "logits/chosen": 0.7439101338386536,
779
+ "logits/rejected": 0.7856588363647461,
780
+ "logps/chosen": -261.5977478027344,
781
+ "logps/rejected": -247.032470703125,
782
+ "loss": 2176.8641,
783
+ "rewards/accuracies": 0.71875,
784
+ "rewards/chosen": -0.08485061675310135,
785
+ "rewards/margins": 0.05461747199296951,
786
+ "rewards/rejected": -0.13946808874607086,
787
+ "step": 500
788
+ },
789
+ {
790
+ "epoch": 1.05,
791
+ "eval_logits/chosen": 0.7125015258789062,
792
+ "eval_logits/rejected": 0.8179839849472046,
793
+ "eval_logps/chosen": -269.2715759277344,
794
+ "eval_logps/rejected": -246.0634002685547,
795
+ "eval_loss": 2197.5419921875,
796
+ "eval_rewards/accuracies": 0.6980000138282776,
797
+ "eval_rewards/chosen": -0.09027400612831116,
798
+ "eval_rewards/margins": 0.05599898844957352,
799
+ "eval_rewards/rejected": -0.14627300202846527,
800
+ "eval_runtime": 325.7893,
801
+ "eval_samples_per_second": 6.139,
802
+ "eval_steps_per_second": 0.384,
803
+ "step": 500
804
+ },
805
+ {
806
+ "epoch": 1.07,
807
+ "learning_rate": 2.637238349660819e-06,
808
+ "logits/chosen": 0.7544692754745483,
809
+ "logits/rejected": 0.8448120355606079,
810
+ "logps/chosen": -245.3660430908203,
811
+ "logps/rejected": -210.2096405029297,
812
+ "loss": 2188.5398,
813
+ "rewards/accuracies": 0.668749988079071,
814
+ "rewards/chosen": -0.0969894677400589,
815
+ "rewards/margins": 0.04615020379424095,
816
+ "rewards/rejected": -0.14313964545726776,
817
+ "step": 510
818
+ },
819
+ {
820
+ "epoch": 1.09,
821
+ "learning_rate": 2.5457665670441937e-06,
822
+ "logits/chosen": 0.8052291870117188,
823
+ "logits/rejected": 0.8244439959526062,
824
+ "logps/chosen": -257.635009765625,
825
+ "logps/rejected": -238.87258911132812,
826
+ "loss": 2140.0687,
827
+ "rewards/accuracies": 0.6812499761581421,
828
+ "rewards/chosen": -0.082728311419487,
829
+ "rewards/margins": 0.061602912843227386,
830
+ "rewards/rejected": -0.14433124661445618,
831
+ "step": 520
832
+ },
833
+ {
834
+ "epoch": 1.11,
835
+ "learning_rate": 2.4542334329558075e-06,
836
+ "logits/chosen": 0.7256805896759033,
837
+ "logits/rejected": 0.7552824020385742,
838
+ "logps/chosen": -250.42422485351562,
839
+ "logps/rejected": -241.7630615234375,
840
+ "loss": 2133.6596,
841
+ "rewards/accuracies": 0.7562500238418579,
842
+ "rewards/chosen": -0.0894266813993454,
843
+ "rewards/margins": 0.05599946528673172,
844
+ "rewards/rejected": -0.14542615413665771,
845
+ "step": 530
846
+ },
847
+ {
848
+ "epoch": 1.13,
849
+ "learning_rate": 2.3627616503391813e-06,
850
+ "logits/chosen": 0.7206599116325378,
851
+ "logits/rejected": 0.7505000233650208,
852
+ "logps/chosen": -267.65875244140625,
853
+ "logps/rejected": -228.6749725341797,
854
+ "loss": 2182.2404,
855
+ "rewards/accuracies": 0.643750011920929,
856
+ "rewards/chosen": -0.09456731379032135,
857
+ "rewards/margins": 0.04886298626661301,
858
+ "rewards/rejected": -0.14343029260635376,
859
+ "step": 540
860
+ },
861
+ {
862
+ "epoch": 1.15,
863
+ "learning_rate": 2.271473839894331e-06,
864
+ "logits/chosen": 0.7229181528091431,
865
+ "logits/rejected": 0.749284029006958,
866
+ "logps/chosen": -276.0394287109375,
867
+ "logps/rejected": -262.62982177734375,
868
+ "loss": 2171.0582,
869
+ "rewards/accuracies": 0.6625000238418579,
870
+ "rewards/chosen": -0.09430189430713654,
871
+ "rewards/margins": 0.05570146441459656,
872
+ "rewards/rejected": -0.1500033438205719,
873
+ "step": 550
874
+ },
875
+ {
876
+ "epoch": 1.17,
877
+ "learning_rate": 2.1804923757009885e-06,
878
+ "logits/chosen": 0.694362998008728,
879
+ "logits/rejected": 0.7259857654571533,
880
+ "logps/chosen": -261.21038818359375,
881
+ "logps/rejected": -239.57852172851562,
882
+ "loss": 2163.352,
883
+ "rewards/accuracies": 0.668749988079071,
884
+ "rewards/chosen": -0.100721076130867,
885
+ "rewards/margins": 0.05086208134889603,
886
+ "rewards/rejected": -0.15158315002918243,
887
+ "step": 560
888
+ },
889
+ {
890
+ "epoch": 1.19,
891
+ "learning_rate": 2.089939221172446e-06,
892
+ "logits/chosen": 0.7141777276992798,
893
+ "logits/rejected": 0.7269617915153503,
894
+ "logps/chosen": -279.9233093261719,
895
+ "logps/rejected": -245.4532928466797,
896
+ "loss": 2219.2641,
897
+ "rewards/accuracies": 0.6875,
898
+ "rewards/chosen": -0.10629091411828995,
899
+ "rewards/margins": 0.06119798496365547,
900
+ "rewards/rejected": -0.16748890280723572,
901
+ "step": 570
902
+ },
903
+ {
904
+ "epoch": 1.21,
905
+ "learning_rate": 1.9999357655598894e-06,
906
+ "logits/chosen": 0.7328698635101318,
907
+ "logits/rejected": 0.7043576240539551,
908
+ "logps/chosen": -250.1948699951172,
909
+ "logps/rejected": -239.837890625,
910
+ "loss": 2180.0646,
911
+ "rewards/accuracies": 0.637499988079071,
912
+ "rewards/chosen": -0.10338902473449707,
913
+ "rewards/margins": 0.0403132289648056,
914
+ "rewards/rejected": -0.14370223879814148,
915
+ "step": 580
916
+ },
917
+ {
918
+ "epoch": 1.23,
919
+ "learning_rate": 1.9106026612264316e-06,
920
+ "logits/chosen": 0.7637673616409302,
921
+ "logits/rejected": 0.8286052942276001,
922
+ "logps/chosen": -227.8063507080078,
923
+ "logps/rejected": -227.64193725585938,
924
+ "loss": 2125.3691,
925
+ "rewards/accuracies": 0.6187499761581421,
926
+ "rewards/chosen": -0.10240204632282257,
927
+ "rewards/margins": 0.04882645606994629,
928
+ "rewards/rejected": -0.15122851729393005,
929
+ "step": 590
930
+ },
931
+ {
932
+ "epoch": 1.26,
933
+ "learning_rate": 1.8220596619089576e-06,
934
+ "logits/chosen": 0.7344295978546143,
935
+ "logits/rejected": 0.7085897922515869,
936
+ "logps/chosen": -271.92706298828125,
937
+ "logps/rejected": -245.1657257080078,
938
+ "loss": 2066.3285,
939
+ "rewards/accuracies": 0.6937500238418579,
940
+ "rewards/chosen": -0.10316000878810883,
941
+ "rewards/margins": 0.061250198632478714,
942
+ "rewards/rejected": -0.16441020369529724,
943
+ "step": 600
944
+ },
945
+ {
946
+ "epoch": 1.26,
947
+ "eval_logits/chosen": 0.6878580451011658,
948
+ "eval_logits/rejected": 0.7927125096321106,
949
+ "eval_logps/chosen": -270.385498046875,
950
+ "eval_logps/rejected": -247.7128448486328,
951
+ "eval_loss": 2177.3388671875,
952
+ "eval_rewards/accuracies": 0.6959999799728394,
953
+ "eval_rewards/chosen": -0.10141333192586899,
954
+ "eval_rewards/margins": 0.0613539032638073,
955
+ "eval_rewards/rejected": -0.16276724636554718,
956
+ "eval_runtime": 325.4497,
957
+ "eval_samples_per_second": 6.145,
958
+ "eval_steps_per_second": 0.384,
959
+ "step": 600
960
+ },
961
+ {
962
+ "epoch": 1.28,
963
+ "learning_rate": 1.7344254621846018e-06,
964
+ "logits/chosen": 0.7047083377838135,
965
+ "logits/rejected": 0.7504035830497742,
966
+ "logps/chosen": -273.6443176269531,
967
+ "logps/rejected": -265.7873840332031,
968
+ "loss": 2070.9469,
969
+ "rewards/accuracies": 0.668749988079071,
970
+ "rewards/chosen": -0.09533126652240753,
971
+ "rewards/margins": 0.06477675586938858,
972
+ "rewards/rejected": -0.16010800004005432,
973
+ "step": 610
974
+ },
975
+ {
976
+ "epoch": 1.3,
977
+ "learning_rate": 1.647817538357072e-06,
978
+ "logits/chosen": 0.7320979237556458,
979
+ "logits/rejected": 0.8024471998214722,
980
+ "logps/chosen": -257.2734680175781,
981
+ "logps/rejected": -243.01651000976562,
982
+ "loss": 2189.2789,
983
+ "rewards/accuracies": 0.6499999761581421,
984
+ "rewards/chosen": -0.10891245305538177,
985
+ "rewards/margins": 0.04389963299036026,
986
+ "rewards/rejected": -0.15281209349632263,
987
+ "step": 620
988
+ },
989
+ {
990
+ "epoch": 1.32,
991
+ "learning_rate": 1.5623519909760953e-06,
992
+ "logits/chosen": 0.7088596820831299,
993
+ "logits/rejected": 0.7686936259269714,
994
+ "logps/chosen": -253.4821014404297,
995
+ "logps/rejected": -253.3447723388672,
996
+ "loss": 2140.6246,
997
+ "rewards/accuracies": 0.6312500238418579,
998
+ "rewards/chosen": -0.11572308838367462,
999
+ "rewards/margins": 0.046286530792713165,
1000
+ "rewards/rejected": -0.16200962662696838,
1001
+ "step": 630
1002
+ },
1003
+ {
1004
+ "epoch": 1.34,
1005
+ "learning_rate": 1.4781433892011132e-06,
1006
+ "logits/chosen": 0.7846838235855103,
1007
+ "logits/rejected": 0.8031150698661804,
1008
+ "logps/chosen": -251.593505859375,
1009
+ "logps/rejected": -272.357177734375,
1010
+ "loss": 2168.2102,
1011
+ "rewards/accuracies": 0.6812499761581421,
1012
+ "rewards/chosen": -0.10245855897665024,
1013
+ "rewards/margins": 0.04714034125208855,
1014
+ "rewards/rejected": -0.1495988965034485,
1015
+ "step": 640
1016
+ },
1017
+ {
1018
+ "epoch": 1.36,
1019
+ "learning_rate": 1.3953046172178413e-06,
1020
+ "logits/chosen": 0.7221434116363525,
1021
+ "logits/rejected": 0.7379263043403625,
1022
+ "logps/chosen": -264.2921142578125,
1023
+ "logps/rejected": -261.3705749511719,
1024
+ "loss": 2137.5834,
1025
+ "rewards/accuracies": 0.6875,
1026
+ "rewards/chosen": -0.11888917535543442,
1027
+ "rewards/margins": 0.05505634471774101,
1028
+ "rewards/rejected": -0.17394550144672394,
1029
+ "step": 650
1030
+ },
1031
+ {
1032
+ "epoch": 1.38,
1033
+ "learning_rate": 1.3139467229135999e-06,
1034
+ "logits/chosen": 0.6965005397796631,
1035
+ "logits/rejected": 0.7608405351638794,
1036
+ "logps/chosen": -230.50830078125,
1037
+ "logps/rejected": -261.0777282714844,
1038
+ "loss": 2178.6738,
1039
+ "rewards/accuracies": 0.625,
1040
+ "rewards/chosen": -0.10801998525857925,
1041
+ "rewards/margins": 0.049364469945430756,
1042
+ "rewards/rejected": -0.15738445520401,
1043
+ "step": 660
1044
+ },
1045
+ {
1046
+ "epoch": 1.4,
1047
+ "learning_rate": 1.2341787690142436e-06,
1048
+ "logits/chosen": 0.6697909235954285,
1049
+ "logits/rejected": 0.7529794573783875,
1050
+ "logps/chosen": -309.4987487792969,
1051
+ "logps/rejected": -255.5410919189453,
1052
+ "loss": 2163.5801,
1053
+ "rewards/accuracies": 0.7124999761581421,
1054
+ "rewards/chosen": -0.09704665839672089,
1055
+ "rewards/margins": 0.06983451545238495,
1056
+ "rewards/rejected": -0.16688117384910583,
1057
+ "step": 670
1058
+ },
1059
+ {
1060
+ "epoch": 1.42,
1061
+ "learning_rate": 1.1561076868822756e-06,
1062
+ "logits/chosen": 0.6984275579452515,
1063
+ "logits/rejected": 0.7366929650306702,
1064
+ "logps/chosen": -253.47988891601562,
1065
+ "logps/rejected": -230.54940795898438,
1066
+ "loss": 2114.4482,
1067
+ "rewards/accuracies": 0.643750011920929,
1068
+ "rewards/chosen": -0.10325287282466888,
1069
+ "rewards/margins": 0.05643494054675102,
1070
+ "rewards/rejected": -0.1596878170967102,
1071
+ "step": 680
1072
+ },
1073
+ {
1074
+ "epoch": 1.44,
1075
+ "learning_rate": 1.079838133172111e-06,
1076
+ "logits/chosen": 0.760990560054779,
1077
+ "logits/rejected": 0.7594455480575562,
1078
+ "logps/chosen": -267.423583984375,
1079
+ "logps/rejected": -251.7685089111328,
1080
+ "loss": 2198.5418,
1081
+ "rewards/accuracies": 0.699999988079071,
1082
+ "rewards/chosen": -0.0984167829155922,
1083
+ "rewards/margins": 0.05027080327272415,
1084
+ "rewards/rejected": -0.14868757128715515,
1085
+ "step": 690
1086
+ },
1087
+ {
1088
+ "epoch": 1.47,
1089
+ "learning_rate": 1.0054723495346484e-06,
1090
+ "logits/chosen": 0.6380269527435303,
1091
+ "logits/rejected": 0.7157927751541138,
1092
+ "logps/chosen": -269.24908447265625,
1093
+ "logps/rejected": -241.45919799804688,
1094
+ "loss": 2119.5369,
1095
+ "rewards/accuracies": 0.637499988079071,
1096
+ "rewards/chosen": -0.10670281946659088,
1097
+ "rewards/margins": 0.05142299085855484,
1098
+ "rewards/rejected": -0.15812578797340393,
1099
+ "step": 700
1100
+ },
1101
+ {
1102
+ "epoch": 1.47,
1103
+ "eval_logits/chosen": 0.6726287007331848,
1104
+ "eval_logits/rejected": 0.7770729660987854,
1105
+ "eval_logps/chosen": -270.7824401855469,
1106
+ "eval_logps/rejected": -248.45333862304688,
1107
+ "eval_loss": 2166.385498046875,
1108
+ "eval_rewards/accuracies": 0.6959999799728394,
1109
+ "eval_rewards/chosen": -0.10538262128829956,
1110
+ "eval_rewards/margins": 0.06478944420814514,
1111
+ "eval_rewards/rejected": -0.17017203569412231,
1112
+ "eval_runtime": 325.443,
1113
+ "eval_samples_per_second": 6.145,
1114
+ "eval_steps_per_second": 0.384,
1115
+ "step": 700
1116
+ },
1117
+ {
1118
+ "epoch": 1.49,
1119
+ "learning_rate": 9.331100255592437e-07,
1120
+ "logits/chosen": 0.7291372418403625,
1121
+ "logits/rejected": 0.7911130785942078,
1122
+ "logps/chosen": -288.43621826171875,
1123
+ "logps/rejected": -244.11181640625,
1124
+ "loss": 2094.7482,
1125
+ "rewards/accuracies": 0.6875,
1126
+ "rewards/chosen": -0.09699388593435287,
1127
+ "rewards/margins": 0.0718715712428093,
1128
+ "rewards/rejected": -0.16886545717716217,
1129
+ "step": 710
1130
+ },
1131
+ {
1132
+ "epoch": 1.51,
1133
+ "learning_rate": 8.628481651367876e-07,
1134
+ "logits/chosen": 0.7129195928573608,
1135
+ "logits/rejected": 0.7519146203994751,
1136
+ "logps/chosen": -273.88372802734375,
1137
+ "logps/rejected": -259.33465576171875,
1138
+ "loss": 2053.8975,
1139
+ "rewards/accuracies": 0.675000011920929,
1140
+ "rewards/chosen": -0.10259418189525604,
1141
+ "rewards/margins": 0.06398696452379227,
1142
+ "rewards/rejected": -0.1665811538696289,
1143
+ "step": 720
1144
+ },
1145
+ {
1146
+ "epoch": 1.53,
1147
+ "learning_rate": 7.947809564230446e-07,
1148
+ "logits/chosen": 0.7380334138870239,
1149
+ "logits/rejected": 0.7657966017723083,
1150
+ "logps/chosen": -275.5352478027344,
1151
+ "logps/rejected": -267.64544677734375,
1152
+ "loss": 2090.3867,
1153
+ "rewards/accuracies": 0.7562500238418579,
1154
+ "rewards/chosen": -0.10346021503210068,
1155
+ "rewards/margins": 0.06666620075702667,
1156
+ "rewards/rejected": -0.17012640833854675,
1157
+ "step": 730
1158
+ },
1159
+ {
1160
+ "epoch": 1.55,
1161
+ "learning_rate": 7.289996455765749e-07,
1162
+ "logits/chosen": 0.694438099861145,
1163
+ "logits/rejected": 0.7096751928329468,
1164
+ "logps/chosen": -285.7628479003906,
1165
+ "logps/rejected": -266.2068176269531,
1166
+ "loss": 2179.6143,
1167
+ "rewards/accuracies": 0.6499999761581421,
1168
+ "rewards/chosen": -0.1050759106874466,
1169
+ "rewards/margins": 0.04846997186541557,
1170
+ "rewards/rejected": -0.15354588627815247,
1171
+ "step": 740
1172
+ },
1173
+ {
1174
+ "epoch": 1.57,
1175
+ "learning_rate": 6.655924144404907e-07,
1176
+ "logits/chosen": 0.6481191515922546,
1177
+ "logits/rejected": 0.7124420404434204,
1178
+ "logps/chosen": -269.7061767578125,
1179
+ "logps/rejected": -250.74905395507812,
1180
+ "loss": 2143.8697,
1181
+ "rewards/accuracies": 0.699999988079071,
1182
+ "rewards/chosen": -0.09301155805587769,
1183
+ "rewards/margins": 0.06966546177864075,
1184
+ "rewards/rejected": -0.16267701983451843,
1185
+ "step": 750
1186
+ },
1187
+ {
1188
+ "epoch": 1.59,
1189
+ "learning_rate": 6.046442623320145e-07,
1190
+ "logits/chosen": 0.7415434122085571,
1191
+ "logits/rejected": 0.77794349193573,
1192
+ "logps/chosen": -267.1199035644531,
1193
+ "logps/rejected": -233.93765258789062,
1194
+ "loss": 2100.39,
1195
+ "rewards/accuracies": 0.6187499761581421,
1196
+ "rewards/chosen": -0.11892955005168915,
1197
+ "rewards/margins": 0.048730865120887756,
1198
+ "rewards/rejected": -0.1676604300737381,
1199
+ "step": 760
1200
+ },
1201
+ {
1202
+ "epoch": 1.61,
1203
+ "learning_rate": 5.462368920983249e-07,
1204
+ "logits/chosen": 0.7082683444023132,
1205
+ "logits/rejected": 0.7616415023803711,
1206
+ "logps/chosen": -252.79940795898438,
1207
+ "logps/rejected": -231.410400390625,
1208
+ "loss": 2186.8764,
1209
+ "rewards/accuracies": 0.643750011920929,
1210
+ "rewards/chosen": -0.11736402660608292,
1211
+ "rewards/margins": 0.03412212058901787,
1212
+ "rewards/rejected": -0.1514861285686493,
1213
+ "step": 770
1214
+ },
1215
+ {
1216
+ "epoch": 1.63,
1217
+ "learning_rate": 4.904486005914027e-07,
1218
+ "logits/chosen": 0.7068939805030823,
1219
+ "logits/rejected": 0.7558518052101135,
1220
+ "logps/chosen": -293.97174072265625,
1221
+ "logps/rejected": -274.1463928222656,
1222
+ "loss": 2085.8768,
1223
+ "rewards/accuracies": 0.7124999761581421,
1224
+ "rewards/chosen": -0.0985812246799469,
1225
+ "rewards/margins": 0.07491330802440643,
1226
+ "rewards/rejected": -0.17349454760551453,
1227
+ "step": 780
1228
+ },
1229
+ {
1230
+ "epoch": 1.65,
1231
+ "learning_rate": 4.373541737087264e-07,
1232
+ "logits/chosen": 0.6898752450942993,
1233
+ "logits/rejected": 0.7903083562850952,
1234
+ "logps/chosen": -275.10650634765625,
1235
+ "logps/rejected": -239.94729614257812,
1236
+ "loss": 2145.707,
1237
+ "rewards/accuracies": 0.65625,
1238
+ "rewards/chosen": -0.09976668655872345,
1239
+ "rewards/margins": 0.06850672513246536,
1240
+ "rewards/rejected": -0.16827340424060822,
1241
+ "step": 790
1242
+ },
1243
+ {
1244
+ "epoch": 1.67,
1245
+ "learning_rate": 3.8702478614051353e-07,
1246
+ "logits/chosen": 0.668254554271698,
1247
+ "logits/rejected": 0.6736531257629395,
1248
+ "logps/chosen": -248.01626586914062,
1249
+ "logps/rejected": -245.8029327392578,
1250
+ "loss": 2096.7854,
1251
+ "rewards/accuracies": 0.699999988079071,
1252
+ "rewards/chosen": -0.10279978811740875,
1253
+ "rewards/margins": 0.07554516196250916,
1254
+ "rewards/rejected": -0.1783449351787567,
1255
+ "step": 800
1256
+ },
1257
+ {
1258
+ "epoch": 1.67,
1259
+ "eval_logits/chosen": 0.6641319990158081,
1260
+ "eval_logits/rejected": 0.7684468030929565,
1261
+ "eval_logps/chosen": -271.1501159667969,
1262
+ "eval_logps/rejected": -248.99647521972656,
1263
+ "eval_loss": 2159.71044921875,
1264
+ "eval_rewards/accuracies": 0.6959999799728394,
1265
+ "eval_rewards/chosen": -0.10905998200178146,
1266
+ "eval_rewards/margins": 0.06654350459575653,
1267
+ "eval_rewards/rejected": -0.1756034791469574,
1268
+ "eval_runtime": 325.5727,
1269
+ "eval_samples_per_second": 6.143,
1270
+ "eval_steps_per_second": 0.384,
1271
+ "step": 800
1272
+ },
1273
+ {
1274
+ "epoch": 1.7,
1275
+ "learning_rate": 3.3952790595787986e-07,
1276
+ "logits/chosen": 0.7833539247512817,
1277
+ "logits/rejected": 0.7525036931037903,
1278
+ "logps/chosen": -260.330810546875,
1279
+ "logps/rejected": -247.55337524414062,
1280
+ "loss": 2140.7756,
1281
+ "rewards/accuracies": 0.625,
1282
+ "rewards/chosen": -0.11206915229558945,
1283
+ "rewards/margins": 0.04849858209490776,
1284
+ "rewards/rejected": -0.1605677306652069,
1285
+ "step": 810
1286
+ },
1287
+ {
1288
+ "epoch": 1.72,
1289
+ "learning_rate": 2.9492720416985004e-07,
1290
+ "logits/chosen": 0.6317057609558105,
1291
+ "logits/rejected": 0.6787868738174438,
1292
+ "logps/chosen": -283.4757080078125,
1293
+ "logps/rejected": -266.5099182128906,
1294
+ "loss": 2079.7037,
1295
+ "rewards/accuracies": 0.7124999761581421,
1296
+ "rewards/chosen": -0.09024739265441895,
1297
+ "rewards/margins": 0.06939631700515747,
1298
+ "rewards/rejected": -0.15964370965957642,
1299
+ "step": 820
1300
+ },
1301
+ {
1302
+ "epoch": 1.74,
1303
+ "learning_rate": 2.5328246937043526e-07,
1304
+ "logits/chosen": 0.7278770208358765,
1305
+ "logits/rejected": 0.7481415867805481,
1306
+ "logps/chosen": -248.8776397705078,
1307
+ "logps/rejected": -233.3114776611328,
1308
+ "loss": 2126.3594,
1309
+ "rewards/accuracies": 0.6625000238418579,
1310
+ "rewards/chosen": -0.12706544995307922,
1311
+ "rewards/margins": 0.0513269305229187,
1312
+ "rewards/rejected": -0.17839238047599792,
1313
+ "step": 830
1314
+ },
1315
+ {
1316
+ "epoch": 1.76,
1317
+ "learning_rate": 2.1464952759020857e-07,
1318
+ "logits/chosen": 0.711814820766449,
1319
+ "logits/rejected": 0.7446814775466919,
1320
+ "logps/chosen": -260.2667541503906,
1321
+ "logps/rejected": -262.43939208984375,
1322
+ "loss": 2154.4643,
1323
+ "rewards/accuracies": 0.65625,
1324
+ "rewards/chosen": -0.10629498958587646,
1325
+ "rewards/margins": 0.06066171079874039,
1326
+ "rewards/rejected": -0.16695669293403625,
1327
+ "step": 840
1328
+ },
1329
+ {
1330
+ "epoch": 1.78,
1331
+ "learning_rate": 1.790801674598186e-07,
1332
+ "logits/chosen": 0.6856343746185303,
1333
+ "logits/rejected": 0.7043182849884033,
1334
+ "logps/chosen": -278.94085693359375,
1335
+ "logps/rejected": -251.6495819091797,
1336
+ "loss": 2141.4652,
1337
+ "rewards/accuracies": 0.7124999761581421,
1338
+ "rewards/chosen": -0.10617993772029877,
1339
+ "rewards/margins": 0.0626487284898758,
1340
+ "rewards/rejected": -0.16882868111133575,
1341
+ "step": 850
1342
+ },
1343
+ {
1344
+ "epoch": 1.8,
1345
+ "learning_rate": 1.4662207078575685e-07,
1346
+ "logits/chosen": 0.6835039258003235,
1347
+ "logits/rejected": 0.7542312145233154,
1348
+ "logps/chosen": -242.60501098632812,
1349
+ "logps/rejected": -255.89266967773438,
1350
+ "loss": 2120.8357,
1351
+ "rewards/accuracies": 0.675000011920929,
1352
+ "rewards/chosen": -0.10873384773731232,
1353
+ "rewards/margins": 0.06333796679973602,
1354
+ "rewards/rejected": -0.17207179963588715,
1355
+ "step": 860
1356
+ },
1357
+ {
1358
+ "epoch": 1.82,
1359
+ "learning_rate": 1.1731874863145143e-07,
1360
+ "logits/chosen": 0.7217626571655273,
1361
+ "logits/rejected": 0.7742848992347717,
1362
+ "logps/chosen": -271.91668701171875,
1363
+ "logps/rejected": -239.55130004882812,
1364
+ "loss": 2097.9199,
1365
+ "rewards/accuracies": 0.706250011920929,
1366
+ "rewards/chosen": -0.10101070255041122,
1367
+ "rewards/margins": 0.06306958198547363,
1368
+ "rewards/rejected": -0.16408027708530426,
1369
+ "step": 870
1370
+ },
1371
+ {
1372
+ "epoch": 1.84,
1373
+ "learning_rate": 9.120948298936422e-08,
1374
+ "logits/chosen": 0.7404865026473999,
1375
+ "logits/rejected": 0.7562910914421082,
1376
+ "logps/chosen": -260.2977294921875,
1377
+ "logps/rejected": -240.30502319335938,
1378
+ "loss": 2009.9664,
1379
+ "rewards/accuracies": 0.65625,
1380
+ "rewards/chosen": -0.10928317159414291,
1381
+ "rewards/margins": 0.05941414088010788,
1382
+ "rewards/rejected": -0.1686973124742508,
1383
+ "step": 880
1384
+ },
1385
+ {
1386
+ "epoch": 1.86,
1387
+ "learning_rate": 6.832927412229017e-08,
1388
+ "logits/chosen": 0.6707200407981873,
1389
+ "logits/rejected": 0.7818647623062134,
1390
+ "logps/chosen": -271.86065673828125,
1391
+ "logps/rejected": -243.53182983398438,
1392
+ "loss": 2081.6182,
1393
+ "rewards/accuracies": 0.6625000238418579,
1394
+ "rewards/chosen": -0.10177429020404816,
1395
+ "rewards/margins": 0.06641246378421783,
1396
+ "rewards/rejected": -0.16818676888942719,
1397
+ "step": 890
1398
+ },
1399
+ {
1400
+ "epoch": 1.88,
1401
+ "learning_rate": 4.870879364444109e-08,
1402
+ "logits/chosen": 0.6861158609390259,
1403
+ "logits/rejected": 0.7574108839035034,
1404
+ "logps/chosen": -261.988525390625,
1405
+ "logps/rejected": -223.02371215820312,
1406
+ "loss": 2094.5041,
1407
+ "rewards/accuracies": 0.7437499761581421,
1408
+ "rewards/chosen": -0.10657148063182831,
1409
+ "rewards/margins": 0.07416997849941254,
1410
+ "rewards/rejected": -0.18074145913124084,
1411
+ "step": 900
1412
+ },
1413
+ {
1414
+ "epoch": 1.88,
1415
+ "eval_logits/chosen": 0.6645969748497009,
1416
+ "eval_logits/rejected": 0.7690178155899048,
1417
+ "eval_logps/chosen": -271.27447509765625,
1418
+ "eval_logps/rejected": -249.114013671875,
1419
+ "eval_loss": 2158.6298828125,
1420
+ "eval_rewards/accuracies": 0.6980000138282776,
1421
+ "eval_rewards/chosen": -0.11030303686857224,
1422
+ "eval_rewards/margins": 0.06647594273090363,
1423
+ "eval_rewards/rejected": -0.17677900195121765,
1424
+ "eval_runtime": 325.3231,
1425
+ "eval_samples_per_second": 6.148,
1426
+ "eval_steps_per_second": 0.384,
1427
+ "step": 900
1428
+ },
1429
+ {
1430
+ "epoch": 1.9,
1431
+ "learning_rate": 3.237434340521789e-08,
1432
+ "logits/chosen": 0.6756221055984497,
1433
+ "logits/rejected": 0.7528306245803833,
1434
+ "logps/chosen": -272.76104736328125,
1435
+ "logps/rejected": -262.8546447753906,
1436
+ "loss": 2083.5836,
1437
+ "rewards/accuracies": 0.699999988079071,
1438
+ "rewards/chosen": -0.10101411491632462,
1439
+ "rewards/margins": 0.06638970226049423,
1440
+ "rewards/rejected": -0.16740381717681885,
1441
+ "step": 910
1442
+ },
1443
+ {
1444
+ "epoch": 1.93,
1445
+ "learning_rate": 1.93478202307823e-08,
1446
+ "logits/chosen": 0.6799421310424805,
1447
+ "logits/rejected": 0.7030155658721924,
1448
+ "logps/chosen": -252.42404174804688,
1449
+ "logps/rejected": -260.2074890136719,
1450
+ "loss": 2119.1219,
1451
+ "rewards/accuracies": 0.6625000238418579,
1452
+ "rewards/chosen": -0.11061377823352814,
1453
+ "rewards/margins": 0.04686294496059418,
1454
+ "rewards/rejected": -0.15747670829296112,
1455
+ "step": 920
1456
+ },
1457
+ {
1458
+ "epoch": 1.95,
1459
+ "learning_rate": 9.646686570697062e-09,
1460
+ "logits/chosen": 0.7364012002944946,
1461
+ "logits/rejected": 0.7563687562942505,
1462
+ "logps/chosen": -267.6513366699219,
1463
+ "logps/rejected": -265.22686767578125,
1464
+ "loss": 2177.6246,
1465
+ "rewards/accuracies": 0.6875,
1466
+ "rewards/chosen": -0.11080427467823029,
1467
+ "rewards/margins": 0.05903978273272514,
1468
+ "rewards/rejected": -0.16984406113624573,
1469
+ "step": 930
1470
+ },
1471
+ {
1472
+ "epoch": 1.97,
1473
+ "learning_rate": 3.283947088983663e-09,
1474
+ "logits/chosen": 0.7186964750289917,
1475
+ "logits/rejected": 0.7220372557640076,
1476
+ "logps/chosen": -249.5286407470703,
1477
+ "logps/rejected": -258.6728515625,
1478
+ "loss": 2177.0199,
1479
+ "rewards/accuracies": 0.625,
1480
+ "rewards/chosen": -0.12288296222686768,
1481
+ "rewards/margins": 0.04801628738641739,
1482
+ "rewards/rejected": -0.17089924216270447,
1483
+ "step": 940
1484
+ },
1485
+ {
1486
+ "epoch": 1.99,
1487
+ "learning_rate": 2.681312309735229e-10,
1488
+ "logits/chosen": 0.6878038644790649,
1489
+ "logits/rejected": 0.7842324376106262,
1490
+ "logps/chosen": -242.4557342529297,
1491
+ "logps/rejected": -244.35446166992188,
1492
+ "loss": 2182.6682,
1493
+ "rewards/accuracies": 0.59375,
1494
+ "rewards/chosen": -0.11521060764789581,
1495
+ "rewards/margins": 0.04983743280172348,
1496
+ "rewards/rejected": -0.1650480479001999,
1497
+ "step": 950
1498
+ },
1499
+ {
1500
+ "epoch": 2.0,
1501
+ "step": 954,
1502
+ "total_flos": 0.0,
1503
+ "train_loss": 2246.599348344143,
1504
+ "train_runtime": 18130.6033,
1505
+ "train_samples_per_second": 3.372,
1506
+ "train_steps_per_second": 0.053
1507
+ }
1508
+ ],
1509
+ "logging_steps": 10,
1510
+ "max_steps": 954,
1511
+ "num_input_tokens_seen": 0,
1512
+ "num_train_epochs": 2,
1513
+ "save_steps": 100,
1514
+ "total_flos": 0.0,
1515
+ "train_batch_size": 4,
1516
+ "trial_name": null,
1517
+ "trial_params": null
1518
+ }
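trainer_state.json carries the full log history: a training entry every 10 steps, an evaluation entry every 100 steps, and a final summary. The evaluation rows of the README's "Training results" table can be regenerated from it; a minimal sketch, assuming the file has been downloaded into the working directory:

```python
# Rebuild the evaluation rows of the model card's "Training results" table
# from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:  # evaluation entries appear every eval_steps (100) steps
        print(
            f'step {entry["step"]:>4}  epoch {entry["epoch"]:.2f}  '
            f'eval_loss {entry["eval_loss"]:.4f}  '
            f'margins {entry["eval_rewards/margins"]:.4f}  '
            f'accuracy {entry["eval_rewards/accuracies"]:.3f}'
        )
```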