dctanner committed on
Commit
50149aa
1 Parent(s): b66f79e

Model save

README.md ADDED
@@ -0,0 +1,77 @@
---
license: apache-2.0
library_name: peft
tags:
- trl
- dpo
- generated_from_trainer
base_model: sablo/sablo-pebble-mistral
model-index:
- name: sablo-pebble-mistral-dpo-lora-HelpSteer_binarized-2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# sablo-pebble-mistral-dpo-lora-HelpSteer_binarized-2

This model is a DPO fine-tuned version of [sablo/sablo-pebble-mistral](https://huggingface.co/sablo/sablo-pebble-mistral). The training dataset is not recorded in the card metadata, though the model name points to a binarized HelpSteer preference set.
It achieves the following results on the evaluation set:
- Loss: 0.5195
- Rewards/chosen: -1.3821
- Rewards/rejected: -2.4510
- Rewards/accuracies: 0.7358
- Rewards/margins: 1.0689
- Logps/rejected: -158.5470
- Logps/chosen: -147.7195
- Logits/rejected: -2.0952
- Logits/chosen: -2.1922

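The reward metrics above follow the standard DPO formulation used by TRL's DPO trainer; a brief recap for readers unfamiliar with the naming (β is the DPO temperature and is not recorded in this card):

```latex
% Implicit DPO reward of a response y to a prompt x, relative to the frozen reference model
r_\theta(x, y) = \beta \left( \log \pi_\theta(y \mid x) - \log \pi_{\mathrm{ref}}(y \mid x) \right)

% The training objective pushes up the margin between chosen (y_w) and rejected (y_l) responses
\mathcal{L}_{\mathrm{DPO}} = - \mathbb{E}_{(x,\, y_w,\, y_l)}
  \left[ \log \sigma \bigl( r_\theta(x, y_w) - r_\theta(x, y_l) \bigr) \right]
```

Rewards/chosen and Rewards/rejected are this implicit reward averaged over chosen and rejected responses, Rewards/margins is their difference, and Rewards/accuracies is the fraction of pairs with a positive margin. So the evaluation above says the chosen response out-scores the rejected one by about 1.07 (in β-scaled log-prob units) and is ranked correctly in roughly 74% of evaluation pairs.
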
## Model description

More information needed

## Intended uses & limitations

More information needed

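The sections above are still placeholders, so here is a minimal loading sketch. It assumes the adapter is published as `sablo/sablo-pebble-mistral-dpo-lora-HelpSteer_binarized-2` (inferred from the model name; the actual repo id may differ) and that `transformers`, `peft`, and `accelerate` are installed:

```python
# Minimal sketch: load the base model, then attach this DPO LoRA adapter with PEFT.
# The adapter repo id is inferred from the model name above and may differ.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "sablo/sablo-pebble-mistral"
adapter_id = "sablo/sablo-pebble-mistral-dpo-lora-HelpSteer_binarized-2"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(model, adapter_id)  # applies the LoRA weights on top

prompt = "Explain in one paragraph what DPO fine-tuning changes about a chat model."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=200)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

Only the LoRA weights live in this repository (`adapter_model.safetensors`, about 84 MB per the diff below); the base model is downloaded separately.
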
## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-06
- train_batch_size: 4
- eval_batch_size: 8
- seed: 42
- distributed_type: multi-GPU
- gradient_accumulation_steps: 2
- total_train_batch_size: 8
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1

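These hyperparameters map onto a fairly standard TRL `DPOTrainer` run over a preference dataset with `prompt`/`chosen`/`rejected` columns. A hedged sketch of what the setup may have looked like (trl ~0.7-era API to match the pinned framework versions; the dataset id, β, and LoRA settings are assumptions, while the logging/eval/save intervals come from `trainer_state.json` in this commit):

```python
# Hedged reconstruction of the training setup implied by the hyperparameters above.
# Dataset id, beta and the LoRA config are assumptions; they are not recorded in this card.
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

base_id = "sablo/sablo-pebble-mistral"
tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id)

# Placeholder dataset id: ~8130 train / 418 eval preference pairs per all_results.json.
dataset = load_dataset("your-org/HelpSteer_binarized")

peft_config = LoraConfig(  # assumed values; the actual LoRA rank/targets are not in the card
    r=16, lora_alpha=32, lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM",
)

training_args = TrainingArguments(
    output_dir="sablo-pebble-mistral-dpo-lora-HelpSteer_binarized-2",
    learning_rate=5e-6,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,   # effective train batch size: 4 * 2 = 8
    num_train_epochs=1,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
    logging_steps=10,                # from trainer_state.json
    evaluation_strategy="steps",
    eval_steps=200,                  # from trainer_state.json
    save_steps=100,                  # from trainer_state.json
    bf16=True,                       # assumed
)

trainer = DPOTrainer(
    model,
    ref_model=None,        # with a peft_config, TRL uses the frozen base model as the reference
    beta=0.1,              # assumed; the DPO temperature is not recorded in this card
    args=training_args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    tokenizer=tokenizer,
    peft_config=peft_config,
)
trainer.train()
```
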
### Training results

| Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
|:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
| 0.65          | 0.2   | 200  | 0.6563          | 0.1070         | 0.0177           | 0.6509             | 0.0893          | -76.2561       | -98.0835     | -2.0464         | -2.1421       |
| 0.456         | 0.39  | 400  | 0.5446          | -1.2305        | -1.8748          | 0.7217             | 0.6444          | -139.3410      | -142.6661    | -2.1203         | -2.2102       |
| 0.4388        | 0.59  | 600  | 0.5325          | -1.8012        | -2.8927          | 0.7123             | 1.0915          | -173.2708      | -161.6904    | -2.1017         | -2.1954       |
| 0.6137        | 0.79  | 800  | 0.5198          | -1.4487        | -2.5199          | 0.7382             | 1.0712          | -160.8413      | -149.9388    | -2.0962         | -2.1935       |
| 0.5866        | 0.98  | 1000 | 0.5195          | -1.3821        | -2.4510          | 0.7358             | 1.0689          | -158.5470      | -147.7195    | -2.0952         | -2.1922       |

### Framework versions

- PEFT 0.7.1
- Transformers 4.36.2
- Pytorch 2.0.1+cu118
- Datasets 2.14.6
- Tokenizers 0.15.0

adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:e5b256b0fefe42173653da99f9943a797a132f457df43621103c945d81387072
+ oid sha256:48f734b7073eba57a580259f094f177adf210cc855e0e23332611cf08a9c73f9
 size 83945744
all_results.json ADDED
@@ -0,0 +1,21 @@
{
    "epoch": 1.0,
    "eval_logits/chosen": -2.192172050476074,
    "eval_logits/rejected": -2.095205545425415,
    "eval_logps/chosen": -147.71951293945312,
    "eval_logps/rejected": -158.54696655273438,
    "eval_loss": 0.5194684863090515,
    "eval_rewards/accuracies": 0.7358490824699402,
    "eval_rewards/chosen": -1.382088541984558,
    "eval_rewards/margins": 1.068930983543396,
    "eval_rewards/rejected": -2.451019525527954,
    "eval_runtime": 422.7886,
    "eval_samples": 418,
    "eval_samples_per_second": 0.989,
    "eval_steps_per_second": 0.125,
    "train_loss": 0.5536013367607837,
    "train_runtime": 17040.3461,
    "train_samples": 8130,
    "train_samples_per_second": 0.477,
    "train_steps_per_second": 0.06
}
eval_results.json ADDED
@@ -0,0 +1,16 @@
{
    "epoch": 1.0,
    "eval_logits/chosen": -2.192172050476074,
    "eval_logits/rejected": -2.095205545425415,
    "eval_logps/chosen": -147.71951293945312,
    "eval_logps/rejected": -158.54696655273438,
    "eval_loss": 0.5194684863090515,
    "eval_rewards/accuracies": 0.7358490824699402,
    "eval_rewards/chosen": -1.382088541984558,
    "eval_rewards/margins": 1.068930983543396,
    "eval_rewards/rejected": -2.451019525527954,
    "eval_runtime": 422.7886,
    "eval_samples": 418,
    "eval_samples_per_second": 0.989,
    "eval_steps_per_second": 0.125
}
runs/Jan19_12-34-55_08134be46a59/events.out.tfevents.1705667918.08134be46a59.4105.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:6d195ebfda055f8c8c1b4c81887e1b54f7edd2b65b8900e14b1ac094902c1248
- size 72275
+ oid sha256:b7981d59edf7b04164e13033db557c7a6707655e9adf64dc75f8da6c3d41de20
+ size 73263
runs/Jan19_12-34-55_08134be46a59/events.out.tfevents.1705685379.08134be46a59.4105.1 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bffeef1461d345b389ec89af41f16d731602330205245d5772927c7a2ed597fd
size 828
train_results.json ADDED
@@ -0,0 +1,8 @@
{
    "epoch": 1.0,
    "train_loss": 0.5536013367607837,
    "train_runtime": 17040.3461,
    "train_samples": 8130,
    "train_samples_per_second": 0.477,
    "train_steps_per_second": 0.06
}
trainer_state.json ADDED
@@ -0,0 +1,1538 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.999508116084604,
5
+ "eval_steps": 200,
6
+ "global_step": 1016,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 4.901960784313726e-08,
14
+ "logits/chosen": -2.0748214721679688,
15
+ "logits/rejected": -2.1528916358947754,
16
+ "logps/chosen": -78.83467102050781,
17
+ "logps/rejected": -90.04127502441406,
18
+ "loss": 0.6931,
19
+ "rewards/accuracies": 0.0,
20
+ "rewards/chosen": 0.0,
21
+ "rewards/margins": 0.0,
22
+ "rewards/rejected": 0.0,
23
+ "step": 1
24
+ },
25
+ {
26
+ "epoch": 0.01,
27
+ "learning_rate": 4.901960784313725e-07,
28
+ "logits/chosen": -2.167158603668213,
29
+ "logits/rejected": -2.065119743347168,
30
+ "logps/chosen": -101.462646484375,
31
+ "logps/rejected": -67.91659545898438,
32
+ "loss": 0.6937,
33
+ "rewards/accuracies": 0.3611111044883728,
34
+ "rewards/chosen": -0.0003858842537738383,
35
+ "rewards/margins": -0.001147754956036806,
36
+ "rewards/rejected": 0.0007618705276399851,
37
+ "step": 10
38
+ },
39
+ {
40
+ "epoch": 0.02,
41
+ "learning_rate": 9.80392156862745e-07,
42
+ "logits/chosen": -2.139626979827881,
43
+ "logits/rejected": -2.0195488929748535,
44
+ "logps/chosen": -109.9924545288086,
45
+ "logps/rejected": -84.65155029296875,
46
+ "loss": 0.695,
47
+ "rewards/accuracies": 0.375,
48
+ "rewards/chosen": -0.002081229817122221,
49
+ "rewards/margins": -0.0036208624951541424,
50
+ "rewards/rejected": 0.0015396325616165996,
51
+ "step": 20
52
+ },
53
+ {
54
+ "epoch": 0.03,
55
+ "learning_rate": 1.4705882352941177e-06,
56
+ "logits/chosen": -2.2819817066192627,
57
+ "logits/rejected": -2.181386947631836,
58
+ "logps/chosen": -100.8692398071289,
59
+ "logps/rejected": -84.57305908203125,
60
+ "loss": 0.6931,
61
+ "rewards/accuracies": 0.5375000238418579,
62
+ "rewards/chosen": 0.0019000970060005784,
63
+ "rewards/margins": 0.00016770660295151174,
64
+ "rewards/rejected": 0.0017323906067758799,
65
+ "step": 30
66
+ },
67
+ {
68
+ "epoch": 0.04,
69
+ "learning_rate": 1.96078431372549e-06,
70
+ "logits/chosen": -2.274721622467041,
71
+ "logits/rejected": -2.156294584274292,
72
+ "logps/chosen": -105.41255950927734,
73
+ "logps/rejected": -83.99583435058594,
74
+ "loss": 0.6929,
75
+ "rewards/accuracies": 0.5375000238418579,
76
+ "rewards/chosen": 0.001408256939612329,
77
+ "rewards/margins": 0.0004250857455190271,
78
+ "rewards/rejected": 0.0009831712814047933,
79
+ "step": 40
80
+ },
81
+ {
82
+ "epoch": 0.05,
83
+ "learning_rate": 2.450980392156863e-06,
84
+ "logits/chosen": -2.29875111579895,
85
+ "logits/rejected": -2.20742130279541,
86
+ "logps/chosen": -103.51786041259766,
87
+ "logps/rejected": -77.90748596191406,
88
+ "loss": 0.692,
89
+ "rewards/accuracies": 0.550000011920929,
90
+ "rewards/chosen": 0.005386353936046362,
91
+ "rewards/margins": 0.002320895902812481,
92
+ "rewards/rejected": 0.0030654582660645247,
93
+ "step": 50
94
+ },
95
+ {
96
+ "epoch": 0.06,
97
+ "learning_rate": 2.9411764705882355e-06,
98
+ "logits/chosen": -2.30989408493042,
99
+ "logits/rejected": -2.233097553253174,
100
+ "logps/chosen": -107.9452896118164,
101
+ "logps/rejected": -90.55638122558594,
102
+ "loss": 0.6912,
103
+ "rewards/accuracies": 0.637499988079071,
104
+ "rewards/chosen": 0.008994068950414658,
105
+ "rewards/margins": 0.003984561190009117,
106
+ "rewards/rejected": 0.005009507294744253,
107
+ "step": 60
108
+ },
109
+ {
110
+ "epoch": 0.07,
111
+ "learning_rate": 3.431372549019608e-06,
112
+ "logits/chosen": -2.2964837551116943,
113
+ "logits/rejected": -2.192676544189453,
114
+ "logps/chosen": -98.52997589111328,
115
+ "logps/rejected": -73.2212142944336,
116
+ "loss": 0.6907,
117
+ "rewards/accuracies": 0.5874999761581421,
118
+ "rewards/chosen": 0.011149758473038673,
119
+ "rewards/margins": 0.004859681706875563,
120
+ "rewards/rejected": 0.006290078163146973,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 0.08,
125
+ "learning_rate": 3.92156862745098e-06,
126
+ "logits/chosen": -2.147590398788452,
127
+ "logits/rejected": -2.0242252349853516,
128
+ "logps/chosen": -110.5780258178711,
129
+ "logps/rejected": -85.19944763183594,
130
+ "loss": 0.6903,
131
+ "rewards/accuracies": 0.612500011920929,
132
+ "rewards/chosen": 0.018292848020792007,
133
+ "rewards/margins": 0.005854339338839054,
134
+ "rewards/rejected": 0.012438507750630379,
135
+ "step": 80
136
+ },
137
+ {
138
+ "epoch": 0.09,
139
+ "learning_rate": 4.411764705882353e-06,
140
+ "logits/chosen": -2.103201389312744,
141
+ "logits/rejected": -1.9731849431991577,
142
+ "logps/chosen": -86.84721374511719,
143
+ "logps/rejected": -63.73047637939453,
144
+ "loss": 0.6889,
145
+ "rewards/accuracies": 0.625,
146
+ "rewards/chosen": 0.02545500174164772,
147
+ "rewards/margins": 0.008647125214338303,
148
+ "rewards/rejected": 0.016807876527309418,
149
+ "step": 90
150
+ },
151
+ {
152
+ "epoch": 0.1,
153
+ "learning_rate": 4.901960784313726e-06,
154
+ "logits/chosen": -2.071173906326294,
155
+ "logits/rejected": -1.8952083587646484,
156
+ "logps/chosen": -120.53657531738281,
157
+ "logps/rejected": -84.6740951538086,
158
+ "loss": 0.6804,
159
+ "rewards/accuracies": 0.75,
160
+ "rewards/chosen": 0.051459141075611115,
161
+ "rewards/margins": 0.02619524672627449,
162
+ "rewards/rejected": 0.025263899937272072,
163
+ "step": 100
164
+ },
165
+ {
166
+ "epoch": 0.11,
167
+ "learning_rate": 4.9990549169459415e-06,
168
+ "logits/chosen": -2.2821316719055176,
169
+ "logits/rejected": -2.1455578804016113,
170
+ "logps/chosen": -103.51020812988281,
171
+ "logps/rejected": -78.41320037841797,
172
+ "loss": 0.6767,
173
+ "rewards/accuracies": 0.824999988079071,
174
+ "rewards/chosen": 0.06272837519645691,
175
+ "rewards/margins": 0.03374689072370529,
176
+ "rewards/rejected": 0.028981482610106468,
177
+ "step": 110
178
+ },
179
+ {
180
+ "epoch": 0.12,
181
+ "learning_rate": 4.995216741642263e-06,
182
+ "logits/chosen": -2.284080743789673,
183
+ "logits/rejected": -2.210580825805664,
184
+ "logps/chosen": -96.24214172363281,
185
+ "logps/rejected": -77.19042205810547,
186
+ "loss": 0.6812,
187
+ "rewards/accuracies": 0.612500011920929,
188
+ "rewards/chosen": 0.08176051080226898,
189
+ "rewards/margins": 0.02550395205616951,
190
+ "rewards/rejected": 0.056256555020809174,
191
+ "step": 120
192
+ },
193
+ {
194
+ "epoch": 0.13,
195
+ "learning_rate": 4.988430936991089e-06,
196
+ "logits/chosen": -2.2925381660461426,
197
+ "logits/rejected": -2.149710178375244,
198
+ "logps/chosen": -106.36643981933594,
199
+ "logps/rejected": -80.01815795898438,
200
+ "loss": 0.6647,
201
+ "rewards/accuracies": 0.7250000238418579,
202
+ "rewards/chosen": 0.11486251652240753,
203
+ "rewards/margins": 0.06016199663281441,
204
+ "rewards/rejected": 0.054700516164302826,
205
+ "step": 130
206
+ },
207
+ {
208
+ "epoch": 0.14,
209
+ "learning_rate": 4.978705519144525e-06,
210
+ "logits/chosen": -2.1970953941345215,
211
+ "logits/rejected": -2.0385475158691406,
212
+ "logps/chosen": -118.03541564941406,
213
+ "logps/rejected": -81.73417663574219,
214
+ "loss": 0.6639,
215
+ "rewards/accuracies": 0.7749999761581421,
216
+ "rewards/chosen": 0.10544681549072266,
217
+ "rewards/margins": 0.06229867413640022,
218
+ "rewards/rejected": 0.04314813390374184,
219
+ "step": 140
220
+ },
221
+ {
222
+ "epoch": 0.15,
223
+ "learning_rate": 4.966051976854862e-06,
224
+ "logits/chosen": -2.339622735977173,
225
+ "logits/rejected": -2.2239785194396973,
226
+ "logps/chosen": -90.04592895507812,
227
+ "logps/rejected": -68.85932922363281,
228
+ "loss": 0.6506,
229
+ "rewards/accuracies": 0.7749999761581421,
230
+ "rewards/chosen": 0.1409004032611847,
231
+ "rewards/margins": 0.09264969825744629,
232
+ "rewards/rejected": 0.048250701278448105,
233
+ "step": 150
234
+ },
235
+ {
236
+ "epoch": 0.16,
237
+ "learning_rate": 4.950485257902782e-06,
238
+ "logits/chosen": -2.2208049297332764,
239
+ "logits/rejected": -2.1466565132141113,
240
+ "logps/chosen": -101.00809478759766,
241
+ "logps/rejected": -76.30353546142578,
242
+ "loss": 0.6618,
243
+ "rewards/accuracies": 0.675000011920929,
244
+ "rewards/chosen": 0.13603894412517548,
245
+ "rewards/margins": 0.0690341368317604,
246
+ "rewards/rejected": 0.06700481474399567,
247
+ "step": 160
248
+ },
249
+ {
250
+ "epoch": 0.17,
251
+ "learning_rate": 4.932023751439358e-06,
252
+ "logits/chosen": -2.286597728729248,
253
+ "logits/rejected": -2.13307523727417,
254
+ "logps/chosen": -110.24186706542969,
255
+ "logps/rejected": -81.82392120361328,
256
+ "loss": 0.6666,
257
+ "rewards/accuracies": 0.6499999761581421,
258
+ "rewards/chosen": 0.13935917615890503,
259
+ "rewards/margins": 0.06269019842147827,
260
+ "rewards/rejected": 0.07666899263858795,
261
+ "step": 170
262
+ },
263
+ {
264
+ "epoch": 0.18,
265
+ "learning_rate": 4.9106892662627395e-06,
266
+ "logits/chosen": -2.357682466506958,
267
+ "logits/rejected": -2.237700939178467,
268
+ "logps/chosen": -104.79756927490234,
269
+ "logps/rejected": -81.48443603515625,
270
+ "loss": 0.6615,
271
+ "rewards/accuracies": 0.6875,
272
+ "rewards/chosen": 0.16287028789520264,
273
+ "rewards/margins": 0.07204125821590424,
274
+ "rewards/rejected": 0.0908290296792984,
275
+ "step": 180
276
+ },
277
+ {
278
+ "epoch": 0.19,
279
+ "learning_rate": 4.886507005055149e-06,
280
+ "logits/chosen": -2.3158655166625977,
281
+ "logits/rejected": -2.1448147296905518,
282
+ "logps/chosen": -113.81204986572266,
283
+ "logps/rejected": -80.2816390991211,
284
+ "loss": 0.646,
285
+ "rewards/accuracies": 0.7124999761581421,
286
+ "rewards/chosen": 0.15469492971897125,
287
+ "rewards/margins": 0.10541819036006927,
288
+ "rewards/rejected": 0.049276743084192276,
289
+ "step": 190
290
+ },
291
+ {
292
+ "epoch": 0.2,
293
+ "learning_rate": 4.859505534610658e-06,
294
+ "logits/chosen": -2.2734456062316895,
295
+ "logits/rejected": -2.1840872764587402,
296
+ "logps/chosen": -94.27523040771484,
297
+ "logps/rejected": -76.13407897949219,
298
+ "loss": 0.65,
299
+ "rewards/accuracies": 0.6625000238418579,
300
+ "rewards/chosen": 0.12800325453281403,
301
+ "rewards/margins": 0.10445593297481537,
302
+ "rewards/rejected": 0.023547304794192314,
303
+ "step": 200
304
+ },
305
+ {
306
+ "epoch": 0.2,
307
+ "eval_logits/chosen": -2.1420769691467285,
308
+ "eval_logits/rejected": -2.046384811401367,
309
+ "eval_logps/chosen": -98.08348083496094,
310
+ "eval_logps/rejected": -76.25611877441406,
311
+ "eval_loss": 0.656255841255188,
312
+ "eval_rewards/accuracies": 0.650943398475647,
313
+ "eval_rewards/chosen": 0.106992706656456,
314
+ "eval_rewards/margins": 0.08928683400154114,
315
+ "eval_rewards/rejected": 0.017705870792269707,
316
+ "eval_runtime": 425.8967,
317
+ "eval_samples_per_second": 0.981,
318
+ "eval_steps_per_second": 0.124,
319
+ "step": 200
320
+ },
321
+ {
322
+ "epoch": 0.21,
323
+ "learning_rate": 4.829716752088893e-06,
324
+ "logits/chosen": -2.2180724143981934,
325
+ "logits/rejected": -2.1603446006774902,
326
+ "logps/chosen": -85.03736877441406,
327
+ "logps/rejected": -78.87088012695312,
328
+ "loss": 0.6573,
329
+ "rewards/accuracies": 0.699999988079071,
330
+ "rewards/chosen": 0.10240446031093597,
331
+ "rewards/margins": 0.08565853536128998,
332
+ "rewards/rejected": 0.016745951026678085,
333
+ "step": 210
334
+ },
335
+ {
336
+ "epoch": 0.22,
337
+ "learning_rate": 4.797175847334535e-06,
338
+ "logits/chosen": -2.2404112815856934,
339
+ "logits/rejected": -2.12317156791687,
340
+ "logps/chosen": -107.25093841552734,
341
+ "logps/rejected": -84.33776092529297,
342
+ "loss": 0.6679,
343
+ "rewards/accuracies": 0.6625000238418579,
344
+ "rewards/chosen": 0.06942565739154816,
345
+ "rewards/margins": 0.06278938055038452,
346
+ "rewards/rejected": 0.006636275444179773,
347
+ "step": 220
348
+ },
349
+ {
350
+ "epoch": 0.23,
351
+ "learning_rate": 4.761921261307143e-06,
352
+ "logits/chosen": -2.271301746368408,
353
+ "logits/rejected": -2.1368188858032227,
354
+ "logps/chosen": -102.20993041992188,
355
+ "logps/rejected": -80.41031646728516,
356
+ "loss": 0.656,
357
+ "rewards/accuracies": 0.699999988079071,
358
+ "rewards/chosen": 0.06752457469701767,
359
+ "rewards/margins": 0.09335462749004364,
360
+ "rewards/rejected": -0.02583005093038082,
361
+ "step": 230
362
+ },
363
+ {
364
+ "epoch": 0.24,
365
+ "learning_rate": 4.723994640670377e-06,
366
+ "logits/chosen": -2.2616968154907227,
367
+ "logits/rejected": -2.090630292892456,
368
+ "logps/chosen": -112.83064270019531,
369
+ "logps/rejected": -78.7665023803711,
370
+ "loss": 0.613,
371
+ "rewards/accuracies": 0.7875000238418579,
372
+ "rewards/chosen": 0.1038433164358139,
373
+ "rewards/margins": 0.18221423029899597,
374
+ "rewards/rejected": -0.07837091386318207,
375
+ "step": 240
376
+ },
377
+ {
378
+ "epoch": 0.25,
379
+ "learning_rate": 4.68344078859431e-06,
380
+ "logits/chosen": -2.214836597442627,
381
+ "logits/rejected": -2.1651015281677246,
382
+ "logps/chosen": -94.0736312866211,
383
+ "logps/rejected": -87.97615814208984,
384
+ "loss": 0.6446,
385
+ "rewards/accuracies": 0.6499999761581421,
386
+ "rewards/chosen": 0.05433257669210434,
387
+ "rewards/margins": 0.1173890084028244,
388
+ "rewards/rejected": -0.06305641680955887,
389
+ "step": 250
390
+ },
391
+ {
392
+ "epoch": 0.26,
393
+ "learning_rate": 4.6403076118289006e-06,
394
+ "logits/chosen": -2.191092014312744,
395
+ "logits/rejected": -2.0465681552886963,
396
+ "logps/chosen": -106.81085205078125,
397
+ "logps/rejected": -80.91065216064453,
398
+ "loss": 0.6325,
399
+ "rewards/accuracies": 0.675000011920929,
400
+ "rewards/chosen": 0.04650095850229263,
401
+ "rewards/margins": 0.14686846733093262,
402
+ "rewards/rejected": -0.10036750882863998,
403
+ "step": 260
404
+ },
405
+ {
406
+ "epoch": 0.27,
407
+ "learning_rate": 4.5946460641111776e-06,
408
+ "logits/chosen": -2.3747944831848145,
409
+ "logits/rejected": -2.1630542278289795,
410
+ "logps/chosen": -103.29341125488281,
411
+ "logps/rejected": -80.09381103515625,
412
+ "loss": 0.6041,
413
+ "rewards/accuracies": 0.7749999761581421,
414
+ "rewards/chosen": 0.06812257319688797,
415
+ "rewards/margins": 0.2125866860151291,
416
+ "rewards/rejected": -0.14446412026882172,
417
+ "step": 270
418
+ },
419
+ {
420
+ "epoch": 0.28,
421
+ "learning_rate": 4.546510085972983e-06,
422
+ "logits/chosen": -2.385852813720703,
423
+ "logits/rejected": -2.2712764739990234,
424
+ "logps/chosen": -115.0221176147461,
425
+ "logps/rejected": -90.1436538696289,
426
+ "loss": 0.6212,
427
+ "rewards/accuracies": 0.675000011920929,
428
+ "rewards/chosen": 0.017763856798410416,
429
+ "rewards/margins": 0.1770613044500351,
430
+ "rewards/rejected": -0.15929746627807617,
431
+ "step": 280
432
+ },
433
+ {
434
+ "epoch": 0.29,
435
+ "learning_rate": 4.495956541020376e-06,
436
+ "logits/chosen": -2.3901445865631104,
437
+ "logits/rejected": -2.247413396835327,
438
+ "logps/chosen": -119.44215393066406,
439
+ "logps/rejected": -101.89396667480469,
440
+ "loss": 0.613,
441
+ "rewards/accuracies": 0.625,
442
+ "rewards/chosen": -0.03466514125466347,
443
+ "rewards/margins": 0.2126956284046173,
444
+ "rewards/rejected": -0.24736078083515167,
445
+ "step": 290
446
+ },
447
+ {
448
+ "epoch": 0.3,
449
+ "learning_rate": 4.443045148759978e-06,
450
+ "logits/chosen": -2.3851912021636963,
451
+ "logits/rejected": -2.246699094772339,
452
+ "logps/chosen": -126.57054138183594,
453
+ "logps/rejected": -93.43769836425781,
454
+ "loss": 0.5898,
455
+ "rewards/accuracies": 0.6875,
456
+ "rewards/chosen": -0.06855957210063934,
457
+ "rewards/margins": 0.2794524133205414,
458
+ "rewards/rejected": -0.3480120301246643,
459
+ "step": 300
460
+ },
461
+ {
462
+ "epoch": 0.3,
463
+ "learning_rate": 4.3878384140516025e-06,
464
+ "logits/chosen": -2.4334146976470947,
465
+ "logits/rejected": -2.3179290294647217,
466
+ "logps/chosen": -118.78206634521484,
467
+ "logps/rejected": -107.4043197631836,
468
+ "loss": 0.5792,
469
+ "rewards/accuracies": 0.699999988079071,
470
+ "rewards/chosen": -0.15513427555561066,
471
+ "rewards/margins": 0.30940642952919006,
472
+ "rewards/rejected": -0.46454063057899475,
473
+ "step": 310
474
+ },
475
+ {
476
+ "epoch": 0.31,
477
+ "learning_rate": 4.330401553270522e-06,
478
+ "logits/chosen": -2.34911847114563,
479
+ "logits/rejected": -2.2519352436065674,
480
+ "logps/chosen": -116.80616760253906,
481
+ "logps/rejected": -108.0991439819336,
482
+ "loss": 0.5869,
483
+ "rewards/accuracies": 0.737500011920929,
484
+ "rewards/chosen": -0.32395830750465393,
485
+ "rewards/margins": 0.30500033497810364,
486
+ "rewards/rejected": -0.6289585828781128,
487
+ "step": 320
488
+ },
489
+ {
490
+ "epoch": 0.32,
491
+ "learning_rate": 4.2708024172665795e-06,
492
+ "logits/chosen": -2.448741912841797,
493
+ "logits/rejected": -2.2790164947509766,
494
+ "logps/chosen": -119.88993072509766,
495
+ "logps/rejected": -94.18651580810547,
496
+ "loss": 0.5564,
497
+ "rewards/accuracies": 0.8125,
498
+ "rewards/chosen": -0.37532466650009155,
499
+ "rewards/margins": 0.37958258390426636,
500
+ "rewards/rejected": -0.7549072504043579,
501
+ "step": 330
502
+ },
503
+ {
504
+ "epoch": 0.33,
505
+ "learning_rate": 4.209111411211174e-06,
506
+ "logits/chosen": -2.4606077671051025,
507
+ "logits/rejected": -2.3158764839172363,
508
+ "logps/chosen": -108.91439056396484,
509
+ "logps/rejected": -94.19551086425781,
510
+ "loss": 0.5416,
511
+ "rewards/accuracies": 0.75,
512
+ "rewards/chosen": -0.3131277859210968,
513
+ "rewards/margins": 0.4208511710166931,
514
+ "rewards/rejected": -0.7339790463447571,
515
+ "step": 340
516
+ },
517
+ {
518
+ "epoch": 0.34,
519
+ "learning_rate": 4.145401411426788e-06,
520
+ "logits/chosen": -2.4888482093811035,
521
+ "logits/rejected": -2.3739964962005615,
522
+ "logps/chosen": -106.79335021972656,
523
+ "logps/rejected": -94.10002136230469,
524
+ "loss": 0.5863,
525
+ "rewards/accuracies": 0.7124999761581421,
526
+ "rewards/chosen": -0.24795857071876526,
527
+ "rewards/margins": 0.3726752996444702,
528
+ "rewards/rejected": -0.6206337809562683,
529
+ "step": 350
530
+ },
531
+ {
532
+ "epoch": 0.35,
533
+ "learning_rate": 4.079747679297314e-06,
534
+ "logits/chosen": -2.320113182067871,
535
+ "logits/rejected": -2.214874744415283,
536
+ "logps/chosen": -118.53324890136719,
537
+ "logps/rejected": -100.85164642333984,
538
+ "loss": 0.574,
539
+ "rewards/accuracies": 0.6499999761581421,
540
+ "rewards/chosen": -0.29153329133987427,
541
+ "rewards/margins": 0.3650625944137573,
542
+ "rewards/rejected": -0.6565959453582764,
543
+ "step": 360
544
+ },
545
+ {
546
+ "epoch": 0.36,
547
+ "learning_rate": 4.012227772360889e-06,
548
+ "logits/chosen": -2.3389956951141357,
549
+ "logits/rejected": -2.1818504333496094,
550
+ "logps/chosen": -123.92159271240234,
551
+ "logps/rejected": -110.42405700683594,
552
+ "loss": 0.5134,
553
+ "rewards/accuracies": 0.8374999761581421,
554
+ "rewards/chosen": -0.2689405083656311,
555
+ "rewards/margins": 0.5353353023529053,
556
+ "rewards/rejected": -0.8042758703231812,
557
+ "step": 370
558
+ },
559
+ {
560
+ "epoch": 0.37,
561
+ "learning_rate": 3.942921452690245e-06,
562
+ "logits/chosen": -2.369523525238037,
563
+ "logits/rejected": -2.285311222076416,
564
+ "logps/chosen": -132.2254180908203,
565
+ "logps/rejected": -125.16448974609375,
566
+ "loss": 0.6273,
567
+ "rewards/accuracies": 0.699999988079071,
568
+ "rewards/chosen": -0.6799182891845703,
569
+ "rewards/margins": 0.3109356760978699,
570
+ "rewards/rejected": -0.9908539652824402,
571
+ "step": 380
572
+ },
573
+ {
574
+ "epoch": 0.38,
575
+ "learning_rate": 3.871910592668817e-06,
576
+ "logits/chosen": -2.4459290504455566,
577
+ "logits/rejected": -2.3258919715881348,
578
+ "logps/chosen": -127.33780670166016,
579
+ "logps/rejected": -129.97348022460938,
580
+ "loss": 0.4728,
581
+ "rewards/accuracies": 0.800000011920929,
582
+ "rewards/chosen": -0.7220331430435181,
583
+ "rewards/margins": 0.7715710997581482,
584
+ "rewards/rejected": -1.4936041831970215,
585
+ "step": 390
586
+ },
587
+ {
588
+ "epoch": 0.39,
589
+ "learning_rate": 3.799279078273921e-06,
590
+ "logits/chosen": -2.3586697578430176,
591
+ "logits/rejected": -2.190298080444336,
592
+ "logps/chosen": -137.87445068359375,
593
+ "logps/rejected": -125.60152435302734,
594
+ "loss": 0.456,
595
+ "rewards/accuracies": 0.7749999761581421,
596
+ "rewards/chosen": -0.9262881278991699,
597
+ "rewards/margins": 0.8406116366386414,
598
+ "rewards/rejected": -1.766899824142456,
599
+ "step": 400
600
+ },
601
+ {
602
+ "epoch": 0.39,
603
+ "eval_logits/chosen": -2.21016526222229,
604
+ "eval_logits/rejected": -2.1202762126922607,
605
+ "eval_logps/chosen": -142.6661376953125,
606
+ "eval_logps/rejected": -139.3409881591797,
607
+ "eval_loss": 0.5446351766586304,
608
+ "eval_rewards/accuracies": 0.7216981053352356,
609
+ "eval_rewards/chosen": -1.23048734664917,
610
+ "eval_rewards/margins": 0.6443533301353455,
611
+ "eval_rewards/rejected": -1.8748407363891602,
612
+ "eval_runtime": 427.1638,
613
+ "eval_samples_per_second": 0.979,
614
+ "eval_steps_per_second": 0.124,
615
+ "step": 400
616
+ },
617
+ {
618
+ "epoch": 0.4,
619
+ "learning_rate": 3.725112709981249e-06,
620
+ "logits/chosen": -2.2144548892974854,
621
+ "logits/rejected": -2.0825304985046387,
622
+ "logps/chosen": -153.91371154785156,
623
+ "logps/rejected": -152.90274047851562,
624
+ "loss": 0.524,
625
+ "rewards/accuracies": 0.7749999761581421,
626
+ "rewards/chosen": -1.2480976581573486,
627
+ "rewards/margins": 0.7566108703613281,
628
+ "rewards/rejected": -2.0047080516815186,
629
+ "step": 410
630
+ },
631
+ {
632
+ "epoch": 0.41,
633
+ "learning_rate": 3.649499101407737e-06,
634
+ "logits/chosen": -2.3662781715393066,
635
+ "logits/rejected": -2.2161378860473633,
636
+ "logps/chosen": -157.3763427734375,
637
+ "logps/rejected": -154.7397918701172,
638
+ "loss": 0.5717,
639
+ "rewards/accuracies": 0.699999988079071,
640
+ "rewards/chosen": -1.3450363874435425,
641
+ "rewards/margins": 0.736282467842102,
642
+ "rewards/rejected": -2.0813188552856445,
643
+ "step": 420
644
+ },
645
+ {
646
+ "epoch": 0.42,
647
+ "learning_rate": 3.5725275758125564e-06,
648
+ "logits/chosen": -2.2823150157928467,
649
+ "logits/rejected": -2.1387839317321777,
650
+ "logps/chosen": -135.34524536132812,
651
+ "logps/rejected": -134.94952392578125,
652
+ "loss": 0.4512,
653
+ "rewards/accuracies": 0.824999988079071,
654
+ "rewards/chosen": -1.2761428356170654,
655
+ "rewards/margins": 1.0124304294586182,
656
+ "rewards/rejected": -2.2885732650756836,
657
+ "step": 430
658
+ },
659
+ {
660
+ "epoch": 0.43,
661
+ "learning_rate": 3.494289060578478e-06,
662
+ "logits/chosen": -2.3979570865631104,
663
+ "logits/rejected": -2.3127408027648926,
664
+ "logps/chosen": -148.89285278320312,
665
+ "logps/rejected": -157.427978515625,
666
+ "loss": 0.5081,
667
+ "rewards/accuracies": 0.6875,
668
+ "rewards/chosen": -1.634016990661621,
669
+ "rewards/margins": 0.8799861669540405,
670
+ "rewards/rejected": -2.514003276824951,
671
+ "step": 440
672
+ },
673
+ {
674
+ "epoch": 0.44,
675
+ "learning_rate": 3.414875979798272e-06,
676
+ "logits/chosen": -2.349696636199951,
677
+ "logits/rejected": -2.236372709274292,
678
+ "logps/chosen": -163.39578247070312,
679
+ "logps/rejected": -173.43084716796875,
680
+ "loss": 0.4372,
681
+ "rewards/accuracies": 0.7875000238418579,
682
+ "rewards/chosen": -1.1473162174224854,
683
+ "rewards/margins": 1.2105666399002075,
684
+ "rewards/rejected": -2.3578829765319824,
685
+ "step": 450
686
+ },
687
+ {
688
+ "epoch": 0.45,
689
+ "learning_rate": 3.3343821450930196e-06,
690
+ "logits/chosen": -2.468815326690674,
691
+ "logits/rejected": -2.319728374481201,
692
+ "logps/chosen": -147.9761199951172,
693
+ "logps/rejected": -140.23440551757812,
694
+ "loss": 0.4893,
695
+ "rewards/accuracies": 0.75,
696
+ "rewards/chosen": -1.0942740440368652,
697
+ "rewards/margins": 1.0382341146469116,
698
+ "rewards/rejected": -2.132507801055908,
699
+ "step": 460
700
+ },
701
+ {
702
+ "epoch": 0.46,
703
+ "learning_rate": 3.252902644791325e-06,
704
+ "logits/chosen": -2.2242140769958496,
705
+ "logits/rejected": -2.078382968902588,
706
+ "logps/chosen": -138.49478149414062,
707
+ "logps/rejected": -156.64273071289062,
708
+ "loss": 0.4146,
709
+ "rewards/accuracies": 0.8374999761581421,
710
+ "rewards/chosen": -1.0762813091278076,
711
+ "rewards/margins": 1.254016399383545,
712
+ "rewards/rejected": -2.3302977085113525,
713
+ "step": 470
714
+ },
715
+ {
716
+ "epoch": 0.47,
717
+ "learning_rate": 3.170533731600339e-06,
718
+ "logits/chosen": -2.334146022796631,
719
+ "logits/rejected": -2.1843693256378174,
720
+ "logps/chosen": -145.91741943359375,
721
+ "logps/rejected": -150.4651336669922,
722
+ "loss": 0.5102,
723
+ "rewards/accuracies": 0.7124999761581421,
724
+ "rewards/chosen": -1.1809916496276855,
725
+ "rewards/margins": 1.0365092754364014,
726
+ "rewards/rejected": -2.217501163482666,
727
+ "step": 480
728
+ },
729
+ {
730
+ "epoch": 0.48,
731
+ "learning_rate": 3.0873727089012816e-06,
732
+ "logits/chosen": -2.4750094413757324,
733
+ "logits/rejected": -2.3860108852386475,
734
+ "logps/chosen": -168.5699005126953,
735
+ "logps/rejected": -172.41029357910156,
736
+ "loss": 0.522,
737
+ "rewards/accuracies": 0.737500011920929,
738
+ "rewards/chosen": -1.693511962890625,
739
+ "rewards/margins": 1.0143020153045654,
740
+ "rewards/rejected": -2.7078135013580322,
741
+ "step": 490
742
+ },
743
+ {
744
+ "epoch": 0.49,
745
+ "learning_rate": 3.0035178158038026e-06,
746
+ "logits/chosen": -2.261049747467041,
747
+ "logits/rejected": -2.0733211040496826,
748
+ "logps/chosen": -178.0288543701172,
749
+ "logps/rejected": -181.95970153808594,
750
+ "loss": 0.4784,
751
+ "rewards/accuracies": 0.75,
752
+ "rewards/chosen": -1.8506263494491577,
753
+ "rewards/margins": 1.296245813369751,
754
+ "rewards/rejected": -3.146872043609619,
755
+ "step": 500
756
+ },
757
+ {
758
+ "epoch": 0.5,
759
+ "learning_rate": 2.919068111094937e-06,
760
+ "logits/chosen": -2.3573222160339355,
761
+ "logits/rejected": -2.2329063415527344,
762
+ "logps/chosen": -169.11073303222656,
763
+ "logps/rejected": -186.6231231689453,
764
+ "loss": 0.6133,
765
+ "rewards/accuracies": 0.6625000238418579,
766
+ "rewards/chosen": -2.2830090522766113,
767
+ "rewards/margins": 1.0279150009155273,
768
+ "rewards/rejected": -3.3109240531921387,
769
+ "step": 510
770
+ },
771
+ {
772
+ "epoch": 0.51,
773
+ "learning_rate": 2.8341233562197895e-06,
774
+ "logits/chosen": -2.3493080139160156,
775
+ "logits/rejected": -2.2869277000427246,
776
+ "logps/chosen": -156.89324951171875,
777
+ "logps/rejected": -172.35809326171875,
778
+ "loss": 0.4775,
779
+ "rewards/accuracies": 0.7749999761581421,
780
+ "rewards/chosen": -1.8899619579315186,
781
+ "rewards/margins": 1.054958701133728,
782
+ "rewards/rejected": -2.944920778274536,
783
+ "step": 520
784
+ },
785
+ {
786
+ "epoch": 0.52,
787
+ "learning_rate": 2.7487838974321352e-06,
788
+ "logits/chosen": -2.294079065322876,
789
+ "logits/rejected": -2.1721999645233154,
790
+ "logps/chosen": -166.351806640625,
791
+ "logps/rejected": -181.2619171142578,
792
+ "loss": 0.501,
793
+ "rewards/accuracies": 0.7749999761581421,
794
+ "rewards/chosen": -2.059480905532837,
795
+ "rewards/margins": 1.2059848308563232,
796
+ "rewards/rejected": -3.265465259552002,
797
+ "step": 530
798
+ },
799
+ {
800
+ "epoch": 0.53,
801
+ "learning_rate": 2.6631505472541997e-06,
802
+ "logits/chosen": -2.284674644470215,
803
+ "logits/rejected": -2.151689291000366,
804
+ "logps/chosen": -170.77108764648438,
805
+ "logps/rejected": -185.42703247070312,
806
+ "loss": 0.4457,
807
+ "rewards/accuracies": 0.737500011920929,
808
+ "rewards/chosen": -1.8532861471176147,
809
+ "rewards/margins": 1.2717969417572021,
810
+ "rewards/rejected": -3.1250832080841064,
811
+ "step": 540
812
+ },
813
+ {
814
+ "epoch": 0.54,
815
+ "learning_rate": 2.5773244653856173e-06,
816
+ "logits/chosen": -2.262991428375244,
817
+ "logits/rejected": -2.140122652053833,
818
+ "logps/chosen": -171.88180541992188,
819
+ "logps/rejected": -183.91680908203125,
820
+ "loss": 0.4782,
821
+ "rewards/accuracies": 0.762499988079071,
822
+ "rewards/chosen": -1.8343322277069092,
823
+ "rewards/margins": 1.209377408027649,
824
+ "rewards/rejected": -3.0437099933624268,
825
+ "step": 550
826
+ },
827
+ {
828
+ "epoch": 0.55,
829
+ "learning_rate": 2.4914070392022717e-06,
830
+ "logits/chosen": -2.287393093109131,
831
+ "logits/rejected": -2.1789438724517822,
832
+ "logps/chosen": -167.02993774414062,
833
+ "logps/rejected": -178.21737670898438,
834
+ "loss": 0.4847,
835
+ "rewards/accuracies": 0.7749999761581421,
836
+ "rewards/chosen": -1.4670352935791016,
837
+ "rewards/margins": 1.1378867626190186,
838
+ "rewards/rejected": -2.60492205619812,
839
+ "step": 560
840
+ },
841
+ {
842
+ "epoch": 0.56,
843
+ "learning_rate": 2.4054997639861778e-06,
844
+ "logits/chosen": -2.2121901512145996,
845
+ "logits/rejected": -2.039933443069458,
846
+ "logps/chosen": -164.4762725830078,
847
+ "logps/rejected": -171.06558227539062,
848
+ "loss": 0.4581,
849
+ "rewards/accuracies": 0.7875000238418579,
850
+ "rewards/chosen": -1.45037043094635,
851
+ "rewards/margins": 1.332580327987671,
852
+ "rewards/rejected": -2.7829508781433105,
853
+ "step": 570
854
+ },
855
+ {
856
+ "epoch": 0.57,
857
+ "learning_rate": 2.3197041230278905e-06,
858
+ "logits/chosen": -2.336905002593994,
859
+ "logits/rejected": -2.2310211658477783,
860
+ "logps/chosen": -159.6435089111328,
861
+ "logps/rejected": -185.4768829345703,
862
+ "loss": 0.4182,
863
+ "rewards/accuracies": 0.824999988079071,
864
+ "rewards/chosen": -1.4801210165023804,
865
+ "rewards/margins": 1.5004690885543823,
866
+ "rewards/rejected": -2.980590343475342,
867
+ "step": 580
868
+ },
869
+ {
870
+ "epoch": 0.58,
871
+ "learning_rate": 2.234121467743082e-06,
872
+ "logits/chosen": -2.356192111968994,
873
+ "logits/rejected": -2.269792079925537,
874
+ "logps/chosen": -152.16677856445312,
875
+ "logps/rejected": -166.55563354492188,
876
+ "loss": 0.545,
877
+ "rewards/accuracies": 0.7749999761581421,
878
+ "rewards/chosen": -1.5818209648132324,
879
+ "rewards/margins": 1.1744325160980225,
880
+ "rewards/rejected": -2.756253719329834,
881
+ "step": 590
882
+ },
883
+ {
884
+ "epoch": 0.59,
885
+ "learning_rate": 2.148852897944905e-06,
886
+ "logits/chosen": -2.4117207527160645,
887
+ "logits/rejected": -2.2672557830810547,
888
+ "logps/chosen": -149.95504760742188,
889
+ "logps/rejected": -165.35263061523438,
890
+ "loss": 0.4388,
891
+ "rewards/accuracies": 0.7749999761581421,
892
+ "rewards/chosen": -1.488303303718567,
893
+ "rewards/margins": 1.227616786956787,
894
+ "rewards/rejected": -2.7159199714660645,
895
+ "step": 600
896
+ },
897
+ {
898
+ "epoch": 0.59,
899
+ "eval_logits/chosen": -2.1954238414764404,
900
+ "eval_logits/rejected": -2.1017305850982666,
901
+ "eval_logps/chosen": -161.69036865234375,
902
+ "eval_logps/rejected": -173.2707977294922,
903
+ "eval_loss": 0.5324557423591614,
904
+ "eval_rewards/accuracies": 0.7122641801834106,
905
+ "eval_rewards/chosen": -1.8012140989303589,
906
+ "eval_rewards/margins": 1.091520071029663,
907
+ "eval_rewards/rejected": -2.8927345275878906,
908
+ "eval_runtime": 427.5006,
909
+ "eval_samples_per_second": 0.978,
910
+ "eval_steps_per_second": 0.124,
911
+ "step": 600
912
+ },
913
+ {
914
+ "epoch": 0.6,
915
+ "learning_rate": 2.063999142413574e-06,
916
+ "logits/chosen": -2.3014285564422607,
917
+ "logits/rejected": -2.1665964126586914,
918
+ "logps/chosen": -164.73809814453125,
919
+ "logps/rejected": -163.736083984375,
920
+ "loss": 0.4451,
921
+ "rewards/accuracies": 0.7875000238418579,
922
+ "rewards/chosen": -1.6745765209197998,
923
+ "rewards/margins": 1.155146598815918,
924
+ "rewards/rejected": -2.8297228813171387,
925
+ "step": 610
926
+ },
927
+ {
928
+ "epoch": 0.61,
929
+ "learning_rate": 1.9796604399042547e-06,
930
+ "logits/chosen": -2.375365972518921,
931
+ "logits/rejected": -2.2335057258605957,
932
+ "logps/chosen": -170.6911163330078,
933
+ "logps/rejected": -186.2073974609375,
934
+ "loss": 0.4407,
935
+ "rewards/accuracies": 0.7875000238418579,
936
+ "rewards/chosen": -1.9759544134140015,
937
+ "rewards/margins": 1.4642221927642822,
938
+ "rewards/rejected": -3.440176486968994,
939
+ "step": 620
940
+ },
941
+ {
942
+ "epoch": 0.62,
943
+ "learning_rate": 1.8959364207338216e-06,
944
+ "logits/chosen": -2.3572235107421875,
945
+ "logits/rejected": -2.1937854290008545,
946
+ "logps/chosen": -148.90460205078125,
947
+ "logps/rejected": -174.04818725585938,
948
+ "loss": 0.5111,
949
+ "rewards/accuracies": 0.75,
950
+ "rewards/chosen": -1.8767086267471313,
951
+ "rewards/margins": 1.4870884418487549,
952
+ "rewards/rejected": -3.363797426223755,
953
+ "step": 630
954
+ },
955
+ {
956
+ "epoch": 0.63,
957
+ "learning_rate": 1.8129259890863825e-06,
958
+ "logits/chosen": -2.3194022178649902,
959
+ "logits/rejected": -2.2175240516662598,
960
+ "logps/chosen": -165.7982177734375,
961
+ "logps/rejected": -185.64183044433594,
962
+ "loss": 0.5652,
963
+ "rewards/accuracies": 0.6875,
964
+ "rewards/chosen": -1.662651777267456,
965
+ "rewards/margins": 1.1808096170425415,
966
+ "rewards/rejected": -2.843461513519287,
967
+ "step": 640
968
+ },
969
+ {
970
+ "epoch": 0.64,
971
+ "learning_rate": 1.7307272061765738e-06,
972
+ "logits/chosen": -2.355379819869995,
973
+ "logits/rejected": -2.256904125213623,
974
+ "logps/chosen": -160.78778076171875,
975
+ "logps/rejected": -178.40591430664062,
976
+ "loss": 0.489,
977
+ "rewards/accuracies": 0.8125,
978
+ "rewards/chosen": -1.5761786699295044,
979
+ "rewards/margins": 1.2927827835083008,
980
+ "rewards/rejected": -2.8689613342285156,
981
+ "step": 650
982
+ },
983
+ {
984
+ "epoch": 0.65,
985
+ "learning_rate": 1.649437174408685e-06,
986
+ "logits/chosen": -2.2938830852508545,
987
+ "logits/rejected": -2.162132740020752,
988
+ "logps/chosen": -150.61842346191406,
989
+ "logps/rejected": -170.839111328125,
990
+ "loss": 0.5404,
991
+ "rewards/accuracies": 0.737500011920929,
992
+ "rewards/chosen": -1.1881531476974487,
993
+ "rewards/margins": 1.283790111541748,
994
+ "rewards/rejected": -2.4719433784484863,
995
+ "step": 660
996
+ },
997
+ {
998
+ "epoch": 0.66,
999
+ "learning_rate": 1.569151922668422e-06,
1000
+ "logits/chosen": -2.3966734409332275,
1001
+ "logits/rejected": -2.287233829498291,
1002
+ "logps/chosen": -138.367431640625,
1003
+ "logps/rejected": -159.68118286132812,
1004
+ "loss": 0.5196,
1005
+ "rewards/accuracies": 0.7749999761581421,
1006
+ "rewards/chosen": -1.2400791645050049,
1007
+ "rewards/margins": 1.232775330543518,
1008
+ "rewards/rejected": -2.4728543758392334,
1009
+ "step": 670
1010
+ },
1011
+ {
1012
+ "epoch": 0.67,
1013
+ "learning_rate": 1.4899662928828428e-06,
1014
+ "logits/chosen": -2.333634853363037,
1015
+ "logits/rejected": -2.157811403274536,
1016
+ "logps/chosen": -127.2032699584961,
1017
+ "logps/rejected": -149.32907104492188,
1018
+ "loss": 0.4387,
1019
+ "rewards/accuracies": 0.800000011920929,
1020
+ "rewards/chosen": -1.0127990245819092,
1021
+ "rewards/margins": 1.4315626621246338,
1022
+ "rewards/rejected": -2.444361686706543,
1023
+ "step": 680
1024
+ },
1025
+ {
1026
+ "epoch": 0.68,
1027
+ "learning_rate": 1.4119738279824507e-06,
1028
+ "logits/chosen": -2.272979259490967,
1029
+ "logits/rejected": -2.127248764038086,
1030
+ "logps/chosen": -131.46400451660156,
1031
+ "logps/rejected": -144.866455078125,
1032
+ "loss": 0.5303,
1033
+ "rewards/accuracies": 0.7124999761581421,
1034
+ "rewards/chosen": -1.0165386199951172,
1035
+ "rewards/margins": 1.0676348209381104,
1036
+ "rewards/rejected": -2.0841732025146484,
1037
+ "step": 690
1038
+ },
1039
+ {
1040
+ "epoch": 0.69,
1041
+ "learning_rate": 1.3352666613978152e-06,
1042
+ "logits/chosen": -2.192830801010132,
1043
+ "logits/rejected": -2.111144542694092,
1044
+ "logps/chosen": -134.23226928710938,
1045
+ "logps/rejected": -143.62905883789062,
1046
+ "loss": 0.5064,
1047
+ "rewards/accuracies": 0.75,
1048
+ "rewards/chosen": -1.0449942350387573,
1049
+ "rewards/margins": 1.0384838581085205,
1050
+ "rewards/rejected": -2.0834782123565674,
1051
+ "step": 700
1052
+ },
1053
+ {
1054
+ "epoch": 0.7,
1055
+ "learning_rate": 1.2599354082212523e-06,
1056
+ "logits/chosen": -2.2571260929107666,
1057
+ "logits/rejected": -2.1350224018096924,
1058
+ "logps/chosen": -129.79287719726562,
1059
+ "logps/rejected": -138.2635498046875,
1060
+ "loss": 0.5149,
1061
+ "rewards/accuracies": 0.800000011920929,
1062
+ "rewards/chosen": -0.9037138223648071,
1063
+ "rewards/margins": 0.9939008951187134,
1064
+ "rewards/rejected": -1.8976147174835205,
1065
+ "step": 710
1066
+ },
1067
+ {
1068
+ "epoch": 0.71,
1069
+ "learning_rate": 1.186069058162127e-06,
1070
+ "logits/chosen": -2.3779895305633545,
1071
+ "logits/rejected": -2.2798473834991455,
1072
+ "logps/chosen": -136.55984497070312,
1073
+ "logps/rejected": -145.81918334960938,
1074
+ "loss": 0.5318,
1075
+ "rewards/accuracies": 0.75,
1076
+ "rewards/chosen": -1.2205983400344849,
1077
+ "rewards/margins": 0.9511237144470215,
1078
+ "rewards/rejected": -2.171722173690796,
1079
+ "step": 720
1080
+ },
1081
+ {
1082
+ "epoch": 0.72,
1083
+ "learning_rate": 1.113754870422254e-06,
1084
+ "logits/chosen": -2.4235808849334717,
1085
+ "logits/rejected": -2.3193821907043457,
1086
+ "logps/chosen": -142.02517700195312,
1087
+ "logps/rejected": -161.961669921875,
1088
+ "loss": 0.5042,
1089
+ "rewards/accuracies": 0.7124999761581421,
1090
+ "rewards/chosen": -1.2923016548156738,
1091
+ "rewards/margins": 0.9127277135848999,
1092
+ "rewards/rejected": -2.205029249191284,
1093
+ "step": 730
1094
+ },
1095
+ {
1096
+ "epoch": 0.73,
1097
+ "learning_rate": 1.0430782706155545e-06,
1098
+ "logits/chosen": -2.435793399810791,
1099
+ "logits/rejected": -2.296560764312744,
1100
+ "logps/chosen": -145.57290649414062,
1101
+ "logps/rejected": -156.23214721679688,
1102
+ "loss": 0.4346,
1103
+ "rewards/accuracies": 0.7749999761581421,
1104
+ "rewards/chosen": -1.0421411991119385,
1105
+ "rewards/margins": 1.3148539066314697,
1106
+ "rewards/rejected": -2.356995105743408,
1107
+ "step": 740
1108
+ },
1109
+ {
1110
+ "epoch": 0.74,
1111
+ "learning_rate": 9.741227498537615e-07,
1112
+ "logits/chosen": -2.4774973392486572,
1113
+ "logits/rejected": -2.3496251106262207,
1114
+ "logps/chosen": -133.50381469726562,
1115
+ "logps/rejected": -148.293701171875,
1116
+ "loss": 0.4295,
1117
+ "rewards/accuracies": 0.800000011920929,
1118
+ "rewards/chosen": -1.2879221439361572,
1119
+ "rewards/margins": 1.1688247919082642,
1120
+ "rewards/rejected": -2.45674729347229,
1121
+ "step": 750
1122
+ },
1123
+ {
1124
+ "epoch": 0.75,
1125
+ "learning_rate": 9.069697661173668e-07,
1126
+ "logits/chosen": -2.3304691314697266,
1127
+ "logits/rejected": -2.2422666549682617,
1128
+ "logps/chosen": -143.60433959960938,
1129
+ "logps/rejected": -161.01876831054688,
1130
+ "loss": 0.6187,
1131
+ "rewards/accuracies": 0.675000011920929,
1132
+ "rewards/chosen": -1.4187496900558472,
1133
+ "rewards/margins": 0.990622878074646,
1134
+ "rewards/rejected": -2.4093728065490723,
1135
+ "step": 760
1136
+ },
1137
+ {
1138
+ "epoch": 0.76,
1139
+ "learning_rate": 8.416986480283434e-07,
1140
+ "logits/chosen": -2.2641918659210205,
1141
+ "logits/rejected": -2.1294054985046387,
1142
+ "logps/chosen": -150.9503173828125,
1143
+ "logps/rejected": -169.94259643554688,
1144
+ "loss": 0.5414,
1145
+ "rewards/accuracies": 0.7124999761581421,
1146
+ "rewards/chosen": -1.6702228784561157,
1147
+ "rewards/margins": 1.2004982233047485,
1148
+ "rewards/rejected": -2.8707211017608643,
1149
+ "step": 770
1150
+ },
1151
+ {
1152
+ "epoch": 0.77,
1153
+ "learning_rate": 7.783865011382876e-07,
1154
+ "logits/chosen": -2.318986415863037,
1155
+ "logits/rejected": -2.189507246017456,
1156
+ "logps/chosen": -157.97384643554688,
1157
+ "logps/rejected": -164.84359741210938,
1158
+ "loss": 0.4953,
1159
+ "rewards/accuracies": 0.7250000238418579,
1160
+ "rewards/chosen": -1.3389387130737305,
1161
+ "rewards/margins": 1.2802064418792725,
1162
+ "rewards/rejected": -2.619144916534424,
1163
+ "step": 780
1164
+ },
1165
+ {
1166
+ "epoch": 0.78,
1167
+ "learning_rate": 7.171081168427205e-07,
1168
+ "logits/chosen": -2.3565828800201416,
1169
+ "logits/rejected": -2.193580150604248,
1170
+ "logps/chosen": -169.49929809570312,
1171
+ "logps/rejected": -177.23731994628906,
1172
+ "loss": 0.5052,
1173
+ "rewards/accuracies": 0.7250000238418579,
1174
+ "rewards/chosen": -1.3413608074188232,
1175
+ "rewards/margins": 1.271929144859314,
1176
+ "rewards/rejected": -2.6132900714874268,
1177
+ "step": 790
1178
+ },
1179
+ {
1180
+ "epoch": 0.79,
1181
+ "learning_rate": 6.579358840291064e-07,
1182
+ "logits/chosen": -2.3053181171417236,
1183
+ "logits/rejected": -2.205723762512207,
1184
+ "logps/chosen": -155.66004943847656,
1185
+ "logps/rejected": -173.5889434814453,
1186
+ "loss": 0.6137,
1187
+ "rewards/accuracies": 0.7250000238418579,
1188
+ "rewards/chosen": -1.5923388004302979,
1189
+ "rewards/margins": 0.8797184228897095,
1190
+ "rewards/rejected": -2.4720568656921387,
1191
+ "step": 800
1192
+ },
1193
+ {
1194
+ "epoch": 0.79,
1195
+ "eval_logits/chosen": -2.19348406791687,
1196
+ "eval_logits/rejected": -2.0962369441986084,
1197
+ "eval_logps/chosen": -149.9387664794922,
1198
+ "eval_logps/rejected": -160.84133911132812,
1199
+ "eval_loss": 0.5198491215705872,
1200
+ "eval_rewards/accuracies": 0.7382075190544128,
1201
+ "eval_rewards/chosen": -1.448665976524353,
1202
+ "eval_rewards/margins": 1.0711843967437744,
1203
+ "eval_rewards/rejected": -2.519850254058838,
1204
+ "eval_runtime": 426.9839,
1205
+ "eval_samples_per_second": 0.979,
1206
+ "eval_steps_per_second": 0.124,
1207
+ "step": 800
1208
+ },
1209
+ {
1210
+ "epoch": 0.8,
1211
+ "learning_rate": 6.00939703563006e-07,
1212
+ "logits/chosen": -2.3648667335510254,
1213
+ "logits/rejected": -2.2830593585968018,
1214
+ "logps/chosen": -136.9246826171875,
1215
+ "logps/rejected": -156.57559204101562,
1216
+ "loss": 0.5648,
1217
+ "rewards/accuracies": 0.7124999761581421,
1218
+ "rewards/chosen": -1.4630320072174072,
1219
+ "rewards/margins": 0.9899848699569702,
1220
+ "rewards/rejected": -2.453016757965088,
1221
+ "step": 810
1222
+ },
1223
+ {
1224
+ "epoch": 0.81,
1225
+ "learning_rate": 5.461869057133412e-07,
1226
+ "logits/chosen": -2.408620595932007,
1227
+ "logits/rejected": -2.319190502166748,
1228
+ "logps/chosen": -145.22872924804688,
1229
+ "logps/rejected": -167.64926147460938,
1230
+ "loss": 0.487,
1231
+ "rewards/accuracies": 0.7124999761581421,
1232
+ "rewards/chosen": -1.306095838546753,
1233
+ "rewards/margins": 1.1592603921890259,
1234
+ "rewards/rejected": -2.4653563499450684,
1235
+ "step": 820
1236
+ },
1237
+ {
1238
+ "epoch": 0.82,
1239
+ "learning_rate": 4.937421706143497e-07,
1240
+ "logits/chosen": -2.290102481842041,
1241
+ "logits/rejected": -2.158676862716675,
1242
+ "logps/chosen": -152.98867797851562,
1243
+ "logps/rejected": -159.16566467285156,
1244
+ "loss": 0.5218,
1245
+ "rewards/accuracies": 0.737500011920929,
1246
+ "rewards/chosen": -1.3972251415252686,
1247
+ "rewards/margins": 1.101043462753296,
1248
+ "rewards/rejected": -2.4982683658599854,
1249
+ "step": 830
1250
+ },
1251
+ {
1252
+ "epoch": 0.83,
1253
+ "learning_rate": 4.43667451858166e-07,
1254
+ "logits/chosen": -2.2902016639709473,
1255
+ "logits/rejected": -2.131404399871826,
1256
+ "logps/chosen": -134.6024627685547,
1257
+ "logps/rejected": -151.9069061279297,
1258
+ "loss": 0.4313,
1259
+ "rewards/accuracies": 0.8374999761581421,
1260
+ "rewards/chosen": -1.2463924884796143,
1261
+ "rewards/margins": 1.268364667892456,
1262
+ "rewards/rejected": -2.514756917953491,
1263
+ "step": 840
1264
+ },
1265
+ {
1266
+ "epoch": 0.84,
1267
+ "learning_rate": 3.9602190330830484e-07,
1268
+ "logits/chosen": -2.2570953369140625,
1269
+ "logits/rejected": -2.157632827758789,
1270
+ "logps/chosen": -141.2190704345703,
1271
+ "logps/rejected": -162.86306762695312,
1272
+ "loss": 0.4876,
1273
+ "rewards/accuracies": 0.737500011920929,
1274
+ "rewards/chosen": -1.3870246410369873,
1275
+ "rewards/margins": 1.1176416873931885,
1276
+ "rewards/rejected": -2.504666328430176,
1277
+ "step": 850
1278
+ },
1279
+ {
1280
+ "epoch": 0.85,
1281
+ "learning_rate": 3.5086180922049295e-07,
1282
+ "logits/chosen": -2.394253969192505,
1283
+ "logits/rejected": -2.2592263221740723,
1284
+ "logps/chosen": -165.12005615234375,
1285
+ "logps/rejected": -166.54446411132812,
1286
+ "loss": 0.4366,
1287
+ "rewards/accuracies": 0.762499988079071,
1288
+ "rewards/chosen": -1.3376173973083496,
1289
+ "rewards/margins": 1.2365379333496094,
1290
+ "rewards/rejected": -2.574155330657959,
1291
+ "step": 860
1292
+ },
1293
+ {
1294
+ "epoch": 0.86,
1295
+ "learning_rate": 3.0824051775340895e-07,
1296
+ "logits/chosen": -2.424966335296631,
1297
+ "logits/rejected": -2.3199541568756104,
1298
+ "logps/chosen": -124.06752014160156,
1299
+ "logps/rejected": -140.36489868164062,
1300
+ "loss": 0.5696,
1301
+ "rewards/accuracies": 0.737500011920929,
1302
+ "rewards/chosen": -1.2786959409713745,
1303
+ "rewards/margins": 0.8720223307609558,
1304
+ "rewards/rejected": -2.1507184505462646,
1305
+ "step": 870
1306
+ },
1307
+ {
1308
+ "epoch": 0.87,
1309
+ "learning_rate": 2.6820837794786336e-07,
1310
+ "logits/chosen": -2.2567453384399414,
1311
+ "logits/rejected": -2.1820857524871826,
1312
+ "logps/chosen": -145.2144317626953,
1313
+ "logps/rejected": -155.57261657714844,
1314
+ "loss": 0.6788,
1315
+ "rewards/accuracies": 0.574999988079071,
1316
+ "rewards/chosen": -1.4533874988555908,
1317
+ "rewards/margins": 0.4883599281311035,
1318
+ "rewards/rejected": -1.9417474269866943,
1319
+ "step": 880
1320
+ },
1321
+ {
1322
+ "epoch": 0.88,
1323
+ "learning_rate": 2.3081268024887694e-07,
1324
+ "logits/chosen": -2.2515053749084473,
1325
+ "logits/rejected": -2.0878632068634033,
1326
+ "logps/chosen": -141.62442016601562,
1327
+ "logps/rejected": -151.8519287109375,
1328
+ "loss": 0.4185,
1329
+ "rewards/accuracies": 0.8125,
1330
+ "rewards/chosen": -1.220234990119934,
1331
+ "rewards/margins": 1.2837200164794922,
1332
+ "rewards/rejected": -2.5039544105529785,
1333
+ "step": 890
1334
+ },
1335
+ {
1336
+ "epoch": 0.89,
1337
+ "learning_rate": 1.9609760064091044e-07,
1338
+ "logits/chosen": -2.343606472015381,
1339
+ "logits/rejected": -2.277740955352783,
1340
+ "logps/chosen": -144.82054138183594,
1341
+ "logps/rejected": -142.51901245117188,
1342
+ "loss": 0.5214,
1343
+ "rewards/accuracies": 0.75,
1344
+ "rewards/chosen": -1.2351983785629272,
1345
+ "rewards/margins": 0.8959378004074097,
1346
+ "rewards/rejected": -2.131136417388916,
1347
+ "step": 900
1348
+ },
1349
+ {
1350
+ "epoch": 0.9,
1351
+ "learning_rate": 1.6410414846224992e-07,
1352
+ "logits/chosen": -2.256499767303467,
1353
+ "logits/rejected": -2.1499085426330566,
1354
+ "logps/chosen": -139.65145874023438,
1355
+ "logps/rejected": -156.27642822265625,
1356
+ "loss": 0.429,
1357
+ "rewards/accuracies": 0.8125,
1358
+ "rewards/chosen": -1.2983112335205078,
1359
+ "rewards/margins": 1.2496817111968994,
1360
+ "rewards/rejected": -2.547992706298828,
1361
+ "step": 910
1362
+ },
1363
+ {
1364
+ "epoch": 0.91,
1365
+ "learning_rate": 1.348701179601819e-07,
1366
+ "logits/chosen": -2.4174904823303223,
1367
+ "logits/rejected": -2.2869112491607666,
1368
+ "logps/chosen": -156.2935028076172,
1369
+ "logps/rejected": -166.49362182617188,
1370
+ "loss": 0.4412,
1371
+ "rewards/accuracies": 0.824999988079071,
1372
+ "rewards/chosen": -1.221897840499878,
1373
+ "rewards/margins": 1.1692349910736084,
1374
+ "rewards/rejected": -2.3911328315734863,
1375
+ "step": 920
1376
+ },
1377
+ {
1378
+ "epoch": 0.91,
1379
+ "learning_rate": 1.0843004364420151e-07,
1380
+ "logits/chosen": -2.2345454692840576,
1381
+ "logits/rejected": -2.1238150596618652,
1382
+ "logps/chosen": -157.7463836669922,
1383
+ "logps/rejected": -174.59848022460938,
1384
+ "loss": 0.5407,
1385
+ "rewards/accuracies": 0.7124999761581421,
1386
+ "rewards/chosen": -1.309340476989746,
1387
+ "rewards/margins": 0.9167596101760864,
1388
+ "rewards/rejected": -2.226100206375122,
1389
+ "step": 930
1390
+ },
1391
+ {
1392
+ "epoch": 0.92,
1393
+ "learning_rate": 8.481515948997931e-08,
1394
+ "logits/chosen": -2.384162425994873,
1395
+ "logits/rejected": -2.2841312885284424,
1396
+ "logps/chosen": -161.34854125976562,
1397
+ "logps/rejected": -156.95523071289062,
1398
+ "loss": 0.6309,
1399
+ "rewards/accuracies": 0.75,
1400
+ "rewards/chosen": -1.4349567890167236,
1401
+ "rewards/margins": 0.7904176115989685,
1402
+ "rewards/rejected": -2.225374460220337,
1403
+ "step": 940
1404
+ },
1405
+ {
1406
+ "epoch": 0.93,
1407
+ "learning_rate": 6.4053362042297e-08,
1408
+ "logits/chosen": -2.284637928009033,
1409
+ "logits/rejected": -2.121169090270996,
1410
+ "logps/chosen": -147.6913299560547,
1411
+ "logps/rejected": -160.8138885498047,
1412
+ "loss": 0.4361,
1413
+ "rewards/accuracies": 0.7124999761581421,
1414
+ "rewards/chosen": -1.3165013790130615,
1415
+ "rewards/margins": 1.2154157161712646,
1416
+ "rewards/rejected": -2.5319173336029053,
1417
+ "step": 950
1418
+ },
1419
+ {
1420
+ "epoch": 0.94,
1421
+ "learning_rate": 4.616917746052163e-08,
1422
+ "logits/chosen": -2.3731329441070557,
1423
+ "logits/rejected": -2.2411139011383057,
1424
+ "logps/chosen": -147.39578247070312,
1425
+ "logps/rejected": -155.5648651123047,
1426
+ "loss": 0.5548,
1427
+ "rewards/accuracies": 0.737500011920929,
1428
+ "rewards/chosen": -1.4831774234771729,
1429
+ "rewards/margins": 1.0112624168395996,
1430
+ "rewards/rejected": -2.4944396018981934,
1431
+ "step": 960
1432
+ },
1433
+ {
1434
+ "epoch": 0.95,
1435
+ "learning_rate": 3.118373254556412e-08,
1436
+ "logits/chosen": -2.4090075492858887,
1437
+ "logits/rejected": -2.2784786224365234,
1438
+ "logps/chosen": -138.59571838378906,
1439
+ "logps/rejected": -139.35055541992188,
1440
+ "loss": 0.4862,
1441
+ "rewards/accuracies": 0.7749999761581421,
1442
+ "rewards/chosen": -1.2963029146194458,
1443
+ "rewards/margins": 1.0239927768707275,
1444
+ "rewards/rejected": -2.320295810699463,
1445
+ "step": 970
1446
+ },
1447
+ {
1448
+ "epoch": 0.96,
1449
+ "learning_rate": 1.9114729782535037e-08,
1450
+ "logits/chosen": -2.432577133178711,
1451
+ "logits/rejected": -2.3220748901367188,
1452
+ "logps/chosen": -136.9718017578125,
1453
+ "logps/rejected": -146.90240478515625,
1454
+ "loss": 0.5037,
1455
+ "rewards/accuracies": 0.7250000238418579,
1456
+ "rewards/chosen": -1.215503454208374,
1457
+ "rewards/margins": 0.9537515640258789,
1458
+ "rewards/rejected": -2.169254779815674,
1459
+ "step": 980
1460
+ },
1461
+ {
1462
+ "epoch": 0.97,
1463
+ "learning_rate": 9.97642642858815e-09,
1464
+ "logits/chosen": -2.30542254447937,
1465
+ "logits/rejected": -2.1695542335510254,
1466
+ "logps/chosen": -152.14138793945312,
1467
+ "logps/rejected": -153.89279174804688,
1468
+ "loss": 0.5001,
1469
+ "rewards/accuracies": 0.737500011920929,
1470
+ "rewards/chosen": -1.3888603448867798,
1471
+ "rewards/margins": 0.9819372296333313,
1472
+ "rewards/rejected": -2.3707973957061768,
1473
+ "step": 990
1474
+ },
1475
+ {
1476
+ "epoch": 0.98,
1477
+ "learning_rate": 3.779617670651436e-09,
1478
+ "logits/chosen": -2.278373956680298,
1479
+ "logits/rejected": -2.17798113822937,
1480
+ "logps/chosen": -157.8939208984375,
1481
+ "logps/rejected": -168.04055786132812,
1482
+ "loss": 0.5866,
1483
+ "rewards/accuracies": 0.7124999761581421,
1484
+ "rewards/chosen": -1.575810194015503,
1485
+ "rewards/margins": 0.995488166809082,
1486
+ "rewards/rejected": -2.571298599243164,
1487
+ "step": 1000
1488
+ },
1489
+ {
1490
+ "epoch": 0.98,
1491
+ "eval_logits/chosen": -2.192172050476074,
1492
+ "eval_logits/rejected": -2.095205545425415,
1493
+ "eval_logps/chosen": -147.71951293945312,
1494
+ "eval_logps/rejected": -158.54696655273438,
1495
+ "eval_loss": 0.5194684863090515,
1496
+ "eval_rewards/accuracies": 0.7358490824699402,
1497
+ "eval_rewards/chosen": -1.382088541984558,
1498
+ "eval_rewards/margins": 1.068930983543396,
1499
+ "eval_rewards/rejected": -2.451019525527954,
1500
+ "eval_runtime": 427.3583,
1501
+ "eval_samples_per_second": 0.978,
1502
+ "eval_steps_per_second": 0.124,
1503
+ "step": 1000
1504
+ },
1505
+ {
1506
+ "epoch": 0.99,
1507
+ "learning_rate": 5.316238729444201e-10,
1508
+ "logits/chosen": -2.2608113288879395,
1509
+ "logits/rejected": -2.101694345474243,
1510
+ "logps/chosen": -152.0623779296875,
1511
+ "logps/rejected": -162.7583770751953,
1512
+ "loss": 0.4821,
1513
+ "rewards/accuracies": 0.7124999761581421,
1514
+ "rewards/chosen": -1.5301237106323242,
1515
+ "rewards/margins": 1.2161496877670288,
1516
+ "rewards/rejected": -2.7462732791900635,
1517
+ "step": 1010
1518
+ },
1519
+ {
1520
+ "epoch": 1.0,
1521
+ "step": 1016,
1522
+ "total_flos": 0.0,
1523
+ "train_loss": 0.5536013367607837,
1524
+ "train_runtime": 17040.3461,
1525
+ "train_samples_per_second": 0.477,
1526
+ "train_steps_per_second": 0.06
1527
+ }
1528
+ ],
1529
+ "logging_steps": 10,
1530
+ "max_steps": 1016,
1531
+ "num_input_tokens_seen": 0,
1532
+ "num_train_epochs": 1,
1533
+ "save_steps": 100,
1534
+ "total_flos": 0.0,
1535
+ "train_batch_size": 4,
1536
+ "trial_name": null,
1537
+ "trial_params": null
1538
+ }