Commit e291de3, committed by dctanner
1 Parent(s): f470968

Model save

README.md ADDED
@@ -0,0 +1,77 @@
+ ---
+ license: apache-2.0
+ library_name: peft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ base_model: sablo/sablo-pebble-mistral
+ model-index:
+ - name: sablo-pebble-mistral-dpo-lora-HelpSteer_binarized
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # sablo-pebble-mistral-dpo-lora-HelpSteer_binarized
+
+ This model is a fine-tuned version of [sablo/sablo-pebble-mistral](https://huggingface.co/sablo/sablo-pebble-mistral) on the None dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.5212
+ - Rewards/chosen: -2.5398
+ - Rewards/rejected: -3.5311
+ - Rewards/accuracies: 0.7406
+ - Rewards/margins: 0.9913
+ - Logps/rejected: -214.9349
+ - Logps/chosen: -206.5847
+ - Logits/rejected: -2.0624
+ - Logits/chosen: -2.1620
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 4
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.652 | 0.2 | 200 | 0.6595 | 0.0498 | -0.0320 | 0.6415 | 0.0818 | -98.2975 | -120.2629 | -2.0236 | -2.1231 |
+ | 0.4905 | 0.39 | 400 | 0.5551 | -1.6581 | -2.1527 | 0.6958 | 0.4946 | -168.9884 | -177.1946 | -2.0950 | -2.1951 |
+ | 0.4249 | 0.59 | 600 | 0.5327 | -3.4554 | -4.3247 | 0.7241 | 0.8693 | -241.3867 | -237.1045 | -2.0782 | -2.1773 |
+ | 0.5858 | 0.79 | 800 | 0.5207 | -2.5072 | -3.4512 | 0.7335 | 0.9440 | -212.2718 | -205.4982 | -2.0586 | -2.1591 |
+ | 0.6128 | 0.98 | 1000 | 0.5212 | -2.5398 | -3.5311 | 0.7406 | 0.9913 | -214.9349 | -206.5847 | -2.0624 | -2.1620 |
+
+
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.36.2
+ - Pytorch 2.0.1+cu118
+ - Datasets 2.14.6
+ - Tokenizers 0.15.0
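
For orientation, the hyperparameters in the card above correspond to a fairly standard TRL DPO run over a PEFT LoRA adapter. The sketch below is illustrative only: the preference dataset files, the LoRA settings, the output directory, and the exact TRL call signature for these library versions are assumptions rather than anything recorded in this commit.

```python
# Illustrative sketch of a DPO run matching the card's hyperparameters.
# Dataset files, LoRA settings and output_dir are placeholders, not taken from this commit.
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

base_id = "sablo/sablo-pebble-mistral"
model = AutoModelForCausalLM.from_pretrained(base_id)
tokenizer = AutoTokenizer.from_pretrained(base_id)

# Placeholder preference data with "prompt", "chosen" and "rejected" columns.
prefs = load_dataset("json", data_files={"train": "train_prefs.json", "test": "test_prefs.json"})

training_args = TrainingArguments(
    output_dir="sablo-pebble-mistral-dpo-lora-HelpSteer_binarized",
    learning_rate=5e-6,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,
    num_train_epochs=1,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
    # Adam betas (0.9, 0.999) and epsilon 1e-8 are the library defaults, matching the card.
)

trainer = DPOTrainer(
    model=model,
    ref_model=None,  # with peft_config set, TRL derives the reference model internally
    args=training_args,
    train_dataset=prefs["train"],
    eval_dataset=prefs["test"],
    tokenizer=tokenizer,
    peft_config=LoraConfig(task_type="CAUSAL_LM"),  # actual LoRA ranks/targets are not in the card
)
trainer.train()
```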
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0bbd1c6d2669f3a9b5f35d1131b8c728053d8507b1f057e7f4565718a340ec49
+ oid sha256:5595d5709e08dfe18a5e6f2986317e21a3e06942bd5a4d3e5c0bbc18374bcd7e
  size 83945744
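
The safetensors blob above holds only the LoRA adapter weights, so they have to be applied on top of the base model before use. A minimal loading sketch, assuming a local checkout of this repository (the adapter path is a placeholder):

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "sablo/sablo-pebble-mistral"
base_model = AutoModelForCausalLM.from_pretrained(base_id)
tokenizer = AutoTokenizer.from_pretrained(base_id)

# "path/to/this/repo" stands in for a local clone (or the Hub id) of this adapter repository.
model = PeftModel.from_pretrained(base_model, "path/to/this/repo")
model.eval()
```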
all_results.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "epoch": 1.0,
+ "eval_logits/chosen": -2.1620335578918457,
+ "eval_logits/rejected": -2.062356948852539,
+ "eval_logps/chosen": -206.584716796875,
+ "eval_logps/rejected": -214.93492126464844,
+ "eval_loss": 0.521207869052887,
+ "eval_rewards/accuracies": 0.7405660152435303,
+ "eval_rewards/chosen": -2.5398268699645996,
+ "eval_rewards/margins": 0.9912916421890259,
+ "eval_rewards/rejected": -3.531118631362915,
+ "eval_runtime": 424.2832,
+ "eval_samples": 418,
+ "eval_samples_per_second": 0.985,
+ "eval_steps_per_second": 0.125,
+ "train_loss": 0.5583291621658746,
+ "train_runtime": 16785.8838,
+ "train_samples": 8130,
+ "train_samples_per_second": 0.484,
+ "train_steps_per_second": 0.061
+ }
eval_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "epoch": 1.0,
+ "eval_logits/chosen": -2.1620335578918457,
+ "eval_logits/rejected": -2.062356948852539,
+ "eval_logps/chosen": -206.584716796875,
+ "eval_logps/rejected": -214.93492126464844,
+ "eval_loss": 0.521207869052887,
+ "eval_rewards/accuracies": 0.7405660152435303,
+ "eval_rewards/chosen": -2.5398268699645996,
+ "eval_rewards/margins": 0.9912916421890259,
+ "eval_rewards/rejected": -3.531118631362915,
+ "eval_runtime": 424.2832,
+ "eval_samples": 418,
+ "eval_samples_per_second": 0.985,
+ "eval_steps_per_second": 0.125
+ }
runs/Jan18_15-28-29_08134be46a59/events.out.tfevents.1705591929.08134be46a59.1750.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e011cbf3899ede0a563588175282b5284b034e57f0139b29e249acd0fc42441b
- size 72267
+ oid sha256:a48003c4bae929ea8ea233e41f4ee0d6502473290e7d839752a5e0f4387e7e89
+ size 73255
runs/Jan18_15-28-29_08134be46a59/events.out.tfevents.1705609138.08134be46a59.1750.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00be72d7c7f7977576a9ef9c79387fdd23171778aeb07fa3ceb8cd5e2946f294
+ size 828
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 1.0,
+ "train_loss": 0.5583291621658746,
+ "train_runtime": 16785.8838,
+ "train_samples": 8130,
+ "train_samples_per_second": 0.484,
+ "train_steps_per_second": 0.061
+ }
trainer_state.json ADDED
@@ -0,0 +1,1538 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.999508116084604,
5
+ "eval_steps": 200,
6
+ "global_step": 1016,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 4.901960784313726e-08,
14
+ "logits/chosen": -2.0737838745117188,
15
+ "logits/rejected": -2.1456010341644287,
16
+ "logps/chosen": -95.6572265625,
17
+ "logps/rejected": -106.55765533447266,
18
+ "loss": 0.6931,
19
+ "rewards/accuracies": 0.0,
20
+ "rewards/chosen": 0.0,
21
+ "rewards/margins": 0.0,
22
+ "rewards/rejected": 0.0,
23
+ "step": 1
24
+ },
25
+ {
26
+ "epoch": 0.01,
27
+ "learning_rate": 4.901960784313725e-07,
28
+ "logits/chosen": -2.165830373764038,
29
+ "logits/rejected": -2.060776948928833,
30
+ "logps/chosen": -121.03773498535156,
31
+ "logps/rejected": -87.5294189453125,
32
+ "loss": 0.6918,
33
+ "rewards/accuracies": 0.5,
34
+ "rewards/chosen": 0.00047249632189050317,
35
+ "rewards/margins": 0.002704059472307563,
36
+ "rewards/rejected": -0.002231562975794077,
37
+ "step": 10
38
+ },
39
+ {
40
+ "epoch": 0.02,
41
+ "learning_rate": 9.80392156862745e-07,
42
+ "logits/chosen": -2.1349050998687744,
43
+ "logits/rejected": -2.016066312789917,
44
+ "logps/chosen": -130.94175720214844,
45
+ "logps/rejected": -105.7674789428711,
46
+ "loss": 0.6935,
47
+ "rewards/accuracies": 0.48750001192092896,
48
+ "rewards/chosen": -0.0016262540593743324,
49
+ "rewards/margins": -0.0006325626163743436,
50
+ "rewards/rejected": -0.0009936915012076497,
51
+ "step": 20
52
+ },
53
+ {
54
+ "epoch": 0.03,
55
+ "learning_rate": 1.4705882352941177e-06,
56
+ "logits/chosen": -2.2818872928619385,
57
+ "logits/rejected": -2.1805636882781982,
58
+ "logps/chosen": -121.03263854980469,
59
+ "logps/rejected": -104.84712982177734,
60
+ "loss": 0.6942,
61
+ "rewards/accuracies": 0.3499999940395355,
62
+ "rewards/chosen": -0.003329185303300619,
63
+ "rewards/margins": -0.002142944373190403,
64
+ "rewards/rejected": -0.0011862408136948943,
65
+ "step": 30
66
+ },
67
+ {
68
+ "epoch": 0.04,
69
+ "learning_rate": 1.96078431372549e-06,
70
+ "logits/chosen": -2.2757694721221924,
71
+ "logits/rejected": -2.156691074371338,
72
+ "logps/chosen": -126.5389633178711,
73
+ "logps/rejected": -105.20024108886719,
74
+ "loss": 0.6919,
75
+ "rewards/accuracies": 0.6000000238418579,
76
+ "rewards/chosen": 0.0010745985200628638,
77
+ "rewards/margins": 0.002502765040844679,
78
+ "rewards/rejected": -0.0014281660551205277,
79
+ "step": 40
80
+ },
81
+ {
82
+ "epoch": 0.05,
83
+ "learning_rate": 2.450980392156863e-06,
84
+ "logits/chosen": -2.3002381324768066,
85
+ "logits/rejected": -2.206784725189209,
86
+ "logps/chosen": -124.18415832519531,
87
+ "logps/rejected": -98.63652801513672,
88
+ "loss": 0.6916,
89
+ "rewards/accuracies": 0.612500011920929,
90
+ "rewards/chosen": 0.0012203993974253535,
91
+ "rewards/margins": 0.003048995044082403,
92
+ "rewards/rejected": -0.0018285956466570497,
93
+ "step": 50
94
+ },
95
+ {
96
+ "epoch": 0.06,
97
+ "learning_rate": 2.9411764705882355e-06,
98
+ "logits/chosen": -2.3112730979919434,
99
+ "logits/rejected": -2.232532501220703,
100
+ "logps/chosen": -126.93055725097656,
101
+ "logps/rejected": -109.7610092163086,
102
+ "loss": 0.6914,
103
+ "rewards/accuracies": 0.6000000238418579,
104
+ "rewards/chosen": 0.01018393412232399,
105
+ "rewards/margins": 0.0034624538384377956,
106
+ "rewards/rejected": 0.006721480283886194,
107
+ "step": 60
108
+ },
109
+ {
110
+ "epoch": 0.07,
111
+ "learning_rate": 3.431372549019608e-06,
112
+ "logits/chosen": -2.2930376529693604,
113
+ "logits/rejected": -2.187328815460205,
114
+ "logps/chosen": -119.68111419677734,
115
+ "logps/rejected": -93.93470001220703,
116
+ "loss": 0.6912,
117
+ "rewards/accuracies": 0.5874999761581421,
118
+ "rewards/chosen": 0.007645626552402973,
119
+ "rewards/margins": 0.003846182022243738,
120
+ "rewards/rejected": 0.0037994447629898787,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 0.08,
125
+ "learning_rate": 3.92156862745098e-06,
126
+ "logits/chosen": -2.144854784011841,
127
+ "logits/rejected": -2.016728401184082,
128
+ "logps/chosen": -132.2415313720703,
129
+ "logps/rejected": -106.7207260131836,
130
+ "loss": 0.6902,
131
+ "rewards/accuracies": 0.637499988079071,
132
+ "rewards/chosen": 0.015773242339491844,
133
+ "rewards/margins": 0.006012483034282923,
134
+ "rewards/rejected": 0.009760759770870209,
135
+ "step": 80
136
+ },
137
+ {
138
+ "epoch": 0.09,
139
+ "learning_rate": 4.411764705882353e-06,
140
+ "logits/chosen": -2.095534324645996,
141
+ "logits/rejected": -1.9636192321777344,
142
+ "logps/chosen": -106.81976318359375,
143
+ "logps/rejected": -83.68408966064453,
144
+ "loss": 0.6887,
145
+ "rewards/accuracies": 0.6000000238418579,
146
+ "rewards/chosen": 0.02147563174366951,
147
+ "rewards/margins": 0.009103062562644482,
148
+ "rewards/rejected": 0.012372570112347603,
149
+ "step": 90
150
+ },
151
+ {
152
+ "epoch": 0.1,
153
+ "learning_rate": 4.901960784313726e-06,
154
+ "logits/chosen": -2.0606656074523926,
155
+ "logits/rejected": -1.87876296043396,
156
+ "logps/chosen": -141.498779296875,
157
+ "logps/rejected": -105.37713623046875,
158
+ "loss": 0.6811,
159
+ "rewards/accuracies": 0.75,
160
+ "rewards/chosen": 0.04738330841064453,
161
+ "rewards/margins": 0.024673232808709145,
162
+ "rewards/rejected": 0.022710075601935387,
163
+ "step": 100
164
+ },
165
+ {
166
+ "epoch": 0.11,
167
+ "learning_rate": 4.9990549169459415e-06,
168
+ "logits/chosen": -2.2754921913146973,
169
+ "logits/rejected": -2.135282516479492,
170
+ "logps/chosen": -124.19816589355469,
171
+ "logps/rejected": -98.95077514648438,
172
+ "loss": 0.6777,
173
+ "rewards/accuracies": 0.7250000238418579,
174
+ "rewards/chosen": 0.056061066687107086,
175
+ "rewards/margins": 0.03164363652467728,
176
+ "rewards/rejected": 0.02441743016242981,
177
+ "step": 110
178
+ },
179
+ {
180
+ "epoch": 0.12,
181
+ "learning_rate": 4.995216741642263e-06,
182
+ "logits/chosen": -2.280646324157715,
183
+ "logits/rejected": -2.206347942352295,
184
+ "logps/chosen": -115.8212890625,
185
+ "logps/rejected": -96.82814025878906,
186
+ "loss": 0.6805,
187
+ "rewards/accuracies": 0.6625000238418579,
188
+ "rewards/chosen": 0.07867949455976486,
189
+ "rewards/margins": 0.02664627507328987,
190
+ "rewards/rejected": 0.05203322693705559,
191
+ "step": 120
192
+ },
193
+ {
194
+ "epoch": 0.13,
195
+ "learning_rate": 4.988430936991089e-06,
196
+ "logits/chosen": -2.2835030555725098,
197
+ "logits/rejected": -2.140094041824341,
198
+ "logps/chosen": -127.94944763183594,
199
+ "logps/rejected": -101.47581481933594,
200
+ "loss": 0.6635,
201
+ "rewards/accuracies": 0.75,
202
+ "rewards/chosen": 0.10174749791622162,
203
+ "rewards/margins": 0.06262228637933731,
204
+ "rewards/rejected": 0.039125215262174606,
205
+ "step": 130
206
+ },
207
+ {
208
+ "epoch": 0.14,
209
+ "learning_rate": 4.978705519144525e-06,
210
+ "logits/chosen": -2.18971586227417,
211
+ "logits/rejected": -2.0279135704040527,
212
+ "logps/chosen": -140.58509826660156,
213
+ "logps/rejected": -104.29924011230469,
214
+ "loss": 0.664,
215
+ "rewards/accuracies": 0.75,
216
+ "rewards/chosen": 0.07688155025243759,
217
+ "rewards/margins": 0.061921559274196625,
218
+ "rewards/rejected": 0.014959996566176414,
219
+ "step": 140
220
+ },
221
+ {
222
+ "epoch": 0.15,
223
+ "learning_rate": 4.966051976854862e-06,
224
+ "logits/chosen": -2.333808183670044,
225
+ "logits/rejected": -2.2144458293914795,
226
+ "logps/chosen": -111.2592544555664,
227
+ "logps/rejected": -89.93299865722656,
228
+ "loss": 0.6526,
229
+ "rewards/accuracies": 0.762499988079071,
230
+ "rewards/chosen": 0.11892716586589813,
231
+ "rewards/margins": 0.08827908337116241,
232
+ "rewards/rejected": 0.030648082494735718,
233
+ "step": 150
234
+ },
235
+ {
236
+ "epoch": 0.16,
237
+ "learning_rate": 4.950485257902782e-06,
238
+ "logits/chosen": -2.209681749343872,
239
+ "logits/rejected": -2.1354687213897705,
240
+ "logps/chosen": -122.34986877441406,
241
+ "logps/rejected": -97.61170959472656,
242
+ "loss": 0.6622,
243
+ "rewards/accuracies": 0.637499988079071,
244
+ "rewards/chosen": 0.09915992617607117,
245
+ "rewards/margins": 0.06800667941570282,
246
+ "rewards/rejected": 0.031153246760368347,
247
+ "step": 160
248
+ },
249
+ {
250
+ "epoch": 0.17,
251
+ "learning_rate": 4.932023751439358e-06,
252
+ "logits/chosen": -2.276695489883423,
253
+ "logits/rejected": -2.118220329284668,
254
+ "logps/chosen": -131.77114868164062,
255
+ "logps/rejected": -103.4157943725586,
256
+ "loss": 0.6666,
257
+ "rewards/accuracies": 0.6499999761581421,
258
+ "rewards/chosen": 0.10167907178401947,
259
+ "rewards/margins": 0.06214705854654312,
260
+ "rewards/rejected": 0.039532024413347244,
261
+ "step": 170
262
+ },
263
+ {
264
+ "epoch": 0.18,
265
+ "learning_rate": 4.9106892662627395e-06,
266
+ "logits/chosen": -2.347627878189087,
267
+ "logits/rejected": -2.223806858062744,
268
+ "logps/chosen": -125.82316589355469,
269
+ "logps/rejected": -102.32342529296875,
270
+ "loss": 0.6605,
271
+ "rewards/accuracies": 0.7124999761581421,
272
+ "rewards/chosen": 0.13379618525505066,
273
+ "rewards/margins": 0.07378261536359787,
274
+ "rewards/rejected": 0.06001356244087219,
275
+ "step": 180
276
+ },
277
+ {
278
+ "epoch": 0.19,
279
+ "learning_rate": 4.886507005055149e-06,
280
+ "logits/chosen": -2.299999713897705,
281
+ "logits/rejected": -2.124567985534668,
282
+ "logps/chosen": -135.9125518798828,
283
+ "logps/rejected": -102.34548950195312,
284
+ "loss": 0.6471,
285
+ "rewards/accuracies": 0.762499988079071,
286
+ "rewards/chosen": 0.12664374709129333,
287
+ "rewards/margins": 0.10224989801645279,
288
+ "rewards/rejected": 0.024393849074840546,
289
+ "step": 190
290
+ },
291
+ {
292
+ "epoch": 0.2,
293
+ "learning_rate": 4.859505534610658e-06,
294
+ "logits/chosen": -2.2595176696777344,
295
+ "logits/rejected": -2.167226552963257,
296
+ "logps/chosen": -115.63578033447266,
297
+ "logps/rejected": -97.40113830566406,
298
+ "loss": 0.652,
299
+ "rewards/accuracies": 0.6625000238418579,
300
+ "rewards/chosen": 0.07887722551822662,
301
+ "rewards/margins": 0.09930779039859772,
302
+ "rewards/rejected": -0.020430563017725945,
303
+ "step": 200
304
+ },
305
+ {
306
+ "epoch": 0.2,
307
+ "eval_logits/chosen": -2.1231298446655273,
308
+ "eval_logits/rejected": -2.0235936641693115,
309
+ "eval_logps/chosen": -120.2629165649414,
310
+ "eval_logps/rejected": -98.29754638671875,
311
+ "eval_loss": 0.6594940423965454,
312
+ "eval_rewards/accuracies": 0.6415094137191772,
313
+ "eval_rewards/chosen": 0.04982735216617584,
314
+ "eval_rewards/margins": 0.08182442933320999,
315
+ "eval_rewards/rejected": -0.031997084617614746,
316
+ "eval_runtime": 417.8564,
317
+ "eval_samples_per_second": 1.0,
318
+ "eval_steps_per_second": 0.127,
319
+ "step": 200
320
+ },
321
+ {
322
+ "epoch": 0.21,
323
+ "learning_rate": 4.829716752088893e-06,
324
+ "logits/chosen": -2.202777147293091,
325
+ "logits/rejected": -2.1421408653259277,
326
+ "logps/chosen": -107.44587707519531,
327
+ "logps/rejected": -101.29158020019531,
328
+ "loss": 0.6594,
329
+ "rewards/accuracies": 0.6875,
330
+ "rewards/chosen": 0.03508143126964569,
331
+ "rewards/margins": 0.07955195009708405,
332
+ "rewards/rejected": -0.04447052255272865,
333
+ "step": 210
334
+ },
335
+ {
336
+ "epoch": 0.22,
337
+ "learning_rate": 4.797175847334535e-06,
338
+ "logits/chosen": -2.217074155807495,
339
+ "logits/rejected": -2.0960960388183594,
340
+ "logps/chosen": -130.84152221679688,
341
+ "logps/rejected": -107.61418151855469,
342
+ "loss": 0.6708,
343
+ "rewards/accuracies": 0.6499999761581421,
344
+ "rewards/chosen": 0.0030643828213214874,
345
+ "rewards/margins": 0.05633828788995743,
346
+ "rewards/rejected": -0.05327390506863594,
347
+ "step": 220
348
+ },
349
+ {
350
+ "epoch": 0.23,
351
+ "learning_rate": 4.761921261307143e-06,
352
+ "logits/chosen": -2.2311947345733643,
353
+ "logits/rejected": -2.0887582302093506,
354
+ "logps/chosen": -123.64229583740234,
355
+ "logps/rejected": -103.31034851074219,
356
+ "loss": 0.655,
357
+ "rewards/accuracies": 0.6875,
358
+ "rewards/chosen": -0.009160916320979595,
359
+ "rewards/margins": 0.09632667899131775,
360
+ "rewards/rejected": -0.10548758506774902,
361
+ "step": 230
362
+ },
363
+ {
364
+ "epoch": 0.24,
365
+ "learning_rate": 4.723994640670377e-06,
366
+ "logits/chosen": -2.2253684997558594,
367
+ "logits/rejected": -2.0454909801483154,
368
+ "logps/chosen": -137.86196899414062,
369
+ "logps/rejected": -104.11041259765625,
370
+ "loss": 0.6146,
371
+ "rewards/accuracies": 0.8125,
372
+ "rewards/chosen": -0.011105505749583244,
373
+ "rewards/margins": 0.1803070306777954,
374
+ "rewards/rejected": -0.1914125233888626,
375
+ "step": 240
376
+ },
377
+ {
378
+ "epoch": 0.25,
379
+ "learning_rate": 4.68344078859431e-06,
380
+ "logits/chosen": -2.1830849647521973,
381
+ "logits/rejected": -2.1306838989257812,
382
+ "logps/chosen": -119.04063415527344,
383
+ "logps/rejected": -113.45658111572266,
384
+ "loss": 0.6407,
385
+ "rewards/accuracies": 0.6499999761581421,
386
+ "rewards/chosen": -0.10984460264444351,
387
+ "rewards/margins": 0.1269720494747162,
388
+ "rewards/rejected": -0.2368166148662567,
389
+ "step": 250
390
+ },
391
+ {
392
+ "epoch": 0.26,
393
+ "learning_rate": 4.6403076118289006e-06,
394
+ "logits/chosen": -2.151690721511841,
395
+ "logits/rejected": -2.0012199878692627,
396
+ "logps/chosen": -135.11477661132812,
397
+ "logps/rejected": -110.16157531738281,
398
+ "loss": 0.6217,
399
+ "rewards/accuracies": 0.699999988079071,
400
+ "rewards/chosen": -0.162130668759346,
401
+ "rewards/margins": 0.17287474870681763,
402
+ "rewards/rejected": -0.33500543236732483,
403
+ "step": 260
404
+ },
405
+ {
406
+ "epoch": 0.27,
407
+ "learning_rate": 4.5946460641111776e-06,
408
+ "logits/chosen": -2.3390426635742188,
409
+ "logits/rejected": -2.1155288219451904,
410
+ "logps/chosen": -132.8888397216797,
411
+ "logps/rejected": -110.8713607788086,
412
+ "loss": 0.5927,
413
+ "rewards/accuracies": 0.75,
414
+ "rewards/chosen": -0.19292452931404114,
415
+ "rewards/margins": 0.24748054146766663,
416
+ "rewards/rejected": -0.44040507078170776,
417
+ "step": 270
418
+ },
419
+ {
420
+ "epoch": 0.28,
421
+ "learning_rate": 4.546510085972983e-06,
422
+ "logits/chosen": -2.3495311737060547,
423
+ "logits/rejected": -2.2263104915618896,
424
+ "logps/chosen": -143.91481018066406,
425
+ "logps/rejected": -119.75496673583984,
426
+ "loss": 0.6171,
427
+ "rewards/accuracies": 0.6625000238418579,
428
+ "rewards/chosen": -0.20113949477672577,
429
+ "rewards/margins": 0.19329218566417694,
430
+ "rewards/rejected": -0.3944316804409027,
431
+ "step": 280
432
+ },
433
+ {
434
+ "epoch": 0.29,
435
+ "learning_rate": 4.495956541020376e-06,
436
+ "logits/chosen": -2.3487744331359863,
437
+ "logits/rejected": -2.1971921920776367,
438
+ "logps/chosen": -150.26535034179688,
439
+ "logps/rejected": -133.6148681640625,
440
+ "loss": 0.6038,
441
+ "rewards/accuracies": 0.6625000238418579,
442
+ "rewards/chosen": -0.36308565735816956,
443
+ "rewards/margins": 0.2375185787677765,
444
+ "rewards/rejected": -0.6006041765213013,
445
+ "step": 290
446
+ },
447
+ {
448
+ "epoch": 0.3,
449
+ "learning_rate": 4.443045148759978e-06,
450
+ "logits/chosen": -2.3486955165863037,
451
+ "logits/rejected": -2.1962506771087646,
452
+ "logps/chosen": -159.32064819335938,
453
+ "logps/rejected": -126.43644714355469,
454
+ "loss": 0.5853,
455
+ "rewards/accuracies": 0.7250000238418579,
456
+ "rewards/chosen": -0.44245219230651855,
457
+ "rewards/margins": 0.2853606343269348,
458
+ "rewards/rejected": -0.7278127670288086,
459
+ "step": 300
460
+ },
461
+ {
462
+ "epoch": 0.3,
463
+ "learning_rate": 4.3878384140516025e-06,
464
+ "logits/chosen": -2.3736767768859863,
465
+ "logits/rejected": -2.249598741531372,
466
+ "logps/chosen": -146.68006896972656,
467
+ "logps/rejected": -135.25997924804688,
468
+ "loss": 0.5898,
469
+ "rewards/accuracies": 0.737500011920929,
470
+ "rewards/chosen": -0.41068369150161743,
471
+ "rewards/margins": 0.26723065972328186,
472
+ "rewards/rejected": -0.6779143214225769,
473
+ "step": 310
474
+ },
475
+ {
476
+ "epoch": 0.31,
477
+ "learning_rate": 4.330401553270522e-06,
478
+ "logits/chosen": -2.298905372619629,
479
+ "logits/rejected": -2.1949081420898438,
480
+ "logps/chosen": -146.1105194091797,
481
+ "logps/rejected": -136.11727905273438,
482
+ "loss": 0.6003,
483
+ "rewards/accuracies": 0.7124999761581421,
484
+ "rewards/chosen": -0.551005482673645,
485
+ "rewards/margins": 0.2657201588153839,
486
+ "rewards/rejected": -0.8167255520820618,
487
+ "step": 320
488
+ },
489
+ {
490
+ "epoch": 0.32,
491
+ "learning_rate": 4.2708024172665795e-06,
492
+ "logits/chosen": -2.402360439300537,
493
+ "logits/rejected": -2.2175159454345703,
494
+ "logps/chosen": -150.5807342529297,
495
+ "logps/rejected": -125.05631256103516,
496
+ "loss": 0.5465,
497
+ "rewards/accuracies": 0.824999988079071,
498
+ "rewards/chosen": -0.7037911415100098,
499
+ "rewards/margins": 0.3860488533973694,
500
+ "rewards/rejected": -1.089840054512024,
501
+ "step": 330
502
+ },
503
+ {
504
+ "epoch": 0.33,
505
+ "learning_rate": 4.209111411211174e-06,
506
+ "logits/chosen": -2.413839340209961,
507
+ "logits/rejected": -2.253542423248291,
508
+ "logps/chosen": -142.4720001220703,
509
+ "logps/rejected": -127.50254821777344,
510
+ "loss": 0.5395,
511
+ "rewards/accuracies": 0.75,
512
+ "rewards/chosen": -0.7327712178230286,
513
+ "rewards/margins": 0.4077509045600891,
514
+ "rewards/rejected": -1.1405221223831177,
515
+ "step": 340
516
+ },
517
+ {
518
+ "epoch": 0.34,
519
+ "learning_rate": 4.145401411426788e-06,
520
+ "logits/chosen": -2.4574408531188965,
521
+ "logits/rejected": -2.3272013664245605,
522
+ "logps/chosen": -143.1080322265625,
523
+ "logps/rejected": -130.01637268066406,
524
+ "loss": 0.5795,
525
+ "rewards/accuracies": 0.7250000238418579,
526
+ "rewards/chosen": -0.7288345098495483,
527
+ "rewards/margins": 0.36709967255592346,
528
+ "rewards/rejected": -1.0959341526031494,
529
+ "step": 350
530
+ },
531
+ {
532
+ "epoch": 0.35,
533
+ "learning_rate": 4.079747679297314e-06,
534
+ "logits/chosen": -2.295055627822876,
535
+ "logits/rejected": -2.179664134979248,
536
+ "logps/chosen": -157.7494354248047,
537
+ "logps/rejected": -137.73941040039062,
538
+ "loss": 0.5973,
539
+ "rewards/accuracies": 0.637499988079071,
540
+ "rewards/chosen": -0.8279803395271301,
541
+ "rewards/margins": 0.3018009066581726,
542
+ "rewards/rejected": -1.1297812461853027,
543
+ "step": 360
544
+ },
545
+ {
546
+ "epoch": 0.36,
547
+ "learning_rate": 4.012227772360889e-06,
548
+ "logits/chosen": -2.2948107719421387,
549
+ "logits/rejected": -2.1263232231140137,
550
+ "logps/chosen": -167.21339416503906,
551
+ "logps/rejected": -150.82669067382812,
552
+ "loss": 0.5349,
553
+ "rewards/accuracies": 0.7875000238418579,
554
+ "rewards/chosen": -0.9286147356033325,
555
+ "rewards/margins": 0.44435009360313416,
556
+ "rewards/rejected": -1.3729647397994995,
557
+ "step": 370
558
+ },
559
+ {
560
+ "epoch": 0.37,
561
+ "learning_rate": 3.942921452690245e-06,
562
+ "logits/chosen": -2.3513195514678955,
563
+ "logits/rejected": -2.25927734375,
564
+ "logps/chosen": -173.61428833007812,
565
+ "logps/rejected": -163.9345703125,
566
+ "loss": 0.6164,
567
+ "rewards/accuracies": 0.7124999761581421,
568
+ "rewards/chosen": -1.3186113834381104,
569
+ "rewards/margins": 0.24015231430530548,
570
+ "rewards/rejected": -1.558763861656189,
571
+ "step": 380
572
+ },
573
+ {
574
+ "epoch": 0.38,
575
+ "learning_rate": 3.871910592668817e-06,
576
+ "logits/chosen": -2.433640480041504,
577
+ "logits/rejected": -2.300931215286255,
578
+ "logps/chosen": -170.8921356201172,
579
+ "logps/rejected": -165.83462524414062,
580
+ "loss": 0.503,
581
+ "rewards/accuracies": 0.8125,
582
+ "rewards/chosen": -1.420196294784546,
583
+ "rewards/margins": 0.5389910340309143,
584
+ "rewards/rejected": -1.9591872692108154,
585
+ "step": 390
586
+ },
587
+ {
588
+ "epoch": 0.39,
589
+ "learning_rate": 3.799279078273921e-06,
590
+ "logits/chosen": -2.3466134071350098,
591
+ "logits/rejected": -2.157196044921875,
592
+ "logps/chosen": -177.853271484375,
593
+ "logps/rejected": -158.73776245117188,
594
+ "loss": 0.4905,
595
+ "rewards/accuracies": 0.762499988079071,
596
+ "rewards/chosen": -1.5086350440979004,
597
+ "rewards/margins": 0.6321147680282593,
598
+ "rewards/rejected": -2.140749454498291,
599
+ "step": 400
600
+ },
601
+ {
602
+ "epoch": 0.39,
603
+ "eval_logits/chosen": -2.195096015930176,
604
+ "eval_logits/rejected": -2.0950398445129395,
605
+ "eval_logps/chosen": -177.19459533691406,
606
+ "eval_logps/rejected": -168.9884490966797,
607
+ "eval_loss": 0.5550708174705505,
608
+ "eval_rewards/accuracies": 0.6957547068595886,
609
+ "eval_rewards/chosen": -1.6581227779388428,
610
+ "eval_rewards/margins": 0.4946018159389496,
611
+ "eval_rewards/rejected": -2.152724504470825,
612
+ "eval_runtime": 417.866,
613
+ "eval_samples_per_second": 1.0,
614
+ "eval_steps_per_second": 0.127,
615
+ "step": 400
616
+ },
617
+ {
618
+ "epoch": 0.4,
619
+ "learning_rate": 3.725112709981249e-06,
620
+ "logits/chosen": -2.20538592338562,
621
+ "logits/rejected": -2.059528112411499,
622
+ "logps/chosen": -192.91726684570312,
623
+ "logps/rejected": -185.90341186523438,
624
+ "loss": 0.5449,
625
+ "rewards/accuracies": 0.737500011920929,
626
+ "rewards/chosen": -1.8029924631118774,
627
+ "rewards/margins": 0.5775827765464783,
628
+ "rewards/rejected": -2.38057541847229,
629
+ "step": 410
630
+ },
631
+ {
632
+ "epoch": 0.41,
633
+ "learning_rate": 3.649499101407737e-06,
634
+ "logits/chosen": -2.363370895385742,
635
+ "logits/rejected": -2.2048511505126953,
636
+ "logps/chosen": -224.8810577392578,
637
+ "logps/rejected": -214.8511505126953,
638
+ "loss": 0.5825,
639
+ "rewards/accuracies": 0.6875,
640
+ "rewards/chosen": -2.7866973876953125,
641
+ "rewards/margins": 0.5168424844741821,
642
+ "rewards/rejected": -3.303539991378784,
643
+ "step": 420
644
+ },
645
+ {
646
+ "epoch": 0.42,
647
+ "learning_rate": 3.5725275758125564e-06,
648
+ "logits/chosen": -2.27677059173584,
649
+ "logits/rejected": -2.1246485710144043,
650
+ "logps/chosen": -200.77066040039062,
651
+ "logps/rejected": -190.68533325195312,
652
+ "loss": 0.4805,
653
+ "rewards/accuracies": 0.800000011920929,
654
+ "rewards/chosen": -2.6391139030456543,
655
+ "rewards/margins": 0.7239618897438049,
656
+ "rewards/rejected": -3.3630757331848145,
657
+ "step": 430
658
+ },
659
+ {
660
+ "epoch": 0.43,
661
+ "learning_rate": 3.494289060578478e-06,
662
+ "logits/chosen": -2.3822944164276123,
663
+ "logits/rejected": -2.2915594577789307,
664
+ "logps/chosen": -169.66128540039062,
665
+ "logps/rejected": -174.19607543945312,
666
+ "loss": 0.4812,
667
+ "rewards/accuracies": 0.699999988079071,
668
+ "rewards/chosen": -1.6458194255828857,
669
+ "rewards/margins": 0.7625138163566589,
670
+ "rewards/rejected": -2.4083335399627686,
671
+ "step": 440
672
+ },
673
+ {
674
+ "epoch": 0.44,
675
+ "learning_rate": 3.414875979798272e-06,
676
+ "logits/chosen": -2.327730178833008,
677
+ "logits/rejected": -2.2062036991119385,
678
+ "logps/chosen": -194.1064453125,
679
+ "logps/rejected": -193.63107299804688,
680
+ "loss": 0.4394,
681
+ "rewards/accuracies": 0.8374999761581421,
682
+ "rewards/chosen": -1.4300081729888916,
683
+ "rewards/margins": 0.892192006111145,
684
+ "rewards/rejected": -2.322200059890747,
685
+ "step": 450
686
+ },
687
+ {
688
+ "epoch": 0.45,
689
+ "learning_rate": 3.3343821450930196e-06,
690
+ "logits/chosen": -2.454336166381836,
691
+ "logits/rejected": -2.2937960624694824,
692
+ "logps/chosen": -201.33657836914062,
693
+ "logps/rejected": -188.39266967773438,
694
+ "loss": 0.4758,
695
+ "rewards/accuracies": 0.75,
696
+ "rewards/chosen": -2.0642800331115723,
697
+ "rewards/margins": 0.8783187866210938,
698
+ "rewards/rejected": -2.942598819732666,
699
+ "step": 460
700
+ },
701
+ {
702
+ "epoch": 0.46,
703
+ "learning_rate": 3.252902644791325e-06,
704
+ "logits/chosen": -2.188424587249756,
705
+ "logits/rejected": -2.041105031967163,
706
+ "logps/chosen": -214.7936248779297,
707
+ "logps/rejected": -222.3824462890625,
708
+ "loss": 0.4536,
709
+ "rewards/accuracies": 0.800000011920929,
710
+ "rewards/chosen": -2.7383971214294434,
711
+ "rewards/margins": 0.9385073781013489,
712
+ "rewards/rejected": -3.6769042015075684,
713
+ "step": 470
714
+ },
715
+ {
716
+ "epoch": 0.47,
717
+ "learning_rate": 3.170533731600339e-06,
718
+ "logits/chosen": -2.3067922592163086,
719
+ "logits/rejected": -2.1540229320526123,
720
+ "logps/chosen": -204.9595184326172,
721
+ "logps/rejected": -204.6565704345703,
722
+ "loss": 0.4887,
723
+ "rewards/accuracies": 0.75,
724
+ "rewards/chosen": -2.3199477195739746,
725
+ "rewards/margins": 0.8953973650932312,
726
+ "rewards/rejected": -3.2153449058532715,
727
+ "step": 480
728
+ },
729
+ {
730
+ "epoch": 0.48,
731
+ "learning_rate": 3.0873727089012816e-06,
732
+ "logits/chosen": -2.4507811069488525,
733
+ "logits/rejected": -2.357919931411743,
734
+ "logps/chosen": -212.8636932373047,
735
+ "logps/rejected": -208.5207977294922,
736
+ "loss": 0.5393,
737
+ "rewards/accuracies": 0.75,
738
+ "rewards/chosen": -2.4198460578918457,
739
+ "rewards/margins": 0.7457529306411743,
740
+ "rewards/rejected": -3.1655986309051514,
741
+ "step": 490
742
+ },
743
+ {
744
+ "epoch": 0.49,
745
+ "learning_rate": 3.0035178158038026e-06,
746
+ "logits/chosen": -2.2298638820648193,
747
+ "logits/rejected": -2.035947322845459,
748
+ "logps/chosen": -217.26846313476562,
749
+ "logps/rejected": -209.74526977539062,
750
+ "loss": 0.4886,
751
+ "rewards/accuracies": 0.7250000238418579,
752
+ "rewards/chosen": -2.4076380729675293,
753
+ "rewards/margins": 0.9488040208816528,
754
+ "rewards/rejected": -3.3564422130584717,
755
+ "step": 500
756
+ },
757
+ {
758
+ "epoch": 0.5,
759
+ "learning_rate": 2.919068111094937e-06,
760
+ "logits/chosen": -2.3187146186828613,
761
+ "logits/rejected": -2.193861484527588,
762
+ "logps/chosen": -185.0901641845703,
763
+ "logps/rejected": -195.90634155273438,
764
+ "loss": 0.552,
765
+ "rewards/accuracies": 0.6875,
766
+ "rewards/chosen": -2.132998466491699,
767
+ "rewards/margins": 0.827830970287323,
768
+ "rewards/rejected": -2.960829257965088,
769
+ "step": 510
770
+ },
771
+ {
772
+ "epoch": 0.51,
773
+ "learning_rate": 2.8341233562197895e-06,
774
+ "logits/chosen": -2.3116376399993896,
775
+ "logits/rejected": -2.246950626373291,
776
+ "logps/chosen": -172.0282440185547,
777
+ "logps/rejected": -177.47348022460938,
778
+ "loss": 0.5152,
779
+ "rewards/accuracies": 0.762499988079071,
780
+ "rewards/chosen": -1.7447055578231812,
781
+ "rewards/margins": 0.754051685333252,
782
+ "rewards/rejected": -2.4987568855285645,
783
+ "step": 520
784
+ },
785
+ {
786
+ "epoch": 0.52,
787
+ "learning_rate": 2.7487838974321352e-06,
788
+ "logits/chosen": -2.2577805519104004,
789
+ "logits/rejected": -2.143658399581909,
790
+ "logps/chosen": -178.53317260742188,
791
+ "logps/rejected": -182.5507049560547,
792
+ "loss": 0.4719,
793
+ "rewards/accuracies": 0.800000011920929,
794
+ "rewards/chosen": -1.789046287536621,
795
+ "rewards/margins": 0.8771921396255493,
796
+ "rewards/rejected": -2.666238307952881,
797
+ "step": 530
798
+ },
799
+ {
800
+ "epoch": 0.53,
801
+ "learning_rate": 2.6631505472541997e-06,
802
+ "logits/chosen": -2.2621216773986816,
803
+ "logits/rejected": -2.1167335510253906,
804
+ "logps/chosen": -193.0259246826172,
805
+ "logps/rejected": -198.51705932617188,
806
+ "loss": 0.4442,
807
+ "rewards/accuracies": 0.762499988079071,
808
+ "rewards/chosen": -1.8945804834365845,
809
+ "rewards/margins": 1.0066120624542236,
810
+ "rewards/rejected": -2.9011926651000977,
811
+ "step": 540
812
+ },
813
+ {
814
+ "epoch": 0.54,
815
+ "learning_rate": 2.5773244653856173e-06,
816
+ "logits/chosen": -2.2354609966278076,
817
+ "logits/rejected": -2.1045310497283936,
818
+ "logps/chosen": -205.27163696289062,
819
+ "logps/rejected": -207.8455810546875,
820
+ "loss": 0.4791,
821
+ "rewards/accuracies": 0.7250000238418579,
822
+ "rewards/chosen": -2.161865711212158,
823
+ "rewards/margins": 0.9317368268966675,
824
+ "rewards/rejected": -3.0936026573181152,
825
+ "step": 550
826
+ },
827
+ {
828
+ "epoch": 0.55,
829
+ "learning_rate": 2.4914070392022717e-06,
830
+ "logits/chosen": -2.274534225463867,
831
+ "logits/rejected": -2.158811569213867,
832
+ "logps/chosen": -208.08349609375,
833
+ "logps/rejected": -211.2762451171875,
834
+ "loss": 0.4978,
835
+ "rewards/accuracies": 0.75,
836
+ "rewards/chosen": -2.0876078605651855,
837
+ "rewards/margins": 0.8888559341430664,
838
+ "rewards/rejected": -2.976463794708252,
839
+ "step": 560
840
+ },
841
+ {
842
+ "epoch": 0.56,
843
+ "learning_rate": 2.4054997639861778e-06,
844
+ "logits/chosen": -2.1874241828918457,
845
+ "logits/rejected": -2.0126781463623047,
846
+ "logps/chosen": -207.1369171142578,
847
+ "logps/rejected": -207.59768676757812,
848
+ "loss": 0.4732,
849
+ "rewards/accuracies": 0.800000011920929,
850
+ "rewards/chosen": -2.0929489135742188,
851
+ "rewards/margins": 1.14609694480896,
852
+ "rewards/rejected": -3.2390456199645996,
853
+ "step": 570
854
+ },
855
+ {
856
+ "epoch": 0.57,
857
+ "learning_rate": 2.3197041230278905e-06,
858
+ "logits/chosen": -2.3066487312316895,
859
+ "logits/rejected": -2.18741512298584,
860
+ "logps/chosen": -204.58642578125,
861
+ "logps/rejected": -222.44973754882812,
862
+ "loss": 0.427,
863
+ "rewards/accuracies": 0.8125,
864
+ "rewards/chosen": -2.2493700981140137,
865
+ "rewards/margins": 1.2541824579238892,
866
+ "rewards/rejected": -3.5035526752471924,
867
+ "step": 580
868
+ },
869
+ {
870
+ "epoch": 0.58,
871
+ "learning_rate": 2.234121467743082e-06,
872
+ "logits/chosen": -2.3349661827087402,
873
+ "logits/rejected": -2.252894163131714,
874
+ "logps/chosen": -208.3444061279297,
875
+ "logps/rejected": -211.37295532226562,
876
+ "loss": 0.5968,
877
+ "rewards/accuracies": 0.7250000238418579,
878
+ "rewards/chosen": -2.635042905807495,
879
+ "rewards/margins": 0.8392454981803894,
880
+ "rewards/rejected": -3.4742884635925293,
881
+ "step": 590
882
+ },
883
+ {
884
+ "epoch": 0.59,
885
+ "learning_rate": 2.148852897944905e-06,
886
+ "logits/chosen": -2.3977198600769043,
887
+ "logits/rejected": -2.2428812980651855,
888
+ "logps/chosen": -208.42538452148438,
889
+ "logps/rejected": -219.8905487060547,
890
+ "loss": 0.4249,
891
+ "rewards/accuracies": 0.8374999761581421,
892
+ "rewards/chosen": -2.645089626312256,
893
+ "rewards/margins": 1.1129385232925415,
894
+ "rewards/rejected": -3.758028030395508,
895
+ "step": 600
896
+ },
897
+ {
898
+ "epoch": 0.59,
899
+ "eval_logits/chosen": -2.17734956741333,
900
+ "eval_logits/rejected": -2.078249454498291,
901
+ "eval_logps/chosen": -237.10446166992188,
902
+ "eval_logps/rejected": -241.38665771484375,
903
+ "eval_loss": 0.5327094793319702,
904
+ "eval_rewards/accuracies": 0.724056601524353,
905
+ "eval_rewards/chosen": -3.455418348312378,
906
+ "eval_rewards/margins": 0.8692519068717957,
907
+ "eval_rewards/rejected": -4.324670314788818,
908
+ "eval_runtime": 423.7708,
909
+ "eval_samples_per_second": 0.986,
910
+ "eval_steps_per_second": 0.125,
911
+ "step": 600
912
+ },
913
+ {
914
+ "epoch": 0.6,
915
+ "learning_rate": 2.063999142413574e-06,
916
+ "logits/chosen": -2.2915334701538086,
917
+ "logits/rejected": -2.1494946479797363,
918
+ "logps/chosen": -247.0482177734375,
919
+ "logps/rejected": -239.52462768554688,
920
+ "loss": 0.489,
921
+ "rewards/accuracies": 0.762499988079071,
922
+ "rewards/chosen": -3.5384364128112793,
923
+ "rewards/margins": 0.9590060114860535,
924
+ "rewards/rejected": -4.497443199157715,
925
+ "step": 610
926
+ },
927
+ {
928
+ "epoch": 0.61,
929
+ "learning_rate": 1.9796604399042547e-06,
930
+ "logits/chosen": -2.3757712841033936,
931
+ "logits/rejected": -2.2203211784362793,
932
+ "logps/chosen": -268.9779052734375,
933
+ "logps/rejected": -274.21771240234375,
934
+ "loss": 0.4595,
935
+ "rewards/accuracies": 0.7875000238418579,
936
+ "rewards/chosen": -4.319927215576172,
937
+ "rewards/margins": 1.1538012027740479,
938
+ "rewards/rejected": -5.473728179931641,
939
+ "step": 620
940
+ },
941
+ {
942
+ "epoch": 0.62,
943
+ "learning_rate": 1.8959364207338216e-06,
944
+ "logits/chosen": -2.345416784286499,
945
+ "logits/rejected": -2.180387020111084,
946
+ "logps/chosen": -245.0520477294922,
947
+ "logps/rejected": -258.79498291015625,
948
+ "loss": 0.4835,
949
+ "rewards/accuracies": 0.737500011920929,
950
+ "rewards/chosen": -4.156452655792236,
951
+ "rewards/margins": 1.1458499431610107,
952
+ "rewards/rejected": -5.302302360534668,
953
+ "step": 630
954
+ },
955
+ {
956
+ "epoch": 0.63,
957
+ "learning_rate": 1.8129259890863825e-06,
958
+ "logits/chosen": -2.3113701343536377,
959
+ "logits/rejected": -2.200329303741455,
960
+ "logps/chosen": -258.2785949707031,
961
+ "logps/rejected": -273.8367614746094,
962
+ "loss": 0.5494,
963
+ "rewards/accuracies": 0.675000011920929,
964
+ "rewards/chosen": -3.8280506134033203,
965
+ "rewards/margins": 1.0454813241958618,
966
+ "rewards/rejected": -4.873531818389893,
967
+ "step": 640
968
+ },
969
+ {
970
+ "epoch": 0.64,
971
+ "learning_rate": 1.7307272061765738e-06,
972
+ "logits/chosen": -2.332291841506958,
973
+ "logits/rejected": -2.238374948501587,
974
+ "logps/chosen": -245.2693328857422,
975
+ "logps/rejected": -258.13922119140625,
976
+ "loss": 0.4966,
977
+ "rewards/accuracies": 0.762499988079071,
978
+ "rewards/chosen": -3.4873454570770264,
979
+ "rewards/margins": 1.1437709331512451,
980
+ "rewards/rejected": -4.6311163902282715,
981
+ "step": 650
982
+ },
983
+ {
984
+ "epoch": 0.65,
985
+ "learning_rate": 1.649437174408685e-06,
986
+ "logits/chosen": -2.2653393745422363,
987
+ "logits/rejected": -2.1328892707824707,
988
+ "logps/chosen": -225.3964385986328,
989
+ "logps/rejected": -243.22384643554688,
990
+ "loss": 0.5442,
991
+ "rewards/accuracies": 0.75,
992
+ "rewards/chosen": -2.7976624965667725,
993
+ "rewards/margins": 1.2127068042755127,
994
+ "rewards/rejected": -4.010369300842285,
995
+ "step": 660
996
+ },
997
+ {
998
+ "epoch": 0.66,
999
+ "learning_rate": 1.569151922668422e-06,
1000
+ "logits/chosen": -2.367928981781006,
1001
+ "logits/rejected": -2.253605365753174,
1002
+ "logps/chosen": -201.37025451660156,
1003
+ "logps/rejected": -218.9671630859375,
1004
+ "loss": 0.5362,
1005
+ "rewards/accuracies": 0.7749999761581421,
1006
+ "rewards/chosen": -2.52730131149292,
1007
+ "rewards/margins": 1.1274640560150146,
1008
+ "rewards/rejected": -3.6547648906707764,
1009
+ "step": 670
1010
+ },
1011
+ {
1012
+ "epoch": 0.67,
1013
+ "learning_rate": 1.4899662928828428e-06,
1014
+ "logits/chosen": -2.309407949447632,
1015
+ "logits/rejected": -2.1246845722198486,
1016
+ "logps/chosen": -186.25376892089844,
1017
+ "logps/rejected": -205.3202667236328,
1018
+ "loss": 0.4534,
1019
+ "rewards/accuracies": 0.8125,
1020
+ "rewards/chosen": -2.198615550994873,
1021
+ "rewards/margins": 1.3432915210723877,
1022
+ "rewards/rejected": -3.5419068336486816,
1023
+ "step": 680
1024
+ },
1025
+ {
1026
+ "epoch": 0.68,
1027
+ "learning_rate": 1.4119738279824507e-06,
1028
+ "logits/chosen": -2.233764886856079,
1029
+ "logits/rejected": -2.0911850929260254,
1030
+ "logps/chosen": -194.0051727294922,
1031
+ "logps/rejected": -202.19174194335938,
1032
+ "loss": 0.5816,
1033
+ "rewards/accuracies": 0.7250000238418579,
1034
+ "rewards/chosen": -2.272157907485962,
1035
+ "rewards/margins": 0.9171239137649536,
1036
+ "rewards/rejected": -3.189281463623047,
1037
+ "step": 690
1038
+ },
1039
+ {
1040
+ "epoch": 0.69,
1041
+ "learning_rate": 1.3352666613978152e-06,
1042
+ "logits/chosen": -2.1566410064697266,
1043
+ "logits/rejected": -2.075209379196167,
1044
+ "logps/chosen": -198.47071838378906,
1045
+ "logps/rejected": -202.29379272460938,
1046
+ "loss": 0.5355,
1047
+ "rewards/accuracies": 0.7250000238418579,
1048
+ "rewards/chosen": -2.325533151626587,
1049
+ "rewards/margins": 0.8646724820137024,
1050
+ "rewards/rejected": -3.1902058124542236,
1051
+ "step": 700
1052
+ },
1053
+ {
1054
+ "epoch": 0.7,
1055
+ "learning_rate": 1.2599354082212523e-06,
1056
+ "logits/chosen": -2.222766876220703,
1057
+ "logits/rejected": -2.0928432941436768,
1058
+ "logps/chosen": -190.0797119140625,
1059
+ "logps/rejected": -198.69985961914062,
1060
+ "loss": 0.5152,
1061
+ "rewards/accuracies": 0.737500011920929,
1062
+ "rewards/chosen": -2.1240830421447754,
1063
+ "rewards/margins": 0.9723888635635376,
1064
+ "rewards/rejected": -3.0964715480804443,
1065
+ "step": 710
1066
+ },
1067
+ {
1068
+ "epoch": 0.71,
1069
+ "learning_rate": 1.186069058162127e-06,
1070
+ "logits/chosen": -2.3559908866882324,
1071
+ "logits/rejected": -2.249436140060425,
1072
+ "logps/chosen": -197.04818725585938,
1073
+ "logps/rejected": -201.38693237304688,
1074
+ "loss": 0.5537,
1075
+ "rewards/accuracies": 0.737500011920929,
1076
+ "rewards/chosen": -2.438502788543701,
1077
+ "rewards/margins": 0.8104500770568848,
1078
+ "rewards/rejected": -3.248952865600586,
1079
+ "step": 720
1080
+ },
1081
+ {
1082
+ "epoch": 0.72,
1083
+ "learning_rate": 1.113754870422254e-06,
1084
+ "logits/chosen": -2.40106201171875,
1085
+ "logits/rejected": -2.2911598682403564,
1086
+ "logps/chosen": -198.04409790039062,
1087
+ "logps/rejected": -214.9007568359375,
1088
+ "loss": 0.5454,
1089
+ "rewards/accuracies": 0.75,
1090
+ "rewards/chosen": -2.3586318492889404,
1091
+ "rewards/margins": 0.8204809427261353,
1092
+ "rewards/rejected": -3.179112672805786,
1093
+ "step": 730
1094
+ },
1095
+ {
1096
+ "epoch": 0.73,
1097
+ "learning_rate": 1.0430782706155545e-06,
1098
+ "logits/chosen": -2.4164676666259766,
1099
+ "logits/rejected": -2.2673213481903076,
1100
+ "logps/chosen": -204.253173828125,
1101
+ "logps/rejected": -209.43753051757812,
1102
+ "loss": 0.45,
1103
+ "rewards/accuracies": 0.7250000238418579,
1104
+ "rewards/chosen": -2.2196271419525146,
1105
+ "rewards/margins": 1.149065613746643,
1106
+ "rewards/rejected": -3.3686928749084473,
1107
+ "step": 740
1108
+ },
1109
+ {
1110
+ "epoch": 0.74,
1111
+ "learning_rate": 9.741227498537615e-07,
1112
+ "logits/chosen": -2.457432270050049,
1113
+ "logits/rejected": -2.321898937225342,
1114
+ "logps/chosen": -194.0103302001953,
1115
+ "logps/rejected": -204.46145629882812,
1116
+ "loss": 0.4596,
1117
+ "rewards/accuracies": 0.7749999761581421,
1118
+ "rewards/chosen": -2.4977972507476807,
1119
+ "rewards/margins": 1.0296275615692139,
1120
+ "rewards/rejected": -3.5274243354797363,
1121
+ "step": 750
1122
+ },
1123
+ {
1124
+ "epoch": 0.75,
1125
+ "learning_rate": 9.069697661173668e-07,
1126
+ "logits/chosen": -2.3087573051452637,
1127
+ "logits/rejected": -2.210942268371582,
1128
+ "logps/chosen": -205.71200561523438,
1129
+ "logps/rejected": -217.53921508789062,
1130
+ "loss": 0.6425,
1131
+ "rewards/accuracies": 0.6875,
1132
+ "rewards/chosen": -2.667379379272461,
1133
+ "rewards/margins": 0.8116022348403931,
1134
+ "rewards/rejected": -3.4789810180664062,
1135
+ "step": 760
1136
+ },
1137
+ {
1138
+ "epoch": 0.76,
1139
+ "learning_rate": 8.416986480283434e-07,
1140
+ "logits/chosen": -2.2393643856048584,
1141
+ "logits/rejected": -2.096648693084717,
1142
+ "logps/chosen": -205.7020721435547,
1143
+ "logps/rejected": -220.5606689453125,
1144
+ "loss": 0.5619,
1145
+ "rewards/accuracies": 0.699999988079071,
1146
+ "rewards/chosen": -2.711803913116455,
1147
+ "rewards/margins": 1.0723652839660645,
1148
+ "rewards/rejected": -3.7841694355010986,
1149
+ "step": 770
1150
+ },
1151
+ {
1152
+ "epoch": 0.77,
1153
+ "learning_rate": 7.783865011382876e-07,
1154
+ "logits/chosen": -2.2971372604370117,
1155
+ "logits/rejected": -2.1558918952941895,
1156
+ "logps/chosen": -213.41958618164062,
1157
+ "logps/rejected": -214.00326538085938,
1158
+ "loss": 0.5043,
1159
+ "rewards/accuracies": 0.699999988079071,
1160
+ "rewards/chosen": -2.363002300262451,
1161
+ "rewards/margins": 1.088847279548645,
1162
+ "rewards/rejected": -3.4518496990203857,
1163
+ "step": 780
1164
+ },
1165
+ {
1166
+ "epoch": 0.78,
1167
+ "learning_rate": 7.171081168427205e-07,
1168
+ "logits/chosen": -2.331343173980713,
1169
+ "logits/rejected": -2.165480613708496,
1170
+ "logps/chosen": -221.4240264892578,
1171
+ "logps/rejected": -226.91650390625,
1172
+ "loss": 0.473,
1173
+ "rewards/accuracies": 0.762499988079071,
1174
+ "rewards/chosen": -2.2688238620758057,
1175
+ "rewards/margins": 1.2109057903289795,
1176
+ "rewards/rejected": -3.479729413986206,
1177
+ "step": 790
1178
+ },
1179
+ {
1180
+ "epoch": 0.79,
1181
+ "learning_rate": 6.579358840291064e-07,
1182
+ "logits/chosen": -2.2774598598480225,
1183
+ "logits/rejected": -2.1744918823242188,
1184
+ "logps/chosen": -209.7314453125,
1185
+ "logps/rejected": -224.2617645263672,
1186
+ "loss": 0.5858,
1187
+ "rewards/accuracies": 0.737500011920929,
1188
+ "rewards/chosen": -2.5639281272888184,
1189
+ "rewards/margins": 0.7768001556396484,
1190
+ "rewards/rejected": -3.340728282928467,
1191
+ "step": 800
1192
+ },
1193
+ {
1194
+ "epoch": 0.79,
1195
+ "eval_logits/chosen": -2.159109354019165,
1196
+ "eval_logits/rejected": -2.058640241622925,
1197
+ "eval_logps/chosen": -205.4982452392578,
1198
+ "eval_logps/rejected": -212.2717742919922,
1199
+ "eval_loss": 0.5206710696220398,
1200
+ "eval_rewards/accuracies": 0.7334905862808228,
1201
+ "eval_rewards/chosen": -2.507232666015625,
1202
+ "eval_rewards/margins": 0.9439911842346191,
1203
+ "eval_rewards/rejected": -3.4512243270874023,
1204
+ "eval_runtime": 423.3516,
1205
+ "eval_samples_per_second": 0.987,
1206
+ "eval_steps_per_second": 0.125,
1207
+ "step": 800
1208
+ },
1209
+ {
1210
+ "epoch": 0.8,
1211
+ "learning_rate": 6.00939703563006e-07,
1212
+ "logits/chosen": -2.3462395668029785,
1213
+ "logits/rejected": -2.2609333992004395,
1214
+ "logps/chosen": -191.94070434570312,
1215
+ "logps/rejected": -208.1775665283203,
1216
+ "loss": 0.5793,
1217
+ "rewards/accuracies": 0.699999988079071,
1218
+ "rewards/chosen": -2.5009541511535645,
1219
+ "rewards/margins": 0.8846192359924316,
1220
+ "rewards/rejected": -3.385573625564575,
1221
+ "step": 810
1222
+ },
1223
+ {
1224
+ "epoch": 0.81,
1225
+ "learning_rate": 5.461869057133412e-07,
1226
+ "logits/chosen": -2.386518955230713,
1227
+ "logits/rejected": -2.2898330688476562,
1228
+ "logps/chosen": -198.1256866455078,
1229
+ "logps/rejected": -216.5961456298828,
1230
+ "loss": 0.4895,
1231
+ "rewards/accuracies": 0.699999988079071,
1232
+ "rewards/chosen": -2.2799556255340576,
1233
+ "rewards/margins": 1.0353368520736694,
1234
+ "rewards/rejected": -3.3152928352355957,
1235
+ "step": 820
1236
+ },
1237
+ {
1238
+ "epoch": 0.82,
1239
+ "learning_rate": 4.937421706143497e-07,
1240
+ "logits/chosen": -2.2601094245910645,
1241
+ "logits/rejected": -2.1199710369110107,
1242
+ "logps/chosen": -208.54898071289062,
1243
+ "logps/rejected": -210.0823974609375,
1244
+ "loss": 0.5261,
1245
+ "rewards/accuracies": 0.75,
1246
+ "rewards/chosen": -2.4491305351257324,
1247
+ "rewards/margins": 0.9635556936264038,
1248
+ "rewards/rejected": -3.412686586380005,
1249
+ "step": 830
1250
+ },
1251
+ {
1252
+ "epoch": 0.83,
1253
+ "learning_rate": 4.43667451858166e-07,
1254
+ "logits/chosen": -2.262453079223633,
1255
+ "logits/rejected": -2.0967936515808105,
1256
+ "logps/chosen": -188.5206298828125,
1257
+ "logps/rejected": -202.87240600585938,
1258
+ "loss": 0.4361,
1259
+ "rewards/accuracies": 0.7875000238418579,
1260
+ "rewards/chosen": -2.2963976860046387,
1261
+ "rewards/margins": 1.180870771408081,
1262
+ "rewards/rejected": -3.4772682189941406,
1263
+ "step": 840
1264
+ },
1265
+ {
1266
+ "epoch": 0.84,
1267
+ "learning_rate": 3.9602190330830484e-07,
1268
+ "logits/chosen": -2.2342655658721924,
1269
+ "logits/rejected": -2.130017042160034,
1270
+ "logps/chosen": -200.30343627929688,
1271
+ "logps/rejected": -220.6678466796875,
1272
+ "loss": 0.4885,
1273
+ "rewards/accuracies": 0.7124999761581421,
1274
+ "rewards/chosen": -2.533165454864502,
1275
+ "rewards/margins": 1.0824253559112549,
1276
+ "rewards/rejected": -3.6155905723571777,
1277
+ "step": 850
1278
+ },
1279
+ {
1280
+ "epoch": 0.85,
1281
+ "learning_rate": 3.5086180922049295e-07,
1282
+ "logits/chosen": -2.3764655590057373,
1283
+ "logits/rejected": -2.235109329223633,
1284
+ "logps/chosen": -224.97146606445312,
1285
+ "logps/rejected": -221.2453155517578,
1286
+ "loss": 0.4675,
1287
+ "rewards/accuracies": 0.762499988079071,
1288
+ "rewards/chosen": -2.4995951652526855,
1289
+ "rewards/margins": 1.0894619226455688,
1290
+ "rewards/rejected": -3.589057445526123,
1291
+ "step": 860
1292
+ },
1293
+ {
1294
+ "epoch": 0.86,
1295
+ "learning_rate": 3.0824051775340895e-07,
1296
+ "logits/chosen": -2.380509853363037,
1297
+ "logits/rejected": -2.275191068649292,
1298
+ "logps/chosen": -180.95895385742188,
1299
+ "logps/rejected": -195.82479858398438,
1300
+ "loss": 0.547,
1301
+ "rewards/accuracies": 0.6875,
1302
+ "rewards/chosen": -2.3850598335266113,
1303
+ "rewards/margins": 0.8363308906555176,
1304
+ "rewards/rejected": -3.22139048576355,
1305
+ "step": 870
1306
+ },
1307
+ {
1308
+ "epoch": 0.87,
1309
+ "learning_rate": 2.6820837794786336e-07,
1310
+ "logits/chosen": -2.2321319580078125,
1311
+ "logits/rejected": -2.1590046882629395,
1312
+ "logps/chosen": -204.28173828125,
1313
+ "logps/rejected": -214.01327514648438,
1314
+ "loss": 0.6701,
1315
+ "rewards/accuracies": 0.5249999761581421,
1316
+ "rewards/chosen": -2.5836868286132812,
1317
+ "rewards/margins": 0.4722086787223816,
1318
+ "rewards/rejected": -3.0558953285217285,
1319
+ "step": 880
1320
+ },
1321
+ {
1322
+ "epoch": 0.88,
1323
+ "learning_rate": 2.3081268024887694e-07,
1324
+ "logits/chosen": -2.222381114959717,
1325
+ "logits/rejected": -2.051706075668335,
1326
+ "logps/chosen": -199.63970947265625,
1327
+ "logps/rejected": -208.906005859375,
1328
+ "loss": 0.4139,
1329
+ "rewards/accuracies": 0.8125,
1330
+ "rewards/chosen": -2.3457841873168945,
1331
+ "rewards/margins": 1.2535767555236816,
1332
+ "rewards/rejected": -3.599360942840576,
1333
+ "step": 890
1334
+ },
1335
+ {
1336
+ "epoch": 0.89,
1337
+ "learning_rate": 1.9609760064091044e-07,
1338
+ "logits/chosen": -2.3242409229278564,
1339
+ "logits/rejected": -2.2560477256774902,
1340
+ "logps/chosen": -205.27554321289062,
1341
+ "logps/rejected": -201.7273712158203,
1342
+ "loss": 0.5173,
1343
+ "rewards/accuracies": 0.7124999761581421,
1344
+ "rewards/chosen": -2.428300142288208,
1345
+ "rewards/margins": 0.8561038970947266,
1346
+ "rewards/rejected": -3.2844040393829346,
1347
+ "step": 900
1348
+ },
1349
+ {
1350
+ "epoch": 0.9,
1351
+ "learning_rate": 1.6410414846224992e-07,
1352
+ "logits/chosen": -2.220360517501831,
1353
+ "logits/rejected": -2.109575033187866,
1354
+ "logps/chosen": -201.75045776367188,
1355
+ "logps/rejected": -214.3638458251953,
1356
+ "loss": 0.4565,
1357
+ "rewards/accuracies": 0.7875000238418579,
1358
+ "rewards/chosen": -2.569467067718506,
1359
+ "rewards/margins": 1.1193673610687256,
1360
+ "rewards/rejected": -3.6888339519500732,
1361
+ "step": 910
1362
+ },
1363
+ {
1364
+ "epoch": 0.91,
1365
+ "learning_rate": 1.348701179601819e-07,
1366
+ "logits/chosen": -2.401984691619873,
1367
+ "logits/rejected": -2.2650654315948486,
1368
+ "logps/chosen": -215.353515625,
1369
+ "logps/rejected": -225.1014404296875,
1370
+ "loss": 0.4529,
1371
+ "rewards/accuracies": 0.7875000238418579,
1372
+ "rewards/chosen": -2.38716983795166,
1373
+ "rewards/margins": 1.1682064533233643,
1374
+ "rewards/rejected": -3.5553765296936035,
1375
+ "step": 920
1376
+ },
1377
+ {
1378
+ "epoch": 0.91,
1379
+ "learning_rate": 1.0843004364420151e-07,
1380
+ "logits/chosen": -2.2123489379882812,
1381
+ "logits/rejected": -2.100048065185547,
1382
+ "logps/chosen": -215.25479125976562,
1383
+ "logps/rejected": -229.445556640625,
1384
+ "loss": 0.5697,
1385
+ "rewards/accuracies": 0.6875,
1386
+ "rewards/chosen": -2.393723964691162,
1387
+ "rewards/margins": 0.8360812067985535,
1388
+ "rewards/rejected": -3.2298049926757812,
1389
+ "step": 930
1390
+ },
1391
+ {
1392
+ "epoch": 0.92,
1393
+ "learning_rate": 8.481515948997931e-08,
1394
+ "logits/chosen": -2.3680367469787598,
1395
+ "logits/rejected": -2.26953387260437,
1396
+ "logps/chosen": -216.99514770507812,
1397
+ "logps/rejected": -212.17263793945312,
1398
+ "loss": 0.6119,
1399
+ "rewards/accuracies": 0.737500011920929,
1400
+ "rewards/chosen": -2.478694200515747,
1401
+ "rewards/margins": 0.7769169807434082,
1402
+ "rewards/rejected": -3.255610942840576,
1403
+ "step": 940
1404
+ },
1405
+ {
1406
+ "epoch": 0.93,
1407
+ "learning_rate": 6.4053362042297e-08,
1408
+ "logits/chosen": -2.2625975608825684,
1409
+ "logits/rejected": -2.104025363922119,
1410
+ "logps/chosen": -205.9175262451172,
1411
+ "logps/rejected": -217.52304077148438,
1412
+ "loss": 0.4583,
1413
+ "rewards/accuracies": 0.7749999761581421,
1414
+ "rewards/chosen": -2.4613893032073975,
1415
+ "rewards/margins": 1.17227303981781,
1416
+ "rewards/rejected": -3.633662462234497,
1417
+ "step": 950
1418
+ },
1419
+ {
1420
+ "epoch": 0.94,
1421
+ "learning_rate": 4.616917746052163e-08,
1422
+ "logits/chosen": -2.3516554832458496,
1423
+ "logits/rejected": -2.214130401611328,
1424
+ "logps/chosen": -203.63438415527344,
1425
+ "logps/rejected": -211.27334594726562,
1426
+ "loss": 0.546,
1427
+ "rewards/accuracies": 0.75,
1428
+ "rewards/chosen": -2.5478570461273193,
1429
+ "rewards/margins": 0.990521252155304,
1430
+ "rewards/rejected": -3.5383784770965576,
1431
+ "step": 960
1432
+ },
1433
+ {
1434
+ "epoch": 0.95,
1435
+ "learning_rate": 3.118373254556412e-08,
1436
+ "logits/chosen": -2.3878164291381836,
1437
+ "logits/rejected": -2.2502613067626953,
1438
+ "logps/chosen": -198.12130737304688,
1439
+ "logps/rejected": -199.36111450195312,
1440
+ "loss": 0.4535,
1441
+ "rewards/accuracies": 0.7749999761581421,
1442
+ "rewards/chosen": -2.448812484741211,
1443
+ "rewards/margins": 1.0348026752471924,
1444
+ "rewards/rejected": -3.483614683151245,
1445
+ "step": 970
1446
+ },
1447
+ {
1448
+ "epoch": 0.96,
1449
+ "learning_rate": 1.9114729782535037e-08,
1450
+ "logits/chosen": -2.4108872413635254,
1451
+ "logits/rejected": -2.297142505645752,
1452
+ "logps/chosen": -194.57632446289062,
1453
+ "logps/rejected": -202.7489776611328,
1454
+ "loss": 0.4981,
1455
+ "rewards/accuracies": 0.699999988079071,
1456
+ "rewards/chosen": -2.350036859512329,
1457
+ "rewards/margins": 0.9002410173416138,
1458
+ "rewards/rejected": -3.2502777576446533,
1459
+ "step": 980
1460
+ },
1461
+ {
1462
+ "epoch": 0.97,
1463
+ "learning_rate": 9.97642642858815e-09,
1464
+ "logits/chosen": -2.28024959564209,
1465
+ "logits/rejected": -2.139801502227783,
1466
+ "logps/chosen": -211.3317413330078,
1467
+ "logps/rejected": -211.1747283935547,
1468
+ "loss": 0.5021,
1469
+ "rewards/accuracies": 0.6875,
1470
+ "rewards/chosen": -2.55255126953125,
1471
+ "rewards/margins": 0.9248201251029968,
1472
+ "rewards/rejected": -3.4773712158203125,
1473
+ "step": 990
1474
+ },
1475
+ {
1476
+ "epoch": 0.98,
1477
+ "learning_rate": 3.779617670651436e-09,
1478
+ "logits/chosen": -2.248671054840088,
1479
+ "logits/rejected": -2.1480166912078857,
1480
+ "logps/chosen": -217.71707153320312,
1481
+ "logps/rejected": -225.20040893554688,
1482
+ "loss": 0.6128,
1483
+ "rewards/accuracies": 0.699999988079071,
1484
+ "rewards/chosen": -2.73987078666687,
1485
+ "rewards/margins": 0.9156008958816528,
1486
+ "rewards/rejected": -3.6554713249206543,
1487
+ "step": 1000
1488
+ },
1489
+ {
1490
+ "epoch": 0.98,
1491
+ "eval_logits/chosen": -2.1620335578918457,
1492
+ "eval_logits/rejected": -2.062356948852539,
1493
+ "eval_logps/chosen": -206.584716796875,
1494
+ "eval_logps/rejected": -214.93492126464844,
1495
+ "eval_loss": 0.521207869052887,
1496
+ "eval_rewards/accuracies": 0.7405660152435303,
1497
+ "eval_rewards/chosen": -2.5398268699645996,
1498
+ "eval_rewards/margins": 0.9912916421890259,
1499
+ "eval_rewards/rejected": -3.531118631362915,
1500
+ "eval_runtime": 423.4333,
1501
+ "eval_samples_per_second": 0.987,
1502
+ "eval_steps_per_second": 0.125,
1503
+ "step": 1000
1504
+ },
1505
+ {
1506
+ "epoch": 0.99,
1507
+ "learning_rate": 5.316238729444201e-10,
1508
+ "logits/chosen": -2.234221935272217,
1509
+ "logits/rejected": -2.0627448558807373,
1510
+ "logps/chosen": -209.44503784179688,
1511
+ "logps/rejected": -219.24606323242188,
1512
+ "loss": 0.4684,
1513
+ "rewards/accuracies": 0.7749999761581421,
1514
+ "rewards/chosen": -2.601386547088623,
1515
+ "rewards/margins": 1.190915584564209,
1516
+ "rewards/rejected": -3.792301893234253,
1517
+ "step": 1010
1518
+ },
1519
+ {
1520
+ "epoch": 1.0,
1521
+ "step": 1016,
1522
+ "total_flos": 0.0,
1523
+ "train_loss": 0.5583291621658746,
1524
+ "train_runtime": 16785.8838,
1525
+ "train_samples_per_second": 0.484,
1526
+ "train_steps_per_second": 0.061
1527
+ }
1528
+ ],
1529
+ "logging_steps": 10,
1530
+ "max_steps": 1016,
1531
+ "num_input_tokens_seen": 0,
1532
+ "num_train_epochs": 1,
1533
+ "save_steps": 100,
1534
+ "total_flos": 0.0,
1535
+ "train_batch_size": 4,
1536
+ "trial_name": null,
1537
+ "trial_params": null
1538
+ }