lole25 committed on
Commit
5941df8
1 Parent(s): 71cdc77

Model save

Files changed (5)
  1. README.md +82 -0
  2. all_results.json +21 -0
  3. eval_results.json +16 -0
  4. train_results.json +8 -0
  5. trainer_state.json +1604 -0
README.md ADDED
@@ -0,0 +1,82 @@
+ ---
+ license: apache-2.0
+ library_name: peft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ base_model: mistralai/Mistral-7B-v0.1
+ model-index:
+ - name: zephyr-7b-gpo-iter1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # zephyr-7b-gpo-iter1
+
+ This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1), trained with DPO on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0069
+ - Rewards/chosen: 0.0025
+ - Rewards/rejected: 0.0081
+ - Rewards/accuracies: 0.4595
+ - Rewards/margins: -0.0056
+ - Logps/rejected: -272.5866
+ - Logps/chosen: -298.8498
+ - Logits/rejected: -2.1749
+ - Logits/chosen: -2.3692
+
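+ Since this repository contains a PEFT adapter rather than full model weights, it is normally loaded on top of the base model. The snippet below is a minimal, untested sketch; the repository id `lole25/zephyr-7b-gpo-iter1` is inferred from this card and the committer name, so adjust it if the adapter lives elsewhere.
+
+ ```python
+ import torch
+ from peft import AutoPeftModelForCausalLM
+ from transformers import AutoTokenizer
+
+ # Assumed repository id (inferred from the card); change if needed.
+ adapter_id = "lole25/zephyr-7b-gpo-iter1"
+
+ # AutoPeftModelForCausalLM reads the adapter config, downloads the base model
+ # (mistralai/Mistral-7B-v0.1) and attaches the adapter weights on top of it.
+ model = AutoPeftModelForCausalLM.from_pretrained(
+     adapter_id, torch_dtype=torch.bfloat16, device_map="auto"
+ )
+ tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
+
+ prompt = "Explain the difference between DPO and PPO in one paragraph."
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+ outputs = model.generate(**inputs, max_new_tokens=128)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```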
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a rough TRL sketch reproducing them follows the list):
+ - learning_rate: 5e-06
+ - train_batch_size: 1
+ - eval_batch_size: 2
+ - seed: 42
+ - distributed_type: multi-GPU
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 2
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 2
+
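+ For reference, the listing above can be approximated with TRL's `DPOTrainer`. This is an untested reconstruction based only on the hyperparameters recorded here; the dataset, the DPO `beta`, and the LoRA settings are not stored in this card, so they appear as placeholders.
+
+ ```python
+ # Rough, hypothetical reconstruction of the training setup from the listed hyperparameters.
+ from datasets import load_dataset
+ from peft import LoraConfig
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
+ from trl import DPOTrainer
+
+ model_id = "mistralai/Mistral-7B-v0.1"
+ model = AutoModelForCausalLM.from_pretrained(model_id)
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ tokenizer.pad_token = tokenizer.eos_token
+
+ # Placeholder: a preference dataset with "prompt", "chosen" and "rejected" columns.
+ train_dataset = load_dataset("json", data_files="preferences.jsonl")["train"]
+
+ training_args = TrainingArguments(
+     output_dir="zephyr-7b-gpo-iter1",
+     learning_rate=5e-6,
+     per_device_train_batch_size=1,
+     per_device_eval_batch_size=2,
+     gradient_accumulation_steps=2,
+     lr_scheduler_type="cosine",
+     warmup_ratio=0.1,
+     num_train_epochs=2,
+     seed=42,
+ )
+
+ trainer = DPOTrainer(
+     model,
+     ref_model=None,  # with a PEFT adapter, TRL derives the reference model by disabling the adapter
+     args=training_args,
+     beta=0.1,  # placeholder; the actual beta is not recorded in this card
+     train_dataset=train_dataset,
+     tokenizer=tokenizer,
+     peft_config=LoraConfig(task_type="CAUSAL_LM", r=16, lora_alpha=32),  # placeholder LoRA settings
+ )
+ trainer.train()
+ ```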
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.0006 | 0.2 | 100 | 0.0031 | -0.0541 | -0.0467 | 0.4245 | -0.0074 | -278.0669 | -304.5065 | -2.1506 | -2.3436 |
+ | 0.0025 | 0.4 | 200 | 0.0033 | -0.0115 | -0.0107 | 0.4910 | -0.0008 | -274.4619 | -300.2420 | -2.1684 | -2.3612 |
+ | 0.0009 | 0.6 | 300 | 0.0030 | -0.0220 | -0.0216 | 0.4935 | -0.0004 | -275.5567 | -301.2960 | -2.1427 | -2.3360 |
+ | 0.0013 | 0.8 | 400 | 0.0034 | -0.0156 | -0.0142 | 0.4935 | -0.0014 | -274.8156 | -300.6561 | -2.1462 | -2.3405 |
+ | 0.0011 | 1.0 | 500 | 0.0037 | -0.0565 | -0.0502 | 0.4520 | -0.0063 | -278.4165 | -304.7457 | -2.1454 | -2.3392 |
+ | 0.0116 | 1.2 | 600 | 0.0049 | -0.0283 | -0.0229 | 0.4435 | -0.0054 | -275.6791 | -301.9266 | -2.1527 | -2.3449 |
+ | 0.015 | 1.4 | 700 | 0.0065 | -0.0261 | -0.0182 | 0.4450 | -0.0078 | -275.2170 | -301.7041 | -2.1650 | -2.3586 |
+ | 0.0009 | 1.6 | 800 | 0.0069 | 0.0079 | 0.0124 | 0.4720 | -0.0044 | -272.1540 | -298.3011 | -2.1746 | -2.3689 |
+ | 0.0109 | 1.8 | 900 | 0.0069 | 0.0024 | 0.0080 | 0.4570 | -0.0057 | -272.5880 | -298.8583 | -2.1739 | -2.3682 |
+ | 0.0015 | 2.0 | 1000 | 0.0069 | 0.0025 | 0.0081 | 0.4595 | -0.0056 | -272.5866 | -298.8498 | -2.1749 | -2.3692 |
+
+
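+ The per-step log behind this table is stored in `trainer_state.json`, added in the same commit. The small, untested helper below pulls the evaluation rows back out of it; it assumes the file sits in the working directory and relies on the `log_history` keys visible in the dump further down.
+
+ ```python
+ import json
+
+ # Load the trainer state written by the Trainer alongside this card.
+ with open("trainer_state.json") as f:
+     state = json.load(f)
+
+ # Evaluation entries are the log_history records that carry an "eval_loss" key.
+ eval_rows = [rec for rec in state["log_history"] if "eval_loss" in rec]
+
+ for rec in eval_rows:
+     print(
+         f'step {rec["step"]:4d}  '
+         f'eval_loss {rec["eval_loss"]:.4f}  '
+         f'reward_acc {rec["eval_rewards/accuracies"]:.4f}  '
+         f'reward_margin {rec["eval_rewards/margins"]:.4f}'
+     )
+ ```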
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.36.2
+ - Pytorch 2.1.2+cu118
+ - Datasets 2.14.6
+ - Tokenizers 0.15.2
all_results.json ADDED
@@ -0,0 +1,21 @@
+ {
+     "epoch": 2.0,
+     "eval_logits/chosen": -2.3691599369049072,
+     "eval_logits/rejected": -2.1749157905578613,
+     "eval_logps/chosen": -298.8498229980469,
+     "eval_logps/rejected": -272.58660888671875,
+     "eval_loss": 0.006900906562805176,
+     "eval_rewards/accuracies": 0.4595000147819519,
+     "eval_rewards/chosen": 0.002455136040225625,
+     "eval_rewards/margins": -0.005604185629636049,
+     "eval_rewards/rejected": 0.008059320971369743,
+     "eval_runtime": 1421.1253,
+     "eval_samples": 2000,
+     "eval_samples_per_second": 1.407,
+     "eval_steps_per_second": 0.704,
+     "train_loss": 0.005910432943725027,
+     "train_runtime": 19053.6119,
+     "train_samples": 61135,
+     "train_samples_per_second": 0.105,
+     "train_steps_per_second": 0.052
+ }
eval_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+     "epoch": 2.0,
+     "eval_logits/chosen": -2.3691599369049072,
+     "eval_logits/rejected": -2.1749157905578613,
+     "eval_logps/chosen": -298.8498229980469,
+     "eval_logps/rejected": -272.58660888671875,
+     "eval_loss": 0.006900906562805176,
+     "eval_rewards/accuracies": 0.4595000147819519,
+     "eval_rewards/chosen": 0.002455136040225625,
+     "eval_rewards/margins": -0.005604185629636049,
+     "eval_rewards/rejected": 0.008059320971369743,
+     "eval_runtime": 1421.1253,
+     "eval_samples": 2000,
+     "eval_samples_per_second": 1.407,
+     "eval_steps_per_second": 0.704
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 2.0,
+     "train_loss": 0.005910432943725027,
+     "train_runtime": 19053.6119,
+     "train_samples": 61135,
+     "train_samples_per_second": 0.105,
+     "train_steps_per_second": 0.052
+ }
trainer_state.json ADDED
@@ -0,0 +1,1604 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.0,
5
+ "eval_steps": 100,
6
+ "global_step": 1000,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 5.0000000000000004e-08,
14
+ "logits/chosen": -3.1678528785705566,
15
+ "logits/rejected": -3.1461400985717773,
16
+ "logps/chosen": -344.33587646484375,
17
+ "logps/rejected": -334.830810546875,
18
+ "loss": 0.0011,
19
+ "rewards/accuracies": 0.0,
20
+ "rewards/chosen": 0.0,
21
+ "rewards/margins": 0.0,
22
+ "rewards/rejected": 0.0,
23
+ "step": 1
24
+ },
25
+ {
26
+ "epoch": 0.02,
27
+ "learning_rate": 5.000000000000001e-07,
28
+ "logits/chosen": -2.8056228160858154,
29
+ "logits/rejected": -2.711918830871582,
30
+ "logps/chosen": -137.3001708984375,
31
+ "logps/rejected": -114.56926727294922,
32
+ "loss": 0.001,
33
+ "rewards/accuracies": 0.6111111044883728,
34
+ "rewards/chosen": 0.0003753364726435393,
35
+ "rewards/margins": 0.001190218492411077,
36
+ "rewards/rejected": -0.0008148818160407245,
37
+ "step": 10
38
+ },
39
+ {
40
+ "epoch": 0.04,
41
+ "learning_rate": 1.0000000000000002e-06,
42
+ "logits/chosen": -2.664208173751831,
43
+ "logits/rejected": -2.3068690299987793,
44
+ "logps/chosen": -95.76632690429688,
45
+ "logps/rejected": -91.30467224121094,
46
+ "loss": 0.0011,
47
+ "rewards/accuracies": 0.4000000059604645,
48
+ "rewards/chosen": -7.150838064262643e-05,
49
+ "rewards/margins": 0.00038482382660731673,
50
+ "rewards/rejected": -0.0004563321708701551,
51
+ "step": 20
52
+ },
53
+ {
54
+ "epoch": 0.06,
55
+ "learning_rate": 1.5e-06,
56
+ "logits/chosen": -2.8235249519348145,
57
+ "logits/rejected": -2.742885112762451,
58
+ "logps/chosen": -103.0609359741211,
59
+ "logps/rejected": -119.29620361328125,
60
+ "loss": 0.0011,
61
+ "rewards/accuracies": 0.550000011920929,
62
+ "rewards/chosen": -0.00019755656830966473,
63
+ "rewards/margins": 0.00018337149231228977,
64
+ "rewards/rejected": -0.00038092798786237836,
65
+ "step": 30
66
+ },
67
+ {
68
+ "epoch": 0.08,
69
+ "learning_rate": 2.0000000000000003e-06,
70
+ "logits/chosen": -2.812087059020996,
71
+ "logits/rejected": -2.3899402618408203,
72
+ "logps/chosen": -56.24201583862305,
73
+ "logps/rejected": -52.55266189575195,
74
+ "loss": 0.001,
75
+ "rewards/accuracies": 0.4000000059604645,
76
+ "rewards/chosen": -0.002384146675467491,
77
+ "rewards/margins": 0.0011411935556679964,
78
+ "rewards/rejected": -0.0035253397654742002,
79
+ "step": 40
80
+ },
81
+ {
82
+ "epoch": 0.1,
83
+ "learning_rate": 2.5e-06,
84
+ "logits/chosen": -2.919544219970703,
85
+ "logits/rejected": -2.68827486038208,
86
+ "logps/chosen": -85.77859497070312,
87
+ "logps/rejected": -82.93376159667969,
88
+ "loss": 0.001,
89
+ "rewards/accuracies": 0.6000000238418579,
90
+ "rewards/chosen": -0.002311713993549347,
91
+ "rewards/margins": 0.0031323134899139404,
92
+ "rewards/rejected": -0.005444027483463287,
93
+ "step": 50
94
+ },
95
+ {
96
+ "epoch": 0.12,
97
+ "learning_rate": 3e-06,
98
+ "logits/chosen": -2.7442586421966553,
99
+ "logits/rejected": -2.5409207344055176,
100
+ "logps/chosen": -135.08984375,
101
+ "logps/rejected": -137.5052032470703,
102
+ "loss": 0.001,
103
+ "rewards/accuracies": 0.6000000238418579,
104
+ "rewards/chosen": -0.0012713891919702291,
105
+ "rewards/margins": 0.005557497497648001,
106
+ "rewards/rejected": -0.006828887853771448,
107
+ "step": 60
108
+ },
109
+ {
110
+ "epoch": 0.14,
111
+ "learning_rate": 3.5e-06,
112
+ "logits/chosen": -2.680795669555664,
113
+ "logits/rejected": -2.3159432411193848,
114
+ "logps/chosen": -86.95841979980469,
115
+ "logps/rejected": -71.47272491455078,
116
+ "loss": 0.0012,
117
+ "rewards/accuracies": 0.550000011920929,
118
+ "rewards/chosen": 0.0019758909475058317,
119
+ "rewards/margins": 0.005556103773415089,
120
+ "rewards/rejected": -0.0035802137572318316,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 0.16,
125
+ "learning_rate": 4.000000000000001e-06,
126
+ "logits/chosen": -2.789736270904541,
127
+ "logits/rejected": -2.5091891288757324,
128
+ "logps/chosen": -54.42887496948242,
129
+ "logps/rejected": -45.48096466064453,
130
+ "loss": 0.0009,
131
+ "rewards/accuracies": 0.699999988079071,
132
+ "rewards/chosen": 0.011168469674885273,
133
+ "rewards/margins": 0.00967374723404646,
134
+ "rewards/rejected": 0.0014947212766855955,
135
+ "step": 80
136
+ },
137
+ {
138
+ "epoch": 0.18,
139
+ "learning_rate": 4.5e-06,
140
+ "logits/chosen": -2.686554431915283,
141
+ "logits/rejected": -2.3966121673583984,
142
+ "logps/chosen": -88.09907531738281,
143
+ "logps/rejected": -87.56222534179688,
144
+ "loss": 0.0008,
145
+ "rewards/accuracies": 0.6000000238418579,
146
+ "rewards/chosen": 0.016689006239175797,
147
+ "rewards/margins": 0.0082827378064394,
148
+ "rewards/rejected": 0.008406268432736397,
149
+ "step": 90
150
+ },
151
+ {
152
+ "epoch": 0.2,
153
+ "learning_rate": 5e-06,
154
+ "logits/chosen": -2.821899652481079,
155
+ "logits/rejected": -2.5310721397399902,
156
+ "logps/chosen": -64.77155303955078,
157
+ "logps/rejected": -73.11659240722656,
158
+ "loss": 0.0006,
159
+ "rewards/accuracies": 0.699999988079071,
160
+ "rewards/chosen": 0.018898095935583115,
161
+ "rewards/margins": 0.017503971233963966,
162
+ "rewards/rejected": 0.001394127495586872,
163
+ "step": 100
164
+ },
165
+ {
166
+ "epoch": 0.2,
167
+ "eval_logits/chosen": -2.343594551086426,
168
+ "eval_logits/rejected": -2.15059494972229,
169
+ "eval_logps/chosen": -304.50653076171875,
170
+ "eval_logps/rejected": -278.0669250488281,
171
+ "eval_loss": 0.003129809396341443,
172
+ "eval_rewards/accuracies": 0.4244999885559082,
173
+ "eval_rewards/chosen": -0.054112162441015244,
174
+ "eval_rewards/margins": -0.007367968093603849,
175
+ "eval_rewards/rejected": -0.046744197607040405,
176
+ "eval_runtime": 1422.4177,
177
+ "eval_samples_per_second": 1.406,
178
+ "eval_steps_per_second": 0.703,
179
+ "step": 100
180
+ },
181
+ {
182
+ "epoch": 0.22,
183
+ "learning_rate": 4.99847706754774e-06,
184
+ "logits/chosen": -2.8732924461364746,
185
+ "logits/rejected": -2.670675754547119,
186
+ "logps/chosen": -80.2041244506836,
187
+ "logps/rejected": -76.78239440917969,
188
+ "loss": 0.0007,
189
+ "rewards/accuracies": 0.6499999761581421,
190
+ "rewards/chosen": 0.0098586305975914,
191
+ "rewards/margins": 0.01424575038254261,
192
+ "rewards/rejected": -0.004387119319289923,
193
+ "step": 110
194
+ },
195
+ {
196
+ "epoch": 0.24,
197
+ "learning_rate": 4.993910125649561e-06,
198
+ "logits/chosen": -2.841202735900879,
199
+ "logits/rejected": -2.513766050338745,
200
+ "logps/chosen": -60.289215087890625,
201
+ "logps/rejected": -46.964500427246094,
202
+ "loss": 0.0011,
203
+ "rewards/accuracies": 0.6000000238418579,
204
+ "rewards/chosen": 0.013731713406741619,
205
+ "rewards/margins": 0.013367941603064537,
206
+ "rewards/rejected": 0.00036377101787365973,
207
+ "step": 120
208
+ },
209
+ {
210
+ "epoch": 0.26,
211
+ "learning_rate": 4.986304738420684e-06,
212
+ "logits/chosen": -2.712897777557373,
213
+ "logits/rejected": -2.391780138015747,
214
+ "logps/chosen": -134.18972778320312,
215
+ "logps/rejected": -118.10213470458984,
216
+ "loss": 0.0011,
217
+ "rewards/accuracies": 0.6499999761581421,
218
+ "rewards/chosen": 0.0306332316249609,
219
+ "rewards/margins": 0.015287762507796288,
220
+ "rewards/rejected": 0.015345467254519463,
221
+ "step": 130
222
+ },
223
+ {
224
+ "epoch": 0.28,
225
+ "learning_rate": 4.975670171853926e-06,
226
+ "logits/chosen": -2.9027531147003174,
227
+ "logits/rejected": -2.519896984100342,
228
+ "logps/chosen": -57.02116012573242,
229
+ "logps/rejected": -53.072364807128906,
230
+ "loss": 0.0009,
231
+ "rewards/accuracies": 0.6499999761581421,
232
+ "rewards/chosen": -0.0006130338879302144,
233
+ "rewards/margins": 0.008738933131098747,
234
+ "rewards/rejected": -0.00935196690261364,
235
+ "step": 140
236
+ },
237
+ {
238
+ "epoch": 0.3,
239
+ "learning_rate": 4.962019382530521e-06,
240
+ "logits/chosen": -2.8638014793395996,
241
+ "logits/rejected": -2.5246753692626953,
242
+ "logps/chosen": -61.296531677246094,
243
+ "logps/rejected": -76.2929916381836,
244
+ "loss": 0.0014,
245
+ "rewards/accuracies": 0.699999988079071,
246
+ "rewards/chosen": -0.025341859087347984,
247
+ "rewards/margins": 0.015296203084290028,
248
+ "rewards/rejected": -0.04063806310296059,
249
+ "step": 150
250
+ },
251
+ {
252
+ "epoch": 0.32,
253
+ "learning_rate": 4.9453690018345144e-06,
254
+ "logits/chosen": -2.8262181282043457,
255
+ "logits/rejected": -2.3856072425842285,
256
+ "logps/chosen": -141.7677459716797,
257
+ "logps/rejected": -134.89096069335938,
258
+ "loss": 0.001,
259
+ "rewards/accuracies": 0.699999988079071,
260
+ "rewards/chosen": 0.005657578818500042,
261
+ "rewards/margins": 0.017082063481211662,
262
+ "rewards/rejected": -0.011424483731389046,
263
+ "step": 160
264
+ },
265
+ {
266
+ "epoch": 0.34,
267
+ "learning_rate": 4.925739315689991e-06,
268
+ "logits/chosen": -2.871880531311035,
269
+ "logits/rejected": -2.5630600452423096,
270
+ "logps/chosen": -61.668121337890625,
271
+ "logps/rejected": -58.292213439941406,
272
+ "loss": 0.0011,
273
+ "rewards/accuracies": 0.699999988079071,
274
+ "rewards/chosen": -0.016033068299293518,
275
+ "rewards/margins": 0.01451338641345501,
276
+ "rewards/rejected": -0.030546456575393677,
277
+ "step": 170
278
+ },
279
+ {
280
+ "epoch": 0.36,
281
+ "learning_rate": 4.903154239845798e-06,
282
+ "logits/chosen": -2.893052339553833,
283
+ "logits/rejected": -2.682218074798584,
284
+ "logps/chosen": -92.70951080322266,
285
+ "logps/rejected": -104.5350570678711,
286
+ "loss": 0.0014,
287
+ "rewards/accuracies": 0.6000000238418579,
288
+ "rewards/chosen": -0.004353048745542765,
289
+ "rewards/margins": 0.005879141390323639,
290
+ "rewards/rejected": -0.010232190601527691,
291
+ "step": 180
292
+ },
293
+ {
294
+ "epoch": 0.38,
295
+ "learning_rate": 4.8776412907378845e-06,
296
+ "logits/chosen": -2.6648871898651123,
297
+ "logits/rejected": -2.4408602714538574,
298
+ "logps/chosen": -68.06556701660156,
299
+ "logps/rejected": -83.47774505615234,
300
+ "loss": 0.0016,
301
+ "rewards/accuracies": 0.6000000238418579,
302
+ "rewards/chosen": -0.023349497467279434,
303
+ "rewards/margins": 0.0038781266193836927,
304
+ "rewards/rejected": -0.027227621525526047,
305
+ "step": 190
306
+ },
307
+ {
308
+ "epoch": 0.4,
309
+ "learning_rate": 4.849231551964771e-06,
310
+ "logits/chosen": -2.9131691455841064,
311
+ "logits/rejected": -2.6140384674072266,
312
+ "logps/chosen": -118.0514907836914,
313
+ "logps/rejected": -105.72004699707031,
314
+ "loss": 0.0025,
315
+ "rewards/accuracies": 0.550000011920929,
316
+ "rewards/chosen": -0.01497165858745575,
317
+ "rewards/margins": 0.009273095987737179,
318
+ "rewards/rejected": -0.024244757369160652,
319
+ "step": 200
320
+ },
321
+ {
322
+ "epoch": 0.4,
323
+ "eval_logits/chosen": -2.3611958026885986,
324
+ "eval_logits/rejected": -2.168394088745117,
325
+ "eval_logps/chosen": -300.2420349121094,
326
+ "eval_logps/rejected": -274.46185302734375,
327
+ "eval_loss": 0.0032995175570249557,
328
+ "eval_rewards/accuracies": 0.4909999966621399,
329
+ "eval_rewards/chosen": -0.011466986499726772,
330
+ "eval_rewards/margins": -0.0007737927371636033,
331
+ "eval_rewards/rejected": -0.010693194344639778,
332
+ "eval_runtime": 1422.8869,
333
+ "eval_samples_per_second": 1.406,
334
+ "eval_steps_per_second": 0.703,
335
+ "step": 200
336
+ },
337
+ {
338
+ "epoch": 0.42,
339
+ "learning_rate": 4.817959636416969e-06,
340
+ "logits/chosen": -2.7499923706054688,
341
+ "logits/rejected": -2.4605581760406494,
342
+ "logps/chosen": -60.11127471923828,
343
+ "logps/rejected": -57.330078125,
344
+ "loss": 0.0008,
345
+ "rewards/accuracies": 0.75,
346
+ "rewards/chosen": 2.1754764020442963e-05,
347
+ "rewards/margins": 0.01557920128107071,
348
+ "rewards/rejected": -0.015557448379695415,
349
+ "step": 210
350
+ },
351
+ {
352
+ "epoch": 0.44,
353
+ "learning_rate": 4.783863644106502e-06,
354
+ "logits/chosen": -2.8885045051574707,
355
+ "logits/rejected": -2.744602680206299,
356
+ "logps/chosen": -133.10980224609375,
357
+ "logps/rejected": -116.32179260253906,
358
+ "loss": 0.0013,
359
+ "rewards/accuracies": 0.6499999761581421,
360
+ "rewards/chosen": 0.012220713309943676,
361
+ "rewards/margins": 0.013802575878798962,
362
+ "rewards/rejected": -0.0015818632673472166,
363
+ "step": 220
364
+ },
365
+ {
366
+ "epoch": 0.46,
367
+ "learning_rate": 4.746985115747918e-06,
368
+ "logits/chosen": -2.6919429302215576,
369
+ "logits/rejected": -2.4415671825408936,
370
+ "logps/chosen": -57.36275100708008,
371
+ "logps/rejected": -43.19419479370117,
372
+ "loss": 0.0008,
373
+ "rewards/accuracies": 0.75,
374
+ "rewards/chosen": 0.01155207958072424,
375
+ "rewards/margins": 0.017826344817876816,
376
+ "rewards/rejected": -0.0062742652371525764,
377
+ "step": 230
378
+ },
379
+ {
380
+ "epoch": 0.48,
381
+ "learning_rate": 4.707368982147318e-06,
382
+ "logits/chosen": -2.7176296710968018,
383
+ "logits/rejected": -2.6274712085723877,
384
+ "logps/chosen": -59.256858825683594,
385
+ "logps/rejected": -60.37900924682617,
386
+ "loss": 0.0019,
387
+ "rewards/accuracies": 0.6499999761581421,
388
+ "rewards/chosen": 0.004980463068932295,
389
+ "rewards/margins": 0.006475840695202351,
390
+ "rewards/rejected": -0.0014953784411773086,
391
+ "step": 240
392
+ },
393
+ {
394
+ "epoch": 0.5,
395
+ "learning_rate": 4.665063509461098e-06,
396
+ "logits/chosen": -2.8012423515319824,
397
+ "logits/rejected": -2.525057554244995,
398
+ "logps/chosen": -98.50082397460938,
399
+ "logps/rejected": -90.67588806152344,
400
+ "loss": 0.0013,
401
+ "rewards/accuracies": 0.699999988079071,
402
+ "rewards/chosen": -0.008941109292209148,
403
+ "rewards/margins": 0.012530329637229443,
404
+ "rewards/rejected": -0.021471437066793442,
405
+ "step": 250
406
+ },
407
+ {
408
+ "epoch": 0.52,
409
+ "learning_rate": 4.620120240391065e-06,
410
+ "logits/chosen": -2.762904644012451,
411
+ "logits/rejected": -2.579153537750244,
412
+ "logps/chosen": -79.9581069946289,
413
+ "logps/rejected": -84.54290771484375,
414
+ "loss": 0.0016,
415
+ "rewards/accuracies": 0.550000011920929,
416
+ "rewards/chosen": -0.008370987139642239,
417
+ "rewards/margins": 0.0011044790735468268,
418
+ "rewards/rejected": -0.009475464932620525,
419
+ "step": 260
420
+ },
421
+ {
422
+ "epoch": 0.54,
423
+ "learning_rate": 4.572593931387604e-06,
424
+ "logits/chosen": -2.697049140930176,
425
+ "logits/rejected": -2.420090675354004,
426
+ "logps/chosen": -68.5470199584961,
427
+ "logps/rejected": -59.51050567626953,
428
+ "loss": 0.0011,
429
+ "rewards/accuracies": 0.550000011920929,
430
+ "rewards/chosen": -0.007652081549167633,
431
+ "rewards/margins": 0.009131882339715958,
432
+ "rewards/rejected": -0.01678396575152874,
433
+ "step": 270
434
+ },
435
+ {
436
+ "epoch": 0.56,
437
+ "learning_rate": 4.522542485937369e-06,
438
+ "logits/chosen": -2.8917226791381836,
439
+ "logits/rejected": -2.56544828414917,
440
+ "logps/chosen": -133.83926391601562,
441
+ "logps/rejected": -106.41746520996094,
442
+ "loss": 0.0014,
443
+ "rewards/accuracies": 0.4000000059604645,
444
+ "rewards/chosen": 0.0028765772003680468,
445
+ "rewards/margins": 0.0040840087458491325,
446
+ "rewards/rejected": -0.001207432011142373,
447
+ "step": 280
448
+ },
449
+ {
450
+ "epoch": 0.58,
451
+ "learning_rate": 4.470026884016805e-06,
452
+ "logits/chosen": -2.8042643070220947,
453
+ "logits/rejected": -2.6527392864227295,
454
+ "logps/chosen": -119.4009017944336,
455
+ "logps/rejected": -110.30342102050781,
456
+ "loss": 0.0015,
457
+ "rewards/accuracies": 0.6000000238418579,
458
+ "rewards/chosen": 0.002782848197966814,
459
+ "rewards/margins": 0.009942631237208843,
460
+ "rewards/rejected": -0.007159784436225891,
461
+ "step": 290
462
+ },
463
+ {
464
+ "epoch": 0.6,
465
+ "learning_rate": 4.415111107797445e-06,
466
+ "logits/chosen": -2.963005781173706,
467
+ "logits/rejected": -2.809359073638916,
468
+ "logps/chosen": -124.54817199707031,
469
+ "logps/rejected": -123.4760513305664,
470
+ "loss": 0.0009,
471
+ "rewards/accuracies": 0.6499999761581421,
472
+ "rewards/chosen": 0.008486522361636162,
473
+ "rewards/margins": 0.007400536444038153,
474
+ "rewards/rejected": 0.0010859851026907563,
475
+ "step": 300
476
+ },
477
+ {
478
+ "epoch": 0.6,
479
+ "eval_logits/chosen": -2.335963249206543,
480
+ "eval_logits/rejected": -2.142676830291748,
481
+ "eval_logps/chosen": -301.29595947265625,
482
+ "eval_logps/rejected": -275.5566711425781,
483
+ "eval_loss": 0.0030309471767395735,
484
+ "eval_rewards/accuracies": 0.4934999942779541,
485
+ "eval_rewards/chosen": -0.02200666442513466,
486
+ "eval_rewards/margins": -0.0003654572064988315,
487
+ "eval_rewards/rejected": -0.021641207858920097,
488
+ "eval_runtime": 1423.8728,
489
+ "eval_samples_per_second": 1.405,
490
+ "eval_steps_per_second": 0.702,
491
+ "step": 300
492
+ },
493
+ {
494
+ "epoch": 0.62,
495
+ "learning_rate": 4.357862063693486e-06,
496
+ "logits/chosen": -2.8513238430023193,
497
+ "logits/rejected": -2.672865390777588,
498
+ "logps/chosen": -66.75782775878906,
499
+ "logps/rejected": -73.05040740966797,
500
+ "loss": 0.001,
501
+ "rewards/accuracies": 0.699999988079071,
502
+ "rewards/chosen": -0.007263772189617157,
503
+ "rewards/margins": 0.007496596314013004,
504
+ "rewards/rejected": -0.014760365709662437,
505
+ "step": 310
506
+ },
507
+ {
508
+ "epoch": 0.64,
509
+ "learning_rate": 4.2983495008466285e-06,
510
+ "logits/chosen": -2.7180657386779785,
511
+ "logits/rejected": -2.3951573371887207,
512
+ "logps/chosen": -88.17804718017578,
513
+ "logps/rejected": -78.4397964477539,
514
+ "loss": 0.0007,
515
+ "rewards/accuracies": 0.75,
516
+ "rewards/chosen": 0.009861504659056664,
517
+ "rewards/margins": 0.019821077585220337,
518
+ "rewards/rejected": -0.009959569200873375,
519
+ "step": 320
520
+ },
521
+ {
522
+ "epoch": 0.66,
523
+ "learning_rate": 4.236645926147493e-06,
524
+ "logits/chosen": -2.687565326690674,
525
+ "logits/rejected": -2.4989490509033203,
526
+ "logps/chosen": -130.00393676757812,
527
+ "logps/rejected": -130.13922119140625,
528
+ "loss": 0.0013,
529
+ "rewards/accuracies": 0.5,
530
+ "rewards/chosen": 0.04140586778521538,
531
+ "rewards/margins": 0.013412979431450367,
532
+ "rewards/rejected": 0.027992893010377884,
533
+ "step": 330
534
+ },
535
+ {
536
+ "epoch": 0.68,
537
+ "learning_rate": 4.172826515897146e-06,
538
+ "logits/chosen": -2.6931281089782715,
539
+ "logits/rejected": -2.3428401947021484,
540
+ "logps/chosen": -79.95687103271484,
541
+ "logps/rejected": -78.25990295410156,
542
+ "loss": 0.0011,
543
+ "rewards/accuracies": 0.6499999761581421,
544
+ "rewards/chosen": 0.010445808991789818,
545
+ "rewards/margins": 0.013384808786213398,
546
+ "rewards/rejected": -0.0029389983974397182,
547
+ "step": 340
548
+ },
549
+ {
550
+ "epoch": 0.7,
551
+ "learning_rate": 4.106969024216348e-06,
552
+ "logits/chosen": -2.71099853515625,
553
+ "logits/rejected": -2.4201858043670654,
554
+ "logps/chosen": -51.87015914916992,
555
+ "logps/rejected": -49.29432678222656,
556
+ "loss": 0.0013,
557
+ "rewards/accuracies": 0.75,
558
+ "rewards/chosen": -0.0006995106232352555,
559
+ "rewards/margins": 0.008277077227830887,
560
+ "rewards/rejected": -0.008976588025689125,
561
+ "step": 350
562
+ },
563
+ {
564
+ "epoch": 0.72,
565
+ "learning_rate": 4.039153688314146e-06,
566
+ "logits/chosen": -2.80985689163208,
567
+ "logits/rejected": -2.5829150676727295,
568
+ "logps/chosen": -73.7370834350586,
569
+ "logps/rejected": -69.81620788574219,
570
+ "loss": 0.0008,
571
+ "rewards/accuracies": 0.75,
572
+ "rewards/chosen": 0.014964630827307701,
573
+ "rewards/margins": 0.014514106325805187,
574
+ "rewards/rejected": 0.0004505239485297352,
575
+ "step": 360
576
+ },
577
+ {
578
+ "epoch": 0.74,
579
+ "learning_rate": 3.969463130731183e-06,
580
+ "logits/chosen": -2.7932417392730713,
581
+ "logits/rejected": -2.5239012241363525,
582
+ "logps/chosen": -114.29008483886719,
583
+ "logps/rejected": -102.12156677246094,
584
+ "loss": 0.0011,
585
+ "rewards/accuracies": 0.6000000238418579,
586
+ "rewards/chosen": 0.004044980742037296,
587
+ "rewards/margins": 0.010396437719464302,
588
+ "rewards/rejected": -0.006351456046104431,
589
+ "step": 370
590
+ },
591
+ {
592
+ "epoch": 0.76,
593
+ "learning_rate": 3.897982258676867e-06,
594
+ "logits/chosen": -2.7236623764038086,
595
+ "logits/rejected": -2.5601420402526855,
596
+ "logps/chosen": -84.38848876953125,
597
+ "logps/rejected": -85.55775451660156,
598
+ "loss": 0.0008,
599
+ "rewards/accuracies": 0.6499999761581421,
600
+ "rewards/chosen": 0.012120635248720646,
601
+ "rewards/margins": 0.010280453599989414,
602
+ "rewards/rejected": 0.0018401825800538063,
603
+ "step": 380
604
+ },
605
+ {
606
+ "epoch": 0.78,
607
+ "learning_rate": 3.824798160583012e-06,
608
+ "logits/chosen": -2.8765292167663574,
609
+ "logits/rejected": -2.7375664710998535,
610
+ "logps/chosen": -107.2117919921875,
611
+ "logps/rejected": -103.0725326538086,
612
+ "loss": 0.001,
613
+ "rewards/accuracies": 0.550000011920929,
614
+ "rewards/chosen": 0.021095167845487595,
615
+ "rewards/margins": 0.009734300896525383,
616
+ "rewards/rejected": 0.011360866948962212,
617
+ "step": 390
618
+ },
619
+ {
620
+ "epoch": 0.8,
621
+ "learning_rate": 3.7500000000000005e-06,
622
+ "logits/chosen": -2.8528428077697754,
623
+ "logits/rejected": -2.539654493331909,
624
+ "logps/chosen": -86.22474670410156,
625
+ "logps/rejected": -74.02642822265625,
626
+ "loss": 0.0013,
627
+ "rewards/accuracies": 0.699999988079071,
628
+ "rewards/chosen": 0.02159132994711399,
629
+ "rewards/margins": 0.008513709530234337,
630
+ "rewards/rejected": 0.013077618554234505,
631
+ "step": 400
632
+ },
633
+ {
634
+ "epoch": 0.8,
635
+ "eval_logits/chosen": -2.340451955795288,
636
+ "eval_logits/rejected": -2.1462416648864746,
637
+ "eval_logps/chosen": -300.6561279296875,
638
+ "eval_logps/rejected": -274.8155822753906,
639
+ "eval_loss": 0.0034076529555022717,
640
+ "eval_rewards/accuracies": 0.4934999942779541,
641
+ "eval_rewards/chosen": -0.015608005225658417,
642
+ "eval_rewards/margins": -0.0013772795209661126,
643
+ "eval_rewards/rejected": -0.014230725355446339,
644
+ "eval_runtime": 1422.7868,
645
+ "eval_samples_per_second": 1.406,
646
+ "eval_steps_per_second": 0.703,
647
+ "step": 400
648
+ },
649
+ {
650
+ "epoch": 0.82,
651
+ "learning_rate": 3.6736789069647273e-06,
652
+ "logits/chosen": -2.8410146236419678,
653
+ "logits/rejected": -2.368011713027954,
654
+ "logps/chosen": -64.5174789428711,
655
+ "logps/rejected": -56.3655891418457,
656
+ "loss": 0.0015,
657
+ "rewards/accuracies": 0.550000011920929,
658
+ "rewards/chosen": 0.02277212217450142,
659
+ "rewards/margins": 0.015335053205490112,
660
+ "rewards/rejected": 0.007437069900333881,
661
+ "step": 410
662
+ },
663
+ {
664
+ "epoch": 0.84,
665
+ "learning_rate": 3.595927866972694e-06,
666
+ "logits/chosen": -2.773725986480713,
667
+ "logits/rejected": -2.6576743125915527,
668
+ "logps/chosen": -95.10247802734375,
669
+ "logps/rejected": -95.42603302001953,
670
+ "loss": 0.0007,
671
+ "rewards/accuracies": 0.75,
672
+ "rewards/chosen": 0.0311850905418396,
673
+ "rewards/margins": 0.010761396028101444,
674
+ "rewards/rejected": 0.02042369544506073,
675
+ "step": 420
676
+ },
677
+ {
678
+ "epoch": 0.86,
679
+ "learning_rate": 3.516841607689501e-06,
680
+ "logits/chosen": -2.824734926223755,
681
+ "logits/rejected": -2.556206226348877,
682
+ "logps/chosen": -108.12715911865234,
683
+ "logps/rejected": -96.86244201660156,
684
+ "loss": 0.001,
685
+ "rewards/accuracies": 0.6499999761581421,
686
+ "rewards/chosen": 0.032226406037807465,
687
+ "rewards/margins": 0.0070249298587441444,
688
+ "rewards/rejected": 0.025201475247740746,
689
+ "step": 430
690
+ },
691
+ {
692
+ "epoch": 0.88,
693
+ "learning_rate": 3.436516483539781e-06,
694
+ "logits/chosen": -2.926262617111206,
695
+ "logits/rejected": -2.665631055831909,
696
+ "logps/chosen": -61.947792053222656,
697
+ "logps/rejected": -50.202911376953125,
698
+ "loss": 0.0006,
699
+ "rewards/accuracies": 0.8999999761581421,
700
+ "rewards/chosen": 0.0329199843108654,
701
+ "rewards/margins": 0.02404285967350006,
702
+ "rewards/rejected": 0.00887712650001049,
703
+ "step": 440
704
+ },
705
+ {
706
+ "epoch": 0.9,
707
+ "learning_rate": 3.3550503583141726e-06,
708
+ "logits/chosen": -2.8114187717437744,
709
+ "logits/rejected": -2.5821549892425537,
710
+ "logps/chosen": -56.29111862182617,
711
+ "logps/rejected": -47.3681526184082,
712
+ "loss": 0.0013,
713
+ "rewards/accuracies": 0.550000011920929,
714
+ "rewards/chosen": 0.0194512028247118,
715
+ "rewards/margins": 0.006624855101108551,
716
+ "rewards/rejected": 0.012826347723603249,
717
+ "step": 450
718
+ },
719
+ {
720
+ "epoch": 0.92,
721
+ "learning_rate": 3.272542485937369e-06,
722
+ "logits/chosen": -2.826735019683838,
723
+ "logits/rejected": -2.5145363807678223,
724
+ "logps/chosen": -141.8760986328125,
725
+ "logps/rejected": -135.92344665527344,
726
+ "loss": 0.0016,
727
+ "rewards/accuracies": 0.550000011920929,
728
+ "rewards/chosen": 0.02360866777598858,
729
+ "rewards/margins": 0.004182119388133287,
730
+ "rewards/rejected": 0.019426550716161728,
731
+ "step": 460
732
+ },
733
+ {
734
+ "epoch": 0.94,
735
+ "learning_rate": 3.189093389542498e-06,
736
+ "logits/chosen": -2.9071967601776123,
737
+ "logits/rejected": -2.701315402984619,
738
+ "logps/chosen": -52.77317428588867,
739
+ "logps/rejected": -43.23090362548828,
740
+ "loss": 0.0008,
741
+ "rewards/accuracies": 0.6499999761581421,
742
+ "rewards/chosen": 0.028543706983327866,
743
+ "rewards/margins": 0.014000201597809792,
744
+ "rewards/rejected": 0.014543506316840649,
745
+ "step": 470
746
+ },
747
+ {
748
+ "epoch": 0.96,
749
+ "learning_rate": 3.1048047389991693e-06,
750
+ "logits/chosen": -2.944225788116455,
751
+ "logits/rejected": -2.7375664710998535,
752
+ "logps/chosen": -123.64131927490234,
753
+ "logps/rejected": -121.98396301269531,
754
+ "loss": 0.0007,
755
+ "rewards/accuracies": 0.800000011920929,
756
+ "rewards/chosen": 0.025555819272994995,
757
+ "rewards/margins": 0.01265828125178814,
758
+ "rewards/rejected": 0.012897538021206856,
759
+ "step": 480
760
+ },
761
+ {
762
+ "epoch": 0.98,
763
+ "learning_rate": 3.019779227044398e-06,
764
+ "logits/chosen": -2.7981390953063965,
765
+ "logits/rejected": -2.5953376293182373,
766
+ "logps/chosen": -100.98303985595703,
767
+ "logps/rejected": -113.0623550415039,
768
+ "loss": 0.0009,
769
+ "rewards/accuracies": 0.800000011920929,
770
+ "rewards/chosen": 0.021701429039239883,
771
+ "rewards/margins": 0.013312505558133125,
772
+ "rewards/rejected": 0.008388923481106758,
773
+ "step": 490
774
+ },
775
+ {
776
+ "epoch": 1.0,
777
+ "learning_rate": 2.9341204441673267e-06,
778
+ "logits/chosen": -2.7481579780578613,
779
+ "logits/rejected": -2.5245938301086426,
780
+ "logps/chosen": -215.2183074951172,
781
+ "logps/rejected": -199.92349243164062,
782
+ "loss": 0.0011,
783
+ "rewards/accuracies": 0.6000000238418579,
784
+ "rewards/chosen": 0.024366769939661026,
785
+ "rewards/margins": 0.014933845028281212,
786
+ "rewards/rejected": 0.00943292398005724,
787
+ "step": 500
788
+ },
789
+ {
790
+ "epoch": 1.0,
791
+ "eval_logits/chosen": -2.3392491340637207,
792
+ "eval_logits/rejected": -2.1454310417175293,
793
+ "eval_logps/chosen": -304.7457275390625,
794
+ "eval_logps/rejected": -278.41650390625,
795
+ "eval_loss": 0.0036536348052322865,
796
+ "eval_rewards/accuracies": 0.4519999921321869,
797
+ "eval_rewards/chosen": -0.05650419369339943,
798
+ "eval_rewards/margins": -0.0062644826248288155,
799
+ "eval_rewards/rejected": -0.05023970827460289,
800
+ "eval_runtime": 1422.9482,
801
+ "eval_samples_per_second": 1.406,
802
+ "eval_steps_per_second": 0.703,
803
+ "step": 500
804
+ },
805
+ {
806
+ "epoch": 1.02,
807
+ "learning_rate": 2.847932752400164e-06,
808
+ "logits/chosen": -2.8486416339874268,
809
+ "logits/rejected": -2.635902166366577,
810
+ "logps/chosen": -69.38902282714844,
811
+ "logps/rejected": -46.49626159667969,
812
+ "loss": 0.0112,
813
+ "rewards/accuracies": 0.8999999761581421,
814
+ "rewards/chosen": 0.062225647270679474,
815
+ "rewards/margins": 0.07529176026582718,
816
+ "rewards/rejected": -0.013066110201179981,
817
+ "step": 510
818
+ },
819
+ {
820
+ "epoch": 1.04,
821
+ "learning_rate": 2.761321158169134e-06,
822
+ "logits/chosen": -2.7715601921081543,
823
+ "logits/rejected": -2.531752347946167,
824
+ "logps/chosen": -59.361083984375,
825
+ "logps/rejected": -49.393150329589844,
826
+ "loss": 0.001,
827
+ "rewards/accuracies": 0.800000011920929,
828
+ "rewards/chosen": 0.018055101856589317,
829
+ "rewards/margins": 0.018152302131056786,
830
+ "rewards/rejected": -9.720241359900683e-05,
831
+ "step": 520
832
+ },
833
+ {
834
+ "epoch": 1.06,
835
+ "learning_rate": 2.6743911843603134e-06,
836
+ "logits/chosen": -2.88091778755188,
837
+ "logits/rejected": -2.714815139770508,
838
+ "logps/chosen": -75.8203353881836,
839
+ "logps/rejected": -91.77738952636719,
840
+ "loss": 0.0053,
841
+ "rewards/accuracies": 0.949999988079071,
842
+ "rewards/chosen": 0.03629200905561447,
843
+ "rewards/margins": 0.05072981119155884,
844
+ "rewards/rejected": -0.014437800273299217,
845
+ "step": 530
846
+ },
847
+ {
848
+ "epoch": 1.08,
849
+ "learning_rate": 2.587248741756253e-06,
850
+ "logits/chosen": -2.7633297443389893,
851
+ "logits/rejected": -2.4665675163269043,
852
+ "logps/chosen": -100.9280776977539,
853
+ "logps/rejected": -104.80192565917969,
854
+ "loss": 0.0037,
855
+ "rewards/accuracies": 0.6000000238418579,
856
+ "rewards/chosen": 0.0258990116417408,
857
+ "rewards/margins": 0.028545299544930458,
858
+ "rewards/rejected": -0.002646292094141245,
859
+ "step": 540
860
+ },
861
+ {
862
+ "epoch": 1.1,
863
+ "learning_rate": 2.5e-06,
864
+ "logits/chosen": -2.8614132404327393,
865
+ "logits/rejected": -2.539499282836914,
866
+ "logps/chosen": -58.6580810546875,
867
+ "logps/rejected": -53.3350715637207,
868
+ "loss": 0.0207,
869
+ "rewards/accuracies": 0.800000011920929,
870
+ "rewards/chosen": 0.06129790097475052,
871
+ "rewards/margins": 0.07703909277915955,
872
+ "rewards/rejected": -0.01574121043086052,
873
+ "step": 550
874
+ },
875
+ {
876
+ "epoch": 1.12,
877
+ "learning_rate": 2.4127512582437486e-06,
878
+ "logits/chosen": -2.688095808029175,
879
+ "logits/rejected": -2.4665043354034424,
880
+ "logps/chosen": -87.18086242675781,
881
+ "logps/rejected": -87.13175964355469,
882
+ "loss": 0.0036,
883
+ "rewards/accuracies": 0.6499999761581421,
884
+ "rewards/chosen": -0.012053154408931732,
885
+ "rewards/margins": 0.03777941316366196,
886
+ "rewards/rejected": -0.04983257129788399,
887
+ "step": 560
888
+ },
889
+ {
890
+ "epoch": 1.14,
891
+ "learning_rate": 2.325608815639687e-06,
892
+ "logits/chosen": -2.9195265769958496,
893
+ "logits/rejected": -2.6637749671936035,
894
+ "logps/chosen": -130.01551818847656,
895
+ "logps/rejected": -122.54390716552734,
896
+ "loss": 0.0058,
897
+ "rewards/accuracies": 0.699999988079071,
898
+ "rewards/chosen": 0.012607279233634472,
899
+ "rewards/margins": 0.04042843356728554,
900
+ "rewards/rejected": -0.02782115340232849,
901
+ "step": 570
902
+ },
903
+ {
904
+ "epoch": 1.16,
905
+ "learning_rate": 2.238678841830867e-06,
906
+ "logits/chosen": -2.6946258544921875,
907
+ "logits/rejected": -2.5128772258758545,
908
+ "logps/chosen": -143.2740478515625,
909
+ "logps/rejected": -139.3197479248047,
910
+ "loss": 0.0026,
911
+ "rewards/accuracies": 0.699999988079071,
912
+ "rewards/chosen": 0.0010408941889181733,
913
+ "rewards/margins": 0.011470312252640724,
914
+ "rewards/rejected": -0.010429417714476585,
915
+ "step": 580
916
+ },
917
+ {
918
+ "epoch": 1.18,
919
+ "learning_rate": 2.1520672475998374e-06,
920
+ "logits/chosen": -2.934413433074951,
921
+ "logits/rejected": -2.6382033824920654,
922
+ "logps/chosen": -97.65046691894531,
923
+ "logps/rejected": -84.37725830078125,
924
+ "loss": 0.0031,
925
+ "rewards/accuracies": 0.75,
926
+ "rewards/chosen": 0.02459452673792839,
927
+ "rewards/margins": 0.02938224747776985,
928
+ "rewards/rejected": -0.004787721671164036,
929
+ "step": 590
930
+ },
931
+ {
932
+ "epoch": 1.2,
933
+ "learning_rate": 2.0658795558326745e-06,
934
+ "logits/chosen": -2.9206016063690186,
935
+ "logits/rejected": -2.740886926651001,
936
+ "logps/chosen": -59.428001403808594,
937
+ "logps/rejected": -76.50398254394531,
938
+ "loss": 0.0116,
939
+ "rewards/accuracies": 0.800000011920929,
940
+ "rewards/chosen": 0.056895893067121506,
941
+ "rewards/margins": 0.06610508263111115,
942
+ "rewards/rejected": -0.00920918770134449,
943
+ "step": 600
944
+ },
945
+ {
946
+ "epoch": 1.2,
947
+ "eval_logits/chosen": -2.3448727130889893,
948
+ "eval_logits/rejected": -2.152658700942993,
949
+ "eval_logps/chosen": -301.92657470703125,
950
+ "eval_logps/rejected": -275.6791076660156,
951
+ "eval_loss": 0.004949736408889294,
952
+ "eval_rewards/accuracies": 0.44350001215934753,
953
+ "eval_rewards/chosen": -0.028312114998698235,
954
+ "eval_rewards/margins": -0.005446531809866428,
955
+ "eval_rewards/rejected": -0.02286558412015438,
956
+ "eval_runtime": 1423.4392,
957
+ "eval_samples_per_second": 1.405,
958
+ "eval_steps_per_second": 0.703,
959
+ "step": 600
960
+ },
961
+ {
962
+ "epoch": 1.22,
963
+ "learning_rate": 1.9802207729556023e-06,
964
+ "logits/chosen": -2.9324729442596436,
965
+ "logits/rejected": -2.5813498497009277,
966
+ "logps/chosen": -204.2996063232422,
967
+ "logps/rejected": -187.82083129882812,
968
+ "loss": 0.0123,
969
+ "rewards/accuracies": 0.699999988079071,
970
+ "rewards/chosen": -0.0035219527781009674,
971
+ "rewards/margins": -0.00877875555306673,
972
+ "rewards/rejected": 0.005256800912320614,
973
+ "step": 610
974
+ },
975
+ {
976
+ "epoch": 1.24,
977
+ "learning_rate": 1.895195261000831e-06,
978
+ "logits/chosen": -2.8246846199035645,
979
+ "logits/rejected": -2.683941602706909,
980
+ "logps/chosen": -59.7250862121582,
981
+ "logps/rejected": -53.9716682434082,
982
+ "loss": 0.0048,
983
+ "rewards/accuracies": 0.800000011920929,
984
+ "rewards/chosen": 0.055084872990846634,
985
+ "rewards/margins": 0.05010204762220383,
986
+ "rewards/rejected": 0.004982819315046072,
987
+ "step": 620
988
+ },
989
+ {
990
+ "epoch": 1.26,
991
+ "learning_rate": 1.8109066104575023e-06,
992
+ "logits/chosen": -2.9231362342834473,
993
+ "logits/rejected": -2.695604085922241,
994
+ "logps/chosen": -56.9963264465332,
995
+ "logps/rejected": -63.59523391723633,
996
+ "loss": 0.0045,
997
+ "rewards/accuracies": 0.75,
998
+ "rewards/chosen": 0.04905426502227783,
999
+ "rewards/margins": 0.04801047965884209,
1000
+ "rewards/rejected": 0.0010437825694680214,
1001
+ "step": 630
1002
+ },
1003
+ {
1004
+ "epoch": 1.28,
1005
+ "learning_rate": 1.7274575140626318e-06,
1006
+ "logits/chosen": -2.905579090118408,
1007
+ "logits/rejected": -2.642456531524658,
1008
+ "logps/chosen": -98.58207702636719,
1009
+ "logps/rejected": -93.85881805419922,
1010
+ "loss": 0.0013,
1011
+ "rewards/accuracies": 0.75,
1012
+ "rewards/chosen": 0.03151048347353935,
1013
+ "rewards/margins": 0.029562344774603844,
1014
+ "rewards/rejected": 0.0019481380004435778,
1015
+ "step": 640
1016
+ },
1017
+ {
1018
+ "epoch": 1.3,
1019
+ "learning_rate": 1.6449496416858285e-06,
1020
+ "logits/chosen": -3.0052525997161865,
1021
+ "logits/rejected": -2.7315216064453125,
1022
+ "logps/chosen": -89.30766296386719,
1023
+ "logps/rejected": -104.63911437988281,
1024
+ "loss": 0.0338,
1025
+ "rewards/accuracies": 0.75,
1026
+ "rewards/chosen": 0.0381547287106514,
1027
+ "rewards/margins": 0.0724136009812355,
1028
+ "rewards/rejected": -0.034258872270584106,
1029
+ "step": 650
1030
+ },
1031
+ {
1032
+ "epoch": 1.32,
1033
+ "learning_rate": 1.56348351646022e-06,
1034
+ "logits/chosen": -2.6560475826263428,
1035
+ "logits/rejected": -2.5114564895629883,
1036
+ "logps/chosen": -131.77236938476562,
1037
+ "logps/rejected": -127.79585266113281,
1038
+ "loss": 0.0026,
1039
+ "rewards/accuracies": 0.550000011920929,
1040
+ "rewards/chosen": 0.020144177600741386,
1041
+ "rewards/margins": 0.023866238072514534,
1042
+ "rewards/rejected": -0.003722056746482849,
1043
+ "step": 660
1044
+ },
1045
+ {
1046
+ "epoch": 1.34,
1047
+ "learning_rate": 1.4831583923105e-06,
1048
+ "logits/chosen": -2.8219382762908936,
1049
+ "logits/rejected": -2.446211814880371,
1050
+ "logps/chosen": -68.78748321533203,
1051
+ "logps/rejected": -53.5217170715332,
1052
+ "loss": 0.0027,
1053
+ "rewards/accuracies": 0.8500000238418579,
1054
+ "rewards/chosen": 0.04556558281183243,
1055
+ "rewards/margins": 0.04576994851231575,
1056
+ "rewards/rejected": -0.00020437035709619522,
1057
+ "step": 670
1058
+ },
1059
+ {
1060
+ "epoch": 1.36,
1061
+ "learning_rate": 1.4040721330273063e-06,
1062
+ "logits/chosen": -2.910677433013916,
1063
+ "logits/rejected": -2.7675867080688477,
1064
+ "logps/chosen": -59.946563720703125,
1065
+ "logps/rejected": -60.795616149902344,
1066
+ "loss": 0.0106,
1067
+ "rewards/accuracies": 0.800000011920929,
1068
+ "rewards/chosen": 0.05829508230090141,
1069
+ "rewards/margins": 0.04981726408004761,
1070
+ "rewards/rejected": 0.008477816358208656,
1071
+ "step": 680
1072
+ },
1073
+ {
1074
+ "epoch": 1.38,
1075
+ "learning_rate": 1.3263210930352737e-06,
1076
+ "logits/chosen": -2.8532004356384277,
1077
+ "logits/rejected": -2.6016685962677,
1078
+ "logps/chosen": -196.2278289794922,
1079
+ "logps/rejected": -204.3356170654297,
1080
+ "loss": 0.0076,
1081
+ "rewards/accuracies": 0.6499999761581421,
1082
+ "rewards/chosen": 0.008091023191809654,
1083
+ "rewards/margins": 0.023959284648299217,
1084
+ "rewards/rejected": -0.015868261456489563,
1085
+ "step": 690
1086
+ },
1087
+ {
1088
+ "epoch": 1.4,
1089
+ "learning_rate": 1.2500000000000007e-06,
1090
+ "logits/chosen": -2.841736316680908,
1091
+ "logits/rejected": -2.5827274322509766,
1092
+ "logps/chosen": -52.04328155517578,
1093
+ "logps/rejected": -62.17645263671875,
1094
+ "loss": 0.015,
1095
+ "rewards/accuracies": 0.699999988079071,
1096
+ "rewards/chosen": 0.026109540835022926,
1097
+ "rewards/margins": 0.010281763039529324,
1098
+ "rewards/rejected": 0.015827778726816177,
1099
+ "step": 700
1100
+ },
1101
+ {
1102
+ "epoch": 1.4,
1103
+ "eval_logits/chosen": -2.3585643768310547,
1104
+ "eval_logits/rejected": -2.164954662322998,
1105
+ "eval_logps/chosen": -301.7041320800781,
1106
+ "eval_logps/rejected": -275.2170104980469,
1107
+ "eval_loss": 0.006464843638241291,
1108
+ "eval_rewards/accuracies": 0.4449999928474426,
1109
+ "eval_rewards/chosen": -0.026087837293744087,
1110
+ "eval_rewards/margins": -0.007843218743801117,
1111
+ "eval_rewards/rejected": -0.01824462041258812,
1112
+ "eval_runtime": 1422.5043,
1113
+ "eval_samples_per_second": 1.406,
1114
+ "eval_steps_per_second": 0.703,
1115
+ "step": 700
1116
+ },
1117
+ {
1118
+ "epoch": 1.42,
1119
+ "learning_rate": 1.1752018394169882e-06,
1120
+ "logits/chosen": -2.8012290000915527,
1121
+ "logits/rejected": -2.348876476287842,
1122
+ "logps/chosen": -50.55641555786133,
1123
+ "logps/rejected": -46.36492156982422,
1124
+ "loss": 0.0033,
1125
+ "rewards/accuracies": 0.8999999761581421,
1126
+ "rewards/chosen": 0.0446331761777401,
1127
+ "rewards/margins": 0.04935276508331299,
1128
+ "rewards/rejected": -0.004719586111605167,
1129
+ "step": 710
1130
+ },
1131
+ {
1132
+ "epoch": 1.44,
1133
+ "learning_rate": 1.1020177413231334e-06,
1134
+ "logits/chosen": -2.766397476196289,
1135
+ "logits/rejected": -2.423826217651367,
1136
+ "logps/chosen": -111.70279693603516,
1137
+ "logps/rejected": -129.45628356933594,
1138
+ "loss": 0.0049,
1139
+ "rewards/accuracies": 0.949999988079071,
1140
+ "rewards/chosen": 0.04479371756315231,
1141
+ "rewards/margins": 0.06533690541982651,
1142
+ "rewards/rejected": -0.020543191581964493,
1143
+ "step": 720
1144
+ },
1145
+ {
1146
+ "epoch": 1.46,
1147
+ "learning_rate": 1.0305368692688175e-06,
1148
+ "logits/chosen": -2.9610862731933594,
1149
+ "logits/rejected": -2.7894320487976074,
1150
+ "logps/chosen": -86.23368835449219,
1151
+ "logps/rejected": -86.16008758544922,
1152
+ "loss": 0.0027,
1153
+ "rewards/accuracies": 0.8999999761581421,
1154
+ "rewards/chosen": 0.04561948776245117,
1155
+ "rewards/margins": 0.04706469178199768,
1156
+ "rewards/rejected": -0.0014452062314376235,
1157
+ "step": 730
1158
+ },
1159
+ {
1160
+ "epoch": 1.48,
1161
+ "learning_rate": 9.608463116858544e-07,
1162
+ "logits/chosen": -2.8900036811828613,
1163
+ "logits/rejected": -2.4443981647491455,
1164
+ "logps/chosen": -70.54168701171875,
1165
+ "logps/rejected": -50.89817428588867,
1166
+ "loss": 0.0013,
1167
+ "rewards/accuracies": 0.949999988079071,
1168
+ "rewards/chosen": 0.031780801713466644,
1169
+ "rewards/margins": 0.038311202079057693,
1170
+ "rewards/rejected": -0.006530401296913624,
1171
+ "step": 740
1172
+ },
1173
+ {
1174
+ "epoch": 1.5,
1175
+ "learning_rate": 8.930309757836517e-07,
1176
+ "logits/chosen": -2.9060871601104736,
1177
+ "logits/rejected": -2.524108648300171,
1178
+ "logps/chosen": -61.922813415527344,
1179
+ "logps/rejected": -65.15422821044922,
1180
+ "loss": 0.0056,
1181
+ "rewards/accuracies": 0.8500000238418579,
1182
+ "rewards/chosen": 0.0495627298951149,
1183
+ "rewards/margins": 0.04960542917251587,
1184
+ "rewards/rejected": -4.269964847480878e-05,
1185
+ "step": 750
1186
+ },
1187
+ {
1188
+ "epoch": 1.52,
1189
+ "learning_rate": 8.271734841028553e-07,
1190
+ "logits/chosen": -2.644831657409668,
1191
+ "logits/rejected": -2.305298328399658,
1192
+ "logps/chosen": -67.29566955566406,
1193
+ "logps/rejected": -63.685523986816406,
1194
+ "loss": 0.0035,
1195
+ "rewards/accuracies": 0.800000011920929,
1196
+ "rewards/chosen": 0.04994013160467148,
1197
+ "rewards/margins": 0.052011966705322266,
1198
+ "rewards/rejected": -0.002071837428957224,
1199
+ "step": 760
1200
+ },
1201
+ {
1202
+ "epoch": 1.54,
1203
+ "learning_rate": 7.633540738525066e-07,
1204
+ "logits/chosen": -2.9390599727630615,
1205
+ "logits/rejected": -2.688645839691162,
1206
+ "logps/chosen": -65.12007904052734,
1207
+ "logps/rejected": -62.56877517700195,
1208
+ "loss": 0.1119,
1209
+ "rewards/accuracies": 0.8500000238418579,
1210
+ "rewards/chosen": 0.05300932377576828,
1211
+ "rewards/margins": 0.12731197476387024,
1212
+ "rewards/rejected": -0.07430265843868256,
1213
+ "step": 770
1214
+ },
1215
+ {
1216
+ "epoch": 1.56,
1217
+ "learning_rate": 7.016504991533727e-07,
1218
+ "logits/chosen": -2.625112295150757,
1219
+ "logits/rejected": -2.447942018508911,
1220
+ "logps/chosen": -58.0603141784668,
1221
+ "logps/rejected": -50.18201446533203,
1222
+ "loss": 0.0028,
1223
+ "rewards/accuracies": 0.699999988079071,
1224
+ "rewards/chosen": 0.009573986753821373,
1225
+ "rewards/margins": 0.0028710602782666683,
1226
+ "rewards/rejected": 0.006702926941215992,
1227
+ "step": 780
1228
+ },
1229
+ {
1230
+ "epoch": 1.58,
1231
+ "learning_rate": 6.421379363065142e-07,
1232
+ "logits/chosen": -2.982449769973755,
1233
+ "logits/rejected": -2.7615859508514404,
1234
+ "logps/chosen": -51.4494743347168,
1235
+ "logps/rejected": -57.383644104003906,
1236
+ "loss": 0.0061,
1237
+ "rewards/accuracies": 0.800000011920929,
1238
+ "rewards/chosen": 0.05977553874254227,
1239
+ "rewards/margins": 0.05702256038784981,
1240
+ "rewards/rejected": 0.002752984408289194,
1241
+ "step": 790
1242
+ },
1243
+ {
1244
+ "epoch": 1.6,
1245
+ "learning_rate": 5.848888922025553e-07,
1246
+ "logits/chosen": -2.7822391986846924,
1247
+ "logits/rejected": -2.5350170135498047,
1248
+ "logps/chosen": -43.52353286743164,
1249
+ "logps/rejected": -54.28175735473633,
1250
+ "loss": 0.0009,
1251
+ "rewards/accuracies": 0.949999988079071,
1252
+ "rewards/chosen": 0.03572973236441612,
1253
+ "rewards/margins": 0.035435039550065994,
1254
+ "rewards/rejected": 0.0002946966851595789,
1255
+ "step": 800
1256
+ },
1257
+ {
1258
+ "epoch": 1.6,
1259
+ "eval_logits/chosen": -2.3689210414886475,
1260
+ "eval_logits/rejected": -2.174628496170044,
1261
+ "eval_logps/chosen": -298.3010559082031,
1262
+ "eval_logps/rejected": -272.15399169921875,
1263
+ "eval_loss": 0.006914817728102207,
1264
+ "eval_rewards/accuracies": 0.47200000286102295,
1265
+ "eval_rewards/chosen": 0.007942860946059227,
1266
+ "eval_rewards/margins": -0.0044423178769648075,
1267
+ "eval_rewards/rejected": 0.012385179288685322,
1268
+ "eval_runtime": 1422.6658,
1269
+ "eval_samples_per_second": 1.406,
1270
+ "eval_steps_per_second": 0.703,
1271
+ "step": 800
1272
+ },
1273
+ {
1274
+ "epoch": 1.62,
1275
+ "learning_rate": 5.299731159831953e-07,
1276
+ "logits/chosen": -2.868394136428833,
1277
+ "logits/rejected": -2.6725270748138428,
1278
+ "logps/chosen": -113.2739486694336,
1279
+ "logps/rejected": -118.2945327758789,
1280
+ "loss": 0.0086,
1281
+ "rewards/accuracies": 0.8500000238418579,
1282
+ "rewards/chosen": 0.050359614193439484,
1283
+ "rewards/margins": 0.05471315234899521,
1284
+ "rewards/rejected": -0.004353542346507311,
1285
+ "step": 810
1286
+ },
1287
+ {
1288
+ "epoch": 1.64,
1289
+ "learning_rate": 4.774575140626317e-07,
1290
+ "logits/chosen": -2.7905325889587402,
1291
+ "logits/rejected": -2.5390892028808594,
1292
+ "logps/chosen": -61.987098693847656,
1293
+ "logps/rejected": -72.47862243652344,
1294
+ "loss": 0.0051,
1295
+ "rewards/accuracies": 0.800000011920929,
1296
+ "rewards/chosen": 0.035242609679698944,
1297
+ "rewards/margins": 0.04513431712985039,
1298
+ "rewards/rejected": -0.009891709312796593,
1299
+ "step": 820
1300
+ },
1301
+ {
1302
+ "epoch": 1.66,
1303
+ "learning_rate": 4.27406068612396e-07,
1304
+ "logits/chosen": -2.744027614593506,
1305
+ "logits/rejected": -2.451847553253174,
1306
+ "logps/chosen": -94.64505767822266,
1307
+ "logps/rejected": -86.02244567871094,
1308
+ "loss": 0.0009,
1309
+ "rewards/accuracies": 0.800000011920929,
1310
+ "rewards/chosen": 0.022839754819869995,
1311
+ "rewards/margins": 0.03421180695295334,
1312
+ "rewards/rejected": -0.011372053064405918,
1313
+ "step": 830
1314
+ },
1315
+ {
1316
+ "epoch": 1.68,
1317
+ "learning_rate": 3.798797596089351e-07,
1318
+ "logits/chosen": -2.88804292678833,
1319
+ "logits/rejected": -2.348177194595337,
1320
+ "logps/chosen": -62.28667068481445,
1321
+ "logps/rejected": -47.92333221435547,
1322
+ "loss": 0.0024,
1323
+ "rewards/accuracies": 0.8999999761581421,
1324
+ "rewards/chosen": 0.031227191910147667,
1325
+ "rewards/margins": 0.039407771080732346,
1326
+ "rewards/rejected": -0.008180581033229828,
1327
+ "step": 840
1328
+ },
1329
+ {
1330
+ "epoch": 1.7,
1331
+ "learning_rate": 3.3493649053890325e-07,
1332
+ "logits/chosen": -2.8707077503204346,
1333
+ "logits/rejected": -2.6572961807250977,
1334
+ "logps/chosen": -97.13041687011719,
1335
+ "logps/rejected": -102.47623443603516,
1336
+ "loss": 0.0026,
1337
+ "rewards/accuracies": 0.75,
1338
+ "rewards/chosen": 0.015258235856890678,
1339
+ "rewards/margins": 0.032782141119241714,
1340
+ "rewards/rejected": -0.017523903399705887,
1341
+ "step": 850
1342
+ },
1343
+ {
1344
+ "epoch": 1.72,
1345
+ "learning_rate": 2.9263101785268253e-07,
1346
+ "logits/chosen": -2.6006197929382324,
1347
+ "logits/rejected": -2.57403826713562,
1348
+ "logps/chosen": -182.5235595703125,
1349
+ "logps/rejected": -166.66905212402344,
1350
+ "loss": 0.001,
1351
+ "rewards/accuracies": 0.6000000238418579,
1352
+ "rewards/chosen": 0.0023535334039479494,
1353
+ "rewards/margins": 0.01678496040403843,
1354
+ "rewards/rejected": -0.0144314244389534,
1355
+ "step": 860
1356
+ },
1357
+ {
1358
+ "epoch": 1.74,
1359
+ "learning_rate": 2.53014884252083e-07,
1360
+ "logits/chosen": -2.9139671325683594,
1361
+ "logits/rejected": -2.8268768787384033,
1362
+ "logps/chosen": -132.52719116210938,
1363
+ "logps/rejected": -130.03152465820312,
1364
+ "loss": 0.0014,
1365
+ "rewards/accuracies": 0.8500000238418579,
1366
+ "rewards/chosen": 0.038164813071489334,
1367
+ "rewards/margins": 0.03188065439462662,
1368
+ "rewards/rejected": 0.006284159608185291,
1369
+ "step": 870
1370
+ },
1371
+ {
1372
+ "epoch": 1.76,
1373
+ "learning_rate": 2.1613635589349756e-07,
1374
+ "logits/chosen": -2.772710084915161,
1375
+ "logits/rejected": -2.3980655670166016,
1376
+ "logps/chosen": -47.05355453491211,
1377
+ "logps/rejected": -44.157875061035156,
1378
+ "loss": 0.0034,
1379
+ "rewards/accuracies": 0.8500000238418579,
1380
+ "rewards/chosen": 0.03233030438423157,
1381
+ "rewards/margins": 0.03725982457399368,
1382
+ "rewards/rejected": -0.00492951599881053,
1383
+ "step": 880
1384
+ },
1385
+ {
1386
+ "epoch": 1.78,
1387
+ "learning_rate": 1.8204036358303173e-07,
1388
+ "logits/chosen": -2.9192633628845215,
1389
+ "logits/rejected": -2.627864360809326,
1390
+ "logps/chosen": -70.42695617675781,
1391
+ "logps/rejected": -99.09819030761719,
1392
+ "loss": 0.1069,
1393
+ "rewards/accuracies": 0.8500000238418579,
1394
+ "rewards/chosen": 0.06590764224529266,
1395
+ "rewards/margins": 0.1522078961133957,
1396
+ "rewards/rejected": -0.08630025386810303,
1397
+ "step": 890
1398
+ },
1399
+ {
1400
+ "epoch": 1.8,
1401
+ "learning_rate": 1.507684480352292e-07,
1402
+ "logits/chosen": -2.7338740825653076,
1403
+ "logits/rejected": -2.4327640533447266,
1404
+ "logps/chosen": -48.297508239746094,
1405
+ "logps/rejected": -40.352996826171875,
1406
+ "loss": 0.0109,
1407
+ "rewards/accuracies": 0.699999988079071,
1408
+ "rewards/chosen": 0.037285320460796356,
1409
+ "rewards/margins": 0.0314871221780777,
1410
+ "rewards/rejected": 0.005798191763460636,
1411
+ "step": 900
1412
+ },
1413
+ {
1414
+ "epoch": 1.8,
1415
+ "eval_logits/chosen": -2.368154764175415,
1416
+ "eval_logits/rejected": -2.1738698482513428,
1417
+ "eval_logps/chosen": -298.8583068847656,
1418
+ "eval_logps/rejected": -272.5880432128906,
1419
+ "eval_loss": 0.006868657190352678,
1420
+ "eval_rewards/accuracies": 0.4569999873638153,
1421
+ "eval_rewards/chosen": 0.0023701086174696684,
1422
+ "eval_rewards/margins": -0.005675030872225761,
1423
+ "eval_rewards/rejected": 0.008045138791203499,
1424
+ "eval_runtime": 1423.7285,
1425
+ "eval_samples_per_second": 1.405,
1426
+ "eval_steps_per_second": 0.702,
1427
+ "step": 900
1428
+ },
1429
+ {
1430
+ "epoch": 1.82,
1431
+ "learning_rate": 1.223587092621162e-07,
1432
+ "logits/chosen": -3.062129497528076,
1433
+ "logits/rejected": -2.726337432861328,
1434
+ "logps/chosen": -103.9426040649414,
1435
+ "logps/rejected": -109.22408294677734,
1436
+ "loss": 0.0132,
1437
+ "rewards/accuracies": 0.6499999761581421,
1438
+ "rewards/chosen": 0.01760241389274597,
1439
+ "rewards/margins": 0.04251215234398842,
1440
+ "rewards/rejected": -0.024909736588597298,
1441
+ "step": 910
1442
+ },
1443
+ {
1444
+ "epoch": 1.84,
1445
+ "learning_rate": 9.684576015420277e-08,
1446
+ "logits/chosen": -2.8683624267578125,
1447
+ "logits/rejected": -2.5635323524475098,
1448
+ "logps/chosen": -132.88455200195312,
1449
+ "logps/rejected": -131.37045288085938,
1450
+ "loss": 0.0063,
1451
+ "rewards/accuracies": 0.699999988079071,
1452
+ "rewards/chosen": 0.03422384709119797,
1453
+ "rewards/margins": 0.05052335187792778,
1454
+ "rewards/rejected": -0.016299499198794365,
1455
+ "step": 920
1456
+ },
1457
+ {
1458
+ "epoch": 1.86,
1459
+ "learning_rate": 7.426068431000883e-08,
1460
+ "logits/chosen": -2.763186454772949,
1461
+ "logits/rejected": -2.4577767848968506,
1462
+ "logps/chosen": -62.349082946777344,
1463
+ "logps/rejected": -48.44455337524414,
1464
+ "loss": 0.0027,
1465
+ "rewards/accuracies": 0.949999988079071,
1466
+ "rewards/chosen": 0.03767557442188263,
1467
+ "rewards/margins": 0.04414733499288559,
1468
+ "rewards/rejected": -0.006471761967986822,
1469
+ "step": 930
1470
+ },
1471
+ {
1472
+ "epoch": 1.88,
1473
+ "learning_rate": 5.463099816548578e-08,
1474
+ "logits/chosen": -2.749911308288574,
1475
+ "logits/rejected": -2.5365560054779053,
1476
+ "logps/chosen": -109.21867370605469,
1477
+ "logps/rejected": -97.2970962524414,
1478
+ "loss": 0.0013,
1479
+ "rewards/accuracies": 0.75,
1480
+ "rewards/chosen": 0.03286752849817276,
1481
+ "rewards/margins": 0.033335305750370026,
1482
+ "rewards/rejected": -0.00046777556417509913,
1483
+ "step": 940
1484
+ },
1485
+ {
1486
+ "epoch": 1.9,
1487
+ "learning_rate": 3.798061746947995e-08,
1488
+ "logits/chosen": -2.9467415809631348,
1489
+ "logits/rejected": -2.6370301246643066,
1490
+ "logps/chosen": -142.62974548339844,
1491
+ "logps/rejected": -140.1845703125,
1492
+ "loss": 0.0015,
1493
+ "rewards/accuracies": 0.800000011920929,
1494
+ "rewards/chosen": 0.03325565904378891,
1495
+ "rewards/margins": 0.027630716562271118,
1496
+ "rewards/rejected": 0.005624941550195217,
1497
+ "step": 950
1498
+ },
1499
+ {
1500
+ "epoch": 1.92,
1501
+ "learning_rate": 2.4329828146074096e-08,
1502
+ "logits/chosen": -2.7907421588897705,
1503
+ "logits/rejected": -2.5240225791931152,
1504
+ "logps/chosen": -112.5775146484375,
1505
+ "logps/rejected": -115.86857604980469,
1506
+ "loss": 0.0156,
1507
+ "rewards/accuracies": 0.6499999761581421,
1508
+ "rewards/chosen": 0.012037028558552265,
1509
+ "rewards/margins": -0.006716550327837467,
1510
+ "rewards/rejected": 0.01875358074903488,
1511
+ "step": 960
1512
+ },
1513
+ {
1514
+ "epoch": 1.94,
1515
+ "learning_rate": 1.3695261579316776e-08,
1516
+ "logits/chosen": -3.0265414714813232,
1517
+ "logits/rejected": -2.6483359336853027,
1518
+ "logps/chosen": -85.69535064697266,
1519
+ "logps/rejected": -82.9826889038086,
1520
+ "loss": 0.0296,
1521
+ "rewards/accuracies": 0.6499999761581421,
1522
+ "rewards/chosen": 0.013691084459424019,
1523
+ "rewards/margins": 0.002223056508228183,
1524
+ "rewards/rejected": 0.011468032374978065,
1525
+ "step": 970
1526
+ },
1527
+ {
1528
+ "epoch": 1.96,
1529
+ "learning_rate": 6.089874350439507e-09,
1530
+ "logits/chosen": -2.6236181259155273,
1531
+ "logits/rejected": -2.498073101043701,
1532
+ "logps/chosen": -81.69920349121094,
1533
+ "logps/rejected": -90.76789855957031,
1534
+ "loss": 0.0062,
1535
+ "rewards/accuracies": 0.699999988079071,
1536
+ "rewards/chosen": 0.018706561997532845,
1537
+ "rewards/margins": 0.04268919676542282,
1538
+ "rewards/rejected": -0.023982631042599678,
1539
+ "step": 980
1540
+ },
1541
+ {
1542
+ "epoch": 1.98,
1543
+ "learning_rate": 1.5229324522605949e-09,
1544
+ "logits/chosen": -2.952378034591675,
1545
+ "logits/rejected": -2.7012767791748047,
1546
+ "logps/chosen": -114.75297546386719,
1547
+ "logps/rejected": -98.20231628417969,
1548
+ "loss": 0.0073,
1549
+ "rewards/accuracies": 0.6000000238418579,
1550
+ "rewards/chosen": 0.020036840811371803,
1551
+ "rewards/margins": 0.031693797558546066,
1552
+ "rewards/rejected": -0.011656956747174263,
1553
+ "step": 990
1554
+ },
1555
+ {
1556
+ "epoch": 2.0,
1557
+ "learning_rate": 0.0,
1558
+ "logits/chosen": -2.9289968013763428,
1559
+ "logits/rejected": -2.6982641220092773,
1560
+ "logps/chosen": -100.9759750366211,
1561
+ "logps/rejected": -94.3797607421875,
1562
+ "loss": 0.0015,
1563
+ "rewards/accuracies": 0.699999988079071,
1564
+ "rewards/chosen": 0.02024517022073269,
1565
+ "rewards/margins": 0.02688099816441536,
1566
+ "rewards/rejected": -0.006635826081037521,
1567
+ "step": 1000
1568
+ },
1569
+ {
1570
+ "epoch": 2.0,
1571
+ "eval_logits/chosen": -2.3691599369049072,
1572
+ "eval_logits/rejected": -2.1749157905578613,
1573
+ "eval_logps/chosen": -298.8498229980469,
1574
+ "eval_logps/rejected": -272.58660888671875,
1575
+ "eval_loss": 0.006900906562805176,
1576
+ "eval_rewards/accuracies": 0.4595000147819519,
1577
+ "eval_rewards/chosen": 0.002455136040225625,
1578
+ "eval_rewards/margins": -0.005604185629636049,
1579
+ "eval_rewards/rejected": 0.008059320971369743,
1580
+ "eval_runtime": 1422.3751,
1581
+ "eval_samples_per_second": 1.406,
1582
+ "eval_steps_per_second": 0.703,
1583
+ "step": 1000
1584
+ },
1585
+ {
1586
+ "epoch": 2.0,
1587
+ "step": 1000,
1588
+ "total_flos": 0.0,
1589
+ "train_loss": 0.005910432943725027,
1590
+ "train_runtime": 19053.6119,
1591
+ "train_samples_per_second": 0.105,
1592
+ "train_steps_per_second": 0.052
1593
+ }
1594
+ ],
1595
+ "logging_steps": 10,
1596
+ "max_steps": 1000,
1597
+ "num_input_tokens_seen": 0,
1598
+ "num_train_epochs": 2,
1599
+ "save_steps": 100,
1600
+ "total_flos": 0.0,
1601
+ "train_batch_size": 1,
1602
+ "trial_name": null,
1603
+ "trial_params": null
1604
+ }
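
The block above is the tail of the `log_history` array in `trainer_state.json` (per-step training logs, the periodic evaluations at steps 800, 900, and 1000, and the trainer's final summary fields). A minimal sketch of how one might read this file back and summarize the logged reward margins is shown below; the relative path `trainer_state.json` is an assumption, and this script is not part of the commit itself:

```python
import json

# Minimal sketch (assumption: trainer_state.json from this commit is in the
# working directory). Split log_history into training and evaluation entries.
with open("trainer_state.json") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]       # per-step training logs
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]   # periodic evaluations

mean_margin = sum(e["rewards/margins"] for e in train_logs) / len(train_logs)
print(f"{len(train_logs)} training log entries, mean rewards/margins = {mean_margin:.4f}")

for e in eval_logs:
    print(f"step {e['step']:4d}: eval_loss = {e['eval_loss']:.4f}, "
          f"eval_rewards/margins = {e['eval_rewards/margins']:.4f}")
```

Note that the per-batch training margins logged above are mostly positive, while the held-out evaluation margins at steps 900 and 1000 are slightly negative (about -0.0057 and -0.0056).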