jikaixuan committed on
Commit d10a046
1 Parent(s): e540398

Model save
README.md CHANGED
@@ -14,6 +14,18 @@ should probably proofread and complete it, then remove this comment. -->
  # zephyr-ds
 
  This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the None dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.5199
+ - Rewards/chosen: -0.1238
+ - Rewards/rejected: -1.1258
+ - Rewards/accuracies: 0.7300
+ - Rewards/margins: 1.0020
+ - Logps/rejected: -270.5574
+ - Logps/chosen: -285.4951
+ - Logits/rejected: -2.8178
+ - Logits/chosen: -2.8221
+ - Use Label: 0.0
+ - Pred Label: 0.0
 
  ## Model description
 
@@ -32,14 +44,14 @@ More information needed
  ### Training hyperparameters
 
  The following hyperparameters were used during training:
- - learning_rate: 1e-05
+ - learning_rate: 5e-05
  - train_batch_size: 4
  - eval_batch_size: 4
  - seed: 42
  - distributed_type: multi-GPU
  - num_devices: 4
- - gradient_accumulation_steps: 4
- - total_train_batch_size: 64
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 128
  - total_eval_batch_size: 16
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: linear
@@ -48,6 +60,9 @@ The following hyperparameters were used during training:
 
  ### Training results
 
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | Use Label | Pred Label |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|:---------:|:----------:|
+ | 0.4951 | 1.0 | 477 | 0.5199 | -0.1238 | -1.1258 | 0.7300 | 1.0020 | -270.5574 | -285.4951 | -2.8178 | -2.8221 | 0.0 | 0.0 |
 
 
  ### Framework versions
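
The updated hyperparameters are internally consistent: 4 samples per device × 4 GPUs × 8 gradient-accumulation steps gives the reported total_train_batch_size of 128. The training script itself is not part of this commit, so the sketch below is only a hedged illustration of how these values would map onto the generic `transformers.TrainingArguments` API (the argument names and `output_dir` here are assumptions, not a reproduction of the actual run):

```python
# Minimal sketch, not the original training script: maps the README's
# hyperparameters onto the standard transformers.TrainingArguments API.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="zephyr-ds",            # assumed output path
    learning_rate=5e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=8,
    num_train_epochs=1,
    lr_scheduler_type="linear",
    seed=42,
    logging_steps=10,
    save_steps=50,
)

# Effective train batch size across the 4 GPUs:
assert 4 * 4 * 8 == 128  # per-device batch * num_devices * grad accumulation
```
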
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:58202970dc92d0fb4bad6632a3b733fcc28228fbebdc3e657e3a048835bace33
+ oid sha256:113769ca75081b006e5afbb71ddc0c5d371f9107f8efec1c029f3129a997de8f
  size 218138576
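
adapter_model.safetensors is a Git LFS pointer to what is, by its filename, a PEFT adapter of about 218 MB rather than full model weights, so the base model named in the card has to be loaded first and the adapter applied on top. A hedged sketch (the `jikaixuan/zephyr-ds` repo id is inferred from the commit author and model name, not stated in the card):

```python
# Sketch only: load the SFT base model, then apply this repo's adapter.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "alignment-handbook/zephyr-7b-sft-full"
adapter_id = "jikaixuan/zephyr-ds"  # assumed repo id; adjust as needed

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base, adapter_id)
model.eval()
```
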
all_results.json CHANGED
@@ -1,8 +1,23 @@
  {
  "epoch": 1.0,
- "train_loss": 0.6931471824645996,
- "train_runtime": 27.1859,
- "train_samples": 61,
- "train_samples_per_second": 2.244,
- "train_steps_per_second": 0.037
+ "eval_logits/chosen": -2.822110891342163,
+ "eval_logits/rejected": -2.8178136348724365,
+ "eval_logps/chosen": -285.4951171875,
+ "eval_logps/rejected": -270.55743408203125,
+ "eval_loss": 0.5198934078216553,
+ "eval_pred_label": 0.0,
+ "eval_rewards/accuracies": 0.7300000190734863,
+ "eval_rewards/chosen": -0.12377375364303589,
+ "eval_rewards/margins": 1.001997470855713,
+ "eval_rewards/rejected": -1.1257712841033936,
+ "eval_runtime": 453.8128,
+ "eval_samples": 2000,
+ "eval_samples_per_second": 4.407,
+ "eval_steps_per_second": 0.275,
+ "eval_use_label": 0.0,
+ "train_loss": 0.5321606655040877,
+ "train_runtime": 24451.2028,
+ "train_samples": 61135,
+ "train_samples_per_second": 2.5,
+ "train_steps_per_second": 0.02
  }
eval_results.json CHANGED
@@ -1,18 +1,18 @@
  {
  "epoch": 1.0,
- "eval_logits/chosen": -2.819918155670166,
- "eval_logits/rejected": -2.7931315898895264,
- "eval_logps/chosen": -319.5545654296875,
- "eval_logps/rejected": -329.6432189941406,
- "eval_loss": 0.19092892110347748,
- "eval_pred_label": 19507.943359375,
- "eval_rewards/accuracies": 0.6940000057220459,
- "eval_rewards/chosen": -3.757920265197754,
- "eval_rewards/margins": 2.9776601791381836,
- "eval_rewards/rejected": -6.735579967498779,
- "eval_runtime": 855.9781,
+ "eval_logits/chosen": -2.822110891342163,
+ "eval_logits/rejected": -2.8178136348724365,
+ "eval_logps/chosen": -285.4951171875,
+ "eval_logps/rejected": -270.55743408203125,
+ "eval_loss": 0.5198934078216553,
+ "eval_pred_label": 0.0,
+ "eval_rewards/accuracies": 0.7300000190734863,
+ "eval_rewards/chosen": -0.12377375364303589,
+ "eval_rewards/margins": 1.001997470855713,
+ "eval_rewards/rejected": -1.1257712841033936,
+ "eval_runtime": 453.8128,
  "eval_samples": 2000,
- "eval_samples_per_second": 2.337,
- "eval_steps_per_second": 0.292,
- "eval_use_label": 12554.0556640625
+ "eval_samples_per_second": 4.407,
+ "eval_steps_per_second": 0.275,
+ "eval_use_label": 0.0
  }
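
The refreshed evaluation numbers are self-consistent: eval_rewards/margins equals eval_rewards/chosen minus eval_rewards/rejected (-0.1238 - (-1.1258) ≈ 1.0020). A small sketch that re-reads the committed file from a local checkout and checks that relationship:

```python
# Sketch: sanity-check the DPO-style reward metrics in eval_results.json.
import json

with open("eval_results.json") as f:
    ev = json.load(f)

margin = ev["eval_rewards/chosen"] - ev["eval_rewards/rejected"]
assert abs(margin - ev["eval_rewards/margins"]) < 1e-3

print(f"loss={ev['eval_loss']:.4f} "
      f"accuracy={ev['eval_rewards/accuracies']:.3f} "
      f"margin={margin:.4f}")
```
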
runs/Mar25_22-32-18_uclaml04.cs.ucla.edu/events.out.tfevents.1711431194.uclaml04.cs.ucla.edu.282670.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ef3a2ae4c8df933ddf5a4b3e5752849714e7c9e65d45e25092f6b2c2154e54fe
- size 38440
+ oid sha256:8bd6d8d978f8970690763999b8c0c13245b54fa582a289d7997c052e40bbf6a4
+ size 41127
runs/Mar25_22-32-18_uclaml04.cs.ucla.edu/events.out.tfevents.1711456098.uclaml04.cs.ucla.edu.282670.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9902476a403d5b482d88dcbfb9fac9a6a9bda2c52b4c63ce321cf0b3e5e6b684
+ size 935
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 1.0,
- "train_loss": 0.6931471824645996,
- "train_runtime": 27.1859,
- "train_samples": 61,
- "train_samples_per_second": 2.244,
- "train_steps_per_second": 0.037
+ "train_loss": 0.5321606655040877,
+ "train_runtime": 24451.2028,
+ "train_samples": 61135,
+ "train_samples_per_second": 2.5,
+ "train_steps_per_second": 0.02
  }
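
The new training totals also line up: 61,135 samples over 24,451 seconds is roughly 2.5 samples per second, and the run's 477 optimizer steps over the same runtime round to the reported 0.02 steps per second:

```python
# Sketch: verify the throughput figures reported in train_results.json.
train_samples = 61135
train_runtime = 24451.2028  # seconds
global_steps = 477          # from trainer_state.json

print(round(train_samples / train_runtime, 2))  # 2.5 samples/s
print(round(global_steps / train_runtime, 2))   # 0.02 steps/s
```
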
trainer_state.json CHANGED
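
The diff below replaces the placeholder single-step state with the full log_history of the 477-step run: a training log every 10 steps, the step-477 evaluation, and the final training summary. If you want the loss or reward-margin trajectory without TensorBoard, the entries can be read straight from the committed file; a minimal sketch, assuming a local checkout:

```python
# Sketch: extract the training-loss curve from trainer_state.json.
# Training entries carry a "loss" key; the evaluation entry uses "eval_loss"
# and the final summary uses "train_loss", so both are skipped here.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        print(entry["step"], entry["loss"], entry.get("rewards/margins"))
```
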
@@ -1,39 +1,813 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 1.0,
5
- "eval_steps": 500,
6
- "global_step": 1,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 1.0,
13
- "learning_rate": 0.0,
14
- "logps/chosen": -242.30880737304688,
15
- "logps/rejected": -225.008056640625,
16
  "loss": 0.6931,
17
  "rewards/accuracies": 0.0,
18
  "rewards/chosen": 0.0,
19
  "rewards/margins": 0.0,
20
  "rewards/rejected": 0.0,
21
- "step": 1
22
  },
23
  {
24
  "epoch": 1.0,
25
- "step": 1,
26
  "total_flos": 0.0,
27
- "train_loss": 0.6931471824645996,
28
- "train_runtime": 27.1859,
29
- "train_samples_per_second": 2.244,
30
- "train_steps_per_second": 0.037
31
  }
32
  ],
33
  "logging_steps": 10,
34
- "max_steps": 1,
35
  "num_train_epochs": 1,
36
- "save_steps": 100,
37
  "total_flos": 0.0,
38
  "trial_name": null,
39
  "trial_params": null
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.998691442030882,
5
+ "eval_steps": 100,
6
+ "global_step": 477,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.0,
13
+ "learning_rate": 1.0416666666666667e-06,
14
+ "logits/chosen": -2.9089105129241943,
15
+ "logits/rejected": -2.8982176780700684,
16
+ "logps/chosen": -328.48699951171875,
17
+ "logps/rejected": -294.0901794433594,
18
  "loss": 0.6931,
19
+ "pred_label": 0.0,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
23
  "rewards/rejected": 0.0,
24
+ "step": 1,
25
+ "use_label": 0.0
26
+ },
27
+ {
28
+ "epoch": 0.02,
29
+ "learning_rate": 1.0416666666666668e-05,
30
+ "logits/chosen": -2.802924633026123,
31
+ "logits/rejected": -2.845370292663574,
32
+ "logps/chosen": -274.2305908203125,
33
+ "logps/rejected": -257.4792785644531,
34
+ "loss": 0.6894,
35
+ "pred_label": 0.0,
36
+ "rewards/accuracies": 0.4965277910232544,
37
+ "rewards/chosen": 0.0021643945947289467,
38
+ "rewards/margins": 0.006878577638417482,
39
+ "rewards/rejected": -0.004714183043688536,
40
+ "step": 10,
41
+ "use_label": 0.0
42
+ },
43
+ {
44
+ "epoch": 0.04,
45
+ "learning_rate": 2.0833333333333336e-05,
46
+ "logits/chosen": -2.8522541522979736,
47
+ "logits/rejected": -2.842360019683838,
48
+ "logps/chosen": -279.8498840332031,
49
+ "logps/rejected": -264.8490905761719,
50
+ "loss": 0.6557,
51
+ "pred_label": 0.0,
52
+ "rewards/accuracies": 0.65625,
53
+ "rewards/chosen": 0.02565639279782772,
54
+ "rewards/margins": 0.10996748507022858,
55
+ "rewards/rejected": -0.0843110978603363,
56
+ "step": 20,
57
+ "use_label": 0.0
58
+ },
59
+ {
60
+ "epoch": 0.06,
61
+ "learning_rate": 3.125e-05,
62
+ "logits/chosen": -2.8415942192077637,
63
+ "logits/rejected": -2.829390525817871,
64
+ "logps/chosen": -283.43377685546875,
65
+ "logps/rejected": -275.16021728515625,
66
+ "loss": 0.6152,
67
+ "pred_label": 0.0,
68
+ "rewards/accuracies": 0.668749988079071,
69
+ "rewards/chosen": -0.017193807289004326,
70
+ "rewards/margins": 0.2977373003959656,
71
+ "rewards/rejected": -0.31493109464645386,
72
+ "step": 30,
73
+ "use_label": 0.0
74
+ },
75
+ {
76
+ "epoch": 0.08,
77
+ "learning_rate": 4.166666666666667e-05,
78
+ "logits/chosen": -2.808344841003418,
79
+ "logits/rejected": -2.8009369373321533,
80
+ "logps/chosen": -299.697998046875,
81
+ "logps/rejected": -281.28240966796875,
82
+ "loss": 0.5509,
83
+ "pred_label": 0.0,
84
+ "rewards/accuracies": 0.734375,
85
+ "rewards/chosen": 0.011143045499920845,
86
+ "rewards/margins": 0.5404817461967468,
87
+ "rewards/rejected": -0.5293387174606323,
88
+ "step": 40,
89
+ "use_label": 0.0
90
+ },
91
+ {
92
+ "epoch": 0.1,
93
+ "learning_rate": 4.976689976689977e-05,
94
+ "logits/chosen": -2.735532283782959,
95
+ "logits/rejected": -2.762310743331909,
96
+ "logps/chosen": -284.2992248535156,
97
+ "logps/rejected": -285.3406982421875,
98
+ "loss": 0.5757,
99
+ "pred_label": 0.0,
100
+ "rewards/accuracies": 0.731249988079071,
101
+ "rewards/chosen": 0.06582433730363846,
102
+ "rewards/margins": 0.6093484163284302,
103
+ "rewards/rejected": -0.5435240864753723,
104
+ "step": 50,
105
+ "use_label": 0.0
106
+ },
107
+ {
108
+ "epoch": 0.13,
109
+ "learning_rate": 4.86013986013986e-05,
110
+ "logits/chosen": -2.7604079246520996,
111
+ "logits/rejected": -2.749760389328003,
112
+ "logps/chosen": -271.2710876464844,
113
+ "logps/rejected": -253.1558074951172,
114
+ "loss": 0.5778,
115
+ "pred_label": 0.0,
116
+ "rewards/accuracies": 0.690625011920929,
117
+ "rewards/chosen": 0.08226754516363144,
118
+ "rewards/margins": 0.5549899935722351,
119
+ "rewards/rejected": -0.47272244095802307,
120
+ "step": 60,
121
+ "use_label": 0.0
122
+ },
123
+ {
124
+ "epoch": 0.15,
125
+ "learning_rate": 4.7435897435897435e-05,
126
+ "logits/chosen": -2.825266122817993,
127
+ "logits/rejected": -2.8063371181488037,
128
+ "logps/chosen": -282.8874816894531,
129
+ "logps/rejected": -260.37786865234375,
130
+ "loss": 0.5591,
131
+ "pred_label": 0.0,
132
+ "rewards/accuracies": 0.6968749761581421,
133
+ "rewards/chosen": -0.020037399604916573,
134
+ "rewards/margins": 0.611237108707428,
135
+ "rewards/rejected": -0.6312744617462158,
136
+ "step": 70,
137
+ "use_label": 0.0
138
+ },
139
+ {
140
+ "epoch": 0.17,
141
+ "learning_rate": 4.6270396270396274e-05,
142
+ "logits/chosen": -2.777280807495117,
143
+ "logits/rejected": -2.760341167449951,
144
+ "logps/chosen": -278.3568420410156,
145
+ "logps/rejected": -281.4912109375,
146
+ "loss": 0.5484,
147
+ "pred_label": 0.0,
148
+ "rewards/accuracies": 0.6812499761581421,
149
+ "rewards/chosen": 0.02771860361099243,
150
+ "rewards/margins": 0.6373428702354431,
151
+ "rewards/rejected": -0.6096242666244507,
152
+ "step": 80,
153
+ "use_label": 0.0
154
+ },
155
+ {
156
+ "epoch": 0.19,
157
+ "learning_rate": 4.5104895104895105e-05,
158
+ "logits/chosen": -2.7895703315734863,
159
+ "logits/rejected": -2.7826099395751953,
160
+ "logps/chosen": -274.6077575683594,
161
+ "logps/rejected": -265.6002197265625,
162
+ "loss": 0.5476,
163
+ "pred_label": 0.0,
164
+ "rewards/accuracies": 0.71875,
165
+ "rewards/chosen": 0.15948975086212158,
166
+ "rewards/margins": 0.703137218952179,
167
+ "rewards/rejected": -0.5436475276947021,
168
+ "step": 90,
169
+ "use_label": 0.0
170
+ },
171
+ {
172
+ "epoch": 0.21,
173
+ "learning_rate": 4.3939393939393944e-05,
174
+ "logits/chosen": -2.842555046081543,
175
+ "logits/rejected": -2.8219830989837646,
176
+ "logps/chosen": -282.922607421875,
177
+ "logps/rejected": -272.0762939453125,
178
+ "loss": 0.5218,
179
+ "pred_label": 0.0,
180
+ "rewards/accuracies": 0.753125011920929,
181
+ "rewards/chosen": 0.1369757205247879,
182
+ "rewards/margins": 0.8804744482040405,
183
+ "rewards/rejected": -0.7434987425804138,
184
+ "step": 100,
185
+ "use_label": 0.0
186
+ },
187
+ {
188
+ "epoch": 0.23,
189
+ "learning_rate": 4.2773892773892776e-05,
190
+ "logits/chosen": -2.7973036766052246,
191
+ "logits/rejected": -2.8159468173980713,
192
+ "logps/chosen": -287.54254150390625,
193
+ "logps/rejected": -269.5066223144531,
194
+ "loss": 0.5455,
195
+ "pred_label": 0.0,
196
+ "rewards/accuracies": 0.734375,
197
+ "rewards/chosen": 0.057723164558410645,
198
+ "rewards/margins": 0.7660588622093201,
199
+ "rewards/rejected": -0.7083355784416199,
200
+ "step": 110,
201
+ "use_label": 0.0
202
+ },
203
+ {
204
+ "epoch": 0.25,
205
+ "learning_rate": 4.1608391608391614e-05,
206
+ "logits/chosen": -2.7736809253692627,
207
+ "logits/rejected": -2.7753469944000244,
208
+ "logps/chosen": -292.09246826171875,
209
+ "logps/rejected": -252.3135986328125,
210
+ "loss": 0.5353,
211
+ "pred_label": 0.0,
212
+ "rewards/accuracies": 0.7250000238418579,
213
+ "rewards/chosen": -0.07172087579965591,
214
+ "rewards/margins": 0.8031927347183228,
215
+ "rewards/rejected": -0.8749135732650757,
216
+ "step": 120,
217
+ "use_label": 0.0
218
+ },
219
+ {
220
+ "epoch": 0.27,
221
+ "learning_rate": 4.0442890442890446e-05,
222
+ "logits/chosen": -2.7675487995147705,
223
+ "logits/rejected": -2.761944532394409,
224
+ "logps/chosen": -271.2415771484375,
225
+ "logps/rejected": -269.3936767578125,
226
+ "loss": 0.5413,
227
+ "pred_label": 0.0,
228
+ "rewards/accuracies": 0.703125,
229
+ "rewards/chosen": -0.16168196499347687,
230
+ "rewards/margins": 0.7922881841659546,
231
+ "rewards/rejected": -0.9539702534675598,
232
+ "step": 130,
233
+ "use_label": 0.0
234
+ },
235
+ {
236
+ "epoch": 0.29,
237
+ "learning_rate": 3.9277389277389285e-05,
238
+ "logits/chosen": -2.821808338165283,
239
+ "logits/rejected": -2.828434467315674,
240
+ "logps/chosen": -293.92376708984375,
241
+ "logps/rejected": -278.75469970703125,
242
+ "loss": 0.5249,
243
+ "pred_label": 0.0,
244
+ "rewards/accuracies": 0.7593749761581421,
245
+ "rewards/chosen": -0.10061223804950714,
246
+ "rewards/margins": 0.9257994890213013,
247
+ "rewards/rejected": -1.026411771774292,
248
+ "step": 140,
249
+ "use_label": 0.0
250
+ },
251
+ {
252
+ "epoch": 0.31,
253
+ "learning_rate": 3.811188811188811e-05,
254
+ "logits/chosen": -2.8263511657714844,
255
+ "logits/rejected": -2.803342819213867,
256
+ "logps/chosen": -280.3300476074219,
257
+ "logps/rejected": -244.6310577392578,
258
+ "loss": 0.5312,
259
+ "pred_label": 0.0,
260
+ "rewards/accuracies": 0.721875011920929,
261
+ "rewards/chosen": -0.2029426395893097,
262
+ "rewards/margins": 0.7571284174919128,
263
+ "rewards/rejected": -0.9600710868835449,
264
+ "step": 150,
265
+ "use_label": 0.0
266
+ },
267
+ {
268
+ "epoch": 0.33,
269
+ "learning_rate": 3.694638694638695e-05,
270
+ "logits/chosen": -2.7946958541870117,
271
+ "logits/rejected": -2.748753309249878,
272
+ "logps/chosen": -282.52154541015625,
273
+ "logps/rejected": -296.4395446777344,
274
+ "loss": 0.5058,
275
+ "pred_label": 0.0,
276
+ "rewards/accuracies": 0.762499988079071,
277
+ "rewards/chosen": -0.10657407343387604,
278
+ "rewards/margins": 1.0698082447052002,
279
+ "rewards/rejected": -1.176382303237915,
280
+ "step": 160,
281
+ "use_label": 0.0
282
+ },
283
+ {
284
+ "epoch": 0.36,
285
+ "learning_rate": 3.578088578088578e-05,
286
+ "logits/chosen": -2.7990992069244385,
287
+ "logits/rejected": -2.7839698791503906,
288
+ "logps/chosen": -280.283447265625,
289
+ "logps/rejected": -245.1365966796875,
290
+ "loss": 0.5741,
291
+ "pred_label": 0.0,
292
+ "rewards/accuracies": 0.675000011920929,
293
+ "rewards/chosen": -0.0219185221940279,
294
+ "rewards/margins": 0.7351241707801819,
295
+ "rewards/rejected": -0.7570425868034363,
296
+ "step": 170,
297
+ "use_label": 0.0
298
+ },
299
+ {
300
+ "epoch": 0.38,
301
+ "learning_rate": 3.461538461538462e-05,
302
+ "logits/chosen": -2.8004555702209473,
303
+ "logits/rejected": -2.80672025680542,
304
+ "logps/chosen": -286.1470031738281,
305
+ "logps/rejected": -280.5009765625,
306
+ "loss": 0.5354,
307
+ "pred_label": 0.0,
308
+ "rewards/accuracies": 0.768750011920929,
309
+ "rewards/chosen": -0.1152573823928833,
310
+ "rewards/margins": 0.79168301820755,
311
+ "rewards/rejected": -0.9069403409957886,
312
+ "step": 180,
313
+ "use_label": 0.0
314
+ },
315
+ {
316
+ "epoch": 0.4,
317
+ "learning_rate": 3.344988344988345e-05,
318
+ "logits/chosen": -2.820003032684326,
319
+ "logits/rejected": -2.789689302444458,
320
+ "logps/chosen": -281.3421936035156,
321
+ "logps/rejected": -278.36883544921875,
322
+ "loss": 0.5092,
323
+ "pred_label": 0.0,
324
+ "rewards/accuracies": 0.762499988079071,
325
+ "rewards/chosen": -0.06256841123104095,
326
+ "rewards/margins": 0.9592651128768921,
327
+ "rewards/rejected": -1.0218336582183838,
328
+ "step": 190,
329
+ "use_label": 0.0
330
+ },
331
+ {
332
+ "epoch": 0.42,
333
+ "learning_rate": 3.228438228438229e-05,
334
+ "logits/chosen": -2.7884361743927,
335
+ "logits/rejected": -2.782519578933716,
336
+ "logps/chosen": -294.6650390625,
337
+ "logps/rejected": -261.40435791015625,
338
+ "loss": 0.5052,
339
+ "pred_label": 0.0,
340
+ "rewards/accuracies": 0.734375,
341
+ "rewards/chosen": 0.02564082108438015,
342
+ "rewards/margins": 0.9850121736526489,
343
+ "rewards/rejected": -0.9593712687492371,
344
+ "step": 200,
345
+ "use_label": 0.0
346
+ },
347
+ {
348
+ "epoch": 0.44,
349
+ "learning_rate": 3.111888111888112e-05,
350
+ "logits/chosen": -2.797853946685791,
351
+ "logits/rejected": -2.7864465713500977,
352
+ "logps/chosen": -269.8060607910156,
353
+ "logps/rejected": -262.01422119140625,
354
+ "loss": 0.5258,
355
+ "pred_label": 0.0,
356
+ "rewards/accuracies": 0.706250011920929,
357
+ "rewards/chosen": -0.046703118830919266,
358
+ "rewards/margins": 0.7715562582015991,
359
+ "rewards/rejected": -0.8182594180107117,
360
+ "step": 210,
361
+ "use_label": 0.0
362
+ },
363
+ {
364
+ "epoch": 0.46,
365
+ "learning_rate": 2.9953379953379956e-05,
366
+ "logits/chosen": -2.8102259635925293,
367
+ "logits/rejected": -2.813842296600342,
368
+ "logps/chosen": -280.2008972167969,
369
+ "logps/rejected": -262.47161865234375,
370
+ "loss": 0.5326,
371
+ "pred_label": 0.0,
372
+ "rewards/accuracies": 0.7093750238418579,
373
+ "rewards/chosen": 0.038443028926849365,
374
+ "rewards/margins": 0.6599202156066895,
375
+ "rewards/rejected": -0.6214772462844849,
376
+ "step": 220,
377
+ "use_label": 0.0
378
+ },
379
+ {
380
+ "epoch": 0.48,
381
+ "learning_rate": 2.878787878787879e-05,
382
+ "logits/chosen": -2.7520573139190674,
383
+ "logits/rejected": -2.744856357574463,
384
+ "logps/chosen": -265.61956787109375,
385
+ "logps/rejected": -256.87115478515625,
386
+ "loss": 0.5328,
387
+ "pred_label": 0.0,
388
+ "rewards/accuracies": 0.690625011920929,
389
+ "rewards/chosen": 0.014087711460888386,
390
+ "rewards/margins": 0.7686548233032227,
391
+ "rewards/rejected": -0.7545671463012695,
392
+ "step": 230,
393
+ "use_label": 0.0
394
+ },
395
+ {
396
+ "epoch": 0.5,
397
+ "learning_rate": 2.762237762237762e-05,
398
+ "logits/chosen": -2.7545018196105957,
399
+ "logits/rejected": -2.730861186981201,
400
+ "logps/chosen": -256.40753173828125,
401
+ "logps/rejected": -260.3459167480469,
402
+ "loss": 0.5315,
403
+ "pred_label": 0.0,
404
+ "rewards/accuracies": 0.7093750238418579,
405
+ "rewards/chosen": -0.0070175291039049625,
406
+ "rewards/margins": 0.7594768404960632,
407
+ "rewards/rejected": -0.7664943933486938,
408
+ "step": 240,
409
+ "use_label": 0.0
410
+ },
411
+ {
412
+ "epoch": 0.52,
413
+ "learning_rate": 2.6456876456876455e-05,
414
+ "logits/chosen": -2.8026585578918457,
415
+ "logits/rejected": -2.778594493865967,
416
+ "logps/chosen": -277.6792907714844,
417
+ "logps/rejected": -256.2415771484375,
418
+ "loss": 0.5158,
419
+ "pred_label": 0.0,
420
+ "rewards/accuracies": 0.7406250238418579,
421
+ "rewards/chosen": 0.056344062089920044,
422
+ "rewards/margins": 0.8976815938949585,
423
+ "rewards/rejected": -0.8413375616073608,
424
+ "step": 250,
425
+ "use_label": 0.0
426
+ },
427
+ {
428
+ "epoch": 0.54,
429
+ "learning_rate": 2.529137529137529e-05,
430
+ "logits/chosen": -2.8391366004943848,
431
+ "logits/rejected": -2.8148343563079834,
432
+ "logps/chosen": -284.32781982421875,
433
+ "logps/rejected": -262.647705078125,
434
+ "loss": 0.4916,
435
+ "pred_label": 0.0,
436
+ "rewards/accuracies": 0.7406250238418579,
437
+ "rewards/chosen": -0.06821730732917786,
438
+ "rewards/margins": 1.0360552072525024,
439
+ "rewards/rejected": -1.104272484779358,
440
+ "step": 260,
441
+ "use_label": 0.0
442
+ },
443
+ {
444
+ "epoch": 0.57,
445
+ "learning_rate": 2.4125874125874125e-05,
446
+ "logits/chosen": -2.841184139251709,
447
+ "logits/rejected": -2.813793659210205,
448
+ "logps/chosen": -295.7508544921875,
449
+ "logps/rejected": -291.97467041015625,
450
+ "loss": 0.5071,
451
+ "pred_label": 0.0,
452
+ "rewards/accuracies": 0.746874988079071,
453
+ "rewards/chosen": -0.13246159255504608,
454
+ "rewards/margins": 1.0678224563598633,
455
+ "rewards/rejected": -1.2002841234207153,
456
+ "step": 270,
457
+ "use_label": 0.0
458
+ },
459
+ {
460
+ "epoch": 0.59,
461
+ "learning_rate": 2.296037296037296e-05,
462
+ "logits/chosen": -2.8141586780548096,
463
+ "logits/rejected": -2.803541898727417,
464
+ "logps/chosen": -283.2509765625,
465
+ "logps/rejected": -250.5514373779297,
466
+ "loss": 0.5358,
467
+ "pred_label": 0.0,
468
+ "rewards/accuracies": 0.715624988079071,
469
+ "rewards/chosen": -0.14472146332263947,
470
+ "rewards/margins": 0.9390050768852234,
471
+ "rewards/rejected": -1.0837266445159912,
472
+ "step": 280,
473
+ "use_label": 0.0
474
+ },
475
+ {
476
+ "epoch": 0.61,
477
+ "learning_rate": 2.1794871794871795e-05,
478
+ "logits/chosen": -2.821655035018921,
479
+ "logits/rejected": -2.8060765266418457,
480
+ "logps/chosen": -274.44903564453125,
481
+ "logps/rejected": -272.57745361328125,
482
+ "loss": 0.5206,
483
+ "pred_label": 0.0,
484
+ "rewards/accuracies": 0.7437499761581421,
485
+ "rewards/chosen": -0.06504921615123749,
486
+ "rewards/margins": 0.8676969408988953,
487
+ "rewards/rejected": -0.9327462315559387,
488
+ "step": 290,
489
+ "use_label": 0.0
490
+ },
491
+ {
492
+ "epoch": 0.63,
493
+ "learning_rate": 2.062937062937063e-05,
494
+ "logits/chosen": -2.8019015789031982,
495
+ "logits/rejected": -2.801147222518921,
496
+ "logps/chosen": -292.081787109375,
497
+ "logps/rejected": -301.0537414550781,
498
+ "loss": 0.5177,
499
+ "pred_label": 0.0,
500
+ "rewards/accuracies": 0.734375,
501
+ "rewards/chosen": 0.03986026719212532,
502
+ "rewards/margins": 0.968209445476532,
503
+ "rewards/rejected": -0.9283491969108582,
504
+ "step": 300,
505
+ "use_label": 0.0
506
+ },
507
+ {
508
+ "epoch": 0.65,
509
+ "learning_rate": 1.9463869463869462e-05,
510
+ "logits/chosen": -2.809706449508667,
511
+ "logits/rejected": -2.781670570373535,
512
+ "logps/chosen": -293.81451416015625,
513
+ "logps/rejected": -256.2749938964844,
514
+ "loss": 0.5066,
515
+ "pred_label": 0.0,
516
+ "rewards/accuracies": 0.7281249761581421,
517
+ "rewards/chosen": -0.08690959960222244,
518
+ "rewards/margins": 0.949812114238739,
519
+ "rewards/rejected": -1.0367217063903809,
520
+ "step": 310,
521
+ "use_label": 0.0
522
+ },
523
+ {
524
+ "epoch": 0.67,
525
+ "learning_rate": 1.8298368298368298e-05,
526
+ "logits/chosen": -2.797341823577881,
527
+ "logits/rejected": -2.787775754928589,
528
+ "logps/chosen": -269.12139892578125,
529
+ "logps/rejected": -266.01080322265625,
530
+ "loss": 0.4996,
531
+ "pred_label": 0.0,
532
+ "rewards/accuracies": 0.768750011920929,
533
+ "rewards/chosen": 0.0034091435372829437,
534
+ "rewards/margins": 1.1171700954437256,
535
+ "rewards/rejected": -1.1137609481811523,
536
+ "step": 320,
537
+ "use_label": 0.0
538
+ },
539
+ {
540
+ "epoch": 0.69,
541
+ "learning_rate": 1.7132867132867133e-05,
542
+ "logits/chosen": -2.8247230052948,
543
+ "logits/rejected": -2.80410099029541,
544
+ "logps/chosen": -299.09442138671875,
545
+ "logps/rejected": -273.0837707519531,
546
+ "loss": 0.5236,
547
+ "pred_label": 0.0,
548
+ "rewards/accuracies": 0.731249988079071,
549
+ "rewards/chosen": -0.04382320120930672,
550
+ "rewards/margins": 0.871612548828125,
551
+ "rewards/rejected": -0.915435791015625,
552
+ "step": 330,
553
+ "use_label": 0.0
554
+ },
555
+ {
556
+ "epoch": 0.71,
557
+ "learning_rate": 1.5967365967365968e-05,
558
+ "logits/chosen": -2.806962251663208,
559
+ "logits/rejected": -2.79986572265625,
560
+ "logps/chosen": -284.2406005859375,
561
+ "logps/rejected": -258.4219665527344,
562
+ "loss": 0.5258,
563
+ "pred_label": 0.0,
564
+ "rewards/accuracies": 0.7093750238418579,
565
+ "rewards/chosen": -0.023638445883989334,
566
+ "rewards/margins": 0.8371860384941101,
567
+ "rewards/rejected": -0.8608245849609375,
568
+ "step": 340,
569
+ "use_label": 0.0
570
+ },
571
+ {
572
+ "epoch": 0.73,
573
+ "learning_rate": 1.4801864801864803e-05,
574
+ "logits/chosen": -2.803683280944824,
575
+ "logits/rejected": -2.7878894805908203,
576
+ "logps/chosen": -263.1024169921875,
577
+ "logps/rejected": -239.3209686279297,
578
+ "loss": 0.4963,
579
+ "pred_label": 0.0,
580
+ "rewards/accuracies": 0.765625,
581
+ "rewards/chosen": -0.0065319957211613655,
582
+ "rewards/margins": 1.0474125146865845,
583
+ "rewards/rejected": -1.05394446849823,
584
+ "step": 350,
585
+ "use_label": 0.0
586
+ },
587
+ {
588
+ "epoch": 0.75,
589
+ "learning_rate": 1.3636363636363637e-05,
590
+ "logits/chosen": -2.8253402709960938,
591
+ "logits/rejected": -2.817469596862793,
592
+ "logps/chosen": -280.67022705078125,
593
+ "logps/rejected": -258.00579833984375,
594
+ "loss": 0.4967,
595
+ "pred_label": 0.0,
596
+ "rewards/accuracies": 0.734375,
597
+ "rewards/chosen": -0.015683341771364212,
598
+ "rewards/margins": 1.0513224601745605,
599
+ "rewards/rejected": -1.0670057535171509,
600
+ "step": 360,
601
+ "use_label": 0.0
602
+ },
603
+ {
604
+ "epoch": 0.77,
605
+ "learning_rate": 1.2470862470862472e-05,
606
+ "logits/chosen": -2.793775796890259,
607
+ "logits/rejected": -2.796365261077881,
608
+ "logps/chosen": -302.306884765625,
609
+ "logps/rejected": -273.70635986328125,
610
+ "loss": 0.5115,
611
+ "pred_label": 0.0,
612
+ "rewards/accuracies": 0.737500011920929,
613
+ "rewards/chosen": 0.010395345278084278,
614
+ "rewards/margins": 0.973157525062561,
615
+ "rewards/rejected": -0.9627620577812195,
616
+ "step": 370,
617
+ "use_label": 0.0
618
+ },
619
+ {
620
+ "epoch": 0.8,
621
+ "learning_rate": 1.1305361305361307e-05,
622
+ "logits/chosen": -2.8007330894470215,
623
+ "logits/rejected": -2.809499740600586,
624
+ "logps/chosen": -277.95599365234375,
625
+ "logps/rejected": -271.8043212890625,
626
+ "loss": 0.5023,
627
+ "pred_label": 0.0,
628
+ "rewards/accuracies": 0.768750011920929,
629
+ "rewards/chosen": 0.0019541799556463957,
630
+ "rewards/margins": 1.0503606796264648,
631
+ "rewards/rejected": -1.0484063625335693,
632
+ "step": 380,
633
+ "use_label": 0.0
634
+ },
635
+ {
636
+ "epoch": 0.82,
637
+ "learning_rate": 1.013986013986014e-05,
638
+ "logits/chosen": -2.838869571685791,
639
+ "logits/rejected": -2.8424456119537354,
640
+ "logps/chosen": -291.6543884277344,
641
+ "logps/rejected": -275.7422790527344,
642
+ "loss": 0.5223,
643
+ "pred_label": 0.0,
644
+ "rewards/accuracies": 0.7406250238418579,
645
+ "rewards/chosen": 0.009039236232638359,
646
+ "rewards/margins": 0.990314781665802,
647
+ "rewards/rejected": -0.9812755584716797,
648
+ "step": 390,
649
+ "use_label": 0.0
650
+ },
651
+ {
652
+ "epoch": 0.84,
653
+ "learning_rate": 8.974358974358976e-06,
654
+ "logits/chosen": -2.7799577713012695,
655
+ "logits/rejected": -2.7700963020324707,
656
+ "logps/chosen": -292.00604248046875,
657
+ "logps/rejected": -278.8124694824219,
658
+ "loss": 0.5059,
659
+ "pred_label": 0.0,
660
+ "rewards/accuracies": 0.7281249761581421,
661
+ "rewards/chosen": -0.08654220402240753,
662
+ "rewards/margins": 0.9550994634628296,
663
+ "rewards/rejected": -1.0416417121887207,
664
+ "step": 400,
665
+ "use_label": 0.0
666
+ },
667
+ {
668
+ "epoch": 0.86,
669
+ "learning_rate": 7.808857808857809e-06,
670
+ "logits/chosen": -2.7790863513946533,
671
+ "logits/rejected": -2.7839720249176025,
672
+ "logps/chosen": -288.0146484375,
673
+ "logps/rejected": -246.18386840820312,
674
+ "loss": 0.5193,
675
+ "pred_label": 0.0,
676
+ "rewards/accuracies": 0.7593749761581421,
677
+ "rewards/chosen": -0.0843573808670044,
678
+ "rewards/margins": 1.01529860496521,
679
+ "rewards/rejected": -1.0996559858322144,
680
+ "step": 410,
681
+ "use_label": 0.0
682
+ },
683
+ {
684
+ "epoch": 0.88,
685
+ "learning_rate": 6.643356643356643e-06,
686
+ "logits/chosen": -2.8042919635772705,
687
+ "logits/rejected": -2.814549446105957,
688
+ "logps/chosen": -267.73272705078125,
689
+ "logps/rejected": -282.6224060058594,
690
+ "loss": 0.5131,
691
+ "pred_label": 0.0,
692
+ "rewards/accuracies": 0.7250000238418579,
693
+ "rewards/chosen": -0.23174908757209778,
694
+ "rewards/margins": 0.7961767315864563,
695
+ "rewards/rejected": -1.027925729751587,
696
+ "step": 420,
697
+ "use_label": 0.0
698
+ },
699
+ {
700
+ "epoch": 0.9,
701
+ "learning_rate": 5.477855477855478e-06,
702
+ "logits/chosen": -2.8008358478546143,
703
+ "logits/rejected": -2.827676773071289,
704
+ "logps/chosen": -291.685791015625,
705
+ "logps/rejected": -269.49896240234375,
706
+ "loss": 0.5135,
707
+ "pred_label": 0.0,
708
+ "rewards/accuracies": 0.7593749761581421,
709
+ "rewards/chosen": -0.1730644553899765,
710
+ "rewards/margins": 0.9842265248298645,
711
+ "rewards/rejected": -1.157291054725647,
712
+ "step": 430,
713
+ "use_label": 0.0
714
+ },
715
+ {
716
+ "epoch": 0.92,
717
+ "learning_rate": 4.312354312354312e-06,
718
+ "logits/chosen": -2.8276031017303467,
719
+ "logits/rejected": -2.81488299369812,
720
+ "logps/chosen": -265.67242431640625,
721
+ "logps/rejected": -261.6026611328125,
722
+ "loss": 0.51,
723
+ "pred_label": 0.0,
724
+ "rewards/accuracies": 0.734375,
725
+ "rewards/chosen": -0.08840381354093552,
726
+ "rewards/margins": 0.9768926501274109,
727
+ "rewards/rejected": -1.0652964115142822,
728
+ "step": 440,
729
+ "use_label": 0.0
730
+ },
731
+ {
732
+ "epoch": 0.94,
733
+ "learning_rate": 3.1468531468531472e-06,
734
+ "logits/chosen": -2.794529676437378,
735
+ "logits/rejected": -2.7928192615509033,
736
+ "logps/chosen": -280.88995361328125,
737
+ "logps/rejected": -275.9464416503906,
738
+ "loss": 0.5128,
739
+ "pred_label": 0.0,
740
+ "rewards/accuracies": 0.765625,
741
+ "rewards/chosen": -0.06786171346902847,
742
+ "rewards/margins": 0.9994968175888062,
743
+ "rewards/rejected": -1.0673584938049316,
744
+ "step": 450,
745
+ "use_label": 0.0
746
+ },
747
+ {
748
+ "epoch": 0.96,
749
+ "learning_rate": 1.981351981351981e-06,
750
+ "logits/chosen": -2.7951102256774902,
751
+ "logits/rejected": -2.7973313331604004,
752
+ "logps/chosen": -297.4739990234375,
753
+ "logps/rejected": -272.06024169921875,
754
+ "loss": 0.4871,
755
+ "pred_label": 0.0,
756
+ "rewards/accuracies": 0.737500011920929,
757
+ "rewards/chosen": -0.08221320062875748,
758
+ "rewards/margins": 0.9878571629524231,
759
+ "rewards/rejected": -1.0700703859329224,
760
+ "step": 460,
761
+ "use_label": 0.0
762
+ },
763
+ {
764
+ "epoch": 0.98,
765
+ "learning_rate": 8.158508158508159e-07,
766
+ "logits/chosen": -2.8256263732910156,
767
+ "logits/rejected": -2.7975211143493652,
768
+ "logps/chosen": -278.83154296875,
769
+ "logps/rejected": -266.3514099121094,
770
+ "loss": 0.4951,
771
+ "pred_label": 0.0,
772
+ "rewards/accuracies": 0.7593749761581421,
773
+ "rewards/chosen": -0.10287537425756454,
774
+ "rewards/margins": 1.0982601642608643,
775
+ "rewards/rejected": -1.2011353969573975,
776
+ "step": 470,
777
+ "use_label": 0.0
778
  },
779
  {
780
  "epoch": 1.0,
781
+ "eval_logits/chosen": -2.822110891342163,
782
+ "eval_logits/rejected": -2.8178136348724365,
783
+ "eval_logps/chosen": -285.4951171875,
784
+ "eval_logps/rejected": -270.55743408203125,
785
+ "eval_loss": 0.5198934078216553,
786
+ "eval_pred_label": 0.0,
787
+ "eval_rewards/accuracies": 0.7300000190734863,
788
+ "eval_rewards/chosen": -0.12377375364303589,
789
+ "eval_rewards/margins": 1.001997470855713,
790
+ "eval_rewards/rejected": -1.1257712841033936,
791
+ "eval_runtime": 453.8631,
792
+ "eval_samples_per_second": 4.407,
793
+ "eval_steps_per_second": 0.275,
794
+ "eval_use_label": 0.0,
795
+ "step": 477
796
+ },
797
+ {
798
+ "epoch": 1.0,
799
+ "step": 477,
800
  "total_flos": 0.0,
801
+ "train_loss": 0.5321606655040877,
802
+ "train_runtime": 24451.2028,
803
+ "train_samples_per_second": 2.5,
804
+ "train_steps_per_second": 0.02
805
  }
806
  ],
807
  "logging_steps": 10,
808
+ "max_steps": 477,
809
  "num_train_epochs": 1,
810
+ "save_steps": 50,
811
  "total_flos": 0.0,
812
  "trial_name": null,
813
  "trial_params": null