Model save
Browse files
- README.md +2 -2
- all_results.json +5 -5
- model-00001-of-00003.safetensors +1 -1
- model-00002-of-00003.safetensors +1 -1
- model-00003-of-00003.safetensors +1 -1
- runs/Jun21_05-09-37_n136-112-146/events.out.tfevents.1718919313.n136-112-146.2797891.0 +2 -2
- train_results.json +5 -5
- trainer_state.json +560 -230
README.md
CHANGED
@@ -14,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # zephyr-7b-dpo-full
 
-This model is a fine-tuned version of [princeton-nlp/Mistral-7B-Base-SFT-DPO](https://huggingface.co/princeton-nlp/Mistral-7B-Base-SFT-DPO) on
+This model is a fine-tuned version of [princeton-nlp/Mistral-7B-Base-SFT-DPO](https://huggingface.co/princeton-nlp/Mistral-7B-Base-SFT-DPO) on an unknown dataset.
 
 ## Model description
 
@@ -55,5 +55,5 @@ The following hyperparameters were used during training:
 
 - Transformers 4.39.3
 - Pytorch 2.1.2+cu118
-- Datasets 2.
+- Datasets 2.19.1
 - Tokenizers 0.15.2
all_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "train_loss": 0.
-    "train_runtime":
-    "train_samples":
-    "train_samples_per_second": 8.
-    "train_steps_per_second": 0.
+    "train_loss": 0.2785977178812027,
+    "train_runtime": 11929.9898,
+    "train_samples": 102360,
+    "train_samples_per_second": 8.58,
+    "train_steps_per_second": 0.034
 }
model-00001-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:41b405c6b39fce54d81023410da5b0175bc34f1b707551cc87bb57315a19139d
 size 4943162336
model-00002-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b4fcfc77bc0cce12435a691bee318c376a963ab3c60e50f0201871ef7f9f1899
 size 4999819336
model-00003-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ec4cc12a5a582b9be0861f8da52397528a0b5094e4e53a00c5c10ad9fdc740da
 size 4540516344
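The three shard entries above are Git LFS pointer files: the repository tracks only each shard's sha256 OID and byte size, not the weights themselves. Below is a minimal sketch of checking a downloaded shard against its pointer; the local path is hypothetical, and the OID and size are copied from the first pointer above.

```python
import hashlib
import os

# OID and size copied from the model-00001-of-00003.safetensors pointer above.
EXPECTED_OID = "41b405c6b39fce54d81023410da5b0175bc34f1b707551cc87bb57315a19139d"
EXPECTED_SIZE = 4943162336
SHARD_PATH = "model-00001-of-00003.safetensors"  # hypothetical local download

def matches_lfs_pointer(path: str, oid: str, size: int) -> bool:
    """Return True if the file's byte size and sha256 digest match the LFS pointer."""
    if os.path.getsize(path) != size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == oid

print(matches_lfs_pointer(SHARD_PATH, EXPECTED_OID, EXPECTED_SIZE))
```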
runs/Jun21_05-09-37_n136-112-146/events.out.tfevents.1718919313.n136-112-146.2797891.0
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ec7cfced69671d65a2c2d087a11f64906f935b37201b11175a3442ce3a673df9
+size 33340
train_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "train_loss": 0.
-    "train_runtime":
-    "train_samples":
-    "train_samples_per_second": 8.
-    "train_steps_per_second": 0.
+    "train_loss": 0.2785977178812027,
+    "train_runtime": 11929.9898,
+    "train_samples": 102360,
+    "train_samples_per_second": 8.58,
+    "train_steps_per_second": 0.034
 }
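The filled-in throughput fields are mutually consistent, which is a quick way to sanity-check a run summary like this. A short check using only the values recorded above:

```python
# Values copied from train_results.json / all_results.json above.
train_runtime = 11929.9898  # seconds
train_samples = 102360
train_samples_per_second = 8.58
train_steps_per_second = 0.034

# 102360 / 11929.9898 ~= 8.58, matching the reported samples/second.
print(round(train_samples / train_runtime, 2))
# 0.034 * 11929.9898 ~= 406 steps; the trainer_state.json diff below records
# global_step = 400 (0.0335 steps/second rounded up to 0.034).
print(round(train_steps_per_second * train_runtime))
```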
trainer_state.json
CHANGED
@@ -3,20 +3,20 @@
   "best_model_checkpoint": null,
   "epoch": 1.0,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 400,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen": -0.
-      "logits/rejected": 0.
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
+      "epoch": 0.0,
+      "grad_norm": 24.862652137264853,
+      "learning_rate": 1.25e-08,
+      "logits/chosen": -0.5811702013015747,
+      "logits/rejected": -0.11655431985855103,
+      "logps/chosen": -351.5902099609375,
+      "logps/rejected": -240.969970703125,
+      "loss": 0.6931,
       "rewards/accuracies": 0.0,
       "rewards/chosen": 0.0,
       "rewards/margins": 0.0,
@@ -24,287 +24,617 @@
       "step": 1
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen":
-      "logits/rejected": 0.
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen":
-      "rewards/margins": 0.
-      "rewards/rejected": -
+      "epoch": 0.03,
+      "grad_norm": 23.69292682023629,
+      "learning_rate": 1.25e-07,
+      "logits/chosen": 0.26120826601982117,
+      "logits/rejected": 0.23706814646720886,
+      "logps/chosen": -333.1805419921875,
+      "logps/rejected": -244.67898559570312,
+      "loss": 0.6922,
+      "rewards/accuracies": 0.5173611044883728,
+      "rewards/chosen": 0.0021614907309412956,
+      "rewards/margins": 0.0021554920822381973,
+      "rewards/rejected": 5.998538654239383e-06,
       "step": 10
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen": -0.
-      "logits/rejected": 0.
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen": 0.
-      "rewards/margins":
-      "rewards/rejected": -
+      "epoch": 0.05,
+      "grad_norm": 18.203526649945516,
+      "learning_rate": 2.5e-07,
+      "logits/chosen": -0.017204787582159042,
+      "logits/rejected": 0.1991611272096634,
+      "logps/chosen": -320.430908203125,
+      "logps/rejected": -234.376220703125,
+      "loss": 0.669,
+      "rewards/accuracies": 0.737500011920929,
+      "rewards/chosen": 0.033605434000492096,
+      "rewards/margins": 0.04716432839632034,
+      "rewards/rejected": -0.01355889905244112,
       "step": 20
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen": -0.
-      "logits/rejected": 0.
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen":
-      "rewards/margins":
-      "rewards/rejected": -
+      "epoch": 0.07,
+      "grad_norm": 10.096989474079606,
+      "learning_rate": 3.75e-07,
+      "logits/chosen": -0.2575300931930542,
+      "logits/rejected": -0.4580558240413666,
+      "logps/chosen": -300.87896728515625,
+      "logps/rejected": -255.5655517578125,
+      "loss": 0.5805,
+      "rewards/accuracies": 0.7749999761581421,
+      "rewards/chosen": 0.14600001275539398,
+      "rewards/margins": 0.2884979844093323,
+      "rewards/rejected": -0.14249801635742188,
       "step": 30
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen": -0.
-      "logits/rejected": 0.
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen":
-      "rewards/margins":
-      "rewards/rejected": -
+      "epoch": 0.1,
+      "grad_norm": 9.68944337059453,
+      "learning_rate": 5e-07,
+      "logits/chosen": -0.6759181022644043,
+      "logits/rejected": -0.6345951557159424,
+      "logps/chosen": -317.50872802734375,
+      "logps/rejected": -302.39630126953125,
+      "loss": 0.4819,
+      "rewards/accuracies": 0.840624988079071,
+      "rewards/chosen": 0.07102981209754944,
+      "rewards/margins": 0.6418195366859436,
+      "rewards/rejected": -0.5707896947860718,
       "step": 40
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate": 4.
-      "logits/chosen": -0.
-      "logits/rejected": 0.
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen": 0.
-      "rewards/margins":
-      "rewards/rejected": -
+      "epoch": 0.12,
+      "grad_norm": 13.329379682299182,
+      "learning_rate": 4.990486745229364e-07,
+      "logits/chosen": -0.12263472378253937,
+      "logits/rejected": 0.44540151953697205,
+      "logps/chosen": -374.64556884765625,
+      "logps/rejected": -388.1717224121094,
+      "loss": 0.3966,
+      "rewards/accuracies": 0.800000011920929,
+      "rewards/chosen": -0.4549541473388672,
+      "rewards/margins": 1.0250240564346313,
+      "rewards/rejected": -1.4799782037734985,
       "step": 50
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate": 4.
-      "logits/chosen": -0.
-      "logits/rejected": 0.
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen": -0.
-      "rewards/margins":
-      "rewards/rejected": -
+      "epoch": 0.15,
+      "grad_norm": 17.333516248641253,
+      "learning_rate": 4.96201938253052e-07,
+      "logits/chosen": -0.30300790071487427,
+      "logits/rejected": 0.3122316002845764,
+      "logps/chosen": -394.78106689453125,
+      "logps/rejected": -432.4813537597656,
+      "loss": 0.3861,
+      "rewards/accuracies": 0.8187500238418579,
+      "rewards/chosen": -0.7015730142593384,
+      "rewards/margins": 1.1719900369644165,
+      "rewards/rejected": -1.8735630512237549,
       "step": 60
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen":
-      "logits/rejected":
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen": 0.
-      "rewards/margins":
-      "rewards/rejected": -
+      "epoch": 0.17,
+      "grad_norm": 15.677534908750197,
+      "learning_rate": 4.91481456572267e-07,
+      "logits/chosen": 0.7395630478858948,
+      "logits/rejected": 1.5376254320144653,
+      "logps/chosen": -425.17236328125,
+      "logps/rejected": -448.2694396972656,
+      "loss": 0.3474,
+      "rewards/accuracies": 0.831250011920929,
+      "rewards/chosen": -0.8609533309936523,
+      "rewards/margins": 1.3486477136611938,
+      "rewards/rejected": -2.2096011638641357,
       "step": 70
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen":
-      "logits/rejected":
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen": -1.
-      "rewards/margins":
-      "rewards/rejected": -
+      "epoch": 0.2,
+      "grad_norm": 17.182808543364636,
+      "learning_rate": 4.849231551964771e-07,
+      "logits/chosen": 2.598942995071411,
+      "logits/rejected": 3.4538092613220215,
+      "logps/chosen": -448.8929748535156,
+      "logps/rejected": -540.0630493164062,
+      "loss": 0.3215,
+      "rewards/accuracies": 0.890625,
+      "rewards/chosen": -1.3736767768859863,
+      "rewards/margins": 1.7528272867202759,
+      "rewards/rejected": -3.126504421234131,
       "step": 80
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen":
-      "logits/rejected":
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen": -
-      "rewards/margins":
-      "rewards/rejected": -
+      "epoch": 0.23,
+      "grad_norm": 16.648755569621386,
+      "learning_rate": 4.7657694675916247e-07,
+      "logits/chosen": 2.8463895320892334,
+      "logits/rejected": 3.732513427734375,
+      "logps/chosen": -496.74005126953125,
+      "logps/rejected": -623.58984375,
+      "loss": 0.3048,
+      "rewards/accuracies": 0.878125011920929,
+      "rewards/chosen": -1.7601783275604248,
+      "rewards/margins": 1.9939384460449219,
+      "rewards/rejected": -3.7541167736053467,
       "step": 90
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen":
-      "logits/rejected":
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen": -
-      "rewards/margins":
-      "rewards/rejected": -
+      "epoch": 0.25,
+      "grad_norm": 15.972608527062494,
+      "learning_rate": 4.6650635094610966e-07,
+      "logits/chosen": 2.0133347511291504,
+      "logits/rejected": 3.3279690742492676,
+      "logps/chosen": -554.5970458984375,
+      "logps/rejected": -683.0777587890625,
+      "loss": 0.2797,
+      "rewards/accuracies": 0.859375,
+      "rewards/chosen": -2.21871018409729,
+      "rewards/margins": 2.000453233718872,
+      "rewards/rejected": -4.219162940979004,
       "step": 100
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen":
-      "logits/rejected":
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen": -
-      "rewards/margins":
-      "rewards/rejected": -
+      "epoch": 0.28,
+      "grad_norm": 16.95927334748175,
+      "learning_rate": 4.5478801107224794e-07,
+      "logits/chosen": 2.1293346881866455,
+      "logits/rejected": 3.9433817863464355,
+      "logps/chosen": -545.55078125,
+      "logps/rejected": -698.3030395507812,
+      "loss": 0.2718,
+      "rewards/accuracies": 0.887499988079071,
+      "rewards/chosen": -2.248697280883789,
+      "rewards/margins": 2.459144353866577,
+      "rewards/rejected": -4.707841873168945,
       "step": 110
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen":
-      "logits/rejected":
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen": -
-      "rewards/margins":
-      "rewards/rejected": -
+      "epoch": 0.3,
+      "grad_norm": 15.769838259410646,
+      "learning_rate": 4.415111107797445e-07,
+      "logits/chosen": 2.2328364849090576,
+      "logits/rejected": 3.943868637084961,
+      "logps/chosen": -547.4822998046875,
+      "logps/rejected": -709.2218017578125,
+      "loss": 0.2597,
+      "rewards/accuracies": 0.859375,
+      "rewards/chosen": -2.386432409286499,
+      "rewards/margins": 2.306048631668091,
+      "rewards/rejected": -4.692481517791748,
       "step": 120
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen":
-      "logits/rejected":
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen": -
-      "rewards/margins":
-      "rewards/rejected": -
+      "epoch": 0.33,
+      "grad_norm": 16.240997635455848,
+      "learning_rate": 4.2677669529663686e-07,
+      "logits/chosen": 3.3713316917419434,
+      "logits/rejected": 4.970644950866699,
+      "logps/chosen": -669.5197143554688,
+      "logps/rejected": -839.8416748046875,
+      "loss": 0.2523,
+      "rewards/accuracies": 0.8687499761581421,
+      "rewards/chosen": -3.3710944652557373,
+      "rewards/margins": 2.5790421962738037,
+      "rewards/rejected": -5.950136661529541,
       "step": 130
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen":
-      "logits/rejected":
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen": -
-      "rewards/margins":
-      "rewards/rejected": -6.
+      "epoch": 0.35,
+      "grad_norm": 16.664869807154886,
+      "learning_rate": 4.106969024216348e-07,
+      "logits/chosen": 3.0220611095428467,
+      "logits/rejected": 4.610594749450684,
+      "logps/chosen": -647.0032958984375,
+      "logps/rejected": -834.1439208984375,
+      "loss": 0.2514,
+      "rewards/accuracies": 0.90625,
+      "rewards/chosen": -3.284292221069336,
+      "rewards/margins": 2.7818052768707275,
+      "rewards/rejected": -6.066097259521484,
       "step": 140
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen":
-      "logits/rejected":
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen":
-      "rewards/margins":
-      "rewards/rejected": -
+      "epoch": 0.38,
+      "grad_norm": 17.103959159416473,
+      "learning_rate": 3.933941090877615e-07,
+      "logits/chosen": 1.9788957834243774,
+      "logits/rejected": 3.797266721725464,
+      "logps/chosen": -657.1544799804688,
+      "logps/rejected": -866.92236328125,
+      "loss": 0.2465,
+      "rewards/accuracies": 0.887499988079071,
+      "rewards/chosen": -3.3205840587615967,
+      "rewards/margins": 2.8848683834075928,
+      "rewards/rejected": -6.205452919006348,
       "step": 150
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen":
-      "logits/rejected":
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
+      "epoch": 0.4,
+      "grad_norm": 22.71759647433438,
+      "learning_rate": 3.75e-07,
+      "logits/chosen": 2.598877429962158,
+      "logits/rejected": 3.922821044921875,
+      "logps/chosen": -650.6119995117188,
+      "logps/rejected": -860.2496337890625,
+      "loss": 0.2424,
       "rewards/accuracies": 0.893750011920929,
-      "rewards/chosen":
-      "rewards/margins":
-      "rewards/rejected": -
+      "rewards/chosen": -3.585509777069092,
+      "rewards/margins": 2.7439045906066895,
+      "rewards/rejected": -6.3294148445129395,
       "step": 160
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen":
-      "logits/rejected":
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen": -
-      "rewards/margins":
-      "rewards/rejected": -6.
+      "epoch": 0.42,
+      "grad_norm": 17.154680074008297,
+      "learning_rate": 3.5565456543517485e-07,
+      "logits/chosen": 1.2129310369491577,
+      "logits/rejected": 3.644993543624878,
+      "logps/chosen": -650.40576171875,
+      "logps/rejected": -869.5897216796875,
+      "loss": 0.245,
+      "rewards/accuracies": 0.8656250238418579,
+      "rewards/chosen": -3.348802089691162,
+      "rewards/margins": 3.0448169708251953,
+      "rewards/rejected": -6.393619537353516,
       "step": 170
     },
     {
-      "epoch": 0.
-      "grad_norm":
-      "learning_rate":
-      "logits/chosen":
-      "logits/rejected":
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/chosen": -
-      "rewards/margins":
-      "rewards/rejected": -6.
+      "epoch": 0.45,
+      "grad_norm": 15.288786440112402,
+      "learning_rate": 3.355050358314172e-07,
+      "logits/chosen": 2.0979018211364746,
+      "logits/rejected": 3.6165339946746826,
+      "logps/chosen": -733.31298828125,
+      "logps/rejected": -946.0720825195312,
+      "loss": 0.225,
+      "rewards/accuracies": 0.909375011920929,
+      "rewards/chosen": -3.9488494396209717,
+      "rewards/margins": 3.014504909515381,
+      "rewards/rejected": -6.963354587554932,
       "step": 180
     },
+    {
+      "epoch": 0.47,
+      "grad_norm": 15.20089211524797,
+      "learning_rate": 3.147047612756302e-07,
+      "logits/chosen": 1.049578309059143,
+      "logits/rejected": 3.2230868339538574,
+      "logps/chosen": -655.8287963867188,
+      "logps/rejected": -909.56787109375,
+      "loss": 0.2177,
+      "rewards/accuracies": 0.8999999761581421,
+      "rewards/chosen": -3.187329053878784,
+      "rewards/margins": 3.4261791706085205,
+      "rewards/rejected": -6.613508701324463,
+      "step": 190
+    },
+    {
+      "epoch": 0.5,
+      "grad_norm": 19.21517389497067,
+      "learning_rate": 2.934120444167326e-07,
+      "logits/chosen": 2.0917961597442627,
+      "logits/rejected": 4.381856918334961,
+      "logps/chosen": -707.9210205078125,
+      "logps/rejected": -967.8511962890625,
+      "loss": 0.2291,
+      "rewards/accuracies": 0.875,
+      "rewards/chosen": -4.029782772064209,
+      "rewards/margins": 3.380286455154419,
+      "rewards/rejected": -7.410069465637207,
+      "step": 200
+    },
+    {
+      "epoch": 0.53,
+      "grad_norm": 17.876619392703006,
+      "learning_rate": 2.717889356869146e-07,
+      "logits/chosen": 2.075894832611084,
+      "logits/rejected": 3.812873363494873,
+      "logps/chosen": -664.9110717773438,
+      "logps/rejected": -898.7711791992188,
+      "loss": 0.2335,
+      "rewards/accuracies": 0.90625,
+      "rewards/chosen": -3.6399245262145996,
+      "rewards/margins": 3.0123069286346436,
+      "rewards/rejected": -6.652230739593506,
+      "step": 210
+    },
+    {
+      "epoch": 0.55,
+      "grad_norm": 16.42311250323521,
+      "learning_rate": 2.5e-07,
+      "logits/chosen": 2.077141523361206,
+      "logits/rejected": 4.0336527824401855,
+      "logps/chosen": -714.0510864257812,
+      "logps/rejected": -951.3372802734375,
+      "loss": 0.2163,
+      "rewards/accuracies": 0.893750011920929,
+      "rewards/chosen": -4.000287055969238,
+      "rewards/margins": 3.160945415496826,
+      "rewards/rejected": -7.161231994628906,
+      "step": 220
+    },
+    {
+      "epoch": 0.57,
+      "grad_norm": 21.86260854020408,
+      "learning_rate": 2.2821106431308543e-07,
+      "logits/chosen": 1.8970081806182861,
+      "logits/rejected": 3.8517441749572754,
+      "logps/chosen": -711.104248046875,
+      "logps/rejected": -952.9786987304688,
+      "loss": 0.2307,
+      "rewards/accuracies": 0.875,
+      "rewards/chosen": -4.102777481079102,
+      "rewards/margins": 3.2044379711151123,
+      "rewards/rejected": -7.307215213775635,
+      "step": 230
+    },
+    {
+      "epoch": 0.6,
+      "grad_norm": 17.415535830140726,
+      "learning_rate": 2.065879555832674e-07,
+      "logits/chosen": 1.93063485622406,
+      "logits/rejected": 3.716691255569458,
+      "logps/chosen": -733.0238037109375,
+      "logps/rejected": -995.0330200195312,
+      "loss": 0.2135,
+      "rewards/accuracies": 0.8968750238418579,
+      "rewards/chosen": -4.205197811126709,
+      "rewards/margins": 3.2796833515167236,
+      "rewards/rejected": -7.4848809242248535,
+      "step": 240
+    },
+    {
+      "epoch": 0.62,
+      "grad_norm": 23.48694643420195,
+      "learning_rate": 1.8529523872436977e-07,
+      "logits/chosen": 1.754500150680542,
+      "logits/rejected": 3.7942306995391846,
+      "logps/chosen": -756.137939453125,
+      "logps/rejected": -1002.9494018554688,
+      "loss": 0.2284,
+      "rewards/accuracies": 0.887499988079071,
+      "rewards/chosen": -4.309305667877197,
+      "rewards/margins": 3.231421947479248,
+      "rewards/rejected": -7.5407280921936035,
+      "step": 250
+    },
+    {
+      "epoch": 0.65,
+      "grad_norm": 16.97115932824073,
+      "learning_rate": 1.6449496416858282e-07,
+      "logits/chosen": 2.1520519256591797,
+      "logits/rejected": 4.258932590484619,
+      "logps/chosen": -750.8233642578125,
+      "logps/rejected": -1022.2374267578125,
+      "loss": 0.2136,
+      "rewards/accuracies": 0.8999999761581421,
+      "rewards/chosen": -4.413332939147949,
+      "rewards/margins": 3.5410499572753906,
+      "rewards/rejected": -7.954381465911865,
+      "step": 260
+    },
+    {
+      "epoch": 0.68,
+      "grad_norm": 20.50346815073402,
+      "learning_rate": 1.4434543456482518e-07,
+      "logits/chosen": 1.4445512294769287,
+      "logits/rejected": 3.574235439300537,
+      "logps/chosen": -682.8297729492188,
+      "logps/rejected": -979.3341674804688,
+      "loss": 0.1978,
+      "rewards/accuracies": 0.9156249761581421,
+      "rewards/chosen": -3.6296730041503906,
+      "rewards/margins": 3.7298316955566406,
+      "rewards/rejected": -7.359505653381348,
+      "step": 270
+    },
+    {
+      "epoch": 0.7,
+      "grad_norm": 16.82073433691609,
+      "learning_rate": 1.2500000000000005e-07,
+      "logits/chosen": 1.172753930091858,
+      "logits/rejected": 3.4942619800567627,
+      "logps/chosen": -675.3418579101562,
+      "logps/rejected": -974.0270385742188,
+      "loss": 0.2189,
+      "rewards/accuracies": 0.934374988079071,
+      "rewards/chosen": -3.6095943450927734,
+      "rewards/margins": 3.8261497020721436,
+      "rewards/rejected": -7.435744285583496,
+      "step": 280
+    },
+    {
+      "epoch": 0.72,
+      "grad_norm": 27.148115499609514,
+      "learning_rate": 1.0660589091223854e-07,
+      "logits/chosen": 1.3045436143875122,
+      "logits/rejected": 3.874147891998291,
+      "logps/chosen": -724.1644287109375,
+      "logps/rejected": -1020.5606689453125,
+      "loss": 0.2159,
+      "rewards/accuracies": 0.918749988079071,
+      "rewards/chosen": -3.9626336097717285,
+      "rewards/margins": 3.8746466636657715,
+      "rewards/rejected": -7.837281227111816,
+      "step": 290
+    },
+    {
+      "epoch": 0.75,
+      "grad_norm": 18.896563678409045,
+      "learning_rate": 8.930309757836516e-08,
+      "logits/chosen": 1.5070204734802246,
+      "logits/rejected": 3.8179619312286377,
+      "logps/chosen": -756.2379150390625,
+      "logps/rejected": -1010.8900146484375,
+      "loss": 0.2118,
+      "rewards/accuracies": 0.90625,
+      "rewards/chosen": -4.342662811279297,
+      "rewards/margins": 3.381243944168091,
+      "rewards/rejected": -7.72390604019165,
+      "step": 300
+    },
+    {
+      "epoch": 0.78,
+      "grad_norm": 18.302278636631012,
+      "learning_rate": 7.322330470336313e-08,
+      "logits/chosen": 2.013995885848999,
+      "logits/rejected": 4.006863117218018,
+      "logps/chosen": -755.84326171875,
+      "logps/rejected": -1039.4290771484375,
+      "loss": 0.2007,
+      "rewards/accuracies": 0.925000011920929,
+      "rewards/chosen": -4.390773296356201,
+      "rewards/margins": 3.617499589920044,
+      "rewards/rejected": -8.008273124694824,
+      "step": 310
+    },
+    {
+      "epoch": 0.8,
+      "grad_norm": 19.269057964941258,
+      "learning_rate": 5.848888922025552e-08,
+      "logits/chosen": 1.4602447748184204,
+      "logits/rejected": 3.709857940673828,
+      "logps/chosen": -771.56640625,
+      "logps/rejected": -1046.9013671875,
+      "loss": 0.2211,
+      "rewards/accuracies": 0.903124988079071,
+      "rewards/chosen": -4.392203330993652,
+      "rewards/margins": 3.6757659912109375,
+      "rewards/rejected": -8.067970275878906,
+      "step": 320
+    },
+    {
+      "epoch": 0.82,
+      "grad_norm": 16.212116381856944,
+      "learning_rate": 4.521198892775202e-08,
+      "logits/chosen": 1.5877026319503784,
+      "logits/rejected": 3.5275306701660156,
+      "logps/chosen": -748.9736328125,
+      "logps/rejected": -1030.6241455078125,
+      "loss": 0.1902,
+      "rewards/accuracies": 0.909375011920929,
+      "rewards/chosen": -4.198099613189697,
+      "rewards/margins": 3.724585771560669,
+      "rewards/rejected": -7.922685146331787,
+      "step": 330
+    },
+    {
+      "epoch": 0.85,
+      "grad_norm": 23.048514547738275,
+      "learning_rate": 3.349364905389032e-08,
+      "logits/chosen": 1.2227389812469482,
+      "logits/rejected": 3.192277193069458,
+      "logps/chosen": -744.6568603515625,
+      "logps/rejected": -1009.3792724609375,
+      "loss": 0.2061,
+      "rewards/accuracies": 0.893750011920929,
+      "rewards/chosen": -4.285494804382324,
+      "rewards/margins": 3.405372142791748,
+      "rewards/rejected": -7.690866947174072,
+      "step": 340
+    },
+    {
+      "epoch": 0.88,
+      "grad_norm": 18.4900810885199,
+      "learning_rate": 2.3423053240837514e-08,
+      "logits/chosen": 1.2598426342010498,
+      "logits/rejected": 3.358072280883789,
+      "logps/chosen": -737.2872314453125,
+      "logps/rejected": -1013.8024291992188,
+      "loss": 0.2239,
+      "rewards/accuracies": 0.871874988079071,
+      "rewards/chosen": -4.232865333557129,
+      "rewards/margins": 3.510840892791748,
+      "rewards/rejected": -7.743706703186035,
+      "step": 350
+    },
+    {
+      "epoch": 0.9,
+      "grad_norm": 16.48305540217272,
+      "learning_rate": 1.507684480352292e-08,
+      "logits/chosen": 1.188299298286438,
+      "logits/rejected": 3.3616530895233154,
+      "logps/chosen": -726.2432861328125,
+      "logps/rejected": -1018.1263427734375,
+      "loss": 0.1958,
+      "rewards/accuracies": 0.90625,
+      "rewards/chosen": -4.075113773345947,
+      "rewards/margins": 3.6948330402374268,
+      "rewards/rejected": -7.769946098327637,
+      "step": 360
+    },
+    {
+      "epoch": 0.93,
+      "grad_norm": 20.27854050236199,
+      "learning_rate": 8.518543427732949e-09,
+      "logits/chosen": 1.1218559741973877,
+      "logits/rejected": 3.376429319381714,
+      "logps/chosen": -706.4921875,
+      "logps/rejected": -1009.96533203125,
+      "loss": 0.199,
+      "rewards/accuracies": 0.9156249761581421,
+      "rewards/chosen": -4.078927516937256,
+      "rewards/margins": 3.7843894958496094,
+      "rewards/rejected": -7.863317966461182,
+      "step": 370
+    },
+    {
+      "epoch": 0.95,
+      "grad_norm": 21.44365501017452,
+      "learning_rate": 3.798061746947995e-09,
+      "logits/chosen": 1.2741193771362305,
+      "logits/rejected": 3.5388190746307373,
+      "logps/chosen": -727.7400512695312,
+      "logps/rejected": -996.4246215820312,
+      "loss": 0.2143,
+      "rewards/accuracies": 0.9281250238418579,
+      "rewards/chosen": -4.139595985412598,
+      "rewards/margins": 3.5642218589782715,
+      "rewards/rejected": -7.703817844390869,
+      "step": 380
+    },
+    {
+      "epoch": 0.97,
+      "grad_norm": 19.980658149789488,
+      "learning_rate": 9.513254770636137e-10,
+      "logits/chosen": 1.3589586019515991,
+      "logits/rejected": 3.3931171894073486,
+      "logps/chosen": -751.2428588867188,
+      "logps/rejected": -1019.2142333984375,
+      "loss": 0.2117,
+      "rewards/accuracies": 0.893750011920929,
+      "rewards/chosen": -4.217888832092285,
+      "rewards/margins": 3.668452739715576,
+      "rewards/rejected": -7.8863420486450195,
+      "step": 390
+    },
+    {
+      "epoch": 1.0,
+      "grad_norm": 19.223012927780225,
+      "learning_rate": 0.0,
+      "logits/chosen": 1.3647325038909912,
+      "logits/rejected": 3.651308536529541,
+      "logps/chosen": -756.8760986328125,
+      "logps/rejected": -1033.67333984375,
+      "loss": 0.2003,
+      "rewards/accuracies": 0.9125000238418579,
+      "rewards/chosen": -4.446074485778809,
+      "rewards/margins": 3.6355972290039062,
+      "rewards/rejected": -8.081671714782715,
+      "step": 400
+    },
     {
       "epoch": 1.0,
-      "step":
+      "step": 400,
       "total_flos": 0.0,
-      "train_loss": 0.
-      "train_runtime":
-      "train_samples_per_second": 8.
-      "train_steps_per_second": 0.
+      "train_loss": 0.2785977178812027,
+      "train_runtime": 11929.9898,
+      "train_samples_per_second": 8.58,
+      "train_steps_per_second": 0.034
     }
   ],
   "logging_steps": 10,
-  "max_steps":
+  "max_steps": 400,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 100,
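Two details in the log are worth noting. The first logged loss, 0.6931 ≈ ln 2, is exactly what a DPO-style sigmoid loss gives when the reward margin is zero, and the learning_rate column is consistent with linear warmup over the first 40 steps (10% of max_steps = 400) to a peak of 5e-07, followed by cosine decay to zero. A minimal sketch that reproduces the logged values under that warmup-plus-cosine assumption:

```python
import math

PEAK_LR = 5e-07      # peak value logged at step 40
MAX_STEPS = 400      # max_steps recorded in trainer_state.json above
WARMUP_STEPS = 40    # inferred 10% linear warmup

def lr_at(step: int) -> float:
    """Linear warmup to PEAK_LR, then cosine decay to zero at MAX_STEPS."""
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (MAX_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(1))    # 1.25e-08, as logged at step 1
print(lr_at(50))   # ~4.990487e-07, as logged at step 50
print(lr_at(220))  # 2.5e-07, as logged at step 220
print(lr_at(400))  # 0.0, as logged at step 400
```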