Model save

Files changed:
- README.md (+3 -2)
- all_results.json (+5 -5)
- model-00001-of-00003.safetensors (+1 -1)
- model-00002-of-00003.safetensors (+1 -1)
- model-00003-of-00003.safetensors (+1 -1)
- runs/Jul01_23-19-09_n136-129-074/events.out.tfevents.1719847173.n136-129-074.2002672.0 (+2 -2)
- train_results.json (+5 -5)
- trainer_state.json (+534 -504)
README.md
CHANGED
@@ -1,4 +1,5 @@
 ---
+base_model: princeton-nlp/Mistral-7B-Base-SFT-SimPO
 tags:
 - trl
 - dpo
@@ -13,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # zephyr-7b-dpo-full
 
-This model
+This model is a fine-tuned version of [princeton-nlp/Mistral-7B-Base-SFT-SimPO](https://huggingface.co/princeton-nlp/Mistral-7B-Base-SFT-SimPO) on the None dataset.
 
 ## Model description
 
@@ -32,7 +33,7 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate:
+- learning_rate: 1e-07
 - train_batch_size: 4
 - eval_batch_size: 4
 - seed: 42
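The updated card fixes the base model, the learning rate, and the per-device batch sizes, and trainer_state.json below records logging_steps, save_steps, and num_train_epochs. Together that is enough for a minimal TRL sketch of the run. This is not the authors' actual training script: the preference dataset and the DPO beta are not recorded in this commit, so both are explicit placeholders below.

```python
# Minimal reproduction sketch, assuming a recent `trl`. The dataset and the
# DPO beta are NOT recorded in this commit; the dataset below is a stand-in
# and beta is left at TRL's default.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

base = "princeton-nlp/Mistral-7B-Base-SFT-SimPO"
model = AutoModelForCausalLM.from_pretrained(base)
tokenizer = AutoTokenizer.from_pretrained(base)

# Stand-in preference dataset (the card says "the None dataset").
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")

args = DPOConfig(
    output_dir="zephyr-7b-dpo-full",
    learning_rate=1e-07,            # model card
    per_device_train_batch_size=4,  # train_batch_size
    per_device_eval_batch_size=4,   # eval_batch_size
    seed=42,
    num_train_epochs=1,             # trainer_state.json
    logging_steps=10,               # trainer_state.json
    save_steps=100,                 # trainer_state.json
)

trainer = DPOTrainer(
    model=model,
    args=args,
    train_dataset=dataset,
    processing_class=tokenizer,  # `tokenizer=` on older trl releases
)
trainer.train()
```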
all_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "train_loss":
-    "train_runtime":
-    "train_samples":
-    "train_samples_per_second": 8.
-    "train_steps_per_second": 0.
+    "train_loss": 0.5863300847029632,
+    "train_runtime": 6843.0852,
+    "train_samples": 56236,
+    "train_samples_per_second": 8.218,
+    "train_steps_per_second": 0.064
 }
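The summary metrics are mutually consistent, and together with the step count from trainer_state.json they determine the effective global batch size: 56,236 samples in 439 optimizer steps is about 128 samples per step, well above the per-device batch size of 4, so the run presumably used data parallelism and/or gradient accumulation, neither of which is recorded here. train_results.json below repeats the same five values. A quick arithmetic check:

```python
# Cross-check the summary metrics (values copied from the files in this commit).
train_runtime = 6843.0852   # seconds
train_samples = 56236
global_step = 439           # from trainer_state.json

print(round(train_samples / train_runtime, 3))  # 8.218 -> train_samples_per_second
print(round(global_step / train_runtime, 3))    # 0.064 -> train_steps_per_second
print(train_samples / global_step)              # ~128.1 -> effective global batch size
```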
model-00001-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d085da4a857fb7f218891d9be471fb86e732b224ff99d833d19b15d462e4ce9d
 size 4943162336
model-00002-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:7a5dbc0fe2ece97438d30fd6b0f0e3d1f3e734e6156b8e9d5f3c15a5dcaa7cf0
 size 4999819336
model-00003-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:220289bd03f2c16bc923749307f2d3eab7817266cb64a00415b0df5008ec88f8
 size 4540516344
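The .safetensors entries above are Git LFS pointer files rather than the weights themselves: each stores the LFS spec version, the SHA-256 of the real blob, and its size in bytes. In this commit only the oid hashes changed while every size stayed the same, so the re-saved shards have exactly the same byte length as before. The three shard sizes sum to 14,483,498,016 bytes (about 14.5 GB), consistent with a roughly 7B-parameter model stored in 16-bit precision. A small sketch of parsing this pointer format (an illustration, not an official git-lfs tool):

```python
# Parse a Git LFS pointer file of the form shown above (version / oid / size).
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:d085da4a857fb7f218891d9be471fb86e732b224ff99d833d19b15d462e4ce9d
size 4943162336
"""
print(parse_lfs_pointer(pointer))

# The shard sizes in this commit sum to ~14.5 GB:
print(4943162336 + 4999819336 + 4540516344)  # 14483498016 bytes
```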
runs/Jul01_23-19-09_n136-129-074/events.out.tfevents.1719847173.n136-129-074.2002672.0
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ca5bc1674e38eb08c326a1eb43f08b14e751071a2de1f6aeb0483afae3355ad5
+size 35395
train_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "train_loss":
-    "train_runtime":
-    "train_samples":
-    "train_samples_per_second": 8.
-    "train_steps_per_second": 0.
+    "train_loss": 0.5863300847029632,
+    "train_runtime": 6843.0852,
+    "train_samples": 56236,
+    "train_samples_per_second": 8.218,
+    "train_steps_per_second": 0.064
 }
trainer_state.json
CHANGED
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9988623435722411,
  "eval_steps": 10000000,
  "global_step": 439,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.0, "grad_norm": 22.78893022336454, "learning_rate": 2.2727272727272727e-09, "logits/chosen": -1.6768856048583984, "logits/rejected": -1.7259055376052856, "logps/chosen": -1.2793102264404297, "logps/rejected": -1.2162058353424072, "loss": 0.6931, "rewards/accuracies": 0.0, "rewards/chosen": 0.0, "rewards/margins": 0.0, "rewards/rejected": 0.0, "step": 0},
    {"epoch": 0.02, "grad_norm": 25.567365971420195, "learning_rate": 2.2727272727272725e-08, "logits/chosen": -1.7031302452087402, "logits/rejected": -1.6688512563705444, "logps/chosen": -1.213205337524414, "logps/rejected": -1.220165729522705, "loss": 0.693, "rewards/accuracies": 0.4513888955116272, "rewards/chosen": 0.0002006387512665242, "rewards/margins": -0.0009609226835891604, "rewards/rejected": 0.001161561463959515, "step": 10},
    {"epoch": 0.05, "grad_norm": 26.2968319036787, "learning_rate": 4.545454545454545e-08, "logits/chosen": -1.7800958156585693, "logits/rejected": -1.7349421977996826, "logps/chosen": -1.1448661088943481, "logps/rejected": -1.185571312904358, "loss": 0.6924, "rewards/accuracies": 0.5062500238418579, "rewards/chosen": -0.0032871514558792114, "rewards/margins": 0.0009361729025840759, "rewards/rejected": -0.004223324358463287, "step": 20},
    {"epoch": 0.07, "grad_norm": 28.64228481569523, "learning_rate": 6.818181818181817e-08, "logits/chosen": -1.744109869003296, "logits/rejected": -1.6754045486450195, "logps/chosen": -1.195277452468872, "logps/rejected": -1.2481118440628052, "loss": 0.6883, "rewards/accuracies": 0.6187499761581421, "rewards/chosen": -0.02475227788090706, "rewards/margins": 0.011208651587367058, "rewards/rejected": -0.035960931330919266, "step": 30},
    {"epoch": 0.09, "grad_norm": 19.497764912088467, "learning_rate": 9.09090909090909e-08, "logits/chosen": -1.7310603857040405, "logits/rejected": -1.6648972034454346, "logps/chosen": -1.2513717412948608, "logps/rejected": -1.3350750207901, "loss": 0.6798, "rewards/accuracies": 0.6812499761581421, "rewards/chosen": -0.08884630352258682, "rewards/margins": 0.06275957077741623, "rewards/rejected": -0.15160587430000305, "step": 40},
    {"epoch": 0.11, "grad_norm": 24.1777149048992, "learning_rate": 9.994307990108962e-08, "logits/chosen": -1.6923043727874756, "logits/rejected": -1.6273339986801147, "logps/chosen": -1.2986948490142822, "logps/rejected": -1.356567144393921, "loss": 0.6636, "rewards/accuracies": 0.71875, "rewards/chosen": -0.18121571838855743, "rewards/margins": 0.0755590870976448, "rewards/rejected": -0.25677481293678284, "step": 50},
    {"epoch": 0.14, "grad_norm": 28.137865307641825, "learning_rate": 9.959570405988094e-08, "logits/chosen": -1.7212276458740234, "logits/rejected": -1.6404285430908203, "logps/chosen": -1.2889435291290283, "logps/rejected": -1.3794549703598022, "loss": 0.6598, "rewards/accuracies": 0.643750011920929, "rewards/chosen": -0.35969024896621704, "rewards/margins": 0.07716657221317291, "rewards/rejected": -0.4368568956851959, "step": 60},
    {"epoch": 0.16, "grad_norm": 21.253003888204287, "learning_rate": 9.893476820924666e-08, "logits/chosen": -1.8007967472076416, "logits/rejected": -1.7120532989501953, "logps/chosen": -1.476588487625122, "logps/rejected": -1.5963420867919922, "loss": 0.6499, "rewards/accuracies": 0.6937500238418579, "rewards/chosen": -0.5274931192398071, "rewards/margins": 0.14617758989334106, "rewards/rejected": -0.6736707091331482, "step": 70},
    {"epoch": 0.18, "grad_norm": 26.777703545743453, "learning_rate": 9.796445099843647e-08, "logits/chosen": -1.7857911586761475, "logits/rejected": -1.6999902725219727, "logps/chosen": -1.5350762605667114, "logps/rejected": -1.6896966695785522, "loss": 0.654, "rewards/accuracies": 0.668749988079071, "rewards/chosen": -0.6562157869338989, "rewards/margins": 0.18818075954914093, "rewards/rejected": -0.8443965911865234, "step": 80},
    {"epoch": 0.2, "grad_norm": 25.00309177460346, "learning_rate": 9.669088708527066e-08, "logits/chosen": -1.7340404987335205, "logits/rejected": -1.6651910543441772, "logps/chosen": -1.6042931079864502, "logps/rejected": -1.6743539571762085, "loss": 0.6399, "rewards/accuracies": 0.637499988079071, "rewards/chosen": -0.737191379070282, "rewards/margins": 0.12603236734867096, "rewards/rejected": -0.8632237315177917, "step": 90},
    {"epoch": 0.23, "grad_norm": 27.969594098965942, "learning_rate": 9.512212835085849e-08, "logits/chosen": -1.772202730178833, "logits/rejected": -1.6827232837677002, "logps/chosen": -1.5563361644744873, "logps/rejected": -1.69924795627594, "loss": 0.6231, "rewards/accuracies": 0.699999988079071, "rewards/chosen": -0.7757617831230164, "rewards/margins": 0.20040392875671387, "rewards/rejected": -0.9761656522750854, "step": 100},
    {"epoch": 0.25, "grad_norm": 25.83218161515289, "learning_rate": 9.326809299301306e-08, "logits/chosen": -1.771267294883728, "logits/rejected": -1.6669820547103882, "logps/chosen": -1.600318193435669, "logps/rejected": -1.7721306085586548, "loss": 0.6211, "rewards/accuracies": 0.7124999761581421, "rewards/chosen": -0.7993522882461548, "rewards/margins": 0.24041876196861267, "rewards/rejected": -1.0397710800170898, "step": 110},
    {"epoch": 0.27, "grad_norm": 26.667996957961257, "learning_rate": 9.114050282021158e-08, "logits/chosen": -1.767559289932251, "logits/rejected": -1.7058799266815186, "logps/chosen": -1.565065622329712, "logps/rejected": -1.7299690246582031, "loss": 0.6144, "rewards/accuracies": 0.731249988079071, "rewards/chosen": -0.8350059390068054, "rewards/margins": 0.2350219041109085, "rewards/rejected": -1.0700278282165527, "step": 120},
    {"epoch": 0.3, "grad_norm": 25.330536994335382, "learning_rate": 8.875280914254802e-08, "logits/chosen": -1.765298843383789, "logits/rejected": -1.6749998331069946, "logps/chosen": -1.7646329402923584, "logps/rejected": -1.9669040441513062, "loss": 0.5993, "rewards/accuracies": 0.675000011920929, "rewards/chosen": -1.1436058282852173, "rewards/margins": 0.34769219160079956, "rewards/rejected": -1.491297960281372, "step": 130},
    {"epoch": 0.32, "grad_norm": 24.002049854639584, "learning_rate": 8.612010772821971e-08, "logits/chosen": -1.7989845275878906, "logits/rejected": -1.7554515600204468, "logps/chosen": -1.8291784524917603, "logps/rejected": -1.9761606454849243, "loss": 0.5991, "rewards/accuracies": 0.731249988079071, "rewards/chosen": -1.2305986881256104, "rewards/margins": 0.3437841534614563, "rewards/rejected": -1.5743829011917114, "step": 140},
    {"epoch": 0.34, "grad_norm": 28.7685379938573, "learning_rate": 8.325904336322055e-08, "logits/chosen": -1.777856469154358, "logits/rejected": -1.7238337993621826, "logps/chosen": -1.9271425008773804, "logps/rejected": -2.141960859298706, "loss": 0.6082, "rewards/accuracies": 0.6875, "rewards/chosen": -1.5195552110671997, "rewards/margins": 0.3352898359298706, "rewards/rejected": -1.8548450469970703, "step": 150},
    {"epoch": 0.36, "grad_norm": 22.36425985091183, "learning_rate": 8.01877046176447e-08, "logits/chosen": -1.72470223903656, "logits/rejected": -1.6554687023162842, "logps/chosen": -1.9798545837402344, "logps/rejected": -2.1781439781188965, "loss": 0.5901, "rewards/accuracies": 0.675000011920929, "rewards/chosen": -1.6031091213226318, "rewards/margins": 0.29380664229393005, "rewards/rejected": -1.8969157934188843, "step": 160},
    {"epoch": 0.39, "grad_norm": 19.908864367961982, "learning_rate": 7.692550948392249e-08, "logits/chosen": -1.7723356485366821, "logits/rejected": -1.7198549509048462, "logps/chosen": -1.9558120965957642, "logps/rejected": -2.159667491912842, "loss": 0.5921, "rewards/accuracies": 0.706250011920929, "rewards/chosen": -1.4911130666732788, "rewards/margins": 0.3868214190006256, "rewards/rejected": -1.877934217453003, "step": 170},
    {"epoch": 0.41, "grad_norm": 31.864529340507623, "learning_rate": 7.349308261002021e-08, "logits/chosen": -1.7342097759246826, "logits/rejected": -1.6827507019042969, "logps/chosen": -1.9941844940185547, "logps/rejected": -2.22194504737854, "loss": 0.5855, "rewards/accuracies": 0.706250011920929, "rewards/chosen": -1.5941965579986572, "rewards/margins": 0.35622045397758484, "rewards/rejected": -1.950416922569275, "step": 180},
    {"epoch": 0.43, "grad_norm": 24.691918932601595, "learning_rate": 6.991212490377531e-08, "logits/chosen": -1.7905107736587524, "logits/rejected": -1.7474826574325562, "logps/chosen": -2.10019850730896, "logps/rejected": -2.3696181774139404, "loss": 0.5604, "rewards/accuracies": 0.75, "rewards/chosen": -1.740191102027893, "rewards/margins": 0.4957551956176758, "rewards/rejected": -2.2359461784362793, "step": 190},
    {"epoch": 0.46, "grad_norm": 30.11763021258028, "learning_rate": 6.620527633276978e-08, "logits/chosen": -1.7320966720581055, "logits/rejected": -1.6665375232696533, "logps/chosen": -2.161935329437256, "logps/rejected": -2.522731304168701, "loss": 0.5808, "rewards/accuracies": 0.7250000238418579, "rewards/chosen": -1.9349641799926758, "rewards/margins": 0.5480148792266846, "rewards/rejected": -2.4829792976379395, "step": 200},
    {"epoch": 0.48, "grad_norm": 27.459036499855436, "learning_rate": 6.239597278716581e-08, "logits/chosen": -1.7859094142913818, "logits/rejected": -1.7306747436523438, "logps/chosen": -2.2954204082489014, "logps/rejected": -2.540391445159912, "loss": 0.5619, "rewards/accuracies": 0.7250000238418579, "rewards/chosen": -2.1001808643341064, "rewards/margins": 0.5312689542770386, "rewards/rejected": -2.6314499378204346, "step": 210},
    {"epoch": 0.5, "grad_norm": 26.562029580561404, "learning_rate": 5.8508297910462456e-08, "logits/chosen": -1.7313369512557983, "logits/rejected": -1.6572465896606445, "logps/chosen": -2.212656021118164, "logps/rejected": -2.5422561168670654, "loss": 0.5673, "rewards/accuracies": 0.699999988079071, "rewards/chosen": -2.0982813835144043, "rewards/margins": 0.5436533689498901, "rewards/rejected": -2.641934871673584, "step": 220},
    {"epoch": 0.52, "grad_norm": 26.043734197401424, "learning_rate": 5.456683083494731e-08, "logits/chosen": -1.7218725681304932, "logits/rejected": -1.6862335205078125, "logps/chosen": -2.1797163486480713, "logps/rejected": -2.389193058013916, "loss": 0.5843, "rewards/accuracies": 0.65625, "rewards/chosen": -1.9718148708343506, "rewards/margins": 0.37147068977355957, "rewards/rejected": -2.34328556060791, "step": 230},
    {"epoch": 0.55, "grad_norm": 33.60349201324705, "learning_rate": 5.059649078450834e-08, "logits/chosen": -1.7049477100372314, "logits/rejected": -1.6663001775741577, "logps/chosen": -2.2287259101867676, "logps/rejected": -2.5061872005462646, "loss": 0.5527, "rewards/accuracies": 0.6625000238418579, "rewards/chosen": -2.120744466781616, "rewards/margins": 0.4308454990386963, "rewards/rejected": -2.5515899658203125, "step": 240},
    {"epoch": 0.57, "grad_norm": 24.721682812448297, "learning_rate": 4.6622379527277186e-08, "logits/chosen": -1.716957688331604, "logits/rejected": -1.6692520380020142, "logps/chosen": -2.323068857192993, "logps/rejected": -2.551687240600586, "loss": 0.5645, "rewards/accuracies": 0.625, "rewards/chosen": -2.3683600425720215, "rewards/margins": 0.3910773694515228, "rewards/rejected": -2.759437322616577, "step": 250},
    {"epoch": 0.59, "grad_norm": 28.965518895994943, "learning_rate": 4.26696226741691e-08, "logits/chosen": -1.731431007385254, "logits/rejected": -1.6687753200531006, "logps/chosen": -2.4228413105010986, "logps/rejected": -2.701592206954956, "loss": 0.5647, "rewards/accuracies": 0.6499999761581421, "rewards/chosen": -2.380035877227783, "rewards/margins": 0.5175679922103882, "rewards/rejected": -2.897603750228882, "step": 260},
    {"epoch": 0.61, "grad_norm": 24.276560436452733, "learning_rate": 3.876321082668098e-08, "logits/chosen": -1.7877088785171509, "logits/rejected": -1.7258117198944092, "logps/chosen": -2.3104138374328613, "logps/rejected": -2.597568988800049, "loss": 0.5577, "rewards/accuracies": 0.7250000238418579, "rewards/chosen": -2.143846035003662, "rewards/margins": 0.5556932091712952, "rewards/rejected": -2.6995394229888916, "step": 270},
    {"epoch": 0.64, "grad_norm": 26.01398581499373, "learning_rate": 3.492784157826244e-08, "logits/chosen": -1.7255363464355469, "logits/rejected": -1.6368858814239502, "logps/chosen": -2.307375431060791, "logps/rejected": -2.6467177867889404, "loss": 0.5569, "rewards/accuracies": 0.71875, "rewards/chosen": -2.1689984798431396, "rewards/margins": 0.6079045534133911, "rewards/rejected": -2.7769031524658203, "step": 280},
    {"epoch": 0.66, "grad_norm": 22.449000475495634, "learning_rate": 3.118776336817812e-08, "logits/chosen": -1.7589473724365234, "logits/rejected": -1.7079540491104126, "logps/chosen": -2.2426087856292725, "logps/rejected": -2.575594425201416, "loss": 0.5461, "rewards/accuracies": 0.731249988079071, "rewards/chosen": -2.0787200927734375, "rewards/margins": 0.6515394449234009, "rewards/rejected": -2.730259656906128, "step": 290},
    {"epoch": 0.68, "grad_norm": 23.916342153040954, "learning_rate": 2.7566622175067443e-08, "logits/chosen": -1.7405074834823608, "logits/rejected": -1.6836473941802979, "logps/chosen": -2.341914176940918, "logps/rejected": -2.724348545074463, "loss": 0.5507, "rewards/accuracies": 0.7562500238418579, "rewards/chosen": -2.2666609287261963, "rewards/margins": 0.6088961958885193, "rewards/rejected": -2.875556707382202, "step": 300},
    {"epoch": 0.71, "grad_norm": 28.87016818276154, "learning_rate": 2.408731201945432e-08, "logits/chosen": -1.738867998123169, "logits/rejected": -1.6946017742156982, "logps/chosen": -2.2607665061950684, "logps/rejected": -2.508131742477417, "loss": 0.5549, "rewards/accuracies": 0.6812499761581421, "rewards/chosen": -2.133742332458496, "rewards/margins": 0.45442089438438416, "rewards/rejected": -2.5881636142730713, "step": 310},
    {"epoch": 0.73, "grad_norm": 26.474891102018546, "learning_rate": 2.0771830220378112e-08, "logits/chosen": -1.6925548315048218, "logits/rejected": -1.6349446773529053, "logps/chosen": -2.292504072189331, "logps/rejected": -2.5281872749328613, "loss": 0.5545, "rewards/accuracies": 0.625, "rewards/chosen": -2.139239549636841, "rewards/margins": 0.48706698417663574, "rewards/rejected": -2.6263065338134766, "step": 320},
    {"epoch": 0.75, "grad_norm": 26.153240463475917, "learning_rate": 1.7641138321260257e-08, "logits/chosen": -1.7273342609405518, "logits/rejected": -1.6638519763946533, "logps/chosen": -2.218357563018799, "logps/rejected": -2.6145200729370117, "loss": 0.5485, "rewards/accuracies": 0.824999988079071, "rewards/chosen": -2.069005250930786, "rewards/margins": 0.7372555136680603, "rewards/rejected": -2.806260585784912, "step": 330},
    {"epoch": 0.77, "grad_norm": 26.27728864294511, "learning_rate": 1.4715029564277793e-08, "logits/chosen": -1.7901878356933594, "logits/rejected": -1.7485193014144897, "logps/chosen": -2.2015347480773926, "logps/rejected": -2.5572023391723633, "loss": 0.5577, "rewards/accuracies": 0.7562500238418579, "rewards/chosen": -1.9937489032745361, "rewards/margins": 0.637208104133606, "rewards/rejected": -2.6309566497802734, "step": 340},
    {"epoch": 0.8, "grad_norm": 26.035613152091976, "learning_rate": 1.2012003751113343e-08, "logits/chosen": -1.7805286645889282, "logits/rejected": -1.7256838083267212, "logps/chosen": -2.3138480186462402, "logps/rejected": -2.6945948600769043, "loss": 0.536, "rewards/accuracies": 0.71875, "rewards/chosen": -2.3033559322357178, "rewards/margins": 0.6611131429672241, "rewards/rejected": -2.9644687175750732, "step": 350},
    {"epoch": 0.82, "grad_norm": 31.4301067051425, "learning_rate": 9.549150281252633e-09, "logits/chosen": -1.7304834127426147, "logits/rejected": -1.6880794763565063, "logps/chosen": -2.2502734661102295, "logps/rejected": -2.593324661254883, "loss": 0.54, "rewards/accuracies": 0.6937500238418579, "rewards/chosen": -2.1562764644622803, "rewards/margins": 0.5826417207717896, "rewards/rejected": -2.7389183044433594, "step": 360},
    {"epoch": 0.84, "grad_norm": 25.13509986712804, "learning_rate": 7.3420401072985306e-09, "logits/chosen": -1.7723455429077148, "logits/rejected": -1.721980094909668, "logps/chosen": -2.3107597827911377, "logps/rejected": -2.679028272628784, "loss": 0.5374, "rewards/accuracies": 0.6875, "rewards/chosen": -2.1922922134399414, "rewards/margins": 0.6216103434562683, "rewards/rejected": -2.8139023780822754, "step": 370},
    {"epoch": 0.86, "grad_norm": 28.235691242335875, "learning_rate": 5.404627290395369e-09, "logits/chosen": -1.733109712600708, "logits/rejected": -1.6734564304351807, "logps/chosen": -2.241391897201538, "logps/rejected": -2.590752363204956, "loss": 0.5365, "rewards/accuracies": 0.7437499761581421, "rewards/chosen": -2.103571653366089, "rewards/margins": 0.6187294721603394, "rewards/rejected": -2.7223010063171387, "step": 380},
    {"epoch": 0.89, "grad_norm": 33.12956908569685, "learning_rate": 3.74916077816162e-09, "logits/chosen": -1.7393004894256592, "logits/rejected": -1.6843922138214111, "logps/chosen": -2.267721652984619, "logps/rejected": -2.5625317096710205, "loss": 0.5513, "rewards/accuracies": 0.6875, "rewards/chosen": -2.235853672027588, "rewards/margins": 0.5573619604110718, "rewards/rejected": -2.7932159900665283, "step": 390},
    {"epoch": 0.91, "grad_norm": 28.33250387056753, "learning_rate": 2.386106962899165e-09, "logits/chosen": -1.666548490524292, "logits/rejected": -1.6048628091812134, "logps/chosen": -2.401040554046631, "logps/rejected": -2.733664035797119, "loss": 0.5422, "rewards/accuracies": 0.706250011920929, "rewards/chosen": -2.349238872528076, "rewards/margins": 0.5525677800178528, "rewards/rejected": -2.901806592941284, "step": 400},
    {"epoch": 0.93, "grad_norm": 31.32317842504756, "learning_rate": 1.3240835096913706e-09, "logits/chosen": -1.6938998699188232, "logits/rejected": -1.6031659841537476, "logps/chosen": -2.251462459564209, "logps/rejected": -2.6835711002349854, "loss": 0.5597, "rewards/accuracies": 0.7124999761581421, "rewards/chosen": -2.1598236560821533, "rewards/margins": 0.7253878116607666, "rewards/rejected": -2.885211706161499, "step": 410},
    {"epoch": 0.96, "grad_norm": 23.813879321456167, "learning_rate": 5.698048727497462e-10, "logits/chosen": -1.7287553548812866, "logits/rejected": -1.6634016036987305, "logps/chosen": -2.358034610748291, "logps/rejected": -2.7596287727355957, "loss": 0.5431, "rewards/accuracies": 0.731249988079071, "rewards/chosen": -2.3334739208221436, "rewards/margins": 0.669273853302002, "rewards/rejected": -3.0027480125427246, "step": 420},
    {"epoch": 0.98, "grad_norm": 24.13495374841747, "learning_rate": 1.2803984447259387e-10, "logits/chosen": -1.7348169088363647, "logits/rejected": -1.6923096179962158, "logps/chosen": -2.339822769165039, "logps/rejected": -2.7242085933685303, "loss": 0.5338, "rewards/accuracies": 0.7250000238418579, "rewards/chosen": -2.314966917037964, "rewards/margins": 0.6734664440155029, "rewards/rejected": -2.9884331226348877, "step": 430},
    {"epoch": 1.0, "step": 439, "total_flos": 0.0, "train_loss": 0.5863300847029632, "train_runtime": 6843.0852, "train_samples_per_second": 8.218, "train_steps_per_second": 0.064}
  ],
  "logging_steps": 10,
  "max_steps": 439,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
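The log is internally consistent in a few ways worth noting: rewards/margins always equals rewards/chosen minus rewards/rejected; the step-0 loss of 0.6931 is ln 2, which is exactly what a DPO-style loss of -log(sigmoid(reward margin)) gives while the policy still matches its reference model (margin 0); and the learning_rate column is consistent with linear warmup to the card's 1e-07 over the first ~44 steps followed by cosine decay toward zero. A minimal check against the step-100 record:

```python
import math

# Step-100 record from log_history above.
rec = {
    "rewards/chosen": -0.7757617831230164,
    "rewards/margins": 0.20040392875671387,
    "rewards/rejected": -0.9761656522750854,
}

# margins = chosen - rejected (up to float32 logging precision).
assert math.isclose(rec["rewards/chosen"] - rec["rewards/rejected"],
                    rec["rewards/margins"], rel_tol=1e-5)

# At step 0 all rewards are 0, so -log(sigmoid(0)) = ln 2.
print(math.log(2))  # 0.6931..., matching the recorded step-0 loss
```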